diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index 9de9813..1462492 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -3,9 +3,11 @@
 *.bc
 *.bin
 *.bz2
+*.c.[012]*.*
 *.cis
 *.cpio
 *.csp
+*.dbg
 *.dsp
 *.dvi
 *.elf
@@ -15,6 +17,7 @@
 *.gcov
 *.gen.S
 *.gif
+*.gmo
 *.grep
 *.grp
 *.gz
@@ -51,14 +54,17 @@
 *.tab.h
 *.tex
 *.ver
+*.vim
 *.xml
 *.xz
 *_MODULES
+*_reg_safe.h
 *_vga16.c
 *~
 \#*#
 *.9
-.*
+.[^g]*
+.gen*
 .*.d
 .mm
 53c700_d.h
@@ -72,9 +78,11 @@ Image
 Module.markers
 Module.symvers
 PENDING
+PERF*
 SCCS
 System.map*
 TAGS
+TRACEEVENT-CFLAGS
 aconf
 af_names.h
 aic7*reg.h*
@@ -83,6 +91,7 @@ aic7*seq.h*
 aicasm
 aicdb.h*
 altivec*.c
+ashldi3.S
 asm-offsets.h
 asm_offsets.h
 autoconf.h*
@@ -95,32 +104,40 @@ bounds.h
 bsetup
 btfixupprep
 build
+builtin-policy.h
 bvmlinux
 bzImage*
 capability_names.h
 capflags.c
 classlist.h*
+clut_vga16.c
+common-cmds.h
 comp*.log
 compile.h*
 conf
 config
 config-*
 config_data.h*
+config.c
 config.mak
 config.mak.autogen
+config.tmp
 conmakehash
 consolemap_deftbl.c*
 cpustr.h
 crc32table.h*
 cscope.*
 defkeymap.c
+devicetable-offsets.h
 devlist.h*
 dnotify_test
 docproc
 dslm
+dtc-lexer.lex.c
 elf2ecoff
 elfconfig.h*
 evergreen_reg_safe.h
+exception_policy.conf
 fixdep
 flask.h
 fore200e_mkfirm
@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
 gconf
 gconf.glade.h
 gen-devlist
+gen-kdb_cmds.c
 gen_crc32table
 gen_init_cpio
 generated
 genheaders
 genksyms
 *_gray256.c
+hash
+hid-example
 hpet_example
 hugepage-mmap
 hugepage-shm
@@ -148,14 +168,14 @@ int32.c
 int4.c
 int8.c
 kallsyms
-kconfig
+kern_constants.h
 keywords.c
 ksym.c*
 ksym.h*
 kxgettext
 lex.c
 lex.*.c
-linux
+lib1funcs.S
 logo_*.c
 logo_*_clut224.c
 logo_*_mono.c
@@ -165,14 +185,15 @@ mach-types.h
 machtypes.h
 map
 map_hugetlb
-media
 mconf
+mdp
 miboot*
 mk_elfconfig
 mkboot
 mkbugboot
 mkcpustr
 mkdep
+mkpiggy
 mkprep
 mkregtable
 mktables
@@ -188,6 +209,8 @@ oui.c*
 page-types
 parse.c
 parse.h
+parse-events*
+pasyms.h
 patches*
 pca200e.bin
 pca200e_ecd.bin2
@@ -197,6 +220,7 @@ perf-archive
 piggyback
 piggy.gzip
 piggy.S
+pmu-*
 pnmtologo
 ppc_defs.h*
 pss_boot.h
@@ -206,7 +230,12 @@ r200_reg_safe.h
 r300_reg_safe.h
 r420_reg_safe.h
 r600_reg_safe.h
+randomize_layout_hash.h
+randomize_layout_seed.h
+realmode.lds
+realmode.relocs
 recordmcount
+regdb.c
 relocs
 rlim_names.h
 rn50_reg_safe.h
@@ -216,8 +245,12 @@ series
 setup
 setup.bin
 setup.elf
+signing_key*
+size_overflow_hash.h
 sImage
+slabinfo
 sm_tbl*
+sortextable
 split-include
 syscalltab.h
 tables.c
@@ -227,6 +260,7 @@ tftpboot.img
 timeconst.h
 times.h*
 trix_boot.h
+user_constants.h
 utsrelease.h*
 vdso-syms.lds
 vdso.lds
@@ -238,13 +272,17 @@ vdso32.lds
 vdso32.so.dbg
 vdso64.lds
 vdso64.so.dbg
+vdsox32.lds
+vdsox32-syms.lds
 version.h*
 vmImage
 vmlinux
 vmlinux-*
 vmlinux.aout
 vmlinux.bin.all
+vmlinux.bin.bz2
 vmlinux.lds
+vmlinux.relocs
 vmlinuz
 voffset.h
 vsyscall.lds
@@ -252,9 +290,12 @@ vsyscall_32.lds
 wanxlfw.inc
 uImage
 unifdef
+utsrelease.h
 wakeup.bin
 wakeup.elf
 wakeup.lds
+x509*
 zImage*
 zconf.hash.c
+zconf.lex.c
 zoffset.h
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index b7fa2f5..90cd9f8 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1138,6 +1138,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
 Default: 1024

+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
+ ignore grsecurity's /proc restrictions
+
+
 hashdist= [KNL,NUMA] Large hashes allocated during boot
 are distributed across NUMA nodes. Defaults on
 for 64-bit NUMA, off otherwise.
@@ -2141,6 +2145,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 noexec=on: enable non-executable mappings (default)
 noexec=off: disable non-executable mappings

+ nopcid [X86-64]
+ Disable PCID (Process-Context IDentifier) even if it
+ is supported by the processor.
+
 nosmap [X86]
 Disable SMAP (Supervisor Mode Access Prevention)
 even if it is supported by processor.
@@ -2418,6 +2426,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 the specified number of seconds. This is to be used if
 your oopses keep scrolling off the screen.

+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
+ virtualization environments that don't cope well with the
+ expand down segment used by UDEREF on X86-32 or the frequent
+ page table updates on X86-64.
+
+ pax_sanitize_slab=
+ 0/1 to disable/enable slab object sanitization (enabled by
+ default).
+
+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
+
+ pax_extra_latent_entropy
+ Enable a very simple form of latent entropy extraction
+ from the first 4GB of memory as the bootmem allocator
+ passes the memory pages to the buddy allocator.
+
+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
+ when the processor supports PCID.
+
 pcbit= [HW,ISDN]

 pcd. [PARIDE]
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index ee78eba..a06b48d 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -277,11 +277,10 @@ Possible BPF extensions are shown in the following table:
 mark skb->mark
 queue skb->queue_mapping
 hatype skb->dev->type
- rxhash skb->hash
+ rxhash skb->rxhash
 cpu raw_smp_processor_id()
 vlan_tci vlan_tx_tag_get(skb)
 vlan_pr vlan_tx_tag_present(skb)
- rand prandom_u32()

 These extensions can also be prefixed with '#'.
 Examples for low-level BPF:
@@ -309,18 +308,6 @@ Examples for low-level BPF:
 ret #-1
 drop: ret #0

-** icmp random packet sampling, 1 in 4
- ldh [12]
- jne #0x800, drop
- ldb [23]
- jneq #1, drop
- # get a random uint32 number
- ld rand
- mod #4
- jneq #1, drop
- ret #-1
- drop: ret #0
-
 ** SECCOMP filter example:

 ld [4] /* offsetof(struct seccomp_data, arch) */
@@ -559,456 +546,6 @@ ffffffffa0069c8f + <x>:
 For BPF JIT developers, bpf_jit_disasm, bpf_asm and bpf_dbg provides a useful
 toolchain for developing and testing the kernel's JIT compiler.

-BPF kernel internals
---------------------
-Internally, for the kernel interpreter, a different instruction set
-format with similar underlying principles from BPF described in previous
-paragraphs is being used. However, the instruction set format is modelled
-closer to the underlying architecture to mimic native instruction sets, so
-that a better performance can be achieved (more details later). This new
-ISA is called 'eBPF' or 'internal BPF' interchangeably. (Note: eBPF which
-originates from [e]xtended BPF is not the same as BPF extensions! While
-eBPF is an ISA, BPF extensions date back to classic BPF's 'overloading'
-of BPF_LD | BPF_{B,H,W} | BPF_ABS instruction.)
-
-It is designed to be JITed with one to one mapping, which can also open up
-the possibility for GCC/LLVM compilers to generate optimized eBPF code through
-an eBPF backend that performs almost as fast as natively compiled code.
-
-The new instruction set was originally designed with the possible goal in
-mind to write programs in "restricted C" and compile into eBPF with a optional
-GCC/LLVM backend, so that it can just-in-time map to modern 64-bit CPUs with
-minimal performance overhead over two steps, that is, C -> eBPF -> native code.
-
-Currently, the new format is being used for running user BPF programs, which
-includes seccomp BPF, classic socket filters, cls_bpf traffic classifier,
-team driver's classifier for its load-balancing mode, netfilter's xt_bpf
-extension, PTP dissector/classifier, and much more. They are all internally
-converted by the kernel into the new instruction set representation and run
-in the eBPF interpreter. For in-kernel handlers, this all works transparently
-by using sk_unattached_filter_create() for setting up the filter, resp.
-sk_unattached_filter_destroy() for destroying it. The macro
-SK_RUN_FILTER(filter, ctx) transparently invokes eBPF interpreter or JITed
-code to run the filter. 'filter' is a pointer to struct sk_filter that we
-got from sk_unattached_filter_create(), and 'ctx' the given context (e.g.
-skb pointer). All constraints and restrictions from sk_chk_filter() apply
-before a conversion to the new layout is being done behind the scenes!
-
-Currently, the classic BPF format is being used for JITing on most of the
-architectures. Only x86-64 performs JIT compilation from eBPF instruction set,
-however, future work will migrate other JIT compilers as well, so that they
-will profit from the very same benefits.
-
-Some core changes of the new internal format:
-
-- Number of registers increase from 2 to 10:
-
- The old format had two registers A and X, and a hidden frame pointer. The
- new layout extends this to be 10 internal registers and a read-only frame
- pointer. Since 64-bit CPUs are passing arguments to functions via registers
- the number of args from eBPF program to in-kernel function is restricted
- to 5 and one register is used to accept return value from an in-kernel
- function. Natively, x86_64 passes first 6 arguments in registers, aarch64/
- sparcv9/mips64 have 7 - 8 registers for arguments; x86_64 has 6 callee saved
- registers, and aarch64/sparcv9/mips64 have 11 or more callee saved registers.
-
- Therefore, eBPF calling convention is defined as:
-
- * R0 - return value from in-kernel function, and exit value for eBPF program
- * R1 - R5 - arguments from eBPF program to in-kernel function
- * R6 - R9 - callee saved registers that in-kernel function will preserve
- * R10 - read-only frame pointer to access stack
-
- Thus, all eBPF registers map one to one to HW registers on x86_64, aarch64,
- etc, and eBPF calling convention maps directly to ABIs used by the kernel on
- 64-bit architectures.
-
- On 32-bit architectures JIT may map programs that use only 32-bit arithmetic
- and may let more complex programs to be interpreted.
-
- R0 - R5 are scratch registers and eBPF program needs spill/fill them if
- necessary across calls. Note that there is only one eBPF program (== one
- eBPF main routine) and it cannot call other eBPF functions, it can only
- call predefined in-kernel functions, though.
-
-- Register width increases from 32-bit to 64-bit:
-
- Still, the semantics of the original 32-bit ALU operations are preserved
- via 32-bit subregisters. All eBPF registers are 64-bit with 32-bit lower
- subregisters that zero-extend into 64-bit if they are being written to.
- That behavior maps directly to x86_64 and arm64 subregister definition, but
- makes other JITs more difficult.
-
- 32-bit architectures run 64-bit internal BPF programs via interpreter.
- Their JITs may convert BPF programs that only use 32-bit subregisters into
- native instruction set and let the rest being interpreted.
-
- Operation is 64-bit, because on 64-bit architectures, pointers are also
- 64-bit wide, and we want to pass 64-bit values in/out of kernel functions,
- so 32-bit eBPF registers would otherwise require to define register-pair
- ABI, thus, there won't be able to use a direct eBPF register to HW register
- mapping and JIT would need to do combine/split/move operations for every
- register in and out of the function, which is complex, bug prone and slow.
- Another reason is the use of atomic 64-bit counters.
-
-- Conditional jt/jf targets replaced with jt/fall-through:
-
- While the original design has constructs such as "if (cond) jump_true;
- else jump_false;", they are being replaced into alternative constructs like
- "if (cond) jump_true; /* else fall-through */".
-
-- Introduces bpf_call insn and register passing convention for zero overhead
- calls from/to other kernel functions:
-
- Before an in-kernel function call, the internal BPF program needs to
- place function arguments into R1 to R5 registers to satisfy calling
- convention, then the interpreter will take them from registers and pass
- to in-kernel function. If R1 - R5 registers are mapped to CPU registers
- that are used for argument passing on given architecture, the JIT compiler
- doesn't need to emit extra moves. Function arguments will be in the correct
- registers and BPF_CALL instruction will be JITed as single 'call' HW
- instruction. This calling convention was picked to cover common call
- situations without performance penalty.
-
- After an in-kernel function call, R1 - R5 are reset to unreadable and R0 has
- a return value of the function. Since R6 - R9 are callee saved, their state
- is preserved across the call.
-
- For example, consider three C functions:
-
- u64 f1() { return (*_f2)(1); }
- u64 f2(u64 a) { return f3(a + 1, a); }
- u64 f3(u64 a, u64 b) { return a - b; }
-
- GCC can compile f1, f3 into x86_64:
-
- f1:
- movl $1, %edi
- movq _f2(%rip), %rax
- jmp *%rax
- f3:
- movq %rdi, %rax
- subq %rsi, %rax
- ret
-
- Function f2 in eBPF may look like:
-
- f2:
- bpf_mov R2, R1
- bpf_add R1, 1
- bpf_call f3
- bpf_exit
-
- If f2 is JITed and the pointer stored to '_f2'. The calls f1 -> f2 -> f3 and
- returns will be seamless. Without JIT, __sk_run_filter() interpreter needs to
- be used to call into f2.
-
- For practical reasons all eBPF programs have only one argument 'ctx' which is
- already placed into R1 (e.g. on __sk_run_filter() startup) and the programs
- can call kernel functions with up to 5 arguments. Calls with 6 or more arguments
- are currently not supported, but these restrictions can be lifted if necessary
- in the future.
-
- On 64-bit architectures all register map to HW registers one to one. For
- example, x86_64 JIT compiler can map them as ...
-
- R0 - rax
- R1 - rdi
- R2 - rsi
- R3 - rdx
- R4 - rcx
- R5 - r8
- R6 - rbx
- R7 - r13
- R8 - r14
- R9 - r15
- R10 - rbp
-
- ... since x86_64 ABI mandates rdi, rsi, rdx, rcx, r8, r9 for argument passing
- and rbx, r12 - r15 are callee saved.
-
- Then the following internal BPF pseudo-program:
-
- bpf_mov R6, R1 /* save ctx */
- bpf_mov R2, 2
- bpf_mov R3, 3
- bpf_mov R4, 4
- bpf_mov R5, 5
- bpf_call foo
- bpf_mov R7, R0 /* save foo() return value */
- bpf_mov R1, R6 /* restore ctx for next call */
- bpf_mov R2, 6
- bpf_mov R3, 7
- bpf_mov R4, 8
- bpf_mov R5, 9
- bpf_call bar
- bpf_add R0, R7
- bpf_exit
-
- After JIT to x86_64 may look like:
-
- push %rbp
- mov %rsp,%rbp
- sub $0x228,%rsp
- mov %rbx,-0x228(%rbp)
- mov %r13,-0x220(%rbp)
- mov %rdi,%rbx
- mov $0x2,%esi
- mov $0x3,%edx
- mov $0x4,%ecx
- mov $0x5,%r8d
- callq foo
- mov %rax,%r13
- mov %rbx,%rdi
- mov $0x2,%esi
- mov $0x3,%edx
- mov $0x4,%ecx
- mov $0x5,%r8d
- callq bar
- add %r13,%rax
- mov -0x228(%rbp),%rbx
- mov -0x220(%rbp),%r13
- leaveq
- retq
-
- Which is in this example equivalent in C to:
-
- u64 bpf_filter(u64 ctx)
- {
- return foo(ctx, 2, 3, 4, 5) + bar(ctx, 6, 7, 8, 9);
- }
-
- In-kernel functions foo() and bar() with prototype: u64 (*)(u64 arg1, u64
- arg2, u64 arg3, u64 arg4, u64 arg5); will receive arguments in proper
- registers and place their return value into '%rax' which is R0 in eBPF.
- Prologue and epilogue are emitted by JIT and are implicit in the
- interpreter. R0-R5 are scratch registers, so eBPF program needs to preserve
- them across the calls as defined by calling convention.
-
- For example the following program is invalid:
-
- bpf_mov R1, 1
- bpf_call foo
- bpf_mov R0, R1
- bpf_exit
-
- After the call the registers R1-R5 contain junk values and cannot be read.
- In the future an eBPF verifier can be used to validate internal BPF programs.
-
-Also in the new design, eBPF is limited to 4096 insns, which means that any
-program will terminate quickly and will only call a fixed number of kernel
-functions. Original BPF and the new format are two operand instructions,
-which helps to do one-to-one mapping between eBPF insn and x86 insn during JIT.
-
-The input context pointer for invoking the interpreter function is generic,
-its content is defined by a specific use case. For seccomp register R1 points
-to seccomp_data, for converted BPF filters R1 points to a skb.
-
-A program, that is translated internally consists of the following elements:
-
- op:16, jt:8, jf:8, k:32 ==> op:8, dst_reg:4, src_reg:4, off:16, imm:32
-
-So far 87 internal BPF instructions were implemented. 8-bit 'op' opcode field
-has room for new instructions. Some of them may use 16/24/32 byte encoding. New
-instructions must be multiple of 8 bytes to preserve backward compatibility.
-
-Internal BPF is a general purpose RISC instruction set. Not every register and
-every instruction are used during translation from original BPF to new format.
-For example, socket filters are not using 'exclusive add' instruction, but
-tracing filters may do to maintain counters of events, for example. Register R9
-is not used by socket filters either, but more complex filters may be running
-out of registers and would have to resort to spill/fill to stack.
-
-Internal BPF can used as generic assembler for last step performance
-optimizations, socket filters and seccomp are using it as assembler. Tracing
-filters may use it as assembler to generate code from kernel. In kernel usage
-may not be bounded by security considerations, since generated internal BPF code
-may be optimizing internal code path and not being exposed to the user space.
-Safety of internal BPF can come from a verifier (TBD). In such use cases as
-described, it may be used as safe instruction set.
-
-Just like the original BPF, the new format runs within a controlled environment,
-is deterministic and the kernel can easily prove that. The safety of the program
-can be determined in two steps: first step does depth-first-search to disallow
-loops and other CFG validation; second step starts from the first insn and
-descends all possible paths. It simulates execution of every insn and observes
-the state change of registers and stack.
-
-eBPF opcode encoding
---------------------
-
-eBPF is reusing most of the opcode encoding from classic to simplify conversion
-of classic BPF to eBPF. For arithmetic and jump instructions the 8-bit 'code'
-field is divided into three parts:
-
- +----------------+--------+--------------------+
- | 4 bits | 1 bit | 3 bits |
- | operation code | source | instruction class |
- +----------------+--------+--------------------+
- (MSB) (LSB)
-
-Three LSB bits store instruction class which is one of:
-
- Classic BPF classes: eBPF classes:
-
- BPF_LD 0x00 BPF_LD 0x00
- BPF_LDX 0x01 BPF_LDX 0x01
- BPF_ST 0x02 BPF_ST 0x02
- BPF_STX 0x03 BPF_STX 0x03
- BPF_ALU 0x04 BPF_ALU 0x04
- BPF_JMP 0x05 BPF_JMP 0x05
- BPF_RET 0x06 [ class 6 unused, for future if needed ]
- BPF_MISC 0x07 BPF_ALU64 0x07
-
-When BPF_CLASS(code) == BPF_ALU or BPF_JMP, 4th bit encodes source operand ...
-
- BPF_K 0x00
- BPF_X 0x08
-
- * in classic BPF, this means:
-
- BPF_SRC(code) == BPF_X - use register X as source operand
- BPF_SRC(code) == BPF_K - use 32-bit immediate as source operand
-
- * in eBPF, this means:
-
- BPF_SRC(code) == BPF_X - use 'src_reg' register as source operand
- BPF_SRC(code) == BPF_K - use 32-bit immediate as source operand
-
-... and four MSB bits store operation code.
-
-If BPF_CLASS(code) == BPF_ALU or BPF_ALU64 [ in eBPF ], BPF_OP(code) is one of:
-
- BPF_ADD 0x00
- BPF_SUB 0x10
- BPF_MUL 0x20
- BPF_DIV 0x30
- BPF_OR 0x40
- BPF_AND 0x50
- BPF_LSH 0x60
- BPF_RSH 0x70
- BPF_NEG 0x80
- BPF_MOD 0x90
- BPF_XOR 0xa0
- BPF_MOV 0xb0 /* eBPF only: mov reg to reg */
- BPF_ARSH 0xc0 /* eBPF only: sign extending shift right */
- BPF_END 0xd0 /* eBPF only: endianness conversion */
-
-If BPF_CLASS(code) == BPF_JMP, BPF_OP(code) is one of:
-
- BPF_JA 0x00
- BPF_JEQ 0x10
- BPF_JGT 0x20
- BPF_JGE 0x30
- BPF_JSET 0x40
- BPF_JNE 0x50 /* eBPF only: jump != */
- BPF_JSGT 0x60 /* eBPF only: signed '>' */
- BPF_JSGE 0x70 /* eBPF only: signed '>=' */
- BPF_CALL 0x80 /* eBPF only: function call */
- BPF_EXIT 0x90 /* eBPF only: function return */
-
-So BPF_ADD | BPF_X | BPF_ALU means 32-bit addition in both classic BPF
-and eBPF. There are only two registers in classic BPF, so it means A += X.
-In eBPF it means dst_reg = (u32) dst_reg + (u32) src_reg; similarly,
-BPF_XOR | BPF_K | BPF_ALU means A ^= imm32 in classic BPF and analogous
-src_reg = (u32) src_reg ^ (u32) imm32 in eBPF.
-
-Classic BPF is using BPF_MISC class to represent A = X and X = A moves.
-eBPF is using BPF_MOV | BPF_X | BPF_ALU code instead. Since there are no
-BPF_MISC operations in eBPF, the class 7 is used as BPF_ALU64 to mean
-exactly the same operations as BPF_ALU, but with 64-bit wide operands
-instead. So BPF_ADD | BPF_X | BPF_ALU64 means 64-bit addition, i.e.:
-dst_reg = dst_reg + src_reg
-
-Classic BPF wastes the whole BPF_RET class to represent a single 'ret'
-operation. Classic BPF_RET | BPF_K means copy imm32 into return register
-and perform function exit. eBPF is modeled to match CPU, so BPF_JMP | BPF_EXIT
-in eBPF means function exit only. The eBPF program needs to store return
-value into register R0 before doing a BPF_EXIT. Class 6 in eBPF is currently
-unused and reserved for future use.
-
-For load and store instructions the 8-bit 'code' field is divided as:
-
- +--------+--------+-------------------+
- | 3 bits | 2 bits | 3 bits |
- | mode | size | instruction class |
- +--------+--------+-------------------+
- (MSB) (LSB)
-
-Size modifier is one of ...
-
- BPF_W 0x00 /* word */
- BPF_H 0x08 /* half word */
- BPF_B 0x10 /* byte */
- BPF_DW 0x18 /* eBPF only, double word */
-
-... which encodes size of load/store operation:
-
- B - 1 byte
- H - 2 byte
- W - 4 byte
- DW - 8 byte (eBPF only)
-
-Mode modifier is one of:
-
- BPF_IMM 0x00 /* classic BPF only, reserved in eBPF */
- BPF_ABS 0x20
- BPF_IND 0x40
- BPF_MEM 0x60
- BPF_LEN 0x80 /* classic BPF only, reserved in eBPF */
- BPF_MSH 0xa0 /* classic BPF only, reserved in eBPF */
- BPF_XADD 0xc0 /* eBPF only, exclusive add */
-
-eBPF has two non-generic instructions: (BPF_ABS | <size> | BPF_LD) and
-(BPF_IND | <size> | BPF_LD) which are used to access packet data.
-
-They had to be carried over from classic to have strong performance of
-socket filters running in eBPF interpreter. These instructions can only
-be used when interpreter context is a pointer to 'struct sk_buff' and
-have seven implicit operands. Register R6 is an implicit input that must
-contain pointer to sk_buff. Register R0 is an implicit output which contains
-the data fetched from the packet. Registers R1-R5 are scratch registers
-and must not be used to store the data across BPF_ABS | BPF_LD or
-BPF_IND | BPF_LD instructions.
-
-These instructions have implicit program exit condition as well. When
-eBPF program is trying to access the data beyond the packet boundary,
-the interpreter will abort the execution of the program. JIT compilers
-therefore must preserve this property. src_reg and imm32 fields are
-explicit inputs to these instructions.
-
-For example:
-
- BPF_IND | BPF_W | BPF_LD means:
-
- R0 = ntohl(*(u32 *) (((struct sk_buff *) R6)->data + src_reg + imm32))
- and R1 - R5 were scratched.
-
-Unlike classic BPF instruction set, eBPF has generic load/store operations:
-
-BPF_MEM | <size> | BPF_STX: *(size *) (dst_reg + off) = src_reg
-BPF_MEM | <size> | BPF_ST: *(size *) (dst_reg + off) = imm32
-BPF_MEM | <size> | BPF_LDX: dst_reg = *(size *) (src_reg + off)
-BPF_XADD | BPF_W | BPF_STX: lock xadd *(u32 *)(dst_reg + off16) += src_reg
-BPF_XADD | BPF_DW | BPF_STX: lock xadd *(u64 *)(dst_reg + off16) += src_reg
-
-Where size is one of: BPF_B or BPF_H or BPF_W or BPF_DW. Note that 1 and
-2 byte atomic increments are not supported.
-
-Testing
--------
-
-Next to the BPF toolchain, the kernel also ships a test module that contains
-various test cases for classic and internal BPF that can be executed against
-the BPF interpreter and JIT compiler. It can be found in lib/test_bpf.c and
-enabled via Kconfig:
-
- CONFIG_TEST_BPF=m
-
-After the module has been built and installed, the test suite can be executed
-via insmod or modprobe against 'test_bpf' module. Results of the test cases
-including timings in nsec can be found in the kernel log (dmesg).
-
 Misc
 ----

@@ -1024,4 +561,3 @@ the underlying architecture.

 Jay Schulist <jschlst@samba.org>
 Daniel Borkmann <dborkman@redhat.com>
-Alexei Starovoitov <ast@plumgrid.com>
diff --git a/Makefile b/Makefile
index 9b25a83..e77c38a 100644
--- a/Makefile
+++ b/Makefile
@@ -303,8 +303,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \

 HOSTCC = gcc
 HOSTCXX = g++
-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
-HOSTCXXFLAGS = -O2
+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds

 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
@@ -449,8 +450,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
 # Rules shared between *config targets and build targets

 # Basic helpers built in scripts/
-PHONY += scripts_basic
-scripts_basic:
+PHONY += scripts_basic gcc-plugins
+scripts_basic: gcc-plugins
 $(Q)$(MAKE) $(build)=scripts/basic
 $(Q)rm -f .tmp_quiet_recordmcount

@@ -621,6 +622,75 @@ else
 KBUILD_CFLAGS += -O2
 endif

+# Tell gcc to never replace conditional load with a non-conditional one
+KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
+
+ifndef DISABLE_PAX_PLUGINS
+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
+else
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
+endif
+ifneq ($(PLUGINCC),)
+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STACKLEAK
+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
+endif
+ifdef CONFIG_KALLOCSTAT_PLUGIN
+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
+endif
+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
+endif
+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
+endif
+endif
+ifdef CONFIG_CHECKER_PLUGIN
+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
+endif
+endif
+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
+ifdef CONFIG_PAX_SIZE_OVERFLOW
+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
+endif
+ifdef CONFIG_PAX_LATENT_ENTROPY
+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
+endif
+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
+ifeq ($(KBUILD_EXTMOD),)
+gcc-plugins:
+ $(Q)$(MAKE) $(build)=tools/gcc
+else
+gcc-plugins: ;
+endif
+else
+gcc-plugins:
+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
+else
+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
+endif
+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
+endif
+endif
+
 ifdef CONFIG_READABLE_ASM
 # Disable optimizations that make assembler listings hard to read.
 # reorder blocks reorders the control in the function
@@ -692,7 +762,7 @@ KBUILD_CFLAGS += $(call cc-option, -fno-var-tracking-assignments)

 ifdef CONFIG_DEBUG_INFO
 KBUILD_CFLAGS += -g
-KBUILD_AFLAGS += -Wa,-gdwarf-2
+KBUILD_AFLAGS += -Wa,--gdwarf-2
 endif

 ifdef CONFIG_DEBUG_INFO_REDUCED
@@ -839,7 +909,7 @@ export mod_sign_cmd


 ifeq ($(KBUILD_EXTMOD),)
-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/

 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
@@ -888,6 +958,8 @@ endif

 # The actual objects are generated when descending,
 # make sure no implicit rule kicks in
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;

 # Handle descending into subdirectories listed in $(vmlinux-dirs)
@@ -897,7 +969,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
 # Error messages still appears in the original language

 PHONY += $(vmlinux-dirs)
-$(vmlinux-dirs): prepare scripts
+$(vmlinux-dirs): gcc-plugins prepare scripts
 $(Q)$(MAKE) $(build)=$@

 define filechk_kernel.release
@@ -940,10 +1012,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \

 archprepare: archheaders archscripts prepare1 scripts_basic

+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 prepare0: archprepare FORCE
 $(Q)$(MAKE) $(build)=.

 # All the preparing..
+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
 prepare: prepare0

 # Generate some files
@@ -1051,6 +1126,8 @@ all: modules
 # using awk while concatenating to the final file.

 PHONY += modules
+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
 @$(kecho) ' Building modules, stage 2.';
@@ -1066,7 +1143,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)

 # Target to prepare building external modules
 PHONY += modules_prepare
-modules_prepare: prepare scripts
+modules_prepare: gcc-plugins prepare scripts

 # Target to install modules
 PHONY += modules_install
@@ -1132,7 +1209,10 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
 signing_key.priv signing_key.x509 x509.genkey \
 extra_certificates signing_key.x509.keyid \
- signing_key.x509.signer include/linux/version.h
+ signing_key.x509.signer include/linux/version.h \
+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
+ tools/gcc/randomize_layout_seed.h

 # clean - Delete most, but leave enough to build external modules
 #
@@ -1171,7 +1251,7 @@ distclean: mrproper
 @find $(srctree) $(RCS_FIND_IGNORE) \
 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
 -type f -print | xargs rm -f


@@ -1332,6 +1412,8 @@ PHONY += $(module-dirs) modules
 $(module-dirs): crmodverdir $(objtree)/Module.symvers
 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)

+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(module-dirs)
 @$(kecho) ' Building modules, stage 2.';
 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
@@ -1471,17 +1553,21 @@ else
 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
 endif

-%.s: %.c prepare scripts FORCE
+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.s: %.c gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.i: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.c prepare scripts FORCE
+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.o: %.c gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.lst: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.s: %.S prepare scripts FORCE
+%.s: %.S gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.S prepare scripts FORCE
+%.o: %.S gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.symtypes: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
@@ -1491,11 +1577,15 @@ endif
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir)
-%/: prepare scripts FORCE
+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%/: gcc-plugins prepare scripts FORCE
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir)
-%.ko: prepare scripts FORCE
+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.ko: gcc-plugins prepare scripts FORCE
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir) $(@:.ko=.o)
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index ed60a1e..47f1a55 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -292,4 +292,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic_dec(v) atomic_sub(1,(v))
 #define atomic64_dec(v) atomic64_sub(1,(v))

+#define atomic64_read_unchecked(v) atomic64_read(v)
+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v) atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
+
 #endif /* _ALPHA_ATOMIC_H */
diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
index ad368a9..fbe0f25 100644
--- a/arch/alpha/include/asm/cache.h
+++ b/arch/alpha/include/asm/cache.h
@@ -4,19 +4,19 @@
 #ifndef __ARCH_ALPHA_CACHE_H
 #define __ARCH_ALPHA_CACHE_H

+#include <linux/const.h>

 /* Bytes per L1 (data) cache line. */
 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
-# define L1_CACHE_BYTES 64
 # define L1_CACHE_SHIFT 6
 #else
 /* Both EV4 and EV5 are write-through, read-allocate,
 direct-mapped, physical.
 */
-# define L1_CACHE_BYTES 32
 # define L1_CACHE_SHIFT 5
 #endif

+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES L1_CACHE_BYTES

 #endif
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
index 968d999..d36b2df 100644
--- a/arch/alpha/include/asm/elf.h
+++ b/arch/alpha/include/asm/elf.h
@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];

 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)

+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
+
+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
+#endif
+
 /* $0 is set by ld.so to a pointer to a function which might be
 registered using atexit. This provides a mean for the dynamic
 linker to call DT_FINI functions for shared libraries that have
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index aab14a0..b4fa3e7 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 pgd_set(pgd, pmd);
 }

+static inline void
+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+ pgd_populate(mm, pgd, pmd);
+}
+
 extern pgd_t *pgd_alloc(struct mm_struct *mm);

 static inline void
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index d8f9b7e..f6222fa 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -102,6 +102,17 @@ struct vm_area_struct;
 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+
+#ifdef CONFIG_PAX_PAGEEXEC
+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+#else
+# define PAGE_SHARED_NOEXEC PAGE_SHARED
+# define PAGE_COPY_NOEXEC PAGE_COPY
+# define PAGE_READONLY_NOEXEC PAGE_READONLY
+#endif
+
 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index 2fd00b7..cfd5069 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,

 /* The small sections were sorted to the end of the segment.
 The following should definitely cover them. */
- gp = (u64)me->module_core + me->core_size - 0x8000;
+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
 got = sechdrs[me->arch.gotsecindex].sh_addr;

 for (i = 0; i < n; i++) {
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 1402fcc..0b1abd2 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1298,10 +1298,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
 generic version except that we know how to honor ADDR_LIMIT_32BIT. */

 static unsigned long
-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
- unsigned long limit)
+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
+ unsigned long limit, unsigned long flags)
 {
 struct vm_unmapped_area_info info;
+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);

 info.flags = 0;
 info.length = len;
@@ -1309,6 +1310,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
 info.high_limit = limit;
 info.align_mask = 0;
 info.align_offset = 0;
+ info.threadstack_offset = offset;
 return vm_unmapped_area(&info);
 }

@@ -1341,20 +1343,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 merely specific addresses, but regions of memory -- perhaps
 this feature should be incorporated into all ports? */

+#ifdef CONFIG_PAX_RANDMMAP
+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 if (addr) {
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
 if (addr != (unsigned long) -ENOMEM)
 return addr;
 }

 /* Next, try allocating at TASK_UNMAPPED_BASE. */
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
- len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
+
 if (addr != (unsigned long) -ENOMEM)
 return addr;

 /* Finally, try allocating in low memory. */
- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);

 return addr;
 }
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 98838a0..b304fb4 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
 __reload_thread(pcb);
 }

+#ifdef CONFIG_PAX_PAGEEXEC
+/*
+ * PaX: decide what to do with offenders (regs->pc = fault address)
+ *
+ * returns 1 when task should be killed
+ * 2 when patched PLT trampoline was detected
+ * 3 when unpatched PLT trampoline was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+
+#ifdef CONFIG_PAX_EMUPLT
+ int err;
+
+ do { /* PaX: patched PLT emulation #1 */
+ unsigned int ldah, ldq, jmp;
+
+ err = get_user(ldah, (unsigned int *)regs->pc);
+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
+ jmp == 0x6BFB0000U)
+ {
+ unsigned long r27, addr;
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
+
+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+ err = get_user(r27, (unsigned long *)addr);
+ if (err)
+ break;
+
+ regs->r27 = r27;
+ regs->pc = r27;
+ return 2;
+ }
+ } while (0);
+
+ do { /* PaX: patched PLT emulation #2 */
+ unsigned int ldah, lda, br;
+
+ err = get_user(ldah, (unsigned int *)regs->pc);
+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
+ err |= get_user(br, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
+ (br & 0xFFE00000U) == 0xC3E00000U)
+ {
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
+
+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+ return 2;
+ }
+ } while (0);
+
+ do { /* PaX: unpatched PLT emulation */
+ unsigned int br;
+
+ err = get_user(br, (unsigned int *)regs->pc);
+
+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
+ unsigned int br2, ldq, nop, jmp;
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
+
+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+ err = get_user(br2, (unsigned int *)addr);
+ err |= get_user(ldq, (unsigned int *)(addr+4));
+ err |= get_user(nop, (unsigned int *)(addr+8));
+ err |= get_user(jmp, (unsigned int *)(addr+12));
+ err |= get_user(resolver, (unsigned long *)(addr+16));
+
+ if (err)
+ break;
+
+ if (br2 == 0xC3600000U &&
+ ldq == 0xA77B000CU &&
+ nop == 0x47FF041FU &&
+ jmp == 0x6B7B0000U)
+ {
+ regs->r28 = regs->pc+4;
+ regs->r27 = addr+16;
+ regs->pc = resolver;
+ return 3;
+ }
+ }
+ } while (0);
+#endif
+
+ return 1;
+}
+
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+ unsigned long i;
+
+ printk(KERN_ERR "PAX: bytes at PC: ");
+ for (i = 0; i < 5; i++) {
+ unsigned int c;
+ if (get_user(c, (unsigned int *)pc+i))
+ printk(KERN_CONT "???????? ");
+ else
+ printk(KERN_CONT "%08x ", c);
+ }
+ printk("\n");
+}
+#endif

 /*
 * This routine handles page faults. It determines the address,
@@ -133,8 +251,29 @@ retry:
 good_area:
 si_code = SEGV_ACCERR;
 if (cause < 0) {
- if (!(vma->vm_flags & VM_EXEC))
+ if (!(vma->vm_flags & VM_EXEC)) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
+ goto bad_area;
+
+ up_read(&mm->mmap_sem);
+ switch (pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_PAX_EMUPLT
+ case 2:
+ case 3:
+ return;
+#endif
+
+ }
+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
+ do_group_exit(SIGKILL);
+#else
 goto bad_area;
+#endif
+
+ }
 } else if (!cause) {
 /* Allow reads even for write-only mappings */
 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 290f02ee..a639059 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1787,7 +1787,7 @@ config ALIGNMENT_TRAP

 config UACCESS_WITH_MEMCPY
 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
- depends on MMU
+ depends on MMU && !PAX_MEMORY_UDEREF
 default y if CPU_FEROCEON
 help
 Implement faster copy_to_user and clear_user methods for CPU
@@ -2051,6 +2051,7 @@ config XIP_PHYS_ADDR
 config KEXEC
 bool "Kexec system call (EXPERIMENTAL)"
 depends on (!SMP || PM_SLEEP_SMP)
+ depends on !GRKERNSEC_KMEM
 help
 kexec is a system call that implements the ability to shutdown your
 current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 3040359..cf3bab0 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -18,17 +18,35 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>

+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
 #define ATOMIC_INIT(i) { (i) }

 #ifdef __KERNEL__

+#define _ASM_EXTABLE(from, to) \
+" .pushsection __ex_table,\"a\"\n"\
+" .align 3\n" \
+" .long " #from ", " #to"\n" \
+" .popsection"
+
 /*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
 #define atomic_read(v) (*(volatile int *)&(v)->counter)
+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
+{
+ return v->counter;
+}
 #define atomic_set(v,i) (((v)->counter) = (i))
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
+{
+ v->counter = i;
+}

 #if __LINUX_ARM_ARCH__ >= 6

@@ -44,6 +62,36 @@ static inline void atomic_add(int i, atomic_t *v)

 prefetchw(&v->counter);
 __asm__ __volatile__("@ atomic_add\n"
+"1: ldrex %1, [%3]\n"
+" adds %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+}
+
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic_add_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " add %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -63,6 +111,43 @@ static inline int atomic_add_return(int i, atomic_t *v)
 prefetchw(&v->counter);

 __asm__ __volatile__("@ atomic_add_return\n"
+"1: ldrex %1, [%3]\n"
+" adds %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ smp_mb();
+ prefetchw(&v->counter);
+
+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " add %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -84,6 +169,36 @@ static inline void atomic_sub(int i, atomic_t *v)

 prefetchw(&v->counter);
 __asm__ __volatile__("@ atomic_sub\n"
+"1: ldrex %1, [%3]\n"
+" subs %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+}
+
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " sub %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -103,11 +218,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 prefetchw(&v->counter);

 __asm__ __volatile__("@ atomic_sub_return\n"
-"1: ldrex %0, [%3]\n"
-" sub %0, %0, %4\n"
+"1: ldrex %1, [%3]\n"
+" subs %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strex %1, %0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "Ir" (i)
 : "cc");
@@ -152,12 +281,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 __asm__ __volatile__ ("@ atomic_add_unless\n"
 "1: ldrex %0, [%4]\n"
 " teq %0, %5\n"
-" beq 2f\n"
-" add %1, %0, %6\n"
+" beq 4f\n"
+" adds %1, %0, %6\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strex %2, %1, [%4]\n"
 " teq %2, #0\n"
 " bne 1b\n"
-"2:"
+"4:"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "r" (u), "r" (a)
 : "cc");
@@ -168,6 +309,28 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 return oldval;
 }

+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
+{
+ unsigned long oldval, res;
+
+ smp_mb();
+
+ do {
+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
+ "ldrex %1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %1, %4\n"
+ "strexeq %0, %5, [%3]\n"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ smp_mb();
+
+ return oldval;
+}
+
 #else /* ARM_ARCH_6 */

 #ifdef CONFIG_SMP
@@ -186,7 +349,17 @@ static inline int atomic_add_return(int i, atomic_t *v)

 return val;
 }
+
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+{
+ return atomic_add_return(i, v);
+}
+
 #define atomic_add(i, v) (void) atomic_add_return(i, v)
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+{
+ (void) atomic_add_return(i, v);
+}

 static inline int atomic_sub_return(int i, atomic_t *v)
 {
@@ -201,6 +374,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 return val;
 }
 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
+{
+ (void) atomic_sub_return(i, v);
+}

 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
@@ -216,6 +393,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 return ret;
 }

+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
+{
+ return atomic_cmpxchg(v, old, new);
+}
+
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 int c, old;
@@ -229,13 +411,33 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #endif /* __LINUX_ARM_ARCH__ */

 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
+{
+ return xchg(&v->counter, new);
+}

 #define atomic_inc(v) atomic_add(1, v)
+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
+{
+ atomic_add_unchecked(1, v);
+}
 #define atomic_dec(v) atomic_sub(1, v)
+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
+{
+ atomic_sub_unchecked(1, v);
+}

 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v) == 0;
+}
 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
 #define atomic_inc_return(v) (atomic_add_return(1, v))
+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v);
+}
 #define atomic_dec_return(v) (atomic_sub_return(1, v))
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

@@ -246,6 +448,14 @@ typedef struct {
 long long counter;
 } atomic64_t;

+#ifdef CONFIG_PAX_REFCOUNT
+typedef struct {
+ long long counter;
+} atomic64_unchecked_t;
+#else
+typedef atomic64_t atomic64_unchecked_t;
+#endif
+
 #define ATOMIC64_INIT(i) { (i) }

 #ifdef CONFIG_ARM_LPAE
@@ -262,6 +472,19 @@ static inline long long atomic64_read(const atomic64_t *v)
 return result;
 }

+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+ long long result;
+
+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
+" ldrd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter), "Qo" (v->counter)
+ );
+
+ return result;
+}
+
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
 __asm__ __volatile__("@ atomic64_set\n"
@@ -270,6 +493,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 : "r" (&v->counter), "r" (i)
 );
 }
+
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
+{
+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
+" strd %2, %H2, [%1]"
+ : "=Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ );
+}
 #else
 static inline long long atomic64_read(const atomic64_t *v)
 {
@@ -284,6 +516,19 @@ static inline long long atomic64_read(const atomic64_t *v)
 return result;
 }

+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+ long long result;
+
+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1761+" ldrexd %0, %H0, [%1]"
1762+ : "=&r" (result)
1763+ : "r" (&v->counter), "Qo" (v->counter)
1764+ );
1765+
1766+ return result;
1767+}
1768+
1769 static inline void atomic64_set(atomic64_t *v, long long i)
1770 {
1771 long long tmp;
1772@@ -298,6 +543,21 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1773 : "r" (&v->counter), "r" (i)
1774 : "cc");
1775 }
1776+
1777+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1778+{
1779+ long long tmp;
1780+
1781+ prefetchw(&v->counter);
1782+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1783+"1: ldrexd %0, %H0, [%2]\n"
1784+" strexd %0, %3, %H3, [%2]\n"
1785+" teq %0, #0\n"
1786+" bne 1b"
1787+ : "=&r" (tmp), "=Qo" (v->counter)
1788+ : "r" (&v->counter), "r" (i)
1789+ : "cc");
1790+}
1791 #endif
1792
1793 static inline void atomic64_add(long long i, atomic64_t *v)
1794@@ -309,6 +569,37 @@ static inline void atomic64_add(long long i, atomic64_t *v)
1795 __asm__ __volatile__("@ atomic64_add\n"
1796 "1: ldrexd %0, %H0, [%3]\n"
1797 " adds %Q0, %Q0, %Q4\n"
1798+" adcs %R0, %R0, %R4\n"
1799+
1800+#ifdef CONFIG_PAX_REFCOUNT
1801+" bvc 3f\n"
1802+"2: bkpt 0xf103\n"
1803+"3:\n"
1804+#endif
1805+
1806+" strexd %1, %0, %H0, [%3]\n"
1807+" teq %1, #0\n"
1808+" bne 1b"
1809+
1810+#ifdef CONFIG_PAX_REFCOUNT
1811+"\n4:\n"
1812+ _ASM_EXTABLE(2b, 4b)
1813+#endif
1814+
1815+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1816+ : "r" (&v->counter), "r" (i)
1817+ : "cc");
1818+}
1819+
1820+static inline void atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
1821+{
1822+ long long result;
1823+ unsigned long tmp;
1824+
1825+ prefetchw(&v->counter);
1826+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1827+"1: ldrexd %0, %H0, [%3]\n"
1828+" adds %Q0, %Q0, %Q4\n"
1829 " adc %R0, %R0, %R4\n"
1830 " strexd %1, %0, %H0, [%3]\n"
1831 " teq %1, #0\n"
1832@@ -329,6 +620,44 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
1833 __asm__ __volatile__("@ atomic64_add_return\n"
1834 "1: ldrexd %0, %H0, [%3]\n"
1835 " adds %Q0, %Q0, %Q4\n"
1836+" adcs %R0, %R0, %R4\n"
1837+
1838+#ifdef CONFIG_PAX_REFCOUNT
1839+" bvc 3f\n"
1840+" mov %0, %1\n"
1841+" mov %H0, %H1\n"
1842+"2: bkpt 0xf103\n"
1843+"3:\n"
1844+#endif
1845+
1846+" strexd %1, %0, %H0, [%3]\n"
1847+" teq %1, #0\n"
1848+" bne 1b"
1849+
1850+#ifdef CONFIG_PAX_REFCOUNT
1851+"\n4:\n"
1852+ _ASM_EXTABLE(2b, 4b)
1853+#endif
1854+
1855+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1856+ : "r" (&v->counter), "r" (i)
1857+ : "cc");
1858+
1859+ smp_mb();
1860+
1861+ return result;
1862+}
1863+
1864+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
1865+{
1866+ long long result;
1867+ unsigned long tmp;
1868+
1869+ smp_mb();
1870+
1871+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1872+"1: ldrexd %0, %H0, [%3]\n"
1873+" adds %Q0, %Q0, %Q4\n"
1874 " adc %R0, %R0, %R4\n"
1875 " strexd %1, %0, %H0, [%3]\n"
1876 " teq %1, #0\n"
1877@@ -351,6 +680,37 @@ static inline void atomic64_sub(long long i, atomic64_t *v)
1878 __asm__ __volatile__("@ atomic64_sub\n"
1879 "1: ldrexd %0, %H0, [%3]\n"
1880 " subs %Q0, %Q0, %Q4\n"
1881+" sbcs %R0, %R0, %R4\n"
1882+
1883+#ifdef CONFIG_PAX_REFCOUNT
1884+" bvc 3f\n"
1885+"2: bkpt 0xf103\n"
1886+"3:\n"
1887+#endif
1888+
1889+" strexd %1, %0, %H0, [%3]\n"
1890+" teq %1, #0\n"
1891+" bne 1b"
1892+
1893+#ifdef CONFIG_PAX_REFCOUNT
1894+"\n4:\n"
1895+ _ASM_EXTABLE(2b, 4b)
1896+#endif
1897+
1898+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1899+ : "r" (&v->counter), "r" (i)
1900+ : "cc");
1901+}
1902+
1903+static inline void atomic64_sub_unchecked(long long i, atomic64_unchecked_t *v)
1904+{
1905+ long long result;
1906+ unsigned long tmp;
1907+
1908+ prefetchw(&v->counter);
1909+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1910+"1: ldrexd %0, %H0, [%3]\n"
1911+" subs %Q0, %Q0, %Q4\n"
1912 " sbc %R0, %R0, %R4\n"
1913 " strexd %1, %0, %H0, [%3]\n"
1914 " teq %1, #0\n"
1915@@ -371,10 +731,25 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
1916 __asm__ __volatile__("@ atomic64_sub_return\n"
1917 "1: ldrexd %0, %H0, [%3]\n"
1918 " subs %Q0, %Q0, %Q4\n"
1919-" sbc %R0, %R0, %R4\n"
1920+" sbcs %R0, %R0, %R4\n"
1921+
1922+#ifdef CONFIG_PAX_REFCOUNT
1923+" bvc 3f\n"
1924+" mov %0, %1\n"
1925+" mov %H0, %H1\n"
1926+"2: bkpt 0xf103\n"
1927+"3:\n"
1928+#endif
1929+
1930 " strexd %1, %0, %H0, [%3]\n"
1931 " teq %1, #0\n"
1932 " bne 1b"
1933+
1934+#ifdef CONFIG_PAX_REFCOUNT
1935+"\n4:\n"
1936+ _ASM_EXTABLE(2b, 4b)
1937+#endif
1938+
1939 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1940 : "r" (&v->counter), "r" (i)
1941 : "cc");
1942@@ -410,6 +785,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1943 return oldval;
1944 }
1945
1946+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1947+ long long new)
1948+{
1949+ long long oldval;
1950+ unsigned long res;
1951+
1952+ smp_mb();
1953+
1954+ do {
1955+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1956+ "ldrexd %1, %H1, [%3]\n"
1957+ "mov %0, #0\n"
1958+ "teq %1, %4\n"
1959+ "teqeq %H1, %H4\n"
1960+ "strexdeq %0, %5, %H5, [%3]"
1961+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1962+ : "r" (&ptr->counter), "r" (old), "r" (new)
1963+ : "cc");
1964+ } while (res);
1965+
1966+ smp_mb();
1967+
1968+ return oldval;
1969+}
1970+
1971 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1972 {
1973 long long result;
1974@@ -435,21 +835,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1975 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1976 {
1977 long long result;
1978- unsigned long tmp;
1979+ u64 tmp;
1980
1981 smp_mb();
1982 prefetchw(&v->counter);
1983
1984 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1985-"1: ldrexd %0, %H0, [%3]\n"
1986-" subs %Q0, %Q0, #1\n"
1987-" sbc %R0, %R0, #0\n"
1988+"1: ldrexd %1, %H1, [%3]\n"
1989+" subs %Q0, %Q1, #1\n"
1990+" sbcs %R0, %R1, #0\n"
1991+
1992+#ifdef CONFIG_PAX_REFCOUNT
1993+" bvc 3f\n"
1994+" mov %Q0, %Q1\n"
1995+" mov %R0, %R1\n"
1996+"2: bkpt 0xf103\n"
1997+"3:\n"
1998+#endif
1999+
2000 " teq %R0, #0\n"
2001-" bmi 2f\n"
2002+" bmi 4f\n"
2003 " strexd %1, %0, %H0, [%3]\n"
2004 " teq %1, #0\n"
2005 " bne 1b\n"
2006-"2:"
2007+"4:\n"
2008+
2009+#ifdef CONFIG_PAX_REFCOUNT
2010+ _ASM_EXTABLE(2b, 4b)
2011+#endif
2012+
2013 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
2014 : "r" (&v->counter)
2015 : "cc");
2016@@ -473,13 +887,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
2017 " teq %0, %5\n"
2018 " teqeq %H0, %H5\n"
2019 " moveq %1, #0\n"
2020-" beq 2f\n"
2021+" beq 4f\n"
2022 " adds %Q0, %Q0, %Q6\n"
2023-" adc %R0, %R0, %R6\n"
2024+" adcs %R0, %R0, %R6\n"
2025+
2026+#ifdef CONFIG_PAX_REFCOUNT
2027+" bvc 3f\n"
2028+"2: bkpt 0xf103\n"
2029+"3:\n"
2030+#endif
2031+
2032 " strexd %2, %0, %H0, [%4]\n"
2033 " teq %2, #0\n"
2034 " bne 1b\n"
2035-"2:"
2036+"4:\n"
2037+
2038+#ifdef CONFIG_PAX_REFCOUNT
2039+ _ASM_EXTABLE(2b, 4b)
2040+#endif
2041+
2042 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
2043 : "r" (&v->counter), "r" (u), "r" (a)
2044 : "cc");
2045@@ -492,10 +918,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
2046
2047 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
2048 #define atomic64_inc(v) atomic64_add(1LL, (v))
2049+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
2050 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
2051+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
2052 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2053 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
2054 #define atomic64_dec(v) atomic64_sub(1LL, (v))
2055+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
2056 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
2057 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
2058 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
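
Every checked atomic hunk above follows the same PaX REFCOUNT pattern: the plain add/sub/adc/sbc becomes its flag-setting form (adds/subs/adcs/sbcs), a bvc falls through to the store when the signed result did not overflow, and the bkpt 0xf103 trap site is wired through the exception table (_ASM_EXTABLE(2b, 4b)) to a label beyond the strex, so an overflowed value is reported and never written back. A standalone sketch of the 32-bit case follows; it is illustrative only, since it omits the _ASM_EXTABLE fixup registration and the second register the *_return variants use to preserve the pre-overflow value:

static inline void atomic_add_checked_sketch(int i, int *counter)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
"1:	ldrex	%0, [%3]\n"
"	adds	%0, %0, %4\n"		/* flag-setting add: V set on signed overflow */
"	bvc	3f\n"			/* no overflow: continue to the store */
"2:	bkpt	0xf103\n"		/* overflow: trap; the extable fixup resumes at 4: */
"3:	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"			/* exclusive store lost the race: retry */
"4:\n"
	: "=&r" (result), "=&r" (tmp), "+Qo" (*counter)
	: "r" (counter), "Ir" (i)
	: "cc");
}

The _unchecked variants exist so counters that may legitimately wrap (statistics, sequence numbers) keep the original non-trapping arithmetic.
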
2059diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
2060index c6a3e73..35cca85 100644
2061--- a/arch/arm/include/asm/barrier.h
2062+++ b/arch/arm/include/asm/barrier.h
2063@@ -63,7 +63,7 @@
2064 do { \
2065 compiletime_assert_atomic_type(*p); \
2066 smp_mb(); \
2067- ACCESS_ONCE(*p) = (v); \
2068+ ACCESS_ONCE_RW(*p) = (v); \
2069 } while (0)
2070
2071 #define smp_load_acquire(p) \
2072diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
2073index 75fe66b..ba3dee4 100644
2074--- a/arch/arm/include/asm/cache.h
2075+++ b/arch/arm/include/asm/cache.h
2076@@ -4,8 +4,10 @@
2077 #ifndef __ASMARM_CACHE_H
2078 #define __ASMARM_CACHE_H
2079
2080+#include <linux/const.h>
2081+
2082 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
2083-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2084+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2085
2086 /*
2087 * Memory returned by kmalloc() may be used for DMA, so we must make
2088@@ -24,5 +26,6 @@
2089 #endif
2090
2091 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
2092+#define __read_only __attribute__ ((__section__(".data..read_only")))
2093
2094 #endif
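
The _AC(1,UL) change matters because L1_CACHE_BYTES is used from both C and assembler. The macro comes from the kernel's <uapi/linux/const.h>; its definition, reproduced here for reference, appends the UL suffix only when compiling C, so cache-size arithmetic is carried out in unsigned long rather than a signed int:

#ifdef __ASSEMBLY__
#define _AC(X,Y)	X		/* assembler: bare constant */
#else
#define __AC(X,Y)	(X##Y)		/* C: paste the suffix on */
#define _AC(X,Y)	__AC(X,Y)
#endif
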
2095diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
2096index fd43f7f..a817f5a 100644
2097--- a/arch/arm/include/asm/cacheflush.h
2098+++ b/arch/arm/include/asm/cacheflush.h
2099@@ -116,7 +116,7 @@ struct cpu_cache_fns {
2100 void (*dma_unmap_area)(const void *, size_t, int);
2101
2102 void (*dma_flush_range)(const void *, const void *);
2103-};
2104+} __no_const;
2105
2106 /*
2107 * Select the calling method
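
__no_const here (and __do_const on struct dma_ops further down) are annotations consumed by grsecurity's constify GCC plugin, which by default moves structures consisting only of function pointers into read-only memory. The sketch below is inferred from how this patch uses the two annotations, not from the plugin sources:

struct boot_filled_ops {
	void (*fn)(void);
} __no_const;			/* assigned at runtime: opt out of constification */

struct forced_const_ops {
	const char *name;	/* a data member would normally exempt the struct... */
	void (*fn)(void);
} __do_const;			/* ...so constification is forced explicitly */
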
2108diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
2109index 5233151..87a71fa 100644
2110--- a/arch/arm/include/asm/checksum.h
2111+++ b/arch/arm/include/asm/checksum.h
2112@@ -37,7 +37,19 @@ __wsum
2113 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
2114
2115 __wsum
2116-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
2117+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
2118+
2119+static inline __wsum
2120+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
2121+{
2122+ __wsum ret;
2123+ pax_open_userland();
2124+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
2125+ pax_close_userland();
2126+ return ret;
2127+}
2128+
2129+
2130
2131 /*
2132 * Fold a partial checksum without adding pseudo headers
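
This is the first instance of a renaming pattern the patch applies to every assembly routine that dereferences userland: the real routine gains an extra leading underscore (armksyms.c below switches the export to the new name), and a static inline wrapper opens the UDEREF userland window only for the duration of the call. Reduced to its core, with hypothetical names:

extern long ___raw_user_op(const void __user *src, void *dst, int len);

static inline long user_op(const void __user *src, void *dst, int len)
{
	long ret;

	pax_open_userland();	/* DOMAIN_USER becomes accessible (client) */
	ret = ___raw_user_op(src, dst, len);
	pax_close_userland();	/* DOMAIN_USER back to no access */
	return ret;
}
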
2133diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
2134index abb2c37..96db950 100644
2135--- a/arch/arm/include/asm/cmpxchg.h
2136+++ b/arch/arm/include/asm/cmpxchg.h
2137@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
2138
2139 #define xchg(ptr,x) \
2140 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
2141+#define xchg_unchecked(ptr,x) \
2142+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
2143
2144 #include <asm-generic/cmpxchg-local.h>
2145
2146diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
2147index 6ddbe44..b5e38b1 100644
2148--- a/arch/arm/include/asm/domain.h
2149+++ b/arch/arm/include/asm/domain.h
2150@@ -48,18 +48,37 @@
2151 * Domain types
2152 */
2153 #define DOMAIN_NOACCESS 0
2154-#define DOMAIN_CLIENT 1
2155 #ifdef CONFIG_CPU_USE_DOMAINS
2156+#define DOMAIN_USERCLIENT 1
2157+#define DOMAIN_KERNELCLIENT 1
2158 #define DOMAIN_MANAGER 3
2159+#define DOMAIN_VECTORS DOMAIN_USER
2160 #else
2161+
2162+#ifdef CONFIG_PAX_KERNEXEC
2163 #define DOMAIN_MANAGER 1
2164+#define DOMAIN_KERNEXEC 3
2165+#else
2166+#define DOMAIN_MANAGER 1
2167+#endif
2168+
2169+#ifdef CONFIG_PAX_MEMORY_UDEREF
2170+#define DOMAIN_USERCLIENT 0
2171+#define DOMAIN_UDEREF 1
2172+#define DOMAIN_VECTORS DOMAIN_KERNEL
2173+#else
2174+#define DOMAIN_USERCLIENT 1
2175+#define DOMAIN_VECTORS DOMAIN_USER
2176+#endif
2177+#define DOMAIN_KERNELCLIENT 1
2178+
2179 #endif
2180
2181 #define domain_val(dom,type) ((type) << (2*(dom)))
2182
2183 #ifndef __ASSEMBLY__
2184
2185-#ifdef CONFIG_CPU_USE_DOMAINS
2186+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2187 static inline void set_domain(unsigned val)
2188 {
2189 asm volatile(
2190@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
2191 isb();
2192 }
2193
2194-#define modify_domain(dom,type) \
2195- do { \
2196- struct thread_info *thread = current_thread_info(); \
2197- unsigned int domain = thread->cpu_domain; \
2198- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
2199- thread->cpu_domain = domain | domain_val(dom, type); \
2200- set_domain(thread->cpu_domain); \
2201- } while (0)
2202-
2203+extern void modify_domain(unsigned int dom, unsigned int type);
2204 #else
2205 static inline void set_domain(unsigned val) { }
2206 static inline void modify_domain(unsigned dom, unsigned type) { }
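
All of the domain juggling rests on the DACR layout: the register carries a 2-bit access type for each of the 16 ARM protection domains, which is exactly what domain_val(dom,type) == (type) << (2*(dom)) encodes. Changing one domain is a clear-then-set of its field, the same bic/orr pair the assembly macros later in this patch perform; in C the operation is simply:

static unsigned long dacr_set_domain(unsigned long dacr,
				     unsigned int dom, unsigned int type)
{
	dacr &= ~(3UL << (2 * dom));			/* clear the 2-bit field */
	dacr |= (unsigned long)type << (2 * dom);	/* install the new access type */
	return dacr;
}
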
2207diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
2208index f4b46d3..abc9b2b 100644
2209--- a/arch/arm/include/asm/elf.h
2210+++ b/arch/arm/include/asm/elf.h
2211@@ -114,7 +114,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
2212 the loader. We need to make sure that it is out of the way of the program
2213 that it will "exec", and that there is sufficient room for the brk. */
2214
2215-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
2216+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2217+
2218+#ifdef CONFIG_PAX_ASLR
2219+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
2220+
2221+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
2222+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
2223+#endif
2224
2225 /* When the program starts, a1 contains a pointer to a function to be
2226 registered with atexit, as per the SVR4 ABI. A value of 0 means we
2227@@ -124,10 +131,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
2228 extern void elf_set_personality(const struct elf32_hdr *);
2229 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
2230
2231-struct mm_struct;
2232-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2233-#define arch_randomize_brk arch_randomize_brk
2234-
2235 #ifdef CONFIG_MMU
2236 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2237 struct linux_binprm;
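
The PAX_DELTA_* lengths are counts of random bits; PaX applies the delta at page granularity, so on the assumption of 4 KiB pages (PAGE_SHIFT == 12) the reachable span works out as below. A quick sketch of the arithmetic:

/* span in bytes covered by a delta of 'bits' random bits, assuming the
 * delta is applied in whole pages (PAGE_SHIFT == 12) */
static inline unsigned long pax_delta_span(unsigned int bits)
{
	return 1UL << (bits + 12);	/* 16 bits -> 256 MiB, 10 bits -> 4 MiB */
}
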
2238diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
2239index de53547..52b9a28 100644
2240--- a/arch/arm/include/asm/fncpy.h
2241+++ b/arch/arm/include/asm/fncpy.h
2242@@ -81,7 +81,9 @@
2243 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
2244 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
2245 \
2246+ pax_open_kernel(); \
2247 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
2248+ pax_close_kernel(); \
2249 flush_icache_range((unsigned long)(dest_buf), \
2250 (unsigned long)(dest_buf) + (size)); \
2251 \
2252diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
2253index 53e69da..3fdc896 100644
2254--- a/arch/arm/include/asm/futex.h
2255+++ b/arch/arm/include/asm/futex.h
2256@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
2257 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
2258 return -EFAULT;
2259
2260+ pax_open_userland();
2261+
2262 smp_mb();
2263 /* Prefetching cannot fault */
2264 prefetchw(uaddr);
2265@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
2266 : "cc", "memory");
2267 smp_mb();
2268
2269+ pax_close_userland();
2270+
2271 *uval = val;
2272 return ret;
2273 }
2274@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
2275 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
2276 return -EFAULT;
2277
2278+ pax_open_userland();
2279+
2280 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
2281 "1: " TUSER(ldr) " %1, [%4]\n"
2282 " teq %1, %2\n"
2283@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
2284 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
2285 : "cc", "memory");
2286
2287+ pax_close_userland();
2288+
2289 *uval = val;
2290 return ret;
2291 }
2292@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
2293 return -EFAULT;
2294
2295 pagefault_disable(); /* implies preempt_disable() */
2296+ pax_open_userland();
2297
2298 switch (op) {
2299 case FUTEX_OP_SET:
2300@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
2301 ret = -ENOSYS;
2302 }
2303
2304+ pax_close_userland();
2305 pagefault_enable(); /* subsumes preempt_enable() */
2306
2307 if (!ret) {
2308diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
2309index 83eb2f7..ed77159 100644
2310--- a/arch/arm/include/asm/kmap_types.h
2311+++ b/arch/arm/include/asm/kmap_types.h
2312@@ -4,6 +4,6 @@
2313 /*
2314 * This is the "bare minimum". AIO seems to require this.
2315 */
2316-#define KM_TYPE_NR 16
2317+#define KM_TYPE_NR 17
2318
2319 #endif
2320diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
2321index 9e614a1..3302cca 100644
2322--- a/arch/arm/include/asm/mach/dma.h
2323+++ b/arch/arm/include/asm/mach/dma.h
2324@@ -22,7 +22,7 @@ struct dma_ops {
2325 int (*residue)(unsigned int, dma_t *); /* optional */
2326 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
2327 const char *type;
2328-};
2329+} __do_const;
2330
2331 struct dma_struct {
2332 void *addr; /* single DMA address */
2333diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
2334index f98c7f3..e5c626d 100644
2335--- a/arch/arm/include/asm/mach/map.h
2336+++ b/arch/arm/include/asm/mach/map.h
2337@@ -23,17 +23,19 @@ struct map_desc {
2338
2339 /* types 0-3 are defined in asm/io.h */
2340 enum {
2341- MT_UNCACHED = 4,
2342- MT_CACHECLEAN,
2343- MT_MINICLEAN,
2344+ MT_UNCACHED_RW = 4,
2345+ MT_CACHECLEAN_RO,
2346+ MT_MINICLEAN_RO,
2347 MT_LOW_VECTORS,
2348 MT_HIGH_VECTORS,
2349- MT_MEMORY_RWX,
2350+ __MT_MEMORY_RWX,
2351 MT_MEMORY_RW,
2352- MT_ROM,
2353- MT_MEMORY_RWX_NONCACHED,
2354+ MT_MEMORY_RX,
2355+ MT_ROM_RX,
2356+ MT_MEMORY_RW_NONCACHED,
2357+ MT_MEMORY_RX_NONCACHED,
2358 MT_MEMORY_RW_DTCM,
2359- MT_MEMORY_RWX_ITCM,
2360+ MT_MEMORY_RX_ITCM,
2361 MT_MEMORY_RW_SO,
2362 MT_MEMORY_DMA_READY,
2363 };
2364diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
2365index 891a56b..48f337e 100644
2366--- a/arch/arm/include/asm/outercache.h
2367+++ b/arch/arm/include/asm/outercache.h
2368@@ -36,7 +36,7 @@ struct outer_cache_fns {
2369
2370 /* This is an ARM L2C thing */
2371 void (*write_sec)(unsigned long, unsigned);
2372-};
2373+} __no_const;
2374
2375 extern struct outer_cache_fns outer_cache;
2376
2377diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
2378index 4355f0e..cd9168e 100644
2379--- a/arch/arm/include/asm/page.h
2380+++ b/arch/arm/include/asm/page.h
2381@@ -23,6 +23,7 @@
2382
2383 #else
2384
2385+#include <linux/compiler.h>
2386 #include <asm/glue.h>
2387
2388 /*
2389@@ -114,7 +115,7 @@ struct cpu_user_fns {
2390 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
2391 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
2392 unsigned long vaddr, struct vm_area_struct *vma);
2393-};
2394+} __no_const;
2395
2396 #ifdef MULTI_USER
2397 extern struct cpu_user_fns cpu_user;
2398diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
2399index 78a7793..e3dc06c 100644
2400--- a/arch/arm/include/asm/pgalloc.h
2401+++ b/arch/arm/include/asm/pgalloc.h
2402@@ -17,6 +17,7 @@
2403 #include <asm/processor.h>
2404 #include <asm/cacheflush.h>
2405 #include <asm/tlbflush.h>
2406+#include <asm/system_info.h>
2407
2408 #define check_pgt_cache() do { } while (0)
2409
2410@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2411 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
2412 }
2413
2414+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2415+{
2416+ pud_populate(mm, pud, pmd);
2417+}
2418+
2419 #else /* !CONFIG_ARM_LPAE */
2420
2421 /*
2422@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2423 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
2424 #define pmd_free(mm, pmd) do { } while (0)
2425 #define pud_populate(mm,pmd,pte) BUG()
2426+#define pud_populate_kernel(mm,pmd,pte) BUG()
2427
2428 #endif /* CONFIG_ARM_LPAE */
2429
2430@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
2431 __free_page(pte);
2432 }
2433
2434+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
2435+{
2436+#ifdef CONFIG_ARM_LPAE
2437+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
2438+#else
2439+ if (addr & SECTION_SIZE)
2440+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
2441+ else
2442+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
2443+#endif
2444+ flush_pmd_entry(pmdp);
2445+}
2446+
2447 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
2448 pmdval_t prot)
2449 {
2450@@ -157,7 +177,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
2451 static inline void
2452 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
2453 {
2454- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
2455+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
2456 }
2457 #define pmd_pgtable(pmd) pmd_page(pmd)
2458
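
__section_update() exists because, without LPAE, a Linux "pmd" is really a pair of adjacent 1 MiB section entries spanning 2 MiB, and bit 20 of the address (SECTION_SIZE) selects the half that maps it. A hypothetical KERNEXEC-style caller marking a section non-executable would look like this, with pmdp assumed to have been looked up for addr already:

static void section_mark_nx(pmd_t *pmdp, unsigned long addr)
{
	/* OR PMD_SECT_XN into whichever section entry maps addr;
	 * the pmd flush happens inside __section_update() */
	__section_update(pmdp, addr, PMD_SECT_XN);
}
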
2459diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
2460index 5cfba15..f415e1a 100644
2461--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
2462+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
2463@@ -20,12 +20,15 @@
2464 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
2465 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
2466 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
2467+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
2468 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
2469 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
2470 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
2471+
2472 /*
2473 * - section
2474 */
2475+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
2476 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
2477 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
2478 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
2479@@ -37,6 +40,7 @@
2480 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
2481 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
2482 #define PMD_SECT_AF (_AT(pmdval_t, 0))
2483+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
2484
2485 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
2486 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
2487@@ -66,6 +70,7 @@
2488 * - extended small page/tiny page
2489 */
2490 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
2491+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
2492 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
2493 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
2494 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
2495diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
2496index 219ac88..73ec32a 100644
2497--- a/arch/arm/include/asm/pgtable-2level.h
2498+++ b/arch/arm/include/asm/pgtable-2level.h
2499@@ -126,6 +126,9 @@
2500 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
2501 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
2502
2503+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
2504+#define L_PTE_PXN (_AT(pteval_t, 0))
2505+
2506 /*
2507 * These are the memory types, defined to be compatible with
2508 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
2509diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
2510index 626989f..9d67a33 100644
2511--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
2512+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
2513@@ -75,6 +75,7 @@
2514 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
2515 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
2516 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
2517+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2518 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
2519
2520 /*
2521diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
2522index 85c60ad..b0bbd7e 100644
2523--- a/arch/arm/include/asm/pgtable-3level.h
2524+++ b/arch/arm/include/asm/pgtable-3level.h
2525@@ -82,6 +82,7 @@
2526 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
2527 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
2528 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
2529+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2530 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
2531 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
2532 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
2533@@ -95,6 +96,7 @@
2534 /*
2535 * To be used in assembly code with the upper page attributes.
2536 */
2537+#define L_PTE_PXN_HIGH (1 << (53 - 32))
2538 #define L_PTE_XN_HIGH (1 << (54 - 32))
2539 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
2540
2541diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
2542index 5478e5d..f5b5cb3 100644
2543--- a/arch/arm/include/asm/pgtable.h
2544+++ b/arch/arm/include/asm/pgtable.h
2545@@ -33,6 +33,9 @@
2546 #include <asm/pgtable-2level.h>
2547 #endif
2548
2549+#define ktla_ktva(addr) (addr)
2550+#define ktva_ktla(addr) (addr)
2551+
2552 /*
2553 * Just any arbitrary offset to the start of the vmalloc VM area: the
2554 * current 8MB value just means that there will be a 8MB "hole" after the
2555@@ -48,6 +51,9 @@
2556 #define LIBRARY_TEXT_START 0x0c000000
2557
2558 #ifndef __ASSEMBLY__
2559+extern pteval_t __supported_pte_mask;
2560+extern pmdval_t __supported_pmd_mask;
2561+
2562 extern void __pte_error(const char *file, int line, pte_t);
2563 extern void __pmd_error(const char *file, int line, pmd_t);
2564 extern void __pgd_error(const char *file, int line, pgd_t);
2565@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2566 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2567 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2568
2569+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2570+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2571+
2572+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2573+#include <asm/domain.h>
2574+#include <linux/thread_info.h>
2575+#include <linux/preempt.h>
2576+
2577+static inline int test_domain(int domain, int domaintype)
2578+{
2579+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2580+}
2581+#endif
2582+
2583+#ifdef CONFIG_PAX_KERNEXEC
2584+static inline unsigned long pax_open_kernel(void) {
2585+#ifdef CONFIG_ARM_LPAE
2586+ /* TODO */
2587+#else
2588+ preempt_disable();
2589+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2590+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2591+#endif
2592+ return 0;
2593+}
2594+
2595+static inline unsigned long pax_close_kernel(void) {
2596+#ifdef CONFIG_ARM_LPAE
2597+ /* TODO */
2598+#else
2599+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2600+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2601+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2602+ preempt_enable_no_resched();
2603+#endif
2604+ return 0;
2605+}
2606+#else
2607+static inline unsigned long pax_open_kernel(void) { return 0; }
2608+static inline unsigned long pax_close_kernel(void) { return 0; }
2609+#endif
2610+
2611 /*
2612 * This is the lowest virtual address we can permit any user space
2613 * mapping to be mapped at. This is particularly important for
2614@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2615 /*
2616 * The pgprot_* and protection_map entries will be fixed up in runtime
2617 * to include the cachable and bufferable bits based on memory policy,
2618- * as well as any architecture dependent bits like global/ASID and SMP
2619- * shared mapping bits.
2620+ * as well as any architecture dependent bits like global/ASID, PXN,
2621+ * and SMP shared mapping bits.
2622 */
2623 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2624
2625@@ -265,7 +313,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2626 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2627 {
2628 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2629- L_PTE_NONE | L_PTE_VALID;
2630+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2631 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2632 return pte;
2633 }
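
The open/close pair is always used as a bracket, as fiq.c and patch.c do later in this patch: preemption goes off and DOMAIN_KERNEL flips to manager so a normally read-only kernel mapping accepts the write, then everything is restored. Condensed to a sketch:

static void write_ro_kernel_word(u32 *addr, u32 val)
{
	pax_open_kernel();	/* preempt off, DOMAIN_KERNEL -> DOMAIN_KERNEXEC (manager) */
	*addr = val;		/* the write bypasses the read-only mapping */
	pax_close_kernel();	/* back to client, preemption re-enabled */
}
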
2634diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2635index c25ef3e..735f14b 100644
2636--- a/arch/arm/include/asm/psci.h
2637+++ b/arch/arm/include/asm/psci.h
2638@@ -32,7 +32,7 @@ struct psci_operations {
2639 int (*affinity_info)(unsigned long target_affinity,
2640 unsigned long lowest_affinity_level);
2641 int (*migrate_info_type)(void);
2642-};
2643+} __no_const;
2644
2645 extern struct psci_operations psci_ops;
2646 extern struct smp_operations psci_smp_ops;
2647diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2648index 2ec765c..beb1fe16 100644
2649--- a/arch/arm/include/asm/smp.h
2650+++ b/arch/arm/include/asm/smp.h
2651@@ -113,7 +113,7 @@ struct smp_operations {
2652 int (*cpu_disable)(unsigned int cpu);
2653 #endif
2654 #endif
2655-};
2656+} __no_const;
2657
2658 struct of_cpu_method {
2659 const char *method;
2660diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2661index e4e4208..086684a 100644
2662--- a/arch/arm/include/asm/thread_info.h
2663+++ b/arch/arm/include/asm/thread_info.h
2664@@ -88,9 +88,9 @@ struct thread_info {
2665 .flags = 0, \
2666 .preempt_count = INIT_PREEMPT_COUNT, \
2667 .addr_limit = KERNEL_DS, \
2668- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2669- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2670- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2671+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2672+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2673+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2674 .restart_block = { \
2675 .fn = do_no_restart_syscall, \
2676 }, \
2677@@ -164,7 +164,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2678 #define TIF_SYSCALL_AUDIT 9
2679 #define TIF_SYSCALL_TRACEPOINT 10
2680 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2681-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2682+/* within 8 bits of TIF_SYSCALL_TRACE
2683+ * to meet flexible second operand requirements
2684+ */
2685+#define TIF_GRSEC_SETXID 12
2686+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2687 #define TIF_USING_IWMMXT 17
2688 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2689 #define TIF_RESTORE_SIGMASK 20
2690@@ -178,10 +182,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2691 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2692 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2693 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2694+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2695
2696 /* Checks for any syscall work in entry-common.S */
2697 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2698- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2699+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2700
2701 /*
2702 * Change these and you break ASM code in entry-common.S
2703diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2704index 75d9579..b5b40e4 100644
2705--- a/arch/arm/include/asm/uaccess.h
2706+++ b/arch/arm/include/asm/uaccess.h
2707@@ -18,6 +18,7 @@
2708 #include <asm/domain.h>
2709 #include <asm/unified.h>
2710 #include <asm/compiler.h>
2711+#include <asm/pgtable.h>
2712
2713 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2714 #include <asm-generic/uaccess-unaligned.h>
2715@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2716 static inline void set_fs(mm_segment_t fs)
2717 {
2718 current_thread_info()->addr_limit = fs;
2719- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2720+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2721 }
2722
2723 #define segment_eq(a,b) ((a) == (b))
2724
2725+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2726+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2727+
2728+static inline void pax_open_userland(void)
2729+{
2730+
2731+#ifdef CONFIG_PAX_MEMORY_UDEREF
2732+ if (segment_eq(get_fs(), USER_DS)) {
2733+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2734+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2735+ }
2736+#endif
2737+
2738+}
2739+
2740+static inline void pax_close_userland(void)
2741+{
2742+
2743+#ifdef CONFIG_PAX_MEMORY_UDEREF
2744+ if (segment_eq(get_fs(), USER_DS)) {
2745+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2746+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2747+ }
2748+#endif
2749+
2750+}
2751+
2752 #define __addr_ok(addr) ({ \
2753 unsigned long flag; \
2754 __asm__("cmp %2, %0; movlo %0, #0" \
2755@@ -150,8 +178,12 @@ extern int __get_user_4(void *);
2756
2757 #define get_user(x,p) \
2758 ({ \
2759+ int __e; \
2760 might_fault(); \
2761- __get_user_check(x,p); \
2762+ pax_open_userland(); \
2763+ __e = __get_user_check(x,p); \
2764+ pax_close_userland(); \
2765+ __e; \
2766 })
2767
2768 extern int __put_user_1(void *, unsigned int);
2769@@ -196,8 +228,12 @@ extern int __put_user_8(void *, unsigned long long);
2770
2771 #define put_user(x,p) \
2772 ({ \
2773+ int __e; \
2774 might_fault(); \
2775- __put_user_check(x,p); \
2776+ pax_open_userland(); \
2777+ __e = __put_user_check(x,p); \
2778+ pax_close_userland(); \
2779+ __e; \
2780 })
2781
2782 #else /* CONFIG_MMU */
2783@@ -221,6 +257,7 @@ static inline void set_fs(mm_segment_t fs)
2784
2785 #endif /* CONFIG_MMU */
2786
2787+#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size))
2788 #define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
2789
2790 #define user_addr_max() \
2791@@ -238,13 +275,17 @@ static inline void set_fs(mm_segment_t fs)
2792 #define __get_user(x,ptr) \
2793 ({ \
2794 long __gu_err = 0; \
2795+ pax_open_userland(); \
2796 __get_user_err((x),(ptr),__gu_err); \
2797+ pax_close_userland(); \
2798 __gu_err; \
2799 })
2800
2801 #define __get_user_error(x,ptr,err) \
2802 ({ \
2803+ pax_open_userland(); \
2804 __get_user_err((x),(ptr),err); \
2805+ pax_close_userland(); \
2806 (void) 0; \
2807 })
2808
2809@@ -320,13 +361,17 @@ do { \
2810 #define __put_user(x,ptr) \
2811 ({ \
2812 long __pu_err = 0; \
2813+ pax_open_userland(); \
2814 __put_user_err((x),(ptr),__pu_err); \
2815+ pax_close_userland(); \
2816 __pu_err; \
2817 })
2818
2819 #define __put_user_error(x,ptr,err) \
2820 ({ \
2821+ pax_open_userland(); \
2822 __put_user_err((x),(ptr),err); \
2823+ pax_close_userland(); \
2824 (void) 0; \
2825 })
2826
2827@@ -426,11 +471,44 @@ do { \
2828
2829
2830 #ifdef CONFIG_MMU
2831-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2832-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2833+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2834+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2835+
2836+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2837+{
2838+ unsigned long ret;
2839+
2840+ check_object_size(to, n, false);
2841+ pax_open_userland();
2842+ ret = ___copy_from_user(to, from, n);
2843+ pax_close_userland();
2844+ return ret;
2845+}
2846+
2847+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2848+{
2849+ unsigned long ret;
2850+
2851+ check_object_size(from, n, true);
2852+ pax_open_userland();
2853+ ret = ___copy_to_user(to, from, n);
2854+ pax_close_userland();
2855+ return ret;
2856+}
2857+
2858 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2859-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2860+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2861 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2862+
2863+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2864+{
2865+ unsigned long ret;
2866+ pax_open_userland();
2867+ ret = ___clear_user(addr, n);
2868+ pax_close_userland();
2869+ return ret;
2870+}
2871+
2872 #else
2873 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2874 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2875@@ -439,6 +517,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2876
2877 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2878 {
2879+ if ((long)n < 0)
2880+ return n;
2881+
2882 if (access_ok(VERIFY_READ, from, n))
2883 n = __copy_from_user(to, from, n);
2884 else /* security hole - plug it */
2885@@ -448,6 +529,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2886
2887 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2888 {
2889+ if ((long)n < 0)
2890+ return n;
2891+
2892 if (access_ok(VERIFY_WRITE, to, n))
2893 n = __copy_to_user(to, from, n);
2894 return n;
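
The added (long)n < 0 checks are a guard against size underflow: a length computed as a - b that goes negative arrives here as a huge size_t. Refusing it outright returns n, meaning "n bytes not copied", which callers already treat as failure, and it also keeps the leak-plugging memset from ever running with a near-4G length. The read side, in isolation:

static unsigned long copy_from_user_sketch(void *to,
					   const void __user *from,
					   unsigned long n)
{
	if ((long)n < 0)	/* top bit set: almost certainly an underflow */
		return n;	/* "n bytes not copied" */

	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else
		memset(to, 0, n);	/* don't leak stale kernel memory */
	return n;
}
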
2895diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2896index 5af0ed1..cea83883 100644
2897--- a/arch/arm/include/uapi/asm/ptrace.h
2898+++ b/arch/arm/include/uapi/asm/ptrace.h
2899@@ -92,7 +92,7 @@
2900 * ARMv7 groups of PSR bits
2901 */
2902 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2903-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2904+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2905 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2906 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2907
2908diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2909index f7b450f..f5364c5 100644
2910--- a/arch/arm/kernel/armksyms.c
2911+++ b/arch/arm/kernel/armksyms.c
2912@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2913
2914 /* networking */
2915 EXPORT_SYMBOL(csum_partial);
2916-EXPORT_SYMBOL(csum_partial_copy_from_user);
2917+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2918 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2919 EXPORT_SYMBOL(__csum_ipv6_magic);
2920
2921@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2922 #ifdef CONFIG_MMU
2923 EXPORT_SYMBOL(copy_page);
2924
2925-EXPORT_SYMBOL(__copy_from_user);
2926-EXPORT_SYMBOL(__copy_to_user);
2927-EXPORT_SYMBOL(__clear_user);
2928+EXPORT_SYMBOL(___copy_from_user);
2929+EXPORT_SYMBOL(___copy_to_user);
2930+EXPORT_SYMBOL(___clear_user);
2931
2932 EXPORT_SYMBOL(__get_user_1);
2933 EXPORT_SYMBOL(__get_user_2);
2934diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2935index 52a949a..d8bbcab 100644
2936--- a/arch/arm/kernel/entry-armv.S
2937+++ b/arch/arm/kernel/entry-armv.S
2938@@ -47,6 +47,87 @@
2939 9997:
2940 .endm
2941
2942+ .macro pax_enter_kernel
2943+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2944+ @ make aligned space for saved DACR
2945+ sub sp, sp, #8
2946+ @ save regs
2947+ stmdb sp!, {r1, r2}
2948+ @ read DACR from cpu_domain into r1
2949+ mov r2, sp
2950+ @ assume 8K pages, since we have to split the immediate in two
2951+ bic r2, r2, #(0x1fc0)
2952+ bic r2, r2, #(0x3f)
2953+ ldr r1, [r2, #TI_CPU_DOMAIN]
2954+ @ store old DACR on stack
2955+ str r1, [sp, #8]
2956+#ifdef CONFIG_PAX_KERNEXEC
2957+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2958+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2959+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2960+#endif
2961+#ifdef CONFIG_PAX_MEMORY_UDEREF
2962+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2963+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2964+#endif
2965+ @ write r1 to current_thread_info()->cpu_domain
2966+ str r1, [r2, #TI_CPU_DOMAIN]
2967+ @ write r1 to DACR
2968+ mcr p15, 0, r1, c3, c0, 0
2969+ @ instruction sync
2970+ instr_sync
2971+ @ restore regs
2972+ ldmia sp!, {r1, r2}
2973+#endif
2974+ .endm
2975+
2976+ .macro pax_open_userland
2977+#ifdef CONFIG_PAX_MEMORY_UDEREF
2978+ @ save regs
2979+ stmdb sp!, {r0, r1}
2980+ @ read DACR from cpu_domain into r1
2981+ mov r0, sp
2982+ @ assume 8K pages, since we have to split the immediate in two
2983+ bic r0, r0, #(0x1fc0)
2984+ bic r0, r0, #(0x3f)
2985+ ldr r1, [r0, #TI_CPU_DOMAIN]
2986+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2987+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2988+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2989+ @ write r1 to current_thread_info()->cpu_domain
2990+ str r1, [r0, #TI_CPU_DOMAIN]
2991+ @ write r1 to DACR
2992+ mcr p15, 0, r1, c3, c0, 0
2993+ @ instruction sync
2994+ instr_sync
2995+ @ restore regs
2996+ ldmia sp!, {r0, r1}
2997+#endif
2998+ .endm
2999+
3000+ .macro pax_close_userland
3001+#ifdef CONFIG_PAX_MEMORY_UDEREF
3002+ @ save regs
3003+ stmdb sp!, {r0, r1}
3004+ @ read DACR from cpu_domain into r1
3005+ mov r0, sp
3006+ @ assume 8K pages, since we have to split the immediate in two
3007+ bic r0, r0, #(0x1fc0)
3008+ bic r0, r0, #(0x3f)
3009+ ldr r1, [r0, #TI_CPU_DOMAIN]
3010+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
3011+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
3012+ @ write r1 to current_thread_info()->cpu_domain
3013+ str r1, [r0, #TI_CPU_DOMAIN]
3014+ @ write r1 to DACR
3015+ mcr p15, 0, r1, c3, c0, 0
3016+ @ instruction sync
3017+ instr_sync
3018+ @ restore regs
3019+ ldmia sp!, {r0, r1}
3020+#endif
3021+ .endm
3022+
3023 .macro pabt_helper
3024 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
3025 #ifdef MULTI_PABORT
3026@@ -89,11 +170,15 @@
3027 * Invalid mode handlers
3028 */
3029 .macro inv_entry, reason
3030+
3031+ pax_enter_kernel
3032+
3033 sub sp, sp, #S_FRAME_SIZE
3034 ARM( stmib sp, {r1 - lr} )
3035 THUMB( stmia sp, {r0 - r12} )
3036 THUMB( str sp, [sp, #S_SP] )
3037 THUMB( str lr, [sp, #S_LR] )
3038+
3039 mov r1, #\reason
3040 .endm
3041
3042@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
3043 .macro svc_entry, stack_hole=0
3044 UNWIND(.fnstart )
3045 UNWIND(.save {r0 - pc} )
3046+
3047+ pax_enter_kernel
3048+
3049 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
3050+
3051 #ifdef CONFIG_THUMB2_KERNEL
3052 SPFIX( str r0, [sp] ) @ temporarily saved
3053 SPFIX( mov r0, sp )
3054@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
3055 ldmia r0, {r3 - r5}
3056 add r7, sp, #S_SP - 4 @ here for interlock avoidance
3057 mov r6, #-1 @ "" "" "" ""
3058+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3059+ @ offset sp by 8 as done in pax_enter_kernel
3060+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
3061+#else
3062 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
3063+#endif
3064 SPFIX( addeq r2, r2, #4 )
3065 str r3, [sp, #-4]! @ save the "real" r0 copied
3066 @ from the exception stack
3067@@ -317,6 +411,9 @@ ENDPROC(__pabt_svc)
3068 .macro usr_entry
3069 UNWIND(.fnstart )
3070 UNWIND(.cantunwind ) @ don't unwind the user space
3071+
3072+ pax_enter_kernel_user
3073+
3074 sub sp, sp, #S_FRAME_SIZE
3075 ARM( stmib sp, {r1 - r12} )
3076 THUMB( stmia sp, {r0 - r12} )
3077@@ -421,7 +518,9 @@ __und_usr:
3078 tst r3, #PSR_T_BIT @ Thumb mode?
3079 bne __und_usr_thumb
3080 sub r4, r2, #4 @ ARM instr at LR - 4
3081+ pax_open_userland
3082 1: ldrt r0, [r4]
3083+ pax_close_userland
3084 ARM_BE8(rev r0, r0) @ little endian instruction
3085
3086 @ r0 = 32-bit ARM instruction which caused the exception
3087@@ -455,11 +554,15 @@ __und_usr_thumb:
3088 */
3089 .arch armv6t2
3090 #endif
3091+ pax_open_userland
3092 2: ldrht r5, [r4]
3093+ pax_close_userland
3094 ARM_BE8(rev16 r5, r5) @ little endian instruction
3095 cmp r5, #0xe800 @ 32bit instruction if xx != 0
3096 blo __und_usr_fault_16 @ 16bit undefined instruction
3097+ pax_open_userland
3098 3: ldrht r0, [r2]
3099+ pax_close_userland
3100 ARM_BE8(rev16 r0, r0) @ little endian instruction
3101 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
3102 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
3103@@ -489,7 +592,8 @@ ENDPROC(__und_usr)
3104 */
3105 .pushsection .fixup, "ax"
3106 .align 2
3107-4: str r4, [sp, #S_PC] @ retry current instruction
3108+4: pax_close_userland
3109+ str r4, [sp, #S_PC] @ retry current instruction
3110 mov pc, r9
3111 .popsection
3112 .pushsection __ex_table,"a"
3113@@ -698,7 +802,7 @@ ENTRY(__switch_to)
3114 THUMB( str lr, [ip], #4 )
3115 ldr r4, [r2, #TI_TP_VALUE]
3116 ldr r5, [r2, #TI_TP_VALUE + 4]
3117-#ifdef CONFIG_CPU_USE_DOMAINS
3118+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3119 ldr r6, [r2, #TI_CPU_DOMAIN]
3120 #endif
3121 switch_tls r1, r4, r5, r3, r7
3122@@ -707,7 +811,7 @@ ENTRY(__switch_to)
3123 ldr r8, =__stack_chk_guard
3124 ldr r7, [r7, #TSK_STACK_CANARY]
3125 #endif
3126-#ifdef CONFIG_CPU_USE_DOMAINS
3127+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3128 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
3129 #endif
3130 mov r5, r0
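
Each of these macros locates thread_info exactly the way current_thread_info() does: with 8 KiB kernel stacks, thread_info sits at the stack base, so clearing the low 13 bits of sp finds it. Note 0x1fc0 | 0x3f == 0x1fff; the mask is split across two bic instructions only because ARM immediates are 8 bits rotated. The C-level equivalent, as a sketch:

static inline struct thread_info *ti_from_sp_sketch(void)
{
	register unsigned long sp asm("sp");

	/* THREAD_SIZE == 8192: thread_info lives at the stack base */
	return (struct thread_info *)(sp & ~(unsigned long)0x1fff);
}
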
3131diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
3132index 7139d4a..feaf37f 100644
3133--- a/arch/arm/kernel/entry-common.S
3134+++ b/arch/arm/kernel/entry-common.S
3135@@ -10,18 +10,46 @@
3136
3137 #include <asm/unistd.h>
3138 #include <asm/ftrace.h>
3139+#include <asm/domain.h>
3140 #include <asm/unwind.h>
3141
3142+#include "entry-header.S"
3143+
3144 #ifdef CONFIG_NEED_RET_TO_USER
3145 #include <mach/entry-macro.S>
3146 #else
3147 .macro arch_ret_to_user, tmp1, tmp2
3148+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3149+ @ save regs
3150+ stmdb sp!, {r1, r2}
3151+ @ read DACR from cpu_domain into r1
3152+ mov r2, sp
3153+ @ assume 8K pages, since we have to split the immediate in two
3154+ bic r2, r2, #(0x1fc0)
3155+ bic r2, r2, #(0x3f)
3156+ ldr r1, [r2, #TI_CPU_DOMAIN]
3157+#ifdef CONFIG_PAX_KERNEXEC
3158+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
3159+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
3160+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
3161+#endif
3162+#ifdef CONFIG_PAX_MEMORY_UDEREF
3163+ @ set current DOMAIN_USER to DOMAIN_UDEREF
3164+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
3165+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
3166+#endif
3167+ @ write r1 to current_thread_info()->cpu_domain
3168+ str r1, [r2, #TI_CPU_DOMAIN]
3169+ @ write r1 to DACR
3170+ mcr p15, 0, r1, c3, c0, 0
3171+ @ instruction sync
3172+ instr_sync
3173+ @ restore regs
3174+ ldmia sp!, {r1, r2}
3175+#endif
3176 .endm
3177 #endif
3178
3179-#include "entry-header.S"
3180-
3181-
3182 .align 5
3183 /*
3184 * This is the fast syscall return path. We do as little as
3185@@ -405,6 +433,12 @@ ENTRY(vector_swi)
3186 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
3187 #endif
3188
3189+ /*
3190+	 * do this here to avoid the performance hit of wrapping the code above,
3191+	 * which directly dereferences userland to parse the SWI instruction
3192+ */
3193+ pax_enter_kernel_user
3194+
3195 adr tbl, sys_call_table @ load syscall table pointer
3196
3197 #if defined(CONFIG_OABI_COMPAT)
3198diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
3199index 5d702f8..f5fc51a 100644
3200--- a/arch/arm/kernel/entry-header.S
3201+++ b/arch/arm/kernel/entry-header.S
3202@@ -188,6 +188,60 @@
3203 msr cpsr_c, \rtemp @ switch back to the SVC mode
3204 .endm
3205
3206+ .macro pax_enter_kernel_user
3207+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3208+ @ save regs
3209+ stmdb sp!, {r0, r1}
3210+ @ read DACR from cpu_domain into r1
3211+ mov r0, sp
3212+ @ assume 8K pages, since we have to split the immediate in two
3213+ bic r0, r0, #(0x1fc0)
3214+ bic r0, r0, #(0x3f)
3215+ ldr r1, [r0, #TI_CPU_DOMAIN]
3216+#ifdef CONFIG_PAX_MEMORY_UDEREF
3217+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
3218+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
3219+#endif
3220+#ifdef CONFIG_PAX_KERNEXEC
3221+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
3222+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
3223+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
3224+#endif
3225+ @ write r1 to current_thread_info()->cpu_domain
3226+ str r1, [r0, #TI_CPU_DOMAIN]
3227+ @ write r1 to DACR
3228+ mcr p15, 0, r1, c3, c0, 0
3229+ @ instruction sync
3230+ instr_sync
3231+ @ restore regs
3232+ ldmia sp!, {r0, r1}
3233+#endif
3234+ .endm
3235+
3236+ .macro pax_exit_kernel
3237+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3238+ @ save regs
3239+ stmdb sp!, {r0, r1}
3240+ @ read old DACR from stack into r1
3241+ ldr r1, [sp, #(8 + S_SP)]
3242+ sub r1, r1, #8
3243+ ldr r1, [r1]
3244+
3245+ @ write r1 to current_thread_info()->cpu_domain
3246+ mov r0, sp
3247+ @ assume 8K pages, since we have to split the immediate in two
3248+ bic r0, r0, #(0x1fc0)
3249+ bic r0, r0, #(0x3f)
3250+ str r1, [r0, #TI_CPU_DOMAIN]
3251+ @ write r1 to DACR
3252+ mcr p15, 0, r1, c3, c0, 0
3253+ @ instruction sync
3254+ instr_sync
3255+ @ restore regs
3256+ ldmia sp!, {r0, r1}
3257+#endif
3258+ .endm
3259+
3260 #ifndef CONFIG_THUMB2_KERNEL
3261 .macro svc_exit, rpsr, irq = 0
3262 .if \irq != 0
3263@@ -207,6 +261,9 @@
3264 blne trace_hardirqs_off
3265 #endif
3266 .endif
3267+
3268+ pax_exit_kernel
3269+
3270 msr spsr_cxsf, \rpsr
3271 #if defined(CONFIG_CPU_V6)
3272 ldr r0, [sp]
3273@@ -265,6 +322,9 @@
3274 blne trace_hardirqs_off
3275 #endif
3276 .endif
3277+
3278+ pax_exit_kernel
3279+
3280 ldr lr, [sp, #S_SP] @ top of the stack
3281 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
3282 clrex @ clear the exclusive monitor
3283diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
3284index 918875d..cd5fa27 100644
3285--- a/arch/arm/kernel/fiq.c
3286+++ b/arch/arm/kernel/fiq.c
3287@@ -87,7 +87,10 @@ void set_fiq_handler(void *start, unsigned int length)
3288 void *base = vectors_page;
3289 unsigned offset = FIQ_OFFSET;
3290
3291+ pax_open_kernel();
3292 memcpy(base + offset, start, length);
3293+ pax_close_kernel();
3294+
3295 if (!cache_is_vipt_nonaliasing())
3296 flush_icache_range((unsigned long)base + offset, offset +
3297 length);
3298diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
3299index 2c35f0f..7747ee6 100644
3300--- a/arch/arm/kernel/head.S
3301+++ b/arch/arm/kernel/head.S
3302@@ -437,7 +437,7 @@ __enable_mmu:
3303 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
3304 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
3305 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
3306- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
3307+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
3308 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
3309 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
3310 #endif
3311diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
3312index 45e4781..8eac93d 100644
3313--- a/arch/arm/kernel/module.c
3314+++ b/arch/arm/kernel/module.c
3315@@ -38,12 +38,39 @@
3316 #endif
3317
3318 #ifdef CONFIG_MMU
3319-void *module_alloc(unsigned long size)
3320+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
3321 {
3322+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
3323+ return NULL;
3324 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
3325- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
3326+ GFP_KERNEL, prot, NUMA_NO_NODE,
3327 __builtin_return_address(0));
3328 }
3329+
3330+void *module_alloc(unsigned long size)
3331+{
3332+
3333+#ifdef CONFIG_PAX_KERNEXEC
3334+ return __module_alloc(size, PAGE_KERNEL);
3335+#else
3336+ return __module_alloc(size, PAGE_KERNEL_EXEC);
3337+#endif
3338+
3339+}
3340+
3341+#ifdef CONFIG_PAX_KERNEXEC
3342+void module_free_exec(struct module *mod, void *module_region)
3343+{
3344+ module_free(mod, module_region);
3345+}
3346+EXPORT_SYMBOL(module_free_exec);
3347+
3348+void *module_alloc_exec(unsigned long size)
3349+{
3350+ return __module_alloc(size, PAGE_KERNEL_EXEC);
3351+}
3352+EXPORT_SYMBOL(module_alloc_exec);
3353+#endif
3354 #endif
3355
3356 int
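
Under KERNEXEC the allocator split means module data comes out of module_alloc() as non-executable PAGE_KERNEL memory, while module text must be requested explicitly via module_alloc_exec(). A hypothetical loader-side sketch (names and error handling simplified; the real loader changes live elsewhere in this patch):

static int alloc_module_regions_sketch(unsigned long text_size,
				       unsigned long data_size,
				       void **text, void **data)
{
	*text = module_alloc_exec(text_size);	/* rx mapping for code */
	if (!*text)
		return -ENOMEM;
	*data = module_alloc(data_size);	/* rw, non-exec mapping for data */
	if (!*data) {
		module_free_exec(NULL, *text);
		return -ENOMEM;
	}
	return 0;
}
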
3357diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
3358index 07314af..c46655c 100644
3359--- a/arch/arm/kernel/patch.c
3360+++ b/arch/arm/kernel/patch.c
3361@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
3362 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
3363 int size;
3364
3365+ pax_open_kernel();
3366 if (thumb2 && __opcode_is_thumb16(insn)) {
3367 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
3368 size = sizeof(u16);
3369@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
3370 *(u32 *)addr = insn;
3371 size = sizeof(u32);
3372 }
3373+ pax_close_kernel();
3374
3375 flush_icache_range((uintptr_t)(addr),
3376 (uintptr_t)(addr) + size);
3377diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
3378index 81ef686..f4130b8 100644
3379--- a/arch/arm/kernel/process.c
3380+++ b/arch/arm/kernel/process.c
3381@@ -212,6 +212,7 @@ void machine_power_off(void)
3382
3383 if (pm_power_off)
3384 pm_power_off();
3385+ BUG();
3386 }
3387
3388 /*
3389@@ -225,7 +226,7 @@ void machine_power_off(void)
3390 * executing pre-reset code, and using RAM that the primary CPU's code wishes
3391 * to use. Implementing such co-ordination would be essentially impossible.
3392 */
3393-void machine_restart(char *cmd)
3394+__noreturn void machine_restart(char *cmd)
3395 {
3396 local_irq_disable();
3397 smp_send_stop();
3398@@ -248,8 +249,8 @@ void __show_regs(struct pt_regs *regs)
3399
3400 show_regs_print_info(KERN_DEFAULT);
3401
3402- print_symbol("PC is at %s\n", instruction_pointer(regs));
3403- print_symbol("LR is at %s\n", regs->ARM_lr);
3404+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
3405+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
3406 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
3407 "sp : %08lx ip : %08lx fp : %08lx\n",
3408 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
3409@@ -425,12 +426,6 @@ unsigned long get_wchan(struct task_struct *p)
3410 return 0;
3411 }
3412
3413-unsigned long arch_randomize_brk(struct mm_struct *mm)
3414-{
3415- unsigned long range_end = mm->brk + 0x02000000;
3416- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
3417-}
3418-
3419 #ifdef CONFIG_MMU
3420 #ifdef CONFIG_KUSER_HELPERS
3421 /*
3422@@ -446,7 +441,7 @@ static struct vm_area_struct gate_vma = {
3423
3424 static int __init gate_vma_init(void)
3425 {
3426- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
3427+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
3428 return 0;
3429 }
3430 arch_initcall(gate_vma_init);
3431@@ -472,41 +467,16 @@ int in_gate_area_no_mm(unsigned long addr)
3432
3433 const char *arch_vma_name(struct vm_area_struct *vma)
3434 {
3435- return is_gate_vma(vma) ? "[vectors]" :
3436- (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
3437- "[sigpage]" : NULL;
3438+ return is_gate_vma(vma) ? "[vectors]" : NULL;
3439 }
3440
3441-static struct page *signal_page;
3442-extern struct page *get_signal_page(void);
3443-
3444 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3445 {
3446 struct mm_struct *mm = current->mm;
3447- unsigned long addr;
3448- int ret;
3449-
3450- if (!signal_page)
3451- signal_page = get_signal_page();
3452- if (!signal_page)
3453- return -ENOMEM;
3454
3455 down_write(&mm->mmap_sem);
3456- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
3457- if (IS_ERR_VALUE(addr)) {
3458- ret = addr;
3459- goto up_fail;
3460- }
3461-
3462- ret = install_special_mapping(mm, addr, PAGE_SIZE,
3463- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
3464- &signal_page);
3465-
3466- if (ret == 0)
3467- mm->context.sigpage = addr;
3468-
3469- up_fail:
3470+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
3471 up_write(&mm->mmap_sem);
3472- return ret;
3473+ return 0;
3474 }
3475 #endif
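
Instead of installing a [sigpage] VMA, the rewritten arch_setup_additional_pages() stores a randomized, word-aligned address that is never mapped at all; the prefetch-abort handler added to arch/arm/mm/fault.c later in this patch recognises PCs inside that window and emulates sigreturn directly. The arithmetic of the chosen range, assuming the common 3G/1G split where PAGE_OFFSET is 0xC0000000 (rand() stands in for the kernel's get_random_int()):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_OFFSET 0xC0000000UL   /* assumption: the common 3G/1G split */

/* Stand-in for the kernel's get_random_int(); illustration only. */
static uint32_t get_random_int(void) { return (uint32_t)rand(); }

int main(void)
{
    unsigned long sigpage =
        (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;

    /* Word-aligned and confined to [0xC0000000, 0xFFFEFFDC], safely
     * below the 0xFFFF0000 vectors page. Nothing is mapped there; a
     * jump to it faults, and do_PrefetchAbort() emulates sigreturn. */
    printf("sigpage = %08lx\n", sigpage);
    printf("range   = [%08lx, %08lx]\n",
           PAGE_OFFSET, (PAGE_OFFSET + 0x3FFEFFDFUL) & 0xFFFFFFFCUL);
    return 0;
}
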
3476diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
3477index f73891b..cf3004e 100644
3478--- a/arch/arm/kernel/psci.c
3479+++ b/arch/arm/kernel/psci.c
3480@@ -28,7 +28,7 @@
3481 #include <asm/psci.h>
3482 #include <asm/system_misc.h>
3483
3484-struct psci_operations psci_ops;
3485+struct psci_operations psci_ops __read_only;
3486
3487 static int (*invoke_psci_fn)(u32, u32, u32, u32);
3488 typedef int (*psci_initcall_t)(const struct device_node *);
3489diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3490index 0c27ed6..b67388e 100644
3491--- a/arch/arm/kernel/ptrace.c
3492+++ b/arch/arm/kernel/ptrace.c
3493@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3494 regs->ARM_ip = ip;
3495 }
3496
3497+#ifdef CONFIG_GRKERNSEC_SETXID
3498+extern void gr_delayed_cred_worker(void);
3499+#endif
3500+
3501 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3502 {
3503 current_thread_info()->syscall = scno;
3504
3505+#ifdef CONFIG_GRKERNSEC_SETXID
3506+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3507+ gr_delayed_cred_worker();
3508+#endif
3509+
3510 /* Do the secure computing check first; failures should be fast. */
3511 if (secure_computing(scno) == -1)
3512 return -1;
3513diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3514index 8a16ee5..4f560e5 100644
3515--- a/arch/arm/kernel/setup.c
3516+++ b/arch/arm/kernel/setup.c
3517@@ -104,21 +104,23 @@ EXPORT_SYMBOL(elf_hwcap);
3518 unsigned int elf_hwcap2 __read_mostly;
3519 EXPORT_SYMBOL(elf_hwcap2);
3520
3521+pteval_t __supported_pte_mask __read_only;
3522+pmdval_t __supported_pmd_mask __read_only;
3523
3524 #ifdef MULTI_CPU
3525-struct processor processor __read_mostly;
3526+struct processor processor __read_only;
3527 #endif
3528 #ifdef MULTI_TLB
3529-struct cpu_tlb_fns cpu_tlb __read_mostly;
3530+struct cpu_tlb_fns cpu_tlb __read_only;
3531 #endif
3532 #ifdef MULTI_USER
3533-struct cpu_user_fns cpu_user __read_mostly;
3534+struct cpu_user_fns cpu_user __read_only;
3535 #endif
3536 #ifdef MULTI_CACHE
3537-struct cpu_cache_fns cpu_cache __read_mostly;
3538+struct cpu_cache_fns cpu_cache __read_only;
3539 #endif
3540 #ifdef CONFIG_OUTER_CACHE
3541-struct outer_cache_fns outer_cache __read_mostly;
3542+struct outer_cache_fns outer_cache __read_only;
3543 EXPORT_SYMBOL(outer_cache);
3544 #endif
3545
3546@@ -251,9 +253,13 @@ static int __get_cpu_architecture(void)
3547 asm("mrc p15, 0, %0, c0, c1, 4"
3548 : "=r" (mmfr0));
3549 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3550- (mmfr0 & 0x000000f0) >= 0x00000030)
3551+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3552 cpu_arch = CPU_ARCH_ARMv7;
3553- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3554+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3555+ __supported_pte_mask |= L_PTE_PXN;
3556+ __supported_pmd_mask |= PMD_PXNTABLE;
3557+ }
3558+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3559 (mmfr0 & 0x000000f0) == 0x00000020)
3560 cpu_arch = CPU_ARCH_ARMv6;
3561 else
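
The setup.c hunk decodes the VMSA field in ID_MMFR0 bits [3:0] and, for the two ARMv7 encodings the patch treats as PXN-capable (4 and 5), turns on L_PTE_PXN and PMD_PXNTABLE in the newly added support masks. The same field test in isolation (the mmfr0 value is passed in; on hardware it comes from the mrc shown in the hunk):

#include <stdint.h>
#include <stdio.h>

/* VMSA support field of ID_MMFR0, bits [3:0]; mirrors the test in the
 * hunk above. */
static int vmsa_supports_pxn(uint32_t mmfr0)
{
    uint32_t vmsa = mmfr0 & 0x0000000f;
    return vmsa == 0x00000004 || vmsa == 0x00000005;
}

int main(void)
{
    uint32_t samples[] = { 0x2, 0x3, 0x4, 0x5 };
    for (unsigned int i = 0; i < 4; i++)
        printf("MMFR0.VMSA=%u -> PXN %s\n", (unsigned int)samples[i],
               vmsa_supports_pxn(samples[i]) ? "available" : "unavailable");
    return 0;
}
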
3562diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3563index bd19834..e4d8c66 100644
3564--- a/arch/arm/kernel/signal.c
3565+++ b/arch/arm/kernel/signal.c
3566@@ -24,8 +24,6 @@
3567
3568 extern const unsigned long sigreturn_codes[7];
3569
3570-static unsigned long signal_return_offset;
3571-
3572 #ifdef CONFIG_CRUNCH
3573 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3574 {
3575@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3576 * except when the MPU has protected the vectors
3577 * page from PL0
3578 */
3579- retcode = mm->context.sigpage + signal_return_offset +
3580- (idx << 2) + thumb;
3581+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3582 } else
3583 #endif
3584 {
3585@@ -604,33 +601,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3586 } while (thread_flags & _TIF_WORK_MASK);
3587 return 0;
3588 }
3589-
3590-struct page *get_signal_page(void)
3591-{
3592- unsigned long ptr;
3593- unsigned offset;
3594- struct page *page;
3595- void *addr;
3596-
3597- page = alloc_pages(GFP_KERNEL, 0);
3598-
3599- if (!page)
3600- return NULL;
3601-
3602- addr = page_address(page);
3603-
3604- /* Give the signal return code some randomness */
3605- offset = 0x200 + (get_random_int() & 0x7fc);
3606- signal_return_offset = offset;
3607-
3608- /*
3609- * Copy signal return handlers into the vector page, and
3610- * set sigreturn to be a pointer to these.
3611- */
3612- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3613-
3614- ptr = (unsigned long)addr + offset;
3615- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3616-
3617- return page;
3618-}
3619diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3620index 7c4fada..8581286 100644
3621--- a/arch/arm/kernel/smp.c
3622+++ b/arch/arm/kernel/smp.c
3623@@ -73,7 +73,7 @@ enum ipi_msg_type {
3624
3625 static DECLARE_COMPLETION(cpu_running);
3626
3627-static struct smp_operations smp_ops;
3628+static struct smp_operations smp_ops __read_only;
3629
3630 void __init smp_set_ops(struct smp_operations *ops)
3631 {
3632diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3633index 7a3be1d..b00c7de 100644
3634--- a/arch/arm/kernel/tcm.c
3635+++ b/arch/arm/kernel/tcm.c
3636@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3637 .virtual = ITCM_OFFSET,
3638 .pfn = __phys_to_pfn(ITCM_OFFSET),
3639 .length = 0,
3640- .type = MT_MEMORY_RWX_ITCM,
3641+ .type = MT_MEMORY_RX_ITCM,
3642 }
3643 };
3644
3645@@ -267,7 +267,9 @@ no_dtcm:
3646 start = &__sitcm_text;
3647 end = &__eitcm_text;
3648 ram = &__itcm_start;
3649+ pax_open_kernel();
3650 memcpy(start, ram, itcm_code_sz);
3651+ pax_close_kernel();
3652 pr_debug("CPU ITCM: copied code from %p - %p\n",
3653 start, end);
3654 itcm_present = true;
3655diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3656index abd2fc0..895dbb6 100644
3657--- a/arch/arm/kernel/traps.c
3658+++ b/arch/arm/kernel/traps.c
3659@@ -62,7 +62,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3660 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3661 {
3662 #ifdef CONFIG_KALLSYMS
3663- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3664+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3665 #else
3666 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3667 #endif
3668@@ -264,6 +264,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3669 static int die_owner = -1;
3670 static unsigned int die_nest_count;
3671
3672+extern void gr_handle_kernel_exploit(void);
3673+
3674 static unsigned long oops_begin(void)
3675 {
3676 int cpu;
3677@@ -306,6 +308,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3678 panic("Fatal exception in interrupt");
3679 if (panic_on_oops)
3680 panic("Fatal exception");
3681+
3682+ gr_handle_kernel_exploit();
3683+
3684 if (signr)
3685 do_exit(signr);
3686 }
3687@@ -643,7 +648,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3688 * The user helper at 0xffff0fe0 must be used instead.
3689 * (see entry-armv.S for details)
3690 */
3691+ pax_open_kernel();
3692 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3693+ pax_close_kernel();
3694 }
3695 return 0;
3696
3697@@ -900,7 +907,11 @@ void __init early_trap_init(void *vectors_base)
3698 kuser_init(vectors_base);
3699
3700 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3701- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3702+
3703+#ifndef CONFIG_PAX_MEMORY_UDEREF
3704+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3705+#endif
3706+
3707 #else /* ifndef CONFIG_CPU_V7M */
3708 /*
3709 * on V7-M there is no need to copy the vector table to a dedicated
3710diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3711index 7bcee5c..e2f3249 100644
3712--- a/arch/arm/kernel/vmlinux.lds.S
3713+++ b/arch/arm/kernel/vmlinux.lds.S
3714@@ -8,7 +8,11 @@
3715 #include <asm/thread_info.h>
3716 #include <asm/memory.h>
3717 #include <asm/page.h>
3718-
3719+
3720+#ifdef CONFIG_PAX_KERNEXEC
3721+#include <asm/pgtable.h>
3722+#endif
3723+
3724 #define PROC_INFO \
3725 . = ALIGN(4); \
3726 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3727@@ -34,7 +38,7 @@
3728 #endif
3729
3730 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3731- defined(CONFIG_GENERIC_BUG)
3732+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3733 #define ARM_EXIT_KEEP(x) x
3734 #define ARM_EXIT_DISCARD(x)
3735 #else
3736@@ -90,6 +94,11 @@ SECTIONS
3737 _text = .;
3738 HEAD_TEXT
3739 }
3740+
3741+#ifdef CONFIG_PAX_KERNEXEC
3742+ . = ALIGN(1<<SECTION_SHIFT);
3743+#endif
3744+
3745 .text : { /* Real text segment */
3746 _stext = .; /* Text and read-only data */
3747 __exception_text_start = .;
3748@@ -112,6 +121,8 @@ SECTIONS
3749 ARM_CPU_KEEP(PROC_INFO)
3750 }
3751
3752+ _etext = .; /* End of text section */
3753+
3754 RO_DATA(PAGE_SIZE)
3755
3756 . = ALIGN(4);
3757@@ -142,7 +153,9 @@ SECTIONS
3758
3759 NOTES
3760
3761- _etext = .; /* End of text and rodata section */
3762+#ifdef CONFIG_PAX_KERNEXEC
3763+ . = ALIGN(1<<SECTION_SHIFT);
3764+#endif
3765
3766 #ifndef CONFIG_XIP_KERNEL
3767 . = ALIGN(PAGE_SIZE);
3768@@ -220,6 +233,11 @@ SECTIONS
3769 . = PAGE_OFFSET + TEXT_OFFSET;
3770 #else
3771 __init_end = .;
3772+
3773+#ifdef CONFIG_PAX_KERNEXEC
3774+ . = ALIGN(1<<SECTION_SHIFT);
3775+#endif
3776+
3777 . = ALIGN(THREAD_SIZE);
3778 __data_loc = .;
3779 #endif
3780diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3781index 3c82b37..69fa3d2 100644
3782--- a/arch/arm/kvm/arm.c
3783+++ b/arch/arm/kvm/arm.c
3784@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3785 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3786
3787 /* The VMID used in the VTTBR */
3788-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3789+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3790 static u8 kvm_next_vmid;
3791 static DEFINE_SPINLOCK(kvm_vmid_lock);
3792
3793@@ -409,7 +409,7 @@ void force_vm_exit(const cpumask_t *mask)
3794 */
3795 static bool need_new_vmid_gen(struct kvm *kvm)
3796 {
3797- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3798+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3799 }
3800
3801 /**
3802@@ -442,7 +442,7 @@ static void update_vttbr(struct kvm *kvm)
3803
3804 /* First user of a new VMID generation? */
3805 if (unlikely(kvm_next_vmid == 0)) {
3806- atomic64_inc(&kvm_vmid_gen);
3807+ atomic64_inc_unchecked(&kvm_vmid_gen);
3808 kvm_next_vmid = 1;
3809
3810 /*
3811@@ -459,7 +459,7 @@ static void update_vttbr(struct kvm *kvm)
3812 kvm_call_hyp(__kvm_flush_vm_context);
3813 }
3814
3815- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3816+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3817 kvm->arch.vmid = kvm_next_vmid;
3818 kvm_next_vmid++;
3819
3820@@ -1034,7 +1034,7 @@ static void check_kvm_target_cpu(void *ret)
3821 /**
3822 * Initialize Hyp-mode and memory mappings on all CPUs.
3823 */
3824-int kvm_arch_init(void *opaque)
3825+int kvm_arch_init(const void *opaque)
3826 {
3827 int err;
3828 int ret, cpu;
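
kvm_vmid_gen becomes atomic64_unchecked_t because under CONFIG_PAX_REFCOUNT ordinary atomics trap when an increment overflows, and a VMID generation counter is one of the counters that is allowed to wrap. A hedged C11 illustration of the checked/unchecked split (the kernel's real implementation uses architecture-specific overflow detection, not this code):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* "Checked" increment: abort on signed overflow, the PAX_REFCOUNT idea. */
static long inc_checked(_Atomic long *v)
{
    long old = atomic_fetch_add(v, 1);
    if (old == LONG_MAX) {           /* the add just wrapped */
        fprintf(stderr, "refcount overflow detected\n");
        abort();
    }
    return old + 1;
}

/* "Unchecked" increment: wrapping is fine, e.g. a generation counter. */
static long inc_unchecked(_Atomic long *v)
{
    return atomic_fetch_add(v, 1) + 1;
}

int main(void)
{
    _Atomic long gen = 1;
    printf("generation -> %ld\n", inc_unchecked(&gen));

    _Atomic long ref = 1;
    printf("refcount   -> %ld\n", inc_checked(&ref));
    return 0;
}
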
3829diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3830index 14a0d98..7771a7d 100644
3831--- a/arch/arm/lib/clear_user.S
3832+++ b/arch/arm/lib/clear_user.S
3833@@ -12,14 +12,14 @@
3834
3835 .text
3836
3837-/* Prototype: int __clear_user(void *addr, size_t sz)
3838+/* Prototype: int ___clear_user(void *addr, size_t sz)
3839 * Purpose : clear some user memory
3840 * Params : addr - user memory address to clear
3841 * : sz - number of bytes to clear
3842 * Returns : number of bytes NOT cleared
3843 */
3844 ENTRY(__clear_user_std)
3845-WEAK(__clear_user)
3846+WEAK(___clear_user)
3847 stmfd sp!, {r1, lr}
3848 mov r2, #0
3849 cmp r1, #4
3850@@ -44,7 +44,7 @@ WEAK(__clear_user)
3851 USER( strnebt r2, [r0])
3852 mov r0, #0
3853 ldmfd sp!, {r1, pc}
3854-ENDPROC(__clear_user)
3855+ENDPROC(___clear_user)
3856 ENDPROC(__clear_user_std)
3857
3858 .pushsection .fixup,"ax"
3859diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3860index 66a477a..bee61d3 100644
3861--- a/arch/arm/lib/copy_from_user.S
3862+++ b/arch/arm/lib/copy_from_user.S
3863@@ -16,7 +16,7 @@
3864 /*
3865 * Prototype:
3866 *
3867- * size_t __copy_from_user(void *to, const void *from, size_t n)
3868+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3869 *
3870 * Purpose:
3871 *
3872@@ -84,11 +84,11 @@
3873
3874 .text
3875
3876-ENTRY(__copy_from_user)
3877+ENTRY(___copy_from_user)
3878
3879 #include "copy_template.S"
3880
3881-ENDPROC(__copy_from_user)
3882+ENDPROC(___copy_from_user)
3883
3884 .pushsection .fixup,"ax"
3885 .align 0
3886diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3887index 6ee2f67..d1cce76 100644
3888--- a/arch/arm/lib/copy_page.S
3889+++ b/arch/arm/lib/copy_page.S
3890@@ -10,6 +10,7 @@
3891 * ASM optimised string functions
3892 */
3893 #include <linux/linkage.h>
3894+#include <linux/const.h>
3895 #include <asm/assembler.h>
3896 #include <asm/asm-offsets.h>
3897 #include <asm/cache.h>
3898diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3899index d066df6..df28194 100644
3900--- a/arch/arm/lib/copy_to_user.S
3901+++ b/arch/arm/lib/copy_to_user.S
3902@@ -16,7 +16,7 @@
3903 /*
3904 * Prototype:
3905 *
3906- * size_t __copy_to_user(void *to, const void *from, size_t n)
3907+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3908 *
3909 * Purpose:
3910 *
3911@@ -88,11 +88,11 @@
3912 .text
3913
3914 ENTRY(__copy_to_user_std)
3915-WEAK(__copy_to_user)
3916+WEAK(___copy_to_user)
3917
3918 #include "copy_template.S"
3919
3920-ENDPROC(__copy_to_user)
3921+ENDPROC(___copy_to_user)
3922 ENDPROC(__copy_to_user_std)
3923
3924 .pushsection .fixup,"ax"
3925diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3926index 7d08b43..f7ca7ea 100644
3927--- a/arch/arm/lib/csumpartialcopyuser.S
3928+++ b/arch/arm/lib/csumpartialcopyuser.S
3929@@ -57,8 +57,8 @@
3930 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3931 */
3932
3933-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3934-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3935+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3936+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3937
3938 #include "csumpartialcopygeneric.S"
3939
3940diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3941index 5306de3..aed6d03 100644
3942--- a/arch/arm/lib/delay.c
3943+++ b/arch/arm/lib/delay.c
3944@@ -28,7 +28,7 @@
3945 /*
3946 * Default to the loop-based delay implementation.
3947 */
3948-struct arm_delay_ops arm_delay_ops = {
3949+struct arm_delay_ops arm_delay_ops __read_only = {
3950 .delay = __loop_delay,
3951 .const_udelay = __loop_const_udelay,
3952 .udelay = __loop_udelay,
3953diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3954index 3e58d71..029817c 100644
3955--- a/arch/arm/lib/uaccess_with_memcpy.c
3956+++ b/arch/arm/lib/uaccess_with_memcpy.c
3957@@ -136,7 +136,7 @@ out:
3958 }
3959
3960 unsigned long
3961-__copy_to_user(void __user *to, const void *from, unsigned long n)
3962+___copy_to_user(void __user *to, const void *from, unsigned long n)
3963 {
3964 /*
3965 * This test is stubbed out of the main function above to keep
3966@@ -190,7 +190,7 @@ out:
3967 return n;
3968 }
3969
3970-unsigned long __clear_user(void __user *addr, unsigned long n)
3971+unsigned long ___clear_user(void __user *addr, unsigned long n)
3972 {
3973 	/* See rationale for this in __copy_to_user() above. */
3974 if (n < 64)
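
Across these arch/arm/lib hunks the real workers gain a third leading underscore; the familiar single-underscore names are re-introduced elsewhere in this patch as wrappers that validate the request before delegating, giving the UDEREF/size-overflow instrumentation one choke point per primitive. The wrapper shape, reduced to a hedged user-space sketch (SPACE_LIMIT and the check itself are illustrative stand-ins for access_ok()-style validation):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* The "raw" worker, by analogy with ___copy_to_user(). */
static size_t raw_copy(void *to, const void *from, size_t n)
{
    memcpy(to, from, n);
    return 0;                      /* bytes NOT copied */
}

#define SPACE_LIMIT 4096           /* illustrative stand-in for an
                                      access_ok()/TASK_SIZE bound */

/* The checked entry point, by analogy with the single-underscore
 * wrapper this patch adds elsewhere: validate n, then delegate. */
static size_t checked_copy(void *to, const void *from, size_t n)
{
    if (n > SPACE_LIMIT)           /* refuse absurd lengths up front */
        return n;                  /* nothing copied */
    return raw_copy(to, from, n);
}

int main(void)
{
    char dst[16];
    printf("ok:   %zu left\n", checked_copy(dst, "hi", 3));
    printf("huge: %zu left\n", checked_copy(dst, "hi", (size_t)1 << 20));
    return 0;
}
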
3975diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
3976index f7a07a5..258e1f7 100644
3977--- a/arch/arm/mach-at91/setup.c
3978+++ b/arch/arm/mach-at91/setup.c
3979@@ -81,7 +81,7 @@ void __init at91_init_sram(int bank, unsigned long base, unsigned int length)
3980
3981 desc->pfn = __phys_to_pfn(base);
3982 desc->length = length;
3983- desc->type = MT_MEMORY_RWX_NONCACHED;
3984+ desc->type = MT_MEMORY_RW_NONCACHED;
3985
3986 pr_info("AT91: sram at 0x%lx of 0x%x mapped at 0x%lx\n",
3987 base, length, desc->virtual);
3988diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3989index 255f33a..507b157 100644
3990--- a/arch/arm/mach-kirkwood/common.c
3991+++ b/arch/arm/mach-kirkwood/common.c
3992@@ -157,7 +157,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3993 clk_gate_ops.disable(hw);
3994 }
3995
3996-static struct clk_ops clk_gate_fn_ops;
3997+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3998+{
3999+ return clk_gate_ops.is_enabled(hw);
4000+}
4001+
4002+static struct clk_ops clk_gate_fn_ops = {
4003+ .enable = clk_gate_fn_enable,
4004+ .disable = clk_gate_fn_disable,
4005+ .is_enabled = clk_gate_fn_is_enabled,
4006+};
4007
4008 static struct clk __init *clk_register_gate_fn(struct device *dev,
4009 const char *name,
4010@@ -191,14 +200,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
4011 gate_fn->fn_en = fn_en;
4012 gate_fn->fn_dis = fn_dis;
4013
4014- /* ops is the gate ops, but with our enable/disable functions */
4015- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
4016- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
4017- clk_gate_fn_ops = clk_gate_ops;
4018- clk_gate_fn_ops.enable = clk_gate_fn_enable;
4019- clk_gate_fn_ops.disable = clk_gate_fn_disable;
4020- }
4021-
4022 clk = clk_register(dev, &gate_fn->gate.hw);
4023
4024 if (IS_ERR(clk))
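
The kirkwood hunk replaces a clk_ops object that was spliced together on first use with a fully static initializer, adding an explicit clk_gate_fn_is_enabled() passthrough for the one callback previously inherited by copying. Once every member is a compile-time constant the object can live in read-only memory (__read_only in kernel terms, const below), which is the point of the conversion:

#include <stdio.h>

struct ops {
    int (*enable)(void);
    int (*disable)(void);
};

static int my_enable(void)  { puts("enable");  return 0; }
static int my_disable(void) { puts("disable"); return 0; }

/* After the conversion: every member is known at compile time, so the
 * whole object can be const and there is no mutate-on-first-use window
 * left to race with or corrupt. */
static const struct ops gate_ops = {
    .enable  = my_enable,
    .disable = my_disable,
};

int main(void)
{
    gate_ops.enable();
    gate_ops.disable();
    return 0;
}
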
4025diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
4026index 2bdc323..cf1c607 100644
4027--- a/arch/arm/mach-mvebu/coherency.c
4028+++ b/arch/arm/mach-mvebu/coherency.c
4029@@ -316,7 +316,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
4030
4031 /*
4032 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
4033- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
4034+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
4035 * is needed as a workaround for a deadlock issue between the PCIe
4036 * interface and the cache controller.
4037 */
4038@@ -329,7 +329,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
4039 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
4040
4041 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
4042- mtype = MT_UNCACHED;
4043+ mtype = MT_UNCACHED_RW;
4044
4045 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
4046 }
4047diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
4048index aead77a..a2253fa 100644
4049--- a/arch/arm/mach-omap2/board-n8x0.c
4050+++ b/arch/arm/mach-omap2/board-n8x0.c
4051@@ -568,7 +568,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
4052 }
4053 #endif
4054
4055-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
4056+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
4057 .late_init = n8x0_menelaus_late_init,
4058 };
4059
4060diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
4061index 8bc1338..8b28b69 100644
4062--- a/arch/arm/mach-omap2/gpmc.c
4063+++ b/arch/arm/mach-omap2/gpmc.c
4064@@ -151,7 +151,6 @@ struct omap3_gpmc_regs {
4065 };
4066
4067 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
4068-static struct irq_chip gpmc_irq_chip;
4069 static int gpmc_irq_start;
4070
4071 static struct resource gpmc_mem_root;
4072@@ -736,6 +735,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
4073
4074 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
4075
4076+static struct irq_chip gpmc_irq_chip = {
4077+ .name = "gpmc",
4078+ .irq_startup = gpmc_irq_noop_ret,
4079+ .irq_enable = gpmc_irq_enable,
4080+ .irq_disable = gpmc_irq_disable,
4081+ .irq_shutdown = gpmc_irq_noop,
4082+ .irq_ack = gpmc_irq_noop,
4083+ .irq_mask = gpmc_irq_noop,
4084+ .irq_unmask = gpmc_irq_noop,
4085+
4086+};
4087+
4088 static int gpmc_setup_irq(void)
4089 {
4090 int i;
4091@@ -750,15 +761,6 @@ static int gpmc_setup_irq(void)
4092 return gpmc_irq_start;
4093 }
4094
4095- gpmc_irq_chip.name = "gpmc";
4096- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
4097- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
4098- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
4099- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
4100- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
4101- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
4102- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
4103-
4104 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
4105 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
4106
4107diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
4108index 4001325..b14e2a0 100644
4109--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
4110+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
4111@@ -84,7 +84,7 @@ struct cpu_pm_ops {
4112 int (*finish_suspend)(unsigned long cpu_state);
4113 void (*resume)(void);
4114 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
4115-};
4116+} __no_const;
4117
4118 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
4119 static struct powerdomain *mpuss_pd;
4120@@ -102,7 +102,7 @@ static void dummy_cpu_resume(void)
4121 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
4122 {}
4123
4124-struct cpu_pm_ops omap_pm_ops = {
4125+static struct cpu_pm_ops omap_pm_ops __read_only = {
4126 .finish_suspend = default_finish_suspend,
4127 .resume = dummy_cpu_resume,
4128 .scu_prepare = dummy_scu_prepare,
4129diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
4130index 37843a7..a98df13 100644
4131--- a/arch/arm/mach-omap2/omap-wakeupgen.c
4132+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
4133@@ -343,7 +343,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
4134 return NOTIFY_OK;
4135 }
4136
4137-static struct notifier_block __refdata irq_hotplug_notifier = {
4138+static struct notifier_block irq_hotplug_notifier = {
4139 .notifier_call = irq_cpu_hotplug_notify,
4140 };
4141
4142diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
4143index 01ef59d..32ae28a8 100644
4144--- a/arch/arm/mach-omap2/omap_device.c
4145+++ b/arch/arm/mach-omap2/omap_device.c
4146@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
4147 struct platform_device __init *omap_device_build(const char *pdev_name,
4148 int pdev_id,
4149 struct omap_hwmod *oh,
4150- void *pdata, int pdata_len)
4151+ const void *pdata, int pdata_len)
4152 {
4153 struct omap_hwmod *ohs[] = { oh };
4154
4155@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
4156 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
4157 int pdev_id,
4158 struct omap_hwmod **ohs,
4159- int oh_cnt, void *pdata,
4160+ int oh_cnt, const void *pdata,
4161 int pdata_len)
4162 {
4163 int ret = -ENOMEM;
4164diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
4165index 78c02b3..c94109a 100644
4166--- a/arch/arm/mach-omap2/omap_device.h
4167+++ b/arch/arm/mach-omap2/omap_device.h
4168@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
4169 /* Core code interface */
4170
4171 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
4172- struct omap_hwmod *oh, void *pdata,
4173+ struct omap_hwmod *oh, const void *pdata,
4174 int pdata_len);
4175
4176 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
4177 struct omap_hwmod **oh, int oh_cnt,
4178- void *pdata, int pdata_len);
4179+ const void *pdata, int pdata_len);
4180
4181 struct omap_device *omap_device_alloc(struct platform_device *pdev,
4182 struct omap_hwmod **ohs, int oh_cnt);
4183diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
4184index da1b256..ab2a327 100644
4185--- a/arch/arm/mach-omap2/omap_hwmod.c
4186+++ b/arch/arm/mach-omap2/omap_hwmod.c
4187@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
4188 int (*init_clkdm)(struct omap_hwmod *oh);
4189 void (*update_context_lost)(struct omap_hwmod *oh);
4190 int (*get_context_lost)(struct omap_hwmod *oh);
4191-};
4192+} __no_const;
4193
4194 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
4195-static struct omap_hwmod_soc_ops soc_ops;
4196+static struct omap_hwmod_soc_ops soc_ops __read_only;
4197
4198 /* omap_hwmod_list contains all registered struct omap_hwmods */
4199 static LIST_HEAD(omap_hwmod_list);
4200diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
4201index 95fee54..cfa9cf1 100644
4202--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
4203+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
4204@@ -10,6 +10,7 @@
4205
4206 #include <linux/kernel.h>
4207 #include <linux/init.h>
4208+#include <asm/pgtable.h>
4209
4210 #include "powerdomain.h"
4211
4212@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
4213
4214 void __init am43xx_powerdomains_init(void)
4215 {
4216- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
4217+ pax_open_kernel();
4218+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
4219+ pax_close_kernel();
4220 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
4221 pwrdm_register_pwrdms(powerdomains_am43xx);
4222 pwrdm_complete_init();
4223diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
4224index 97d6607..8429d14 100644
4225--- a/arch/arm/mach-omap2/wd_timer.c
4226+++ b/arch/arm/mach-omap2/wd_timer.c
4227@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
4228 struct omap_hwmod *oh;
4229 char *oh_name = "wd_timer2";
4230 char *dev_name = "omap_wdt";
4231- struct omap_wd_timer_platform_data pdata;
4232+ static struct omap_wd_timer_platform_data pdata = {
4233+ .read_reset_sources = prm_read_reset_sources
4234+ };
4235
4236 if (!cpu_class_is_omap2() || of_have_populated_dt())
4237 return 0;
4238@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
4239 return -EINVAL;
4240 }
4241
4242- pdata.read_reset_sources = prm_read_reset_sources;
4243-
4244 pdev = omap_device_build(dev_name, id, oh, &pdata,
4245 sizeof(struct omap_wd_timer_platform_data));
4246 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
4247diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
4248index b82dcae..44ee5b6 100644
4249--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
4250+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
4251@@ -180,7 +180,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
4252 bool entered_lp2 = false;
4253
4254 if (tegra_pending_sgi())
4255- ACCESS_ONCE(abort_flag) = true;
4256+ ACCESS_ONCE_RW(abort_flag) = true;
4257
4258 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
4259
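
ACCESS_ONCE_RW is the PaX counterpart of ACCESS_ONCE for objects that constification has const-qualified; both reduce to a volatile-qualified access the compiler must perform exactly once, unmerged and untorn. The classic macro, demonstrated standalone (this is the historical kernel definition; the _RW variant differs only in how it strips the const qualifier):

#include <stdio.h>

/* Force exactly one access, in order and untorn, by going through a
 * volatile lvalue. */
#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static int abort_flag;

int main(void)
{
    /* Each of these is a distinct volatile store/load the compiler
     * must emit as written, even at -O2. */
    ACCESS_ONCE(abort_flag) = 1;
    printf("flag = %d\n", ACCESS_ONCE(abort_flag));
    return 0;
}
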
4260diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
4261index 2dea8b5..6499da2 100644
4262--- a/arch/arm/mach-ux500/setup.h
4263+++ b/arch/arm/mach-ux500/setup.h
4264@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
4265 .type = MT_DEVICE, \
4266 }
4267
4268-#define __MEM_DEV_DESC(x, sz) { \
4269- .virtual = IO_ADDRESS(x), \
4270- .pfn = __phys_to_pfn(x), \
4271- .length = sz, \
4272- .type = MT_MEMORY_RWX, \
4273-}
4274-
4275 extern struct smp_operations ux500_smp_ops;
4276 extern void ux500_cpu_die(unsigned int cpu);
4277
4278diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
4279index c348eae..456a1a4 100644
4280--- a/arch/arm/mm/Kconfig
4281+++ b/arch/arm/mm/Kconfig
4282@@ -446,6 +446,7 @@ config CPU_32v5
4283
4284 config CPU_32v6
4285 bool
4286+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
4287 select TLS_REG_EMUL if !CPU_32v6K && !MMU
4288
4289 config CPU_32v6K
4290@@ -600,6 +601,7 @@ config CPU_CP15_MPU
4291
4292 config CPU_USE_DOMAINS
4293 bool
4294+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
4295 help
4296 This option enables or disables the use of domain switching
4297 via the set_fs() function.
4298@@ -799,6 +801,7 @@ config NEED_KUSER_HELPERS
4299 config KUSER_HELPERS
4300 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
4301 default y
4302+ depends on !(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND
4303 help
4304 Warning: disabling this option may break user programs.
4305
4306@@ -811,7 +814,7 @@ config KUSER_HELPERS
4307 See Documentation/arm/kernel_user_helpers.txt for details.
4308
4309 However, the fixed address nature of these helpers can be used
4310- by ROP (return orientated programming) authors when creating
4311+ by ROP (Return Oriented Programming) authors when creating
4312 exploits.
4313
4314 If all of the binaries and libraries which run on your platform
4315diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
4316index b8cb1a2..6a5624a 100644
4317--- a/arch/arm/mm/alignment.c
4318+++ b/arch/arm/mm/alignment.c
4319@@ -214,10 +214,12 @@ union offset_union {
4320 #define __get16_unaligned_check(ins,val,addr) \
4321 do { \
4322 unsigned int err = 0, v, a = addr; \
4323+ pax_open_userland(); \
4324 __get8_unaligned_check(ins,v,a,err); \
4325 val = v << ((BE) ? 8 : 0); \
4326 __get8_unaligned_check(ins,v,a,err); \
4327 val |= v << ((BE) ? 0 : 8); \
4328+ pax_close_userland(); \
4329 if (err) \
4330 goto fault; \
4331 } while (0)
4332@@ -231,6 +233,7 @@ union offset_union {
4333 #define __get32_unaligned_check(ins,val,addr) \
4334 do { \
4335 unsigned int err = 0, v, a = addr; \
4336+ pax_open_userland(); \
4337 __get8_unaligned_check(ins,v,a,err); \
4338 val = v << ((BE) ? 24 : 0); \
4339 __get8_unaligned_check(ins,v,a,err); \
4340@@ -239,6 +242,7 @@ union offset_union {
4341 val |= v << ((BE) ? 8 : 16); \
4342 __get8_unaligned_check(ins,v,a,err); \
4343 val |= v << ((BE) ? 0 : 24); \
4344+ pax_close_userland(); \
4345 if (err) \
4346 goto fault; \
4347 } while (0)
4348@@ -252,6 +256,7 @@ union offset_union {
4349 #define __put16_unaligned_check(ins,val,addr) \
4350 do { \
4351 unsigned int err = 0, v = val, a = addr; \
4352+ pax_open_userland(); \
4353 __asm__( FIRST_BYTE_16 \
4354 ARM( "1: "ins" %1, [%2], #1\n" ) \
4355 THUMB( "1: "ins" %1, [%2]\n" ) \
4356@@ -271,6 +276,7 @@ union offset_union {
4357 " .popsection\n" \
4358 : "=r" (err), "=&r" (v), "=&r" (a) \
4359 : "0" (err), "1" (v), "2" (a)); \
4360+ pax_close_userland(); \
4361 if (err) \
4362 goto fault; \
4363 } while (0)
4364@@ -284,6 +290,7 @@ union offset_union {
4365 #define __put32_unaligned_check(ins,val,addr) \
4366 do { \
4367 unsigned int err = 0, v = val, a = addr; \
4368+ pax_open_userland(); \
4369 __asm__( FIRST_BYTE_32 \
4370 ARM( "1: "ins" %1, [%2], #1\n" ) \
4371 THUMB( "1: "ins" %1, [%2]\n" ) \
4372@@ -313,6 +320,7 @@ union offset_union {
4373 " .popsection\n" \
4374 : "=r" (err), "=&r" (v), "=&r" (a) \
4375 : "0" (err), "1" (v), "2" (a)); \
4376+ pax_close_userland(); \
4377 if (err) \
4378 goto fault; \
4379 } while (0)
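
Each emulation macro above now brackets its byte-wise userland accesses with pax_open_userland()/pax_close_userland() so UDEREF briefly permits them; the value itself is still assembled from single-byte loads shifted into place. The 32-bit little-endian composition (the BE == 0 arm of the shifts above), as plain C:

#include <stdint.h>
#include <stdio.h>

/* Build a 32-bit value from four byte loads, as __get32_unaligned_check
 * does for the little-endian (BE == 0) case: shifts 0, 8, 16, 24. */
static uint32_t get32_unaligned_le(const uint8_t *a)
{
    uint32_t val;
    val  = (uint32_t)a[0] << 0;
    val |= (uint32_t)a[1] << 8;
    val |= (uint32_t)a[2] << 16;
    val |= (uint32_t)a[3] << 24;
    return val;
}

int main(void)
{
    uint8_t buf[] = { 0xff, 0x78, 0x56, 0x34, 0x12 };   /* misaligned by 1 */
    printf("0x%08x\n", get32_unaligned_le(buf + 1));    /* 0x12345678 */
    return 0;
}
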
4380diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
4381index 7c3fb41..bfb87d8 100644
4382--- a/arch/arm/mm/cache-l2x0.c
4383+++ b/arch/arm/mm/cache-l2x0.c
4384@@ -41,7 +41,7 @@ struct l2c_init_data {
4385 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
4386 void (*save)(void __iomem *);
4387 struct outer_cache_fns outer_cache;
4388-};
4389+} __do_const;
4390
4391 #define CACHE_LINE_SIZE 32
4392
4393diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
4394index 6eb97b3..ac509f6 100644
4395--- a/arch/arm/mm/context.c
4396+++ b/arch/arm/mm/context.c
4397@@ -43,7 +43,7 @@
4398 #define NUM_USER_ASIDS ASID_FIRST_VERSION
4399
4400 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
4401-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
4402+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
4403 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
4404
4405 static DEFINE_PER_CPU(atomic64_t, active_asids);
4406@@ -182,7 +182,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
4407 {
4408 static u32 cur_idx = 1;
4409 u64 asid = atomic64_read(&mm->context.id);
4410- u64 generation = atomic64_read(&asid_generation);
4411+ u64 generation = atomic64_read_unchecked(&asid_generation);
4412
4413 if (asid != 0 && is_reserved_asid(asid)) {
4414 /*
4415@@ -203,7 +203,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
4416 */
4417 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
4418 if (asid == NUM_USER_ASIDS) {
4419- generation = atomic64_add_return(ASID_FIRST_VERSION,
4420+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
4421 &asid_generation);
4422 flush_context(cpu);
4423 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
4424@@ -234,14 +234,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
4425 cpu_set_reserved_ttbr0();
4426
4427 asid = atomic64_read(&mm->context.id);
4428- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
4429+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
4430 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
4431 goto switch_mm_fastpath;
4432
4433 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
4434 /* Check that our ASID belongs to the current generation. */
4435 asid = atomic64_read(&mm->context.id);
4436- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
4437+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
4438 asid = new_context(mm, cpu);
4439 atomic64_set(&mm->context.id, asid);
4440 }
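
asid_generation keeps a rollover generation in the bits above ASID_BITS, so an mm's cached ASID is stale exactly when (asid ^ generation) >> ASID_BITS is non-zero, which is the test the hunk rewrites in three places. A compact demonstration (ASID_BITS = 8 is a demo assumption; the real width depends on the CPU):

#include <stdint.h>
#include <stdio.h>

#define ASID_BITS 8                              /* demo assumption */
#define ASID_FIRST_VERSION (1ULL << ASID_BITS)

static int asid_is_stale(uint64_t asid, uint64_t generation)
{
    /* Same generation <=> identical high bits <=> XOR shifts to zero. */
    return ((asid ^ generation) >> ASID_BITS) != 0;
}

int main(void)
{
    uint64_t generation = ASID_FIRST_VERSION;    /* generation 1 */
    uint64_t asid = generation | 0x2a;           /* ASID 42, generation 1 */

    printf("same gen: stale=%d\n", asid_is_stale(asid, generation));
    generation += ASID_FIRST_VERSION;            /* rollover: generation 2 */
    printf("next gen: stale=%d\n", asid_is_stale(asid, generation));
    return 0;
}
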
4441diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
4442index eb8830a..5360ce7 100644
4443--- a/arch/arm/mm/fault.c
4444+++ b/arch/arm/mm/fault.c
4445@@ -25,6 +25,7 @@
4446 #include <asm/system_misc.h>
4447 #include <asm/system_info.h>
4448 #include <asm/tlbflush.h>
4449+#include <asm/sections.h>
4450
4451 #include "fault.h"
4452
4453@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
4454 if (fixup_exception(regs))
4455 return;
4456
4457+#ifdef CONFIG_PAX_MEMORY_UDEREF
4458+ if (addr < TASK_SIZE) {
4459+ if (current->signal->curr_ip)
4460+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4461+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4462+ else
4463+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4464+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4465+ }
4466+#endif
4467+
4468+#ifdef CONFIG_PAX_KERNEXEC
4469+ if ((fsr & FSR_WRITE) &&
4470+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
4471+ (MODULES_VADDR <= addr && addr < MODULES_END)))
4472+ {
4473+ if (current->signal->curr_ip)
4474+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4475+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
4476+ else
4477+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
4478+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
4479+ }
4480+#endif
4481+
4482 /*
4483 * No handler, we'll have to terminate things with extreme prejudice.
4484 */
4485@@ -174,6 +200,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
4486 }
4487 #endif
4488
4489+#ifdef CONFIG_PAX_PAGEEXEC
4490+ if (fsr & FSR_LNX_PF) {
4491+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
4492+ do_group_exit(SIGKILL);
4493+ }
4494+#endif
4495+
4496 tsk->thread.address = addr;
4497 tsk->thread.error_code = fsr;
4498 tsk->thread.trap_no = 14;
4499@@ -401,6 +434,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
4500 }
4501 #endif /* CONFIG_MMU */
4502
4503+#ifdef CONFIG_PAX_PAGEEXEC
4504+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4505+{
4506+ long i;
4507+
4508+ printk(KERN_ERR "PAX: bytes at PC: ");
4509+ for (i = 0; i < 20; i++) {
4510+ unsigned char c;
4511+ if (get_user(c, (__force unsigned char __user *)pc+i))
4512+ printk(KERN_CONT "?? ");
4513+ else
4514+ printk(KERN_CONT "%02x ", c);
4515+ }
4516+ printk("\n");
4517+
4518+ printk(KERN_ERR "PAX: bytes at SP-4: ");
4519+ for (i = -1; i < 20; i++) {
4520+ unsigned long c;
4521+ if (get_user(c, (__force unsigned long __user *)sp+i))
4522+ printk(KERN_CONT "???????? ");
4523+ else
4524+ printk(KERN_CONT "%08lx ", c);
4525+ }
4526+ printk("\n");
4527+}
4528+#endif
4529+
4530 /*
4531 * First Level Translation Fault Handler
4532 *
4533@@ -548,9 +608,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
4534 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
4535 struct siginfo info;
4536
4537+#ifdef CONFIG_PAX_MEMORY_UDEREF
4538+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
4539+ if (current->signal->curr_ip)
4540+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4541+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4542+ else
4543+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4544+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4545+ goto die;
4546+ }
4547+#endif
4548+
4549 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4550 return;
4551
4552+die:
4553 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4554 inf->name, fsr, addr);
4555
4556@@ -574,15 +647,98 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4557 ifsr_info[nr].name = name;
4558 }
4559
4560+asmlinkage int sys_sigreturn(struct pt_regs *regs);
4561+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4562+
4563 asmlinkage void __exception
4564 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4565 {
4566 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4567 struct siginfo info;
4568+ unsigned long pc = instruction_pointer(regs);
4569+
4570+ if (user_mode(regs)) {
4571+ unsigned long sigpage = current->mm->context.sigpage;
4572+
4573+ if (sigpage <= pc && pc < sigpage + 7*4) {
4574+ if (pc < sigpage + 3*4)
4575+ sys_sigreturn(regs);
4576+ else
4577+ sys_rt_sigreturn(regs);
4578+ return;
4579+ }
4580+ if (pc == 0xffff0f60UL) {
4581+ /*
4582+ * PaX: __kuser_cmpxchg64 emulation
4583+ */
4584+ // TODO
4585+ //regs->ARM_pc = regs->ARM_lr;
4586+ //return;
4587+ }
4588+ if (pc == 0xffff0fa0UL) {
4589+ /*
4590+ * PaX: __kuser_memory_barrier emulation
4591+ */
4592+ // dmb(); implied by the exception
4593+ regs->ARM_pc = regs->ARM_lr;
4594+ return;
4595+ }
4596+ if (pc == 0xffff0fc0UL) {
4597+ /*
4598+ * PaX: __kuser_cmpxchg emulation
4599+ */
4600+ // TODO
4601+ //long new;
4602+ //int op;
4603+
4604+ //op = FUTEX_OP_SET << 28;
4605+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4606+ //regs->ARM_r0 = old != new;
4607+ //regs->ARM_pc = regs->ARM_lr;
4608+ //return;
4609+ }
4610+ if (pc == 0xffff0fe0UL) {
4611+ /*
4612+ * PaX: __kuser_get_tls emulation
4613+ */
4614+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4615+ regs->ARM_pc = regs->ARM_lr;
4616+ return;
4617+ }
4618+ }
4619+
4620+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4621+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4622+ if (current->signal->curr_ip)
4623+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4624+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4625+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4626+ else
4627+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4628+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4629+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4630+ goto die;
4631+ }
4632+#endif
4633+
4634+#ifdef CONFIG_PAX_REFCOUNT
4635+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4636+ unsigned int bkpt;
4637+
4638+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4639+ current->thread.error_code = ifsr;
4640+ current->thread.trap_no = 0;
4641+ pax_report_refcount_overflow(regs);
4642+ fixup_exception(regs);
4643+ return;
4644+ }
4645+ }
4646+#endif
4647
4648 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4649 return;
4650
4651+die:
4652 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4653 inf->name, ifsr, addr);
4654
4655diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4656index cf08bdf..772656c 100644
4657--- a/arch/arm/mm/fault.h
4658+++ b/arch/arm/mm/fault.h
4659@@ -3,6 +3,7 @@
4660
4661 /*
4662 * Fault status register encodings. We steal bit 31 for our own purposes.
4663+ * Set when the FSR value is from an instruction fault.
4664 */
4665 #define FSR_LNX_PF (1 << 31)
4666 #define FSR_WRITE (1 << 11)
4667@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4668 }
4669 #endif
4670
4671+/* valid for LPAE and !LPAE */
4672+static inline int is_xn_fault(unsigned int fsr)
4673+{
4674+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4675+}
4676+
4677+static inline int is_domain_fault(unsigned int fsr)
4678+{
4679+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4680+}
4681+
4682 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4683 unsigned long search_exception_table(unsigned long addr);
4684
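
The two new predicates classify the FSR status returned by fsr_fs(): (fs & 0x3c) == 0xc matches the execute-never/permission encodings 0x0c through 0x0f, and (fs & 0xd) == 0x9 matches the section (0x09) and page (0x0b) domain faults. Enumerating the status space makes the masks concrete:

#include <stdio.h>

static int is_xn_fault(unsigned int fs)     { return (fs & 0x3c) == 0xc; }
static int is_domain_fault(unsigned int fs) { return (fs & 0x0d) == 0x9; }

int main(void)
{
    /* Walk the 6-bit status space and show which codes each predicate
     * accepts: 0x0c-0x0f for XN/permission faults, 0x09 and 0x0b for
     * section/page domain faults. */
    for (unsigned int fs = 0; fs < 0x40; fs++)
        if (is_xn_fault(fs) || is_domain_fault(fs))
            printf("fs=0x%02x xn=%d domain=%d\n",
                   fs, is_xn_fault(fs), is_domain_fault(fs));
    return 0;
}
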
4685diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4686index 659c75d..6f8c029 100644
4687--- a/arch/arm/mm/init.c
4688+++ b/arch/arm/mm/init.c
4689@@ -31,6 +31,8 @@
4690 #include <asm/setup.h>
4691 #include <asm/tlb.h>
4692 #include <asm/fixmap.h>
4693+#include <asm/system_info.h>
4694+#include <asm/cp15.h>
4695
4696 #include <asm/mach/arch.h>
4697 #include <asm/mach/map.h>
4698@@ -619,7 +621,46 @@ void free_initmem(void)
4699 {
4700 #ifdef CONFIG_HAVE_TCM
4701 extern char __tcm_start, __tcm_end;
4702+#endif
4703
4704+#ifdef CONFIG_PAX_KERNEXEC
4705+ unsigned long addr;
4706+ pgd_t *pgd;
4707+ pud_t *pud;
4708+ pmd_t *pmd;
4709+ int cpu_arch = cpu_architecture();
4710+ unsigned int cr = get_cr();
4711+
4712+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4713+	/* make page tables, etc. before .text NX */
4714+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4715+ pgd = pgd_offset_k(addr);
4716+ pud = pud_offset(pgd, addr);
4717+ pmd = pmd_offset(pud, addr);
4718+ __section_update(pmd, addr, PMD_SECT_XN);
4719+ }
4720+ /* make init NX */
4721+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4722+ pgd = pgd_offset_k(addr);
4723+ pud = pud_offset(pgd, addr);
4724+ pmd = pmd_offset(pud, addr);
4725+ __section_update(pmd, addr, PMD_SECT_XN);
4726+ }
4727+ /* make kernel code/rodata RX */
4728+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4729+ pgd = pgd_offset_k(addr);
4730+ pud = pud_offset(pgd, addr);
4731+ pmd = pmd_offset(pud, addr);
4732+#ifdef CONFIG_ARM_LPAE
4733+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4734+#else
4735+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4736+#endif
4737+ }
4738+ }
4739+#endif
4740+
4741+#ifdef CONFIG_HAVE_TCM
4742 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4743 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4744 #endif
4745diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4746index d1e5ad7..84dcbf2 100644
4747--- a/arch/arm/mm/ioremap.c
4748+++ b/arch/arm/mm/ioremap.c
4749@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4750 unsigned int mtype;
4751
4752 if (cached)
4753- mtype = MT_MEMORY_RWX;
4754+ mtype = MT_MEMORY_RX;
4755 else
4756- mtype = MT_MEMORY_RWX_NONCACHED;
4757+ mtype = MT_MEMORY_RX_NONCACHED;
4758
4759 return __arm_ioremap_caller(phys_addr, size, mtype,
4760 __builtin_return_address(0));
4761diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4762index 5e85ed3..b10a7ed 100644
4763--- a/arch/arm/mm/mmap.c
4764+++ b/arch/arm/mm/mmap.c
4765@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4766 struct vm_area_struct *vma;
4767 int do_align = 0;
4768 int aliasing = cache_is_vipt_aliasing();
4769+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4770 struct vm_unmapped_area_info info;
4771
4772 /*
4773@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4774 if (len > TASK_SIZE)
4775 return -ENOMEM;
4776
4777+#ifdef CONFIG_PAX_RANDMMAP
4778+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4779+#endif
4780+
4781 if (addr) {
4782 if (do_align)
4783 addr = COLOUR_ALIGN(addr, pgoff);
4784@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4785 addr = PAGE_ALIGN(addr);
4786
4787 vma = find_vma(mm, addr);
4788- if (TASK_SIZE - len >= addr &&
4789- (!vma || addr + len <= vma->vm_start))
4790+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4791 return addr;
4792 }
4793
4794@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4795 info.high_limit = TASK_SIZE;
4796 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4797 info.align_offset = pgoff << PAGE_SHIFT;
4798+ info.threadstack_offset = offset;
4799 return vm_unmapped_area(&info);
4800 }
4801
4802@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4803 unsigned long addr = addr0;
4804 int do_align = 0;
4805 int aliasing = cache_is_vipt_aliasing();
4806+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4807 struct vm_unmapped_area_info info;
4808
4809 /*
4810@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4811 return addr;
4812 }
4813
4814+#ifdef CONFIG_PAX_RANDMMAP
4815+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4816+#endif
4817+
4818 /* requesting a specific address */
4819 if (addr) {
4820 if (do_align)
4821@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4822 else
4823 addr = PAGE_ALIGN(addr);
4824 vma = find_vma(mm, addr);
4825- if (TASK_SIZE - len >= addr &&
4826- (!vma || addr + len <= vma->vm_start))
4827+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4828 return addr;
4829 }
4830
4831@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4832 info.high_limit = mm->mmap_base;
4833 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4834 info.align_offset = pgoff << PAGE_SHIFT;
4835+ info.threadstack_offset = offset;
4836 addr = vm_unmapped_area(&info);
4837
4838 /*
4839@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4840 {
4841 unsigned long random_factor = 0UL;
4842
4843+#ifdef CONFIG_PAX_RANDMMAP
4844+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4845+#endif
4846+
4847 /* 8 bits of randomness in 20 address space bits */
4848 if ((current->flags & PF_RANDOMIZE) &&
4849 !(current->personality & ADDR_NO_RANDOMIZE))
4850@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4851
4852 if (mmap_is_legacy()) {
4853 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4854+
4855+#ifdef CONFIG_PAX_RANDMMAP
4856+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4857+ mm->mmap_base += mm->delta_mmap;
4858+#endif
4859+
4860 mm->get_unmapped_area = arch_get_unmapped_area;
4861 } else {
4862 mm->mmap_base = mmap_base(random_factor);
4863+
4864+#ifdef CONFIG_PAX_RANDMMAP
4865+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4866+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4867+#endif
4868+
4869 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4870 }
4871 }
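
arch_pick_mmap_layout() keeps the stock behaviour described by the comment in the hunk, 8 bits of randomness at page granularity for 20 randomized address bits, and under PAX_RANDMMAP shifts the base by the mm's delta fields instead. A sketch of the stock arithmetic (the (1 << 8) draw is not visible in the hunk's context, and the TASK_UNMAPPED_BASE value is a demo assumption; rand() stands in for get_random_int()):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12
#define TASK_UNMAPPED_BASE 0x40000000UL          /* demo assumption */

/* Stand-in for the kernel's get_random_int(); illustration only. */
static uint32_t get_random_int(void) { return (uint32_t)rand(); }

int main(void)
{
    /* 8 random bits, shifted to page granularity: 8 + 12 = 20 bits of
     * address space are affected, matching the comment in the hunk. */
    unsigned long random_factor =
        (get_random_int() % (1 << 8)) << PAGE_SHIFT;

    printf("random_factor max = 0x%lx\n", 255UL << PAGE_SHIFT);
    printf("legacy mmap_base  = 0x%lx\n", TASK_UNMAPPED_BASE + random_factor);
    return 0;
}
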
4872diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4873index 6e3ba8d..9cbb4d7 100644
4874--- a/arch/arm/mm/mmu.c
4875+++ b/arch/arm/mm/mmu.c
4876@@ -40,6 +40,22 @@
4877 #include "mm.h"
4878 #include "tcm.h"
4879
4880+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4881+void modify_domain(unsigned int dom, unsigned int type)
4882+{
4883+ struct thread_info *thread = current_thread_info();
4884+ unsigned int domain = thread->cpu_domain;
4885+ /*
4886+ * DOMAIN_MANAGER might be defined to some other value,
4887+ * use the arch-defined constant
4888+ */
4889+ domain &= ~domain_val(dom, 3);
4890+ thread->cpu_domain = domain | domain_val(dom, type);
4891+ set_domain(thread->cpu_domain);
4892+}
4893+EXPORT_SYMBOL(modify_domain);
4894+#endif
4895+
4896 /*
4897 * empty_zero_page is a special page that is used for
4898 * zero-initialized data and COW.
4899@@ -239,7 +255,15 @@ __setup("noalign", noalign_setup);
4900 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4901 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4902
4903-static struct mem_type mem_types[] = {
4904+#ifdef CONFIG_PAX_KERNEXEC
4905+#define L_PTE_KERNEXEC L_PTE_RDONLY
4906+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4907+#else
4908+#define L_PTE_KERNEXEC L_PTE_DIRTY
4909+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4910+#endif
4911+
4912+static struct mem_type mem_types[] __read_only = {
4913 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4914 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4915 L_PTE_SHARED,
4916@@ -268,19 +292,19 @@ static struct mem_type mem_types[] = {
4917 .prot_sect = PROT_SECT_DEVICE,
4918 .domain = DOMAIN_IO,
4919 },
4920- [MT_UNCACHED] = {
4921+ [MT_UNCACHED_RW] = {
4922 .prot_pte = PROT_PTE_DEVICE,
4923 .prot_l1 = PMD_TYPE_TABLE,
4924 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4925 .domain = DOMAIN_IO,
4926 },
4927- [MT_CACHECLEAN] = {
4928- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4929+ [MT_CACHECLEAN_RO] = {
4930+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4931 .domain = DOMAIN_KERNEL,
4932 },
4933 #ifndef CONFIG_ARM_LPAE
4934- [MT_MINICLEAN] = {
4935- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4936+ [MT_MINICLEAN_RO] = {
4937+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4938 .domain = DOMAIN_KERNEL,
4939 },
4940 #endif
4941@@ -288,15 +312,15 @@ static struct mem_type mem_types[] = {
4942 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4943 L_PTE_RDONLY,
4944 .prot_l1 = PMD_TYPE_TABLE,
4945- .domain = DOMAIN_USER,
4946+ .domain = DOMAIN_VECTORS,
4947 },
4948 [MT_HIGH_VECTORS] = {
4949 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4950 L_PTE_USER | L_PTE_RDONLY,
4951 .prot_l1 = PMD_TYPE_TABLE,
4952- .domain = DOMAIN_USER,
4953+ .domain = DOMAIN_VECTORS,
4954 },
4955- [MT_MEMORY_RWX] = {
4956+ [__MT_MEMORY_RWX] = {
4957 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4958 .prot_l1 = PMD_TYPE_TABLE,
4959 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4960@@ -309,17 +333,30 @@ static struct mem_type mem_types[] = {
4961 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4962 .domain = DOMAIN_KERNEL,
4963 },
4964- [MT_ROM] = {
4965- .prot_sect = PMD_TYPE_SECT,
4966+ [MT_MEMORY_RX] = {
4967+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4968+ .prot_l1 = PMD_TYPE_TABLE,
4969+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4970+ .domain = DOMAIN_KERNEL,
4971+ },
4972+ [MT_ROM_RX] = {
4973+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4974 .domain = DOMAIN_KERNEL,
4975 },
4976- [MT_MEMORY_RWX_NONCACHED] = {
4977+ [MT_MEMORY_RW_NONCACHED] = {
4978 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4979 L_PTE_MT_BUFFERABLE,
4980 .prot_l1 = PMD_TYPE_TABLE,
4981 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4982 .domain = DOMAIN_KERNEL,
4983 },
4984+ [MT_MEMORY_RX_NONCACHED] = {
4985+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4986+ L_PTE_MT_BUFFERABLE,
4987+ .prot_l1 = PMD_TYPE_TABLE,
4988+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4989+ .domain = DOMAIN_KERNEL,
4990+ },
4991 [MT_MEMORY_RW_DTCM] = {
4992 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4993 L_PTE_XN,
4994@@ -327,9 +364,10 @@ static struct mem_type mem_types[] = {
4995 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4996 .domain = DOMAIN_KERNEL,
4997 },
4998- [MT_MEMORY_RWX_ITCM] = {
4999- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
5000+ [MT_MEMORY_RX_ITCM] = {
5001+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
5002 .prot_l1 = PMD_TYPE_TABLE,
5003+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
5004 .domain = DOMAIN_KERNEL,
5005 },
5006 [MT_MEMORY_RW_SO] = {
5007@@ -547,9 +585,14 @@ static void __init build_mem_type_table(void)
5008 * Mark cache clean areas and XIP ROM read only
5009 * from SVC mode and no access from userspace.
5010 */
5011- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5012- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5013- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5014+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5015+#ifdef CONFIG_PAX_KERNEXEC
5016+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5017+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5018+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5019+#endif
5020+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5021+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5022 #endif
5023
5024 /*
5025@@ -566,13 +609,17 @@ static void __init build_mem_type_table(void)
5026 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
5027 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
5028 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
5029- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
5030- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
5031+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
5032+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
5033 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
5034 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
5035+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
5036+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
5037 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
5038- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
5039- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
5040+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
5041+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
5042+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
5043+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
5044 }
5045 }
5046
5047@@ -583,15 +630,20 @@ static void __init build_mem_type_table(void)
5048 if (cpu_arch >= CPU_ARCH_ARMv6) {
5049 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
5050 /* Non-cacheable Normal is XCB = 001 */
5051- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
5052+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
5053+ PMD_SECT_BUFFERED;
5054+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
5055 PMD_SECT_BUFFERED;
5056 } else {
5057 /* For both ARMv6 and non-TEX-remapping ARMv7 */
5058- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
5059+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
5060+ PMD_SECT_TEX(1);
5061+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
5062 PMD_SECT_TEX(1);
5063 }
5064 } else {
5065- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
5066+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
5067+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
5068 }
5069
5070 #ifdef CONFIG_ARM_LPAE
5071@@ -607,6 +659,8 @@ static void __init build_mem_type_table(void)
5072 vecs_pgprot |= PTE_EXT_AF;
5073 #endif
5074
5075+ user_pgprot |= __supported_pte_mask;
5076+
5077 for (i = 0; i < 16; i++) {
5078 pteval_t v = pgprot_val(protection_map[i]);
5079 protection_map[i] = __pgprot(v | user_pgprot);
5080@@ -624,21 +678,24 @@ static void __init build_mem_type_table(void)
5081
5082 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
5083 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
5084- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
5085- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
5086+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
5087+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
5088 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
5089 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
5090+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
5091+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
5092 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
5093- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
5094- mem_types[MT_ROM].prot_sect |= cp->pmd;
5095+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
5096+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
5097+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
5098
5099 switch (cp->pmd) {
5100 case PMD_SECT_WT:
5101- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
5102+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
5103 break;
5104 case PMD_SECT_WB:
5105 case PMD_SECT_WBWA:
5106- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
5107+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
5108 break;
5109 }
5110 pr_info("Memory policy: %sData cache %s\n",
5111@@ -856,7 +913,7 @@ static void __init create_mapping(struct map_desc *md)
5112 return;
5113 }
5114
5115- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
5116+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
5117 md->virtual >= PAGE_OFFSET &&
5118 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
5119 printk(KERN_WARNING "BUG: mapping for 0x%08llx"
5120@@ -1224,18 +1281,15 @@ void __init arm_mm_memblock_reserve(void)
5121 * called function. This means you can't use any function or debugging
5122 * method which may touch any device, otherwise the kernel _will_ crash.
5123 */
5124+
5125+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
5126+
5127 static void __init devicemaps_init(const struct machine_desc *mdesc)
5128 {
5129 struct map_desc map;
5130 unsigned long addr;
5131- void *vectors;
5132
5133- /*
5134- * Allocate the vector page early.
5135- */
5136- vectors = early_alloc(PAGE_SIZE * 2);
5137-
5138- early_trap_init(vectors);
5139+ early_trap_init(&vectors);
5140
5141 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
5142 pmd_clear(pmd_off_k(addr));
5143@@ -1248,7 +1302,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
5144 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
5145 map.virtual = MODULES_VADDR;
5146 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
5147- map.type = MT_ROM;
5148+ map.type = MT_ROM_RX;
5149 create_mapping(&map);
5150 #endif
5151
5152@@ -1259,14 +1313,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
5153 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
5154 map.virtual = FLUSH_BASE;
5155 map.length = SZ_1M;
5156- map.type = MT_CACHECLEAN;
5157+ map.type = MT_CACHECLEAN_RO;
5158 create_mapping(&map);
5159 #endif
5160 #ifdef FLUSH_BASE_MINICACHE
5161 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
5162 map.virtual = FLUSH_BASE_MINICACHE;
5163 map.length = SZ_1M;
5164- map.type = MT_MINICLEAN;
5165+ map.type = MT_MINICLEAN_RO;
5166 create_mapping(&map);
5167 #endif
5168
5169@@ -1275,7 +1329,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
5170 * location (0xffff0000). If we aren't using high-vectors, also
5171 * create a mapping at the low-vectors virtual address.
5172 */
5173- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
5174+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
5175 map.virtual = 0xffff0000;
5176 map.length = PAGE_SIZE;
5177 #ifdef CONFIG_KUSER_HELPERS
5178@@ -1335,8 +1389,10 @@ static void __init kmap_init(void)
5179 static void __init map_lowmem(void)
5180 {
5181 struct memblock_region *reg;
5182+#ifndef CONFIG_PAX_KERNEXEC
5183 unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
5184 unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
5185+#endif
5186
5187 /* Map all the lowmem memory banks. */
5188 for_each_memblock(memory, reg) {
5189@@ -1349,11 +1405,48 @@ static void __init map_lowmem(void)
5190 if (start >= end)
5191 break;
5192
5193+#ifdef CONFIG_PAX_KERNEXEC
5194+ map.pfn = __phys_to_pfn(start);
5195+ map.virtual = __phys_to_virt(start);
5196+ map.length = end - start;
5197+
5198+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
5199+ struct map_desc kernel;
5200+ struct map_desc initmap;
5201+
5202+ /* when freeing initmem we will make this RW */
5203+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
5204+ initmap.virtual = (unsigned long)__init_begin;
5205+ initmap.length = _sdata - __init_begin;
5206+ initmap.type = __MT_MEMORY_RWX;
5207+ create_mapping(&initmap);
5208+
5209+ /* when freeing initmem we will make this RX */
5210+ kernel.pfn = __phys_to_pfn(__pa(_stext));
5211+ kernel.virtual = (unsigned long)_stext;
5212+ kernel.length = __init_begin - _stext;
5213+ kernel.type = __MT_MEMORY_RWX;
5214+ create_mapping(&kernel);
5215+
5216+ if (map.virtual < (unsigned long)_stext) {
5217+ map.length = (unsigned long)_stext - map.virtual;
5218+ map.type = __MT_MEMORY_RWX;
5219+ create_mapping(&map);
5220+ }
5221+
5222+ map.pfn = __phys_to_pfn(__pa(_sdata));
5223+ map.virtual = (unsigned long)_sdata;
5224+ map.length = end - __pa(_sdata);
5225+ }
5226+
5227+ map.type = MT_MEMORY_RW;
5228+ create_mapping(&map);
5229+#else
5230 if (end < kernel_x_start || start >= kernel_x_end) {
5231 map.pfn = __phys_to_pfn(start);
5232 map.virtual = __phys_to_virt(start);
5233 map.length = end - start;
5234- map.type = MT_MEMORY_RWX;
5235+ map.type = __MT_MEMORY_RWX;
5236
5237 create_mapping(&map);
5238 } else {
5239@@ -1370,7 +1463,7 @@ static void __init map_lowmem(void)
5240 map.pfn = __phys_to_pfn(kernel_x_start);
5241 map.virtual = __phys_to_virt(kernel_x_start);
5242 map.length = kernel_x_end - kernel_x_start;
5243- map.type = MT_MEMORY_RWX;
5244+ map.type = __MT_MEMORY_RWX;
5245
5246 create_mapping(&map);
5247
5248@@ -1383,6 +1476,7 @@ static void __init map_lowmem(void)
5249 create_mapping(&map);
5250 }
5251 }
5252+#endif
5253 }
5254 }
5255
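
The map_lowmem() rework above is the heart of KERNEXEC on ARM: a lowmem bank that covers the kernel image is no longer mapped with a single RWX type but carved into text, init and data windows, so that only the text window can end up executable. Below is a minimal userspace sketch of that carve-up, assuming made-up placeholder addresses standing in for _stext/__init_begin/_sdata; it merely prints the regions the real code would hand to create_mapping().

#include <stdio.h>

struct map { unsigned long start, end; const char *type; };

static void emit(struct map m)
{
	printf("%#010lx-%#010lx  %s\n", m.start, m.end, m.type);
}

int main(void)
{
	/* hypothetical bank and kernel layout, mirroring _stext & friends */
	unsigned long bank_start = 0x80000000UL, bank_end = 0x90000000UL;
	unsigned long stext = 0x80008000UL, init_begin = 0x80700000UL;
	unsigned long sdata = 0x80800000UL;

	if (bank_start < stext)
		emit((struct map){ bank_start, stext, "RWX (below text)" });
	emit((struct map){ stext, init_begin, "RWX -> RX once init is freed" });
	emit((struct map){ init_begin, sdata, "RWX -> RW once init is freed" });
	emit((struct map){ sdata, bank_end, "RW, never executable" });
	return 0;
}
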
5256diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
5257index 5b217f4..c23f40e 100644
5258--- a/arch/arm/plat-iop/setup.c
5259+++ b/arch/arm/plat-iop/setup.c
5260@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
5261 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
5262 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
5263 .length = IOP3XX_PERIPHERAL_SIZE,
5264- .type = MT_UNCACHED,
5265+ .type = MT_UNCACHED_RW,
5266 },
5267 };
5268
5269diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
5270index a5bc92d..0bb4730 100644
5271--- a/arch/arm/plat-omap/sram.c
5272+++ b/arch/arm/plat-omap/sram.c
5273@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
5274 * Looks like we need to preserve some bootloader code at the
5275 * beginning of SRAM for jumping to flash for reboot to work...
5276 */
5277+ pax_open_kernel();
5278 memset_io(omap_sram_base + omap_sram_skip, 0,
5279 omap_sram_size - omap_sram_skip);
5280+ pax_close_kernel();
5281 }
5282diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
5283index ce6d763..cfea917 100644
5284--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
5285+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
5286@@ -47,7 +47,7 @@ struct samsung_dma_ops {
5287 int (*started)(unsigned ch);
5288 int (*flush)(unsigned ch);
5289 int (*stop)(unsigned ch);
5290-};
5291+} __no_const;
5292
5293 extern void *samsung_dmadev_get_ops(void);
5294 extern void *s3c_dma_get_ops(void);
5295diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
5296index 6389d60..b5d3bdd 100644
5297--- a/arch/arm64/include/asm/barrier.h
5298+++ b/arch/arm64/include/asm/barrier.h
5299@@ -41,7 +41,7 @@
5300 do { \
5301 compiletime_assert_atomic_type(*p); \
5302 barrier(); \
5303- ACCESS_ONCE(*p) = (v); \
5304+ ACCESS_ONCE_RW(*p) = (v); \
5305 } while (0)
5306
5307 #define smp_load_acquire(p) \
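
The ACCESS_ONCE -> ACCESS_ONCE_RW substitutions that recur throughout this patch (here in arm64's smp_store_release()) follow from grsecurity making ACCESS_ONCE() const-qualified, so that stray writes through it fail to compile and stores must opt in explicitly. A compilable miniature of the two macros:

#include <stdio.h>

#define ACCESS_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x)	(*(volatile __typeof__(x) *)&(x))

int main(void)
{
	int flag = 0;

	ACCESS_ONCE_RW(flag) = 1;		/* store: must use RW variant */
	printf("%d\n", ACCESS_ONCE(flag));	/* load: const variant is OK  */
	/* ACCESS_ONCE(flag) = 2;	rejected: assignment to const lvalue */
	return 0;
}
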
5308diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
5309index 3bf8f4e..5dd5491 100644
5310--- a/arch/arm64/include/asm/uaccess.h
5311+++ b/arch/arm64/include/asm/uaccess.h
5312@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
5313 flag; \
5314 })
5315
5316+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5317 #define access_ok(type, addr, size) __range_ok(addr, size)
5318 #define user_addr_max get_fs
5319
5320diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
5321index c3a58a1..78fbf54 100644
5322--- a/arch/avr32/include/asm/cache.h
5323+++ b/arch/avr32/include/asm/cache.h
5324@@ -1,8 +1,10 @@
5325 #ifndef __ASM_AVR32_CACHE_H
5326 #define __ASM_AVR32_CACHE_H
5327
5328+#include <linux/const.h>
5329+
5330 #define L1_CACHE_SHIFT 5
5331-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5332+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5333
5334 /*
5335 * Memory returned by kmalloc() may be used for DMA, so we must make
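
The cache.h edits repeated across avr32, blackfin, cris, frv, hexagon, ia64, m32r, m68k and microblaze below all make the same change: L1_CACHE_BYTES becomes an unsigned long via _AC(1,UL), so masks derived from it do not sign-extend, while the header stays includable from assembly, where a UL suffix would be a syntax error. A self-contained copy of the _AC() mechanism from uapi/linux/const.h:

#include <stdio.h>

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	5
#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)

int main(void)
{
	printf("line size: %lu\n", L1_CACHE_BYTES);	/* 32, unsigned long */
	printf("line mask: %#lx\n", ~(L1_CACHE_BYTES - 1));
	return 0;
}
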
5336diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
5337index d232888..87c8df1 100644
5338--- a/arch/avr32/include/asm/elf.h
5339+++ b/arch/avr32/include/asm/elf.h
5340@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
5341 the loader. We need to make sure that it is out of the way of the program
5342 that it will "exec", and that there is sufficient room for the brk. */
5343
5344-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
5345+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
5346
5347+#ifdef CONFIG_PAX_ASLR
5348+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
5349+
5350+#define PAX_DELTA_MMAP_LEN 15
5351+#define PAX_DELTA_STACK_LEN 15
5352+#endif
5353
5354 /* This yields a mask that user programs can use to figure out what
5355 instruction set this CPU supports. This could be done in user space,
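
The PAX_DELTA_MMAP_LEN/PAX_DELTA_STACK_LEN values introduced per architecture are bit counts of randomization applied at page granularity. Rough arithmetic for the avr32 value of 15, assuming 4 KiB pages:

#include <stdio.h>

int main(void)
{
	unsigned long delta_bits = 15;	/* PAX_DELTA_MMAP_LEN on avr32 */
	unsigned long page_shift = 12;	/* assumed 4 KiB pages         */

	printf("mmap randomization window: %lu MiB\n",
	       (1UL << (delta_bits + page_shift)) >> 20);	/* 128 */
	return 0;
}
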
5356diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
5357index 479330b..53717a8 100644
5358--- a/arch/avr32/include/asm/kmap_types.h
5359+++ b/arch/avr32/include/asm/kmap_types.h
5360@@ -2,9 +2,9 @@
5361 #define __ASM_AVR32_KMAP_TYPES_H
5362
5363 #ifdef CONFIG_DEBUG_HIGHMEM
5364-# define KM_TYPE_NR 29
5365+# define KM_TYPE_NR 30
5366 #else
5367-# define KM_TYPE_NR 14
5368+# define KM_TYPE_NR 15
5369 #endif
5370
5371 #endif /* __ASM_AVR32_KMAP_TYPES_H */
5372diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
5373index 0eca933..eb78c7b 100644
5374--- a/arch/avr32/mm/fault.c
5375+++ b/arch/avr32/mm/fault.c
5376@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
5377
5378 int exception_trace = 1;
5379
5380+#ifdef CONFIG_PAX_PAGEEXEC
5381+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5382+{
5383+ unsigned long i;
5384+
5385+ printk(KERN_ERR "PAX: bytes at PC: ");
5386+ for (i = 0; i < 20; i++) {
5387+ unsigned char c;
5388+ if (get_user(c, (unsigned char *)pc+i))
5389+ printk(KERN_CONT "???????? ");
5390+ else
5391+ printk(KERN_CONT "%02x ", c);
5392+ }
5393+ printk("\n");
5394+}
5395+#endif
5396+
5397 /*
5398 * This routine handles page faults. It determines the address and the
5399 * problem, and then passes it off to one of the appropriate routines.
5400@@ -176,6 +193,16 @@ bad_area:
5401 up_read(&mm->mmap_sem);
5402
5403 if (user_mode(regs)) {
5404+
5405+#ifdef CONFIG_PAX_PAGEEXEC
5406+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
5407+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
5408+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
5409+ do_group_exit(SIGKILL);
5410+ }
5411+ }
5412+#endif
5413+
5414 if (exception_trace && printk_ratelimit())
5415 printk("%s%s[%d]: segfault at %08lx pc %08lx "
5416 "sp %08lx ecr %lu\n",
5417diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
5418index 568885a..f8008df 100644
5419--- a/arch/blackfin/include/asm/cache.h
5420+++ b/arch/blackfin/include/asm/cache.h
5421@@ -7,6 +7,7 @@
5422 #ifndef __ARCH_BLACKFIN_CACHE_H
5423 #define __ARCH_BLACKFIN_CACHE_H
5424
5425+#include <linux/const.h>
5426 #include <linux/linkage.h> /* for asmlinkage */
5427
5428 /*
5429@@ -14,7 +15,7 @@
5430 * Blackfin loads 32 bytes for cache
5431 */
5432 #define L1_CACHE_SHIFT 5
5433-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5434+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5435 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5436
5437 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5438diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
5439index aea2718..3639a60 100644
5440--- a/arch/cris/include/arch-v10/arch/cache.h
5441+++ b/arch/cris/include/arch-v10/arch/cache.h
5442@@ -1,8 +1,9 @@
5443 #ifndef _ASM_ARCH_CACHE_H
5444 #define _ASM_ARCH_CACHE_H
5445
5446+#include <linux/const.h>
5447 /* Etrax 100LX have 32-byte cache-lines. */
5448-#define L1_CACHE_BYTES 32
5449 #define L1_CACHE_SHIFT 5
5450+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5451
5452 #endif /* _ASM_ARCH_CACHE_H */
5453diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
5454index 7caf25d..ee65ac5 100644
5455--- a/arch/cris/include/arch-v32/arch/cache.h
5456+++ b/arch/cris/include/arch-v32/arch/cache.h
5457@@ -1,11 +1,12 @@
5458 #ifndef _ASM_CRIS_ARCH_CACHE_H
5459 #define _ASM_CRIS_ARCH_CACHE_H
5460
5461+#include <linux/const.h>
5462 #include <arch/hwregs/dma.h>
5463
5464 /* A cache-line is 32 bytes. */
5465-#define L1_CACHE_BYTES 32
5466 #define L1_CACHE_SHIFT 5
5467+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5468
5469 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5470
5471diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
5472index f6c3a16..cd422a4 100644
5473--- a/arch/frv/include/asm/atomic.h
5474+++ b/arch/frv/include/asm/atomic.h
5475@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
5476 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
5477 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
5478
5479+#define atomic64_read_unchecked(v) atomic64_read(v)
5480+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5481+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5482+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5483+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5484+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5485+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5486+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5487+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5488+
5489 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5490 {
5491 int c, old;
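
The atomic64_*_unchecked aliases defined here (and for ia64 further down) exist because PAX_REFCOUNT turns the ordinary atomics into overflow-trapping operations on instrumented architectures; counters that may legitimately wrap use the _unchecked variants, and on uninstrumented architectures like frv the two families are simply identical. A portable, non-atomic sketch of the distinction (the real kernel versions use ll/sc or trapping add sequences, as the MIPS hunks below show):

#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile long counter; } atomic64_t;
typedef struct { volatile long counter; } atomic64_unchecked_t;

static void atomic64_add(long i, atomic64_t *v)
{
	long res;

	if (__builtin_add_overflow(v->counter, i, &res)) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();		/* kernel: report and contain */
	}
	v->counter = res;
}

static void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
{
	v->counter += i;		/* wrapping is acceptable here */
}

int main(void)
{
	atomic64_t ref = { 0 };
	atomic64_unchecked_t stat = { 0 };

	atomic64_add(1, &ref);
	atomic64_add_unchecked(-1, &stat);
	printf("%ld %ld\n", ref.counter, stat.counter);	/* 1 -1 */
	return 0;
}
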
5492diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5493index 2797163..c2a401d 100644
5494--- a/arch/frv/include/asm/cache.h
5495+++ b/arch/frv/include/asm/cache.h
5496@@ -12,10 +12,11 @@
5497 #ifndef __ASM_CACHE_H
5498 #define __ASM_CACHE_H
5499
5500+#include <linux/const.h>
5501
5502 /* bytes per L1 cache line */
5503 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5504-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5505+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5506
5507 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5508 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5509diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5510index 43901f2..0d8b865 100644
5511--- a/arch/frv/include/asm/kmap_types.h
5512+++ b/arch/frv/include/asm/kmap_types.h
5513@@ -2,6 +2,6 @@
5514 #ifndef _ASM_KMAP_TYPES_H
5515 #define _ASM_KMAP_TYPES_H
5516
5517-#define KM_TYPE_NR 17
5518+#define KM_TYPE_NR 18
5519
5520 #endif
5521diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5522index 836f147..4cf23f5 100644
5523--- a/arch/frv/mm/elf-fdpic.c
5524+++ b/arch/frv/mm/elf-fdpic.c
5525@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5526 {
5527 struct vm_area_struct *vma;
5528 struct vm_unmapped_area_info info;
5529+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5530
5531 if (len > TASK_SIZE)
5532 return -ENOMEM;
5533@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5534 if (addr) {
5535 addr = PAGE_ALIGN(addr);
5536 vma = find_vma(current->mm, addr);
5537- if (TASK_SIZE - len >= addr &&
5538- (!vma || addr + len <= vma->vm_start))
5539+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5540 goto success;
5541 }
5542
5543@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5544 info.high_limit = (current->mm->start_stack - 0x00200000);
5545 info.align_mask = 0;
5546 info.align_offset = 0;
5547+ info.threadstack_offset = offset;
5548 addr = vm_unmapped_area(&info);
5549 if (!(addr & ~PAGE_MASK))
5550 goto success;
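
check_heap_stack_gap(), substituted here for the open-coded "!vma || addr + len <= vma->vm_start" test, additionally keeps a guard gap between a new mapping and a stack vma (randomized per thread via the threadstack_offset plumbing also added above). The sketch below is a simplification with invented field names; the real helper also handles grows-down details and a configurable gap size.

#include <stdbool.h>
#include <stdio.h>

struct vma { unsigned long vm_start, vm_end; bool is_stack; };

static bool check_heap_stack_gap(const struct vma *next, unsigned long addr,
				 unsigned long len, unsigned long gap)
{
	if (!next)
		return true;		/* nothing mapped above us */
	if (next->is_stack)
		return addr + len + gap <= next->vm_start;
	return addr + len <= next->vm_start;
}

int main(void)
{
	struct vma stack = { 0x7f000000UL, 0x7f100000UL, true };

	/* fits geometrically, but lands inside the guard gap: rejected */
	printf("%d\n", check_heap_stack_gap(&stack, 0x7efff000UL,
					    0x1000UL, 0x10000UL));
	return 0;
}
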
5551diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5552index f4ca594..adc72fd6 100644
5553--- a/arch/hexagon/include/asm/cache.h
5554+++ b/arch/hexagon/include/asm/cache.h
5555@@ -21,9 +21,11 @@
5556 #ifndef __ASM_CACHE_H
5557 #define __ASM_CACHE_H
5558
5559+#include <linux/const.h>
5560+
5561 /* Bytes per L1 cache line */
5562-#define L1_CACHE_SHIFT (5)
5563-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5564+#define L1_CACHE_SHIFT 5
5565+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5566
5567 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
5568 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
5569diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5570index 2f3abcf..e63c7fa 100644
5571--- a/arch/ia64/Kconfig
5572+++ b/arch/ia64/Kconfig
5573@@ -547,6 +547,7 @@ source "drivers/sn/Kconfig"
5574 config KEXEC
5575 bool "kexec system call"
5576 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5577+ depends on !GRKERNSEC_KMEM
5578 help
5579 kexec is a system call that implements the ability to shutdown your
5580 current kernel, and to start another kernel. It is like a reboot
5581diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
5582index f37238f..810b95f 100644
5583--- a/arch/ia64/Makefile
5584+++ b/arch/ia64/Makefile
5585@@ -99,5 +99,6 @@ endef
5586 archprepare: make_nr_irqs_h FORCE
5587 PHONY += make_nr_irqs_h FORCE
5588
5589+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
5590 make_nr_irqs_h: FORCE
5591 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
5592diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5593index 0f8bf48..40ea950 100644
5594--- a/arch/ia64/include/asm/atomic.h
5595+++ b/arch/ia64/include/asm/atomic.h
5596@@ -209,4 +209,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5597 #define atomic64_inc(v) atomic64_add(1, (v))
5598 #define atomic64_dec(v) atomic64_sub(1, (v))
5599
5600+#define atomic64_read_unchecked(v) atomic64_read(v)
5601+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5602+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5603+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5604+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5605+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5606+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5607+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5608+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5609+
5610 #endif /* _ASM_IA64_ATOMIC_H */
5611diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5612index a48957c..e097b56 100644
5613--- a/arch/ia64/include/asm/barrier.h
5614+++ b/arch/ia64/include/asm/barrier.h
5615@@ -67,7 +67,7 @@
5616 do { \
5617 compiletime_assert_atomic_type(*p); \
5618 barrier(); \
5619- ACCESS_ONCE(*p) = (v); \
5620+ ACCESS_ONCE_RW(*p) = (v); \
5621 } while (0)
5622
5623 #define smp_load_acquire(p) \
5624diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5625index 988254a..e1ee885 100644
5626--- a/arch/ia64/include/asm/cache.h
5627+++ b/arch/ia64/include/asm/cache.h
5628@@ -1,6 +1,7 @@
5629 #ifndef _ASM_IA64_CACHE_H
5630 #define _ASM_IA64_CACHE_H
5631
5632+#include <linux/const.h>
5633
5634 /*
5635 * Copyright (C) 1998-2000 Hewlett-Packard Co
5636@@ -9,7 +10,7 @@
5637
5638 /* Bytes per L1 (data) cache line. */
5639 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5640-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5641+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5642
5643 #ifdef CONFIG_SMP
5644 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5645diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5646index 5a83c5c..4d7f553 100644
5647--- a/arch/ia64/include/asm/elf.h
5648+++ b/arch/ia64/include/asm/elf.h
5649@@ -42,6 +42,13 @@
5650 */
5651 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5652
5653+#ifdef CONFIG_PAX_ASLR
5654+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5655+
5656+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5657+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5658+#endif
5659+
5660 #define PT_IA_64_UNWIND 0x70000001
5661
5662 /* IA-64 relocations: */
5663diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5664index 5767cdf..7462574 100644
5665--- a/arch/ia64/include/asm/pgalloc.h
5666+++ b/arch/ia64/include/asm/pgalloc.h
5667@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5668 pgd_val(*pgd_entry) = __pa(pud);
5669 }
5670
5671+static inline void
5672+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5673+{
5674+ pgd_populate(mm, pgd_entry, pud);
5675+}
5676+
5677 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5678 {
5679 return quicklist_alloc(0, GFP_KERNEL, NULL);
5680@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5681 pud_val(*pud_entry) = __pa(pmd);
5682 }
5683
5684+static inline void
5685+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5686+{
5687+ pud_populate(mm, pud_entry, pmd);
5688+}
5689+
5690 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5691 {
5692 return quicklist_alloc(0, GFP_KERNEL, NULL);
5693diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5694index 7935115..c0eca6a 100644
5695--- a/arch/ia64/include/asm/pgtable.h
5696+++ b/arch/ia64/include/asm/pgtable.h
5697@@ -12,7 +12,7 @@
5698 * David Mosberger-Tang <davidm@hpl.hp.com>
5699 */
5700
5701-
5702+#include <linux/const.h>
5703 #include <asm/mman.h>
5704 #include <asm/page.h>
5705 #include <asm/processor.h>
5706@@ -142,6 +142,17 @@
5707 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5708 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5709 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5710+
5711+#ifdef CONFIG_PAX_PAGEEXEC
5712+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5713+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5714+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5715+#else
5716+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5717+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5718+# define PAGE_COPY_NOEXEC PAGE_COPY
5719+#endif
5720+
5721 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5722 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5723 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5724diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5725index 45698cd..e8e2dbc 100644
5726--- a/arch/ia64/include/asm/spinlock.h
5727+++ b/arch/ia64/include/asm/spinlock.h
5728@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5729 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5730
5731 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5732- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5733+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5734 }
5735
5736 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5737diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5738index 449c8c0..3d4b1e9 100644
5739--- a/arch/ia64/include/asm/uaccess.h
5740+++ b/arch/ia64/include/asm/uaccess.h
5741@@ -70,6 +70,7 @@
5742 && ((segment).seg == KERNEL_DS.seg \
5743 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5744 })
5745+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5746 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5747
5748 /*
5749@@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5750 static inline unsigned long
5751 __copy_to_user (void __user *to, const void *from, unsigned long count)
5752 {
5753+ if (count > INT_MAX)
5754+ return count;
5755+
5756+ if (!__builtin_constant_p(count))
5757+ check_object_size(from, count, true);
5758+
5759 return __copy_user(to, (__force void __user *) from, count);
5760 }
5761
5762 static inline unsigned long
5763 __copy_from_user (void *to, const void __user *from, unsigned long count)
5764 {
5765+ if (count > INT_MAX)
5766+ return count;
5767+
5768+ if (!__builtin_constant_p(count))
5769+ check_object_size(to, count, false);
5770+
5771 return __copy_user((__force void __user *) to, from, count);
5772 }
5773
5774@@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5775 ({ \
5776 void __user *__cu_to = (to); \
5777 const void *__cu_from = (from); \
5778- long __cu_len = (n); \
5779+ unsigned long __cu_len = (n); \
5780 \
5781- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5782+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5783+ if (!__builtin_constant_p(n)) \
5784+ check_object_size(__cu_from, __cu_len, true); \
5785 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5786+ } \
5787 __cu_len; \
5788 })
5789
5790@@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5791 ({ \
5792 void *__cu_to = (to); \
5793 const void __user *__cu_from = (from); \
5794- long __cu_len = (n); \
5795+ unsigned long __cu_len = (n); \
5796 \
5797 __chk_user_ptr(__cu_from); \
5798- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5799+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5800+ if (!__builtin_constant_p(n)) \
5801+ check_object_size(__cu_to, __cu_len, false); \
5802 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5803+ } \
5804 __cu_len; \
5805 })
5806
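
The check_object_size() calls wired into ia64's __copy_to_user()/__copy_from_user() are the PAX_USERCOPY hook: for non-constant sizes, the copy is validated against the bounds of the kernel object it reads or writes. As the hunk shows, the real helper takes (ptr, size, to_user) and derives the object bounds itself from slab or stack metadata; the userspace sketch below passes the bounds in explicitly, purely to show the check being made.

#include <stdio.h>
#include <stdlib.h>

static void check_object_size(const void *ptr, unsigned long n,
			      const void *obj, unsigned long objsz)
{
	const char *p = ptr, *o = obj;

	if (p < o || p + n > o + objsz) {
		fprintf(stderr, "usercopy: copy spills out of object\n");
		abort();
	}
}

int main(void)
{
	char obj[64];

	check_object_size(obj + 8, 32, obj, sizeof(obj));	/* fine */
	printf("in-bounds copy allowed\n");
	/* check_object_size(obj + 8, 64, obj, sizeof(obj));	aborts */
	return 0;
}
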
5807diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5808index 24603be..948052d 100644
5809--- a/arch/ia64/kernel/module.c
5810+++ b/arch/ia64/kernel/module.c
5811@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
5812 void
5813 module_free (struct module *mod, void *module_region)
5814 {
5815- if (mod && mod->arch.init_unw_table &&
5816- module_region == mod->module_init) {
5817+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
5818 unw_remove_unwind_table(mod->arch.init_unw_table);
5819 mod->arch.init_unw_table = NULL;
5820 }
5821@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5822 }
5823
5824 static inline int
5825+in_init_rx (const struct module *mod, uint64_t addr)
5826+{
5827+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5828+}
5829+
5830+static inline int
5831+in_init_rw (const struct module *mod, uint64_t addr)
5832+{
5833+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5834+}
5835+
5836+static inline int
5837 in_init (const struct module *mod, uint64_t addr)
5838 {
5839- return addr - (uint64_t) mod->module_init < mod->init_size;
5840+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5841+}
5842+
5843+static inline int
5844+in_core_rx (const struct module *mod, uint64_t addr)
5845+{
5846+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5847+}
5848+
5849+static inline int
5850+in_core_rw (const struct module *mod, uint64_t addr)
5851+{
5852+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5853 }
5854
5855 static inline int
5856 in_core (const struct module *mod, uint64_t addr)
5857 {
5858- return addr - (uint64_t) mod->module_core < mod->core_size;
5859+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5860 }
5861
5862 static inline int
5863@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5864 break;
5865
5866 case RV_BDREL:
5867- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5868+ if (in_init_rx(mod, val))
5869+ val -= (uint64_t) mod->module_init_rx;
5870+ else if (in_init_rw(mod, val))
5871+ val -= (uint64_t) mod->module_init_rw;
5872+ else if (in_core_rx(mod, val))
5873+ val -= (uint64_t) mod->module_core_rx;
5874+ else if (in_core_rw(mod, val))
5875+ val -= (uint64_t) mod->module_core_rw;
5876 break;
5877
5878 case RV_LTV:
5879@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5880 * addresses have been selected...
5881 */
5882 uint64_t gp;
5883- if (mod->core_size > MAX_LTOFF)
5884+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5885 /*
5886 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5887 * at the end of the module.
5888 */
5889- gp = mod->core_size - MAX_LTOFF / 2;
5890+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5891 else
5892- gp = mod->core_size / 2;
5893- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5894+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5895+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5896 mod->arch.gp = gp;
5897 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5898 }
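
The ia64 module.c changes track the module loader's split of every module into an RX region (text) and an RW region (data), so address classification needs four range tests instead of two. A minimal model of the in_core{,_rx,_rw}() logic, including the unsigned-subtraction range trick the kernel code relies on:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct module {
	uintptr_t core_rx, core_rw;		/* region base addresses */
	uint64_t  core_size_rx, core_size_rw;
};

static bool in_core_rx(const struct module *m, uintptr_t a)
{
	return a - m->core_rx < m->core_size_rx;  /* wraps if a < base */
}
static bool in_core_rw(const struct module *m, uintptr_t a)
{
	return a - m->core_rw < m->core_size_rw;
}
static bool in_core(const struct module *m, uintptr_t a)
{
	return in_core_rx(m, a) || in_core_rw(m, a);
}

int main(void)
{
	struct module m = { 0x1000, 0x9000, 0x4000, 0x2000 };

	printf("%d %d %d\n", in_core(&m, 0x2000),	/* in rx  -> 1 */
			     in_core(&m, 0xa000),	/* in rw  -> 1 */
			     in_core(&m, 0x8000));	/* in gap -> 0 */
	return 0;
}
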
5899diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5900index c39c3cd..3c77738 100644
5901--- a/arch/ia64/kernel/palinfo.c
5902+++ b/arch/ia64/kernel/palinfo.c
5903@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5904 return NOTIFY_OK;
5905 }
5906
5907-static struct notifier_block __refdata palinfo_cpu_notifier =
5908+static struct notifier_block palinfo_cpu_notifier =
5909 {
5910 .notifier_call = palinfo_cpu_callback,
5911 .priority = 0,
5912diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5913index 41e33f8..65180b2a 100644
5914--- a/arch/ia64/kernel/sys_ia64.c
5915+++ b/arch/ia64/kernel/sys_ia64.c
5916@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5917 unsigned long align_mask = 0;
5918 struct mm_struct *mm = current->mm;
5919 struct vm_unmapped_area_info info;
5920+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5921
5922 if (len > RGN_MAP_LIMIT)
5923 return -ENOMEM;
5924@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5925 if (REGION_NUMBER(addr) == RGN_HPAGE)
5926 addr = 0;
5927 #endif
5928+
5929+#ifdef CONFIG_PAX_RANDMMAP
5930+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5931+ addr = mm->free_area_cache;
5932+ else
5933+#endif
5934+
5935 if (!addr)
5936 addr = TASK_UNMAPPED_BASE;
5937
5938@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5939 info.high_limit = TASK_SIZE;
5940 info.align_mask = align_mask;
5941 info.align_offset = 0;
5942+ info.threadstack_offset = offset;
5943 return vm_unmapped_area(&info);
5944 }
5945
5946diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5947index 84f8a52..7c76178 100644
5948--- a/arch/ia64/kernel/vmlinux.lds.S
5949+++ b/arch/ia64/kernel/vmlinux.lds.S
5950@@ -192,7 +192,7 @@ SECTIONS {
5951 /* Per-cpu data: */
5952 . = ALIGN(PERCPU_PAGE_SIZE);
5953 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5954- __phys_per_cpu_start = __per_cpu_load;
5955+ __phys_per_cpu_start = per_cpu_load;
5956 /*
5957 * ensure percpu data fits
5958 * into percpu page size
5959diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5960index 7225dad..2a7c8256 100644
5961--- a/arch/ia64/mm/fault.c
5962+++ b/arch/ia64/mm/fault.c
5963@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5964 return pte_present(pte);
5965 }
5966
5967+#ifdef CONFIG_PAX_PAGEEXEC
5968+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5969+{
5970+ unsigned long i;
5971+
5972+ printk(KERN_ERR "PAX: bytes at PC: ");
5973+ for (i = 0; i < 8; i++) {
5974+ unsigned int c;
5975+ if (get_user(c, (unsigned int *)pc+i))
5976+ printk(KERN_CONT "???????? ");
5977+ else
5978+ printk(KERN_CONT "%08x ", c);
5979+ }
5980+ printk("\n");
5981+}
5982+#endif
5983+
5984 # define VM_READ_BIT 0
5985 # define VM_WRITE_BIT 1
5986 # define VM_EXEC_BIT 2
5987@@ -151,8 +168,21 @@ retry:
5988 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5989 goto bad_area;
5990
5991- if ((vma->vm_flags & mask) != mask)
5992+ if ((vma->vm_flags & mask) != mask) {
5993+
5994+#ifdef CONFIG_PAX_PAGEEXEC
5995+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5996+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5997+ goto bad_area;
5998+
5999+ up_read(&mm->mmap_sem);
6000+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
6001+ do_group_exit(SIGKILL);
6002+ }
6003+#endif
6004+
6005 goto bad_area;
6006+ }
6007
6008 /*
6009 * If for any reason at all we couldn't handle the fault, make
6010diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
6011index 76069c1..c2aa816 100644
6012--- a/arch/ia64/mm/hugetlbpage.c
6013+++ b/arch/ia64/mm/hugetlbpage.c
6014@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
6015 unsigned long pgoff, unsigned long flags)
6016 {
6017 struct vm_unmapped_area_info info;
6018+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
6019
6020 if (len > RGN_MAP_LIMIT)
6021 return -ENOMEM;
6022@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
6023 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
6024 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
6025 info.align_offset = 0;
6026+ info.threadstack_offset = offset;
6027 return vm_unmapped_area(&info);
6028 }
6029
6030diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
6031index 25c3502..560dae7 100644
6032--- a/arch/ia64/mm/init.c
6033+++ b/arch/ia64/mm/init.c
6034@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
6035 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
6036 vma->vm_end = vma->vm_start + PAGE_SIZE;
6037 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
6038+
6039+#ifdef CONFIG_PAX_PAGEEXEC
6040+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
6041+ vma->vm_flags &= ~VM_EXEC;
6042+
6043+#ifdef CONFIG_PAX_MPROTECT
6044+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
6045+ vma->vm_flags &= ~VM_MAYEXEC;
6046+#endif
6047+
6048+ }
6049+#endif
6050+
6051 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6052 down_write(&current->mm->mmap_sem);
6053 if (insert_vm_struct(current->mm, vma)) {
6054diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
6055index 40b3ee9..8c2c112 100644
6056--- a/arch/m32r/include/asm/cache.h
6057+++ b/arch/m32r/include/asm/cache.h
6058@@ -1,8 +1,10 @@
6059 #ifndef _ASM_M32R_CACHE_H
6060 #define _ASM_M32R_CACHE_H
6061
6062+#include <linux/const.h>
6063+
6064 /* L1 cache line size */
6065 #define L1_CACHE_SHIFT 4
6066-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6067+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6068
6069 #endif /* _ASM_M32R_CACHE_H */
6070diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
6071index 82abd15..d95ae5d 100644
6072--- a/arch/m32r/lib/usercopy.c
6073+++ b/arch/m32r/lib/usercopy.c
6074@@ -14,6 +14,9 @@
6075 unsigned long
6076 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
6077 {
6078+ if ((long)n < 0)
6079+ return n;
6080+
6081 prefetch(from);
6082 if (access_ok(VERIFY_WRITE, to, n))
6083 __copy_user(to,from,n);
6084@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
6085 unsigned long
6086 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
6087 {
6088+ if ((long)n < 0)
6089+ return n;
6090+
6091 prefetchw(to);
6092 if (access_ok(VERIFY_READ, from, n))
6093 __copy_user_zeroing(to,from,n);
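
The "(long)n < 0" guards added to m32r's usercopy helpers treat a length with the top bit set as poison: such a value, typically a negative size that escaped validation, would otherwise become a near-SIZE_MAX copy once interpreted as unsigned. Minimal illustration:

#include <stdio.h>

static unsigned long guarded_copy(unsigned long n)
{
	if ((long)n < 0)
		return n;	/* refuse: caller sees n bytes uncopied */
	return 0;		/* pretend the whole copy succeeded     */
}

int main(void)
{
	long user_len = -1;	/* e.g. a length that escaped validation */

	printf("%lu bytes left uncopied\n",
	       guarded_copy((unsigned long)user_len));
	return 0;
}
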
6094diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
6095index 0395c51..5f26031 100644
6096--- a/arch/m68k/include/asm/cache.h
6097+++ b/arch/m68k/include/asm/cache.h
6098@@ -4,9 +4,11 @@
6099 #ifndef __ARCH_M68K_CACHE_H
6100 #define __ARCH_M68K_CACHE_H
6101
6102+#include <linux/const.h>
6103+
6104 /* bytes per L1 cache line */
6105 #define L1_CACHE_SHIFT 4
6106-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
6107+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6108
6109 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
6110
6111diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
6112index c7591e8..ecef036 100644
6113--- a/arch/metag/include/asm/barrier.h
6114+++ b/arch/metag/include/asm/barrier.h
6115@@ -89,7 +89,7 @@ static inline void fence(void)
6116 do { \
6117 compiletime_assert_atomic_type(*p); \
6118 smp_mb(); \
6119- ACCESS_ONCE(*p) = (v); \
6120+ ACCESS_ONCE_RW(*p) = (v); \
6121 } while (0)
6122
6123 #define smp_load_acquire(p) \
6124diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
6125index 3c52fa6..11b2ad8 100644
6126--- a/arch/metag/mm/hugetlbpage.c
6127+++ b/arch/metag/mm/hugetlbpage.c
6128@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
6129 info.high_limit = TASK_SIZE;
6130 info.align_mask = PAGE_MASK & HUGEPT_MASK;
6131 info.align_offset = 0;
6132+ info.threadstack_offset = 0;
6133 return vm_unmapped_area(&info);
6134 }
6135
6136diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
6137index 4efe96a..60e8699 100644
6138--- a/arch/microblaze/include/asm/cache.h
6139+++ b/arch/microblaze/include/asm/cache.h
6140@@ -13,11 +13,12 @@
6141 #ifndef _ASM_MICROBLAZE_CACHE_H
6142 #define _ASM_MICROBLAZE_CACHE_H
6143
6144+#include <linux/const.h>
6145 #include <asm/registers.h>
6146
6147 #define L1_CACHE_SHIFT 5
6148 /* word-granular cache in microblaze */
6149-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6150+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6151
6152 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6153
6154diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
6155index 4e238e6..7c9ed92 100644
6156--- a/arch/mips/Kconfig
6157+++ b/arch/mips/Kconfig
6158@@ -2392,6 +2392,7 @@ source "kernel/Kconfig.preempt"
6159
6160 config KEXEC
6161 bool "Kexec system call"
6162+ depends on !GRKERNSEC_KMEM
6163 help
6164 kexec is a system call that implements the ability to shutdown your
6165 current kernel, and to start another kernel. It is like a reboot
6166diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
6167index 02f2444..506969c 100644
6168--- a/arch/mips/cavium-octeon/dma-octeon.c
6169+++ b/arch/mips/cavium-octeon/dma-octeon.c
6170@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
6171 if (dma_release_from_coherent(dev, order, vaddr))
6172 return;
6173
6174- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
6175+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
6176 }
6177
6178 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
6179diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
6180index 37b2bef..02122b8 100644
6181--- a/arch/mips/include/asm/atomic.h
6182+++ b/arch/mips/include/asm/atomic.h
6183@@ -21,15 +21,39 @@
6184 #include <asm/cmpxchg.h>
6185 #include <asm/war.h>
6186
6187+#ifdef CONFIG_GENERIC_ATOMIC64
6188+#include <asm-generic/atomic64.h>
6189+#endif
6190+
6191 #define ATOMIC_INIT(i) { (i) }
6192
6193+#ifdef CONFIG_64BIT
6194+#define _ASM_EXTABLE(from, to) \
6195+" .section __ex_table,\"a\"\n" \
6196+" .dword " #from ", " #to"\n" \
6197+" .previous\n"
6198+#else
6199+#define _ASM_EXTABLE(from, to) \
6200+" .section __ex_table,\"a\"\n" \
6201+" .word " #from ", " #to"\n" \
6202+" .previous\n"
6203+#endif
6204+
6205 /*
6206 * atomic_read - read atomic variable
6207 * @v: pointer of type atomic_t
6208 *
6209 * Atomically reads the value of @v.
6210 */
6211-#define atomic_read(v) (*(volatile int *)&(v)->counter)
6212+static inline int atomic_read(const atomic_t *v)
6213+{
6214+ return (*(volatile const int *) &v->counter);
6215+}
6216+
6217+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6218+{
6219+ return (*(volatile const int *) &v->counter);
6220+}
6221
6222 /*
6223 * atomic_set - set atomic variable
6224@@ -38,7 +62,15 @@
6225 *
6226 * Atomically sets the value of @v to @i.
6227 */
6228-#define atomic_set(v, i) ((v)->counter = (i))
6229+static inline void atomic_set(atomic_t *v, int i)
6230+{
6231+ v->counter = i;
6232+}
6233+
6234+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6235+{
6236+ v->counter = i;
6237+}
6238
6239 /*
6240 * atomic_add - add integer to atomic variable
6241@@ -47,7 +79,67 @@
6242 *
6243 * Atomically adds @i to @v.
6244 */
6245-static __inline__ void atomic_add(int i, atomic_t * v)
6246+static __inline__ void atomic_add(int i, atomic_t *v)
6247+{
6248+ int temp;
6249+
6250+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6251+ __asm__ __volatile__(
6252+ " .set mips3 \n"
6253+ "1: ll %0, %1 # atomic_add \n"
6254+#ifdef CONFIG_PAX_REFCOUNT
6255+ /* Exception on overflow. */
6256+ "2: add %0, %2 \n"
6257+#else
6258+ " addu %0, %2 \n"
6259+#endif
6260+ " sc %0, %1 \n"
6261+ " beqzl %0, 1b \n"
6262+#ifdef CONFIG_PAX_REFCOUNT
6263+ "3: \n"
6264+ _ASM_EXTABLE(2b, 3b)
6265+#endif
6266+ " .set mips0 \n"
6267+ : "=&r" (temp), "+m" (v->counter)
6268+ : "Ir" (i));
6269+ } else if (kernel_uses_llsc) {
6270+ __asm__ __volatile__(
6271+ " .set mips3 \n"
6272+ "1: ll %0, %1 # atomic_add \n"
6273+#ifdef CONFIG_PAX_REFCOUNT
6274+ /* Exception on overflow. */
6275+ "2: add %0, %2 \n"
6276+#else
6277+ " addu %0, %2 \n"
6278+#endif
6279+ " sc %0, %1 \n"
6280+ " beqz %0, 1b \n"
6281+#ifdef CONFIG_PAX_REFCOUNT
6282+ "3: \n"
6283+ _ASM_EXTABLE(2b, 3b)
6284+#endif
6285+ " .set mips0 \n"
6286+ : "=&r" (temp), "+m" (v->counter)
6287+ : "Ir" (i));
6288+ } else {
6289+ unsigned long flags;
6290+
6291+ raw_local_irq_save(flags);
6292+ __asm__ __volatile__(
6293+#ifdef CONFIG_PAX_REFCOUNT
6294+ /* Exception on overflow. */
6295+ "1: add %0, %1 \n"
6296+ "2: \n"
6297+ _ASM_EXTABLE(1b, 2b)
6298+#else
6299+ " addu %0, %1 \n"
6300+#endif
6301+ : "+r" (v->counter) : "Ir" (i));
6302+ raw_local_irq_restore(flags);
6303+ }
6304+}
6305+
6306+static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6307 {
6308 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6309 int temp;
6310@@ -90,7 +182,67 @@ static __inline__ void atomic_add(int i, atomic_t * v)
6311 *
6312 * Atomically subtracts @i from @v.
6313 */
6314-static __inline__ void atomic_sub(int i, atomic_t * v)
6315+static __inline__ void atomic_sub(int i, atomic_t *v)
6316+{
6317+ int temp;
6318+
6319+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6320+ __asm__ __volatile__(
6321+ " .set mips3 \n"
6322+ "1: ll %0, %1 # atomic64_sub \n"
6323+#ifdef CONFIG_PAX_REFCOUNT
6324+ /* Exception on overflow. */
6325+ "2: sub %0, %2 \n"
6326+#else
6327+ " subu %0, %2 \n"
6328+#endif
6329+ " sc %0, %1 \n"
6330+ " beqzl %0, 1b \n"
6331+#ifdef CONFIG_PAX_REFCOUNT
6332+ "3: \n"
6333+ _ASM_EXTABLE(2b, 3b)
6334+#endif
6335+ " .set mips0 \n"
6336+ : "=&r" (temp), "+m" (v->counter)
6337+ : "Ir" (i));
6338+ } else if (kernel_uses_llsc) {
6339+ __asm__ __volatile__(
6340+ " .set mips3 \n"
6341+ "1: ll %0, %1 # atomic64_sub \n"
6342+#ifdef CONFIG_PAX_REFCOUNT
6343+ /* Exception on overflow. */
6344+ "2: sub %0, %2 \n"
6345+#else
6346+ " subu %0, %2 \n"
6347+#endif
6348+ " sc %0, %1 \n"
6349+ " beqz %0, 1b \n"
6350+#ifdef CONFIG_PAX_REFCOUNT
6351+ "3: \n"
6352+ _ASM_EXTABLE(2b, 3b)
6353+#endif
6354+ " .set mips0 \n"
6355+ : "=&r" (temp), "+m" (v->counter)
6356+ : "Ir" (i));
6357+ } else {
6358+ unsigned long flags;
6359+
6360+ raw_local_irq_save(flags);
6361+ __asm__ __volatile__(
6362+#ifdef CONFIG_PAX_REFCOUNT
6363+ /* Exception on overflow. */
6364+ "1: sub %0, %1 \n"
6365+ "2: \n"
6366+ _ASM_EXTABLE(1b, 2b)
6367+#else
6368+ " subu %0, %1 \n"
6369+#endif
6370+ : "+r" (v->counter) : "Ir" (i));
6371+ raw_local_irq_restore(flags);
6372+ }
6373+}
6374+
6375+static __inline__ void atomic_sub_unchecked(long i, atomic_unchecked_t *v)
6376 {
6377 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6378 int temp;
6379@@ -129,7 +281,93 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
6380 /*
6381 * Same as above, but return the result value
6382 */
6383-static __inline__ int atomic_add_return(int i, atomic_t * v)
6384+static __inline__ int atomic_add_return(int i, atomic_t *v)
6385+{
6386+ int result;
6387+ int temp;
6388+
6389+ smp_mb__before_llsc();
6390+
6391+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6392+ __asm__ __volatile__(
6393+ " .set mips3 \n"
6394+ "1: ll %1, %2 # atomic_add_return \n"
6395+#ifdef CONFIG_PAX_REFCOUNT
6396+ "2: add %0, %1, %3 \n"
6397+#else
6398+ " addu %0, %1, %3 \n"
6399+#endif
6400+ " sc %0, %2 \n"
6401+ " beqzl %0, 1b \n"
6402+#ifdef CONFIG_PAX_REFCOUNT
6403+ " b 4f \n"
6404+ " .set noreorder \n"
6405+ "3: b 5f \n"
6406+ " move %0, %1 \n"
6407+ " .set reorder \n"
6408+ _ASM_EXTABLE(2b, 3b)
6409+#endif
6410+ "4: addu %0, %1, %3 \n"
6411+#ifdef CONFIG_PAX_REFCOUNT
6412+ "5: \n"
6413+#endif
6414+ " .set mips0 \n"
6415+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6416+ : "Ir" (i));
6417+ } else if (kernel_uses_llsc) {
6418+ __asm__ __volatile__(
6419+ " .set mips3 \n"
6420+ "1: ll %1, %2 # atomic_add_return \n"
6421+#ifdef CONFIG_PAX_REFCOUNT
6422+ "2: add %0, %1, %3 \n"
6423+#else
6424+ " addu %0, %1, %3 \n"
6425+#endif
6426+ " sc %0, %2 \n"
6427+ " bnez %0, 4f \n"
6428+ " b 1b \n"
6429+#ifdef CONFIG_PAX_REFCOUNT
6430+ " .set noreorder \n"
6431+ "3: b 5f \n"
6432+ " move %0, %1 \n"
6433+ " .set reorder \n"
6434+ _ASM_EXTABLE(2b, 3b)
6435+#endif
6436+ "4: addu %0, %1, %3 \n"
6437+#ifdef CONFIG_PAX_REFCOUNT
6438+ "5: \n"
6439+#endif
6440+ " .set mips0 \n"
6441+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6442+ : "Ir" (i));
6443+ } else {
6444+ unsigned long flags;
6445+
6446+ raw_local_irq_save(flags);
6447+ __asm__ __volatile__(
6448+ " lw %0, %1 \n"
6449+#ifdef CONFIG_PAX_REFCOUNT
6450+ /* Exception on overflow. */
6451+ "1: add %0, %2 \n"
6452+#else
6453+ " addu %0, %2 \n"
6454+#endif
6455+ " sw %0, %1 \n"
6456+#ifdef CONFIG_PAX_REFCOUNT
6457+ /* Note: Dest reg is not modified on overflow */
6458+ "2: \n"
6459+ _ASM_EXTABLE(1b, 2b)
6460+#endif
6461+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6462+ raw_local_irq_restore(flags);
6463+ }
6464+
6465+ smp_llsc_mb();
6466+
6467+ return result;
6468+}
6469+
6470+static __inline__ int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6471 {
6472 int result;
6473
6474@@ -178,7 +416,93 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
6475 return result;
6476 }
6477
6478-static __inline__ int atomic_sub_return(int i, atomic_t * v)
6479+static __inline__ int atomic_sub_return(int i, atomic_t *v)
6480+{
6481+ int result;
6482+ int temp;
6483+
6484+ smp_mb__before_llsc();
6485+
6486+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6487+ __asm__ __volatile__(
6488+ " .set mips3 \n"
6489+ "1: ll %1, %2 # atomic_sub_return \n"
6490+#ifdef CONFIG_PAX_REFCOUNT
6491+ "2: sub %0, %1, %3 \n"
6492+#else
6493+ " subu %0, %1, %3 \n"
6494+#endif
6495+ " sc %0, %2 \n"
6496+ " beqzl %0, 1b \n"
6497+#ifdef CONFIG_PAX_REFCOUNT
6498+ " b 4f \n"
6499+ " .set noreorder \n"
6500+ "3: b 5f \n"
6501+ " move %0, %1 \n"
6502+ " .set reorder \n"
6503+ _ASM_EXTABLE(2b, 3b)
6504+#endif
6505+ "4: subu %0, %1, %3 \n"
6506+#ifdef CONFIG_PAX_REFCOUNT
6507+ "5: \n"
6508+#endif
6509+ " .set mips0 \n"
6510+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6511+ : "Ir" (i), "m" (v->counter)
6512+ : "memory");
6513+ } else if (kernel_uses_llsc) {
6514+ __asm__ __volatile__(
6515+ " .set mips3 \n"
6516+ "1: ll %1, %2 # atomic_sub_return \n"
6517+#ifdef CONFIG_PAX_REFCOUNT
6518+ "2: sub %0, %1, %3 \n"
6519+#else
6520+ " subu %0, %1, %3 \n"
6521+#endif
6522+ " sc %0, %2 \n"
6523+ " bnez %0, 4f \n"
6524+ " b 1b \n"
6525+#ifdef CONFIG_PAX_REFCOUNT
6526+ " .set noreorder \n"
6527+ "3: b 5f \n"
6528+ " move %0, %1 \n"
6529+ " .set reorder \n"
6530+ _ASM_EXTABLE(2b, 3b)
6531+#endif
6532+ "4: subu %0, %1, %3 \n"
6533+#ifdef CONFIG_PAX_REFCOUNT
6534+ "5: \n"
6535+#endif
6536+ " .set mips0 \n"
6537+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6538+ : "Ir" (i));
6539+ } else {
6540+ unsigned long flags;
6541+
6542+ raw_local_irq_save(flags);
6543+ __asm__ __volatile__(
6544+ " lw %0, %1 \n"
6545+#ifdef CONFIG_PAX_REFCOUNT
6546+ /* Exception on overflow. */
6547+ "1: sub %0, %2 \n"
6548+#else
6549+ " subu %0, %2 \n"
6550+#endif
6551+ " sw %0, %1 \n"
6552+#ifdef CONFIG_PAX_REFCOUNT
6553+ /* Note: Dest reg is not modified on overflow */
6554+ "2: \n"
6555+ _ASM_EXTABLE(1b, 2b)
6556+#endif
6557+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6558+ raw_local_irq_restore(flags);
6559+ }
6560+
6561+ smp_llsc_mb();
6562+
6563+ return result;
6564+}
6565+static __inline__ int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v)
6566 {
6567 int result;
6568
6569@@ -238,7 +562,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
6570 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6571 * The function returns the old value of @v minus @i.
6572 */
6573-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6574+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
6575 {
6576 int result;
6577
6578@@ -295,8 +619,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6579 return result;
6580 }
6581
6582-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
6583-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
6584+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6585+{
6586+ return cmpxchg(&v->counter, old, new);
6587+}
6588+
6589+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
6590+ int new)
6591+{
6592+ return cmpxchg(&(v->counter), old, new);
6593+}
6594+
6595+static inline int atomic_xchg(atomic_t *v, int new)
6596+{
6597+ return xchg(&v->counter, new);
6598+}
6599+
6600+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6601+{
6602+ return xchg(&(v->counter), new);
6603+}
6604
6605 /**
6606 * __atomic_add_unless - add unless the number is a given value
6607@@ -324,6 +666,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6608
6609 #define atomic_dec_return(v) atomic_sub_return(1, (v))
6610 #define atomic_inc_return(v) atomic_add_return(1, (v))
6611+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6612+{
6613+ return atomic_add_return_unchecked(1, v);
6614+}
6615
6616 /*
6617 * atomic_sub_and_test - subtract value from variable and test result
6618@@ -345,6 +691,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6619 * other cases.
6620 */
6621 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6622+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6623+{
6624+ return atomic_add_return_unchecked(1, v) == 0;
6625+}
6626
6627 /*
6628 * atomic_dec_and_test - decrement by 1 and test
6629@@ -369,6 +719,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6630 * Atomically increments @v by 1.
6631 */
6632 #define atomic_inc(v) atomic_add(1, (v))
6633+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
6634+{
6635+ atomic_add_unchecked(1, v);
6636+}
6637
6638 /*
6639 * atomic_dec - decrement and test
6640@@ -377,6 +731,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6641 * Atomically decrements @v by 1.
6642 */
6643 #define atomic_dec(v) atomic_sub(1, (v))
6644+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6645+{
6646+ atomic_sub_unchecked(1, v);
6647+}
6648
6649 /*
6650 * atomic_add_negative - add and test if negative
6651@@ -398,14 +756,30 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6652 * @v: pointer of type atomic64_t
6653 *
6654 */
6655-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
6656+static inline long atomic64_read(const atomic64_t *v)
6657+{
6658+ return (*(volatile const long *) &v->counter);
6659+}
6660+
6661+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6662+{
6663+ return (*(volatile const long *) &v->counter);
6664+}
6665
6666 /*
6667 * atomic64_set - set atomic variable
6668 * @v: pointer of type atomic64_t
6669 * @i: required value
6670 */
6671-#define atomic64_set(v, i) ((v)->counter = (i))
6672+static inline void atomic64_set(atomic64_t *v, long i)
6673+{
6674+ v->counter = i;
6675+}
6676+
6677+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6678+{
6679+ v->counter = i;
6680+}
6681
6682 /*
6683 * atomic64_add - add integer to atomic variable
6684@@ -414,7 +788,66 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6685 *
6686 * Atomically adds @i to @v.
6687 */
6688-static __inline__ void atomic64_add(long i, atomic64_t * v)
6689+static __inline__ void atomic64_add(long i, atomic64_t *v)
6690+{
6691+ long temp;
6692+
6693+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6694+ __asm__ __volatile__(
6695+ " .set mips3 \n"
6696+ "1: lld %0, %1 # atomic64_add \n"
6697+#ifdef CONFIG_PAX_REFCOUNT
6698+ /* Exception on overflow. */
6699+ "2: dadd %0, %2 \n"
6700+#else
6701+ " daddu %0, %2 \n"
6702+#endif
6703+ " scd %0, %1 \n"
6704+ " beqzl %0, 1b \n"
6705+#ifdef CONFIG_PAX_REFCOUNT
6706+ "3: \n"
6707+ _ASM_EXTABLE(2b, 3b)
6708+#endif
6709+ " .set mips0 \n"
6710+ : "=&r" (temp), "+m" (v->counter)
6711+ : "Ir" (i));
6712+ } else if (kernel_uses_llsc) {
6713+ __asm__ __volatile__(
6714+ " .set mips3 \n"
6715+ "1: lld %0, %1 # atomic64_add \n"
6716+#ifdef CONFIG_PAX_REFCOUNT
6717+ /* Exception on overflow. */
6718+ "2: dadd %0, %2 \n"
6719+#else
6720+ " daddu %0, %2 \n"
6721+#endif
6722+ " scd %0, %1 \n"
6723+ " beqz %0, 1b \n"
6724+#ifdef CONFIG_PAX_REFCOUNT
6725+ "3: \n"
6726+ _ASM_EXTABLE(2b, 3b)
6727+#endif
6728+ " .set mips0 \n"
6729+ : "=&r" (temp), "+m" (v->counter)
6730+ : "Ir" (i));
6731+ } else {
6732+ unsigned long flags;
6733+
6734+ raw_local_irq_save(flags);
6735+ __asm__ __volatile__(
6736+#ifdef CONFIG_PAX_REFCOUNT
6737+ /* Exception on overflow. */
6738+ "1: dadd %0, %1 \n"
6739+ "2: \n"
6740+ _ASM_EXTABLE(1b, 2b)
6741+#else
6742+ " daddu %0, %1 \n"
6743+#endif
6744+ : "+r" (v->counter) : "Ir" (i));
6745+ raw_local_irq_restore(flags);
6746+ }
6747+}
6748+static __inline__ void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6749 {
6750 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6751 long temp;
6752@@ -457,7 +890,67 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
6753 *
6754 * Atomically subtracts @i from @v.
6755 */
6756-static __inline__ void atomic64_sub(long i, atomic64_t * v)
6757+static __inline__ void atomic64_sub(long i, atomic64_t *v)
6758+{
6759+ long temp;
6760+
6761+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6762+ __asm__ __volatile__(
6763+ " .set mips3 \n"
6764+ "1: lld %0, %1 # atomic64_sub \n"
6765+#ifdef CONFIG_PAX_REFCOUNT
6766+ /* Exception on overflow. */
6767+ "2: dsub %0, %2 \n"
6768+#else
6769+ " dsubu %0, %2 \n"
6770+#endif
6771+ " scd %0, %1 \n"
6772+ " beqzl %0, 1b \n"
6773+#ifdef CONFIG_PAX_REFCOUNT
6774+ "3: \n"
6775+ _ASM_EXTABLE(2b, 3b)
6776+#endif
6777+ " .set mips0 \n"
6778+ : "=&r" (temp), "+m" (v->counter)
6779+ : "Ir" (i));
6780+ } else if (kernel_uses_llsc) {
6781+ __asm__ __volatile__(
6782+ " .set mips3 \n"
6783+ "1: lld %0, %1 # atomic64_sub \n"
6784+#ifdef CONFIG_PAX_REFCOUNT
6785+ /* Exception on overflow. */
6786+ "2: dsub %0, %2 \n"
6787+#else
6788+ " dsubu %0, %2 \n"
6789+#endif
6790+ " scd %0, %1 \n"
6791+ " beqz %0, 1b \n"
6792+#ifdef CONFIG_PAX_REFCOUNT
6793+ "3: \n"
6794+ _ASM_EXTABLE(2b, 3b)
6795+#endif
6796+ " .set mips0 \n"
6797+ : "=&r" (temp), "+m" (v->counter)
6798+ : "Ir" (i));
6799+ } else {
6800+ unsigned long flags;
6801+
6802+ raw_local_irq_save(flags);
6803+ __asm__ __volatile__(
6804+#ifdef CONFIG_PAX_REFCOUNT
6805+ /* Exception on overflow. */
6806+ "1: dsub %0, %1 \n"
6807+ "2: \n"
6808+ _ASM_EXTABLE(1b, 2b)
6809+#else
6810+ " dsubu %0, %1 \n"
6811+#endif
6812+ : "+r" (v->counter) : "Ir" (i));
6813+ raw_local_irq_restore(flags);
6814+ }
6815+}
6816+
6817+static __inline__ void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6818 {
6819 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6820 long temp;
6821@@ -496,7 +989,93 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
6822 /*
6823 * Same as above, but return the result value
6824 */
6825-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6826+static __inline__ long atomic64_add_return(long i, atomic64_t *v)
6827+{
6828+ long result;
6829+ long temp;
6830+
6831+ smp_mb__before_llsc();
6832+
6833+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6834+ __asm__ __volatile__(
6835+ " .set mips3 \n"
6836+ "1: lld %1, %2 # atomic64_add_return \n"
6837+#ifdef CONFIG_PAX_REFCOUNT
6838+ "2: dadd %0, %1, %3 \n"
6839+#else
6840+ " daddu %0, %1, %3 \n"
6841+#endif
6842+ " scd %0, %2 \n"
6843+ " beqzl %0, 1b \n"
6844+#ifdef CONFIG_PAX_REFCOUNT
6845+ " b 4f \n"
6846+ " .set noreorder \n"
6847+ "3: b 5f \n"
6848+ " move %0, %1 \n"
6849+ " .set reorder \n"
6850+ _ASM_EXTABLE(2b, 3b)
6851+#endif
6852+ "4: daddu %0, %1, %3 \n"
6853+#ifdef CONFIG_PAX_REFCOUNT
6854+ "5: \n"
6855+#endif
6856+ " .set mips0 \n"
6857+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6858+ : "Ir" (i));
6859+ } else if (kernel_uses_llsc) {
6860+ __asm__ __volatile__(
6861+ " .set mips3 \n"
6862+ "1: lld %1, %2 # atomic64_add_return \n"
6863+#ifdef CONFIG_PAX_REFCOUNT
6864+ "2: dadd %0, %1, %3 \n"
6865+#else
6866+ " daddu %0, %1, %3 \n"
6867+#endif
6868+ " scd %0, %2 \n"
6869+ " bnez %0, 4f \n"
6870+ " b 1b \n"
6871+#ifdef CONFIG_PAX_REFCOUNT
6872+ " .set noreorder \n"
6873+ "3: b 5f \n"
6874+ " move %0, %1 \n"
6875+ " .set reorder \n"
6876+ _ASM_EXTABLE(2b, 3b)
6877+#endif
6878+ "4: daddu %0, %1, %3 \n"
6879+#ifdef CONFIG_PAX_REFCOUNT
6880+ "5: \n"
6881+#endif
6882+ " .set mips0 \n"
6883+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6884+ : "Ir" (i), "m" (v->counter)
6885+ : "memory");
6886+ } else {
6887+ unsigned long flags;
6888+
6889+ raw_local_irq_save(flags);
6890+ __asm__ __volatile__(
6891+ " ld %0, %1 \n"
6892+#ifdef CONFIG_PAX_REFCOUNT
6893+ /* Exception on overflow. */
6894+ "1: dadd %0, %2 \n"
6895+#else
6896+ " daddu %0, %2 \n"
6897+#endif
6898+ " sd %0, %1 \n"
6899+#ifdef CONFIG_PAX_REFCOUNT
6900+ /* Note: Dest reg is not modified on overflow */
6901+ "2: \n"
6902+ _ASM_EXTABLE(1b, 2b)
6903+#endif
6904+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6905+ raw_local_irq_restore(flags);
6906+ }
6907+
6908+ smp_llsc_mb();
6909+
6910+ return result;
6911+}
6912+static __inline__ long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6913 {
6914 long result;
6915
6916@@ -546,7 +1125,97 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6917 return result;
6918 }
6919
6920-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6921+static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
6922+{
6923+ long result;
6924+ long temp;
6925+
6926+ smp_mb__before_llsc();
6927+
6928+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6929+ long temp;
6930+
6931+ __asm__ __volatile__(
6932+ " .set mips3 \n"
6933+ "1: lld %1, %2 # atomic64_sub_return \n"
6934+#ifdef CONFIG_PAX_REFCOUNT
6935+ "2: dsub %0, %1, %3 \n"
6936+#else
6937+ " dsubu %0, %1, %3 \n"
6938+#endif
6939+ " scd %0, %2 \n"
6940+ " beqzl %0, 1b \n"
6941+#ifdef CONFIG_PAX_REFCOUNT
6942+ " b 4f \n"
6943+ " .set noreorder \n"
6944+ "3: b 5f \n"
6945+ " move %0, %1 \n"
6946+ " .set reorder \n"
6947+ _ASM_EXTABLE(2b, 3b)
6948+#endif
6949+ "4: dsubu %0, %1, %3 \n"
6950+#ifdef CONFIG_PAX_REFCOUNT
6951+ "5: \n"
6952+#endif
6953+ " .set mips0 \n"
6954+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6955+ : "Ir" (i), "m" (v->counter)
6956+ : "memory");
6957+ } else if (kernel_uses_llsc) {
6958+ __asm__ __volatile__(
6959+ " .set mips3 \n"
6960+ "1: lld %1, %2 # atomic64_sub_return \n"
6961+#ifdef CONFIG_PAX_REFCOUNT
6962+ "2: dsub %0, %1, %3 \n"
6963+#else
6964+ " dsubu %0, %1, %3 \n"
6965+#endif
6966+ " scd %0, %2 \n"
6967+ " bnez %0, 4f \n"
6968+ " b 1b \n"
6969+#ifdef CONFIG_PAX_REFCOUNT
6970+ " .set noreorder \n"
6971+ "3: b 5f \n"
6972+ " move %0, %1 \n"
6973+ " .set reorder \n"
6974+ _ASM_EXTABLE(2b, 3b)
6975+#endif
6976+ "4: dsubu %0, %1, %3 \n"
6977+#ifdef CONFIG_PAX_REFCOUNT
6978+ "5: \n"
6979+#endif
6980+ " .set mips0 \n"
6981+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6982+ : "Ir" (i), "m" (v->counter)
6983+ : "memory");
6984+ } else {
6985+ unsigned long flags;
6986+
6987+ raw_local_irq_save(flags);
6988+ __asm__ __volatile__(
6989+ " ld %0, %1 \n"
6990+#ifdef CONFIG_PAX_REFCOUNT
6991+ /* Exception on overflow. */
6992+ "1: dsub %0, %2 \n"
6993+#else
6994+ " dsubu %0, %2 \n"
6995+#endif
6996+ " sd %0, %1 \n"
6997+#ifdef CONFIG_PAX_REFCOUNT
6998+ /* Note: Dest reg is not modified on overflow */
6999+ "2: \n"
7000+ _ASM_EXTABLE(1b, 2b)
7001+#endif
7002+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
7003+ raw_local_irq_restore(flags);
7004+ }
7005+
7006+ smp_llsc_mb();
7007+
7008+ return result;
7009+}
7010+
7011+static __inline__ long atomic64_sub_return_unchecked(long i, atomic64_unchecked_t *v)
7012 {
7013 long result;
7014
7015@@ -605,7 +1274,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
7016 * Atomically test @v and subtract @i if @v is greater or equal than @i.
7017 * The function returns the old value of @v minus @i.
7018 */
7019-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
7020+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
7021 {
7022 long result;
7023
7024@@ -662,9 +1331,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
7025 return result;
7026 }
7027
7028-#define atomic64_cmpxchg(v, o, n) \
7029- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
7030-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
7031+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7032+{
7033+ return cmpxchg(&v->counter, old, new);
7034+}
7035+
7036+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
7037+ long new)
7038+{
7039+ return cmpxchg(&(v->counter), old, new);
7040+}
7041+
7042+static inline long atomic64_xchg(atomic64_t *v, long new)
7043+{
7044+ return xchg(&v->counter, new);
7045+}
7046+
7047+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7048+{
7049+ return xchg(&(v->counter), new);
7050+}
7051
7052 /**
7053 * atomic64_add_unless - add unless the number is a given value
7054@@ -694,6 +1380,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
7055
7056 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
7057 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
7058+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
7059
7060 /*
7061 * atomic64_sub_and_test - subtract value from variable and test result
7062@@ -715,6 +1402,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
7063 * other cases.
7064 */
7065 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
7066+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
7067
7068 /*
7069 * atomic64_dec_and_test - decrement by 1 and test
7070@@ -739,6 +1427,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
7071 * Atomically increments @v by 1.
7072 */
7073 #define atomic64_inc(v) atomic64_add(1, (v))
7074+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
7075
7076 /*
7077 * atomic64_dec - decrement and test
7078@@ -747,6 +1436,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
7079 * Atomically decrements @v by 1.
7080 */
7081 #define atomic64_dec(v) atomic64_sub(1, (v))
7082+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
7083
7084 /*
7085 * atomic64_add_negative - add and test if negative
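[editor's note] The atomic.h hunks above implement PAX_REFCOUNT on MIPS by swapping the wrapping daddu/dsubu instructions for the trapping dadd/dsub, which raise an integer-overflow exception on signed overflow; the _ASM_EXTABLE entries route that exception to a local fixup, and counters whose wraparound is deliberate move to the *_unchecked variants. A minimal userspace C analogue of the checked/unchecked split (names prefixed with x are hypothetical; this models the semantics only, using GCC/Clang's __builtin_add_overflow, not the kernel implementation):

/* Sketch only: models the checked/unchecked semantics, not the kernel code. */
#include <stdlib.h>

typedef struct { int counter; } xatomic_t;           /* checked: traps   */
typedef struct { int counter; } xatomic_unchecked_t; /* wraps silently   */

static void xatomic_add(int i, xatomic_t *v)
{
	int sum;

	/* stands in for the trapping dadd + exception-table fixup */
	if (__builtin_add_overflow(v->counter, i, &sum))
		abort();	/* the kernel reports the event instead */
	v->counter = sum;
}

static void xatomic_add_unchecked(int i, xatomic_unchecked_t *v)
{
	/* stands in for daddu: wraparound done in unsigned arithmetic,
	 * which is well defined (signed overflow would be UB in C) */
	v->counter = (int)((unsigned int)v->counter + (unsigned int)i);
}
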
7086diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
7087index d0101dd..266982c 100644
7088--- a/arch/mips/include/asm/barrier.h
7089+++ b/arch/mips/include/asm/barrier.h
7090@@ -184,7 +184,7 @@
7091 do { \
7092 compiletime_assert_atomic_type(*p); \
7093 smp_mb(); \
7094- ACCESS_ONCE(*p) = (v); \
7095+ ACCESS_ONCE_RW(*p) = (v); \
7096 } while (0)
7097
7098 #define smp_load_acquire(p) \
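[editor's note] smp_store_release() stores through ACCESS_ONCE(), which this patch set turns into a read-only accessor elsewhere (in a linux/compiler.h hunk that is not part of this excerpt); intentional racy writes must spell out ACCESS_ONCE_RW(). Assumed shapes of the two macros, shown for illustration only:

/* assumed definitions, for illustration: */
#define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x)) /* read-only */
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))       /* writable  */

/* ACCESS_ONCE(flag) = 1;     -- now a compile error: assignment to const */
/* ACCESS_ONCE_RW(flag) = 1;  -- compiles: an explicitly intended write   */
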
7099diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
7100index b4db69f..8f3b093 100644
7101--- a/arch/mips/include/asm/cache.h
7102+++ b/arch/mips/include/asm/cache.h
7103@@ -9,10 +9,11 @@
7104 #ifndef _ASM_CACHE_H
7105 #define _ASM_CACHE_H
7106
7107+#include <linux/const.h>
7108 #include <kmalloc.h>
7109
7110 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
7111-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7112+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7113
7114 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
7115 #define SMP_CACHE_BYTES L1_CACHE_BYTES
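[editor's note] The _AC(1,UL) wrapper comes from <linux/const.h> (hence the added include): in C it token-pastes the UL suffix so the shift is performed in unsigned long, while under __ASSEMBLY__, where 1UL is not a valid token, it degrades to a bare 1. That keeps L1_CACHE_BYTES usable from both C and .S files:

/* from include/uapi/linux/const.h */
#ifdef __ASSEMBLY__
#define _AC(X,Y)	X		/* "1" for the assembler */
#else
#define __AC(X,Y)	(X##Y)
#define _AC(X,Y)	__AC(X,Y)	/* "(1UL)" for C */
#endif
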
7116diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
7117index d414405..6bb4ba2 100644
7118--- a/arch/mips/include/asm/elf.h
7119+++ b/arch/mips/include/asm/elf.h
7120@@ -398,13 +398,16 @@ extern const char *__elf_platform;
7121 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7122 #endif
7123
7124+#ifdef CONFIG_PAX_ASLR
7125+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
7126+
7127+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7128+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7129+#endif
7130+
7131 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
7132 struct linux_binprm;
7133 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
7134 int uses_interp);
7135
7136-struct mm_struct;
7137-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7138-#define arch_randomize_brk arch_randomize_brk
7139-
7140 #endif /* _ASM_ELF_H */
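[editor's note] PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits (27 above PAGE_SHIFT for 32-bit tasks, 36 for 64-bit) folded into the mmap and stack bases when PAX_ASLR is active. Elsewhere in the patch the per-mm deltas are derived roughly along these lines (a sketch, not the verbatim code):

/* sketch of the per-mm randomization deltas (not the verbatim code) */
mm->delta_mmap  = (pax_get_random_long() &
		   ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
mm->delta_stack = (pax_get_random_long() &
		   ((1UL << PAX_DELTA_STACK_LEN) - 1)) << PAGE_SHIFT;
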
7141diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
7142index c1f6afa..38cc6e9 100644
7143--- a/arch/mips/include/asm/exec.h
7144+++ b/arch/mips/include/asm/exec.h
7145@@ -12,6 +12,6 @@
7146 #ifndef _ASM_EXEC_H
7147 #define _ASM_EXEC_H
7148
7149-extern unsigned long arch_align_stack(unsigned long sp);
7150+#define arch_align_stack(x) ((x) & ~0xfUL)
7151
7152 #endif /* _ASM_EXEC_H */
7153diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
7154index 9e8ef59..1139d6b 100644
7155--- a/arch/mips/include/asm/hw_irq.h
7156+++ b/arch/mips/include/asm/hw_irq.h
7157@@ -10,7 +10,7 @@
7158
7159 #include <linux/atomic.h>
7160
7161-extern atomic_t irq_err_count;
7162+extern atomic_unchecked_t irq_err_count;
7163
7164 /*
7165 * interrupt-retrigger: NOP for now. This may not be appropriate for all
7166diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
7167index 46dfc3c..a16b13a 100644
7168--- a/arch/mips/include/asm/local.h
7169+++ b/arch/mips/include/asm/local.h
7170@@ -12,15 +12,25 @@ typedef struct
7171 atomic_long_t a;
7172 } local_t;
7173
7174+typedef struct {
7175+ atomic_long_unchecked_t a;
7176+} local_unchecked_t;
7177+
7178 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
7179
7180 #define local_read(l) atomic_long_read(&(l)->a)
7181+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
7182 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
7183+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
7184
7185 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
7186+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
7187 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
7188+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
7189 #define local_inc(l) atomic_long_inc(&(l)->a)
7190+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
7191 #define local_dec(l) atomic_long_dec(&(l)->a)
7192+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
7193
7194 /*
7195 * Same as above, but return the result value
7196@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
7197 return result;
7198 }
7199
7200+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
7201+{
7202+ unsigned long result;
7203+
7204+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
7205+ unsigned long temp;
7206+
7207+ __asm__ __volatile__(
7208+ " .set mips3 \n"
7209+ "1:" __LL "%1, %2 # local_add_return \n"
7210+ " addu %0, %1, %3 \n"
7211+ __SC "%0, %2 \n"
7212+ " beqzl %0, 1b \n"
7213+ " addu %0, %1, %3 \n"
7214+ " .set mips0 \n"
7215+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
7216+ : "Ir" (i), "m" (l->a.counter)
7217+ : "memory");
7218+ } else if (kernel_uses_llsc) {
7219+ unsigned long temp;
7220+
7221+ __asm__ __volatile__(
7222+ " .set mips3 \n"
7223+ "1:" __LL "%1, %2 # local_add_return \n"
7224+ " addu %0, %1, %3 \n"
7225+ __SC "%0, %2 \n"
7226+ " beqz %0, 1b \n"
7227+ " addu %0, %1, %3 \n"
7228+ " .set mips0 \n"
7229+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
7230+ : "Ir" (i), "m" (l->a.counter)
7231+ : "memory");
7232+ } else {
7233+ unsigned long flags;
7234+
7235+ local_irq_save(flags);
7236+ result = l->a.counter;
7237+ result += i;
7238+ l->a.counter = result;
7239+ local_irq_restore(flags);
7240+ }
7241+
7242+ return result;
7243+}
7244+
7245 static __inline__ long local_sub_return(long i, local_t * l)
7246 {
7247 unsigned long result;
7248@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
7249
7250 #define local_cmpxchg(l, o, n) \
7251 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
7252+#define local_cmpxchg_unchecked(l, o, n) \
7253+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
7254 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
7255
7256 /**
7257diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
7258index 5699ec3..95def83 100644
7259--- a/arch/mips/include/asm/page.h
7260+++ b/arch/mips/include/asm/page.h
7261@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
7262 #ifdef CONFIG_CPU_MIPS32
7263 typedef struct { unsigned long pte_low, pte_high; } pte_t;
7264 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
7265- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
7266+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
7267 #else
7268 typedef struct { unsigned long long pte; } pte_t;
7269 #define pte_val(x) ((x).pte)
7270diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
7271index b336037..5b874cc 100644
7272--- a/arch/mips/include/asm/pgalloc.h
7273+++ b/arch/mips/include/asm/pgalloc.h
7274@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
7275 {
7276 set_pud(pud, __pud((unsigned long)pmd));
7277 }
7278+
7279+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
7280+{
7281+ pud_populate(mm, pud, pmd);
7282+}
7283 #endif
7284
7285 /*
7286diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
7287index 539ddd1..8783f9a 100644
7288--- a/arch/mips/include/asm/pgtable.h
7289+++ b/arch/mips/include/asm/pgtable.h
7290@@ -20,6 +20,9 @@
7291 #include <asm/io.h>
7292 #include <asm/pgtable-bits.h>
7293
7294+#define ktla_ktva(addr) (addr)
7295+#define ktva_ktla(addr) (addr)
7296+
7297 struct mm_struct;
7298 struct vm_area_struct;
7299
7300diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
7301index 7de8658..c109224 100644
7302--- a/arch/mips/include/asm/thread_info.h
7303+++ b/arch/mips/include/asm/thread_info.h
7304@@ -105,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
7305 #define TIF_SECCOMP 4 /* secure computing */
7306 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
7307 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
7308+/* li takes a 32bit immediate */
7309+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
7310+
7311 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
7312 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
7313 #define TIF_NOHZ 19 /* in adaptive nohz mode */
7314@@ -138,14 +141,16 @@ static inline struct thread_info *current_thread_info(void)
7315 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
7316 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
7317 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
7318+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
7319
7320 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
7321 _TIF_SYSCALL_AUDIT | \
7322- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
7323+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
7324+ _TIF_GRSEC_SETXID)
7325
7326 /* work to do in syscall_trace_leave() */
7327 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
7328- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
7329+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
7330
7331 /* work to do on interrupt/exception return */
7332 #define _TIF_WORK_MASK \
7333@@ -153,7 +158,7 @@ static inline struct thread_info *current_thread_info(void)
7334 /* work to do on any return to u-space */
7335 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
7336 _TIF_WORK_SYSCALL_EXIT | \
7337- _TIF_SYSCALL_TRACEPOINT)
7338+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
7339
7340 /*
7341 * We stash processor id into a COP0 register to retrieve it fast
7342diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
7343index a109510..94ee3f6 100644
7344--- a/arch/mips/include/asm/uaccess.h
7345+++ b/arch/mips/include/asm/uaccess.h
7346@@ -130,6 +130,7 @@ extern u64 __ua_limit;
7347 __ok == 0; \
7348 })
7349
7350+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
7351 #define access_ok(type, addr, size) \
7352 likely(__access_ok((addr), (size), __access_mask))
7353
7354diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
7355index 1188e00..41cf144 100644
7356--- a/arch/mips/kernel/binfmt_elfn32.c
7357+++ b/arch/mips/kernel/binfmt_elfn32.c
7358@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
7359 #undef ELF_ET_DYN_BASE
7360 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
7361
7362+#ifdef CONFIG_PAX_ASLR
7363+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
7364+
7365+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7366+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7367+#endif
7368+
7369 #include <asm/processor.h>
7370 #include <linux/module.h>
7371 #include <linux/elfcore.h>
7372diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
7373index 71df942..199dd19 100644
7374--- a/arch/mips/kernel/binfmt_elfo32.c
7375+++ b/arch/mips/kernel/binfmt_elfo32.c
7376@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
7377 #undef ELF_ET_DYN_BASE
7378 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
7379
7380+#ifdef CONFIG_PAX_ASLR
7381+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
7382+
7383+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7384+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7385+#endif
7386+
7387 #include <asm/processor.h>
7388
7389 /* These MUST be defined before elf.h gets included */
7390diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
7391index 50b3648..c2f3cec 100644
7392--- a/arch/mips/kernel/i8259.c
7393+++ b/arch/mips/kernel/i8259.c
7394@@ -201,7 +201,7 @@ spurious_8259A_irq:
7395 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
7396 spurious_irq_mask |= irqmask;
7397 }
7398- atomic_inc(&irq_err_count);
7399+ atomic_inc_unchecked(&irq_err_count);
7400 /*
7401 * Theoretically we do not have to handle this IRQ,
7402 * but in Linux this does not cause problems and is
7403diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
7404index 44a1f79..2bd6aa3 100644
7405--- a/arch/mips/kernel/irq-gt641xx.c
7406+++ b/arch/mips/kernel/irq-gt641xx.c
7407@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
7408 }
7409 }
7410
7411- atomic_inc(&irq_err_count);
7412+ atomic_inc_unchecked(&irq_err_count);
7413 }
7414
7415 void __init gt641xx_irq_init(void)
7416diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
7417index d2bfbc2..a8eacd2 100644
7418--- a/arch/mips/kernel/irq.c
7419+++ b/arch/mips/kernel/irq.c
7420@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
7421 printk("unexpected IRQ # %d\n", irq);
7422 }
7423
7424-atomic_t irq_err_count;
7425+atomic_unchecked_t irq_err_count;
7426
7427 int arch_show_interrupts(struct seq_file *p, int prec)
7428 {
7429- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
7430+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
7431 return 0;
7432 }
7433
7434 asmlinkage void spurious_interrupt(void)
7435 {
7436- atomic_inc(&irq_err_count);
7437+ atomic_inc_unchecked(&irq_err_count);
7438 }
7439
7440 void __init init_IRQ(void)
7441@@ -109,7 +109,10 @@ void __init init_IRQ(void)
7442 #endif
7443 }
7444
7445+
7446 #ifdef DEBUG_STACKOVERFLOW
7447+extern void gr_handle_kernel_exploit(void);
7448+
7449 static inline void check_stack_overflow(void)
7450 {
7451 unsigned long sp;
7452@@ -125,6 +128,7 @@ static inline void check_stack_overflow(void)
7453 printk("do_IRQ: stack overflow: %ld\n",
7454 sp - sizeof(struct thread_info));
7455 dump_stack();
7456+ gr_handle_kernel_exploit();
7457 }
7458 }
7459 #else
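[editor's note] irq_err_count is a pure statistic: it only ever grows and nothing branches on its exact value, so wraparound is harmless. Since PAX_REFCOUNT makes every plain atomic_inc() trap on overflow, such counters are converted wholesale to the _unchecked variants here and in the interrupt controllers below. The type itself is just a parallel struct, defined in the linux/types.h hunk of this patch (shape reproduced here for orientation):

#ifdef CONFIG_PAX_REFCOUNT
typedef struct {
	int counter;
} atomic_unchecked_t;	/* same layout as atomic_t; its ops never trap */
#else
typedef atomic_t atomic_unchecked_t;
#endif
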
7460diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
7461index c4c2069..bde8051 100644
7462--- a/arch/mips/kernel/pm-cps.c
7463+++ b/arch/mips/kernel/pm-cps.c
7464@@ -168,7 +168,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
7465 nc_core_ready_count = nc_addr;
7466
7467 /* Ensure ready_count is zero-initialised before the assembly runs */
7468- ACCESS_ONCE(*nc_core_ready_count) = 0;
7469+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
7470 coupled_barrier(&per_cpu(pm_barrier, core), online);
7471
7472 /* Run the generated entry code */
7473diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
7474index 0a1ec0f..d9e93b6 100644
7475--- a/arch/mips/kernel/process.c
7476+++ b/arch/mips/kernel/process.c
7477@@ -572,15 +572,3 @@ unsigned long get_wchan(struct task_struct *task)
7478 out:
7479 return pc;
7480 }
7481-
7482-/*
7483- * Don't forget that the stack pointer must be aligned on a 8 bytes
7484- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
7485- */
7486-unsigned long arch_align_stack(unsigned long sp)
7487-{
7488- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7489- sp -= get_random_int() & ~PAGE_MASK;
7490-
7491- return sp & ALMASK;
7492-}
7493diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
7494index aae7119..8b16434 100644
7495--- a/arch/mips/kernel/ptrace.c
7496+++ b/arch/mips/kernel/ptrace.c
7497@@ -762,6 +762,10 @@ long arch_ptrace(struct task_struct *child, long request,
7498 return ret;
7499 }
7500
7501+#ifdef CONFIG_GRKERNSEC_SETXID
7502+extern void gr_delayed_cred_worker(void);
7503+#endif
7504+
7505 /*
7506 * Notification of system call entry/exit
7507 * - triggered by current->work.syscall_trace
7508@@ -778,6 +782,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
7509 tracehook_report_syscall_entry(regs))
7510 ret = -1;
7511
7512+#ifdef CONFIG_GRKERNSEC_SETXID
7513+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7514+ gr_delayed_cred_worker();
7515+#endif
7516+
7517 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
7518 trace_sys_enter(regs, regs->regs[2]);
7519
7520diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
7521index 07fc524..b9d7f28 100644
7522--- a/arch/mips/kernel/reset.c
7523+++ b/arch/mips/kernel/reset.c
7524@@ -13,6 +13,7 @@
7525 #include <linux/reboot.h>
7526
7527 #include <asm/reboot.h>
7528+#include <asm/bug.h>
7529
7530 /*
7531 * Urgs ... Too many MIPS machines to handle this in a generic way.
7532@@ -29,16 +30,19 @@ void machine_restart(char *command)
7533 {
7534 if (_machine_restart)
7535 _machine_restart(command);
7536+ BUG();
7537 }
7538
7539 void machine_halt(void)
7540 {
7541 if (_machine_halt)
7542 _machine_halt();
7543+ BUG();
7544 }
7545
7546 void machine_power_off(void)
7547 {
7548 if (pm_power_off)
7549 pm_power_off();
7550+ BUG();
7551 }
7552diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
7553index 2242bdd..b284048 100644
7554--- a/arch/mips/kernel/sync-r4k.c
7555+++ b/arch/mips/kernel/sync-r4k.c
7556@@ -18,8 +18,8 @@
7557 #include <asm/mipsregs.h>
7558
7559 static atomic_t count_start_flag = ATOMIC_INIT(0);
7560-static atomic_t count_count_start = ATOMIC_INIT(0);
7561-static atomic_t count_count_stop = ATOMIC_INIT(0);
7562+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
7563+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
7564 static atomic_t count_reference = ATOMIC_INIT(0);
7565
7566 #define COUNTON 100
7567@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
7568
7569 for (i = 0; i < NR_LOOPS; i++) {
7570 /* slaves loop on '!= 2' */
7571- while (atomic_read(&count_count_start) != 1)
7572+ while (atomic_read_unchecked(&count_count_start) != 1)
7573 mb();
7574- atomic_set(&count_count_stop, 0);
7575+ atomic_set_unchecked(&count_count_stop, 0);
7576 smp_wmb();
7577
7578 /* this lets the slaves write their count register */
7579- atomic_inc(&count_count_start);
7580+ atomic_inc_unchecked(&count_count_start);
7581
7582 /*
7583 * Everyone initialises count in the last loop:
7584@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
7585 /*
7586 * Wait for all slaves to leave the synchronization point:
7587 */
7588- while (atomic_read(&count_count_stop) != 1)
7589+ while (atomic_read_unchecked(&count_count_stop) != 1)
7590 mb();
7591- atomic_set(&count_count_start, 0);
7592+ atomic_set_unchecked(&count_count_start, 0);
7593 smp_wmb();
7594- atomic_inc(&count_count_stop);
7595+ atomic_inc_unchecked(&count_count_stop);
7596 }
7597 /* Arrange for an interrupt in a short while */
7598 write_c0_compare(read_c0_count() + COUNTON);
7599@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
7600 initcount = atomic_read(&count_reference);
7601
7602 for (i = 0; i < NR_LOOPS; i++) {
7603- atomic_inc(&count_count_start);
7604- while (atomic_read(&count_count_start) != 2)
7605+ atomic_inc_unchecked(&count_count_start);
7606+ while (atomic_read_unchecked(&count_count_start) != 2)
7607 mb();
7608
7609 /*
7610@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
7611 if (i == NR_LOOPS-1)
7612 write_c0_count(initcount);
7613
7614- atomic_inc(&count_count_stop);
7615- while (atomic_read(&count_count_stop) != 2)
7616+ atomic_inc_unchecked(&count_count_stop);
7617+ while (atomic_read_unchecked(&count_count_stop) != 2)
7618 mb();
7619 }
7620 /* Arrange for an interrupt in a short while */
7621diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
7622index 51706d6..ec1178c 100644
7623--- a/arch/mips/kernel/traps.c
7624+++ b/arch/mips/kernel/traps.c
7625@@ -687,7 +687,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
7626 siginfo_t info;
7627
7628 prev_state = exception_enter();
7629- die_if_kernel("Integer overflow", regs);
7630+ if (unlikely(!user_mode(regs))) {
7631+
7632+#ifdef CONFIG_PAX_REFCOUNT
7633+ if (fixup_exception(regs)) {
7634+ pax_report_refcount_overflow(regs);
7635+ exception_exit(prev_state);
7636+ return;
7637+ }
7638+#endif
7639+
7640+ die("Integer overflow", regs);
7641+ }
7642
7643 info.si_code = FPE_INTOVF;
7644 info.si_signo = SIGFPE;
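[editor's note] This traps.c hunk closes the loop on the trapping dadd/dsub instructions: a kernel-mode overflow exception lands in do_ov(), fixup_exception() looks the faulting PC up in __ex_table, and on a hit the event is reported via pax_report_refcount_overflow() and execution resumes at the fixup label instead of dying. The table entries come from the _ASM_EXTABLE() macro used throughout the atomic hunks; its MIPS form is roughly (a sketch, the exact spelling lives in the patch's asm.h hunk):

/* sketch of the MIPS _ASM_EXTABLE(); exact spelling is in the asm.h hunk */
#define _ASM_EXTABLE(from, to)			\
	".section __ex_table, \"a\"\n"		\
	"PTR\t" #from ", " #to "\n"		\
	".previous\n"
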
7645diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
7646index f3c56a1..6a2f01c 100644
7647--- a/arch/mips/kvm/kvm_mips.c
7648+++ b/arch/mips/kvm/kvm_mips.c
7649@@ -841,7 +841,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
7650 return r;
7651 }
7652
7653-int kvm_arch_init(void *opaque)
7654+int kvm_arch_init(const void *opaque)
7655 {
7656 int ret;
7657
7658diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
7659index becc42b..9e43d4b 100644
7660--- a/arch/mips/mm/fault.c
7661+++ b/arch/mips/mm/fault.c
7662@@ -28,6 +28,23 @@
7663 #include <asm/highmem.h> /* For VMALLOC_END */
7664 #include <linux/kdebug.h>
7665
7666+#ifdef CONFIG_PAX_PAGEEXEC
7667+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7668+{
7669+ unsigned long i;
7670+
7671+ printk(KERN_ERR "PAX: bytes at PC: ");
7672+ for (i = 0; i < 5; i++) {
7673+ unsigned int c;
7674+ if (get_user(c, (unsigned int *)pc+i))
7675+ printk(KERN_CONT "???????? ");
7676+ else
7677+ printk(KERN_CONT "%08x ", c);
7678+ }
7679+ printk("\n");
7680+}
7681+#endif
7682+
7683 /*
7684 * This routine handles page faults. It determines the address,
7685 * and the problem, and then passes it off to one of the appropriate
7686@@ -199,6 +216,14 @@ bad_area:
7687 bad_area_nosemaphore:
7688 /* User mode accesses just cause a SIGSEGV */
7689 if (user_mode(regs)) {
7690+
7691+#ifdef CONFIG_PAX_PAGEEXEC
7692+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
7693+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
7694+ do_group_exit(SIGKILL);
7695+ }
7696+#endif
7697+
7698 tsk->thread.cp0_badvaddr = address;
7699 tsk->thread.error_code = write;
7700 #if 0
7701diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
7702index f1baadd..5472dca 100644
7703--- a/arch/mips/mm/mmap.c
7704+++ b/arch/mips/mm/mmap.c
7705@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7706 struct vm_area_struct *vma;
7707 unsigned long addr = addr0;
7708 int do_color_align;
7709+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7710 struct vm_unmapped_area_info info;
7711
7712 if (unlikely(len > TASK_SIZE))
7713@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7714 do_color_align = 1;
7715
7716 /* requesting a specific address */
7717+
7718+#ifdef CONFIG_PAX_RANDMMAP
7719+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
7720+#endif
7721+
7722 if (addr) {
7723 if (do_color_align)
7724 addr = COLOUR_ALIGN(addr, pgoff);
7725@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7726 addr = PAGE_ALIGN(addr);
7727
7728 vma = find_vma(mm, addr);
7729- if (TASK_SIZE - len >= addr &&
7730- (!vma || addr + len <= vma->vm_start))
7731+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7732 return addr;
7733 }
7734
7735 info.length = len;
7736 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
7737 info.align_offset = pgoff << PAGE_SHIFT;
7738+ info.threadstack_offset = offset;
7739
7740 if (dir == DOWN) {
7741 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
7742@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7743 {
7744 unsigned long random_factor = 0UL;
7745
7746+#ifdef CONFIG_PAX_RANDMMAP
7747+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7748+#endif
7749+
7750 if (current->flags & PF_RANDOMIZE) {
7751 random_factor = get_random_int();
7752 random_factor = random_factor << PAGE_SHIFT;
7753@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7754
7755 if (mmap_is_legacy()) {
7756 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
7757+
7758+#ifdef CONFIG_PAX_RANDMMAP
7759+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7760+ mm->mmap_base += mm->delta_mmap;
7761+#endif
7762+
7763 mm->get_unmapped_area = arch_get_unmapped_area;
7764 } else {
7765 mm->mmap_base = mmap_base(random_factor);
7766+
7767+#ifdef CONFIG_PAX_RANDMMAP
7768+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7769+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7770+#endif
7771+
7772 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7773 }
7774 }
7775
7776-static inline unsigned long brk_rnd(void)
7777-{
7778- unsigned long rnd = get_random_int();
7779-
7780- rnd = rnd << PAGE_SHIFT;
7781- /* 8MB for 32bit, 256MB for 64bit */
7782- if (TASK_IS_32BIT_ADDR)
7783- rnd = rnd & 0x7ffffful;
7784- else
7785- rnd = rnd & 0xffffffful;
7786-
7787- return rnd;
7788-}
7789-
7790-unsigned long arch_randomize_brk(struct mm_struct *mm)
7791-{
7792- unsigned long base = mm->brk;
7793- unsigned long ret;
7794-
7795- ret = PAGE_ALIGN(base + brk_rnd());
7796-
7797- if (ret < mm->brk)
7798- return mm->brk;
7799-
7800- return ret;
7801-}
7802-
7803 int __virt_addr_valid(const volatile void *kaddr)
7804 {
7805 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
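[editor's note] check_heap_stack_gap() replaces the open-coded '!vma || addr + len <= vma->vm_start' test: besides checking that the request fits below the next VMA, it keeps a configurable gap under downward-growing stacks, widened by the random offset obtained from gr_rand_threadstack_offset() above. A simplified sketch of the shape of that test (the real helper in the patch's mm/mmap.c hunk also handles upward-growing stacks; sysctl_heap_stack_gap is assumed from the wider patch):

/* simplified sketch; the real helper lives in the patch's mm/mmap.c hunk */
static bool check_heap_stack_gap(const struct vm_area_struct *vma,
				 unsigned long addr, unsigned long len,
				 unsigned long offset)
{
	if (!vma)
		return true;			/* nothing above the request */
	if (addr + len > vma->vm_start)
		return false;			/* overlaps the next mapping */
	if (vma->vm_flags & VM_GROWSDOWN)	/* keep a gap below stacks   */
		return vma->vm_start - addr - len >= sysctl_heap_stack_gap + offset;
	return true;
}
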
7806diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
7807index 59cccd9..f39ac2f 100644
7808--- a/arch/mips/pci/pci-octeon.c
7809+++ b/arch/mips/pci/pci-octeon.c
7810@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
7811
7812
7813 static struct pci_ops octeon_pci_ops = {
7814- octeon_read_config,
7815- octeon_write_config,
7816+ .read = octeon_read_config,
7817+ .write = octeon_write_config,
7818 };
7819
7820 static struct resource octeon_pci_mem_resource = {
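[editor's note] The pci_ops conversions here and in pcie-octeon.c below are purely syntactic: positional initializers become designated ones. Behaviour is identical today, but designated initializers survive field reordering and are what the gcc plugins in this patch set (constification in particular) can reason about. Both spellings side by side (suffixed names are used only to keep the two variants distinct in this sketch):

/* before: positional -- breaks silently if struct pci_ops gains/moves fields */
static struct pci_ops octeon_pci_ops_positional = {
	octeon_read_config,
	octeon_write_config,
};

/* after: designated -- bound by field name, order-independent */
static struct pci_ops octeon_pci_ops_designated = {
	.read  = octeon_read_config,
	.write = octeon_write_config,
};
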
7821diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
7822index 5e36c33..eb4a17b 100644
7823--- a/arch/mips/pci/pcie-octeon.c
7824+++ b/arch/mips/pci/pcie-octeon.c
7825@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
7826 }
7827
7828 static struct pci_ops octeon_pcie0_ops = {
7829- octeon_pcie0_read_config,
7830- octeon_pcie0_write_config,
7831+ .read = octeon_pcie0_read_config,
7832+ .write = octeon_pcie0_write_config,
7833 };
7834
7835 static struct resource octeon_pcie0_mem_resource = {
7836@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
7837 };
7838
7839 static struct pci_ops octeon_pcie1_ops = {
7840- octeon_pcie1_read_config,
7841- octeon_pcie1_write_config,
7842+ .read = octeon_pcie1_read_config,
7843+ .write = octeon_pcie1_write_config,
7844 };
7845
7846 static struct resource octeon_pcie1_mem_resource = {
7847@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
7848 };
7849
7850 static struct pci_ops octeon_dummy_ops = {
7851- octeon_dummy_read_config,
7852- octeon_dummy_write_config,
7853+ .read = octeon_dummy_read_config,
7854+ .write = octeon_dummy_write_config,
7855 };
7856
7857 static struct resource octeon_dummy_mem_resource = {
7858diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7859index a2358b4..7cead4f 100644
7860--- a/arch/mips/sgi-ip27/ip27-nmi.c
7861+++ b/arch/mips/sgi-ip27/ip27-nmi.c
7862@@ -187,9 +187,9 @@ void
7863 cont_nmi_dump(void)
7864 {
7865 #ifndef REAL_NMI_SIGNAL
7866- static atomic_t nmied_cpus = ATOMIC_INIT(0);
7867+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7868
7869- atomic_inc(&nmied_cpus);
7870+ atomic_inc_unchecked(&nmied_cpus);
7871 #endif
7872 /*
7873 * Only allow 1 cpu to proceed
7874@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7875 udelay(10000);
7876 }
7877 #else
7878- while (atomic_read(&nmied_cpus) != num_online_cpus());
7879+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7880 #endif
7881
7882 /*
7883diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7884index a046b30..6799527 100644
7885--- a/arch/mips/sni/rm200.c
7886+++ b/arch/mips/sni/rm200.c
7887@@ -270,7 +270,7 @@ spurious_8259A_irq:
7888 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7889 spurious_irq_mask |= irqmask;
7890 }
7891- atomic_inc(&irq_err_count);
7892+ atomic_inc_unchecked(&irq_err_count);
7893 /*
7894 * Theoretically we do not have to handle this IRQ,
7895 * but in Linux this does not cause problems and is
7896diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7897index 41e873b..34d33a7 100644
7898--- a/arch/mips/vr41xx/common/icu.c
7899+++ b/arch/mips/vr41xx/common/icu.c
7900@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7901
7902 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7903
7904- atomic_inc(&irq_err_count);
7905+ atomic_inc_unchecked(&irq_err_count);
7906
7907 return -1;
7908 }
7909diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7910index ae0e4ee..e8f0692 100644
7911--- a/arch/mips/vr41xx/common/irq.c
7912+++ b/arch/mips/vr41xx/common/irq.c
7913@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7914 irq_cascade_t *cascade;
7915
7916 if (irq >= NR_IRQS) {
7917- atomic_inc(&irq_err_count);
7918+ atomic_inc_unchecked(&irq_err_count);
7919 return;
7920 }
7921
7922@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7923 ret = cascade->get_irq(irq);
7924 irq = ret;
7925 if (ret < 0)
7926- atomic_inc(&irq_err_count);
7927+ atomic_inc_unchecked(&irq_err_count);
7928 else
7929 irq_dispatch(irq);
7930 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7931diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7932index 967d144..db12197 100644
7933--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7934+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7935@@ -11,12 +11,14 @@
7936 #ifndef _ASM_PROC_CACHE_H
7937 #define _ASM_PROC_CACHE_H
7938
7939+#include <linux/const.h>
7940+
7941 /* L1 cache */
7942
7943 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7944 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7945-#define L1_CACHE_BYTES 16 /* bytes per entry */
7946 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7947+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7948 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7949
7950 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7951diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7952index bcb5df2..84fabd2 100644
7953--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7954+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7955@@ -16,13 +16,15 @@
7956 #ifndef _ASM_PROC_CACHE_H
7957 #define _ASM_PROC_CACHE_H
7958
7959+#include <linux/const.h>
7960+
7961 /*
7962 * L1 cache
7963 */
7964 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7965 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7966-#define L1_CACHE_BYTES 32 /* bytes per entry */
7967 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7968+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7969 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7970
7971 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7972diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7973index 4ce7a01..449202a 100644
7974--- a/arch/openrisc/include/asm/cache.h
7975+++ b/arch/openrisc/include/asm/cache.h
7976@@ -19,11 +19,13 @@
7977 #ifndef __ASM_OPENRISC_CACHE_H
7978 #define __ASM_OPENRISC_CACHE_H
7979
7980+#include <linux/const.h>
7981+
7982 /* FIXME: How can we replace these with values from the CPU...
7983 * they shouldn't be hard-coded!
7984 */
7985
7986-#define L1_CACHE_BYTES 16
7987 #define L1_CACHE_SHIFT 4
7988+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7989
7990 #endif /* __ASM_OPENRISC_CACHE_H */
7991diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7992index 0be2db2..1b0f26d 100644
7993--- a/arch/parisc/include/asm/atomic.h
7994+++ b/arch/parisc/include/asm/atomic.h
7995@@ -248,6 +248,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7996 return dec;
7997 }
7998
7999+#define atomic64_read_unchecked(v) atomic64_read(v)
8000+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
8001+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
8002+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
8003+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
8004+#define atomic64_inc_unchecked(v) atomic64_inc(v)
8005+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
8006+#define atomic64_dec_unchecked(v) atomic64_dec(v)
8007+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
8008+
8009 #endif /* !CONFIG_64BIT */
8010
8011
8012diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
8013index 47f11c7..3420df2 100644
8014--- a/arch/parisc/include/asm/cache.h
8015+++ b/arch/parisc/include/asm/cache.h
8016@@ -5,6 +5,7 @@
8017 #ifndef __ARCH_PARISC_CACHE_H
8018 #define __ARCH_PARISC_CACHE_H
8019
8020+#include <linux/const.h>
8021
8022 /*
8023 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
8024@@ -15,13 +16,13 @@
8025 * just ruin performance.
8026 */
8027 #ifdef CONFIG_PA20
8028-#define L1_CACHE_BYTES 64
8029 #define L1_CACHE_SHIFT 6
8030 #else
8031-#define L1_CACHE_BYTES 32
8032 #define L1_CACHE_SHIFT 5
8033 #endif
8034
8035+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8036+
8037 #ifndef __ASSEMBLY__
8038
8039 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8040diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
8041index 3391d06..c23a2cc 100644
8042--- a/arch/parisc/include/asm/elf.h
8043+++ b/arch/parisc/include/asm/elf.h
8044@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
8045
8046 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
8047
8048+#ifdef CONFIG_PAX_ASLR
8049+#define PAX_ELF_ET_DYN_BASE 0x10000UL
8050+
8051+#define PAX_DELTA_MMAP_LEN 16
8052+#define PAX_DELTA_STACK_LEN 16
8053+#endif
8054+
8055 /* This yields a mask that user programs can use to figure out what
8056 instruction set this CPU supports. This could be done in user space,
8057 but it's not easy, and we've already done it here. */
8058diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
8059index f213f5b..0af3e8e 100644
8060--- a/arch/parisc/include/asm/pgalloc.h
8061+++ b/arch/parisc/include/asm/pgalloc.h
8062@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
8063 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
8064 }
8065
8066+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
8067+{
8068+ pgd_populate(mm, pgd, pmd);
8069+}
8070+
8071 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
8072 {
8073 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
8074@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
8075 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
8076 #define pmd_free(mm, x) do { } while (0)
8077 #define pgd_populate(mm, pmd, pte) BUG()
8078+#define pgd_populate_kernel(mm, pmd, pte) BUG()
8079
8080 #endif
8081
8082diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
8083index 22b89d1..ce34230 100644
8084--- a/arch/parisc/include/asm/pgtable.h
8085+++ b/arch/parisc/include/asm/pgtable.h
8086@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
8087 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
8088 #define PAGE_COPY PAGE_EXECREAD
8089 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
8090+
8091+#ifdef CONFIG_PAX_PAGEEXEC
8092+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
8093+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
8094+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
8095+#else
8096+# define PAGE_SHARED_NOEXEC PAGE_SHARED
8097+# define PAGE_COPY_NOEXEC PAGE_COPY
8098+# define PAGE_READONLY_NOEXEC PAGE_READONLY
8099+#endif
8100+
8101 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
8102 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
8103 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
8104diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
8105index 4006964..fcb3cc2 100644
8106--- a/arch/parisc/include/asm/uaccess.h
8107+++ b/arch/parisc/include/asm/uaccess.h
8108@@ -246,10 +246,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
8109 const void __user *from,
8110 unsigned long n)
8111 {
8112- int sz = __compiletime_object_size(to);
8113+ size_t sz = __compiletime_object_size(to);
8114 int ret = -EFAULT;
8115
8116- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
8117+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
8118 ret = __copy_from_user(to, from, n);
8119 else
8120 copy_from_user_overflow();
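[editor's note] The copy_from_user() hunk widens sz from int to size_t. __compiletime_object_size() yields (size_t)-1 when the object size is unknown; with a signed sz, the 'sz == -1' test works, but 'sz >= n' mixes signedness and leans on the usual arithmetic conversions. Keeping everything in size_t and testing against (size_t)-1 makes the sentinel comparison explicit. The pitfall in miniature:

size_t n  = 16;
int    s1 = -1;		/* "size unknown" sentinel, signed  */
size_t s2 = (size_t)-1;	/* same sentinel, kept unsigned     */

/* s1 >= n: the usual arithmetic conversions turn s1 into size_t,
 * so -1 becomes SIZE_MAX and the test passes -- the right result,
 * but only by accident, and flagged by -Wsign-compare.
 * s2 == (size_t)-1 and s2 >= n stay in one type throughout. */
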
8121diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
8122index 50dfafc..b9fc230 100644
8123--- a/arch/parisc/kernel/module.c
8124+++ b/arch/parisc/kernel/module.c
8125@@ -98,16 +98,38 @@
8126
8127 /* three functions to determine where in the module core
8128 * or init pieces the location is */
8129+static inline int in_init_rx(struct module *me, void *loc)
8130+{
8131+ return (loc >= me->module_init_rx &&
8132+ loc < (me->module_init_rx + me->init_size_rx));
8133+}
8134+
8135+static inline int in_init_rw(struct module *me, void *loc)
8136+{
8137+ return (loc >= me->module_init_rw &&
8138+ loc < (me->module_init_rw + me->init_size_rw));
8139+}
8140+
8141 static inline int in_init(struct module *me, void *loc)
8142 {
8143- return (loc >= me->module_init &&
8144- loc <= (me->module_init + me->init_size));
8145+ return in_init_rx(me, loc) || in_init_rw(me, loc);
8146+}
8147+
8148+static inline int in_core_rx(struct module *me, void *loc)
8149+{
8150+ return (loc >= me->module_core_rx &&
8151+ loc < (me->module_core_rx + me->core_size_rx));
8152+}
8153+
8154+static inline int in_core_rw(struct module *me, void *loc)
8155+{
8156+ return (loc >= me->module_core_rw &&
8157+ loc < (me->module_core_rw + me->core_size_rw));
8158 }
8159
8160 static inline int in_core(struct module *me, void *loc)
8161 {
8162- return (loc >= me->module_core &&
8163- loc <= (me->module_core + me->core_size));
8164+ return in_core_rx(me, loc) || in_core_rw(me, loc);
8165 }
8166
8167 static inline int in_local(struct module *me, void *loc)
8168@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
8169 }
8170
8171 /* align things a bit */
8172- me->core_size = ALIGN(me->core_size, 16);
8173- me->arch.got_offset = me->core_size;
8174- me->core_size += gots * sizeof(struct got_entry);
8175+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
8176+ me->arch.got_offset = me->core_size_rw;
8177+ me->core_size_rw += gots * sizeof(struct got_entry);
8178
8179- me->core_size = ALIGN(me->core_size, 16);
8180- me->arch.fdesc_offset = me->core_size;
8181- me->core_size += fdescs * sizeof(Elf_Fdesc);
8182+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
8183+ me->arch.fdesc_offset = me->core_size_rw;
8184+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
8185
8186 me->arch.got_max = gots;
8187 me->arch.fdesc_max = fdescs;
8188@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
8189
8190 BUG_ON(value == 0);
8191
8192- got = me->module_core + me->arch.got_offset;
8193+ got = me->module_core_rw + me->arch.got_offset;
8194 for (i = 0; got[i].addr; i++)
8195 if (got[i].addr == value)
8196 goto out;
8197@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
8198 #ifdef CONFIG_64BIT
8199 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
8200 {
8201- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
8202+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
8203
8204 if (!value) {
8205 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
8206@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
8207
8208 /* Create new one */
8209 fdesc->addr = value;
8210- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
8211+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
8212 return (Elf_Addr)fdesc;
8213 }
8214 #endif /* CONFIG_64BIT */
8215@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
8216
8217 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
8218 end = table + sechdrs[me->arch.unwind_section].sh_size;
8219- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
8220+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
8221
8222 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
8223 me->arch.unwind_section, table, end, gp);
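[editor's note] Two independent fixes ride in the module.c hunks above: the single module_core/module_init regions are split into RX (code) and RW (data) halves so PaX can map module text non-writable and module data non-executable, and the range checks tighten from '<=' to '<', turning a closed interval that wrongly accepted the first byte past a region into a correct half-open one. The half-open idiom in miniature:

/* half-open interval [base, base + size): no off-by-one at the top end */
static inline int in_region(const void *loc, const void *base,
			    unsigned long size)
{
	return loc >= base && loc < base + size;	/* '<', not '<=' */
}
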
8224diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
8225index e1ffea2..46ed66e 100644
8226--- a/arch/parisc/kernel/sys_parisc.c
8227+++ b/arch/parisc/kernel/sys_parisc.c
8228@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8229 unsigned long task_size = TASK_SIZE;
8230 int do_color_align, last_mmap;
8231 struct vm_unmapped_area_info info;
8232+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
8233
8234 if (len > task_size)
8235 return -ENOMEM;
8236@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8237 goto found_addr;
8238 }
8239
8240+#ifdef CONFIG_PAX_RANDMMAP
8241+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8242+#endif
8243+
8244 if (addr) {
8245 if (do_color_align && last_mmap)
8246 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
8247@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8248 info.high_limit = mmap_upper_limit();
8249 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
8250 info.align_offset = shared_align_offset(last_mmap, pgoff);
8251+ info.threadstack_offset = offset;
8252 addr = vm_unmapped_area(&info);
8253
8254 found_addr:
8255@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8256 unsigned long addr = addr0;
8257 int do_color_align, last_mmap;
8258 struct vm_unmapped_area_info info;
8259+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
8260
8261 #ifdef CONFIG_64BIT
8262 /* This should only ever run for 32-bit processes. */
8263@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8264 }
8265
8266 /* requesting a specific address */
8267+#ifdef CONFIG_PAX_RANDMMAP
8268+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8269+#endif
8270+
8271 if (addr) {
8272 if (do_color_align && last_mmap)
8273 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
8274@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8275 info.high_limit = mm->mmap_base;
8276 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
8277 info.align_offset = shared_align_offset(last_mmap, pgoff);
8278+ info.threadstack_offset = offset;
8279 addr = vm_unmapped_area(&info);
8280 if (!(addr & ~PAGE_MASK))
8281 goto found_addr;
8282@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8283 mm->mmap_legacy_base = mmap_legacy_base();
8284 mm->mmap_base = mmap_upper_limit();
8285
8286+#ifdef CONFIG_PAX_RANDMMAP
8287+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
8288+ mm->mmap_legacy_base += mm->delta_mmap;
8289+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8290+ }
8291+#endif
8292+
8293 if (mmap_is_legacy()) {
8294 mm->mmap_base = mm->mmap_legacy_base;
8295 mm->get_unmapped_area = arch_get_unmapped_area;
8296diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
8297index 47ee620..1107387 100644
8298--- a/arch/parisc/kernel/traps.c
8299+++ b/arch/parisc/kernel/traps.c
8300@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
8301
8302 down_read(&current->mm->mmap_sem);
8303 vma = find_vma(current->mm,regs->iaoq[0]);
8304- if (vma && (regs->iaoq[0] >= vma->vm_start)
8305- && (vma->vm_flags & VM_EXEC)) {
8306-
8307+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
8308 fault_address = regs->iaoq[0];
8309 fault_space = regs->iasq[0];
8310
8311diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
8312index 3ca9c11..d163ef7 100644
8313--- a/arch/parisc/mm/fault.c
8314+++ b/arch/parisc/mm/fault.c
8315@@ -15,6 +15,7 @@
8316 #include <linux/sched.h>
8317 #include <linux/interrupt.h>
8318 #include <linux/module.h>
8319+#include <linux/unistd.h>
8320
8321 #include <asm/uaccess.h>
8322 #include <asm/traps.h>
8323@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
8324 static unsigned long
8325 parisc_acctyp(unsigned long code, unsigned int inst)
8326 {
8327- if (code == 6 || code == 16)
8328+ if (code == 6 || code == 7 || code == 16)
8329 return VM_EXEC;
8330
8331 switch (inst & 0xf0000000) {
8332@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
8333 }
8334 #endif
8335
8336+#ifdef CONFIG_PAX_PAGEEXEC
8337+/*
8338+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
8339+ *
8340+ * returns 1 when task should be killed
8341+ * 2 when rt_sigreturn trampoline was detected
8342+ * 3 when unpatched PLT trampoline was detected
8343+ */
8344+static int pax_handle_fetch_fault(struct pt_regs *regs)
8345+{
8346+
8347+#ifdef CONFIG_PAX_EMUPLT
8348+ int err;
8349+
8350+ do { /* PaX: unpatched PLT emulation */
8351+ unsigned int bl, depwi;
8352+
8353+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
8354+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
8355+
8356+ if (err)
8357+ break;
8358+
8359+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
8360+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
8361+
8362+ err = get_user(ldw, (unsigned int *)addr);
8363+ err |= get_user(bv, (unsigned int *)(addr+4));
8364+ err |= get_user(ldw2, (unsigned int *)(addr+8));
8365+
8366+ if (err)
8367+ break;
8368+
8369+ if (ldw == 0x0E801096U &&
8370+ bv == 0xEAC0C000U &&
8371+ ldw2 == 0x0E881095U)
8372+ {
8373+ unsigned int resolver, map;
8374+
8375+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
8376+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
8377+ if (err)
8378+ break;
8379+
8380+ regs->gr[20] = instruction_pointer(regs)+8;
8381+ regs->gr[21] = map;
8382+ regs->gr[22] = resolver;
8383+ regs->iaoq[0] = resolver | 3UL;
8384+ regs->iaoq[1] = regs->iaoq[0] + 4;
8385+ return 3;
8386+ }
8387+ }
8388+ } while (0);
8389+#endif
8390+
8391+#ifdef CONFIG_PAX_EMUTRAMP
8392+
8393+#ifndef CONFIG_PAX_EMUSIGRT
8394+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
8395+ return 1;
8396+#endif
8397+
8398+ do { /* PaX: rt_sigreturn emulation */
8399+ unsigned int ldi1, ldi2, bel, nop;
8400+
8401+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
8402+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
8403+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
8404+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
8405+
8406+ if (err)
8407+ break;
8408+
8409+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
8410+ ldi2 == 0x3414015AU &&
8411+ bel == 0xE4008200U &&
8412+ nop == 0x08000240U)
8413+ {
8414+ regs->gr[25] = (ldi1 & 2) >> 1;
8415+ regs->gr[20] = __NR_rt_sigreturn;
8416+ regs->gr[31] = regs->iaoq[1] + 16;
8417+ regs->sr[0] = regs->iasq[1];
8418+ regs->iaoq[0] = 0x100UL;
8419+ regs->iaoq[1] = regs->iaoq[0] + 4;
8420+ regs->iasq[0] = regs->sr[2];
8421+ regs->iasq[1] = regs->sr[2];
8422+ return 2;
8423+ }
8424+ } while (0);
8425+#endif
8426+
8427+ return 1;
8428+}
8429+
8430+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
8431+{
8432+ unsigned long i;
8433+
8434+ printk(KERN_ERR "PAX: bytes at PC: ");
8435+ for (i = 0; i < 5; i++) {
8436+ unsigned int c;
8437+ if (get_user(c, (unsigned int *)pc+i))
8438+ printk(KERN_CONT "???????? ");
8439+ else
8440+ printk(KERN_CONT "%08x ", c);
8441+ }
8442+ printk("\n");
8443+}
8444+#endif
8445+
8446 int fixup_exception(struct pt_regs *regs)
8447 {
8448 const struct exception_table_entry *fix;
8449@@ -234,8 +345,33 @@ retry:
8450
8451 good_area:
8452
8453- if ((vma->vm_flags & acc_type) != acc_type)
8454+ if ((vma->vm_flags & acc_type) != acc_type) {
8455+
8456+#ifdef CONFIG_PAX_PAGEEXEC
8457+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
8458+ (address & ~3UL) == instruction_pointer(regs))
8459+ {
8460+ up_read(&mm->mmap_sem);
8461+ switch (pax_handle_fetch_fault(regs)) {
8462+
8463+#ifdef CONFIG_PAX_EMUPLT
8464+ case 3:
8465+ return;
8466+#endif
8467+
8468+#ifdef CONFIG_PAX_EMUTRAMP
8469+ case 2:
8470+ return;
8471+#endif
8472+
8473+ }
8474+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
8475+ do_group_exit(SIGKILL);
8476+ }
8477+#endif
8478+
8479 goto bad_area;
8480+ }
8481
8482 /*
8483 * If for any reason at all we couldn't handle the fault, make
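
The EMUTRAMP path above recognizes the parisc rt_sigreturn trampoline by fetching four instruction words at the faulting PC and comparing them against fixed ldi/ldi/be,l/nop encodings; the opcode constants below come from the hunk itself. A userspace sketch of the signature test, reading from a plain array instead of get_user():

#include <stdio.h>
#include <stdint.h>

/* Four instruction words from the faulting PC are matched against the
 * fixed encoding of the rt_sigreturn trampoline. insn[0] carries the
 * "was in a syscall" bit, which the kernel code copies into gr[25]. */
static int is_rt_sigreturn_tramp(const uint32_t insn[4], int *in_syscall)
{
	if ((insn[0] == 0x34190000U || insn[0] == 0x34190002U) &&
	    insn[1] == 0x3414015AU &&
	    insn[2] == 0xE4008200U &&
	    insn[3] == 0x08000240U) {
		*in_syscall = (insn[0] & 2) >> 1;
		return 1;
	}
	return 0;
}

int main(void)
{
	uint32_t tramp[4] = { 0x34190002U, 0x3414015AU, 0xE4008200U, 0x08000240U };
	int in_syscall;

	if (is_rt_sigreturn_tramp(tramp, &in_syscall))
		printf("trampoline matched, in_syscall=%d\n", in_syscall);
	return 0;
}
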
8484diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
8485index 80b94b0..a3274fb 100644
8486--- a/arch/powerpc/Kconfig
8487+++ b/arch/powerpc/Kconfig
8488@@ -398,6 +398,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
8489 config KEXEC
8490 bool "kexec system call"
8491 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
8492+ depends on !GRKERNSEC_KMEM
8493 help
8494 kexec is a system call that implements the ability to shutdown your
8495 current kernel, and to start another kernel. It is like a reboot
8496diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
8497index 28992d0..c797b20 100644
8498--- a/arch/powerpc/include/asm/atomic.h
8499+++ b/arch/powerpc/include/asm/atomic.h
8500@@ -519,6 +519,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
8501 return t1;
8502 }
8503
8504+#define atomic64_read_unchecked(v) atomic64_read(v)
8505+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
8506+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
8507+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
8508+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
8509+#define atomic64_inc_unchecked(v) atomic64_inc(v)
8510+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
8511+#define atomic64_dec_unchecked(v) atomic64_dec(v)
8512+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
8513+
8514 #endif /* __powerpc64__ */
8515
8516 #endif /* __KERNEL__ */
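
On powerpc64 the *_unchecked atomic64 aliases above map straight onto the plain operations: this architecture does not carry PaX's overflow-trapping atomic implementations, so the macros exist only to satisfy the common API that REFCOUNT-hardened code expects. A non-atomic userspace sketch of the intended split, with __builtin_add_overflow standing in for the trap-on-overflow behaviour checked atomics have on instrumented architectures:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative only, and not actually atomic: a "checked" add that
 * aborts on signed overflow and an "unchecked" add that is allowed to
 * wrap, mimicking the atomic64 vs atomic64_*_unchecked split. */
typedef struct { long counter; } atomic64_t;
typedef struct { long counter; } atomic64_unchecked_t;

static void atomic64_add(long a, atomic64_t *v)
{
	long n;
	if (__builtin_add_overflow(v->counter, a, &n))
		abort();                /* overflow -> kill, as PaX REFCOUNT does */
	v->counter = n;
}

static void atomic64_add_unchecked(long a, atomic64_unchecked_t *v)
{
	v->counter += a;                /* deliberate wraparound is fine here */
}

int main(void)
{
	atomic64_t ref = { 1 };
	atomic64_unchecked_t stat = { 0 };

	atomic64_add(1, &ref);
	atomic64_add_unchecked(1, &stat);
	printf("ref=%ld stat=%ld\n", ref.counter, stat.counter);
	return 0;
}
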
8517diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
8518index bab79a1..4a3eabc 100644
8519--- a/arch/powerpc/include/asm/barrier.h
8520+++ b/arch/powerpc/include/asm/barrier.h
8521@@ -73,7 +73,7 @@
8522 do { \
8523 compiletime_assert_atomic_type(*p); \
8524 __lwsync(); \
8525- ACCESS_ONCE(*p) = (v); \
8526+ ACCESS_ONCE_RW(*p) = (v); \
8527 } while (0)
8528
8529 #define smp_load_acquire(p) \
8530diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
8531index ed0afc1..0332825 100644
8532--- a/arch/powerpc/include/asm/cache.h
8533+++ b/arch/powerpc/include/asm/cache.h
8534@@ -3,6 +3,7 @@
8535
8536 #ifdef __KERNEL__
8537
8538+#include <linux/const.h>
8539
8540 /* bytes per L1 cache line */
8541 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
8542@@ -22,7 +23,7 @@
8543 #define L1_CACHE_SHIFT 7
8544 #endif
8545
8546-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8547+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8548
8549 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8550
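
The L1_CACHE_BYTES rewrite via _AC(1,UL) does two things: the suffix is pasted only when compiling C, so the same header keeps working when included from assembly, and on the C side the constant becomes an unsigned long, keeping cache-alignment arithmetic in the full register width. A small sketch of the usual align-up idiom built on it:

#include <stdio.h>

#define L1_CACHE_SHIFT 7

/* What _AC(1,UL) from <linux/const.h> expands to on the C side. */
#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)

/* Round an address up to the next cache-line boundary; with the
 * constant typed unsigned long, the mask math stays 64-bit clean. */
static unsigned long cache_align(unsigned long addr)
{
	return (addr + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
}

int main(void)
{
	printf("%#lx\n", cache_align(0xffff880012345679UL));
	return 0;
}
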
8551diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8552index 888d8f3..66f581c 100644
8553--- a/arch/powerpc/include/asm/elf.h
8554+++ b/arch/powerpc/include/asm/elf.h
8555@@ -28,8 +28,19 @@
8556 the loader. We need to make sure that it is out of the way of the program
8557 that it will "exec", and that there is sufficient room for the brk. */
8558
8559-extern unsigned long randomize_et_dyn(unsigned long base);
8560-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
8561+#define ELF_ET_DYN_BASE (0x20000000)
8562+
8563+#ifdef CONFIG_PAX_ASLR
8564+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8565+
8566+#ifdef __powerpc64__
8567+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8568+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8569+#else
8570+#define PAX_DELTA_MMAP_LEN 15
8571+#define PAX_DELTA_STACK_LEN 15
8572+#endif
8573+#endif
8574
8575 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8576
8577@@ -129,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8578 (0x7ff >> (PAGE_SHIFT - 12)) : \
8579 (0x3ffff >> (PAGE_SHIFT - 12)))
8580
8581-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8582-#define arch_randomize_brk arch_randomize_brk
8583-
8584-
8585 #ifdef CONFIG_SPU_BASE
8586 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
8587 #define NT_SPU 1
8588diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8589index 8196e9c..d83a9f3 100644
8590--- a/arch/powerpc/include/asm/exec.h
8591+++ b/arch/powerpc/include/asm/exec.h
8592@@ -4,6 +4,6 @@
8593 #ifndef _ASM_POWERPC_EXEC_H
8594 #define _ASM_POWERPC_EXEC_H
8595
8596-extern unsigned long arch_align_stack(unsigned long sp);
8597+#define arch_align_stack(x) ((x) & ~0xfUL)
8598
8599 #endif /* _ASM_POWERPC_EXEC_H */
8600diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8601index 5acabbd..7ea14fa 100644
8602--- a/arch/powerpc/include/asm/kmap_types.h
8603+++ b/arch/powerpc/include/asm/kmap_types.h
8604@@ -10,7 +10,7 @@
8605 * 2 of the License, or (at your option) any later version.
8606 */
8607
8608-#define KM_TYPE_NR 16
8609+#define KM_TYPE_NR 17
8610
8611 #endif /* __KERNEL__ */
8612 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8613diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8614index b8da913..60b608a 100644
8615--- a/arch/powerpc/include/asm/local.h
8616+++ b/arch/powerpc/include/asm/local.h
8617@@ -9,15 +9,26 @@ typedef struct
8618 atomic_long_t a;
8619 } local_t;
8620
8621+typedef struct
8622+{
8623+ atomic_long_unchecked_t a;
8624+} local_unchecked_t;
8625+
8626 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8627
8628 #define local_read(l) atomic_long_read(&(l)->a)
8629+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8630 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8631+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8632
8633 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8634+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8635 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8636+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8637 #define local_inc(l) atomic_long_inc(&(l)->a)
8638+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8639 #define local_dec(l) atomic_long_dec(&(l)->a)
8640+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8641
8642 static __inline__ long local_add_return(long a, local_t *l)
8643 {
8644@@ -35,6 +46,7 @@ static __inline__ long local_add_return(long a, local_t *l)
8645
8646 return t;
8647 }
8648+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
8649
8650 #define local_add_negative(a, l) (local_add_return((a), (l)) < 0)
8651
8652@@ -54,6 +66,7 @@ static __inline__ long local_sub_return(long a, local_t *l)
8653
8654 return t;
8655 }
8656+#define local_sub_return_unchecked(i, l) atomic_long_sub_return_unchecked((i), (&(l)->a))
8657
8658 static __inline__ long local_inc_return(local_t *l)
8659 {
8660@@ -101,6 +114,8 @@ static __inline__ long local_dec_return(local_t *l)
8661
8662 #define local_cmpxchg(l, o, n) \
8663 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8664+#define local_cmpxchg_unchecked(l, o, n) \
8665+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8666 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8667
8668 /**
8669diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8670index 8565c25..2865190 100644
8671--- a/arch/powerpc/include/asm/mman.h
8672+++ b/arch/powerpc/include/asm/mman.h
8673@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8674 }
8675 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8676
8677-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8678+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8679 {
8680 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8681 }
8682diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8683index 32e4e21..62afb12 100644
8684--- a/arch/powerpc/include/asm/page.h
8685+++ b/arch/powerpc/include/asm/page.h
8686@@ -230,8 +230,9 @@ extern long long virt_phys_offset;
8687 * and needs to be executable. This means the whole heap ends
8688 * up being executable.
8689 */
8690-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8691- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8692+#define VM_DATA_DEFAULT_FLAGS32 \
8693+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8694+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8695
8696 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8697 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8698@@ -259,6 +260,9 @@ extern long long virt_phys_offset;
8699 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8700 #endif
8701
8702+#define ktla_ktva(addr) (addr)
8703+#define ktva_ktla(addr) (addr)
8704+
8705 #ifndef CONFIG_PPC_BOOK3S_64
8706 /*
8707 * Use the top bit of the higher-level page table entries to indicate whether
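
VM_DATA_DEFAULT_FLAGS32 above stops granting VM_EXEC unconditionally; it is now gated on the READ_IMPLIES_EXEC personality bit, so only tasks that explicitly opted into read-implies-exec semantics get executable data mappings by default. A sketch of the flag computation, using the standard kernel bit values:

#include <stdio.h>

#define VM_READ     0x00000001UL
#define VM_WRITE    0x00000002UL
#define VM_EXEC     0x00000004UL
#define VM_MAYREAD  0x00000010UL
#define VM_MAYWRITE 0x00000020UL
#define VM_MAYEXEC  0x00000040UL
#define READ_IMPLIES_EXEC 0x0400000U   /* from <linux/personality.h> */

/* The reworked default: VM_EXEC only if the personality asked for it. */
static unsigned long data_default_flags(unsigned int personality)
{
	return ((personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) |
	       VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
}

int main(void)
{
	printf("plain:       %#lx\n", data_default_flags(0));
	printf("read-impl-x: %#lx\n", data_default_flags(READ_IMPLIES_EXEC));
	return 0;
}
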
8708diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
8709index 88693ce..ac6f9ab 100644
8710--- a/arch/powerpc/include/asm/page_64.h
8711+++ b/arch/powerpc/include/asm/page_64.h
8712@@ -153,15 +153,18 @@ do { \
8713 * stack by default, so in the absence of a PT_GNU_STACK program header
8714 * we turn execute permission off.
8715 */
8716-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8717- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8718+#define VM_STACK_DEFAULT_FLAGS32 \
8719+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8720+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8721
8722 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8723 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8724
8725+#ifndef CONFIG_PAX_PAGEEXEC
8726 #define VM_STACK_DEFAULT_FLAGS \
8727 (is_32bit_task() ? \
8728 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
8729+#endif
8730
8731 #include <asm-generic/getorder.h>
8732
8733diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
8734index 4b0be20..c15a27d 100644
8735--- a/arch/powerpc/include/asm/pgalloc-64.h
8736+++ b/arch/powerpc/include/asm/pgalloc-64.h
8737@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8738 #ifndef CONFIG_PPC_64K_PAGES
8739
8740 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
8741+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
8742
8743 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
8744 {
8745@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8746 pud_set(pud, (unsigned long)pmd);
8747 }
8748
8749+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8750+{
8751+ pud_populate(mm, pud, pmd);
8752+}
8753+
8754 #define pmd_populate(mm, pmd, pte_page) \
8755 pmd_populate_kernel(mm, pmd, page_address(pte_page))
8756 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
8757@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
8758 #endif
8759
8760 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
8761+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8762
8763 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
8764 pte_t *pte)
8765diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
8766index d98c1ec..9f61569 100644
8767--- a/arch/powerpc/include/asm/pgtable.h
8768+++ b/arch/powerpc/include/asm/pgtable.h
8769@@ -2,6 +2,7 @@
8770 #define _ASM_POWERPC_PGTABLE_H
8771 #ifdef __KERNEL__
8772
8773+#include <linux/const.h>
8774 #ifndef __ASSEMBLY__
8775 #include <linux/mmdebug.h>
8776 #include <asm/processor.h> /* For TASK_SIZE */
8777diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
8778index 4aad413..85d86bf 100644
8779--- a/arch/powerpc/include/asm/pte-hash32.h
8780+++ b/arch/powerpc/include/asm/pte-hash32.h
8781@@ -21,6 +21,7 @@
8782 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
8783 #define _PAGE_USER 0x004 /* usermode access allowed */
8784 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
8785+#define _PAGE_EXEC _PAGE_GUARDED
8786 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
8787 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
8788 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
8789diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
8790index bffd89d..a6641ed 100644
8791--- a/arch/powerpc/include/asm/reg.h
8792+++ b/arch/powerpc/include/asm/reg.h
8793@@ -251,6 +251,7 @@
8794 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
8795 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
8796 #define DSISR_NOHPTE 0x40000000 /* no translation found */
8797+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
8798 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
8799 #define DSISR_ISSTORE 0x02000000 /* access was a store */
8800 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
8801diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
8802index 5a6614a..d89995d1 100644
8803--- a/arch/powerpc/include/asm/smp.h
8804+++ b/arch/powerpc/include/asm/smp.h
8805@@ -51,7 +51,7 @@ struct smp_ops_t {
8806 int (*cpu_disable)(void);
8807 void (*cpu_die)(unsigned int nr);
8808 int (*cpu_bootable)(unsigned int nr);
8809-};
8810+} __no_const;
8811
8812 extern void smp_send_debugger_break(void);
8813 extern void start_secondary_resume(void);
8814diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
8815index b034ecd..af7e31f 100644
8816--- a/arch/powerpc/include/asm/thread_info.h
8817+++ b/arch/powerpc/include/asm/thread_info.h
8818@@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void)
8819 #if defined(CONFIG_PPC64)
8820 #define TIF_ELF2ABI 18 /* function descriptors must die! */
8821 #endif
8822+/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
8823+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
8824
8825 /* as above, but as bit values */
8826 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
8827@@ -125,9 +127,10 @@ static inline struct thread_info *current_thread_info(void)
8828 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8829 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
8830 #define _TIF_NOHZ (1<<TIF_NOHZ)
8831+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8832 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
8833 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
8834- _TIF_NOHZ)
8835+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
8836
8837 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
8838 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
8839diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
8840index 9485b43..3bd3c16 100644
8841--- a/arch/powerpc/include/asm/uaccess.h
8842+++ b/arch/powerpc/include/asm/uaccess.h
8843@@ -58,6 +58,7 @@
8844
8845 #endif
8846
8847+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
8848 #define access_ok(type, addr, size) \
8849 (__chk_user_ptr(addr), \
8850 __access_ok((__force unsigned long)(addr), (size), get_fs()))
8851@@ -318,52 +319,6 @@ do { \
8852 extern unsigned long __copy_tofrom_user(void __user *to,
8853 const void __user *from, unsigned long size);
8854
8855-#ifndef __powerpc64__
8856-
8857-static inline unsigned long copy_from_user(void *to,
8858- const void __user *from, unsigned long n)
8859-{
8860- unsigned long over;
8861-
8862- if (access_ok(VERIFY_READ, from, n))
8863- return __copy_tofrom_user((__force void __user *)to, from, n);
8864- if ((unsigned long)from < TASK_SIZE) {
8865- over = (unsigned long)from + n - TASK_SIZE;
8866- return __copy_tofrom_user((__force void __user *)to, from,
8867- n - over) + over;
8868- }
8869- return n;
8870-}
8871-
8872-static inline unsigned long copy_to_user(void __user *to,
8873- const void *from, unsigned long n)
8874-{
8875- unsigned long over;
8876-
8877- if (access_ok(VERIFY_WRITE, to, n))
8878- return __copy_tofrom_user(to, (__force void __user *)from, n);
8879- if ((unsigned long)to < TASK_SIZE) {
8880- over = (unsigned long)to + n - TASK_SIZE;
8881- return __copy_tofrom_user(to, (__force void __user *)from,
8882- n - over) + over;
8883- }
8884- return n;
8885-}
8886-
8887-#else /* __powerpc64__ */
8888-
8889-#define __copy_in_user(to, from, size) \
8890- __copy_tofrom_user((to), (from), (size))
8891-
8892-extern unsigned long copy_from_user(void *to, const void __user *from,
8893- unsigned long n);
8894-extern unsigned long copy_to_user(void __user *to, const void *from,
8895- unsigned long n);
8896-extern unsigned long copy_in_user(void __user *to, const void __user *from,
8897- unsigned long n);
8898-
8899-#endif /* __powerpc64__ */
8900-
8901 static inline unsigned long __copy_from_user_inatomic(void *to,
8902 const void __user *from, unsigned long n)
8903 {
8904@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
8905 if (ret == 0)
8906 return 0;
8907 }
8908+
8909+ if (!__builtin_constant_p(n))
8910+ check_object_size(to, n, false);
8911+
8912 return __copy_tofrom_user((__force void __user *)to, from, n);
8913 }
8914
8915@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
8916 if (ret == 0)
8917 return 0;
8918 }
8919+
8920+ if (!__builtin_constant_p(n))
8921+ check_object_size(from, n, true);
8922+
8923 return __copy_tofrom_user(to, (__force const void __user *)from, n);
8924 }
8925
8926@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
8927 return __copy_to_user_inatomic(to, from, size);
8928 }
8929
8930+#ifndef __powerpc64__
8931+
8932+static inline unsigned long __must_check copy_from_user(void *to,
8933+ const void __user *from, unsigned long n)
8934+{
8935+ unsigned long over;
8936+
8937+ if ((long)n < 0)
8938+ return n;
8939+
8940+ if (access_ok(VERIFY_READ, from, n)) {
8941+ if (!__builtin_constant_p(n))
8942+ check_object_size(to, n, false);
8943+ return __copy_tofrom_user((__force void __user *)to, from, n);
8944+ }
8945+ if ((unsigned long)from < TASK_SIZE) {
8946+ over = (unsigned long)from + n - TASK_SIZE;
8947+ if (!__builtin_constant_p(n - over))
8948+ check_object_size(to, n - over, false);
8949+ return __copy_tofrom_user((__force void __user *)to, from,
8950+ n - over) + over;
8951+ }
8952+ return n;
8953+}
8954+
8955+static inline unsigned long __must_check copy_to_user(void __user *to,
8956+ const void *from, unsigned long n)
8957+{
8958+ unsigned long over;
8959+
8960+ if ((long)n < 0)
8961+ return n;
8962+
8963+ if (access_ok(VERIFY_WRITE, to, n)) {
8964+ if (!__builtin_constant_p(n))
8965+ check_object_size(from, n, true);
8966+ return __copy_tofrom_user(to, (__force void __user *)from, n);
8967+ }
8968+ if ((unsigned long)to < TASK_SIZE) {
8969+ over = (unsigned long)to + n - TASK_SIZE;
8970+ if (!__builtin_constant_p(n))
8971+ check_object_size(from, n - over, true);
8972+ return __copy_tofrom_user(to, (__force void __user *)from,
8973+ n - over) + over;
8974+ }
8975+ return n;
8976+}
8977+
8978+#else /* __powerpc64__ */
8979+
8980+#define __copy_in_user(to, from, size) \
8981+ __copy_tofrom_user((to), (from), (size))
8982+
8983+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
8984+{
8985+ if ((long)n < 0 || n > INT_MAX)
8986+ return n;
8987+
8988+ if (!__builtin_constant_p(n))
8989+ check_object_size(to, n, false);
8990+
8991+ if (likely(access_ok(VERIFY_READ, from, n)))
8992+ n = __copy_from_user(to, from, n);
8993+ else
8994+ memset(to, 0, n);
8995+ return n;
8996+}
8997+
8998+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8999+{
9000+ if ((long)n < 0 || n > INT_MAX)
9001+ return n;
9002+
9003+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
9004+ if (!__builtin_constant_p(n))
9005+ check_object_size(from, n, true);
9006+ n = __copy_to_user(to, from, n);
9007+ }
9008+ return n;
9009+}
9010+
9011+extern unsigned long copy_in_user(void __user *to, const void __user *from,
9012+ unsigned long n);
9013+
9014+#endif /* __powerpc64__ */
9015+
9016 extern unsigned long __clear_user(void __user *addr, unsigned long size);
9017
9018 static inline unsigned long clear_user(void __user *addr, unsigned long size)
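
The rewritten 32-bit copy_from_user/copy_to_user above add two guards before touching memory: a sign test that rejects lengths which went negative in signed arithmetic, and a check_object_size() call (the PaX USERCOPY hook) for non-constant sizes. A userspace model of the sign test; checked_copy and the buffer sizes are hypothetical:

#include <stdio.h>
#include <string.h>

/* A size that underflowed in signed arithmetic shows up as a huge
 * size_t. Rejecting (long)n < 0 turns any such bug into "copy
 * nothing", matching the patched helpers' full-residue return. */
static size_t checked_copy(void *dst, const void *src, size_t n)
{
	if ((long)n < 0)
		return n;              /* refuse: caller sees the full residue */
	memcpy(dst, src, n);
	return 0;
}

int main(void)
{
	char dst[16], src[16] = "payload";
	size_t len = 4;
	size_t n = len - 8;            /* underflow: wraps to SIZE_MAX - 3 */

	if (checked_copy(dst, src, n) != 0)
		printf("rejected bogus length %zu\n", n);
	return 0;
}
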
9019diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
9020index 670c312..60c2b52 100644
9021--- a/arch/powerpc/kernel/Makefile
9022+++ b/arch/powerpc/kernel/Makefile
9023@@ -27,6 +27,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
9024 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
9025 endif
9026
9027+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
9028+
9029 obj-y := cputable.o ptrace.o syscalls.o \
9030 irq.o align.o signal_32.o pmc.o vdso.o \
9031 process.o systbl.o idle.o \
9032diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
9033index bb9cac6..5181202 100644
9034--- a/arch/powerpc/kernel/exceptions-64e.S
9035+++ b/arch/powerpc/kernel/exceptions-64e.S
9036@@ -1010,6 +1010,7 @@ storage_fault_common:
9037 std r14,_DAR(r1)
9038 std r15,_DSISR(r1)
9039 addi r3,r1,STACK_FRAME_OVERHEAD
9040+ bl save_nvgprs
9041 mr r4,r14
9042 mr r5,r15
9043 ld r14,PACA_EXGEN+EX_R14(r13)
9044@@ -1018,8 +1019,7 @@ storage_fault_common:
9045 cmpdi r3,0
9046 bne- 1f
9047 b ret_from_except_lite
9048-1: bl save_nvgprs
9049- mr r5,r3
9050+1: mr r5,r3
9051 addi r3,r1,STACK_FRAME_OVERHEAD
9052 ld r4,_DAR(r1)
9053 bl bad_page_fault
9054diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
9055index a7d36b1..53af150 100644
9056--- a/arch/powerpc/kernel/exceptions-64s.S
9057+++ b/arch/powerpc/kernel/exceptions-64s.S
9058@@ -1637,10 +1637,10 @@ handle_page_fault:
9059 11: ld r4,_DAR(r1)
9060 ld r5,_DSISR(r1)
9061 addi r3,r1,STACK_FRAME_OVERHEAD
9062+ bl save_nvgprs
9063 bl do_page_fault
9064 cmpdi r3,0
9065 beq+ 12f
9066- bl save_nvgprs
9067 mr r5,r3
9068 addi r3,r1,STACK_FRAME_OVERHEAD
9069 lwz r4,_DAR(r1)
9070diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
9071index 248ee7e..1eb60dd 100644
9072--- a/arch/powerpc/kernel/irq.c
9073+++ b/arch/powerpc/kernel/irq.c
9074@@ -447,6 +447,8 @@ void migrate_irqs(void)
9075 }
9076 #endif
9077
9078+extern void gr_handle_kernel_exploit(void);
9079+
9080 static inline void check_stack_overflow(void)
9081 {
9082 #ifdef CONFIG_DEBUG_STACKOVERFLOW
9083@@ -459,6 +461,7 @@ static inline void check_stack_overflow(void)
9084 printk("do_IRQ: stack overflow: %ld\n",
9085 sp - sizeof(struct thread_info));
9086 dump_stack();
9087+ gr_handle_kernel_exploit();
9088 }
9089 #endif
9090 }
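
check_stack_overflow() above gains a gr_handle_kernel_exploit() call once the remaining stack drops below the slack threshold. A sketch of the underlying offset test; THREAD_SIZE, the thread_info footprint, and the 2 KiB slack are illustrative stand-ins for the real powerpc values:

#include <stdio.h>

#define THREAD_SIZE      (16UL * 1024)  /* illustrative */
#define THREAD_INFO_SIZE 192UL          /* illustrative sizeof(struct thread_info) */

/* How much room is left between the stack pointer and the thread_info
 * block that lives at the base of the kernel stack? */
static void check_stack_overflow(unsigned long sp)
{
	long room = (long)(sp & (THREAD_SIZE - 1)) - (long)THREAD_INFO_SIZE;

	if (room < 2048)                /* 2 KiB slack, as in do_IRQ */
		printf("do_IRQ: stack overflow: %ld\n", room);
}

int main(void)
{
	check_stack_overflow(0xc0000000UL + 1024);   /* deep in the stack */
	check_stack_overflow(0xc0000000UL + 8192);   /* plenty of room */
	return 0;
}
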
9091diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
9092index 6cff040..74ac5d1b 100644
9093--- a/arch/powerpc/kernel/module_32.c
9094+++ b/arch/powerpc/kernel/module_32.c
9095@@ -161,7 +161,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
9096 me->arch.core_plt_section = i;
9097 }
9098 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
9099- printk("Module doesn't contain .plt or .init.plt sections.\n");
9100+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
9101 return -ENOEXEC;
9102 }
9103
9104@@ -191,11 +191,16 @@ static uint32_t do_plt_call(void *location,
9105
9106 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
9107 /* Init, or core PLT? */
9108- if (location >= mod->module_core
9109- && location < mod->module_core + mod->core_size)
9110+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
9111+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
9112 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
9113- else
9114+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
9115+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
9116 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
9117+ else {
9118+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
9119+ return ~0UL;
9120+ }
9121
9122 /* Find this entry, or if that fails, the next avail. entry */
9123 while (entry->jump[0]) {
9124@@ -299,7 +304,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
9125 }
9126 #ifdef CONFIG_DYNAMIC_FTRACE
9127 module->arch.tramp =
9128- do_plt_call(module->module_core,
9129+ do_plt_call(module->module_core_rx,
9130 (unsigned long)ftrace_caller,
9131 sechdrs, module);
9132 #endif
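
Because PaX splits a module into separate RX (code) and RW (data) regions, the patched do_plt_call() must classify the relocation site against four ranges instead of two, and now rejects addresses that fall outside all of them. A compact sketch of that dispatch with made-up example ranges:

#include <stdio.h>

struct range { unsigned long base, size; };

static int in_range(unsigned long a, struct range r)
{
	return a >= r.base && a < r.base + r.size;
}

/* Mirrors the patched dispatch: core (rx|rw) -> core PLT,
 * init (rx|rw) -> init PLT, anything else is an error. */
static const char *plt_for(unsigned long loc,
			   struct range core_rx, struct range core_rw,
			   struct range init_rx, struct range init_rw)
{
	if (in_range(loc, core_rx) || in_range(loc, core_rw))
		return "core PLT";
	if (in_range(loc, init_rx) || in_range(loc, init_rw))
		return "init PLT";
	return "invalid R_PPC_REL24 location";
}

int main(void)
{
	struct range crx = { 0x1000, 0x1000 }, crw = { 0x3000, 0x1000 };
	struct range irx = { 0x5000, 0x0800 }, irw = { 0x6000, 0x0800 };

	printf("%s\n", plt_for(0x1234, crx, crw, irx, irw));
	printf("%s\n", plt_for(0x5100, crx, crw, irx, irw));
	printf("%s\n", plt_for(0x9000, crx, crw, irx, irw));
	return 0;
}
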
9133diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
9134index be99774..9879c82 100644
9135--- a/arch/powerpc/kernel/process.c
9136+++ b/arch/powerpc/kernel/process.c
9137@@ -1039,8 +1039,8 @@ void show_regs(struct pt_regs * regs)
9138 * Lookup NIP late so we have the best change of getting the
9139 * above info out without failing
9140 */
9141- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
9142- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
9143+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
9144+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
9145 #endif
9146 show_stack(current, (unsigned long *) regs->gpr[1]);
9147 if (!user_mode(regs))
9148@@ -1554,10 +1554,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
9149 newsp = stack[0];
9150 ip = stack[STACK_FRAME_LR_SAVE];
9151 if (!firstframe || ip != lr) {
9152- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
9153+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
9154 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
9155 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
9156- printk(" (%pS)",
9157+ printk(" (%pA)",
9158 (void *)current->ret_stack[curr_frame].ret);
9159 curr_frame--;
9160 }
9161@@ -1577,7 +1577,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
9162 struct pt_regs *regs = (struct pt_regs *)
9163 (sp + STACK_FRAME_OVERHEAD);
9164 lr = regs->link;
9165- printk("--- Exception: %lx at %pS\n LR = %pS\n",
9166+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
9167 regs->trap, (void *)regs->nip, (void *)lr);
9168 firstframe = 1;
9169 }
9170@@ -1613,58 +1613,3 @@ void notrace __ppc64_runlatch_off(void)
9171 mtspr(SPRN_CTRLT, ctrl);
9172 }
9173 #endif /* CONFIG_PPC64 */
9174-
9175-unsigned long arch_align_stack(unsigned long sp)
9176-{
9177- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9178- sp -= get_random_int() & ~PAGE_MASK;
9179- return sp & ~0xf;
9180-}
9181-
9182-static inline unsigned long brk_rnd(void)
9183-{
9184- unsigned long rnd = 0;
9185-
9186- /* 8MB for 32bit, 1GB for 64bit */
9187- if (is_32bit_task())
9188- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
9189- else
9190- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
9191-
9192- return rnd << PAGE_SHIFT;
9193-}
9194-
9195-unsigned long arch_randomize_brk(struct mm_struct *mm)
9196-{
9197- unsigned long base = mm->brk;
9198- unsigned long ret;
9199-
9200-#ifdef CONFIG_PPC_STD_MMU_64
9201- /*
9202- * If we are using 1TB segments and we are allowed to randomise
9203- * the heap, we can put it above 1TB so it is backed by a 1TB
9204- * segment. Otherwise the heap will be in the bottom 1TB
9205- * which always uses 256MB segments and this may result in a
9206- * performance penalty.
9207- */
9208- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
9209- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
9210-#endif
9211-
9212- ret = PAGE_ALIGN(base + brk_rnd());
9213-
9214- if (ret < mm->brk)
9215- return mm->brk;
9216-
9217- return ret;
9218-}
9219-
9220-unsigned long randomize_et_dyn(unsigned long base)
9221-{
9222- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
9223-
9224- if (ret < base)
9225- return base;
9226-
9227- return ret;
9228-}
9229diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
9230index 2e3d2bf..35df241 100644
9231--- a/arch/powerpc/kernel/ptrace.c
9232+++ b/arch/powerpc/kernel/ptrace.c
9233@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
9234 return ret;
9235 }
9236
9237+#ifdef CONFIG_GRKERNSEC_SETXID
9238+extern void gr_delayed_cred_worker(void);
9239+#endif
9240+
9241 /*
9242 * We must return the syscall number to actually look up in the table.
9243 * This can be -1L to skip running any syscall at all.
9244@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
9245
9246 secure_computing_strict(regs->gpr[0]);
9247
9248+#ifdef CONFIG_GRKERNSEC_SETXID
9249+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9250+ gr_delayed_cred_worker();
9251+#endif
9252+
9253 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
9254 tracehook_report_syscall_entry(regs))
9255 /*
9256@@ -1808,6 +1817,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
9257 {
9258 int step;
9259
9260+#ifdef CONFIG_GRKERNSEC_SETXID
9261+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9262+ gr_delayed_cred_worker();
9263+#endif
9264+
9265 audit_syscall_exit(regs);
9266
9267 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9268diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
9269index 1bc5a17..910d3f3 100644
9270--- a/arch/powerpc/kernel/signal_32.c
9271+++ b/arch/powerpc/kernel/signal_32.c
9272@@ -1012,7 +1012,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
9273 /* Save user registers on the stack */
9274 frame = &rt_sf->uc.uc_mcontext;
9275 addr = frame;
9276- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
9277+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9278 sigret = 0;
9279 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
9280 } else {
9281diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
9282index 97c1e4b..f427f81 100644
9283--- a/arch/powerpc/kernel/signal_64.c
9284+++ b/arch/powerpc/kernel/signal_64.c
9285@@ -755,7 +755,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
9286 current->thread.fp_state.fpscr = 0;
9287
9288 /* Set up to return from userspace. */
9289- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
9290+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9291 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
9292 } else {
9293 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
9294diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
9295index 239f1cd..5359f76 100644
9296--- a/arch/powerpc/kernel/traps.c
9297+++ b/arch/powerpc/kernel/traps.c
9298@@ -142,6 +142,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
9299 return flags;
9300 }
9301
9302+extern void gr_handle_kernel_exploit(void);
9303+
9304 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9305 int signr)
9306 {
9307@@ -191,6 +193,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9308 panic("Fatal exception in interrupt");
9309 if (panic_on_oops)
9310 panic("Fatal exception");
9311+
9312+ gr_handle_kernel_exploit();
9313+
9314 do_exit(signr);
9315 }
9316
9317diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
9318index ce74c33..0803371 100644
9319--- a/arch/powerpc/kernel/vdso.c
9320+++ b/arch/powerpc/kernel/vdso.c
9321@@ -35,6 +35,7 @@
9322 #include <asm/vdso.h>
9323 #include <asm/vdso_datapage.h>
9324 #include <asm/setup.h>
9325+#include <asm/mman.h>
9326
9327 #undef DEBUG
9328
9329@@ -221,7 +222,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9330 vdso_base = VDSO32_MBASE;
9331 #endif
9332
9333- current->mm->context.vdso_base = 0;
9334+ current->mm->context.vdso_base = ~0UL;
9335
9336 /* vDSO has a problem and was disabled, just don't "enable" it for the
9337 * process
9338@@ -241,7 +242,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9339 vdso_base = get_unmapped_area(NULL, vdso_base,
9340 (vdso_pages << PAGE_SHIFT) +
9341 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
9342- 0, 0);
9343+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
9344 if (IS_ERR_VALUE(vdso_base)) {
9345 rc = vdso_base;
9346 goto fail_mmapsem;
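
signal_32.c, signal_64.c and vdso.c above all switch the "no vdso" sentinel from 0 to ~0UL. The likely rationale, assumed here: once mmap placement is randomized, address 0 stops being an unambiguous "not mapped" marker, whereas ~0UL can never be a page-aligned mapping base. A trivial sketch of the sentinel pattern:

#include <stdio.h>

#define VDSO_UNMAPPED (~0UL)   /* the sentinel the patch adopts */

static unsigned long vdso_base = VDSO_UNMAPPED;

/* "base != 0" cannot distinguish a vdso at page zero from no vdso at
 * all; "base != ~0UL" can, since ~0UL is never page-aligned. */
static int vdso_mapped(void)
{
	return vdso_base != VDSO_UNMAPPED;
}

int main(void)
{
	printf("mapped? %d\n", vdso_mapped());
	vdso_base = 0;                 /* vdso legitimately at page zero */
	printf("mapped? %d\n", vdso_mapped());
	return 0;
}
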
9347diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
9348index 61c738a..b1092d6 100644
9349--- a/arch/powerpc/kvm/powerpc.c
9350+++ b/arch/powerpc/kvm/powerpc.c
9351@@ -1195,7 +1195,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
9352 }
9353 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
9354
9355-int kvm_arch_init(void *opaque)
9356+int kvm_arch_init(const void *opaque)
9357 {
9358 return 0;
9359 }
9360diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
9361index 5eea6f3..5d10396 100644
9362--- a/arch/powerpc/lib/usercopy_64.c
9363+++ b/arch/powerpc/lib/usercopy_64.c
9364@@ -9,22 +9,6 @@
9365 #include <linux/module.h>
9366 #include <asm/uaccess.h>
9367
9368-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9369-{
9370- if (likely(access_ok(VERIFY_READ, from, n)))
9371- n = __copy_from_user(to, from, n);
9372- else
9373- memset(to, 0, n);
9374- return n;
9375-}
9376-
9377-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9378-{
9379- if (likely(access_ok(VERIFY_WRITE, to, n)))
9380- n = __copy_to_user(to, from, n);
9381- return n;
9382-}
9383-
9384 unsigned long copy_in_user(void __user *to, const void __user *from,
9385 unsigned long n)
9386 {
9387@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
9388 return n;
9389 }
9390
9391-EXPORT_SYMBOL(copy_from_user);
9392-EXPORT_SYMBOL(copy_to_user);
9393 EXPORT_SYMBOL(copy_in_user);
9394
9395diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
9396index 51ab9e7..7d3c78b 100644
9397--- a/arch/powerpc/mm/fault.c
9398+++ b/arch/powerpc/mm/fault.c
9399@@ -33,6 +33,10 @@
9400 #include <linux/magic.h>
9401 #include <linux/ratelimit.h>
9402 #include <linux/context_tracking.h>
9403+#include <linux/slab.h>
9404+#include <linux/pagemap.h>
9405+#include <linux/compiler.h>
9406+#include <linux/unistd.h>
9407
9408 #include <asm/firmware.h>
9409 #include <asm/page.h>
9410@@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
9411 }
9412 #endif
9413
9414+#ifdef CONFIG_PAX_PAGEEXEC
9415+/*
9416+ * PaX: decide what to do with offenders (regs->nip = fault address)
9417+ *
9418+ * returns 1 when task should be killed
9419+ */
9420+static int pax_handle_fetch_fault(struct pt_regs *regs)
9421+{
9422+ return 1;
9423+}
9424+
9425+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9426+{
9427+ unsigned long i;
9428+
9429+ printk(KERN_ERR "PAX: bytes at PC: ");
9430+ for (i = 0; i < 5; i++) {
9431+ unsigned int c;
9432+ if (get_user(c, (unsigned int __user *)pc+i))
9433+ printk(KERN_CONT "???????? ");
9434+ else
9435+ printk(KERN_CONT "%08x ", c);
9436+ }
9437+ printk("\n");
9438+}
9439+#endif
9440+
9441 /*
9442 * Check whether the instruction at regs->nip is a store using
9443 * an update addressing form which will update r1.
9444@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
9445 * indicate errors in DSISR but can validly be set in SRR1.
9446 */
9447 if (trap == 0x400)
9448- error_code &= 0x48200000;
9449+ error_code &= 0x58200000;
9450 else
9451 is_write = error_code & DSISR_ISSTORE;
9452 #else
9453@@ -378,7 +409,7 @@ good_area:
9454 * "undefined". Of those that can be set, this is the only
9455 * one which seems bad.
9456 */
9457- if (error_code & 0x10000000)
9458+ if (error_code & DSISR_GUARDED)
9459 /* Guarded storage error. */
9460 goto bad_area;
9461 #endif /* CONFIG_8xx */
9462@@ -393,7 +424,7 @@ good_area:
9463 * processors use the same I/D cache coherency mechanism
9464 * as embedded.
9465 */
9466- if (error_code & DSISR_PROTFAULT)
9467+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
9468 goto bad_area;
9469 #endif /* CONFIG_PPC_STD_MMU */
9470
9471@@ -483,6 +514,23 @@ bad_area:
9472 bad_area_nosemaphore:
9473 /* User mode accesses cause a SIGSEGV */
9474 if (user_mode(regs)) {
9475+
9476+#ifdef CONFIG_PAX_PAGEEXEC
9477+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
9478+#ifdef CONFIG_PPC_STD_MMU
9479+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
9480+#else
9481+ if (is_exec && regs->nip == address) {
9482+#endif
9483+ switch (pax_handle_fetch_fault(regs)) {
9484+ }
9485+
9486+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
9487+ do_group_exit(SIGKILL);
9488+ }
9489+ }
9490+#endif
9491+
9492 _exception(SIGSEGV, regs, code, address);
9493 goto bail;
9494 }
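
In do_page_fault() the instruction-storage-interrupt mask widens from 0x48200000 to 0x58200000, which is exactly the old mask plus the newly defined DSISR_GUARDED bit, so guarded-storage fetch faults now reach the PAGEEXEC handling below. The bit arithmetic, with an illustrative name for the one bit the shown headers leave anonymous:

#include <stdio.h>

#define DSISR_NOHPTE    0x40000000u
#define DSISR_GUARDED   0x10000000u
#define DSISR_PROTFAULT 0x08000000u
#define SRR1_ISI_PROT   0x00200000u   /* illustrative name for the remaining bit */

int main(void)
{
	unsigned int old_mask = DSISR_NOHPTE | DSISR_PROTFAULT | SRR1_ISI_PROT;
	unsigned int new_mask = old_mask | DSISR_GUARDED;

	printf("old %#x -> new %#x (added %#x)\n",
	       old_mask, new_mask, new_mask & ~old_mask);
	return 0;
}
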
9495diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
9496index cb8bdbe..cde4bc7 100644
9497--- a/arch/powerpc/mm/mmap.c
9498+++ b/arch/powerpc/mm/mmap.c
9499@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
9500 return sysctl_legacy_va_layout;
9501 }
9502
9503-static unsigned long mmap_rnd(void)
9504+static unsigned long mmap_rnd(struct mm_struct *mm)
9505 {
9506 unsigned long rnd = 0;
9507
9508+#ifdef CONFIG_PAX_RANDMMAP
9509+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9510+#endif
9511+
9512 if (current->flags & PF_RANDOMIZE) {
9513 /* 8MB for 32bit, 1GB for 64bit */
9514 if (is_32bit_task())
9515@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
9516 return rnd << PAGE_SHIFT;
9517 }
9518
9519-static inline unsigned long mmap_base(void)
9520+static inline unsigned long mmap_base(struct mm_struct *mm)
9521 {
9522 unsigned long gap = rlimit(RLIMIT_STACK);
9523
9524@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
9525 else if (gap > MAX_GAP)
9526 gap = MAX_GAP;
9527
9528- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
9529+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
9530 }
9531
9532 /*
9533@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9534 */
9535 if (mmap_is_legacy()) {
9536 mm->mmap_base = TASK_UNMAPPED_BASE;
9537+
9538+#ifdef CONFIG_PAX_RANDMMAP
9539+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9540+ mm->mmap_base += mm->delta_mmap;
9541+#endif
9542+
9543 mm->get_unmapped_area = arch_get_unmapped_area;
9544 } else {
9545- mm->mmap_base = mmap_base();
9546+ mm->mmap_base = mmap_base(mm);
9547+
9548+#ifdef CONFIG_PAX_RANDMMAP
9549+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9550+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9551+#endif
9552+
9553 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9554 }
9555 }
9556diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9557index b0c75cc..ef7fb93 100644
9558--- a/arch/powerpc/mm/slice.c
9559+++ b/arch/powerpc/mm/slice.c
9560@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9561 if ((mm->task_size - len) < addr)
9562 return 0;
9563 vma = find_vma(mm, addr);
9564- return (!vma || (addr + len) <= vma->vm_start);
9565+ return check_heap_stack_gap(vma, addr, len, 0);
9566 }
9567
9568 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9569@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9570 info.align_offset = 0;
9571
9572 addr = TASK_UNMAPPED_BASE;
9573+
9574+#ifdef CONFIG_PAX_RANDMMAP
9575+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9576+ addr += mm->delta_mmap;
9577+#endif
9578+
9579 while (addr < TASK_SIZE) {
9580 info.low_limit = addr;
9581 if (!slice_scan_available(addr, available, 1, &addr))
9582@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
9583 if (fixed && addr > (mm->task_size - len))
9584 return -ENOMEM;
9585
9586+#ifdef CONFIG_PAX_RANDMMAP
9587+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
9588+ addr = 0;
9589+#endif
9590+
9591 /* If hint, make sure it matches our alignment restrictions */
9592 if (!fixed && addr) {
9593 addr = _ALIGN_UP(addr, 1ul << pshift);
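
slice_area_is_free() now defers to check_heap_stack_gap() instead of the bare addr + len <= vma->vm_start test. That helper is defined elsewhere in this patch; the sketch below only assumes its rough semantics: the same end-before-next-VMA test, plus a refusal to place a mapping (and an extra per-call offset) too close under a downward-growing stack. The gap size and VMA layout are invented for the demo:

#include <stdio.h>

struct vma { unsigned long start, end; int is_stack; };

#define HEAP_STACK_GAP (64UL * 1024)   /* assumed gap size */

/* Assumed semantics of check_heap_stack_gap(): 1 means the request
 * fits, 0 means it collides with or crowds the next VMA. */
static int check_heap_stack_gap(const struct vma *next, unsigned long addr,
				unsigned long len, unsigned long offset)
{
	if (!next)
		return 1;
	if (addr + len + offset > next->start)
		return 0;
	if (next->is_stack && next->start - (addr + len) < HEAP_STACK_GAP)
		return 0;
	return 1;
}

int main(void)
{
	struct vma stack = { 0x7f0000000000UL, 0x7f0000100000UL, 1 };

	printf("%d\n", check_heap_stack_gap(&stack, 0x7efffffff000UL, 0x1000, 0));
	printf("%d\n", check_heap_stack_gap(&stack, 0x7efff0000000UL, 0x1000, 0));
	return 0;
}
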
9594diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9595index 4278acf..67fd0e6 100644
9596--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9597+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9598@@ -400,8 +400,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
9599 }
9600
9601 static struct pci_ops scc_pciex_pci_ops = {
9602- scc_pciex_read_config,
9603- scc_pciex_write_config,
9604+ .read = scc_pciex_read_config,
9605+ .write = scc_pciex_write_config,
9606 };
9607
9608 static void pciex_clear_intr_all(unsigned int __iomem *base)
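
The scc_pciex_pci_ops conversion above is purely syntactic but worth a note: positional initializers bind by field order, so any reordering of struct pci_ops would silently swap the hooks, while designated initializers bind by name (and are what grsecurity's constification work expects throughout). A minimal illustration with a two-hook stand-in for pci_ops:

#include <stdio.h>

struct pci_ops {
	int (*read)(int where, unsigned int *val);
	int (*write)(int where, unsigned int val);
};

static int my_read(int where, unsigned int *val)
{
	(void)where;
	*val = 0;
	return 0;
}

static int my_write(int where, unsigned int val)
{
	(void)where; (void)val;
	return 0;
}

/* Each hook is bound by name: inserting or reordering members of
 * struct pci_ops can no longer silently swap read and write. */
static struct pci_ops ops = {
	.read  = my_read,
	.write = my_write,
};

int main(void)
{
	unsigned int v;
	printf("read rc=%d write rc=%d\n", ops.read(0, &v), ops.write(0, v));
	return 0;
}
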
9609diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
9610index 9098692..3d54cd1 100644
9611--- a/arch/powerpc/platforms/cell/spufs/file.c
9612+++ b/arch/powerpc/platforms/cell/spufs/file.c
9613@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9614 return VM_FAULT_NOPAGE;
9615 }
9616
9617-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
9618+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
9619 unsigned long address,
9620- void *buf, int len, int write)
9621+ void *buf, size_t len, int write)
9622 {
9623 struct spu_context *ctx = vma->vm_file->private_data;
9624 unsigned long offset = address - vma->vm_start;
9625diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
9626index fa934fe..c296056 100644
9627--- a/arch/s390/include/asm/atomic.h
9628+++ b/arch/s390/include/asm/atomic.h
9629@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
9630 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
9631 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9632
9633+#define atomic64_read_unchecked(v) atomic64_read(v)
9634+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9635+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9636+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9637+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9638+#define atomic64_inc_unchecked(v) atomic64_inc(v)
9639+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9640+#define atomic64_dec_unchecked(v) atomic64_dec(v)
9641+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9642+
9643 #endif /* __ARCH_S390_ATOMIC__ */
9644diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
9645index 19ff956..8d39cb1 100644
9646--- a/arch/s390/include/asm/barrier.h
9647+++ b/arch/s390/include/asm/barrier.h
9648@@ -37,7 +37,7 @@
9649 do { \
9650 compiletime_assert_atomic_type(*p); \
9651 barrier(); \
9652- ACCESS_ONCE(*p) = (v); \
9653+ ACCESS_ONCE_RW(*p) = (v); \
9654 } while (0)
9655
9656 #define smp_load_acquire(p) \
9657diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
9658index 4d7ccac..d03d0ad 100644
9659--- a/arch/s390/include/asm/cache.h
9660+++ b/arch/s390/include/asm/cache.h
9661@@ -9,8 +9,10 @@
9662 #ifndef __ARCH_S390_CACHE_H
9663 #define __ARCH_S390_CACHE_H
9664
9665-#define L1_CACHE_BYTES 256
9666+#include <linux/const.h>
9667+
9668 #define L1_CACHE_SHIFT 8
9669+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9670 #define NET_SKB_PAD 32
9671
9672 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9673diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
9674index 78f4f87..598ce39 100644
9675--- a/arch/s390/include/asm/elf.h
9676+++ b/arch/s390/include/asm/elf.h
9677@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
9678 the loader. We need to make sure that it is out of the way of the program
9679 that it will "exec", and that there is sufficient room for the brk. */
9680
9681-extern unsigned long randomize_et_dyn(unsigned long base);
9682-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
9683+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
9684+
9685+#ifdef CONFIG_PAX_ASLR
9686+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
9687+
9688+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9689+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9690+#endif
9691
9692 /* This yields a mask that user programs can use to figure out what
9693 instruction set this CPU supports. */
9694@@ -222,9 +228,6 @@ struct linux_binprm;
9695 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
9696 int arch_setup_additional_pages(struct linux_binprm *, int);
9697
9698-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9699-#define arch_randomize_brk arch_randomize_brk
9700-
9701 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
9702
9703 #endif
9704diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
9705index c4a93d6..4d2a9b4 100644
9706--- a/arch/s390/include/asm/exec.h
9707+++ b/arch/s390/include/asm/exec.h
9708@@ -7,6 +7,6 @@
9709 #ifndef __ASM_EXEC_H
9710 #define __ASM_EXEC_H
9711
9712-extern unsigned long arch_align_stack(unsigned long sp);
9713+#define arch_align_stack(x) ((x) & ~0xfUL)
9714
9715 #endif /* __ASM_EXEC_H */
9716diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
9717index cd4c68e..6764641 100644
9718--- a/arch/s390/include/asm/uaccess.h
9719+++ b/arch/s390/include/asm/uaccess.h
9720@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
9721 __range_ok((unsigned long)(addr), (size)); \
9722 })
9723
9724+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9725 #define access_ok(type, addr, size) __access_ok(addr, size)
9726
9727 /*
9728@@ -275,6 +276,10 @@ static inline unsigned long __must_check
9729 copy_to_user(void __user *to, const void *from, unsigned long n)
9730 {
9731 might_fault();
9732+
9733+ if ((long)n < 0)
9734+ return n;
9735+
9736 return __copy_to_user(to, from, n);
9737 }
9738
9739@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
9740 static inline unsigned long __must_check
9741 copy_from_user(void *to, const void __user *from, unsigned long n)
9742 {
9743- unsigned int sz = __compiletime_object_size(to);
9744+ size_t sz = __compiletime_object_size(to);
9745
9746 might_fault();
9747- if (unlikely(sz != -1 && sz < n)) {
9748+
9749+ if ((long)n < 0)
9750+ return n;
9751+
9752+ if (unlikely(sz != (size_t)-1 && sz < n)) {
9753 copy_from_user_overflow();
9754 return n;
9755 }
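
The s390 copy_from_user() fix above widens sz from unsigned int to size_t. __compiletime_object_size() reports (size_t)-1 for "unknown", and on 64-bit a genuine object size can exceed 32 bits, so the old truncating assignment could misreport a large object as tiny and flag valid copies as overflows. A userspace demonstration, assuming a 64-bit size_t:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t real = 0x100000010UL;     /* object of 4 GiB + 16 bytes */
	unsigned int trunc = real;       /* old code: unsigned int sz */
	size_t n = 4096;                 /* a perfectly valid copy size */

	/* Truncated, the object appears to be 16 bytes, so the overflow
	 * check (sz != -1 && sz < n) fires spuriously. */
	printf("trunc=%u  false-positive=%d\n",
	       trunc, trunc != (unsigned int)-1 && trunc < n);

	/* Widened to size_t, the comparison sees the true size. */
	printf("real=%zu  ok=%d\n", real, !(real != (size_t)-1 && real < n));
	return 0;
}
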
9756diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
9757index b89b591..fd9609d 100644
9758--- a/arch/s390/kernel/module.c
9759+++ b/arch/s390/kernel/module.c
9760@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
9761
9762 /* Increase core size by size of got & plt and set start
9763 offsets for got and plt. */
9764- me->core_size = ALIGN(me->core_size, 4);
9765- me->arch.got_offset = me->core_size;
9766- me->core_size += me->arch.got_size;
9767- me->arch.plt_offset = me->core_size;
9768- me->core_size += me->arch.plt_size;
9769+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
9770+ me->arch.got_offset = me->core_size_rw;
9771+ me->core_size_rw += me->arch.got_size;
9772+ me->arch.plt_offset = me->core_size_rx;
9773+ me->core_size_rx += me->arch.plt_size;
9774 return 0;
9775 }
9776
9777@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9778 if (info->got_initialized == 0) {
9779 Elf_Addr *gotent;
9780
9781- gotent = me->module_core + me->arch.got_offset +
9782+ gotent = me->module_core_rw + me->arch.got_offset +
9783 info->got_offset;
9784 *gotent = val;
9785 info->got_initialized = 1;
9786@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9787 rc = apply_rela_bits(loc, val, 0, 64, 0);
9788 else if (r_type == R_390_GOTENT ||
9789 r_type == R_390_GOTPLTENT) {
9790- val += (Elf_Addr) me->module_core - loc;
9791+ val += (Elf_Addr) me->module_core_rw - loc;
9792 rc = apply_rela_bits(loc, val, 1, 32, 1);
9793 }
9794 break;
9795@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9796 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
9797 if (info->plt_initialized == 0) {
9798 unsigned int *ip;
9799- ip = me->module_core + me->arch.plt_offset +
9800+ ip = me->module_core_rx + me->arch.plt_offset +
9801 info->plt_offset;
9802 #ifndef CONFIG_64BIT
9803 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
9804@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9805 val - loc + 0xffffUL < 0x1ffffeUL) ||
9806 (r_type == R_390_PLT32DBL &&
9807 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
9808- val = (Elf_Addr) me->module_core +
9809+ val = (Elf_Addr) me->module_core_rx +
9810 me->arch.plt_offset +
9811 info->plt_offset;
9812 val += rela->r_addend - loc;
9813@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9814 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
9815 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
9816 val = val + rela->r_addend -
9817- ((Elf_Addr) me->module_core + me->arch.got_offset);
9818+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
9819 if (r_type == R_390_GOTOFF16)
9820 rc = apply_rela_bits(loc, val, 0, 16, 0);
9821 else if (r_type == R_390_GOTOFF32)
9822@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9823 break;
9824 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
9825 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
9826- val = (Elf_Addr) me->module_core + me->arch.got_offset +
9827+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
9828 rela->r_addend - loc;
9829 if (r_type == R_390_GOTPC)
9830 rc = apply_rela_bits(loc, val, 1, 32, 0);
9831diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
9832index 93b9ca4..4ea1454 100644
9833--- a/arch/s390/kernel/process.c
9834+++ b/arch/s390/kernel/process.c
9835@@ -242,37 +242,3 @@ unsigned long get_wchan(struct task_struct *p)
9836 }
9837 return 0;
9838 }
9839-
9840-unsigned long arch_align_stack(unsigned long sp)
9841-{
9842- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9843- sp -= get_random_int() & ~PAGE_MASK;
9844- return sp & ~0xf;
9845-}
9846-
9847-static inline unsigned long brk_rnd(void)
9848-{
9849- /* 8MB for 32bit, 1GB for 64bit */
9850- if (is_32bit_task())
9851- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
9852- else
9853- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
9854-}
9855-
9856-unsigned long arch_randomize_brk(struct mm_struct *mm)
9857-{
9858- unsigned long ret;
9859-
9860- ret = PAGE_ALIGN(mm->brk + brk_rnd());
9861- return (ret > mm->brk) ? ret : mm->brk;
9862-}
9863-
9864-unsigned long randomize_et_dyn(unsigned long base)
9865-{
9866- unsigned long ret;
9867-
9868- if (!(current->flags & PF_RANDOMIZE))
9869- return base;
9870- ret = PAGE_ALIGN(base + brk_rnd());
9871- return (ret > base) ? ret : base;
9872-}
9873diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
9874index 9b436c2..54fbf0a 100644
9875--- a/arch/s390/mm/mmap.c
9876+++ b/arch/s390/mm/mmap.c
9877@@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9878 */
9879 if (mmap_is_legacy()) {
9880 mm->mmap_base = mmap_base_legacy();
9881+
9882+#ifdef CONFIG_PAX_RANDMMAP
9883+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9884+ mm->mmap_base += mm->delta_mmap;
9885+#endif
9886+
9887 mm->get_unmapped_area = arch_get_unmapped_area;
9888 } else {
9889 mm->mmap_base = mmap_base();
9890+
9891+#ifdef CONFIG_PAX_RANDMMAP
9892+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9893+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9894+#endif
9895+
9896 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9897 }
9898 }
9899@@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9900 */
9901 if (mmap_is_legacy()) {
9902 mm->mmap_base = mmap_base_legacy();
9903+
9904+#ifdef CONFIG_PAX_RANDMMAP
9905+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9906+ mm->mmap_base += mm->delta_mmap;
9907+#endif
9908+
9909 mm->get_unmapped_area = s390_get_unmapped_area;
9910 } else {
9911 mm->mmap_base = mmap_base();
9912+
9913+#ifdef CONFIG_PAX_RANDMMAP
9914+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9915+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9916+#endif
9917+
9918 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
9919 }
9920 }
9921diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
9922index ae3d59f..f65f075 100644
9923--- a/arch/score/include/asm/cache.h
9924+++ b/arch/score/include/asm/cache.h
9925@@ -1,7 +1,9 @@
9926 #ifndef _ASM_SCORE_CACHE_H
9927 #define _ASM_SCORE_CACHE_H
9928
9929+#include <linux/const.h>
9930+
9931 #define L1_CACHE_SHIFT 4
9932-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9933+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9934
9935 #endif /* _ASM_SCORE_CACHE_H */
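
The _AC() wrapper is the stock macro from include/uapi/linux/const.h, not something this patch invents; it expands roughly as below, so L1_CACHE_BYTES gets unsigned-long width in C size arithmetic while the header still assembles when pulled into .S files. The sh and sparc cache headers receive the same conversion further down.

	#ifdef __ASSEMBLY__
	#define _AC(X,Y)	X		/* assembler sees: 1 << 4 */
	#else
	#define __AC(X,Y)	(X##Y)
	#define _AC(X,Y)	__AC(X,Y)	/* C sees: (1UL << 4) */
	#endif
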
9936diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
9937index f9f3cd5..58ff438 100644
9938--- a/arch/score/include/asm/exec.h
9939+++ b/arch/score/include/asm/exec.h
9940@@ -1,6 +1,6 @@
9941 #ifndef _ASM_SCORE_EXEC_H
9942 #define _ASM_SCORE_EXEC_H
9943
9944-extern unsigned long arch_align_stack(unsigned long sp);
9945+#define arch_align_stack(x) (x)
9946
9947 #endif /* _ASM_SCORE_EXEC_H */
9948diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
9949index a1519ad3..e8ac1ff 100644
9950--- a/arch/score/kernel/process.c
9951+++ b/arch/score/kernel/process.c
9952@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
9953
9954 return task_pt_regs(task)->cp0_epc;
9955 }
9956-
9957-unsigned long arch_align_stack(unsigned long sp)
9958-{
9959- return sp;
9960-}
9961diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
9962index ef9e555..331bd29 100644
9963--- a/arch/sh/include/asm/cache.h
9964+++ b/arch/sh/include/asm/cache.h
9965@@ -9,10 +9,11 @@
9966 #define __ASM_SH_CACHE_H
9967 #ifdef __KERNEL__
9968
9969+#include <linux/const.h>
9970 #include <linux/init.h>
9971 #include <cpu/cache.h>
9972
9973-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9974+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9975
9976 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9977
9978diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
9979index 6777177..cb5e44f 100644
9980--- a/arch/sh/mm/mmap.c
9981+++ b/arch/sh/mm/mmap.c
9982@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9983 struct mm_struct *mm = current->mm;
9984 struct vm_area_struct *vma;
9985 int do_colour_align;
9986+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9987 struct vm_unmapped_area_info info;
9988
9989 if (flags & MAP_FIXED) {
9990@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9991 if (filp || (flags & MAP_SHARED))
9992 do_colour_align = 1;
9993
9994+#ifdef CONFIG_PAX_RANDMMAP
9995+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9996+#endif
9997+
9998 if (addr) {
9999 if (do_colour_align)
10000 addr = COLOUR_ALIGN(addr, pgoff);
10001@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
10002 addr = PAGE_ALIGN(addr);
10003
10004 vma = find_vma(mm, addr);
10005- if (TASK_SIZE - len >= addr &&
10006- (!vma || addr + len <= vma->vm_start))
10007+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10008 return addr;
10009 }
10010
10011 info.flags = 0;
10012 info.length = len;
10013- info.low_limit = TASK_UNMAPPED_BASE;
10014+ info.low_limit = mm->mmap_base;
10015 info.high_limit = TASK_SIZE;
10016 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
10017 info.align_offset = pgoff << PAGE_SHIFT;
10018@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10019 struct mm_struct *mm = current->mm;
10020 unsigned long addr = addr0;
10021 int do_colour_align;
10022+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10023 struct vm_unmapped_area_info info;
10024
10025 if (flags & MAP_FIXED) {
10026@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10027 if (filp || (flags & MAP_SHARED))
10028 do_colour_align = 1;
10029
10030+#ifdef CONFIG_PAX_RANDMMAP
10031+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10032+#endif
10033+
10034 /* requesting a specific address */
10035 if (addr) {
10036 if (do_colour_align)
10037@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10038 addr = PAGE_ALIGN(addr);
10039
10040 vma = find_vma(mm, addr);
10041- if (TASK_SIZE - len >= addr &&
10042- (!vma || addr + len <= vma->vm_start))
10043+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10044 return addr;
10045 }
10046
10047@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10048 VM_BUG_ON(addr != -ENOMEM);
10049 info.flags = 0;
10050 info.low_limit = TASK_UNMAPPED_BASE;
10051+
10052+#ifdef CONFIG_PAX_RANDMMAP
10053+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10054+ info.low_limit += mm->delta_mmap;
10055+#endif
10056+
10057 info.high_limit = TASK_SIZE;
10058 addr = vm_unmapped_area(&info);
10059 }
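
Two recurring grsec idioms first appear here. The bare #ifdef CONFIG_PAX_RANDMMAP guard wraps only an if (!(mm->pax_flags & MF_PAX_RANDMMAP)) line, which turns the statement that follows (the whole address-hint block) into that if's body: hints are honoured only when PaX randomization is off, and with the option compiled out the guard disappears and the hint path is unconditional again. Second, the open-coded "!vma || addr + len <= vma->vm_start" test becomes check_heap_stack_gap(), which also keeps a guard gap (plus the per-thread random offset from gr_rand_threadstack_offset()) between a new mapping and any stack VMA above it. A hedged sketch of the helper's shape, since its real body is added elsewhere in the patch and the gap constant here is illustrative only:

	static bool check_heap_stack_gap_sketch(const struct vm_area_struct *vma,
						unsigned long addr, unsigned long len,
						unsigned long offset)
	{
		unsigned long gap = 0;

		if (!vma)
			return true;			/* nothing above the range */
		if (vma->vm_flags & VM_GROWSDOWN)	/* stack above: keep slack */
			gap = PAGE_SIZE /* illustrative gap */ + offset;

		return addr + len + gap <= vma->vm_start;
	}
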
10060diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
10061index bb894c8..8141d5c 100644
10062--- a/arch/sparc/include/asm/atomic_64.h
10063+++ b/arch/sparc/include/asm/atomic_64.h
10064@@ -15,18 +15,40 @@
10065 #define ATOMIC64_INIT(i) { (i) }
10066
10067 #define atomic_read(v) (*(volatile int *)&(v)->counter)
10068+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
10069+{
10070+ return v->counter;
10071+}
10072 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
10073+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
10074+{
10075+ return v->counter;
10076+}
10077
10078 #define atomic_set(v, i) (((v)->counter) = i)
10079+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
10080+{
10081+ v->counter = i;
10082+}
10083 #define atomic64_set(v, i) (((v)->counter) = i)
10084+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
10085+{
10086+ v->counter = i;
10087+}
10088
10089 void atomic_add(int, atomic_t *);
10090+void atomic_add_unchecked(int, atomic_unchecked_t *);
10091 void atomic64_add(long, atomic64_t *);
10092+void atomic64_add_unchecked(long, atomic64_unchecked_t *);
10093 void atomic_sub(int, atomic_t *);
10094+void atomic_sub_unchecked(int, atomic_unchecked_t *);
10095 void atomic64_sub(long, atomic64_t *);
10096+void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
10097
10098 int atomic_add_ret(int, atomic_t *);
10099+int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
10100 long atomic64_add_ret(long, atomic64_t *);
10101+long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
10102 int atomic_sub_ret(int, atomic_t *);
10103 long atomic64_sub_ret(long, atomic64_t *);
10104
10105@@ -34,13 +56,29 @@ long atomic64_sub_ret(long, atomic64_t *);
10106 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
10107
10108 #define atomic_inc_return(v) atomic_add_ret(1, v)
10109+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
10110+{
10111+ return atomic_add_ret_unchecked(1, v);
10112+}
10113 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
10114+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10115+{
10116+ return atomic64_add_ret_unchecked(1, v);
10117+}
10118
10119 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
10120 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
10121
10122 #define atomic_add_return(i, v) atomic_add_ret(i, v)
10123+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
10124+{
10125+ return atomic_add_ret_unchecked(i, v);
10126+}
10127 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
10128+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
10129+{
10130+ return atomic64_add_ret_unchecked(i, v);
10131+}
10132
10133 /*
10134 * atomic_inc_and_test - increment and test
10135@@ -51,6 +89,10 @@ long atomic64_sub_ret(long, atomic64_t *);
10136 * other cases.
10137 */
10138 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
10139+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
10140+{
10141+ return atomic_inc_return_unchecked(v) == 0;
10142+}
10143 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
10144
10145 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
10146@@ -60,25 +102,60 @@ long atomic64_sub_ret(long, atomic64_t *);
10147 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
10148
10149 #define atomic_inc(v) atomic_add(1, v)
10150+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
10151+{
10152+ atomic_add_unchecked(1, v);
10153+}
10154 #define atomic64_inc(v) atomic64_add(1, v)
10155+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
10156+{
10157+ atomic64_add_unchecked(1, v);
10158+}
10159
10160 #define atomic_dec(v) atomic_sub(1, v)
10161+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
10162+{
10163+ atomic_sub_unchecked(1, v);
10164+}
10165 #define atomic64_dec(v) atomic64_sub(1, v)
10166+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
10167+{
10168+ atomic64_sub_unchecked(1, v);
10169+}
10170
10171 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
10172 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
10173
10174 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
10175+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
10176+{
10177+ return cmpxchg(&v->counter, old, new);
10178+}
10179 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
10180+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
10181+{
10182+ return xchg(&v->counter, new);
10183+}
10184
10185 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10186 {
10187- int c, old;
10188+ int c, old, new;
10189 c = atomic_read(v);
10190 for (;;) {
10191- if (unlikely(c == (u)))
10192+ if (unlikely(c == u))
10193 break;
10194- old = atomic_cmpxchg((v), c, c + (a));
10195+
10196+ asm volatile("addcc %2, %0, %0\n"
10197+
10198+#ifdef CONFIG_PAX_REFCOUNT
10199+ "tvs %%icc, 6\n"
10200+#endif
10201+
10202+ : "=r" (new)
10203+ : "0" (c), "ir" (a)
10204+ : "cc");
10205+
10206+ old = atomic_cmpxchg(v, c, new);
10207 if (likely(old == c))
10208 break;
10209 c = old;
10210@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10211 #define atomic64_cmpxchg(v, o, n) \
10212 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
10213 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
10214+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10215+{
10216+ return xchg(&v->counter, new);
10217+}
10218
10219 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10220 {
10221- long c, old;
10222+ long c, old, new;
10223 c = atomic64_read(v);
10224 for (;;) {
10225- if (unlikely(c == (u)))
10226+ if (unlikely(c == u))
10227 break;
10228- old = atomic64_cmpxchg((v), c, c + (a));
10229+
10230+ asm volatile("addcc %2, %0, %0\n"
10231+
10232+#ifdef CONFIG_PAX_REFCOUNT
10233+ "tvs %%xcc, 6\n"
10234+#endif
10235+
10236+ : "=r" (new)
10237+ : "0" (c), "ir" (a)
10238+ : "cc");
10239+
10240+ old = atomic64_cmpxchg(v, c, new);
10241 if (likely(old == c))
10242 break;
10243 c = old;
10244 }
10245- return c != (u);
10246+ return c != u;
10247 }
10248
10249 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
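
The scheme used throughout this header (and in the .S implementations further down): checked atomics switch add/sub to addcc/subcc so the condition codes track signed overflow, then tvs %icc, 6 (tvs %xcc for the 64-bit ops) raises software trap 6 when the overflow flag is set; traps_64.c below routes trap level 6 to pax_report_refcount_overflow(). The new *_unchecked variants are the same operations minus the trap, for counters where wraparound is intentional. Typical usage:

	static atomic_t obj_refs = ATOMIC_INIT(1);		/* refcount: must not wrap */
	static atomic_unchecked_t cache_hits = ATOMIC_INIT(0);	/* statistic: free-running */

	static void hit(void)
	{
		atomic_inc(&obj_refs);			/* addcc + tvs: traps on overflow */
		atomic_inc_unchecked(&cache_hits);	/* plain add: may wrap silently */
	}
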
10250diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
10251index 305dcc3..7835030 100644
10252--- a/arch/sparc/include/asm/barrier_64.h
10253+++ b/arch/sparc/include/asm/barrier_64.h
10254@@ -57,7 +57,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
10255 do { \
10256 compiletime_assert_atomic_type(*p); \
10257 barrier(); \
10258- ACCESS_ONCE(*p) = (v); \
10259+ ACCESS_ONCE_RW(*p) = (v); \
10260 } while (0)
10261
10262 #define smp_load_acquire(p) \
10263diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
10264index 5bb6991..5c2132e 100644
10265--- a/arch/sparc/include/asm/cache.h
10266+++ b/arch/sparc/include/asm/cache.h
10267@@ -7,10 +7,12 @@
10268 #ifndef _SPARC_CACHE_H
10269 #define _SPARC_CACHE_H
10270
10271+#include <linux/const.h>
10272+
10273 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
10274
10275 #define L1_CACHE_SHIFT 5
10276-#define L1_CACHE_BYTES 32
10277+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10278
10279 #ifdef CONFIG_SPARC32
10280 #define SMP_CACHE_BYTES_SHIFT 5
10281diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
10282index a24e41f..47677ff 100644
10283--- a/arch/sparc/include/asm/elf_32.h
10284+++ b/arch/sparc/include/asm/elf_32.h
10285@@ -114,6 +114,13 @@ typedef struct {
10286
10287 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
10288
10289+#ifdef CONFIG_PAX_ASLR
10290+#define PAX_ELF_ET_DYN_BASE 0x10000UL
10291+
10292+#define PAX_DELTA_MMAP_LEN 16
10293+#define PAX_DELTA_STACK_LEN 16
10294+#endif
10295+
10296 /* This yields a mask that user programs can use to figure out what
10297 instruction set this cpu supports. This can NOT be done in userspace
10298 on Sparc. */
10299diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
10300index 370ca1e..d4f4a98 100644
10301--- a/arch/sparc/include/asm/elf_64.h
10302+++ b/arch/sparc/include/asm/elf_64.h
10303@@ -189,6 +189,13 @@ typedef struct {
10304 #define ELF_ET_DYN_BASE 0x0000010000000000UL
10305 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
10306
10307+#ifdef CONFIG_PAX_ASLR
10308+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
10309+
10310+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
10311+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
10312+#endif
10313+
10314 extern unsigned long sparc64_elf_hwcap;
10315 #define ELF_HWCAP sparc64_elf_hwcap
10316
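
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN are bit counts of randomization, not byte offsets. The ELF-loader portion of this patch turns them into the mm->delta_* page offsets consumed by the mmap-layout hunks, roughly as follows (pax_get_random_long() is the patch's entropy helper; the exact form may differ):

	mm->delta_mmap  = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
	mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN) - 1)) << PAGE_SHIFT;

so a 64-bit sparc task gets 28 bits of mmap and 29 bits of stack randomization, a 32-bit one 14 and 15.
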
10317diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
10318index a3890da..f6a408e 100644
10319--- a/arch/sparc/include/asm/pgalloc_32.h
10320+++ b/arch/sparc/include/asm/pgalloc_32.h
10321@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
10322 }
10323
10324 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
10325+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10326
10327 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
10328 unsigned long address)
10329diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
10330index 39a7ac4..2c9b586 100644
10331--- a/arch/sparc/include/asm/pgalloc_64.h
10332+++ b/arch/sparc/include/asm/pgalloc_64.h
10333@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
10334 }
10335
10336 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
10337+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
10338
10339 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
10340 {
10341diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
10342index 59ba6f6..4518128 100644
10343--- a/arch/sparc/include/asm/pgtable.h
10344+++ b/arch/sparc/include/asm/pgtable.h
10345@@ -5,4 +5,8 @@
10346 #else
10347 #include <asm/pgtable_32.h>
10348 #endif
10349+
10350+#define ktla_ktva(addr) (addr)
10351+#define ktva_ktla(addr) (addr)
10352+
10353 #endif
10354diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
10355index b9b91ae..950b91e 100644
10356--- a/arch/sparc/include/asm/pgtable_32.h
10357+++ b/arch/sparc/include/asm/pgtable_32.h
10358@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
10359 #define PAGE_SHARED SRMMU_PAGE_SHARED
10360 #define PAGE_COPY SRMMU_PAGE_COPY
10361 #define PAGE_READONLY SRMMU_PAGE_RDONLY
10362+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
10363+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
10364+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
10365 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
10366
10367 /* Top-level page directory - dummy used by init-mm.
10368@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
10369
10370 /* xwr */
10371 #define __P000 PAGE_NONE
10372-#define __P001 PAGE_READONLY
10373-#define __P010 PAGE_COPY
10374-#define __P011 PAGE_COPY
10375+#define __P001 PAGE_READONLY_NOEXEC
10376+#define __P010 PAGE_COPY_NOEXEC
10377+#define __P011 PAGE_COPY_NOEXEC
10378 #define __P100 PAGE_READONLY
10379 #define __P101 PAGE_READONLY
10380 #define __P110 PAGE_COPY
10381 #define __P111 PAGE_COPY
10382
10383 #define __S000 PAGE_NONE
10384-#define __S001 PAGE_READONLY
10385-#define __S010 PAGE_SHARED
10386-#define __S011 PAGE_SHARED
10387+#define __S001 PAGE_READONLY_NOEXEC
10388+#define __S010 PAGE_SHARED_NOEXEC
10389+#define __S011 PAGE_SHARED_NOEXEC
10390 #define __S100 PAGE_READONLY
10391 #define __S101 PAGE_READONLY
10392 #define __S110 PAGE_SHARED
10393diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
10394index 79da178..c2eede8 100644
10395--- a/arch/sparc/include/asm/pgtsrmmu.h
10396+++ b/arch/sparc/include/asm/pgtsrmmu.h
10397@@ -115,6 +115,11 @@
10398 SRMMU_EXEC | SRMMU_REF)
10399 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10400 SRMMU_EXEC | SRMMU_REF)
10401+
10402+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10403+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10404+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10405+
10406 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10407 SRMMU_DIRTY | SRMMU_REF)
10408
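
The __Pxwr/__Sxwr tables in pgtable_32.h above are indexed by a mapping's execute/write/read bits, so any protection without PROT_EXEC now resolves to one of these descriptors lacking SRMMU_EXEC: plain data mappings stop being executable on srmmu. Illustrative userspace view (not patch code):

	/* PROT_READ|PROT_WRITE, private -> __P011 -> PAGE_COPY_NOEXEC (no SRMMU_EXEC) */
	void *data = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* PROT_READ|PROT_EXEC           -> __P101 -> PAGE_READONLY (SRMMU_EXEC kept) */
	void *code = mmap(NULL, 4096, PROT_READ | PROT_EXEC,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
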
10409diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
10410index f5fffd8..a0669f0 100644
10411--- a/arch/sparc/include/asm/setup.h
10412+++ b/arch/sparc/include/asm/setup.h
10413@@ -53,8 +53,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
10414 void handle_ld_nf(u32 insn, struct pt_regs *regs);
10415
10416 /* init_64.c */
10417-extern atomic_t dcpage_flushes;
10418-extern atomic_t dcpage_flushes_xcall;
10419+extern atomic_unchecked_t dcpage_flushes;
10420+extern atomic_unchecked_t dcpage_flushes_xcall;
10421
10422 extern int sysctl_tsb_ratio;
10423 #endif
10424diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10425index 9689176..63c18ea 100644
10426--- a/arch/sparc/include/asm/spinlock_64.h
10427+++ b/arch/sparc/include/asm/spinlock_64.h
10428@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10429
10430 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10431
10432-static void inline arch_read_lock(arch_rwlock_t *lock)
10433+static inline void arch_read_lock(arch_rwlock_t *lock)
10434 {
10435 unsigned long tmp1, tmp2;
10436
10437 __asm__ __volatile__ (
10438 "1: ldsw [%2], %0\n"
10439 " brlz,pn %0, 2f\n"
10440-"4: add %0, 1, %1\n"
10441+"4: addcc %0, 1, %1\n"
10442+
10443+#ifdef CONFIG_PAX_REFCOUNT
10444+" tvs %%icc, 6\n"
10445+#endif
10446+
10447 " cas [%2], %0, %1\n"
10448 " cmp %0, %1\n"
10449 " bne,pn %%icc, 1b\n"
10450@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10451 " .previous"
10452 : "=&r" (tmp1), "=&r" (tmp2)
10453 : "r" (lock)
10454- : "memory");
10455+ : "memory", "cc");
10456 }
10457
10458-static int inline arch_read_trylock(arch_rwlock_t *lock)
10459+static inline int arch_read_trylock(arch_rwlock_t *lock)
10460 {
10461 int tmp1, tmp2;
10462
10463@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10464 "1: ldsw [%2], %0\n"
10465 " brlz,a,pn %0, 2f\n"
10466 " mov 0, %0\n"
10467-" add %0, 1, %1\n"
10468+" addcc %0, 1, %1\n"
10469+
10470+#ifdef CONFIG_PAX_REFCOUNT
10471+" tvs %%icc, 6\n"
10472+#endif
10473+
10474 " cas [%2], %0, %1\n"
10475 " cmp %0, %1\n"
10476 " bne,pn %%icc, 1b\n"
10477@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10478 return tmp1;
10479 }
10480
10481-static void inline arch_read_unlock(arch_rwlock_t *lock)
10482+static inline void arch_read_unlock(arch_rwlock_t *lock)
10483 {
10484 unsigned long tmp1, tmp2;
10485
10486 __asm__ __volatile__(
10487 "1: lduw [%2], %0\n"
10488-" sub %0, 1, %1\n"
10489+" subcc %0, 1, %1\n"
10490+
10491+#ifdef CONFIG_PAX_REFCOUNT
10492+" tvs %%icc, 6\n"
10493+#endif
10494+
10495 " cas [%2], %0, %1\n"
10496 " cmp %0, %1\n"
10497 " bne,pn %%xcc, 1b\n"
10498@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10499 : "memory");
10500 }
10501
10502-static void inline arch_write_lock(arch_rwlock_t *lock)
10503+static inline void arch_write_lock(arch_rwlock_t *lock)
10504 {
10505 unsigned long mask, tmp1, tmp2;
10506
10507@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10508 : "memory");
10509 }
10510
10511-static void inline arch_write_unlock(arch_rwlock_t *lock)
10512+static inline void arch_write_unlock(arch_rwlock_t *lock)
10513 {
10514 __asm__ __volatile__(
10515 " stw %%g0, [%0]"
10516@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10517 : "memory");
10518 }
10519
10520-static int inline arch_write_trylock(arch_rwlock_t *lock)
10521+static inline int arch_write_trylock(arch_rwlock_t *lock)
10522 {
10523 unsigned long mask, tmp1, tmp2, result;
10524
10525diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10526index 96efa7a..16858bf 100644
10527--- a/arch/sparc/include/asm/thread_info_32.h
10528+++ b/arch/sparc/include/asm/thread_info_32.h
10529@@ -49,6 +49,8 @@ struct thread_info {
10530 unsigned long w_saved;
10531
10532 struct restart_block restart_block;
10533+
10534+ unsigned long lowest_stack;
10535 };
10536
10537 /*
10538diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10539index a5f01ac..703b554 100644
10540--- a/arch/sparc/include/asm/thread_info_64.h
10541+++ b/arch/sparc/include/asm/thread_info_64.h
10542@@ -63,6 +63,8 @@ struct thread_info {
10543 struct pt_regs *kern_una_regs;
10544 unsigned int kern_una_insn;
10545
10546+ unsigned long lowest_stack;
10547+
10548 unsigned long fpregs[0] __attribute__ ((aligned(64)));
10549 };
10550
10551@@ -188,12 +190,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
10552 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
10553 /* flag bit 4 is available */
10554 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
10555-/* flag bit 6 is available */
10556+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
10557 #define TIF_32BIT 7 /* 32-bit binary */
10558 #define TIF_NOHZ 8 /* in adaptive nohz mode */
10559 #define TIF_SECCOMP 9 /* secure computing */
10560 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
10561 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
10562+
10563 /* NOTE: Thread flags >= 12 should be ones we have no interest
10564 * in using in assembly, else we can't use the mask as
10565 * an immediate value in instructions such as andcc.
10566@@ -213,12 +216,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
10567 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
10568 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
10569 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
10570+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
10571
10572 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
10573 _TIF_DO_NOTIFY_RESUME_MASK | \
10574 _TIF_NEED_RESCHED)
10575 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
10576
10577+#define _TIF_WORK_SYSCALL \
10578+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
10579+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
10580+
10581+
10582 /*
10583 * Thread-synchronous status.
10584 *
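
TIF_GRSEC_SETXID deliberately takes free bit 6, and _TIF_WORK_SYSCALL combines only low-numbered flags: as the header's own note says, flags >= 12 cannot serve as immediates in instructions such as andcc, and the syscalls.S hunks below test the whole mask with a single andcc. An illustrative compile-time guard for that invariant (not in the patch):

	static inline void tif_work_syscall_fits_imm13(void)
	{
		/* sparc andcc takes a signed 13-bit immediate: bits 0..11 are safe */
		BUILD_BUG_ON(_TIF_WORK_SYSCALL >> 12);
	}
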
10585diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
10586index bd56c28..4b63d83 100644
10587--- a/arch/sparc/include/asm/uaccess.h
10588+++ b/arch/sparc/include/asm/uaccess.h
10589@@ -1,5 +1,6 @@
10590 #ifndef ___ASM_SPARC_UACCESS_H
10591 #define ___ASM_SPARC_UACCESS_H
10592+
10593 #if defined(__sparc__) && defined(__arch64__)
10594 #include <asm/uaccess_64.h>
10595 #else
10596diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
10597index 9634d08..f55fe4f 100644
10598--- a/arch/sparc/include/asm/uaccess_32.h
10599+++ b/arch/sparc/include/asm/uaccess_32.h
10600@@ -250,27 +250,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
10601
10602 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
10603 {
10604- if (n && __access_ok((unsigned long) to, n))
10605+ if ((long)n < 0)
10606+ return n;
10607+
10608+ if (n && __access_ok((unsigned long) to, n)) {
10609+ if (!__builtin_constant_p(n))
10610+ check_object_size(from, n, true);
10611 return __copy_user(to, (__force void __user *) from, n);
10612- else
10613+ } else
10614 return n;
10615 }
10616
10617 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
10618 {
10619+ if ((long)n < 0)
10620+ return n;
10621+
10622+ if (!__builtin_constant_p(n))
10623+ check_object_size(from, n, true);
10624+
10625 return __copy_user(to, (__force void __user *) from, n);
10626 }
10627
10628 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
10629 {
10630- if (n && __access_ok((unsigned long) from, n))
10631+ if ((long)n < 0)
10632+ return n;
10633+
10634+ if (n && __access_ok((unsigned long) from, n)) {
10635+ if (!__builtin_constant_p(n))
10636+ check_object_size(to, n, false);
10637 return __copy_user((__force void __user *) to, from, n);
10638- else
10639+ } else
10640 return n;
10641 }
10642
10643 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
10644 {
10645+ if ((long)n < 0)
10646+ return n;
10647+
10648 return __copy_user((__force void __user *) to, from, n);
10649 }
10650
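
Both added guards recur across every architecture in this patch: (long)n < 0 fails closed when a negative length has been sign-extended into a huge unsigned size, and check_object_size() (PAX_USERCOPY) bounds-checks the kernel-side object whenever the length is not a compile-time constant. The 64-bit header below additionally caps size at INT_MAX. What the signedness check buys, in a hypothetical buggy caller:

	static long bad_ioctl_sketch(void __user *arg, int user_len)
	{
		char buf[64];

		/* if user_len is negative, (unsigned long)user_len is huge;
		 * previously __copy_user ran with that size, now copy_from_user
		 * returns n unchanged and the caller fails cleanly */
		if (copy_from_user(buf, arg, user_len))
			return -EFAULT;
		return 0;
	}
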
10651diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
10652index c990a5e..f17b9c1 100644
10653--- a/arch/sparc/include/asm/uaccess_64.h
10654+++ b/arch/sparc/include/asm/uaccess_64.h
10655@@ -10,6 +10,7 @@
10656 #include <linux/compiler.h>
10657 #include <linux/string.h>
10658 #include <linux/thread_info.h>
10659+#include <linux/kernel.h>
10660 #include <asm/asi.h>
10661 #include <asm/spitfire.h>
10662 #include <asm-generic/uaccess-unaligned.h>
10663@@ -214,8 +215,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
10664 static inline unsigned long __must_check
10665 copy_from_user(void *to, const void __user *from, unsigned long size)
10666 {
10667- unsigned long ret = ___copy_from_user(to, from, size);
10668+ unsigned long ret;
10669
10670+ if ((long)size < 0 || size > INT_MAX)
10671+ return size;
10672+
10673+ if (!__builtin_constant_p(size))
10674+ check_object_size(to, size, false);
10675+
10676+ ret = ___copy_from_user(to, from, size);
10677 if (unlikely(ret))
10678 ret = copy_from_user_fixup(to, from, size);
10679
10680@@ -231,8 +239,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
10681 static inline unsigned long __must_check
10682 copy_to_user(void __user *to, const void *from, unsigned long size)
10683 {
10684- unsigned long ret = ___copy_to_user(to, from, size);
10685+ unsigned long ret;
10686
10687+ if ((long)size < 0 || size > INT_MAX)
10688+ return size;
10689+
10690+ if (!__builtin_constant_p(size))
10691+ check_object_size(from, size, true);
10692+
10693+ ret = ___copy_to_user(to, from, size);
10694 if (unlikely(ret))
10695 ret = copy_to_user_fixup(to, from, size);
10696 return ret;
10697diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
10698index 7cf9c6e..6206648 100644
10699--- a/arch/sparc/kernel/Makefile
10700+++ b/arch/sparc/kernel/Makefile
10701@@ -4,7 +4,7 @@
10702 #
10703
10704 asflags-y := -ansi
10705-ccflags-y := -Werror
10706+#ccflags-y := -Werror
10707
10708 extra-y := head_$(BITS).o
10709
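
(-Werror is commented out here and in the sparc lib/ and mm/ Makefiles below, presumably because the instrumentation this patch adds, such as the PaX GCC plugins, can introduce new warnings that would otherwise break the sparc build.)
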
10710diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
10711index 50e7b62..79fae35 100644
10712--- a/arch/sparc/kernel/process_32.c
10713+++ b/arch/sparc/kernel/process_32.c
10714@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
10715
10716 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
10717 r->psr, r->pc, r->npc, r->y, print_tainted());
10718- printk("PC: <%pS>\n", (void *) r->pc);
10719+ printk("PC: <%pA>\n", (void *) r->pc);
10720 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10721 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
10722 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
10723 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10724 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
10725 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
10726- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
10727+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
10728
10729 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10730 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
10731@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10732 rw = (struct reg_window32 *) fp;
10733 pc = rw->ins[7];
10734 printk("[%08lx : ", pc);
10735- printk("%pS ] ", (void *) pc);
10736+ printk("%pA ] ", (void *) pc);
10737 fp = rw->ins[6];
10738 } while (++count < 16);
10739 printk("\n");
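
Throughout the sparc register-dump and backtrace paths, %pS becomes %pA, a printk format specifier this patch adds: it still prints the resolved symbol, but under GRKERNSEC_HIDESYM it avoids leaking raw kernel text addresses into output an unprivileged user might be able to read.
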
10740diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
10741index 027e099..6d4178f 100644
10742--- a/arch/sparc/kernel/process_64.c
10743+++ b/arch/sparc/kernel/process_64.c
10744@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
10745 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
10746 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
10747 if (regs->tstate & TSTATE_PRIV)
10748- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
10749+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
10750 }
10751
10752 void show_regs(struct pt_regs *regs)
10753@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
10754
10755 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
10756 regs->tpc, regs->tnpc, regs->y, print_tainted());
10757- printk("TPC: <%pS>\n", (void *) regs->tpc);
10758+ printk("TPC: <%pA>\n", (void *) regs->tpc);
10759 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
10760 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
10761 regs->u_regs[3]);
10762@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
10763 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
10764 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
10765 regs->u_regs[15]);
10766- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
10767+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
10768 show_regwindow(regs);
10769 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
10770 }
10771@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
10772 ((tp && tp->task) ? tp->task->pid : -1));
10773
10774 if (gp->tstate & TSTATE_PRIV) {
10775- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
10776+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
10777 (void *) gp->tpc,
10778 (void *) gp->o7,
10779 (void *) gp->i7,
10780diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
10781index 79cc0d1..ec62734 100644
10782--- a/arch/sparc/kernel/prom_common.c
10783+++ b/arch/sparc/kernel/prom_common.c
10784@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
10785
10786 unsigned int prom_early_allocated __initdata;
10787
10788-static struct of_pdt_ops prom_sparc_ops __initdata = {
10789+static struct of_pdt_ops prom_sparc_ops __initconst = {
10790 .nextprop = prom_common_nextprop,
10791 .getproplen = prom_getproplen,
10792 .getproperty = prom_getproperty,
10793diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
10794index c13c9f2..d572c34 100644
10795--- a/arch/sparc/kernel/ptrace_64.c
10796+++ b/arch/sparc/kernel/ptrace_64.c
10797@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
10798 return ret;
10799 }
10800
10801+#ifdef CONFIG_GRKERNSEC_SETXID
10802+extern void gr_delayed_cred_worker(void);
10803+#endif
10804+
10805 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10806 {
10807 int ret = 0;
10808@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10809 if (test_thread_flag(TIF_NOHZ))
10810 user_exit();
10811
10812+#ifdef CONFIG_GRKERNSEC_SETXID
10813+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10814+ gr_delayed_cred_worker();
10815+#endif
10816+
10817 if (test_thread_flag(TIF_SYSCALL_TRACE))
10818 ret = tracehook_report_syscall_entry(regs);
10819
10820@@ -1093,6 +1102,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
10821 if (test_thread_flag(TIF_NOHZ))
10822 user_exit();
10823
10824+#ifdef CONFIG_GRKERNSEC_SETXID
10825+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10826+ gr_delayed_cred_worker();
10827+#endif
10828+
10829 audit_syscall_exit(regs);
10830
10831 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
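
These hooks implement GRKERNSEC_SETXID's deferred credential propagation: when one thread changes its ids, grsecurity flags the sibling threads with TIF_GRSEC_SETXID, and each sibling adopts the pending credentials at its next syscall boundary through gr_delayed_cred_worker() (defined in the grsecurity core elsewhere in this patch). The flag half of the pattern, sketched with stock thread-flag helpers:

	/* setuid path (elsewhere in the patch), for each sibling thread t: */
	set_tsk_thread_flag(t, TIF_GRSEC_SETXID);

	/* syscall entry/exit (the hunks above): */
	if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
		gr_delayed_cred_worker();
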
10832diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
10833index 41aa247..eadfb74 100644
10834--- a/arch/sparc/kernel/smp_64.c
10835+++ b/arch/sparc/kernel/smp_64.c
10836@@ -883,7 +883,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10837 return;
10838
10839 #ifdef CONFIG_DEBUG_DCFLUSH
10840- atomic_inc(&dcpage_flushes);
10841+ atomic_inc_unchecked(&dcpage_flushes);
10842 #endif
10843
10844 this_cpu = get_cpu();
10845@@ -907,7 +907,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10846 xcall_deliver(data0, __pa(pg_addr),
10847 (u64) pg_addr, cpumask_of(cpu));
10848 #ifdef CONFIG_DEBUG_DCFLUSH
10849- atomic_inc(&dcpage_flushes_xcall);
10850+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10851 #endif
10852 }
10853 }
10854@@ -926,7 +926,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10855 preempt_disable();
10856
10857 #ifdef CONFIG_DEBUG_DCFLUSH
10858- atomic_inc(&dcpage_flushes);
10859+ atomic_inc_unchecked(&dcpage_flushes);
10860 #endif
10861 data0 = 0;
10862 pg_addr = page_address(page);
10863@@ -943,7 +943,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10864 xcall_deliver(data0, __pa(pg_addr),
10865 (u64) pg_addr, cpu_online_mask);
10866 #ifdef CONFIG_DEBUG_DCFLUSH
10867- atomic_inc(&dcpage_flushes_xcall);
10868+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10869 #endif
10870 }
10871 __local_flush_dcache_page(page);
10872diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
10873index 646988d..b88905f 100644
10874--- a/arch/sparc/kernel/sys_sparc_32.c
10875+++ b/arch/sparc/kernel/sys_sparc_32.c
10876@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10877 if (len > TASK_SIZE - PAGE_SIZE)
10878 return -ENOMEM;
10879 if (!addr)
10880- addr = TASK_UNMAPPED_BASE;
10881+ addr = current->mm->mmap_base;
10882
10883 info.flags = 0;
10884 info.length = len;
10885diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
10886index c85403d..6af95c9 100644
10887--- a/arch/sparc/kernel/sys_sparc_64.c
10888+++ b/arch/sparc/kernel/sys_sparc_64.c
10889@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10890 struct vm_area_struct * vma;
10891 unsigned long task_size = TASK_SIZE;
10892 int do_color_align;
10893+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10894 struct vm_unmapped_area_info info;
10895
10896 if (flags & MAP_FIXED) {
10897 /* We do not accept a shared mapping if it would violate
10898 * cache aliasing constraints.
10899 */
10900- if ((flags & MAP_SHARED) &&
10901+ if ((filp || (flags & MAP_SHARED)) &&
10902 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10903 return -EINVAL;
10904 return addr;
10905@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10906 if (filp || (flags & MAP_SHARED))
10907 do_color_align = 1;
10908
10909+#ifdef CONFIG_PAX_RANDMMAP
10910+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10911+#endif
10912+
10913 if (addr) {
10914 if (do_color_align)
10915 addr = COLOR_ALIGN(addr, pgoff);
10916@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10917 addr = PAGE_ALIGN(addr);
10918
10919 vma = find_vma(mm, addr);
10920- if (task_size - len >= addr &&
10921- (!vma || addr + len <= vma->vm_start))
10922+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10923 return addr;
10924 }
10925
10926 info.flags = 0;
10927 info.length = len;
10928- info.low_limit = TASK_UNMAPPED_BASE;
10929+ info.low_limit = mm->mmap_base;
10930 info.high_limit = min(task_size, VA_EXCLUDE_START);
10931 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10932 info.align_offset = pgoff << PAGE_SHIFT;
10933+ info.threadstack_offset = offset;
10934 addr = vm_unmapped_area(&info);
10935
10936 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10937 VM_BUG_ON(addr != -ENOMEM);
10938 info.low_limit = VA_EXCLUDE_END;
10939+
10940+#ifdef CONFIG_PAX_RANDMMAP
10941+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10942+ info.low_limit += mm->delta_mmap;
10943+#endif
10944+
10945 info.high_limit = task_size;
10946 addr = vm_unmapped_area(&info);
10947 }
10948@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10949 unsigned long task_size = STACK_TOP32;
10950 unsigned long addr = addr0;
10951 int do_color_align;
10952+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10953 struct vm_unmapped_area_info info;
10954
10955 /* This should only ever run for 32-bit processes. */
10956@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10957 /* We do not accept a shared mapping if it would violate
10958 * cache aliasing constraints.
10959 */
10960- if ((flags & MAP_SHARED) &&
10961+ if ((filp || (flags & MAP_SHARED)) &&
10962 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10963 return -EINVAL;
10964 return addr;
10965@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10966 if (filp || (flags & MAP_SHARED))
10967 do_color_align = 1;
10968
10969+#ifdef CONFIG_PAX_RANDMMAP
10970+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10971+#endif
10972+
10973 /* requesting a specific address */
10974 if (addr) {
10975 if (do_color_align)
10976@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10977 addr = PAGE_ALIGN(addr);
10978
10979 vma = find_vma(mm, addr);
10980- if (task_size - len >= addr &&
10981- (!vma || addr + len <= vma->vm_start))
10982+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10983 return addr;
10984 }
10985
10986@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10987 info.high_limit = mm->mmap_base;
10988 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10989 info.align_offset = pgoff << PAGE_SHIFT;
10990+ info.threadstack_offset = offset;
10991 addr = vm_unmapped_area(&info);
10992
10993 /*
10994@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10995 VM_BUG_ON(addr != -ENOMEM);
10996 info.flags = 0;
10997 info.low_limit = TASK_UNMAPPED_BASE;
10998+
10999+#ifdef CONFIG_PAX_RANDMMAP
11000+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11001+ info.low_limit += mm->delta_mmap;
11002+#endif
11003+
11004 info.high_limit = STACK_TOP32;
11005 addr = vm_unmapped_area(&info);
11006 }
11007@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
11008 EXPORT_SYMBOL(get_fb_unmapped_area);
11009
11010 /* Essentially the same as PowerPC. */
11011-static unsigned long mmap_rnd(void)
11012+static unsigned long mmap_rnd(struct mm_struct *mm)
11013 {
11014 unsigned long rnd = 0UL;
11015
11016+#ifdef CONFIG_PAX_RANDMMAP
11017+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
11018+#endif
11019+
11020 if (current->flags & PF_RANDOMIZE) {
11021 unsigned long val = get_random_int();
11022 if (test_thread_flag(TIF_32BIT))
11023@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
11024
11025 void arch_pick_mmap_layout(struct mm_struct *mm)
11026 {
11027- unsigned long random_factor = mmap_rnd();
11028+ unsigned long random_factor = mmap_rnd(mm);
11029 unsigned long gap;
11030
11031 /*
11032@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
11033 gap == RLIM_INFINITY ||
11034 sysctl_legacy_va_layout) {
11035 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
11036+
11037+#ifdef CONFIG_PAX_RANDMMAP
11038+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11039+ mm->mmap_base += mm->delta_mmap;
11040+#endif
11041+
11042 mm->get_unmapped_area = arch_get_unmapped_area;
11043 } else {
11044 /* We know it's 32-bit */
11045@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
11046 gap = (task_size / 6 * 5);
11047
11048 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
11049+
11050+#ifdef CONFIG_PAX_RANDMMAP
11051+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11052+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
11053+#endif
11054+
11055 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
11056 }
11057 }
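
Beyond the PaX deltas, two behavioural changes hide in these 64-bit hunks: the MAP_FIXED early return now enforces SHMLBA cache-colour alignment for any file-backed mapping rather than only MAP_SHARED ones, and mmap_rnd() gains an mm argument so the stock PF_RANDOMIZE entropy is suppressed whenever MF_PAX_RANDMMAP already supplies its own (the dangling-#ifdef idiom noted earlier, here making the if (current->flags & PF_RANDOMIZE) block conditional), which keeps the base from being randomized twice.
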
11058diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
11059index 33a17e7..d87fb1f 100644
11060--- a/arch/sparc/kernel/syscalls.S
11061+++ b/arch/sparc/kernel/syscalls.S
11062@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
11063 #endif
11064 .align 32
11065 1: ldx [%g6 + TI_FLAGS], %l5
11066- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11067+ andcc %l5, _TIF_WORK_SYSCALL, %g0
11068 be,pt %icc, rtrap
11069 nop
11070 call syscall_trace_leave
11071@@ -184,7 +184,7 @@ linux_sparc_syscall32:
11072
11073 srl %i3, 0, %o3 ! IEU0
11074 srl %i2, 0, %o2 ! IEU0 Group
11075- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11076+ andcc %l0, _TIF_WORK_SYSCALL, %g0
11077 bne,pn %icc, linux_syscall_trace32 ! CTI
11078 mov %i0, %l5 ! IEU1
11079 5: call %l7 ! CTI Group brk forced
11080@@ -208,7 +208,7 @@ linux_sparc_syscall:
11081
11082 mov %i3, %o3 ! IEU1
11083 mov %i4, %o4 ! IEU0 Group
11084- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11085+ andcc %l0, _TIF_WORK_SYSCALL, %g0
11086 bne,pn %icc, linux_syscall_trace ! CTI Group
11087 mov %i0, %l5 ! IEU0
11088 2: call %l7 ! CTI Group brk forced
11089@@ -223,7 +223,7 @@ ret_sys_call:
11090
11091 cmp %o0, -ERESTART_RESTARTBLOCK
11092 bgeu,pn %xcc, 1f
11093- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11094+ andcc %l0, _TIF_WORK_SYSCALL, %g0
11095 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
11096
11097 2:
11098diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
11099index 6fd386c5..6907d81 100644
11100--- a/arch/sparc/kernel/traps_32.c
11101+++ b/arch/sparc/kernel/traps_32.c
11102@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
11103 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
11104 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
11105
11106+extern void gr_handle_kernel_exploit(void);
11107+
11108 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11109 {
11110 static int die_counter;
11111@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11112 count++ < 30 &&
11113 (((unsigned long) rw) >= PAGE_OFFSET) &&
11114 !(((unsigned long) rw) & 0x7)) {
11115- printk("Caller[%08lx]: %pS\n", rw->ins[7],
11116+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
11117 (void *) rw->ins[7]);
11118 rw = (struct reg_window32 *)rw->ins[6];
11119 }
11120 }
11121 printk("Instruction DUMP:");
11122 instruction_dump ((unsigned long *) regs->pc);
11123- if(regs->psr & PSR_PS)
11124+ if(regs->psr & PSR_PS) {
11125+ gr_handle_kernel_exploit();
11126 do_exit(SIGKILL);
11127+ }
11128 do_exit(SIGSEGV);
11129 }
11130
11131diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
11132index fb6640e..2daada8 100644
11133--- a/arch/sparc/kernel/traps_64.c
11134+++ b/arch/sparc/kernel/traps_64.c
11135@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
11136 i + 1,
11137 p->trapstack[i].tstate, p->trapstack[i].tpc,
11138 p->trapstack[i].tnpc, p->trapstack[i].tt);
11139- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
11140+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
11141 }
11142 }
11143
11144@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
11145
11146 lvl -= 0x100;
11147 if (regs->tstate & TSTATE_PRIV) {
11148+
11149+#ifdef CONFIG_PAX_REFCOUNT
11150+ if (lvl == 6)
11151+ pax_report_refcount_overflow(regs);
11152+#endif
11153+
11154 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
11155 die_if_kernel(buffer, regs);
11156 }
11157@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
11158 void bad_trap_tl1(struct pt_regs *regs, long lvl)
11159 {
11160 char buffer[32];
11161-
11162+
11163 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
11164 0, lvl, SIGTRAP) == NOTIFY_STOP)
11165 return;
11166
11167+#ifdef CONFIG_PAX_REFCOUNT
11168+ if (lvl == 6)
11169+ pax_report_refcount_overflow(regs);
11170+#endif
11171+
11172 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
11173
11174 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
11175@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
11176 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
11177 printk("%s" "ERROR(%d): ",
11178 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
11179- printk("TPC<%pS>\n", (void *) regs->tpc);
11180+ printk("TPC<%pA>\n", (void *) regs->tpc);
11181 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
11182 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
11183 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
11184@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11185 smp_processor_id(),
11186 (type & 0x1) ? 'I' : 'D',
11187 regs->tpc);
11188- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
11189+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
11190 panic("Irrecoverable Cheetah+ parity error.");
11191 }
11192
11193@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11194 smp_processor_id(),
11195 (type & 0x1) ? 'I' : 'D',
11196 regs->tpc);
11197- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
11198+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
11199 }
11200
11201 struct sun4v_error_entry {
11202@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
11203 /*0x38*/u64 reserved_5;
11204 };
11205
11206-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11207-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11208+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11209+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11210
11211 static const char *sun4v_err_type_to_str(u8 type)
11212 {
11213@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
11214 }
11215
11216 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11217- int cpu, const char *pfx, atomic_t *ocnt)
11218+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
11219 {
11220 u64 *raw_ptr = (u64 *) ent;
11221 u32 attrs;
11222@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11223
11224 show_regs(regs);
11225
11226- if ((cnt = atomic_read(ocnt)) != 0) {
11227- atomic_set(ocnt, 0);
11228+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
11229+ atomic_set_unchecked(ocnt, 0);
11230 wmb();
11231 printk("%s: Queue overflowed %d times.\n",
11232 pfx, cnt);
11233@@ -2048,7 +2059,7 @@ out:
11234 */
11235 void sun4v_resum_overflow(struct pt_regs *regs)
11236 {
11237- atomic_inc(&sun4v_resum_oflow_cnt);
11238+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
11239 }
11240
11241 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
11242@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
11243 /* XXX Actually even this can make not that much sense. Perhaps
11244 * XXX we should just pull the plug and panic directly from here?
11245 */
11246- atomic_inc(&sun4v_nonresum_oflow_cnt);
11247+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
11248 }
11249
11250 unsigned long sun4v_err_itlb_vaddr;
11251@@ -2116,9 +2127,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
11252
11253 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
11254 regs->tpc, tl);
11255- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
11256+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
11257 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11258- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
11259+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
11260 (void *) regs->u_regs[UREG_I7]);
11261 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
11262 "pte[%lx] error[%lx]\n",
11263@@ -2140,9 +2151,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
11264
11265 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
11266 regs->tpc, tl);
11267- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
11268+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
11269 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11270- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
11271+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
11272 (void *) regs->u_regs[UREG_I7]);
11273 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
11274 "pte[%lx] error[%lx]\n",
11275@@ -2359,13 +2370,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11276 fp = (unsigned long)sf->fp + STACK_BIAS;
11277 }
11278
11279- printk(" [%016lx] %pS\n", pc, (void *) pc);
11280+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11281 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11282 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
11283 int index = tsk->curr_ret_stack;
11284 if (tsk->ret_stack && index >= graph) {
11285 pc = tsk->ret_stack[index - graph].ret;
11286- printk(" [%016lx] %pS\n", pc, (void *) pc);
11287+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11288 graph++;
11289 }
11290 }
11291@@ -2383,6 +2394,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
11292 return (struct reg_window *) (fp + STACK_BIAS);
11293 }
11294
11295+extern void gr_handle_kernel_exploit(void);
11296+
11297 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11298 {
11299 static int die_counter;
11300@@ -2411,7 +2424,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11301 while (rw &&
11302 count++ < 30 &&
11303 kstack_valid(tp, (unsigned long) rw)) {
11304- printk("Caller[%016lx]: %pS\n", rw->ins[7],
11305+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
11306 (void *) rw->ins[7]);
11307
11308 rw = kernel_stack_up(rw);
11309@@ -2424,8 +2437,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11310 }
11311 user_instruction_dump ((unsigned int __user *) regs->tpc);
11312 }
11313- if (regs->tstate & TSTATE_PRIV)
11314+ if (regs->tstate & TSTATE_PRIV) {
11315+ gr_handle_kernel_exploit();
11316 do_exit(SIGKILL);
11317+ }
11318 do_exit(SIGSEGV);
11319 }
11320 EXPORT_SYMBOL(die_if_kernel);
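
This closes the PAX_REFCOUNT loop opened in atomic_64.h: the tvs instruction emitted by the checked atomics raises software trap 6 on signed overflow, which arrives here as bad_trap()/bad_trap_tl1() with lvl == 6 and is reported through pax_report_refcount_overflow() instead of letting a refcount wrap into a use-after-free. The gr_handle_kernel_exploit() calls added to both die_if_kernel() variants give grsecurity a hook to penalize the offending user, per its active-response configuration, whenever an oops happens in privileged state rather than merely killing the current task.
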
11321diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
11322index 62098a8..547ab2c 100644
11323--- a/arch/sparc/kernel/unaligned_64.c
11324+++ b/arch/sparc/kernel/unaligned_64.c
11325@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
11326 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
11327
11328 if (__ratelimit(&ratelimit)) {
11329- printk("Kernel unaligned access at TPC[%lx] %pS\n",
11330+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
11331 regs->tpc, (void *) regs->tpc);
11332 }
11333 }
11334diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
11335index 3269b02..64f5231 100644
11336--- a/arch/sparc/lib/Makefile
11337+++ b/arch/sparc/lib/Makefile
11338@@ -2,7 +2,7 @@
11339 #
11340
11341 asflags-y := -ansi -DST_DIV0=0x02
11342-ccflags-y := -Werror
11343+#ccflags-y := -Werror
11344
11345 lib-$(CONFIG_SPARC32) += ashrdi3.o
11346 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
11347diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
11348index 85c233d..68500e0 100644
11349--- a/arch/sparc/lib/atomic_64.S
11350+++ b/arch/sparc/lib/atomic_64.S
11351@@ -17,7 +17,12 @@
11352 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
11353 BACKOFF_SETUP(%o2)
11354 1: lduw [%o1], %g1
11355- add %g1, %o0, %g7
11356+ addcc %g1, %o0, %g7
11357+
11358+#ifdef CONFIG_PAX_REFCOUNT
11359+ tvs %icc, 6
11360+#endif
11361+
11362 cas [%o1], %g1, %g7
11363 cmp %g1, %g7
11364 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11365@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
11366 2: BACKOFF_SPIN(%o2, %o3, 1b)
11367 ENDPROC(atomic_add)
11368
11369+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11370+ BACKOFF_SETUP(%o2)
11371+1: lduw [%o1], %g1
11372+ add %g1, %o0, %g7
11373+ cas [%o1], %g1, %g7
11374+ cmp %g1, %g7
11375+ bne,pn %icc, 2f
11376+ nop
11377+ retl
11378+ nop
11379+2: BACKOFF_SPIN(%o2, %o3, 1b)
11380+ENDPROC(atomic_add_unchecked)
11381+
11382 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11383 BACKOFF_SETUP(%o2)
11384 1: lduw [%o1], %g1
11385- sub %g1, %o0, %g7
11386+ subcc %g1, %o0, %g7
11387+
11388+#ifdef CONFIG_PAX_REFCOUNT
11389+ tvs %icc, 6
11390+#endif
11391+
11392 cas [%o1], %g1, %g7
11393 cmp %g1, %g7
11394 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11395@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11396 2: BACKOFF_SPIN(%o2, %o3, 1b)
11397 ENDPROC(atomic_sub)
11398
11399+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
11400+ BACKOFF_SETUP(%o2)
11401+1: lduw [%o1], %g1
11402+ sub %g1, %o0, %g7
11403+ cas [%o1], %g1, %g7
11404+ cmp %g1, %g7
11405+ bne,pn %icc, 2f
11406+ nop
11407+ retl
11408+ nop
11409+2: BACKOFF_SPIN(%o2, %o3, 1b)
11410+ENDPROC(atomic_sub_unchecked)
11411+
11412 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11413 BACKOFF_SETUP(%o2)
11414 1: lduw [%o1], %g1
11415- add %g1, %o0, %g7
11416+ addcc %g1, %o0, %g7
11417+
11418+#ifdef CONFIG_PAX_REFCOUNT
11419+ tvs %icc, 6
11420+#endif
11421+
11422 cas [%o1], %g1, %g7
11423 cmp %g1, %g7
11424 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11425@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11426 2: BACKOFF_SPIN(%o2, %o3, 1b)
11427 ENDPROC(atomic_add_ret)
11428
11429+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11430+ BACKOFF_SETUP(%o2)
11431+1: lduw [%o1], %g1
11432+ addcc %g1, %o0, %g7
11433+ cas [%o1], %g1, %g7
11434+ cmp %g1, %g7
11435+ bne,pn %icc, 2f
11436+ add %g7, %o0, %g7
11437+ sra %g7, 0, %o0
11438+ retl
11439+ nop
11440+2: BACKOFF_SPIN(%o2, %o3, 1b)
11441+ENDPROC(atomic_add_ret_unchecked)
11442+
11443 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
11444 BACKOFF_SETUP(%o2)
11445 1: lduw [%o1], %g1
11446- sub %g1, %o0, %g7
11447+ subcc %g1, %o0, %g7
11448+
11449+#ifdef CONFIG_PAX_REFCOUNT
11450+ tvs %icc, 6
11451+#endif
11452+
11453 cas [%o1], %g1, %g7
11454 cmp %g1, %g7
11455 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11456@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
11457 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
11458 BACKOFF_SETUP(%o2)
11459 1: ldx [%o1], %g1
11460- add %g1, %o0, %g7
11461+ addcc %g1, %o0, %g7
11462+
11463+#ifdef CONFIG_PAX_REFCOUNT
11464+ tvs %xcc, 6
11465+#endif
11466+
11467 casx [%o1], %g1, %g7
11468 cmp %g1, %g7
11469 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11470@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
11471 2: BACKOFF_SPIN(%o2, %o3, 1b)
11472 ENDPROC(atomic64_add)
11473
11474+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11475+ BACKOFF_SETUP(%o2)
11476+1: ldx [%o1], %g1
11477+ addcc %g1, %o0, %g7
11478+ casx [%o1], %g1, %g7
11479+ cmp %g1, %g7
11480+ bne,pn %xcc, 2f
11481+ nop
11482+ retl
11483+ nop
11484+2: BACKOFF_SPIN(%o2, %o3, 1b)
11485+ENDPROC(atomic64_add_unchecked)
11486+
11487 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11488 BACKOFF_SETUP(%o2)
11489 1: ldx [%o1], %g1
11490- sub %g1, %o0, %g7
11491+ subcc %g1, %o0, %g7
11492+
11493+#ifdef CONFIG_PAX_REFCOUNT
11494+ tvs %xcc, 6
11495+#endif
11496+
11497 casx [%o1], %g1, %g7
11498 cmp %g1, %g7
11499 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11500@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11501 2: BACKOFF_SPIN(%o2, %o3, 1b)
11502 ENDPROC(atomic64_sub)
11503
11504+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
11505+ BACKOFF_SETUP(%o2)
11506+1: ldx [%o1], %g1
11507+ subcc %g1, %o0, %g7
11508+ casx [%o1], %g1, %g7
11509+ cmp %g1, %g7
11510+ bne,pn %xcc, 2f
11511+ nop
11512+ retl
11513+ nop
11514+2: BACKOFF_SPIN(%o2, %o3, 1b)
11515+ENDPROC(atomic64_sub_unchecked)
11516+
11517 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11518 BACKOFF_SETUP(%o2)
11519 1: ldx [%o1], %g1
11520- add %g1, %o0, %g7
11521+ addcc %g1, %o0, %g7
11522+
11523+#ifdef CONFIG_PAX_REFCOUNT
11524+ tvs %xcc, 6
11525+#endif
11526+
11527 casx [%o1], %g1, %g7
11528 cmp %g1, %g7
11529 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11530@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11531 2: BACKOFF_SPIN(%o2, %o3, 1b)
11532 ENDPROC(atomic64_add_ret)
11533
11534+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11535+ BACKOFF_SETUP(%o2)
11536+1: ldx [%o1], %g1
11537+ addcc %g1, %o0, %g7
11538+ casx [%o1], %g1, %g7
11539+ cmp %g1, %g7
11540+ bne,pn %xcc, 2f
11541+ add %g7, %o0, %g7
11542+ mov %g7, %o0
11543+ retl
11544+ nop
11545+2: BACKOFF_SPIN(%o2, %o3, 1b)
11546+ENDPROC(atomic64_add_ret_unchecked)
11547+
11548 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
11549 BACKOFF_SETUP(%o2)
11550 1: ldx [%o1], %g1
11551- sub %g1, %o0, %g7
11552+ subcc %g1, %o0, %g7
11553+
11554+#ifdef CONFIG_PAX_REFCOUNT
11555+ tvs %xcc, 6
11556+#endif
11557+
11558 casx [%o1], %g1, %g7
11559 cmp %g1, %g7
11560 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
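
The checked/unchecked split above is the core of PAX_REFCOUNT on sparc64: the plain add/sub become addcc/subcc so the condition codes are set, and tvs %icc, 6 (trap-on-overflow-set, software trap 6; %xcc for the 64-bit ops) fires the moment a signed overflow occurs, before the cas can publish the wrapped value. The *_unchecked entry points keep the historical wrapping semantics for counters where overflow is intentional. A minimal user-space sketch of the two semantics (not from the patch; assumes GCC/Clang overflow builtins):

#include <stdlib.h>

/* checked: analogous to addcc + "tvs %icc, 6" -- refuse to wrap */
static int checked_add(int old, int delta)
{
	int res;

	if (__builtin_add_overflow(old, delta, &res))
		abort();	/* kernel analogue: trap 6 -> PaX refcount handler */
	return res;
}

/* unchecked: analogous to atomic_add_unchecked -- wrap silently */
static int unchecked_add(int old, int delta)
{
	return (int)((unsigned int)old + (unsigned int)delta);
}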
11561diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
11562index 323335b..ed85ea2 100644
11563--- a/arch/sparc/lib/ksyms.c
11564+++ b/arch/sparc/lib/ksyms.c
11565@@ -100,12 +100,18 @@ EXPORT_SYMBOL(__clear_user);
11566
11567 /* Atomic counter implementation. */
11568 EXPORT_SYMBOL(atomic_add);
11569+EXPORT_SYMBOL(atomic_add_unchecked);
11570 EXPORT_SYMBOL(atomic_add_ret);
11571+EXPORT_SYMBOL(atomic_add_ret_unchecked);
11572 EXPORT_SYMBOL(atomic_sub);
11573+EXPORT_SYMBOL(atomic_sub_unchecked);
11574 EXPORT_SYMBOL(atomic_sub_ret);
11575 EXPORT_SYMBOL(atomic64_add);
11576+EXPORT_SYMBOL(atomic64_add_unchecked);
11577 EXPORT_SYMBOL(atomic64_add_ret);
11578+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
11579 EXPORT_SYMBOL(atomic64_sub);
11580+EXPORT_SYMBOL(atomic64_sub_unchecked);
11581 EXPORT_SYMBOL(atomic64_sub_ret);
11582 EXPORT_SYMBOL(atomic64_dec_if_positive);
11583
11584diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
11585index 30c3ecc..736f015 100644
11586--- a/arch/sparc/mm/Makefile
11587+++ b/arch/sparc/mm/Makefile
11588@@ -2,7 +2,7 @@
11589 #
11590
11591 asflags-y := -ansi
11592-ccflags-y := -Werror
11593+#ccflags-y := -Werror
11594
11595 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
11596 obj-y += fault_$(BITS).o
11597diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
11598index 908e8c1..1524793 100644
11599--- a/arch/sparc/mm/fault_32.c
11600+++ b/arch/sparc/mm/fault_32.c
11601@@ -21,6 +21,9 @@
11602 #include <linux/perf_event.h>
11603 #include <linux/interrupt.h>
11604 #include <linux/kdebug.h>
11605+#include <linux/slab.h>
11606+#include <linux/pagemap.h>
11607+#include <linux/compiler.h>
11608
11609 #include <asm/page.h>
11610 #include <asm/pgtable.h>
11611@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
11612 return safe_compute_effective_address(regs, insn);
11613 }
11614
11615+#ifdef CONFIG_PAX_PAGEEXEC
11616+#ifdef CONFIG_PAX_DLRESOLVE
11617+static void pax_emuplt_close(struct vm_area_struct *vma)
11618+{
11619+ vma->vm_mm->call_dl_resolve = 0UL;
11620+}
11621+
11622+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11623+{
11624+ unsigned int *kaddr;
11625+
11626+ vmf->page = alloc_page(GFP_HIGHUSER);
11627+ if (!vmf->page)
11628+ return VM_FAULT_OOM;
11629+
11630+ kaddr = kmap(vmf->page);
11631+ memset(kaddr, 0, PAGE_SIZE);
11632+ kaddr[0] = 0x9DE3BFA8U; /* save */
11633+ flush_dcache_page(vmf->page);
11634+ kunmap(vmf->page);
11635+ return VM_FAULT_MAJOR;
11636+}
11637+
11638+static const struct vm_operations_struct pax_vm_ops = {
11639+ .close = pax_emuplt_close,
11640+ .fault = pax_emuplt_fault
11641+};
11642+
11643+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11644+{
11645+ int ret;
11646+
11647+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11648+ vma->vm_mm = current->mm;
11649+ vma->vm_start = addr;
11650+ vma->vm_end = addr + PAGE_SIZE;
11651+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11652+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11653+ vma->vm_ops = &pax_vm_ops;
11654+
11655+ ret = insert_vm_struct(current->mm, vma);
11656+ if (ret)
11657+ return ret;
11658+
11659+ ++current->mm->total_vm;
11660+ return 0;
11661+}
11662+#endif
11663+
11664+/*
11665+ * PaX: decide what to do with offenders (regs->pc = fault address)
11666+ *
11667+ * returns 1 when task should be killed
11668+ * 2 when patched PLT trampoline was detected
11669+ * 3 when unpatched PLT trampoline was detected
11670+ */
11671+static int pax_handle_fetch_fault(struct pt_regs *regs)
11672+{
11673+
11674+#ifdef CONFIG_PAX_EMUPLT
11675+ int err;
11676+
11677+ do { /* PaX: patched PLT emulation #1 */
11678+ unsigned int sethi1, sethi2, jmpl;
11679+
11680+ err = get_user(sethi1, (unsigned int *)regs->pc);
11681+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
11682+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
11683+
11684+ if (err)
11685+ break;
11686+
11687+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11688+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11689+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11690+ {
11691+ unsigned int addr;
11692+
11693+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11694+ addr = regs->u_regs[UREG_G1];
11695+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11696+ regs->pc = addr;
11697+ regs->npc = addr+4;
11698+ return 2;
11699+ }
11700+ } while (0);
11701+
11702+ do { /* PaX: patched PLT emulation #2 */
11703+ unsigned int ba;
11704+
11705+ err = get_user(ba, (unsigned int *)regs->pc);
11706+
11707+ if (err)
11708+ break;
11709+
11710+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11711+ unsigned int addr;
11712+
11713+ if ((ba & 0xFFC00000U) == 0x30800000U)
11714+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11715+ else
11716+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11717+ regs->pc = addr;
11718+ regs->npc = addr+4;
11719+ return 2;
11720+ }
11721+ } while (0);
11722+
11723+ do { /* PaX: patched PLT emulation #3 */
11724+ unsigned int sethi, bajmpl, nop;
11725+
11726+ err = get_user(sethi, (unsigned int *)regs->pc);
11727+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
11728+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11729+
11730+ if (err)
11731+ break;
11732+
11733+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11734+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11735+ nop == 0x01000000U)
11736+ {
11737+ unsigned int addr;
11738+
11739+ addr = (sethi & 0x003FFFFFU) << 10;
11740+ regs->u_regs[UREG_G1] = addr;
11741+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11742+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11743+ else
11744+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11745+ regs->pc = addr;
11746+ regs->npc = addr+4;
11747+ return 2;
11748+ }
11749+ } while (0);
11750+
11751+ do { /* PaX: unpatched PLT emulation step 1 */
11752+ unsigned int sethi, ba, nop;
11753+
11754+ err = get_user(sethi, (unsigned int *)regs->pc);
11755+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
11756+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11757+
11758+ if (err)
11759+ break;
11760+
11761+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11762+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11763+ nop == 0x01000000U)
11764+ {
11765+ unsigned int addr, save, call;
11766+
11767+ if ((ba & 0xFFC00000U) == 0x30800000U)
11768+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11769+ else
11770+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11771+
11772+ err = get_user(save, (unsigned int *)addr);
11773+ err |= get_user(call, (unsigned int *)(addr+4));
11774+ err |= get_user(nop, (unsigned int *)(addr+8));
11775+ if (err)
11776+ break;
11777+
11778+#ifdef CONFIG_PAX_DLRESOLVE
11779+ if (save == 0x9DE3BFA8U &&
11780+ (call & 0xC0000000U) == 0x40000000U &&
11781+ nop == 0x01000000U)
11782+ {
11783+ struct vm_area_struct *vma;
11784+ unsigned long call_dl_resolve;
11785+
11786+ down_read(&current->mm->mmap_sem);
11787+ call_dl_resolve = current->mm->call_dl_resolve;
11788+ up_read(&current->mm->mmap_sem);
11789+ if (likely(call_dl_resolve))
11790+ goto emulate;
11791+
11792+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11793+
11794+ down_write(&current->mm->mmap_sem);
11795+ if (current->mm->call_dl_resolve) {
11796+ call_dl_resolve = current->mm->call_dl_resolve;
11797+ up_write(&current->mm->mmap_sem);
11798+ if (vma)
11799+ kmem_cache_free(vm_area_cachep, vma);
11800+ goto emulate;
11801+ }
11802+
11803+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11804+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11805+ up_write(&current->mm->mmap_sem);
11806+ if (vma)
11807+ kmem_cache_free(vm_area_cachep, vma);
11808+ return 1;
11809+ }
11810+
11811+ if (pax_insert_vma(vma, call_dl_resolve)) {
11812+ up_write(&current->mm->mmap_sem);
11813+ kmem_cache_free(vm_area_cachep, vma);
11814+ return 1;
11815+ }
11816+
11817+ current->mm->call_dl_resolve = call_dl_resolve;
11818+ up_write(&current->mm->mmap_sem);
11819+
11820+emulate:
11821+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11822+ regs->pc = call_dl_resolve;
11823+ regs->npc = addr+4;
11824+ return 3;
11825+ }
11826+#endif
11827+
11828+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11829+ if ((save & 0xFFC00000U) == 0x05000000U &&
11830+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11831+ nop == 0x01000000U)
11832+ {
11833+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11834+ regs->u_regs[UREG_G2] = addr + 4;
11835+ addr = (save & 0x003FFFFFU) << 10;
11836+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11837+ regs->pc = addr;
11838+ regs->npc = addr+4;
11839+ return 3;
11840+ }
11841+ }
11842+ } while (0);
11843+
11844+ do { /* PaX: unpatched PLT emulation step 2 */
11845+ unsigned int save, call, nop;
11846+
11847+ err = get_user(save, (unsigned int *)(regs->pc-4));
11848+ err |= get_user(call, (unsigned int *)regs->pc);
11849+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
11850+ if (err)
11851+ break;
11852+
11853+ if (save == 0x9DE3BFA8U &&
11854+ (call & 0xC0000000U) == 0x40000000U &&
11855+ nop == 0x01000000U)
11856+ {
11857+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
11858+
11859+ regs->u_regs[UREG_RETPC] = regs->pc;
11860+ regs->pc = dl_resolve;
11861+ regs->npc = dl_resolve+4;
11862+ return 3;
11863+ }
11864+ } while (0);
11865+#endif
11866+
11867+ return 1;
11868+}
11869+
11870+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11871+{
11872+ unsigned long i;
11873+
11874+ printk(KERN_ERR "PAX: bytes at PC: ");
11875+ for (i = 0; i < 8; i++) {
11876+ unsigned int c;
11877+ if (get_user(c, (unsigned int *)pc+i))
11878+ printk(KERN_CONT "???????? ");
11879+ else
11880+ printk(KERN_CONT "%08x ", c);
11881+ }
11882+ printk("\n");
11883+}
11884+#endif
11885+
11886 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11887 int text_fault)
11888 {
11889@@ -226,6 +500,24 @@ good_area:
11890 if (!(vma->vm_flags & VM_WRITE))
11891 goto bad_area;
11892 } else {
11893+
11894+#ifdef CONFIG_PAX_PAGEEXEC
11895+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
11896+ up_read(&mm->mmap_sem);
11897+ switch (pax_handle_fetch_fault(regs)) {
11898+
11899+#ifdef CONFIG_PAX_EMUPLT
11900+ case 2:
11901+ case 3:
11902+ return;
11903+#endif
11904+
11905+ }
11906+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
11907+ do_group_exit(SIGKILL);
11908+ }
11909+#endif
11910+
11911 /* Allow reads even for write-only mappings */
11912 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
11913 goto bad_area;
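
Every pattern match above is a mask/compare against raw SPARC opcodes. Two decodes recur throughout: sethi carries a 22-bit immediate that lands in bits 31:10 of the destination register, and ba carries a 22-bit signed word displacement, which the patch sign-extends with the branch-free (((x | high) ^ sign) + sign) idiom. A hedged C restatement (field layout per the SPARC V8 manual; helper names are illustrative, not from the patch):

#include <stdbool.h>
#include <stdint.h>

static bool is_sethi_g1(uint32_t insn)
{
	return (insn & 0xFFC00000U) == 0x03000000U;	/* sethi %hi(X), %g1 */
}

static uint32_t sethi_value(uint32_t insn)
{
	return (insn & 0x003FFFFFU) << 10;		/* imm22 -> bits 31:10 */
}

/* ba: byte offset from the branch = sign-extended imm22 words, times 4;
 * equivalent to the patch's
 *   (((insn | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2
 * (relies on arithmetic right shift, as kernel code does) */
static int32_t ba_byte_offset(uint32_t insn)
{
	return (int32_t)(insn << 10) >> 8;	/* sext(imm22) scaled by 4 */
}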
11914diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
11915index 587cd05..fbdf17a 100644
11916--- a/arch/sparc/mm/fault_64.c
11917+++ b/arch/sparc/mm/fault_64.c
11918@@ -22,6 +22,9 @@
11919 #include <linux/kdebug.h>
11920 #include <linux/percpu.h>
11921 #include <linux/context_tracking.h>
11922+#include <linux/slab.h>
11923+#include <linux/pagemap.h>
11924+#include <linux/compiler.h>
11925
11926 #include <asm/page.h>
11927 #include <asm/pgtable.h>
11928@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
11929 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
11930 regs->tpc);
11931 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
11932- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
11933+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
11934 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
11935 dump_stack();
11936 unhandled_fault(regs->tpc, current, regs);
11937@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
11938 show_regs(regs);
11939 }
11940
11941+#ifdef CONFIG_PAX_PAGEEXEC
11942+#ifdef CONFIG_PAX_DLRESOLVE
11943+static void pax_emuplt_close(struct vm_area_struct *vma)
11944+{
11945+ vma->vm_mm->call_dl_resolve = 0UL;
11946+}
11947+
11948+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11949+{
11950+ unsigned int *kaddr;
11951+
11952+ vmf->page = alloc_page(GFP_HIGHUSER);
11953+ if (!vmf->page)
11954+ return VM_FAULT_OOM;
11955+
11956+ kaddr = kmap(vmf->page);
11957+ memset(kaddr, 0, PAGE_SIZE);
11958+ kaddr[0] = 0x9DE3BFA8U; /* save */
11959+ flush_dcache_page(vmf->page);
11960+ kunmap(vmf->page);
11961+ return VM_FAULT_MAJOR;
11962+}
11963+
11964+static const struct vm_operations_struct pax_vm_ops = {
11965+ .close = pax_emuplt_close,
11966+ .fault = pax_emuplt_fault
11967+};
11968+
11969+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11970+{
11971+ int ret;
11972+
11973+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11974+ vma->vm_mm = current->mm;
11975+ vma->vm_start = addr;
11976+ vma->vm_end = addr + PAGE_SIZE;
11977+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11978+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11979+ vma->vm_ops = &pax_vm_ops;
11980+
11981+ ret = insert_vm_struct(current->mm, vma);
11982+ if (ret)
11983+ return ret;
11984+
11985+ ++current->mm->total_vm;
11986+ return 0;
11987+}
11988+#endif
11989+
11990+/*
11991+ * PaX: decide what to do with offenders (regs->tpc = fault address)
11992+ *
11993+ * returns 1 when task should be killed
11994+ * 2 when patched PLT trampoline was detected
11995+ * 3 when unpatched PLT trampoline was detected
11996+ */
11997+static int pax_handle_fetch_fault(struct pt_regs *regs)
11998+{
11999+
12000+#ifdef CONFIG_PAX_EMUPLT
12001+ int err;
12002+
12003+ do { /* PaX: patched PLT emulation #1 */
12004+ unsigned int sethi1, sethi2, jmpl;
12005+
12006+ err = get_user(sethi1, (unsigned int *)regs->tpc);
12007+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
12008+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
12009+
12010+ if (err)
12011+ break;
12012+
12013+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
12014+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
12015+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
12016+ {
12017+ unsigned long addr;
12018+
12019+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
12020+ addr = regs->u_regs[UREG_G1];
12021+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12022+
12023+ if (test_thread_flag(TIF_32BIT))
12024+ addr &= 0xFFFFFFFFUL;
12025+
12026+ regs->tpc = addr;
12027+ regs->tnpc = addr+4;
12028+ return 2;
12029+ }
12030+ } while (0);
12031+
12032+ do { /* PaX: patched PLT emulation #2 */
12033+ unsigned int ba;
12034+
12035+ err = get_user(ba, (unsigned int *)regs->tpc);
12036+
12037+ if (err)
12038+ break;
12039+
12040+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
12041+ unsigned long addr;
12042+
12043+ if ((ba & 0xFFC00000U) == 0x30800000U)
12044+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
12045+ else
12046+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12047+
12048+ if (test_thread_flag(TIF_32BIT))
12049+ addr &= 0xFFFFFFFFUL;
12050+
12051+ regs->tpc = addr;
12052+ regs->tnpc = addr+4;
12053+ return 2;
12054+ }
12055+ } while (0);
12056+
12057+ do { /* PaX: patched PLT emulation #3 */
12058+ unsigned int sethi, bajmpl, nop;
12059+
12060+ err = get_user(sethi, (unsigned int *)regs->tpc);
12061+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
12062+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12063+
12064+ if (err)
12065+ break;
12066+
12067+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12068+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
12069+ nop == 0x01000000U)
12070+ {
12071+ unsigned long addr;
12072+
12073+ addr = (sethi & 0x003FFFFFU) << 10;
12074+ regs->u_regs[UREG_G1] = addr;
12075+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
12076+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12077+ else
12078+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12079+
12080+ if (test_thread_flag(TIF_32BIT))
12081+ addr &= 0xFFFFFFFFUL;
12082+
12083+ regs->tpc = addr;
12084+ regs->tnpc = addr+4;
12085+ return 2;
12086+ }
12087+ } while (0);
12088+
12089+ do { /* PaX: patched PLT emulation #4 */
12090+ unsigned int sethi, mov1, call, mov2;
12091+
12092+ err = get_user(sethi, (unsigned int *)regs->tpc);
12093+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
12094+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
12095+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
12096+
12097+ if (err)
12098+ break;
12099+
12100+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12101+ mov1 == 0x8210000FU &&
12102+ (call & 0xC0000000U) == 0x40000000U &&
12103+ mov2 == 0x9E100001U)
12104+ {
12105+ unsigned long addr;
12106+
12107+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
12108+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12109+
12110+ if (test_thread_flag(TIF_32BIT))
12111+ addr &= 0xFFFFFFFFUL;
12112+
12113+ regs->tpc = addr;
12114+ regs->tnpc = addr+4;
12115+ return 2;
12116+ }
12117+ } while (0);
12118+
12119+ do { /* PaX: patched PLT emulation #5 */
12120+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
12121+
12122+ err = get_user(sethi, (unsigned int *)regs->tpc);
12123+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
12124+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
12125+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
12126+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
12127+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
12128+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
12129+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
12130+
12131+ if (err)
12132+ break;
12133+
12134+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12135+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
12136+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12137+ (or1 & 0xFFFFE000U) == 0x82106000U &&
12138+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12139+ sllx == 0x83287020U &&
12140+ jmpl == 0x81C04005U &&
12141+ nop == 0x01000000U)
12142+ {
12143+ unsigned long addr;
12144+
12145+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12146+ regs->u_regs[UREG_G1] <<= 32;
12147+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12148+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
12149+ regs->tpc = addr;
12150+ regs->tnpc = addr+4;
12151+ return 2;
12152+ }
12153+ } while (0);
12154+
12155+ do { /* PaX: patched PLT emulation #6 */
12156+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
12157+
12158+ err = get_user(sethi, (unsigned int *)regs->tpc);
12159+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
12160+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
12161+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
12162+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
12163+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
12164+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
12165+
12166+ if (err)
12167+ break;
12168+
12169+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12170+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
12171+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12172+ sllx == 0x83287020U &&
12173+ (or & 0xFFFFE000U) == 0x8A116000U &&
12174+ jmpl == 0x81C04005U &&
12175+ nop == 0x01000000U)
12176+ {
12177+ unsigned long addr;
12178+
12179+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
12180+ regs->u_regs[UREG_G1] <<= 32;
12181+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
12182+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
12183+ regs->tpc = addr;
12184+ regs->tnpc = addr+4;
12185+ return 2;
12186+ }
12187+ } while (0);
12188+
12189+ do { /* PaX: unpatched PLT emulation step 1 */
12190+ unsigned int sethi, ba, nop;
12191+
12192+ err = get_user(sethi, (unsigned int *)regs->tpc);
12193+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12194+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12195+
12196+ if (err)
12197+ break;
12198+
12199+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12200+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
12201+ nop == 0x01000000U)
12202+ {
12203+ unsigned long addr;
12204+ unsigned int save, call;
12205+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
12206+
12207+ if ((ba & 0xFFC00000U) == 0x30800000U)
12208+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
12209+ else
12210+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12211+
12212+ if (test_thread_flag(TIF_32BIT))
12213+ addr &= 0xFFFFFFFFUL;
12214+
12215+ err = get_user(save, (unsigned int *)addr);
12216+ err |= get_user(call, (unsigned int *)(addr+4));
12217+ err |= get_user(nop, (unsigned int *)(addr+8));
12218+ if (err)
12219+ break;
12220+
12221+#ifdef CONFIG_PAX_DLRESOLVE
12222+ if (save == 0x9DE3BFA8U &&
12223+ (call & 0xC0000000U) == 0x40000000U &&
12224+ nop == 0x01000000U)
12225+ {
12226+ struct vm_area_struct *vma;
12227+ unsigned long call_dl_resolve;
12228+
12229+ down_read(&current->mm->mmap_sem);
12230+ call_dl_resolve = current->mm->call_dl_resolve;
12231+ up_read(&current->mm->mmap_sem);
12232+ if (likely(call_dl_resolve))
12233+ goto emulate;
12234+
12235+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
12236+
12237+ down_write(&current->mm->mmap_sem);
12238+ if (current->mm->call_dl_resolve) {
12239+ call_dl_resolve = current->mm->call_dl_resolve;
12240+ up_write(&current->mm->mmap_sem);
12241+ if (vma)
12242+ kmem_cache_free(vm_area_cachep, vma);
12243+ goto emulate;
12244+ }
12245+
12246+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
12247+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
12248+ up_write(&current->mm->mmap_sem);
12249+ if (vma)
12250+ kmem_cache_free(vm_area_cachep, vma);
12251+ return 1;
12252+ }
12253+
12254+ if (pax_insert_vma(vma, call_dl_resolve)) {
12255+ up_write(&current->mm->mmap_sem);
12256+ kmem_cache_free(vm_area_cachep, vma);
12257+ return 1;
12258+ }
12259+
12260+ current->mm->call_dl_resolve = call_dl_resolve;
12261+ up_write(&current->mm->mmap_sem);
12262+
12263+emulate:
12264+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12265+ regs->tpc = call_dl_resolve;
12266+ regs->tnpc = addr+4;
12267+ return 3;
12268+ }
12269+#endif
12270+
12271+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
12272+ if ((save & 0xFFC00000U) == 0x05000000U &&
12273+ (call & 0xFFFFE000U) == 0x85C0A000U &&
12274+ nop == 0x01000000U)
12275+ {
12276+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12277+ regs->u_regs[UREG_G2] = addr + 4;
12278+ addr = (save & 0x003FFFFFU) << 10;
12279+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12280+
12281+ if (test_thread_flag(TIF_32BIT))
12282+ addr &= 0xFFFFFFFFUL;
12283+
12284+ regs->tpc = addr;
12285+ regs->tnpc = addr+4;
12286+ return 3;
12287+ }
12288+
12289+ /* PaX: 64-bit PLT stub */
12290+ err = get_user(sethi1, (unsigned int *)addr);
12291+ err |= get_user(sethi2, (unsigned int *)(addr+4));
12292+ err |= get_user(or1, (unsigned int *)(addr+8));
12293+ err |= get_user(or2, (unsigned int *)(addr+12));
12294+ err |= get_user(sllx, (unsigned int *)(addr+16));
12295+ err |= get_user(add, (unsigned int *)(addr+20));
12296+ err |= get_user(jmpl, (unsigned int *)(addr+24));
12297+ err |= get_user(nop, (unsigned int *)(addr+28));
12298+ if (err)
12299+ break;
12300+
12301+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
12302+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12303+ (or1 & 0xFFFFE000U) == 0x88112000U &&
12304+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12305+ sllx == 0x89293020U &&
12306+ add == 0x8A010005U &&
12307+ jmpl == 0x89C14000U &&
12308+ nop == 0x01000000U)
12309+ {
12310+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12311+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12312+ regs->u_regs[UREG_G4] <<= 32;
12313+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12314+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
12315+ regs->u_regs[UREG_G4] = addr + 24;
12316+ addr = regs->u_regs[UREG_G5];
12317+ regs->tpc = addr;
12318+ regs->tnpc = addr+4;
12319+ return 3;
12320+ }
12321+ }
12322+ } while (0);
12323+
12324+#ifdef CONFIG_PAX_DLRESOLVE
12325+ do { /* PaX: unpatched PLT emulation step 2 */
12326+ unsigned int save, call, nop;
12327+
12328+ err = get_user(save, (unsigned int *)(regs->tpc-4));
12329+ err |= get_user(call, (unsigned int *)regs->tpc);
12330+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
12331+ if (err)
12332+ break;
12333+
12334+ if (save == 0x9DE3BFA8U &&
12335+ (call & 0xC0000000U) == 0x40000000U &&
12336+ nop == 0x01000000U)
12337+ {
12338+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12339+
12340+ if (test_thread_flag(TIF_32BIT))
12341+ dl_resolve &= 0xFFFFFFFFUL;
12342+
12343+ regs->u_regs[UREG_RETPC] = regs->tpc;
12344+ regs->tpc = dl_resolve;
12345+ regs->tnpc = dl_resolve+4;
12346+ return 3;
12347+ }
12348+ } while (0);
12349+#endif
12350+
12351+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12352+ unsigned int sethi, ba, nop;
12353+
12354+ err = get_user(sethi, (unsigned int *)regs->tpc);
12355+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12356+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12357+
12358+ if (err)
12359+ break;
12360+
12361+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12362+ (ba & 0xFFF00000U) == 0x30600000U &&
12363+ nop == 0x01000000U)
12364+ {
12365+ unsigned long addr;
12366+
12367+ addr = (sethi & 0x003FFFFFU) << 10;
12368+ regs->u_regs[UREG_G1] = addr;
12369+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12370+
12371+ if (test_thread_flag(TIF_32BIT))
12372+ addr &= 0xFFFFFFFFUL;
12373+
12374+ regs->tpc = addr;
12375+ regs->tnpc = addr+4;
12376+ return 2;
12377+ }
12378+ } while (0);
12379+
12380+#endif
12381+
12382+ return 1;
12383+}
12384+
12385+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12386+{
12387+ unsigned long i;
12388+
12389+ printk(KERN_ERR "PAX: bytes at PC: ");
12390+ for (i = 0; i < 8; i++) {
12391+ unsigned int c;
12392+ if (get_user(c, (unsigned int *)pc+i))
12393+ printk(KERN_CONT "???????? ");
12394+ else
12395+ printk(KERN_CONT "%08x ", c);
12396+ }
12397+ printk("\n");
12398+}
12399+#endif
12400+
12401 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12402 {
12403 enum ctx_state prev_state = exception_enter();
12404@@ -350,6 +813,29 @@ retry:
12405 if (!vma)
12406 goto bad_area;
12407
12408+#ifdef CONFIG_PAX_PAGEEXEC
12409+ /* PaX: detect ITLB misses on non-exec pages */
12410+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12411+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12412+ {
12413+ if (address != regs->tpc)
12414+ goto good_area;
12415+
12416+ up_read(&mm->mmap_sem);
12417+ switch (pax_handle_fetch_fault(regs)) {
12418+
12419+#ifdef CONFIG_PAX_EMUPLT
12420+ case 2:
12421+ case 3:
12422+ return;
12423+#endif
12424+
12425+ }
12426+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12427+ do_group_exit(SIGKILL);
12428+ }
12429+#endif
12430+
12431 /* Pure DTLB misses do not tell us whether the fault causing
12432 * load/store/atomic was a write or not, it only says that there
12433 * was no match. So in such a case we (carefully) read the
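
Two 64-bit-specific details stand out in this file: any target computed for a compat task is truncated with addr &= 0xFFFFFFFFUL (the TIF_32BIT tests), and patched PLT emulation #5 reassembles a full 64-bit target from two sethi/or pairs joined by sllx %g1, 32 -- the canonical 64-bit PLT sequence. A sketch of that reconstruction, mirroring the register updates in the hunk:

#include <stdint.h>

/* emulation #5: %g1 = high 32 bits, %g5 = low 32 bits,
 * then sllx %g1, 32 ; jmpl %g1+%g5 */
static uint64_t plt64_target(uint32_t sethi1, uint32_t or1,
			     uint32_t sethi2, uint32_t or2)
{
	uint64_t g1 = ((uint64_t)(sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
	uint64_t g5 = ((uint64_t)(sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);

	return (g1 << 32) + g5;
}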
12434diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12435index d329537..2c3746a 100644
12436--- a/arch/sparc/mm/hugetlbpage.c
12437+++ b/arch/sparc/mm/hugetlbpage.c
12438@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12439 unsigned long addr,
12440 unsigned long len,
12441 unsigned long pgoff,
12442- unsigned long flags)
12443+ unsigned long flags,
12444+ unsigned long offset)
12445 {
12446+ struct mm_struct *mm = current->mm;
12447 unsigned long task_size = TASK_SIZE;
12448 struct vm_unmapped_area_info info;
12449
12450@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12451
12452 info.flags = 0;
12453 info.length = len;
12454- info.low_limit = TASK_UNMAPPED_BASE;
12455+ info.low_limit = mm->mmap_base;
12456 info.high_limit = min(task_size, VA_EXCLUDE_START);
12457 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12458 info.align_offset = 0;
12459+ info.threadstack_offset = offset;
12460 addr = vm_unmapped_area(&info);
12461
12462 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12463 VM_BUG_ON(addr != -ENOMEM);
12464 info.low_limit = VA_EXCLUDE_END;
12465+
12466+#ifdef CONFIG_PAX_RANDMMAP
12467+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12468+ info.low_limit += mm->delta_mmap;
12469+#endif
12470+
12471 info.high_limit = task_size;
12472 addr = vm_unmapped_area(&info);
12473 }
12474@@ -55,7 +64,8 @@ static unsigned long
12475 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12476 const unsigned long len,
12477 const unsigned long pgoff,
12478- const unsigned long flags)
12479+ const unsigned long flags,
12480+ const unsigned long offset)
12481 {
12482 struct mm_struct *mm = current->mm;
12483 unsigned long addr = addr0;
12484@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12485 info.high_limit = mm->mmap_base;
12486 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12487 info.align_offset = 0;
12488+ info.threadstack_offset = offset;
12489 addr = vm_unmapped_area(&info);
12490
12491 /*
12492@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12493 VM_BUG_ON(addr != -ENOMEM);
12494 info.flags = 0;
12495 info.low_limit = TASK_UNMAPPED_BASE;
12496+
12497+#ifdef CONFIG_PAX_RANDMMAP
12498+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12499+ info.low_limit += mm->delta_mmap;
12500+#endif
12501+
12502 info.high_limit = STACK_TOP32;
12503 addr = vm_unmapped_area(&info);
12504 }
12505@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12506 struct mm_struct *mm = current->mm;
12507 struct vm_area_struct *vma;
12508 unsigned long task_size = TASK_SIZE;
12509+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12510
12511 if (test_thread_flag(TIF_32BIT))
12512 task_size = STACK_TOP32;
12513@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12514 return addr;
12515 }
12516
12517+#ifdef CONFIG_PAX_RANDMMAP
12518+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12519+#endif
12520+
12521 if (addr) {
12522 addr = ALIGN(addr, HPAGE_SIZE);
12523 vma = find_vma(mm, addr);
12524- if (task_size - len >= addr &&
12525- (!vma || addr + len <= vma->vm_start))
12526+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12527 return addr;
12528 }
12529 if (mm->get_unmapped_area == arch_get_unmapped_area)
12530 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12531- pgoff, flags);
12532+ pgoff, flags, offset);
12533 else
12534 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12535- pgoff, flags);
12536+ pgoff, flags, offset);
12537 }
12538
12539 pte_t *huge_pte_alloc(struct mm_struct *mm,
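
Three grsecurity hooks thread through this file: the bottom-up floor moves from TASK_UNMAPPED_BASE to the per-mm (and, under RANDMMAP, randomized) mm->mmap_base, the fallback search windows add mm->delta_mmap so the retry region stays randomized too, and the open-coded overlap test is replaced by check_heap_stack_gap(), a helper defined elsewhere in this patch that additionally enforces a guard gap and the thread-stack offset. For contrast, a model of the pre-patch test it subsumes (stand-in type; the real check operates on vm_area_struct):

struct vma_model { unsigned long vm_start; };

static int fits_before(const struct vma_model *vma,
		       unsigned long addr, unsigned long len)
{
	return !vma || addr + len <= vma->vm_start;
}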
12540diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12541index 2cfb0f2..e917d9f 100644
12542--- a/arch/sparc/mm/init_64.c
12543+++ b/arch/sparc/mm/init_64.c
12544@@ -189,9 +189,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12545 int num_kernel_image_mappings;
12546
12547 #ifdef CONFIG_DEBUG_DCFLUSH
12548-atomic_t dcpage_flushes = ATOMIC_INIT(0);
12549+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
12550 #ifdef CONFIG_SMP
12551-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12552+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12553 #endif
12554 #endif
12555
12556@@ -199,7 +199,7 @@ inline void flush_dcache_page_impl(struct page *page)
12557 {
12558 BUG_ON(tlb_type == hypervisor);
12559 #ifdef CONFIG_DEBUG_DCFLUSH
12560- atomic_inc(&dcpage_flushes);
12561+ atomic_inc_unchecked(&dcpage_flushes);
12562 #endif
12563
12564 #ifdef DCACHE_ALIASING_POSSIBLE
12565@@ -471,10 +471,10 @@ void mmu_info(struct seq_file *m)
12566
12567 #ifdef CONFIG_DEBUG_DCFLUSH
12568 seq_printf(m, "DCPageFlushes\t: %d\n",
12569- atomic_read(&dcpage_flushes));
12570+ atomic_read_unchecked(&dcpage_flushes));
12571 #ifdef CONFIG_SMP
12572 seq_printf(m, "DCPageFlushesXC\t: %d\n",
12573- atomic_read(&dcpage_flushes_xcall));
12574+ atomic_read_unchecked(&dcpage_flushes_xcall));
12575 #endif /* CONFIG_SMP */
12576 #endif /* CONFIG_DEBUG_DCFLUSH */
12577 }
12578diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
12579index 4f3006b..453f625f 100644
12580--- a/arch/tile/Kconfig
12581+++ b/arch/tile/Kconfig
12582@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
12583
12584 config KEXEC
12585 bool "kexec system call"
12586+ depends on !GRKERNSEC_KMEM
12587 ---help---
12588 kexec is a system call that implements the ability to shutdown your
12589 current kernel, and to start another kernel. It is like a reboot
12590diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
12591index 7b11c5f..755a026 100644
12592--- a/arch/tile/include/asm/atomic_64.h
12593+++ b/arch/tile/include/asm/atomic_64.h
12594@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
12595
12596 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12597
12598+#define atomic64_read_unchecked(v) atomic64_read(v)
12599+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
12600+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
12601+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
12602+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
12603+#define atomic64_inc_unchecked(v) atomic64_inc(v)
12604+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
12605+#define atomic64_dec_unchecked(v) atomic64_dec(v)
12606+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
12607+
12608 /* Define this to indicate that cmpxchg is an efficient operation. */
12609 #define __HAVE_ARCH_CMPXCHG
12610
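
tile carries no refcount instrumentation, so the *_unchecked names must still exist for common code; here they simply alias the plain operations. A hedged usage sketch of why the parallel API is kept even when it compiles to the same thing (the type and __sync builtin are stand-ins, not the tile implementation):

typedef struct { long counter; } atomic64_unchecked_model_t;	/* stand-in */

#define model_inc_unchecked(v)	__sync_fetch_and_add(&(v)->counter, 1)

static atomic64_unchecked_model_t nr_events;	/* statistic: wrap is harmless */

static void count_event(void)
{
	model_inc_unchecked(&nr_events);	/* annotates intent portably */
}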
12611diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
12612index 6160761..00cac88 100644
12613--- a/arch/tile/include/asm/cache.h
12614+++ b/arch/tile/include/asm/cache.h
12615@@ -15,11 +15,12 @@
12616 #ifndef _ASM_TILE_CACHE_H
12617 #define _ASM_TILE_CACHE_H
12618
12619+#include <linux/const.h>
12620 #include <arch/chip.h>
12621
12622 /* bytes per L1 data cache line */
12623 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
12624-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12625+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12626
12627 /* bytes per L2 cache line */
12628 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
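
This one-line change recurs in several cache.h files in this patch (um and unicore32 below, among others). _AC(1,UL) makes L1_CACHE_BYTES an unsigned long in C -- so masks like ~(L1_CACHE_BYTES-1) don't truncate on 64-bit -- while staying a bare 1 in assembly, where integer suffixes are a syntax error. The idiom, as defined in include/uapi/linux/const.h:

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler: no integer suffixes */
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)	/* C: paste the suffix -> 1UL */
#endif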
12629diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
12630index b6cde32..c0cb736 100644
12631--- a/arch/tile/include/asm/uaccess.h
12632+++ b/arch/tile/include/asm/uaccess.h
12633@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
12634 const void __user *from,
12635 unsigned long n)
12636 {
12637- int sz = __compiletime_object_size(to);
12638+ size_t sz = __compiletime_object_size(to);
12639
12640- if (likely(sz == -1 || sz >= n))
12641+ if (likely(sz == (size_t)-1 || sz >= n))
12642 n = _copy_from_user(to, from, n);
12643 else
12644 copy_from_user_overflow();
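
The int -> size_t change above removes a signed/unsigned mismatch: __compiletime_object_size() returns -1 for "size unknown", and comparing a signed sz against the unsigned n converts sz to unsigned long silently anyway. Keeping everything in size_t and spelling the sentinel (size_t)-1 makes that conversion explicit instead of accidental. A hedged model of the check:

#include <stddef.h>

/* how many bytes the copy helper would permit; (size_t)-1 = size unknown */
static size_t allowed_copy(size_t sz, size_t n)
{
	return (sz == (size_t)-1 || sz >= n) ? n : 0;	/* 0: report overflow */
}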
12645diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
12646index e514899..f8743c4 100644
12647--- a/arch/tile/mm/hugetlbpage.c
12648+++ b/arch/tile/mm/hugetlbpage.c
12649@@ -207,6 +207,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
12650 info.high_limit = TASK_SIZE;
12651 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12652 info.align_offset = 0;
12653+ info.threadstack_offset = 0;
12654 return vm_unmapped_area(&info);
12655 }
12656
12657@@ -224,6 +225,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
12658 info.high_limit = current->mm->mmap_base;
12659 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12660 info.align_offset = 0;
12661+ info.threadstack_offset = 0;
12662 addr = vm_unmapped_area(&info);
12663
12664 /*
12665diff --git a/arch/um/Makefile b/arch/um/Makefile
12666index e4b1a96..16162f8 100644
12667--- a/arch/um/Makefile
12668+++ b/arch/um/Makefile
12669@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
12670 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
12671 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
12672
12673+ifdef CONSTIFY_PLUGIN
12674+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12675+endif
12676+
12677 #This will adjust *FLAGS accordingly to the platform.
12678 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
12679
12680diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
12681index 19e1bdd..3665b77 100644
12682--- a/arch/um/include/asm/cache.h
12683+++ b/arch/um/include/asm/cache.h
12684@@ -1,6 +1,7 @@
12685 #ifndef __UM_CACHE_H
12686 #define __UM_CACHE_H
12687
12688+#include <linux/const.h>
12689
12690 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
12691 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12692@@ -12,6 +13,6 @@
12693 # define L1_CACHE_SHIFT 5
12694 #endif
12695
12696-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12697+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12698
12699 #endif
12700diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
12701index 2e0a6b1..a64d0f5 100644
12702--- a/arch/um/include/asm/kmap_types.h
12703+++ b/arch/um/include/asm/kmap_types.h
12704@@ -8,6 +8,6 @@
12705
12706 /* No more #include "asm/arch/kmap_types.h" ! */
12707
12708-#define KM_TYPE_NR 14
12709+#define KM_TYPE_NR 15
12710
12711 #endif
12712diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
12713index 5ff53d9..5850cdf 100644
12714--- a/arch/um/include/asm/page.h
12715+++ b/arch/um/include/asm/page.h
12716@@ -14,6 +14,9 @@
12717 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
12718 #define PAGE_MASK (~(PAGE_SIZE-1))
12719
12720+#define ktla_ktva(addr) (addr)
12721+#define ktva_ktla(addr) (addr)
12722+
12723 #ifndef __ASSEMBLY__
12724
12725 struct page;
12726diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
12727index 0032f92..cd151e0 100644
12728--- a/arch/um/include/asm/pgtable-3level.h
12729+++ b/arch/um/include/asm/pgtable-3level.h
12730@@ -58,6 +58,7 @@
12731 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
12732 #define pud_populate(mm, pud, pmd) \
12733 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
12734+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
12735
12736 #ifdef CONFIG_64BIT
12737 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
12738diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
12739index f17bca8..48adb87 100644
12740--- a/arch/um/kernel/process.c
12741+++ b/arch/um/kernel/process.c
12742@@ -356,22 +356,6 @@ int singlestepping(void * t)
12743 return 2;
12744 }
12745
12746-/*
12747- * Only x86 and x86_64 have an arch_align_stack().
12748- * All other arches have "#define arch_align_stack(x) (x)"
12749- * in their asm/exec.h
12750- * As this is included in UML from asm-um/system-generic.h,
12751- * we can use it to behave as the subarch does.
12752- */
12753-#ifndef arch_align_stack
12754-unsigned long arch_align_stack(unsigned long sp)
12755-{
12756- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
12757- sp -= get_random_int() % 8192;
12758- return sp & ~0xf;
12759-}
12760-#endif
12761-
12762 unsigned long get_wchan(struct task_struct *p)
12763 {
12764 unsigned long stack_page, sp, ip;
12765diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
12766index ad8f795..2c7eec6 100644
12767--- a/arch/unicore32/include/asm/cache.h
12768+++ b/arch/unicore32/include/asm/cache.h
12769@@ -12,8 +12,10 @@
12770 #ifndef __UNICORE_CACHE_H__
12771 #define __UNICORE_CACHE_H__
12772
12773-#define L1_CACHE_SHIFT (5)
12774-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12775+#include <linux/const.h>
12776+
12777+#define L1_CACHE_SHIFT 5
12778+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12779
12780 /*
12781 * Memory returned by kmalloc() may be used for DMA, so we must make
12782diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
12783index 27adfd9..2362ac6 100644
12784--- a/arch/x86/Kconfig
12785+++ b/arch/x86/Kconfig
12786@@ -22,6 +22,7 @@ config X86_64
12787 config X86
12788 def_bool y
12789 select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
12790+ select ARCH_HAS_FAST_MULTIPLIER
12791 select ARCH_MIGHT_HAVE_PC_PARPORT
12792 select ARCH_MIGHT_HAVE_PC_SERIO
12793 select HAVE_AOUT if X86_32
12794@@ -128,7 +129,7 @@ config X86
12795 select RTC_LIB
12796 select HAVE_DEBUG_STACKOVERFLOW
12797 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
12798- select HAVE_CC_STACKPROTECTOR
12799+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
12800 select GENERIC_CPU_AUTOPROBE
12801 select HAVE_ARCH_AUDITSYSCALL
12802 select ARCH_SUPPORTS_ATOMIC_RMW
12803@@ -253,7 +254,7 @@ config X86_HT
12804
12805 config X86_32_LAZY_GS
12806 def_bool y
12807- depends on X86_32 && !CC_STACKPROTECTOR
12808+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
12809
12810 config ARCH_HWEIGHT_CFLAGS
12811 string
12812@@ -549,6 +550,7 @@ config SCHED_OMIT_FRAME_POINTER
12813
12814 menuconfig HYPERVISOR_GUEST
12815 bool "Linux guest support"
12816+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
12817 ---help---
12818 Say Y here to enable options for running Linux under various hyper-
12819 visors. This option enables basic hypervisor detection and platform
12820@@ -1076,6 +1078,7 @@ choice
12821
12822 config NOHIGHMEM
12823 bool "off"
12824+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12825 ---help---
12826 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
12827 However, the address space of 32-bit x86 processors is only 4
12828@@ -1112,6 +1115,7 @@ config NOHIGHMEM
12829
12830 config HIGHMEM4G
12831 bool "4GB"
12832+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12833 ---help---
12834 Select this if you have a 32-bit processor and between 1 and 4
12835 gigabytes of physical RAM.
12836@@ -1164,7 +1168,7 @@ config PAGE_OFFSET
12837 hex
12838 default 0xB0000000 if VMSPLIT_3G_OPT
12839 default 0x80000000 if VMSPLIT_2G
12840- default 0x78000000 if VMSPLIT_2G_OPT
12841+ default 0x70000000 if VMSPLIT_2G_OPT
12842 default 0x40000000 if VMSPLIT_1G
12843 default 0xC0000000
12844 depends on X86_32
12845@@ -1578,6 +1582,7 @@ source kernel/Kconfig.hz
12846
12847 config KEXEC
12848 bool "kexec system call"
12849+ depends on !GRKERNSEC_KMEM
12850 ---help---
12851 kexec is a system call that implements the ability to shutdown your
12852 current kernel, and to start another kernel. It is like a reboot
12853@@ -1728,7 +1733,9 @@ config X86_NEED_RELOCS
12854
12855 config PHYSICAL_ALIGN
12856 hex "Alignment value to which kernel should be aligned"
12857- default "0x200000"
12858+ default "0x1000000"
12859+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
12860+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
12861 range 0x2000 0x1000000 if X86_32
12862 range 0x200000 0x1000000 if X86_64
12863 ---help---
12864@@ -1811,6 +1818,7 @@ config COMPAT_VDSO
12865 def_bool n
12866 prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
12867 depends on X86_32 || IA32_EMULATION
12868+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
12869 ---help---
12870 Certain buggy versions of glibc will crash if they are
12871 presented with a 32-bit vDSO that is not mapped at the address
12872diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
12873index 6983314..54ad7e8 100644
12874--- a/arch/x86/Kconfig.cpu
12875+++ b/arch/x86/Kconfig.cpu
12876@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
12877
12878 config X86_F00F_BUG
12879 def_bool y
12880- depends on M586MMX || M586TSC || M586 || M486
12881+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
12882
12883 config X86_INVD_BUG
12884 def_bool y
12885@@ -327,7 +327,7 @@ config X86_INVD_BUG
12886
12887 config X86_ALIGNMENT_16
12888 def_bool y
12889- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12890+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12891
12892 config X86_INTEL_USERCOPY
12893 def_bool y
12894@@ -369,7 +369,7 @@ config X86_CMPXCHG64
12895 # generates cmov.
12896 config X86_CMOV
12897 def_bool y
12898- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12899+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12900
12901 config X86_MINIMUM_CPU_FAMILY
12902 int
12903diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
12904index 61bd2ad..50b625d 100644
12905--- a/arch/x86/Kconfig.debug
12906+++ b/arch/x86/Kconfig.debug
12907@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
12908 config DEBUG_RODATA
12909 bool "Write protect kernel read-only data structures"
12910 default y
12911- depends on DEBUG_KERNEL
12912+ depends on DEBUG_KERNEL && BROKEN
12913 ---help---
12914 Mark the kernel read-only data as write-protected in the pagetables,
12915 in order to catch accidental (and incorrect) writes to such const
12916@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
12917
12918 config DEBUG_SET_MODULE_RONX
12919 bool "Set loadable kernel module data as NX and text as RO"
12920- depends on MODULES
12921+ depends on MODULES && BROKEN
12922 ---help---
12923 This option helps catch unintended modifications to loadable
12924 kernel module's text and read-only data. It also prevents execution
12925diff --git a/arch/x86/Makefile b/arch/x86/Makefile
12926index 33f71b0..c2cefa2 100644
12927--- a/arch/x86/Makefile
12928+++ b/arch/x86/Makefile
12929@@ -71,9 +71,6 @@ ifeq ($(CONFIG_X86_32),y)
12930 # CPU-specific tuning. Anything which can be shared with UML should go here.
12931 include $(srctree)/arch/x86/Makefile_32.cpu
12932 KBUILD_CFLAGS += $(cflags-y)
12933-
12934- # temporary until string.h is fixed
12935- KBUILD_CFLAGS += -ffreestanding
12936 else
12937 BITS := 64
12938 UTS_MACHINE := x86_64
12939@@ -114,6 +111,9 @@ else
12940 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
12941 endif
12942
12943+# temporary until string.h is fixed
12944+KBUILD_CFLAGS += -ffreestanding
12945+
12946 # Make sure compiler does not have buggy stack-protector support.
12947 ifdef CONFIG_CC_STACKPROTECTOR
12948 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
12949@@ -271,3 +271,12 @@ define archhelp
12950 echo ' FDINITRD=file initrd for the booted kernel'
12951 echo ' kvmconfig - Enable additional options for guest kernel support'
12952 endef
12953+
12954+define OLD_LD
12955+
12956+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
12957+*** Please upgrade your binutils to 2.18 or newer
12958+endef
12959+
12960+archprepare:
12961+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
12962diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
12963index dbe8dd2..2f0a98f 100644
12964--- a/arch/x86/boot/Makefile
12965+++ b/arch/x86/boot/Makefile
12966@@ -52,6 +52,9 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
12967 # ---------------------------------------------------------------------------
12968
12969 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
12970+ifdef CONSTIFY_PLUGIN
12971+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12972+endif
12973 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12974 GCOV_PROFILE := n
12975
12976diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
12977index 878e4b9..20537ab 100644
12978--- a/arch/x86/boot/bitops.h
12979+++ b/arch/x86/boot/bitops.h
12980@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12981 u8 v;
12982 const u32 *p = (const u32 *)addr;
12983
12984- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12985+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12986 return v;
12987 }
12988
12989@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12990
12991 static inline void set_bit(int nr, void *addr)
12992 {
12993- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12994+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12995 }
12996
12997 #endif /* BOOT_BITOPS_H */
12998diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
12999index bd49ec6..94c7f58 100644
13000--- a/arch/x86/boot/boot.h
13001+++ b/arch/x86/boot/boot.h
13002@@ -84,7 +84,7 @@ static inline void io_delay(void)
13003 static inline u16 ds(void)
13004 {
13005 u16 seg;
13006- asm("movw %%ds,%0" : "=rm" (seg));
13007+ asm volatile("movw %%ds,%0" : "=rm" (seg));
13008 return seg;
13009 }
13010
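
The bitops.h and boot.h changes above (and the cpucheck.c ones further down) all add the volatile qualifier to inline asm. Without it, GCC treats an asm with outputs as a pure function of its inputs and may CSE two identical statements, hoist one out of a loop, or delete it when the result is unused -- wrong for reads of mutable machine state such as %ds, MSRs, or memory bits other code can flip. A minimal illustration:

static inline unsigned short read_ds(void)
{
	unsigned short seg;

	/* volatile: executed at every call, never cached or elided */
	asm volatile("movw %%ds,%0" : "=rm" (seg));
	return seg;
}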
13011diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
13012index 0fcd913..3bb5c42 100644
13013--- a/arch/x86/boot/compressed/Makefile
13014+++ b/arch/x86/boot/compressed/Makefile
13015@@ -16,6 +16,9 @@ KBUILD_CFLAGS += $(cflags-y)
13016 KBUILD_CFLAGS += -mno-mmx -mno-sse
13017 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
13018 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
13019+ifdef CONSTIFY_PLUGIN
13020+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
13021+endif
13022
13023 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13024 GCOV_PROFILE := n
13025diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
13026index a53440e..c3dbf1e 100644
13027--- a/arch/x86/boot/compressed/efi_stub_32.S
13028+++ b/arch/x86/boot/compressed/efi_stub_32.S
13029@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
13030 * parameter 2, ..., param n. To make things easy, we save the return
13031 * address of efi_call_phys in a global variable.
13032 */
13033- popl %ecx
13034- movl %ecx, saved_return_addr(%edx)
13035- /* get the function pointer into ECX*/
13036- popl %ecx
13037- movl %ecx, efi_rt_function_ptr(%edx)
13038+ popl saved_return_addr(%edx)
13039+ popl efi_rt_function_ptr(%edx)
13040
13041 /*
13042 * 3. Call the physical function.
13043 */
13044- call *%ecx
13045+ call *efi_rt_function_ptr(%edx)
13046
13047 /*
13048 * 4. Balance the stack. And because EAX contain the return value,
13049@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
13050 1: popl %edx
13051 subl $1b, %edx
13052
13053- movl efi_rt_function_ptr(%edx), %ecx
13054- pushl %ecx
13055+ pushl efi_rt_function_ptr(%edx)
13056
13057 /*
13058 * 10. Push the saved return address onto the stack and return.
13059 */
13060- movl saved_return_addr(%edx), %ecx
13061- pushl %ecx
13062- ret
13063+ jmpl *saved_return_addr(%edx)
13064 ENDPROC(efi_call_phys)
13065 .previous
13066
13067diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
13068index cbed140..5f2ca57 100644
13069--- a/arch/x86/boot/compressed/head_32.S
13070+++ b/arch/x86/boot/compressed/head_32.S
13071@@ -140,10 +140,10 @@ preferred_addr:
13072 addl %eax, %ebx
13073 notl %eax
13074 andl %eax, %ebx
13075- cmpl $LOAD_PHYSICAL_ADDR, %ebx
13076+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
13077 jge 1f
13078 #endif
13079- movl $LOAD_PHYSICAL_ADDR, %ebx
13080+ movl $____LOAD_PHYSICAL_ADDR, %ebx
13081 1:
13082
13083 /* Target address to relocate to for decompression */
13084diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
13085index 2884e0c..904a2f7 100644
13086--- a/arch/x86/boot/compressed/head_64.S
13087+++ b/arch/x86/boot/compressed/head_64.S
13088@@ -94,10 +94,10 @@ ENTRY(startup_32)
13089 addl %eax, %ebx
13090 notl %eax
13091 andl %eax, %ebx
13092- cmpl $LOAD_PHYSICAL_ADDR, %ebx
13093+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
13094 jge 1f
13095 #endif
13096- movl $LOAD_PHYSICAL_ADDR, %ebx
13097+ movl $____LOAD_PHYSICAL_ADDR, %ebx
13098 1:
13099
13100 /* Target address to relocate to for decompression */
13101@@ -322,10 +322,10 @@ preferred_addr:
13102 addq %rax, %rbp
13103 notq %rax
13104 andq %rax, %rbp
13105- cmpq $LOAD_PHYSICAL_ADDR, %rbp
13106+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
13107 jge 1f
13108 #endif
13109- movq $LOAD_PHYSICAL_ADDR, %rbp
13110+ movq $____LOAD_PHYSICAL_ADDR, %rbp
13111 1:
13112
13113 /* Target address to relocate to for decompression */
13114@@ -431,8 +431,8 @@ gdt:
13115 .long gdt
13116 .word 0
13117 .quad 0x0000000000000000 /* NULL descriptor */
13118- .quad 0x00af9a000000ffff /* __KERNEL_CS */
13119- .quad 0x00cf92000000ffff /* __KERNEL_DS */
13120+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
13121+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
13122 .quad 0x0080890000000000 /* TS descriptor */
13123 .quad 0x0000000000000000 /* TS continued */
13124 gdt_end:
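
The only difference between 0x00af9a...ffff and 0x00af9b...ffff is descriptor bit 40, the "accessed" bit. If it is clear, the CPU performs a locked write into the descriptor the first time the segment is loaded; pre-setting it avoids that write, which matters once the GDT lives in read-only memory (as KERNEXEC arranges elsewhere in this patch). A sketch of the bit in question:

#include <stdint.h>

#define DESC_ACCESSED	(1ULL << 40)	/* access byte, bit 0 */

static uint64_t preset_accessed(uint64_t desc)
{
	/* 0x00af9a000000ffff -> 0x00af9b000000ffff */
	return desc | DESC_ACCESSED;
}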
13125diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
13126index 57ab74d..7c52182 100644
13127--- a/arch/x86/boot/compressed/misc.c
13128+++ b/arch/x86/boot/compressed/misc.c
13129@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
13130 * Calculate the delta between where vmlinux was linked to load
13131 * and where it was actually loaded.
13132 */
13133- delta = min_addr - LOAD_PHYSICAL_ADDR;
13134+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
13135 if (!delta) {
13136 debug_putstr("No relocation needed... ");
13137 return;
13138@@ -312,7 +312,7 @@ static void parse_elf(void *output)
13139 Elf32_Ehdr ehdr;
13140 Elf32_Phdr *phdrs, *phdr;
13141 #endif
13142- void *dest;
13143+ void *dest, *prev;
13144 int i;
13145
13146 memcpy(&ehdr, output, sizeof(ehdr));
13147@@ -339,13 +339,16 @@ static void parse_elf(void *output)
13148 case PT_LOAD:
13149 #ifdef CONFIG_RELOCATABLE
13150 dest = output;
13151- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
13152+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
13153 #else
13154 dest = (void *)(phdr->p_paddr);
13155 #endif
13156 memcpy(dest,
13157 output + phdr->p_offset,
13158 phdr->p_filesz);
13159+ if (i)
13160+ memset(prev, 0xff, dest - prev);
13161+ prev = dest + phdr->p_filesz;
13162 break;
13163 default: /* Ignore other PT_* */ break;
13164 }
13165@@ -395,7 +398,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
13166 error("Destination address too large");
13167 #endif
13168 #ifndef CONFIG_RELOCATABLE
13169- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
13170+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
13171 error("Wrong destination address");
13172 #endif
13173
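Editor's note: the parse_elf() addition tracks the end of each copied PT_LOAD segment and fills the alignment gap before the next one with 0xff bytes, so the gaps between kernel sections hold a known filler instead of stale decompression data (presumably relevant once those pages end up mapped executable). A userspace sketch of the same copy-and-poison loop, using hypothetical types:

```c
#include <string.h>

/* Hypothetical, simplified segment descriptor. */
struct seg {
        unsigned long offset;   /* file offset of segment data */
        unsigned long paddr;    /* physical load address       */
        unsigned long filesz;   /* bytes of data in the file   */
};

/* Copy segments into place, poisoning the gap between the end
 * of one segment and the start of the next with 0xff. */
void place_segments(char *out, const char *image,
                    const struct seg *s, int nsegs,
                    unsigned long load_base)
{
        char *prev_end = NULL;

        for (int i = 0; i < nsegs; i++) {
                char *dest = out + (s[i].paddr - load_base);

                memcpy(dest, image + s[i].offset, s[i].filesz);
                if (prev_end)
                        memset(prev_end, 0xff, dest - prev_end);
                prev_end = dest + s[i].filesz;
        }
}
```

The sketch initializes prev_end explicitly rather than keying off the loop index as the hunk does; both rely on PT_LOAD segments appearing in ascending address order.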
13174diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
13175index 1fd7d57..0f7d096 100644
13176--- a/arch/x86/boot/cpucheck.c
13177+++ b/arch/x86/boot/cpucheck.c
13178@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13179 u32 ecx = MSR_K7_HWCR;
13180 u32 eax, edx;
13181
13182- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13183+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13184 eax &= ~(1 << 15);
13185- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13186+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13187
13188 get_cpuflags(); /* Make sure it really did something */
13189 err = check_cpuflags();
13190@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13191 u32 ecx = MSR_VIA_FCR;
13192 u32 eax, edx;
13193
13194- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13195+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13196 eax |= (1<<1)|(1<<7);
13197- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13198+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13199
13200 set_bit(X86_FEATURE_CX8, cpu.flags);
13201 err = check_cpuflags();
13202@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13203 u32 eax, edx;
13204 u32 level = 1;
13205
13206- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13207- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13208- asm("cpuid"
13209+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13210+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13211+ asm volatile("cpuid"
13212 : "+a" (level), "=d" (cpu.flags[0])
13213 : : "ecx", "ebx");
13214- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13215+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13216
13217 err = check_cpuflags();
13218 } else if (err == 0x01 &&
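Editor's note: the cpucheck.c hunk adds `volatile` to the rdmsr/wrmsr/cpuid asm statements. Without it, GCC may treat an asm with output operands as a pure computation: free to reorder, combine, or delete it when the outputs look unused, which is unsafe for instructions with side effects. (Asm statements with no outputs are already implicitly volatile, so the change only matters semantically for the ones that produce outputs, i.e. the rdmsr and cpuid statements.) A minimal standalone version of the same pattern:

```c
#include <stdint.h>

/* MSR accessors in the same style as cpucheck.c. "volatile"
 * tells the compiler the asm has effects beyond its operands,
 * so it must not be CSE'd, hoisted, or dropped. These are
 * privileged instructions; shown here for the pattern only. */
static inline uint64_t rdmsr(uint32_t msr)
{
        uint32_t lo, hi;

        asm volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr));
        return ((uint64_t)hi << 32) | lo;
}

static inline void wrmsr(uint32_t msr, uint64_t val)
{
        asm volatile("wrmsr"
                     : /* no outputs */
                     : "c" (msr), "a" ((uint32_t)val),
                       "d" ((uint32_t)(val >> 32)));
}
```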
13219diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
13220index 7a6d43a..edf6e40 100644
13221--- a/arch/x86/boot/header.S
13222+++ b/arch/x86/boot/header.S
13223@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
13224                                           # singly linked list of
13225 # struct setup_data
13226
13227-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
13228+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
13229
13230 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
13231+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13232+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
13233+#else
13234 #define VO_INIT_SIZE (VO__end - VO__text)
13235+#endif
13236 #if ZO_INIT_SIZE > VO_INIT_SIZE
13237 #define INIT_SIZE ZO_INIT_SIZE
13238 #else
13239diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
13240index db75d07..8e6d0af 100644
13241--- a/arch/x86/boot/memory.c
13242+++ b/arch/x86/boot/memory.c
13243@@ -19,7 +19,7 @@
13244
13245 static int detect_memory_e820(void)
13246 {
13247- int count = 0;
13248+ unsigned int count = 0;
13249 struct biosregs ireg, oreg;
13250 struct e820entry *desc = boot_params.e820_map;
13251 static struct e820entry buf; /* static so it is zeroed */
13252diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
13253index ba3e100..6501b8f 100644
13254--- a/arch/x86/boot/video-vesa.c
13255+++ b/arch/x86/boot/video-vesa.c
13256@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
13257
13258 boot_params.screen_info.vesapm_seg = oreg.es;
13259 boot_params.screen_info.vesapm_off = oreg.di;
13260+ boot_params.screen_info.vesapm_size = oreg.cx;
13261 }
13262
13263 /*
13264diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
13265index 43eda28..5ab5fdb 100644
13266--- a/arch/x86/boot/video.c
13267+++ b/arch/x86/boot/video.c
13268@@ -96,7 +96,7 @@ static void store_mode_params(void)
13269 static unsigned int get_entry(void)
13270 {
13271 char entry_buf[4];
13272- int i, len = 0;
13273+ unsigned int i, len = 0;
13274 int key;
13275 unsigned int v;
13276
13277diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
13278index 9105655..41779c1 100644
13279--- a/arch/x86/crypto/aes-x86_64-asm_64.S
13280+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
13281@@ -8,6 +8,8 @@
13282 * including this sentence is retained in full.
13283 */
13284
13285+#include <asm/alternative-asm.h>
13286+
13287 .extern crypto_ft_tab
13288 .extern crypto_it_tab
13289 .extern crypto_fl_tab
13290@@ -70,6 +72,8 @@
13291 je B192; \
13292 leaq 32(r9),r9;
13293
13294+#define ret pax_force_retaddr; ret
13295+
13296 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
13297 movq r1,r2; \
13298 movq r3,r4; \
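Editor's note: two idioms recur from here through the rest of the crypto asm. First, each file gains asm/alternative-asm.h, which in this patch provides the pax_force_retaddr macro that KERNEXEC inserts before every `ret`, forcing a possibly corrupted return address back into the kernel's address range before it is used. Second, aes-x86_64-asm_64.S takes a shortcut: `#define ret pax_force_retaddr; ret` rewrites every `ret` in the file at preprocessing time instead of patching each site by hand. A conceptual C sketch of the OR-style fixup, with an assumed mask value:

```c
#include <stdint.h>

/* Assumed mask: the real value depends on the kernel's virtual
 * layout and the KERNEXEC method configured. Conceptual only. */
#define KERNEXEC_MASK 0xffffffff80000000UL

/* OR the saved return address with the mask so that even an
 * attacker-controlled value can only land in kernel space,
 * defeating ret2usr-style redirection. */
static inline uint64_t force_retaddr(uint64_t retaddr)
{
        return retaddr | KERNEXEC_MASK;
}
```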
13299diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
13300index 477e9d7..c92c7d8 100644
13301--- a/arch/x86/crypto/aesni-intel_asm.S
13302+++ b/arch/x86/crypto/aesni-intel_asm.S
13303@@ -31,6 +31,7 @@
13304
13305 #include <linux/linkage.h>
13306 #include <asm/inst.h>
13307+#include <asm/alternative-asm.h>
13308
13309 #ifdef __x86_64__
13310 .data
13311@@ -205,7 +206,7 @@ enc: .octa 0x2
13312 * num_initial_blocks = b mod 4
13313 * encrypt the initial num_initial_blocks blocks and apply ghash on
13314 * the ciphertext
13315-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13316+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13317 * are clobbered
13318 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13319 */
13320@@ -214,8 +215,8 @@ enc: .octa 0x2
13321 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13322 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13323 mov arg7, %r10 # %r10 = AAD
13324- mov arg8, %r12 # %r12 = aadLen
13325- mov %r12, %r11
13326+ mov arg8, %r15 # %r15 = aadLen
13327+ mov %r15, %r11
13328 pxor %xmm\i, %xmm\i
13329 _get_AAD_loop\num_initial_blocks\operation:
13330 movd (%r10), \TMP1
13331@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13332 psrldq $4, %xmm\i
13333 pxor \TMP1, %xmm\i
13334 add $4, %r10
13335- sub $4, %r12
13336+ sub $4, %r15
13337 jne _get_AAD_loop\num_initial_blocks\operation
13338 cmp $16, %r11
13339 je _get_AAD_loop2_done\num_initial_blocks\operation
13340- mov $16, %r12
13341+ mov $16, %r15
13342 _get_AAD_loop2\num_initial_blocks\operation:
13343 psrldq $4, %xmm\i
13344- sub $4, %r12
13345- cmp %r11, %r12
13346+ sub $4, %r15
13347+ cmp %r11, %r15
13348 jne _get_AAD_loop2\num_initial_blocks\operation
13349 _get_AAD_loop2_done\num_initial_blocks\operation:
13350 movdqa SHUF_MASK(%rip), %xmm14
13351@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
13352 * num_initial_blocks = b mod 4
13353 * encrypt the initial num_initial_blocks blocks and apply ghash on
13354 * the ciphertext
13355-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13356+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13357 * are clobbered
13358 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13359 */
13360@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
13361 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13362 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13363 mov arg7, %r10 # %r10 = AAD
13364- mov arg8, %r12 # %r12 = aadLen
13365- mov %r12, %r11
13366+ mov arg8, %r15 # %r15 = aadLen
13367+ mov %r15, %r11
13368 pxor %xmm\i, %xmm\i
13369 _get_AAD_loop\num_initial_blocks\operation:
13370 movd (%r10), \TMP1
13371@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13372 psrldq $4, %xmm\i
13373 pxor \TMP1, %xmm\i
13374 add $4, %r10
13375- sub $4, %r12
13376+ sub $4, %r15
13377 jne _get_AAD_loop\num_initial_blocks\operation
13378 cmp $16, %r11
13379 je _get_AAD_loop2_done\num_initial_blocks\operation
13380- mov $16, %r12
13381+ mov $16, %r15
13382 _get_AAD_loop2\num_initial_blocks\operation:
13383 psrldq $4, %xmm\i
13384- sub $4, %r12
13385- cmp %r11, %r12
13386+ sub $4, %r15
13387+ cmp %r11, %r15
13388 jne _get_AAD_loop2\num_initial_blocks\operation
13389 _get_AAD_loop2_done\num_initial_blocks\operation:
13390 movdqa SHUF_MASK(%rip), %xmm14
13391@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
13392 *
13393 *****************************************************************************/
13394 ENTRY(aesni_gcm_dec)
13395- push %r12
13396+ push %r15
13397 push %r13
13398 push %r14
13399 mov %rsp, %r14
13400@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
13401 */
13402 sub $VARIABLE_OFFSET, %rsp
13403 and $~63, %rsp # align rsp to 64 bytes
13404- mov %arg6, %r12
13405- movdqu (%r12), %xmm13 # %xmm13 = HashKey
13406+ mov %arg6, %r15
13407+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
13408 movdqa SHUF_MASK(%rip), %xmm2
13409 PSHUFB_XMM %xmm2, %xmm13
13410
13411@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
13412 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
13413 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
13414 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
13415- mov %r13, %r12
13416- and $(3<<4), %r12
13417+ mov %r13, %r15
13418+ and $(3<<4), %r15
13419 jz _initial_num_blocks_is_0_decrypt
13420- cmp $(2<<4), %r12
13421+ cmp $(2<<4), %r15
13422 jb _initial_num_blocks_is_1_decrypt
13423 je _initial_num_blocks_is_2_decrypt
13424 _initial_num_blocks_is_3_decrypt:
13425@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
13426 sub $16, %r11
13427 add %r13, %r11
13428 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
13429- lea SHIFT_MASK+16(%rip), %r12
13430- sub %r13, %r12
13431+ lea SHIFT_MASK+16(%rip), %r15
13432+ sub %r13, %r15
13433 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
13434 # (%r13 is the number of bytes in plaintext mod 16)
13435- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13436+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13437 	PSHUFB_XMM %xmm2, %xmm1		# right shift 16-%r13 bytes
13438
13439 movdqa %xmm1, %xmm2
13440 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
13441- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13442+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13443 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
13444 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
13445 pand %xmm1, %xmm2
13446@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
13447 sub $1, %r13
13448 jne _less_than_8_bytes_left_decrypt
13449 _multiple_of_16_bytes_decrypt:
13450- mov arg8, %r12 # %r13 = aadLen (number of bytes)
13451- shl $3, %r12 # convert into number of bits
13452- movd %r12d, %xmm15 # len(A) in %xmm15
13453+	mov	arg8, %r15		# %r15 = aadLen (number of bytes)
13454+ shl $3, %r15 # convert into number of bits
13455+ movd %r15d, %xmm15 # len(A) in %xmm15
13456 shl $3, %arg4 # len(C) in bits (*128)
13457 MOVQ_R64_XMM %arg4, %xmm1
13458 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13459@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
13460 mov %r14, %rsp
13461 pop %r14
13462 pop %r13
13463- pop %r12
13464+ pop %r15
13465+ pax_force_retaddr
13466 ret
13467 ENDPROC(aesni_gcm_dec)
13468
13469@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
13470 * poly = x^128 + x^127 + x^126 + x^121 + 1
13471 ***************************************************************************/
13472 ENTRY(aesni_gcm_enc)
13473- push %r12
13474+ push %r15
13475 push %r13
13476 push %r14
13477 mov %rsp, %r14
13478@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
13479 #
13480 sub $VARIABLE_OFFSET, %rsp
13481 and $~63, %rsp
13482- mov %arg6, %r12
13483- movdqu (%r12), %xmm13
13484+ mov %arg6, %r15
13485+ movdqu (%r15), %xmm13
13486 movdqa SHUF_MASK(%rip), %xmm2
13487 PSHUFB_XMM %xmm2, %xmm13
13488
13489@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
13490 movdqa %xmm13, HashKey(%rsp)
13491 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
13492 and $-16, %r13
13493- mov %r13, %r12
13494+ mov %r13, %r15
13495
13496 # Encrypt first few blocks
13497
13498- and $(3<<4), %r12
13499+ and $(3<<4), %r15
13500 jz _initial_num_blocks_is_0_encrypt
13501- cmp $(2<<4), %r12
13502+ cmp $(2<<4), %r15
13503 jb _initial_num_blocks_is_1_encrypt
13504 je _initial_num_blocks_is_2_encrypt
13505 _initial_num_blocks_is_3_encrypt:
13506@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
13507 sub $16, %r11
13508 add %r13, %r11
13509 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
13510- lea SHIFT_MASK+16(%rip), %r12
13511- sub %r13, %r12
13512+ lea SHIFT_MASK+16(%rip), %r15
13513+ sub %r13, %r15
13514 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
13515 # (%r13 is the number of bytes in plaintext mod 16)
13516- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13517+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13518 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
13519 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
13520- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13521+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13522 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
13523 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
13524 movdqa SHUF_MASK(%rip), %xmm10
13525@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
13526 sub $1, %r13
13527 jne _less_than_8_bytes_left_encrypt
13528 _multiple_of_16_bytes_encrypt:
13529- mov arg8, %r12 # %r12 = addLen (number of bytes)
13530- shl $3, %r12
13531- movd %r12d, %xmm15 # len(A) in %xmm15
13532+	mov	arg8, %r15		# %r15 = aadLen (number of bytes)
13533+ shl $3, %r15
13534+ movd %r15d, %xmm15 # len(A) in %xmm15
13535 shl $3, %arg4 # len(C) in bits (*128)
13536 MOVQ_R64_XMM %arg4, %xmm1
13537 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13538@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
13539 mov %r14, %rsp
13540 pop %r14
13541 pop %r13
13542- pop %r12
13543+ pop %r15
13544+ pax_force_retaddr
13545 ret
13546 ENDPROC(aesni_gcm_enc)
13547
13548@@ -1722,6 +1725,7 @@ _key_expansion_256a:
13549 pxor %xmm1, %xmm0
13550 movaps %xmm0, (TKEYP)
13551 add $0x10, TKEYP
13552+ pax_force_retaddr
13553 ret
13554 ENDPROC(_key_expansion_128)
13555 ENDPROC(_key_expansion_256a)
13556@@ -1748,6 +1752,7 @@ _key_expansion_192a:
13557 shufps $0b01001110, %xmm2, %xmm1
13558 movaps %xmm1, 0x10(TKEYP)
13559 add $0x20, TKEYP
13560+ pax_force_retaddr
13561 ret
13562 ENDPROC(_key_expansion_192a)
13563
13564@@ -1768,6 +1773,7 @@ _key_expansion_192b:
13565
13566 movaps %xmm0, (TKEYP)
13567 add $0x10, TKEYP
13568+ pax_force_retaddr
13569 ret
13570 ENDPROC(_key_expansion_192b)
13571
13572@@ -1781,6 +1787,7 @@ _key_expansion_256b:
13573 pxor %xmm1, %xmm2
13574 movaps %xmm2, (TKEYP)
13575 add $0x10, TKEYP
13576+ pax_force_retaddr
13577 ret
13578 ENDPROC(_key_expansion_256b)
13579
13580@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
13581 #ifndef __x86_64__
13582 popl KEYP
13583 #endif
13584+ pax_force_retaddr
13585 ret
13586 ENDPROC(aesni_set_key)
13587
13588@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
13589 popl KLEN
13590 popl KEYP
13591 #endif
13592+ pax_force_retaddr
13593 ret
13594 ENDPROC(aesni_enc)
13595
13596@@ -1974,6 +1983,7 @@ _aesni_enc1:
13597 AESENC KEY STATE
13598 movaps 0x70(TKEYP), KEY
13599 AESENCLAST KEY STATE
13600+ pax_force_retaddr
13601 ret
13602 ENDPROC(_aesni_enc1)
13603
13604@@ -2083,6 +2093,7 @@ _aesni_enc4:
13605 AESENCLAST KEY STATE2
13606 AESENCLAST KEY STATE3
13607 AESENCLAST KEY STATE4
13608+ pax_force_retaddr
13609 ret
13610 ENDPROC(_aesni_enc4)
13611
13612@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
13613 popl KLEN
13614 popl KEYP
13615 #endif
13616+ pax_force_retaddr
13617 ret
13618 ENDPROC(aesni_dec)
13619
13620@@ -2164,6 +2176,7 @@ _aesni_dec1:
13621 AESDEC KEY STATE
13622 movaps 0x70(TKEYP), KEY
13623 AESDECLAST KEY STATE
13624+ pax_force_retaddr
13625 ret
13626 ENDPROC(_aesni_dec1)
13627
13628@@ -2273,6 +2286,7 @@ _aesni_dec4:
13629 AESDECLAST KEY STATE2
13630 AESDECLAST KEY STATE3
13631 AESDECLAST KEY STATE4
13632+ pax_force_retaddr
13633 ret
13634 ENDPROC(_aesni_dec4)
13635
13636@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
13637 popl KEYP
13638 popl LEN
13639 #endif
13640+ pax_force_retaddr
13641 ret
13642 ENDPROC(aesni_ecb_enc)
13643
13644@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
13645 popl KEYP
13646 popl LEN
13647 #endif
13648+ pax_force_retaddr
13649 ret
13650 ENDPROC(aesni_ecb_dec)
13651
13652@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
13653 popl LEN
13654 popl IVP
13655 #endif
13656+ pax_force_retaddr
13657 ret
13658 ENDPROC(aesni_cbc_enc)
13659
13660@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
13661 popl LEN
13662 popl IVP
13663 #endif
13664+ pax_force_retaddr
13665 ret
13666 ENDPROC(aesni_cbc_dec)
13667
13668@@ -2550,6 +2568,7 @@ _aesni_inc_init:
13669 mov $1, TCTR_LOW
13670 MOVQ_R64_XMM TCTR_LOW INC
13671 MOVQ_R64_XMM CTR TCTR_LOW
13672+ pax_force_retaddr
13673 ret
13674 ENDPROC(_aesni_inc_init)
13675
13676@@ -2579,6 +2598,7 @@ _aesni_inc:
13677 .Linc_low:
13678 movaps CTR, IV
13679 PSHUFB_XMM BSWAP_MASK IV
13680+ pax_force_retaddr
13681 ret
13682 ENDPROC(_aesni_inc)
13683
13684@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
13685 .Lctr_enc_ret:
13686 movups IV, (IVP)
13687 .Lctr_enc_just_ret:
13688+ pax_force_retaddr
13689 ret
13690 ENDPROC(aesni_ctr_enc)
13691
13692@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
13693 pxor INC, STATE4
13694 movdqu STATE4, 0x70(OUTP)
13695
13696+ pax_force_retaddr
13697 ret
13698 ENDPROC(aesni_xts_crypt8)
13699
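Editor's note: the aesni-intel_asm.S hunks above (and similar ones below for cast5/cast6, sha1, and twofish) mechanically rename %r12 to %r15 or %r14. The likely reason is that the KERNEXEC plugin instrumentation in this patch reserves %r12 globally (pax_set_fptr_mask loads the pointer mask into it), so hand-written asm must stop clobbering it. GCC exposes the same idea as a global register variable; a compile-only sketch under that assumption:

```c
/* Reserving a register for the whole translation unit, the way
 * the KERNEXEC plugin is assumed to reserve %r12 for its pointer
 * mask; any asm that clobbered %r12 must switch registers. */
register unsigned long fptr_mask asm("r12");

unsigned long apply_mask(unsigned long fptr)
{
        return fptr | fptr_mask;   /* mask lives in %r12 */
}
```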
13700diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13701index 246c670..466e2d6 100644
13702--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
13703+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13704@@ -21,6 +21,7 @@
13705 */
13706
13707 #include <linux/linkage.h>
13708+#include <asm/alternative-asm.h>
13709
13710 .file "blowfish-x86_64-asm.S"
13711 .text
13712@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
13713 jnz .L__enc_xor;
13714
13715 write_block();
13716+ pax_force_retaddr
13717 ret;
13718 .L__enc_xor:
13719 xor_block();
13720+ pax_force_retaddr
13721 ret;
13722 ENDPROC(__blowfish_enc_blk)
13723
13724@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
13725
13726 movq %r11, %rbp;
13727
13728+ pax_force_retaddr
13729 ret;
13730 ENDPROC(blowfish_dec_blk)
13731
13732@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
13733
13734 popq %rbx;
13735 popq %rbp;
13736+ pax_force_retaddr
13737 ret;
13738
13739 .L__enc_xor4:
13740@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
13741
13742 popq %rbx;
13743 popq %rbp;
13744+ pax_force_retaddr
13745 ret;
13746 ENDPROC(__blowfish_enc_blk_4way)
13747
13748@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
13749 popq %rbx;
13750 popq %rbp;
13751
13752+ pax_force_retaddr
13753 ret;
13754 ENDPROC(blowfish_dec_blk_4way)
13755diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13756index ce71f92..1dce7ec 100644
13757--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13758+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13759@@ -16,6 +16,7 @@
13760 */
13761
13762 #include <linux/linkage.h>
13763+#include <asm/alternative-asm.h>
13764
13765 #define CAMELLIA_TABLE_BYTE_LEN 272
13766
13767@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13768 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
13769 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
13770 %rcx, (%r9));
13771+ pax_force_retaddr
13772 ret;
13773 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13774
13775@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13776 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
13777 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
13778 %rax, (%r9));
13779+ pax_force_retaddr
13780 ret;
13781 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13782
13783@@ -780,6 +783,7 @@ __camellia_enc_blk16:
13784 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13785 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
13786
13787+ pax_force_retaddr
13788 ret;
13789
13790 .align 8
13791@@ -865,6 +869,7 @@ __camellia_dec_blk16:
13792 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13793 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
13794
13795+ pax_force_retaddr
13796 ret;
13797
13798 .align 8
13799@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
13800 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13801 %xmm8, %rsi);
13802
13803+ pax_force_retaddr
13804 ret;
13805 ENDPROC(camellia_ecb_enc_16way)
13806
13807@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
13808 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13809 %xmm8, %rsi);
13810
13811+ pax_force_retaddr
13812 ret;
13813 ENDPROC(camellia_ecb_dec_16way)
13814
13815@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
13816 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13817 %xmm8, %rsi);
13818
13819+ pax_force_retaddr
13820 ret;
13821 ENDPROC(camellia_cbc_dec_16way)
13822
13823@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
13824 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13825 %xmm8, %rsi);
13826
13827+ pax_force_retaddr
13828 ret;
13829 ENDPROC(camellia_ctr_16way)
13830
13831@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
13832 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13833 %xmm8, %rsi);
13834
13835+ pax_force_retaddr
13836 ret;
13837 ENDPROC(camellia_xts_crypt_16way)
13838
13839diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13840index 0e0b886..5a3123c 100644
13841--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13842+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13843@@ -11,6 +11,7 @@
13844 */
13845
13846 #include <linux/linkage.h>
13847+#include <asm/alternative-asm.h>
13848
13849 #define CAMELLIA_TABLE_BYTE_LEN 272
13850
13851@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13852 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
13853 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
13854 %rcx, (%r9));
13855+ pax_force_retaddr
13856 ret;
13857 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13858
13859@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13860 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
13861 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
13862 %rax, (%r9));
13863+ pax_force_retaddr
13864 ret;
13865 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13866
13867@@ -820,6 +823,7 @@ __camellia_enc_blk32:
13868 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13869 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
13870
13871+ pax_force_retaddr
13872 ret;
13873
13874 .align 8
13875@@ -905,6 +909,7 @@ __camellia_dec_blk32:
13876 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13877 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
13878
13879+ pax_force_retaddr
13880 ret;
13881
13882 .align 8
13883@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
13884
13885 vzeroupper;
13886
13887+ pax_force_retaddr
13888 ret;
13889 ENDPROC(camellia_ecb_enc_32way)
13890
13891@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
13892
13893 vzeroupper;
13894
13895+ pax_force_retaddr
13896 ret;
13897 ENDPROC(camellia_ecb_dec_32way)
13898
13899@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
13900
13901 vzeroupper;
13902
13903+ pax_force_retaddr
13904 ret;
13905 ENDPROC(camellia_cbc_dec_32way)
13906
13907@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
13908
13909 vzeroupper;
13910
13911+ pax_force_retaddr
13912 ret;
13913 ENDPROC(camellia_ctr_32way)
13914
13915@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
13916
13917 vzeroupper;
13918
13919+ pax_force_retaddr
13920 ret;
13921 ENDPROC(camellia_xts_crypt_32way)
13922
13923diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
13924index 310319c..db3d7b5 100644
13925--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
13926+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
13927@@ -21,6 +21,7 @@
13928 */
13929
13930 #include <linux/linkage.h>
13931+#include <asm/alternative-asm.h>
13932
13933 .file "camellia-x86_64-asm_64.S"
13934 .text
13935@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
13936 enc_outunpack(mov, RT1);
13937
13938 movq RRBP, %rbp;
13939+ pax_force_retaddr
13940 ret;
13941
13942 .L__enc_xor:
13943 enc_outunpack(xor, RT1);
13944
13945 movq RRBP, %rbp;
13946+ pax_force_retaddr
13947 ret;
13948 ENDPROC(__camellia_enc_blk)
13949
13950@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
13951 dec_outunpack();
13952
13953 movq RRBP, %rbp;
13954+ pax_force_retaddr
13955 ret;
13956 ENDPROC(camellia_dec_blk)
13957
13958@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
13959
13960 movq RRBP, %rbp;
13961 popq %rbx;
13962+ pax_force_retaddr
13963 ret;
13964
13965 .L__enc2_xor:
13966@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
13967
13968 movq RRBP, %rbp;
13969 popq %rbx;
13970+ pax_force_retaddr
13971 ret;
13972 ENDPROC(__camellia_enc_blk_2way)
13973
13974@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
13975
13976 movq RRBP, %rbp;
13977 movq RXOR, %rbx;
13978+ pax_force_retaddr
13979 ret;
13980 ENDPROC(camellia_dec_blk_2way)
13981diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13982index c35fd5d..2d8c7db 100644
13983--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13984+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13985@@ -24,6 +24,7 @@
13986 */
13987
13988 #include <linux/linkage.h>
13989+#include <asm/alternative-asm.h>
13990
13991 .file "cast5-avx-x86_64-asm_64.S"
13992
13993@@ -281,6 +282,7 @@ __cast5_enc_blk16:
13994 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13995 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13996
13997+ pax_force_retaddr
13998 ret;
13999 ENDPROC(__cast5_enc_blk16)
14000
14001@@ -352,6 +354,7 @@ __cast5_dec_blk16:
14002 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
14003 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
14004
14005+ pax_force_retaddr
14006 ret;
14007
14008 .L__skip_dec:
14009@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
14010 vmovdqu RR4, (6*4*4)(%r11);
14011 vmovdqu RL4, (7*4*4)(%r11);
14012
14013+ pax_force_retaddr
14014 ret;
14015 ENDPROC(cast5_ecb_enc_16way)
14016
14017@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
14018 vmovdqu RR4, (6*4*4)(%r11);
14019 vmovdqu RL4, (7*4*4)(%r11);
14020
14021+ pax_force_retaddr
14022 ret;
14023 ENDPROC(cast5_ecb_dec_16way)
14024
14025@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
14026 * %rdx: src
14027 */
14028
14029- pushq %r12;
14030+ pushq %r14;
14031
14032 movq %rsi, %r11;
14033- movq %rdx, %r12;
14034+ movq %rdx, %r14;
14035
14036 vmovdqu (0*16)(%rdx), RL1;
14037 vmovdqu (1*16)(%rdx), RR1;
14038@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
14039 call __cast5_dec_blk16;
14040
14041 /* xor with src */
14042- vmovq (%r12), RX;
14043+ vmovq (%r14), RX;
14044 vpshufd $0x4f, RX, RX;
14045 vpxor RX, RR1, RR1;
14046- vpxor 0*16+8(%r12), RL1, RL1;
14047- vpxor 1*16+8(%r12), RR2, RR2;
14048- vpxor 2*16+8(%r12), RL2, RL2;
14049- vpxor 3*16+8(%r12), RR3, RR3;
14050- vpxor 4*16+8(%r12), RL3, RL3;
14051- vpxor 5*16+8(%r12), RR4, RR4;
14052- vpxor 6*16+8(%r12), RL4, RL4;
14053+ vpxor 0*16+8(%r14), RL1, RL1;
14054+ vpxor 1*16+8(%r14), RR2, RR2;
14055+ vpxor 2*16+8(%r14), RL2, RL2;
14056+ vpxor 3*16+8(%r14), RR3, RR3;
14057+ vpxor 4*16+8(%r14), RL3, RL3;
14058+ vpxor 5*16+8(%r14), RR4, RR4;
14059+ vpxor 6*16+8(%r14), RL4, RL4;
14060
14061 vmovdqu RR1, (0*16)(%r11);
14062 vmovdqu RL1, (1*16)(%r11);
14063@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
14064 vmovdqu RR4, (6*16)(%r11);
14065 vmovdqu RL4, (7*16)(%r11);
14066
14067- popq %r12;
14068+ popq %r14;
14069
14070+ pax_force_retaddr
14071 ret;
14072 ENDPROC(cast5_cbc_dec_16way)
14073
14074@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
14075 * %rcx: iv (big endian, 64bit)
14076 */
14077
14078- pushq %r12;
14079+ pushq %r14;
14080
14081 movq %rsi, %r11;
14082- movq %rdx, %r12;
14083+ movq %rdx, %r14;
14084
14085 vpcmpeqd RTMP, RTMP, RTMP;
14086 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
14087@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
14088 call __cast5_enc_blk16;
14089
14090 /* dst = src ^ iv */
14091- vpxor (0*16)(%r12), RR1, RR1;
14092- vpxor (1*16)(%r12), RL1, RL1;
14093- vpxor (2*16)(%r12), RR2, RR2;
14094- vpxor (3*16)(%r12), RL2, RL2;
14095- vpxor (4*16)(%r12), RR3, RR3;
14096- vpxor (5*16)(%r12), RL3, RL3;
14097- vpxor (6*16)(%r12), RR4, RR4;
14098- vpxor (7*16)(%r12), RL4, RL4;
14099+ vpxor (0*16)(%r14), RR1, RR1;
14100+ vpxor (1*16)(%r14), RL1, RL1;
14101+ vpxor (2*16)(%r14), RR2, RR2;
14102+ vpxor (3*16)(%r14), RL2, RL2;
14103+ vpxor (4*16)(%r14), RR3, RR3;
14104+ vpxor (5*16)(%r14), RL3, RL3;
14105+ vpxor (6*16)(%r14), RR4, RR4;
14106+ vpxor (7*16)(%r14), RL4, RL4;
14107 vmovdqu RR1, (0*16)(%r11);
14108 vmovdqu RL1, (1*16)(%r11);
14109 vmovdqu RR2, (2*16)(%r11);
14110@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
14111 vmovdqu RR4, (6*16)(%r11);
14112 vmovdqu RL4, (7*16)(%r11);
14113
14114- popq %r12;
14115+ popq %r14;
14116
14117+ pax_force_retaddr
14118 ret;
14119 ENDPROC(cast5_ctr_16way)
14120diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
14121index e3531f8..e123f35 100644
14122--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
14123+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
14124@@ -24,6 +24,7 @@
14125 */
14126
14127 #include <linux/linkage.h>
14128+#include <asm/alternative-asm.h>
14129 #include "glue_helper-asm-avx.S"
14130
14131 .file "cast6-avx-x86_64-asm_64.S"
14132@@ -295,6 +296,7 @@ __cast6_enc_blk8:
14133 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
14134 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
14135
14136+ pax_force_retaddr
14137 ret;
14138 ENDPROC(__cast6_enc_blk8)
14139
14140@@ -340,6 +342,7 @@ __cast6_dec_blk8:
14141 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
14142 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
14143
14144+ pax_force_retaddr
14145 ret;
14146 ENDPROC(__cast6_dec_blk8)
14147
14148@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
14149
14150 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14151
14152+ pax_force_retaddr
14153 ret;
14154 ENDPROC(cast6_ecb_enc_8way)
14155
14156@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
14157
14158 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14159
14160+ pax_force_retaddr
14161 ret;
14162 ENDPROC(cast6_ecb_dec_8way)
14163
14164@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
14165 * %rdx: src
14166 */
14167
14168- pushq %r12;
14169+ pushq %r14;
14170
14171 movq %rsi, %r11;
14172- movq %rdx, %r12;
14173+ movq %rdx, %r14;
14174
14175 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14176
14177 call __cast6_dec_blk8;
14178
14179- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14180+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14181
14182- popq %r12;
14183+ popq %r14;
14184
14185+ pax_force_retaddr
14186 ret;
14187 ENDPROC(cast6_cbc_dec_8way)
14188
14189@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
14190 * %rcx: iv (little endian, 128bit)
14191 */
14192
14193- pushq %r12;
14194+ pushq %r14;
14195
14196 movq %rsi, %r11;
14197- movq %rdx, %r12;
14198+ movq %rdx, %r14;
14199
14200 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14201 RD2, RX, RKR, RKM);
14202
14203 call __cast6_enc_blk8;
14204
14205- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14206+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14207
14208- popq %r12;
14209+ popq %r14;
14210
14211+ pax_force_retaddr
14212 ret;
14213 ENDPROC(cast6_ctr_8way)
14214
14215@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
14216 /* dst <= regs xor IVs(in dst) */
14217 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14218
14219+ pax_force_retaddr
14220 ret;
14221 ENDPROC(cast6_xts_enc_8way)
14222
14223@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
14224 /* dst <= regs xor IVs(in dst) */
14225 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14226
14227+ pax_force_retaddr
14228 ret;
14229 ENDPROC(cast6_xts_dec_8way)
14230diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14231index dbc4339..de6e120 100644
14232--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14233+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14234@@ -45,6 +45,7 @@
14235
14236 #include <asm/inst.h>
14237 #include <linux/linkage.h>
14238+#include <asm/alternative-asm.h>
14239
14240 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
14241
14242@@ -312,6 +313,7 @@ do_return:
14243 popq %rsi
14244 popq %rdi
14245 popq %rbx
14246+ pax_force_retaddr
14247 ret
14248
14249 ################################################################
14250diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14251index 5d1e007..098cb4f 100644
14252--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
14253+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14254@@ -18,6 +18,7 @@
14255
14256 #include <linux/linkage.h>
14257 #include <asm/inst.h>
14258+#include <asm/alternative-asm.h>
14259
14260 .data
14261
14262@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
14263 psrlq $1, T2
14264 pxor T2, T1
14265 pxor T1, DATA
14266+ pax_force_retaddr
14267 ret
14268 ENDPROC(__clmul_gf128mul_ble)
14269
14270@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
14271 call __clmul_gf128mul_ble
14272 PSHUFB_XMM BSWAP DATA
14273 movups DATA, (%rdi)
14274+ pax_force_retaddr
14275 ret
14276 ENDPROC(clmul_ghash_mul)
14277
14278@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
14279 PSHUFB_XMM BSWAP DATA
14280 movups DATA, (%rdi)
14281 .Lupdate_just_ret:
14282+ pax_force_retaddr
14283 ret
14284 ENDPROC(clmul_ghash_update)
14285diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14286index 9279e0b..c4b3d2c 100644
14287--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
14288+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14289@@ -1,4 +1,5 @@
14290 #include <linux/linkage.h>
14291+#include <asm/alternative-asm.h>
14292
14293 # enter salsa20_encrypt_bytes
14294 ENTRY(salsa20_encrypt_bytes)
14295@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
14296 add %r11,%rsp
14297 mov %rdi,%rax
14298 mov %rsi,%rdx
14299+ pax_force_retaddr
14300 ret
14301 # bytesatleast65:
14302 ._bytesatleast65:
14303@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
14304 add %r11,%rsp
14305 mov %rdi,%rax
14306 mov %rsi,%rdx
14307+ pax_force_retaddr
14308 ret
14309 ENDPROC(salsa20_keysetup)
14310
14311@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
14312 add %r11,%rsp
14313 mov %rdi,%rax
14314 mov %rsi,%rdx
14315+ pax_force_retaddr
14316 ret
14317 ENDPROC(salsa20_ivsetup)
14318diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14319index 2f202f4..d9164d6 100644
14320--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14321+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14322@@ -24,6 +24,7 @@
14323 */
14324
14325 #include <linux/linkage.h>
14326+#include <asm/alternative-asm.h>
14327 #include "glue_helper-asm-avx.S"
14328
14329 .file "serpent-avx-x86_64-asm_64.S"
14330@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
14331 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14332 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14333
14334+ pax_force_retaddr
14335 ret;
14336 ENDPROC(__serpent_enc_blk8_avx)
14337
14338@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
14339 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14340 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14341
14342+ pax_force_retaddr
14343 ret;
14344 ENDPROC(__serpent_dec_blk8_avx)
14345
14346@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
14347
14348 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14349
14350+ pax_force_retaddr
14351 ret;
14352 ENDPROC(serpent_ecb_enc_8way_avx)
14353
14354@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
14355
14356 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14357
14358+ pax_force_retaddr
14359 ret;
14360 ENDPROC(serpent_ecb_dec_8way_avx)
14361
14362@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
14363
14364 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14365
14366+ pax_force_retaddr
14367 ret;
14368 ENDPROC(serpent_cbc_dec_8way_avx)
14369
14370@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
14371
14372 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14373
14374+ pax_force_retaddr
14375 ret;
14376 ENDPROC(serpent_ctr_8way_avx)
14377
14378@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
14379 /* dst <= regs xor IVs(in dst) */
14380 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14381
14382+ pax_force_retaddr
14383 ret;
14384 ENDPROC(serpent_xts_enc_8way_avx)
14385
14386@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
14387 /* dst <= regs xor IVs(in dst) */
14388 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14389
14390+ pax_force_retaddr
14391 ret;
14392 ENDPROC(serpent_xts_dec_8way_avx)
14393diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
14394index b222085..abd483c 100644
14395--- a/arch/x86/crypto/serpent-avx2-asm_64.S
14396+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
14397@@ -15,6 +15,7 @@
14398 */
14399
14400 #include <linux/linkage.h>
14401+#include <asm/alternative-asm.h>
14402 #include "glue_helper-asm-avx2.S"
14403
14404 .file "serpent-avx2-asm_64.S"
14405@@ -610,6 +611,7 @@ __serpent_enc_blk16:
14406 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14407 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14408
14409+ pax_force_retaddr
14410 ret;
14411 ENDPROC(__serpent_enc_blk16)
14412
14413@@ -664,6 +666,7 @@ __serpent_dec_blk16:
14414 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14415 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14416
14417+ pax_force_retaddr
14418 ret;
14419 ENDPROC(__serpent_dec_blk16)
14420
14421@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
14422
14423 vzeroupper;
14424
14425+ pax_force_retaddr
14426 ret;
14427 ENDPROC(serpent_ecb_enc_16way)
14428
14429@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
14430
14431 vzeroupper;
14432
14433+ pax_force_retaddr
14434 ret;
14435 ENDPROC(serpent_ecb_dec_16way)
14436
14437@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
14438
14439 vzeroupper;
14440
14441+ pax_force_retaddr
14442 ret;
14443 ENDPROC(serpent_cbc_dec_16way)
14444
14445@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
14446
14447 vzeroupper;
14448
14449+ pax_force_retaddr
14450 ret;
14451 ENDPROC(serpent_ctr_16way)
14452
14453@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
14454
14455 vzeroupper;
14456
14457+ pax_force_retaddr
14458 ret;
14459 ENDPROC(serpent_xts_enc_16way)
14460
14461@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
14462
14463 vzeroupper;
14464
14465+ pax_force_retaddr
14466 ret;
14467 ENDPROC(serpent_xts_dec_16way)
14468diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14469index acc066c..1559cc4 100644
14470--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14471+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14472@@ -25,6 +25,7 @@
14473 */
14474
14475 #include <linux/linkage.h>
14476+#include <asm/alternative-asm.h>
14477
14478 .file "serpent-sse2-x86_64-asm_64.S"
14479 .text
14480@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
14481 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14482 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14483
14484+ pax_force_retaddr
14485 ret;
14486
14487 .L__enc_xor8:
14488 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14489 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14490
14491+ pax_force_retaddr
14492 ret;
14493 ENDPROC(__serpent_enc_blk_8way)
14494
14495@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
14496 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14497 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14498
14499+ pax_force_retaddr
14500 ret;
14501 ENDPROC(serpent_dec_blk_8way)
14502diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
14503index a410950..9dfe7ad 100644
14504--- a/arch/x86/crypto/sha1_ssse3_asm.S
14505+++ b/arch/x86/crypto/sha1_ssse3_asm.S
14506@@ -29,6 +29,7 @@
14507 */
14508
14509 #include <linux/linkage.h>
14510+#include <asm/alternative-asm.h>
14511
14512 #define CTX %rdi // arg1
14513 #define BUF %rsi // arg2
14514@@ -75,9 +76,9 @@
14515
14516 push %rbx
14517 push %rbp
14518- push %r12
14519+ push %r14
14520
14521- mov %rsp, %r12
14522+ mov %rsp, %r14
14523 sub $64, %rsp # allocate workspace
14524 and $~15, %rsp # align stack
14525
14526@@ -99,11 +100,12 @@
14527 xor %rax, %rax
14528 rep stosq
14529
14530- mov %r12, %rsp # deallocate workspace
14531+ mov %r14, %rsp # deallocate workspace
14532
14533- pop %r12
14534+ pop %r14
14535 pop %rbp
14536 pop %rbx
14537+ pax_force_retaddr
14538 ret
14539
14540 ENDPROC(\name)
14541diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
14542index 642f156..51a513c 100644
14543--- a/arch/x86/crypto/sha256-avx-asm.S
14544+++ b/arch/x86/crypto/sha256-avx-asm.S
14545@@ -49,6 +49,7 @@
14546
14547 #ifdef CONFIG_AS_AVX
14548 #include <linux/linkage.h>
14549+#include <asm/alternative-asm.h>
14550
14551 ## assume buffers not aligned
14552 #define VMOVDQ vmovdqu
14553@@ -460,6 +461,7 @@ done_hash:
14554 popq %r13
14555 popq %rbp
14556 popq %rbx
14557+ pax_force_retaddr
14558 ret
14559 ENDPROC(sha256_transform_avx)
14560
14561diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
14562index 9e86944..3795e6a 100644
14563--- a/arch/x86/crypto/sha256-avx2-asm.S
14564+++ b/arch/x86/crypto/sha256-avx2-asm.S
14565@@ -50,6 +50,7 @@
14566
14567 #ifdef CONFIG_AS_AVX2
14568 #include <linux/linkage.h>
14569+#include <asm/alternative-asm.h>
14570
14571 ## assume buffers not aligned
14572 #define VMOVDQ vmovdqu
14573@@ -720,6 +721,7 @@ done_hash:
14574 popq %r12
14575 popq %rbp
14576 popq %rbx
14577+ pax_force_retaddr
14578 ret
14579 ENDPROC(sha256_transform_rorx)
14580
14581diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
14582index f833b74..8c62a9e 100644
14583--- a/arch/x86/crypto/sha256-ssse3-asm.S
14584+++ b/arch/x86/crypto/sha256-ssse3-asm.S
14585@@ -47,6 +47,7 @@
14586 ########################################################################
14587
14588 #include <linux/linkage.h>
14589+#include <asm/alternative-asm.h>
14590
14591 ## assume buffers not aligned
14592 #define MOVDQ movdqu
14593@@ -471,6 +472,7 @@ done_hash:
14594 popq %rbp
14595 popq %rbx
14596
14597+ pax_force_retaddr
14598 ret
14599 ENDPROC(sha256_transform_ssse3)
14600
14601diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
14602index 974dde9..a823ff9 100644
14603--- a/arch/x86/crypto/sha512-avx-asm.S
14604+++ b/arch/x86/crypto/sha512-avx-asm.S
14605@@ -49,6 +49,7 @@
14606
14607 #ifdef CONFIG_AS_AVX
14608 #include <linux/linkage.h>
14609+#include <asm/alternative-asm.h>
14610
14611 .text
14612
14613@@ -364,6 +365,7 @@ updateblock:
14614 mov frame_RSPSAVE(%rsp), %rsp
14615
14616 nowork:
14617+ pax_force_retaddr
14618 ret
14619 ENDPROC(sha512_transform_avx)
14620
14621diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
14622index 568b961..ed20c37 100644
14623--- a/arch/x86/crypto/sha512-avx2-asm.S
14624+++ b/arch/x86/crypto/sha512-avx2-asm.S
14625@@ -51,6 +51,7 @@
14626
14627 #ifdef CONFIG_AS_AVX2
14628 #include <linux/linkage.h>
14629+#include <asm/alternative-asm.h>
14630
14631 .text
14632
14633@@ -678,6 +679,7 @@ done_hash:
14634
14635 # Restore Stack Pointer
14636 mov frame_RSPSAVE(%rsp), %rsp
14637+ pax_force_retaddr
14638 ret
14639 ENDPROC(sha512_transform_rorx)
14640
14641diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
14642index fb56855..6edd768 100644
14643--- a/arch/x86/crypto/sha512-ssse3-asm.S
14644+++ b/arch/x86/crypto/sha512-ssse3-asm.S
14645@@ -48,6 +48,7 @@
14646 ########################################################################
14647
14648 #include <linux/linkage.h>
14649+#include <asm/alternative-asm.h>
14650
14651 .text
14652
14653@@ -363,6 +364,7 @@ updateblock:
14654 mov frame_RSPSAVE(%rsp), %rsp
14655
14656 nowork:
14657+ pax_force_retaddr
14658 ret
14659 ENDPROC(sha512_transform_ssse3)
14660
14661diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14662index 0505813..b067311 100644
14663--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14664+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14665@@ -24,6 +24,7 @@
14666 */
14667
14668 #include <linux/linkage.h>
14669+#include <asm/alternative-asm.h>
14670 #include "glue_helper-asm-avx.S"
14671
14672 .file "twofish-avx-x86_64-asm_64.S"
14673@@ -284,6 +285,7 @@ __twofish_enc_blk8:
14674 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
14675 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
14676
14677+ pax_force_retaddr
14678 ret;
14679 ENDPROC(__twofish_enc_blk8)
14680
14681@@ -324,6 +326,7 @@ __twofish_dec_blk8:
14682 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
14683 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
14684
14685+ pax_force_retaddr
14686 ret;
14687 ENDPROC(__twofish_dec_blk8)
14688
14689@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
14690
14691 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14692
14693+ pax_force_retaddr
14694 ret;
14695 ENDPROC(twofish_ecb_enc_8way)
14696
14697@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
14698
14699 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14700
14701+ pax_force_retaddr
14702 ret;
14703 ENDPROC(twofish_ecb_dec_8way)
14704
14705@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
14706 * %rdx: src
14707 */
14708
14709- pushq %r12;
14710+ pushq %r14;
14711
14712 movq %rsi, %r11;
14713- movq %rdx, %r12;
14714+ movq %rdx, %r14;
14715
14716 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14717
14718 call __twofish_dec_blk8;
14719
14720- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14721+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14722
14723- popq %r12;
14724+ popq %r14;
14725
14726+ pax_force_retaddr
14727 ret;
14728 ENDPROC(twofish_cbc_dec_8way)
14729
14730@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
14731 * %rcx: iv (little endian, 128bit)
14732 */
14733
14734- pushq %r12;
14735+ pushq %r14;
14736
14737 movq %rsi, %r11;
14738- movq %rdx, %r12;
14739+ movq %rdx, %r14;
14740
14741 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14742 RD2, RX0, RX1, RY0);
14743
14744 call __twofish_enc_blk8;
14745
14746- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14747+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14748
14749- popq %r12;
14750+ popq %r14;
14751
14752+ pax_force_retaddr
14753 ret;
14754 ENDPROC(twofish_ctr_8way)
14755
14756@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
14757 /* dst <= regs xor IVs(in dst) */
14758 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14759
14760+ pax_force_retaddr
14761 ret;
14762 ENDPROC(twofish_xts_enc_8way)
14763
14764@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
14765 /* dst <= regs xor IVs(in dst) */
14766 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14767
14768+ pax_force_retaddr
14769 ret;
14770 ENDPROC(twofish_xts_dec_8way)
14771diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14772index 1c3b7ce..02f578d 100644
14773--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14774+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14775@@ -21,6 +21,7 @@
14776 */
14777
14778 #include <linux/linkage.h>
14779+#include <asm/alternative-asm.h>
14780
14781 .file "twofish-x86_64-asm-3way.S"
14782 .text
14783@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
14784 popq %r13;
14785 popq %r14;
14786 popq %r15;
14787+ pax_force_retaddr
14788 ret;
14789
14790 .L__enc_xor3:
14791@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
14792 popq %r13;
14793 popq %r14;
14794 popq %r15;
14795+ pax_force_retaddr
14796 ret;
14797 ENDPROC(__twofish_enc_blk_3way)
14798
14799@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
14800 popq %r13;
14801 popq %r14;
14802 popq %r15;
14803+ pax_force_retaddr
14804 ret;
14805 ENDPROC(twofish_dec_blk_3way)
14806diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
14807index a039d21..524b8b2 100644
14808--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
14809+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
14810@@ -22,6 +22,7 @@
14811
14812 #include <linux/linkage.h>
14813 #include <asm/asm-offsets.h>
14814+#include <asm/alternative-asm.h>
14815
14816 #define a_offset 0
14817 #define b_offset 4
14818@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
14819
14820 popq R1
14821 movq $1,%rax
14822+ pax_force_retaddr
14823 ret
14824 ENDPROC(twofish_enc_blk)
14825
14826@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
14827
14828 popq R1
14829 movq $1,%rax
14830+ pax_force_retaddr
14831 ret
14832 ENDPROC(twofish_dec_blk)
14833diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
14834index d21ff89..6da8e6e 100644
14835--- a/arch/x86/ia32/ia32_aout.c
14836+++ b/arch/x86/ia32/ia32_aout.c
14837@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
14838 unsigned long dump_start, dump_size;
14839 struct user32 dump;
14840
14841+ memset(&dump, 0, sizeof(dump));
14842+
14843 fs = get_fs();
14844 set_fs(KERNEL_DS);
14845 has_dumped = 1;
14846diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
14847index f9e181a..b0df8b3 100644
14848--- a/arch/x86/ia32/ia32_signal.c
14849+++ b/arch/x86/ia32/ia32_signal.c
14850@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
14851 if (__get_user(set.sig[0], &frame->sc.oldmask)
14852 || (_COMPAT_NSIG_WORDS > 1
14853 && __copy_from_user((((char *) &set.sig) + 4),
14854- &frame->extramask,
14855+ frame->extramask,
14856 sizeof(frame->extramask))))
14857 goto badframe;
14858
14859@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
14860 sp -= frame_size;
14861 /* Align the stack pointer according to the i386 ABI,
14862 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
14863- sp = ((sp + 4) & -16ul) - 4;
14864+ sp = ((sp - 12) & -16ul) - 4;
14865 return (void __user *) sp;
14866 }
14867
14868@@ -386,7 +386,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14869 restorer = current->mm->context.vdso +
14870 selected_vdso32->sym___kernel_sigreturn;
14871 else
14872- restorer = &frame->retcode;
14873+ restorer = frame->retcode;
14874 }
14875
14876 put_user_try {
14877@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14878 * These are actually not used anymore, but left because some
14879 * gdb versions depend on them as a marker.
14880 */
14881- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14882+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14883 } put_user_catch(err);
14884
14885 if (err)
14886@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14887 0xb8,
14888 __NR_ia32_rt_sigreturn,
14889 0x80cd,
14890- 0,
14891+ 0
14892 };
14893
14894 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
14895@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14896
14897 if (ksig->ka.sa.sa_flags & SA_RESTORER)
14898 restorer = ksig->ka.sa.sa_restorer;
14899- else
14900+ else if (current->mm->context.vdso)
14901+ /* Return stub is in 32bit vsyscall page */
14902 restorer = current->mm->context.vdso +
14903 selected_vdso32->sym___kernel_rt_sigreturn;
14904+ else
14905+ restorer = frame->retcode;
14906 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
14907
14908 /*
14909 * Not actually used anymore, but left because some gdb
14910 * versions need it.
14911 */
14912- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14913+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14914 } put_user_catch(err);
14915
14916 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
14917diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
14918index 4299eb0..fefe70e 100644
14919--- a/arch/x86/ia32/ia32entry.S
14920+++ b/arch/x86/ia32/ia32entry.S
14921@@ -15,8 +15,10 @@
14922 #include <asm/irqflags.h>
14923 #include <asm/asm.h>
14924 #include <asm/smap.h>
14925+#include <asm/pgtable.h>
14926 #include <linux/linkage.h>
14927 #include <linux/err.h>
14928+#include <asm/alternative-asm.h>
14929
14930 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14931 #include <linux/elf-em.h>
14932@@ -62,12 +64,12 @@
14933 */
14934 .macro LOAD_ARGS32 offset, _r9=0
14935 .if \_r9
14936- movl \offset+16(%rsp),%r9d
14937+ movl \offset+R9(%rsp),%r9d
14938 .endif
14939- movl \offset+40(%rsp),%ecx
14940- movl \offset+48(%rsp),%edx
14941- movl \offset+56(%rsp),%esi
14942- movl \offset+64(%rsp),%edi
14943+ movl \offset+RCX(%rsp),%ecx
14944+ movl \offset+RDX(%rsp),%edx
14945+ movl \offset+RSI(%rsp),%esi
14946+ movl \offset+RDI(%rsp),%edi
14947 movl %eax,%eax /* zero extension */
14948 .endm
14949
14950@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
14951 ENDPROC(native_irq_enable_sysexit)
14952 #endif
14953
14954+ .macro pax_enter_kernel_user
14955+ pax_set_fptr_mask
14956+#ifdef CONFIG_PAX_MEMORY_UDEREF
14957+ call pax_enter_kernel_user
14958+#endif
14959+ .endm
14960+
14961+ .macro pax_exit_kernel_user
14962+#ifdef CONFIG_PAX_MEMORY_UDEREF
14963+ call pax_exit_kernel_user
14964+#endif
14965+#ifdef CONFIG_PAX_RANDKSTACK
14966+ pushq %rax
14967+ pushq %r11
14968+ call pax_randomize_kstack
14969+ popq %r11
14970+ popq %rax
14971+#endif
14972+ .endm
14973+
14974+ .macro pax_erase_kstack
14975+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14976+ call pax_erase_kstack
14977+#endif
14978+ .endm
14979+
14980 /*
14981 * 32bit SYSENTER instruction entry.
14982 *
14983@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
14984 CFI_REGISTER rsp,rbp
14985 SWAPGS_UNSAFE_STACK
14986 movq PER_CPU_VAR(kernel_stack), %rsp
14987- addq $(KERNEL_STACK_OFFSET),%rsp
14988- /*
14989- * No need to follow this irqs on/off section: the syscall
14990- * disabled irqs, here we enable it straight after entry:
14991- */
14992- ENABLE_INTERRUPTS(CLBR_NONE)
14993 movl %ebp,%ebp /* zero extension */
14994 pushq_cfi $__USER32_DS
14995 /*CFI_REL_OFFSET ss,0*/
14996@@ -135,24 +157,49 @@ ENTRY(ia32_sysenter_target)
14997 CFI_REL_OFFSET rsp,0
14998 pushfq_cfi
14999 /*CFI_REL_OFFSET rflags,0*/
15000- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
15001- CFI_REGISTER rip,r10
15002+ orl $X86_EFLAGS_IF,(%rsp)
15003+ GET_THREAD_INFO(%r11)
15004+ movl TI_sysenter_return(%r11), %r11d
15005+ CFI_REGISTER rip,r11
15006 pushq_cfi $__USER32_CS
15007 /*CFI_REL_OFFSET cs,0*/
15008 movl %eax, %eax
15009- pushq_cfi %r10
15010+ pushq_cfi %r11
15011 CFI_REL_OFFSET rip,0
15012 pushq_cfi %rax
15013 cld
15014 SAVE_ARGS 0,1,0
15015+ pax_enter_kernel_user
15016+
15017+#ifdef CONFIG_PAX_RANDKSTACK
15018+ pax_erase_kstack
15019+#endif
15020+
15021+ /*
15022+ * No need to follow this irqs on/off section: the syscall
15023+ * disabled irqs, here we enable it straight after entry:
15024+ */
15025+ ENABLE_INTERRUPTS(CLBR_NONE)
15026 /* no need to do an access_ok check here because rbp has been
15027 32bit zero extended */
15028+
15029+#ifdef CONFIG_PAX_MEMORY_UDEREF
15030+ addq pax_user_shadow_base,%rbp
15031+ ASM_PAX_OPEN_USERLAND
15032+#endif
15033+
15034 ASM_STAC
15035 1: movl (%rbp),%ebp
15036 _ASM_EXTABLE(1b,ia32_badarg)
15037 ASM_CLAC
15038- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15039- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15040+
15041+#ifdef CONFIG_PAX_MEMORY_UDEREF
15042+ ASM_PAX_CLOSE_USERLAND
15043+#endif
15044+
15045+ GET_THREAD_INFO(%r11)
15046+ orl $TS_COMPAT,TI_status(%r11)
15047+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15048 CFI_REMEMBER_STATE
15049 jnz sysenter_tracesys
15050 cmpq $(IA32_NR_syscalls-1),%rax
15051@@ -162,15 +209,18 @@ sysenter_do_call:
15052 sysenter_dispatch:
15053 call *ia32_sys_call_table(,%rax,8)
15054 movq %rax,RAX-ARGOFFSET(%rsp)
15055+ GET_THREAD_INFO(%r11)
15056 DISABLE_INTERRUPTS(CLBR_NONE)
15057 TRACE_IRQS_OFF
15058- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15059+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
15060 jnz sysexit_audit
15061 sysexit_from_sys_call:
15062- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15063+ pax_exit_kernel_user
15064+ pax_erase_kstack
15065+ andl $~TS_COMPAT,TI_status(%r11)
15066 /* clear IF, that popfq doesn't enable interrupts early */
15067- andl $~0x200,EFLAGS-R11(%rsp)
15068- movl RIP-R11(%rsp),%edx /* User %eip */
15069+ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
15070+ movl RIP(%rsp),%edx /* User %eip */
15071 CFI_REGISTER rip,rdx
15072 RESTORE_ARGS 0,24,0,0,0,0
15073 xorq %r8,%r8
15074@@ -193,6 +243,9 @@ sysexit_from_sys_call:
15075 movl %eax,%esi /* 2nd arg: syscall number */
15076 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
15077 call __audit_syscall_entry
15078+
15079+ pax_erase_kstack
15080+
15081 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
15082 cmpq $(IA32_NR_syscalls-1),%rax
15083 ja ia32_badsys
15084@@ -204,7 +257,7 @@ sysexit_from_sys_call:
15085 .endm
15086
15087 .macro auditsys_exit exit
15088- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15089+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15090 jnz ia32_ret_from_sys_call
15091 TRACE_IRQS_ON
15092 ENABLE_INTERRUPTS(CLBR_NONE)
15093@@ -215,11 +268,12 @@ sysexit_from_sys_call:
15094 1: setbe %al /* 1 if error, 0 if not */
15095 movzbl %al,%edi /* zero-extend that into %edi */
15096 call __audit_syscall_exit
15097+ GET_THREAD_INFO(%r11)
15098 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
15099 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
15100 DISABLE_INTERRUPTS(CLBR_NONE)
15101 TRACE_IRQS_OFF
15102- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15103+ testl %edi,TI_flags(%r11)
15104 jz \exit
15105 CLEAR_RREGS -ARGOFFSET
15106 jmp int_with_check
15107@@ -237,7 +291,7 @@ sysexit_audit:
15108
15109 sysenter_tracesys:
15110 #ifdef CONFIG_AUDITSYSCALL
15111- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15112+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15113 jz sysenter_auditsys
15114 #endif
15115 SAVE_REST
15116@@ -249,6 +303,9 @@ sysenter_tracesys:
15117 RESTORE_REST
15118 cmpq $(IA32_NR_syscalls-1),%rax
15119 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
15120+
15121+ pax_erase_kstack
15122+
15123 jmp sysenter_do_call
15124 CFI_ENDPROC
15125 ENDPROC(ia32_sysenter_target)
15126@@ -276,19 +333,25 @@ ENDPROC(ia32_sysenter_target)
15127 ENTRY(ia32_cstar_target)
15128 CFI_STARTPROC32 simple
15129 CFI_SIGNAL_FRAME
15130- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
15131+ CFI_DEF_CFA rsp,0
15132 CFI_REGISTER rip,rcx
15133 /*CFI_REGISTER rflags,r11*/
15134 SWAPGS_UNSAFE_STACK
15135 movl %esp,%r8d
15136 CFI_REGISTER rsp,r8
15137 movq PER_CPU_VAR(kernel_stack),%rsp
15138+ SAVE_ARGS 8*6,0,0
15139+ pax_enter_kernel_user
15140+
15141+#ifdef CONFIG_PAX_RANDKSTACK
15142+ pax_erase_kstack
15143+#endif
15144+
15145 /*
15146 * No need to follow this irqs on/off section: the syscall
15147 * disabled irqs and here we enable it straight after entry:
15148 */
15149 ENABLE_INTERRUPTS(CLBR_NONE)
15150- SAVE_ARGS 8,0,0
15151 movl %eax,%eax /* zero extension */
15152 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
15153 movq %rcx,RIP-ARGOFFSET(%rsp)
15154@@ -304,12 +367,25 @@ ENTRY(ia32_cstar_target)
15155 /* no need to do an access_ok check here because r8 has been
15156 32bit zero extended */
15157 /* hardware stack frame is complete now */
15158+
15159+#ifdef CONFIG_PAX_MEMORY_UDEREF
15160+ ASM_PAX_OPEN_USERLAND
15161+ movq pax_user_shadow_base,%r8
15162+ addq RSP-ARGOFFSET(%rsp),%r8
15163+#endif
15164+
15165 ASM_STAC
15166 1: movl (%r8),%r9d
15167 _ASM_EXTABLE(1b,ia32_badarg)
15168 ASM_CLAC
15169- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15170- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15171+
15172+#ifdef CONFIG_PAX_MEMORY_UDEREF
15173+ ASM_PAX_CLOSE_USERLAND
15174+#endif
15175+
15176+ GET_THREAD_INFO(%r11)
15177+ orl $TS_COMPAT,TI_status(%r11)
15178+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15179 CFI_REMEMBER_STATE
15180 jnz cstar_tracesys
15181 cmpq $IA32_NR_syscalls-1,%rax
15182@@ -319,13 +395,16 @@ cstar_do_call:
15183 cstar_dispatch:
15184 call *ia32_sys_call_table(,%rax,8)
15185 movq %rax,RAX-ARGOFFSET(%rsp)
15186+ GET_THREAD_INFO(%r11)
15187 DISABLE_INTERRUPTS(CLBR_NONE)
15188 TRACE_IRQS_OFF
15189- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15190+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
15191 jnz sysretl_audit
15192 sysretl_from_sys_call:
15193- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15194- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
15195+ pax_exit_kernel_user
15196+ pax_erase_kstack
15197+ andl $~TS_COMPAT,TI_status(%r11)
15198+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
15199 movl RIP-ARGOFFSET(%rsp),%ecx
15200 CFI_REGISTER rip,rcx
15201 movl EFLAGS-ARGOFFSET(%rsp),%r11d
15202@@ -352,7 +431,7 @@ sysretl_audit:
15203
15204 cstar_tracesys:
15205 #ifdef CONFIG_AUDITSYSCALL
15206- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15207+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15208 jz cstar_auditsys
15209 #endif
15210 xchgl %r9d,%ebp
15211@@ -366,11 +445,19 @@ cstar_tracesys:
15212 xchgl %ebp,%r9d
15213 cmpq $(IA32_NR_syscalls-1),%rax
15214 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
15215+
15216+ pax_erase_kstack
15217+
15218 jmp cstar_do_call
15219 END(ia32_cstar_target)
15220
15221 ia32_badarg:
15222 ASM_CLAC
15223+
15224+#ifdef CONFIG_PAX_MEMORY_UDEREF
15225+ ASM_PAX_CLOSE_USERLAND
15226+#endif
15227+
15228 movq $-EFAULT,%rax
15229 jmp ia32_sysret
15230 CFI_ENDPROC
15231@@ -407,19 +494,26 @@ ENTRY(ia32_syscall)
15232 CFI_REL_OFFSET rip,RIP-RIP
15233 PARAVIRT_ADJUST_EXCEPTION_FRAME
15234 SWAPGS
15235- /*
15236- * No need to follow this irqs on/off section: the syscall
15237- * disabled irqs and here we enable it straight after entry:
15238- */
15239- ENABLE_INTERRUPTS(CLBR_NONE)
15240 movl %eax,%eax
15241 pushq_cfi %rax
15242 cld
15243 /* note the registers are not zero extended to the sf.
15244 this could be a problem. */
15245 SAVE_ARGS 0,1,0
15246- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15247- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15248+ pax_enter_kernel_user
15249+
15250+#ifdef CONFIG_PAX_RANDKSTACK
15251+ pax_erase_kstack
15252+#endif
15253+
15254+ /*
15255+ * No need to follow this irqs on/off section: the syscall
15256+	 * disabled irqs and here we enable them straight after entry:
15257+ */
15258+ ENABLE_INTERRUPTS(CLBR_NONE)
15259+ GET_THREAD_INFO(%r11)
15260+ orl $TS_COMPAT,TI_status(%r11)
15261+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15262 jnz ia32_tracesys
15263 cmpq $(IA32_NR_syscalls-1),%rax
15264 ja ia32_badsys
15265@@ -442,6 +536,9 @@ ia32_tracesys:
15266 RESTORE_REST
15267 cmpq $(IA32_NR_syscalls-1),%rax
15268 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
15269+
15270+ pax_erase_kstack
15271+
15272 jmp ia32_do_call
15273 END(ia32_syscall)
15274
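The compat entry stubs above lean on an x86-64 detail worth spelling out: a write to the 32-bit view of a register clears bits 63:32 of the full register, which is why movl %eax,%eax and movl %ebp,%ebp appear as explicit "zero extension" steps before user-supplied values are used as indices or pointers. A minimal userland demo of the idiom (GNU C, x86-64 only):

#include <stdint.h>
#include <stdio.h>

/* Demo of the "movl %eax,%eax" zero-extension idiom from the entry code
 * above: on x86-64, writing the 32-bit name of a register zeroes
 * bits 63:32 of the 64-bit register. */
static uint64_t zero_extend32(uint64_t x)
{
	asm("movl %k0,%k0" : "+r" (x));	/* %k0 = 32-bit name of operand 0 */
	return x;
}

int main(void)
{
	/* prints 0xcafebabe: the high half is gone */
	printf("%#llx\n", (unsigned long long)zero_extend32(0xdeadbeefcafebabeULL));
	return 0;
}
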
15275diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
15276index 8e0ceec..af13504 100644
15277--- a/arch/x86/ia32/sys_ia32.c
15278+++ b/arch/x86/ia32/sys_ia32.c
15279@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
15280 */
15281 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
15282 {
15283- typeof(ubuf->st_uid) uid = 0;
15284- typeof(ubuf->st_gid) gid = 0;
15285+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
15286+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
15287 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
15288 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
15289 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
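The cp_stat64() change swaps typeof(ubuf->st_uid) for typeof(((struct stat64 *)0)->st_uid): the operand of typeof is not evaluated, so casting a null pointer to the struct type yields the member's type without ever going through the __user pointer. A stand-alone sketch of the idiom (GNU C; the stat_like struct here is a hypothetical stand-in for struct stat64):

#include <stdio.h>

/* typeof-through-a-null-pointer: (((struct T *)0)->member) is never
 * evaluated inside typeof/sizeof, so it names the member's type without
 * touching any object -- handy when the only pointer at hand is a
 * __user pointer that must not be dereferenced directly. */
struct stat_like {
	unsigned int st_uid;
	unsigned int st_gid;
};

int main(void)
{
	typeof(((struct stat_like *)0)->st_uid) uid = 0;	/* an unsigned int */
	printf("sizeof(uid) = %zu\n", sizeof(uid));
	return 0;
}
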
15290diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
15291index 372231c..51b537d 100644
15292--- a/arch/x86/include/asm/alternative-asm.h
15293+++ b/arch/x86/include/asm/alternative-asm.h
15294@@ -18,6 +18,45 @@
15295 .endm
15296 #endif
15297
15298+#ifdef KERNEXEC_PLUGIN
15299+ .macro pax_force_retaddr_bts rip=0
15300+ btsq $63,\rip(%rsp)
15301+ .endm
15302+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15303+ .macro pax_force_retaddr rip=0, reload=0
15304+ btsq $63,\rip(%rsp)
15305+ .endm
15306+ .macro pax_force_fptr ptr
15307+ btsq $63,\ptr
15308+ .endm
15309+ .macro pax_set_fptr_mask
15310+ .endm
15311+#endif
15312+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15313+ .macro pax_force_retaddr rip=0, reload=0
15314+ .if \reload
15315+ pax_set_fptr_mask
15316+ .endif
15317+ orq %r12,\rip(%rsp)
15318+ .endm
15319+ .macro pax_force_fptr ptr
15320+ orq %r12,\ptr
15321+ .endm
15322+ .macro pax_set_fptr_mask
15323+ movabs $0x8000000000000000,%r12
15324+ .endm
15325+#endif
15326+#else
15327+ .macro pax_force_retaddr rip=0, reload=0
15328+ .endm
15329+ .macro pax_force_fptr ptr
15330+ .endm
15331+ .macro pax_force_retaddr_bts rip=0
15332+ .endm
15333+ .macro pax_set_fptr_mask
15334+ .endm
15335+#endif
15336+
15337 .macro altinstruction_entry orig alt feature orig_len alt_len
15338 .long \orig - .
15339 .long \alt - .
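The pax_force_retaddr/pax_force_fptr macros above implement the two KERNEXEC masking methods: the BTS method sets bit 63 of the saved return address or function pointer in place, while the OR method keeps the same 1<<63 mask preloaded in %r12 via pax_set_fptr_mask. Either way a kernel address (top bit already set on x86-64) is unchanged, and a smuggled userland address becomes non-canonical and faults when used. A rough C model of the mask:

#include <stdint.h>
#include <stdio.h>

/* Model of the btsq $63 / orq %r12 masking: setting bit 63 is a no-op
 * for kernel-space addresses and corrupts userland ones into
 * non-canonical form, so a hijacked return address faults instead of
 * landing in attacker-controlled user memory. */
#define KERNEXEC_MASK	(1ULL << 63)

static uint64_t force_kernel_addr(uint64_t ptr)
{
	return ptr | KERNEXEC_MASK;
}

int main(void)
{
	/* userland address -> non-canonical */
	printf("%#llx\n", (unsigned long long)force_kernel_addr(0x00007f0012345678ULL));
	/* kernel address -> unchanged */
	printf("%#llx\n", (unsigned long long)force_kernel_addr(0xffffffff81000000ULL));
	return 0;
}
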
15340diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15341index 0a3f9c9..c9d081d 100644
15342--- a/arch/x86/include/asm/alternative.h
15343+++ b/arch/x86/include/asm/alternative.h
15344@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15345 ".pushsection .discard,\"aw\",@progbits\n" \
15346 DISCARD_ENTRY(1) \
15347 ".popsection\n" \
15348- ".pushsection .altinstr_replacement, \"ax\"\n" \
15349+ ".pushsection .altinstr_replacement, \"a\"\n" \
15350 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15351 ".popsection"
15352
15353@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15354 DISCARD_ENTRY(1) \
15355 DISCARD_ENTRY(2) \
15356 ".popsection\n" \
15357- ".pushsection .altinstr_replacement, \"ax\"\n" \
15358+ ".pushsection .altinstr_replacement, \"a\"\n" \
15359 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15360 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15361 ".popsection"
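The "ax" to "a" flag change strips SHF_EXECINSTR from .altinstr_replacement: the replacement templates are only copied over the original instructions at patch time, never executed in place, so under KERNEXEC there is no reason to map them executable. A tiny demo of the section flags involved (GNU toolchain assumed):

/* In .pushsection flag strings, "a" marks the section allocatable
 * (SHF_ALLOC) and "x" additionally executable (SHF_EXECINSTR).
 * Data that is only read or copied from needs just "a". */
__asm__(".pushsection .demo_data,\"a\"\n"
	".long 0x90909090\n"
	".popsection\n");

int main(void) { return 0; }
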
15362diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15363index 19b0eba..12254cd 100644
15364--- a/arch/x86/include/asm/apic.h
15365+++ b/arch/x86/include/asm/apic.h
15366@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15367
15368 #ifdef CONFIG_X86_LOCAL_APIC
15369
15370-extern unsigned int apic_verbosity;
15371+extern int apic_verbosity;
15372 extern int local_apic_timer_c2_ok;
15373
15374 extern int disable_apic;
15375diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15376index 20370c6..a2eb9b0 100644
15377--- a/arch/x86/include/asm/apm.h
15378+++ b/arch/x86/include/asm/apm.h
15379@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15380 __asm__ __volatile__(APM_DO_ZERO_SEGS
15381 "pushl %%edi\n\t"
15382 "pushl %%ebp\n\t"
15383- "lcall *%%cs:apm_bios_entry\n\t"
15384+ "lcall *%%ss:apm_bios_entry\n\t"
15385 "setc %%al\n\t"
15386 "popl %%ebp\n\t"
15387 "popl %%edi\n\t"
15388@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15389 __asm__ __volatile__(APM_DO_ZERO_SEGS
15390 "pushl %%edi\n\t"
15391 "pushl %%ebp\n\t"
15392- "lcall *%%cs:apm_bios_entry\n\t"
15393+ "lcall *%%ss:apm_bios_entry\n\t"
15394 "setc %%bl\n\t"
15395 "popl %%ebp\n\t"
15396 "popl %%edi\n\t"
15397diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15398index 6dd1c7dd..2edd216 100644
15399--- a/arch/x86/include/asm/atomic.h
15400+++ b/arch/x86/include/asm/atomic.h
15401@@ -24,7 +24,18 @@
15402 */
15403 static inline int atomic_read(const atomic_t *v)
15404 {
15405- return (*(volatile int *)&(v)->counter);
15406+ return (*(volatile const int *)&(v)->counter);
15407+}
15408+
15409+/**
15410+ * atomic_read_unchecked - read atomic variable
15411+ * @v: pointer of type atomic_unchecked_t
15412+ *
15413+ * Atomically reads the value of @v.
15414+ */
15415+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15416+{
15417+ return (*(volatile const int *)&(v)->counter);
15418 }
15419
15420 /**
15421@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
15422 }
15423
15424 /**
15425+ * atomic_set_unchecked - set atomic variable
15426+ * @v: pointer of type atomic_unchecked_t
15427+ * @i: required value
15428+ *
15429+ * Atomically sets the value of @v to @i.
15430+ */
15431+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15432+{
15433+ v->counter = i;
15434+}
15435+
15436+/**
15437 * atomic_add - add integer to atomic variable
15438 * @i: integer value to add
15439 * @v: pointer of type atomic_t
15440@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
15441 */
15442 static inline void atomic_add(int i, atomic_t *v)
15443 {
15444- asm volatile(LOCK_PREFIX "addl %1,%0"
15445+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15446+
15447+#ifdef CONFIG_PAX_REFCOUNT
15448+ "jno 0f\n"
15449+ LOCK_PREFIX "subl %1,%0\n"
15450+ "int $4\n0:\n"
15451+ _ASM_EXTABLE(0b, 0b)
15452+#endif
15453+
15454+ : "+m" (v->counter)
15455+ : "ir" (i));
15456+}
15457+
15458+/**
15459+ * atomic_add_unchecked - add integer to atomic variable
15460+ * @i: integer value to add
15461+ * @v: pointer of type atomic_unchecked_t
15462+ *
15463+ * Atomically adds @i to @v.
15464+ */
15465+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15466+{
15467+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15468 : "+m" (v->counter)
15469 : "ir" (i));
15470 }
15471@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
15472 */
15473 static inline void atomic_sub(int i, atomic_t *v)
15474 {
15475- asm volatile(LOCK_PREFIX "subl %1,%0"
15476+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15477+
15478+#ifdef CONFIG_PAX_REFCOUNT
15479+ "jno 0f\n"
15480+ LOCK_PREFIX "addl %1,%0\n"
15481+ "int $4\n0:\n"
15482+ _ASM_EXTABLE(0b, 0b)
15483+#endif
15484+
15485+ : "+m" (v->counter)
15486+ : "ir" (i));
15487+}
15488+
15489+/**
15490+ * atomic_sub_unchecked - subtract integer from atomic variable
15491+ * @i: integer value to subtract
15492+ * @v: pointer of type atomic_unchecked_t
15493+ *
15494+ * Atomically subtracts @i from @v.
15495+ */
15496+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15497+{
15498+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15499 : "+m" (v->counter)
15500 : "ir" (i));
15501 }
15502@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15503 */
15504 static inline int atomic_sub_and_test(int i, atomic_t *v)
15505 {
15506- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15507+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15508 }
15509
15510 /**
15511@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15512 */
15513 static inline void atomic_inc(atomic_t *v)
15514 {
15515- asm volatile(LOCK_PREFIX "incl %0"
15516+ asm volatile(LOCK_PREFIX "incl %0\n"
15517+
15518+#ifdef CONFIG_PAX_REFCOUNT
15519+ "jno 0f\n"
15520+ LOCK_PREFIX "decl %0\n"
15521+ "int $4\n0:\n"
15522+ _ASM_EXTABLE(0b, 0b)
15523+#endif
15524+
15525+ : "+m" (v->counter));
15526+}
15527+
15528+/**
15529+ * atomic_inc_unchecked - increment atomic variable
15530+ * @v: pointer of type atomic_unchecked_t
15531+ *
15532+ * Atomically increments @v by 1.
15533+ */
15534+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
15535+{
15536+ asm volatile(LOCK_PREFIX "incl %0\n"
15537 : "+m" (v->counter));
15538 }
15539
15540@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
15541 */
15542 static inline void atomic_dec(atomic_t *v)
15543 {
15544- asm volatile(LOCK_PREFIX "decl %0"
15545+ asm volatile(LOCK_PREFIX "decl %0\n"
15546+
15547+#ifdef CONFIG_PAX_REFCOUNT
15548+ "jno 0f\n"
15549+ LOCK_PREFIX "incl %0\n"
15550+ "int $4\n0:\n"
15551+ _ASM_EXTABLE(0b, 0b)
15552+#endif
15553+
15554+ : "+m" (v->counter));
15555+}
15556+
15557+/**
15558+ * atomic_dec_unchecked - decrement atomic variable
15559+ * @v: pointer of type atomic_unchecked_t
15560+ *
15561+ * Atomically decrements @v by 1.
15562+ */
15563+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
15564+{
15565+ asm volatile(LOCK_PREFIX "decl %0\n"
15566 : "+m" (v->counter));
15567 }
15568
15569@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
15570 */
15571 static inline int atomic_dec_and_test(atomic_t *v)
15572 {
15573- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
15574+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
15575 }
15576
15577 /**
15578@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
15579 */
15580 static inline int atomic_inc_and_test(atomic_t *v)
15581 {
15582- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
15583+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
15584+}
15585+
15586+/**
15587+ * atomic_inc_and_test_unchecked - increment and test
15588+ * @v: pointer of type atomic_unchecked_t
15589+ *
15590+ * Atomically increments @v by 1
15591+ * and returns true if the result is zero, or false for all
15592+ * other cases.
15593+ */
15594+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
15595+{
15596+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
15597 }
15598
15599 /**
15600@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
15601 */
15602 static inline int atomic_add_negative(int i, atomic_t *v)
15603 {
15604- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
15605+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
15606 }
15607
15608 /**
15609@@ -154,6 +274,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
15610 */
15611 static inline int atomic_add_return(int i, atomic_t *v)
15612 {
15613+ return i + xadd_check_overflow(&v->counter, i);
15614+}
15615+
15616+/**
15617+ * atomic_add_return_unchecked - add integer and return
15618+ * @i: integer value to add
15619+ * @v: pointer of type atomic_unchecked_t
15620+ *
15621+ * Atomically adds @i to @v and returns @i + *@v
15622+ */
15623+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
15624+{
15625 return i + xadd(&v->counter, i);
15626 }
15627
15628@@ -170,9 +302,18 @@ static inline int atomic_sub_return(int i, atomic_t *v)
15629 }
15630
15631 #define atomic_inc_return(v) (atomic_add_return(1, v))
15632+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
15633+{
15634+ return atomic_add_return_unchecked(1, v);
15635+}
15636 #define atomic_dec_return(v) (atomic_sub_return(1, v))
15637
15638-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
15639+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
15640+{
15641+ return cmpxchg(&v->counter, old, new);
15642+}
15643+
15644+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
15645 {
15646 return cmpxchg(&v->counter, old, new);
15647 }
15648@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
15649 return xchg(&v->counter, new);
15650 }
15651
15652+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
15653+{
15654+ return xchg(&v->counter, new);
15655+}
15656+
15657 /**
15658 * __atomic_add_unless - add unless the number is already a given value
15659 * @v: pointer of type atomic_t
15660@@ -191,14 +337,27 @@ static inline int atomic_xchg(atomic_t *v, int new)
15661 * Atomically adds @a to @v, so long as @v was not already @u.
15662 * Returns the old value of @v.
15663 */
15664-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15665+static inline int __intentional_overflow(-1) __atomic_add_unless(atomic_t *v, int a, int u)
15666 {
15667- int c, old;
15668+ int c, old, new;
15669 c = atomic_read(v);
15670 for (;;) {
15671- if (unlikely(c == (u)))
15672+ if (unlikely(c == u))
15673 break;
15674- old = atomic_cmpxchg((v), c, c + (a));
15675+
15676+ asm volatile("addl %2,%0\n"
15677+
15678+#ifdef CONFIG_PAX_REFCOUNT
15679+ "jno 0f\n"
15680+ "subl %2,%0\n"
15681+ "int $4\n0:\n"
15682+ _ASM_EXTABLE(0b, 0b)
15683+#endif
15684+
15685+ : "=r" (new)
15686+ : "0" (c), "ir" (a));
15687+
15688+ old = atomic_cmpxchg(v, c, new);
15689 if (likely(old == c))
15690 break;
15691 c = old;
15692@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15693 }
15694
15695 /**
15696+ * atomic_inc_not_zero_hint - increment if not null
15697+ * @v: pointer of type atomic_t
15698+ * @hint: probable value of the atomic before the increment
15699+ *
15700+ * This version of atomic_inc_not_zero() takes a hint of the probable
15701+ * value of the atomic. The hint lets the processor avoid reading the memory
15702+ * before doing the atomic read/modify/write cycle, lowering the
15703+ * number of bus transactions on some arches.
15704+ *
15705+ * Returns: 0 if increment was not done, 1 otherwise.
15706+ */
15707+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
15708+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
15709+{
15710+ int val, c = hint, new;
15711+
15712+ /* sanity test, should be removed by compiler if hint is a constant */
15713+ if (!hint)
15714+ return __atomic_add_unless(v, 1, 0);
15715+
15716+ do {
15717+ asm volatile("incl %0\n"
15718+
15719+#ifdef CONFIG_PAX_REFCOUNT
15720+ "jno 0f\n"
15721+ "decl %0\n"
15722+ "int $4\n0:\n"
15723+ _ASM_EXTABLE(0b, 0b)
15724+#endif
15725+
15726+ : "=r" (new)
15727+ : "0" (c));
15728+
15729+ val = atomic_cmpxchg(v, c, new);
15730+ if (val == c)
15731+ return 1;
15732+ c = val;
15733+ } while (c);
15734+
15735+ return 0;
15736+}
15737+
15738+/**
15739 * atomic_inc_short - increment of a short integer
15740 * @v: pointer to type int
15741 *
15742@@ -235,14 +437,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
15743 #endif
15744
15745 /* These are x86-specific, used by some header files */
15746-#define atomic_clear_mask(mask, addr) \
15747- asm volatile(LOCK_PREFIX "andl %0,%1" \
15748- : : "r" (~(mask)), "m" (*(addr)) : "memory")
15749+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
15750+{
15751+ asm volatile(LOCK_PREFIX "andl %1,%0"
15752+ : "+m" (v->counter)
15753+ : "r" (~(mask))
15754+ : "memory");
15755+}
15756
15757-#define atomic_set_mask(mask, addr) \
15758- asm volatile(LOCK_PREFIX "orl %0,%1" \
15759- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
15760- : "memory")
15761+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15762+{
15763+ asm volatile(LOCK_PREFIX "andl %1,%0"
15764+ : "+m" (v->counter)
15765+ : "r" (~(mask))
15766+ : "memory");
15767+}
15768+
15769+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
15770+{
15771+ asm volatile(LOCK_PREFIX "orl %1,%0"
15772+ : "+m" (v->counter)
15773+ : "r" (mask)
15774+ : "memory");
15775+}
15776+
15777+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15778+{
15779+ asm volatile(LOCK_PREFIX "orl %1,%0"
15780+ : "+m" (v->counter)
15781+ : "r" (mask)
15782+ : "memory");
15783+}
15784
15785 #ifdef CONFIG_X86_32
15786 # include <asm/atomic64_32.h>
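Every CONFIG_PAX_REFCOUNT block above follows one pattern: perform the locked operation, and if it set the Overflow Flag, undo it and execute int $4 so the overflow trap handler can act with the counter already restored. A hedged userland model of that pattern (x86 GNU C only; the refcount_overflow() function is a hypothetical stand-in for the kernel's int $4 trap):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static void refcount_overflow(void)
{
	fprintf(stderr, "refcount overflow caught\n");
	exit(1);
}

/* Model of "lock incl; jno 0f; lock decl; int $4; 0:": do the locked
 * increment, capture OF, and on overflow undo the increment before
 * reporting, exactly as the patch's undo-then-trap sequence does. */
static void refcount_inc(int *counter)
{
	unsigned char of;

	asm volatile("lock incl %0\n\t"
		     "seto %1"			/* capture OF from the incl */
		     : "+m" (*counter), "=q" (of)
		     : : "memory", "cc");
	if (of) {
		asm volatile("lock decl %0"	/* undo, as the patch does */
			     : "+m" (*counter) : : "memory", "cc");
		refcount_overflow();
	}
}

int main(void)
{
	int c = INT_MAX;
	refcount_inc(&c);	/* INT_MAX + 1 overflows -> undo + trap */
	return 0;
}
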
15787diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
15788index b154de7..bf18a5a 100644
15789--- a/arch/x86/include/asm/atomic64_32.h
15790+++ b/arch/x86/include/asm/atomic64_32.h
15791@@ -12,6 +12,14 @@ typedef struct {
15792 u64 __aligned(8) counter;
15793 } atomic64_t;
15794
15795+#ifdef CONFIG_PAX_REFCOUNT
15796+typedef struct {
15797+ u64 __aligned(8) counter;
15798+} atomic64_unchecked_t;
15799+#else
15800+typedef atomic64_t atomic64_unchecked_t;
15801+#endif
15802+
15803 #define ATOMIC64_INIT(val) { (val) }
15804
15805 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
15806@@ -37,21 +45,31 @@ typedef struct {
15807 ATOMIC64_DECL_ONE(sym##_386)
15808
15809 ATOMIC64_DECL_ONE(add_386);
15810+ATOMIC64_DECL_ONE(add_unchecked_386);
15811 ATOMIC64_DECL_ONE(sub_386);
15812+ATOMIC64_DECL_ONE(sub_unchecked_386);
15813 ATOMIC64_DECL_ONE(inc_386);
15814+ATOMIC64_DECL_ONE(inc_unchecked_386);
15815 ATOMIC64_DECL_ONE(dec_386);
15816+ATOMIC64_DECL_ONE(dec_unchecked_386);
15817 #endif
15818
15819 #define alternative_atomic64(f, out, in...) \
15820 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
15821
15822 ATOMIC64_DECL(read);
15823+ATOMIC64_DECL(read_unchecked);
15824 ATOMIC64_DECL(set);
15825+ATOMIC64_DECL(set_unchecked);
15826 ATOMIC64_DECL(xchg);
15827 ATOMIC64_DECL(add_return);
15828+ATOMIC64_DECL(add_return_unchecked);
15829 ATOMIC64_DECL(sub_return);
15830+ATOMIC64_DECL(sub_return_unchecked);
15831 ATOMIC64_DECL(inc_return);
15832+ATOMIC64_DECL(inc_return_unchecked);
15833 ATOMIC64_DECL(dec_return);
15834+ATOMIC64_DECL(dec_return_unchecked);
15835 ATOMIC64_DECL(dec_if_positive);
15836 ATOMIC64_DECL(inc_not_zero);
15837 ATOMIC64_DECL(add_unless);
15838@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
15839 }
15840
15841 /**
15842+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
15843+ * @v: pointer to type atomic64_unchecked_t
15844+ * @o: expected value
15845+ * @n: new value
15846+ *
15847+ * Atomically sets @v to @n if it was equal to @o and returns
15848+ * the old value.
15849+ */
15850+
15851+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
15852+{
15853+ return cmpxchg64(&v->counter, o, n);
15854+}
15855+
15856+/**
15857 * atomic64_xchg - xchg atomic64 variable
15858 * @v: pointer to type atomic64_t
15859 * @n: value to assign
15860@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
15861 }
15862
15863 /**
15864+ * atomic64_set_unchecked - set atomic64 variable
15865+ * @v: pointer to type atomic64_unchecked_t
15866+ * @i: value to assign
15867+ *
15868+ * Atomically sets the value of @v to @i.
15869+ */
15870+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
15871+{
15872+ unsigned high = (unsigned)(i >> 32);
15873+ unsigned low = (unsigned)i;
15874+ alternative_atomic64(set, /* no output */,
15875+ "S" (v), "b" (low), "c" (high)
15876+ : "eax", "edx", "memory");
15877+}
15878+
15879+/**
15880 * atomic64_read - read atomic64 variable
15881 * @v: pointer to type atomic64_t
15882 *
15883@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
15884 }
15885
15886 /**
15887+ * atomic64_read_unchecked - read atomic64 variable
15888+ * @v: pointer to type atomic64_unchecked_t
15889+ *
15890+ * Atomically reads the value of @v and returns it.
15891+ */
15892+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
15893+{
15894+ long long r;
15895+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
15896+ return r;
15897+}
15898+
15899+/**
15900 * atomic64_add_return - add and return
15901 * @i: integer value to add
15902 * @v: pointer to type atomic64_t
15903@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
15904 return i;
15905 }
15906
15907+/**
15908+ * atomic64_add_return_unchecked - add and return
15909+ * @i: integer value to add
15910+ * @v: pointer to type atomic64_unchecked_t
15911+ *
15912+ * Atomically adds @i to @v and returns @i + *@v
15913+ */
15914+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
15915+{
15916+ alternative_atomic64(add_return_unchecked,
15917+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15918+ ASM_NO_INPUT_CLOBBER("memory"));
15919+ return i;
15920+}
15921+
15922 /*
15923 * Other variants with different arithmetic operators:
15924 */
15925@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
15926 return a;
15927 }
15928
15929+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15930+{
15931+ long long a;
15932+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
15933+ "S" (v) : "memory", "ecx");
15934+ return a;
15935+}
15936+
15937 static inline long long atomic64_dec_return(atomic64_t *v)
15938 {
15939 long long a;
15940@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
15941 }
15942
15943 /**
15944+ * atomic64_add_unchecked - add integer to atomic64 variable
15945+ * @i: integer value to add
15946+ * @v: pointer to type atomic64_unchecked_t
15947+ *
15948+ * Atomically adds @i to @v.
15949+ */
15950+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
15951+{
15952+ __alternative_atomic64(add_unchecked, add_return_unchecked,
15953+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15954+ ASM_NO_INPUT_CLOBBER("memory"));
15955+ return i;
15956+}
15957+
15958+/**
15959 * atomic64_sub - subtract the atomic64 variable
15960 * @i: integer value to subtract
15961 * @v: pointer to type atomic64_t
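For context on the _386 variants and the alternative_atomic64() plumbing above: a plain 8-byte load or store is not atomic on 32-bit x86, so every atomic64 operation is an out-of-line helper patched at boot to use cmpxchg8b (or a fallback) as the CPU allows. A portable sketch of an atomic 64-bit read from 32-bit code, assuming GCC's __atomic builtins (which may require -latomic on some targets):

#include <stdio.h>

/* On 32-bit x86 a single 64-bit load must be synthesized; the builtin
 * typically compiles down to cmpxchg8b, mirroring what the patched-in
 * atomic64_read() helper does. */
static long long atomic64_read_model(const long long *v)
{
	return __atomic_load_n(v, __ATOMIC_SEQ_CST);
}

int main(void)
{
	long long v = 0x0123456789abcdefLL;
	printf("%llx\n", atomic64_read_model(&v));
	return 0;
}
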
15962diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
15963index 46e9052..ae45136 100644
15964--- a/arch/x86/include/asm/atomic64_64.h
15965+++ b/arch/x86/include/asm/atomic64_64.h
15966@@ -18,7 +18,19 @@
15967 */
15968 static inline long atomic64_read(const atomic64_t *v)
15969 {
15970- return (*(volatile long *)&(v)->counter);
15971+ return (*(volatile const long *)&(v)->counter);
15972+}
15973+
15974+/**
15975+ * atomic64_read_unchecked - read atomic64 variable
15976+ * @v: pointer of type atomic64_unchecked_t
15977+ *
15978+ * Atomically reads the value of @v.
15979+ * Doesn't imply a read memory barrier.
15980+ */
15981+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
15982+{
15983+ return (*(volatile const long *)&(v)->counter);
15984 }
15985
15986 /**
15987@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
15988 }
15989
15990 /**
15991+ * atomic64_set_unchecked - set atomic64 variable
15992+ * @v: pointer to type atomic64_unchecked_t
15993+ * @i: required value
15994+ *
15995+ * Atomically sets the value of @v to @i.
15996+ */
15997+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
15998+{
15999+ v->counter = i;
16000+}
16001+
16002+/**
16003 * atomic64_add - add integer to atomic64 variable
16004 * @i: integer value to add
16005 * @v: pointer to type atomic64_t
16006@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
16007 */
16008 static inline void atomic64_add(long i, atomic64_t *v)
16009 {
16010+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
16011+
16012+#ifdef CONFIG_PAX_REFCOUNT
16013+ "jno 0f\n"
16014+ LOCK_PREFIX "subq %1,%0\n"
16015+ "int $4\n0:\n"
16016+ _ASM_EXTABLE(0b, 0b)
16017+#endif
16018+
16019+ : "=m" (v->counter)
16020+ : "er" (i), "m" (v->counter));
16021+}
16022+
16023+/**
16024+ * atomic64_add_unchecked - add integer to atomic64 variable
16025+ * @i: integer value to add
16026+ * @v: pointer to type atomic64_unchecked_t
16027+ *
16028+ * Atomically adds @i to @v.
16029+ */
16030+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
16031+{
16032 asm volatile(LOCK_PREFIX "addq %1,%0"
16033 : "=m" (v->counter)
16034 : "er" (i), "m" (v->counter));
16035@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
16036 */
16037 static inline void atomic64_sub(long i, atomic64_t *v)
16038 {
16039- asm volatile(LOCK_PREFIX "subq %1,%0"
16040+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
16041+
16042+#ifdef CONFIG_PAX_REFCOUNT
16043+ "jno 0f\n"
16044+ LOCK_PREFIX "addq %1,%0\n"
16045+ "int $4\n0:\n"
16046+ _ASM_EXTABLE(0b, 0b)
16047+#endif
16048+
16049+ : "=m" (v->counter)
16050+ : "er" (i), "m" (v->counter));
16051+}
16052+
16053+/**
16054+ * atomic64_sub_unchecked - subtract the atomic64 variable
16055+ * @i: integer value to subtract
16056+ * @v: pointer to type atomic64_unchecked_t
16057+ *
16058+ * Atomically subtracts @i from @v.
16059+ */
16060+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
16061+{
16062+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
16063 : "=m" (v->counter)
16064 : "er" (i), "m" (v->counter));
16065 }
16066@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
16067 */
16068 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
16069 {
16070- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
16071+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
16072 }
16073
16074 /**
16075@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
16076 */
16077 static inline void atomic64_inc(atomic64_t *v)
16078 {
16079+ asm volatile(LOCK_PREFIX "incq %0\n"
16080+
16081+#ifdef CONFIG_PAX_REFCOUNT
16082+ "jno 0f\n"
16083+ LOCK_PREFIX "decq %0\n"
16084+ "int $4\n0:\n"
16085+ _ASM_EXTABLE(0b, 0b)
16086+#endif
16087+
16088+ : "=m" (v->counter)
16089+ : "m" (v->counter));
16090+}
16091+
16092+/**
16093+ * atomic64_inc_unchecked - increment atomic64 variable
16094+ * @v: pointer to type atomic64_unchecked_t
16095+ *
16096+ * Atomically increments @v by 1.
16097+ */
16098+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
16099+{
16100 asm volatile(LOCK_PREFIX "incq %0"
16101 : "=m" (v->counter)
16102 : "m" (v->counter));
16103@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
16104 */
16105 static inline void atomic64_dec(atomic64_t *v)
16106 {
16107- asm volatile(LOCK_PREFIX "decq %0"
16108+ asm volatile(LOCK_PREFIX "decq %0\n"
16109+
16110+#ifdef CONFIG_PAX_REFCOUNT
16111+ "jno 0f\n"
16112+ LOCK_PREFIX "incq %0\n"
16113+ "int $4\n0:\n"
16114+ _ASM_EXTABLE(0b, 0b)
16115+#endif
16116+
16117+ : "=m" (v->counter)
16118+ : "m" (v->counter));
16119+}
16120+
16121+/**
16122+ * atomic64_dec_unchecked - decrement atomic64 variable
16123+ * @v: pointer to type atomic64_unchecked_t
16124+ *
16125+ * Atomically decrements @v by 1.
16126+ */
16127+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
16128+{
16129+ asm volatile(LOCK_PREFIX "decq %0\n"
16130 : "=m" (v->counter)
16131 : "m" (v->counter));
16132 }
16133@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
16134 */
16135 static inline int atomic64_dec_and_test(atomic64_t *v)
16136 {
16137- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
16138+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
16139 }
16140
16141 /**
16142@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
16143 */
16144 static inline int atomic64_inc_and_test(atomic64_t *v)
16145 {
16146- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
16147+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
16148 }
16149
16150 /**
16151@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
16152 */
16153 static inline int atomic64_add_negative(long i, atomic64_t *v)
16154 {
16155- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
16156+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
16157 }
16158
16159 /**
16160@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
16161 */
16162 static inline long atomic64_add_return(long i, atomic64_t *v)
16163 {
16164+ return i + xadd_check_overflow(&v->counter, i);
16165+}
16166+
16167+/**
16168+ * atomic64_add_return_unchecked - add and return
16169+ * @i: integer value to add
16170+ * @v: pointer to type atomic64_unchecked_t
16171+ *
16172+ * Atomically adds @i to @v and returns @i + *@v
16173+ */
16174+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
16175+{
16176 return i + xadd(&v->counter, i);
16177 }
16178
16179@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
16180 }
16181
16182 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
16183+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
16184+{
16185+ return atomic64_add_return_unchecked(1, v);
16186+}
16187 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
16188
16189 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16190@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16191 return cmpxchg(&v->counter, old, new);
16192 }
16193
16194+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
16195+{
16196+ return cmpxchg(&v->counter, old, new);
16197+}
16198+
16199 static inline long atomic64_xchg(atomic64_t *v, long new)
16200 {
16201 return xchg(&v->counter, new);
16202@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
16203 */
16204 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
16205 {
16206- long c, old;
16207+ long c, old, new;
16208 c = atomic64_read(v);
16209 for (;;) {
16210- if (unlikely(c == (u)))
16211+ if (unlikely(c == u))
16212 break;
16213- old = atomic64_cmpxchg((v), c, c + (a));
16214+
16215+ asm volatile("add %2,%0\n"
16216+
16217+#ifdef CONFIG_PAX_REFCOUNT
16218+ "jno 0f\n"
16219+ "sub %2,%0\n"
16220+ "int $4\n0:\n"
16221+ _ASM_EXTABLE(0b, 0b)
16222+#endif
16223+
16224+ : "=r" (new)
16225+ : "0" (c), "ir" (a));
16226+
16227+ old = atomic64_cmpxchg(v, c, new);
16228 if (likely(old == c))
16229 break;
16230 c = old;
16231 }
16232- return c != (u);
16233+ return c != u;
16234 }
16235
16236 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
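atomic64_add_unless() above (like __atomic_add_unless() earlier) is the classic cmpxchg retry loop: sample the counter, compute the update outside the atomic, and retry whenever another CPU raced in between; the PAX_REFCOUNT version merely computes the new value with an overflow-checked add. A sketch of the bare loop, with GCC's builtin standing in for the kernel's cmpxchg:

#include <stdio.h>

/* add a to *v unless *v == u; returns 1 if the add happened, 0 if not */
static int add_unless(long *v, long a, long u)
{
	long c = __atomic_load_n(v, __ATOMIC_RELAXED);

	for (;;) {
		if (c == u)
			return 0;			/* hit the excluded value */
		/* on failure, c is refreshed with the current *v */
		if (__atomic_compare_exchange_n(v, &c, c + a, 0,
						__ATOMIC_SEQ_CST,
						__ATOMIC_RELAXED))
			return 1;
	}
}

int main(void)
{
	long v = 5;
	printf("%d %ld\n", add_unless(&v, 1, 5), v);	/* 0 5: unchanged */
	printf("%d %ld\n", add_unless(&v, 1, 7), v);	/* 1 6 */
	return 0;
}
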
16237diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
16238index 5c7198c..44180b5 100644
16239--- a/arch/x86/include/asm/barrier.h
16240+++ b/arch/x86/include/asm/barrier.h
16241@@ -107,7 +107,7 @@
16242 do { \
16243 compiletime_assert_atomic_type(*p); \
16244 smp_mb(); \
16245- ACCESS_ONCE(*p) = (v); \
16246+ ACCESS_ONCE_RW(*p) = (v); \
16247 } while (0)
16248
16249 #define smp_load_acquire(p) \
16250@@ -124,7 +124,7 @@ do { \
16251 do { \
16252 compiletime_assert_atomic_type(*p); \
16253 barrier(); \
16254- ACCESS_ONCE(*p) = (v); \
16255+ ACCESS_ONCE_RW(*p) = (v); \
16256 } while (0)
16257
16258 #define smp_load_acquire(p) \
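The ACCESS_ONCE_RW substitutions above come from PaX constifying the plain macro: reads keep using ACCESS_ONCE, which now yields a const volatile lvalue, while writes must go through ACCESS_ONCE_RW. A sketch assuming the PaX-style definitions:

#include <stdio.h>

/* Sketch, assuming the PaX definitions: a stray write through
 * ACCESS_ONCE now fails to compile (assignment to const) instead of
 * silently bypassing the read/write split. */
#define ACCESS_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))

int main(void)
{
	int v = 1;
	int r = ACCESS_ONCE(v);		/* ok: volatile read */
	ACCESS_ONCE_RW(v) = 2;		/* ok: volatile write */
	/* ACCESS_ONCE(v) = 3; */	/* would not compile */
	printf("%d %d\n", r, v);
	return 0;
}
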
16259diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
16260index afcd35d..d01b118 100644
16261--- a/arch/x86/include/asm/bitops.h
16262+++ b/arch/x86/include/asm/bitops.h
16263@@ -50,7 +50,7 @@
16264 * a mask operation on a byte.
16265 */
16266 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
16267-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
16268+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
16269 #define CONST_MASK(nr) (1 << ((nr) & 7))
16270
16271 /**
16272@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
16273 */
16274 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
16275 {
16276- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16277+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16278 }
16279
16280 /**
16281@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
16282 */
16283 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
16284 {
16285- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16286+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16287 }
16288
16289 /**
16290@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
16291 */
16292 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
16293 {
16294- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16295+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16296 }
16297
16298 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
16299@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
16300 *
16301 * Undefined if no bit exists, so code should check against 0 first.
16302 */
16303-static inline unsigned long __ffs(unsigned long word)
16304+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16305 {
16306 asm("rep; bsf %1,%0"
16307 : "=r" (word)
16308@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
16309 *
16310 * Undefined if no zero exists, so code should check against ~0UL first.
16311 */
16312-static inline unsigned long ffz(unsigned long word)
16313+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16314 {
16315 asm("rep; bsf %1,%0"
16316 : "=r" (word)
16317@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
16318 *
16319 * Undefined if no set bit exists, so code should check against 0 first.
16320 */
16321-static inline unsigned long __fls(unsigned long word)
16322+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16323 {
16324 asm("bsr %1,%0"
16325 : "=r" (word)
16326@@ -434,7 +434,7 @@ static inline int ffs(int x)
16327 * set bit if value is nonzero. The last (most significant) bit is
16328 * at position 32.
16329 */
16330-static inline int fls(int x)
16331+static inline int __intentional_overflow(-1) fls(int x)
16332 {
16333 int r;
16334
16335@@ -476,7 +476,7 @@ static inline int fls(int x)
16336 * at position 64.
16337 */
16338 #ifdef CONFIG_X86_64
16339-static __always_inline int fls64(__u64 x)
16340+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
16341 {
16342 int bitpos = -1;
16343 /*
16344@@ -497,8 +497,6 @@ static __always_inline int fls64(__u64 x)
16345
16346 #include <asm-generic/bitops/sched.h>
16347
16348-#define ARCH_HAS_FAST_MULTIPLIER 1
16349-
16350 #include <asm/arch_hweight.h>
16351
16352 #include <asm-generic/bitops/const_hweight.h>
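The __intentional_overflow(-1) annotations above mark __ffs()/ffz()/__fls()/fls()/fls64() -- thin wrappers around bsf/bsr whose result is undefined for an all-zero (or, for ffz, all-one) input -- so the size_overflow plugin leaves their arithmetic alone. Portable equivalents of the first two via GCC builtins, with the same caveat:

#include <stdio.h>

/* index of lowest set bit; undefined for word == 0, like __ffs() */
static unsigned long my_ffs(unsigned long word)
{
	return (unsigned long)__builtin_ctzl(word);
}

/* index of lowest zero bit; undefined for word == ~0UL, like ffz() */
static unsigned long my_ffz(unsigned long word)
{
	return (unsigned long)__builtin_ctzl(~word);
}

int main(void)
{
	printf("%lu %lu\n", my_ffs(0x18UL), my_ffz(0x07UL));	/* 3 3 */
	return 0;
}
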
16353diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16354index 4fa687a..60f2d39 100644
16355--- a/arch/x86/include/asm/boot.h
16356+++ b/arch/x86/include/asm/boot.h
16357@@ -6,10 +6,15 @@
16358 #include <uapi/asm/boot.h>
16359
16360 /* Physical address where kernel should be loaded. */
16361-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16362+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16363 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16364 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16365
16366+#ifndef __ASSEMBLY__
16367+extern unsigned char __LOAD_PHYSICAL_ADDR[];
16368+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16369+#endif
16370+
16371 /* Minimum kernel alignment, as a power of two */
16372 #ifdef CONFIG_X86_64
16373 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
16374diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16375index 48f99f1..d78ebf9 100644
16376--- a/arch/x86/include/asm/cache.h
16377+++ b/arch/x86/include/asm/cache.h
16378@@ -5,12 +5,13 @@
16379
16380 /* L1 cache line size */
16381 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16382-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16383+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16384
16385 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16386+#define __read_only __attribute__((__section__(".data..read_only")))
16387
16388 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16389-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16390+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16391
16392 #ifdef CONFIG_X86_VSMP
16393 #ifdef CONFIG_SMP
16394diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
16395index 9863ee3..4a1f8e1 100644
16396--- a/arch/x86/include/asm/cacheflush.h
16397+++ b/arch/x86/include/asm/cacheflush.h
16398@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
16399 unsigned long pg_flags = pg->flags & _PGMT_MASK;
16400
16401 if (pg_flags == _PGMT_DEFAULT)
16402- return -1;
16403+ return ~0UL;
16404 else if (pg_flags == _PGMT_WC)
16405 return _PAGE_CACHE_WC;
16406 else if (pg_flags == _PGMT_UC_MINUS)
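The -1 to ~0UL change in get_page_memtype() is value-preserving -- converting -1 to unsigned long yields all bits set -- but spelling it ~0UL states the intent and, presumably, keeps the overflow-checking plugins from flagging a negative constant flowing into an unsigned return. A two-line check:

#include <stdio.h>

int main(void)
{
	unsigned long a = -1;	/* implicit conversion: all bits set */
	unsigned long b = ~0UL;
	printf("%d\n", a == b);	/* prints 1 */
	return 0;
}
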
16407diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16408index cb4c73b..c473c29 100644
16409--- a/arch/x86/include/asm/calling.h
16410+++ b/arch/x86/include/asm/calling.h
16411@@ -82,103 +82,113 @@ For 32-bit we have the following conventions - kernel is built with
16412 #define RSP 152
16413 #define SS 160
16414
16415-#define ARGOFFSET R11
16416-#define SWFRAME ORIG_RAX
16417+#define ARGOFFSET R15
16418
16419 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
16420- subq $9*8+\addskip, %rsp
16421- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
16422- movq_cfi rdi, 8*8
16423- movq_cfi rsi, 7*8
16424- movq_cfi rdx, 6*8
16425+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
16426+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
16427+ movq_cfi rdi, RDI
16428+ movq_cfi rsi, RSI
16429+ movq_cfi rdx, RDX
16430
16431 .if \save_rcx
16432- movq_cfi rcx, 5*8
16433+ movq_cfi rcx, RCX
16434 .endif
16435
16436- movq_cfi rax, 4*8
16437+ movq_cfi rax, RAX
16438
16439 .if \save_r891011
16440- movq_cfi r8, 3*8
16441- movq_cfi r9, 2*8
16442- movq_cfi r10, 1*8
16443- movq_cfi r11, 0*8
16444+ movq_cfi r8, R8
16445+ movq_cfi r9, R9
16446+ movq_cfi r10, R10
16447+ movq_cfi r11, R11
16448 .endif
16449
16450+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16451+ movq_cfi r12, R12
16452+#endif
16453+
16454 .endm
16455
16456-#define ARG_SKIP (9*8)
16457+#define ARG_SKIP ORIG_RAX
16458
16459 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
16460 rstor_r8910=1, rstor_rdx=1
16461+
16462+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16463+ movq_cfi_restore R12, r12
16464+#endif
16465+
16466 .if \rstor_r11
16467- movq_cfi_restore 0*8, r11
16468+ movq_cfi_restore R11, r11
16469 .endif
16470
16471 .if \rstor_r8910
16472- movq_cfi_restore 1*8, r10
16473- movq_cfi_restore 2*8, r9
16474- movq_cfi_restore 3*8, r8
16475+ movq_cfi_restore R10, r10
16476+ movq_cfi_restore R9, r9
16477+ movq_cfi_restore R8, r8
16478 .endif
16479
16480 .if \rstor_rax
16481- movq_cfi_restore 4*8, rax
16482+ movq_cfi_restore RAX, rax
16483 .endif
16484
16485 .if \rstor_rcx
16486- movq_cfi_restore 5*8, rcx
16487+ movq_cfi_restore RCX, rcx
16488 .endif
16489
16490 .if \rstor_rdx
16491- movq_cfi_restore 6*8, rdx
16492+ movq_cfi_restore RDX, rdx
16493 .endif
16494
16495- movq_cfi_restore 7*8, rsi
16496- movq_cfi_restore 8*8, rdi
16497+ movq_cfi_restore RSI, rsi
16498+ movq_cfi_restore RDI, rdi
16499
16500- .if ARG_SKIP+\addskip > 0
16501- addq $ARG_SKIP+\addskip, %rsp
16502- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
16503+ .if ORIG_RAX+\addskip > 0
16504+ addq $ORIG_RAX+\addskip, %rsp
16505+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
16506 .endif
16507 .endm
16508
16509- .macro LOAD_ARGS offset, skiprax=0
16510- movq \offset(%rsp), %r11
16511- movq \offset+8(%rsp), %r10
16512- movq \offset+16(%rsp), %r9
16513- movq \offset+24(%rsp), %r8
16514- movq \offset+40(%rsp), %rcx
16515- movq \offset+48(%rsp), %rdx
16516- movq \offset+56(%rsp), %rsi
16517- movq \offset+64(%rsp), %rdi
16518+ .macro LOAD_ARGS skiprax=0
16519+ movq R11(%rsp), %r11
16520+ movq R10(%rsp), %r10
16521+ movq R9(%rsp), %r9
16522+ movq R8(%rsp), %r8
16523+ movq RCX(%rsp), %rcx
16524+ movq RDX(%rsp), %rdx
16525+ movq RSI(%rsp), %rsi
16526+ movq RDI(%rsp), %rdi
16527 .if \skiprax
16528 .else
16529- movq \offset+72(%rsp), %rax
16530+ movq RAX(%rsp), %rax
16531 .endif
16532 .endm
16533
16534-#define REST_SKIP (6*8)
16535-
16536 .macro SAVE_REST
16537- subq $REST_SKIP, %rsp
16538- CFI_ADJUST_CFA_OFFSET REST_SKIP
16539- movq_cfi rbx, 5*8
16540- movq_cfi rbp, 4*8
16541- movq_cfi r12, 3*8
16542- movq_cfi r13, 2*8
16543- movq_cfi r14, 1*8
16544- movq_cfi r15, 0*8
16545+ movq_cfi rbx, RBX
16546+ movq_cfi rbp, RBP
16547+
16548+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16549+ movq_cfi r12, R12
16550+#endif
16551+
16552+ movq_cfi r13, R13
16553+ movq_cfi r14, R14
16554+ movq_cfi r15, R15
16555 .endm
16556
16557 .macro RESTORE_REST
16558- movq_cfi_restore 0*8, r15
16559- movq_cfi_restore 1*8, r14
16560- movq_cfi_restore 2*8, r13
16561- movq_cfi_restore 3*8, r12
16562- movq_cfi_restore 4*8, rbp
16563- movq_cfi_restore 5*8, rbx
16564- addq $REST_SKIP, %rsp
16565- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
16566+ movq_cfi_restore R15, r15
16567+ movq_cfi_restore R14, r14
16568+ movq_cfi_restore R13, r13
16569+
16570+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16571+ movq_cfi_restore R12, r12
16572+#endif
16573+
16574+ movq_cfi_restore RBP, rbp
16575+ movq_cfi_restore RBX, rbx
16576 .endm
16577
16578 .macro SAVE_ALL
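The SAVE_ARGS/SAVE_REST rework above drops the ad-hoc 0*8..8*8 slot arithmetic and stores every register at its fixed pt_regs offset (the RDI..SS constants visible in the context), so the layout no longer depends on how much of the frame has been built. A sketch of that frame layout on an LP64 build:

#include <stddef.h>
#include <stdio.h>

/* Model of the x86-64 pt_regs save area the macros above index into;
 * the offsets match the constants shown in the context (RSP 152, SS 160). */
struct pt_regs_model {
	unsigned long r15, r14, r13, r12, rbp, rbx, r11, r10, r9, r8;
	unsigned long rax, rcx, rdx, rsi, rdi, orig_rax;
	unsigned long rip, cs, eflags, rsp, ss;
};

int main(void)
{
	printf("RSP offset: %zu\n", offsetof(struct pt_regs_model, rsp));	/* 152 */
	printf("SS  offset: %zu\n", offsetof(struct pt_regs_model, ss));	/* 160 */
	return 0;
}
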
16579diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
16580index f50de69..2b0a458 100644
16581--- a/arch/x86/include/asm/checksum_32.h
16582+++ b/arch/x86/include/asm/checksum_32.h
16583@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
16584 int len, __wsum sum,
16585 int *src_err_ptr, int *dst_err_ptr);
16586
16587+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
16588+ int len, __wsum sum,
16589+ int *src_err_ptr, int *dst_err_ptr);
16590+
16591+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
16592+ int len, __wsum sum,
16593+ int *src_err_ptr, int *dst_err_ptr);
16594+
16595 /*
16596 * Note: when you get a NULL pointer exception here this means someone
16597 * passed in an incorrect kernel address to one of these functions.
16598@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
16599
16600 might_sleep();
16601 stac();
16602- ret = csum_partial_copy_generic((__force void *)src, dst,
16603+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
16604 len, sum, err_ptr, NULL);
16605 clac();
16606
16607@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
16608 might_sleep();
16609 if (access_ok(VERIFY_WRITE, dst, len)) {
16610 stac();
16611- ret = csum_partial_copy_generic(src, (__force void *)dst,
16612+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
16613 len, sum, NULL, err_ptr);
16614 clac();
16615 return ret;
16616diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
16617index d47786a..2d8883e 100644
16618--- a/arch/x86/include/asm/cmpxchg.h
16619+++ b/arch/x86/include/asm/cmpxchg.h
16620@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
16621 __compiletime_error("Bad argument size for cmpxchg");
16622 extern void __xadd_wrong_size(void)
16623 __compiletime_error("Bad argument size for xadd");
16624+extern void __xadd_check_overflow_wrong_size(void)
16625+ __compiletime_error("Bad argument size for xadd_check_overflow");
16626 extern void __add_wrong_size(void)
16627 __compiletime_error("Bad argument size for add");
16628+extern void __add_check_overflow_wrong_size(void)
16629+ __compiletime_error("Bad argument size for add_check_overflow");
16630
16631 /*
16632 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
16633@@ -67,6 +71,38 @@ extern void __add_wrong_size(void)
16634 __ret; \
16635 })
16636
16637+#ifdef CONFIG_PAX_REFCOUNT
16638+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
16639+ ({ \
16640+ __typeof__ (*(ptr)) __ret = (arg); \
16641+ switch (sizeof(*(ptr))) { \
16642+ case __X86_CASE_L: \
16643+ asm volatile (lock #op "l %0, %1\n" \
16644+ "jno 0f\n" \
16645+ "mov %0,%1\n" \
16646+ "int $4\n0:\n" \
16647+ _ASM_EXTABLE(0b, 0b) \
16648+ : "+r" (__ret), "+m" (*(ptr)) \
16649+ : : "memory", "cc"); \
16650+ break; \
16651+ case __X86_CASE_Q: \
16652+ asm volatile (lock #op "q %q0, %1\n" \
16653+ "jno 0f\n" \
16654+ "mov %0,%1\n" \
16655+ "int $4\n0:\n" \
16656+ _ASM_EXTABLE(0b, 0b) \
16657+ : "+r" (__ret), "+m" (*(ptr)) \
16658+ : : "memory", "cc"); \
16659+ break; \
16660+ default: \
16661+ __ ## op ## _check_overflow_wrong_size(); \
16662+ } \
16663+ __ret; \
16664+ })
16665+#else
16666+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
16667+#endif
16668+
16669 /*
16670 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
16671 * Since this is generally used to protect other memory information, we
16672@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
16673 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
16674 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
16675
16676+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
16677+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
16678+
16679 #define __add(ptr, inc, lock) \
16680 ({ \
16681 __typeof__ (*(ptr)) __ret = (inc); \
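xadd_check_overflow() above preserves xadd's contract: the instruction returns the old value, so add-and-return helpers compute i + xadd(&counter, i) to get the new value (as the atomic*_add_return() changes earlier do). A sketch with GCC's fetch-and-add builtin standing in for the locked xadd:

#include <stdio.h>

/* fetch-and-add returns the old value; add i again for the new value */
static int add_return_model(int i, int *counter)
{
	return i + __atomic_fetch_add(counter, i, __ATOMIC_SEQ_CST);
}

int main(void)
{
	int c = 40;
	printf("%d %d\n", add_return_model(2, &c), c);	/* 42 42 */
	return 0;
}
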
16682diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
16683index 59c6c40..5e0b22c 100644
16684--- a/arch/x86/include/asm/compat.h
16685+++ b/arch/x86/include/asm/compat.h
16686@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
16687 typedef u32 compat_uint_t;
16688 typedef u32 compat_ulong_t;
16689 typedef u64 __attribute__((aligned(4))) compat_u64;
16690-typedef u32 compat_uptr_t;
16691+typedef u32 __user compat_uptr_t;
16692
16693 struct compat_timespec {
16694 compat_time_t tv_sec;
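Tagging compat_uptr_t with __user makes sparse treat the stored 32-bit value as a user-space address, so code has to go through compat_ptr() and the usual copy helpers instead of dereferencing it directly. Under sparse the kernel defines the annotation roughly as below; outside sparse it compiles away:

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

/* mirrors the patched typedef: the integer value is a user address */
typedef unsigned int __user compat_uptr_t_model;

int main(void) { return 0; }
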
16695diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
16696index e265ff9..72c253b 100644
16697--- a/arch/x86/include/asm/cpufeature.h
16698+++ b/arch/x86/include/asm/cpufeature.h
16699@@ -203,7 +203,7 @@
16700 #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
16701 #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
16702 #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
16703-
16704+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
16705
16706 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
16707 #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
16708@@ -211,7 +211,7 @@
16709 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
16710 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
16711 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
16712-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
16713+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
16714 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
16715 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
16716 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
16717@@ -359,6 +359,7 @@ extern const char * const x86_power_flags[32];
16718 #undef cpu_has_centaur_mcr
16719 #define cpu_has_centaur_mcr 0
16720
16721+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
16722 #endif /* CONFIG_X86_64 */
16723
16724 #if __GNUC__ >= 4
16725@@ -411,7 +412,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16726
16727 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
16728 t_warn:
16729- warn_pre_alternatives();
16730+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
16731+ warn_pre_alternatives();
16732 return false;
16733 #endif
16734
16735@@ -431,7 +433,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16736 ".section .discard,\"aw\",@progbits\n"
16737 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16738 ".previous\n"
16739- ".section .altinstr_replacement,\"ax\"\n"
16740+ ".section .altinstr_replacement,\"a\"\n"
16741 "3: movb $1,%0\n"
16742 "4:\n"
16743 ".previous\n"
16744@@ -468,7 +470,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16745 " .byte 2b - 1b\n" /* src len */
16746 " .byte 4f - 3f\n" /* repl len */
16747 ".previous\n"
16748- ".section .altinstr_replacement,\"ax\"\n"
16749+ ".section .altinstr_replacement,\"a\"\n"
16750 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
16751 "4:\n"
16752 ".previous\n"
16753@@ -501,7 +503,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16754 ".section .discard,\"aw\",@progbits\n"
16755 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16756 ".previous\n"
16757- ".section .altinstr_replacement,\"ax\"\n"
16758+ ".section .altinstr_replacement,\"a\"\n"
16759 "3: movb $0,%0\n"
16760 "4:\n"
16761 ".previous\n"
16762@@ -515,7 +517,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16763 ".section .discard,\"aw\",@progbits\n"
16764 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
16765 ".previous\n"
16766- ".section .altinstr_replacement,\"ax\"\n"
16767+ ".section .altinstr_replacement,\"a\"\n"
16768 "5: movb $1,%0\n"
16769 "6:\n"
16770 ".previous\n"
16771diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
16772index 50d033a..37deb26 100644
16773--- a/arch/x86/include/asm/desc.h
16774+++ b/arch/x86/include/asm/desc.h
16775@@ -4,6 +4,7 @@
16776 #include <asm/desc_defs.h>
16777 #include <asm/ldt.h>
16778 #include <asm/mmu.h>
16779+#include <asm/pgtable.h>
16780
16781 #include <linux/smp.h>
16782 #include <linux/percpu.h>
16783@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16784
16785 desc->type = (info->read_exec_only ^ 1) << 1;
16786 desc->type |= info->contents << 2;
16787+ desc->type |= info->seg_not_present ^ 1;
16788
16789 desc->s = 1;
16790 desc->dpl = 0x3;
16791@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16792 }
16793
16794 extern struct desc_ptr idt_descr;
16795-extern gate_desc idt_table[];
16796-extern struct desc_ptr debug_idt_descr;
16797-extern gate_desc debug_idt_table[];
16798-
16799-struct gdt_page {
16800- struct desc_struct gdt[GDT_ENTRIES];
16801-} __attribute__((aligned(PAGE_SIZE)));
16802-
16803-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
16804+extern gate_desc idt_table[IDT_ENTRIES];
16805+extern const struct desc_ptr debug_idt_descr;
16806+extern gate_desc debug_idt_table[IDT_ENTRIES];
16807
16808+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
16809 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
16810 {
16811- return per_cpu(gdt_page, cpu).gdt;
16812+ return cpu_gdt_table[cpu];
16813 }
16814
16815 #ifdef CONFIG_X86_64
16816@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
16817 unsigned long base, unsigned dpl, unsigned flags,
16818 unsigned short seg)
16819 {
16820- gate->a = (seg << 16) | (base & 0xffff);
16821- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
16822+ gate->gate.offset_low = base;
16823+ gate->gate.seg = seg;
16824+ gate->gate.reserved = 0;
16825+ gate->gate.type = type;
16826+ gate->gate.s = 0;
16827+ gate->gate.dpl = dpl;
16828+ gate->gate.p = 1;
16829+ gate->gate.offset_high = base >> 16;
16830 }
16831
16832 #endif
16833@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
16834
16835 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
16836 {
16837+ pax_open_kernel();
16838 memcpy(&idt[entry], gate, sizeof(*gate));
16839+ pax_close_kernel();
16840 }
16841
16842 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
16843 {
16844+ pax_open_kernel();
16845 memcpy(&ldt[entry], desc, 8);
16846+ pax_close_kernel();
16847 }
16848
16849 static inline void
16850@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
16851 default: size = sizeof(*gdt); break;
16852 }
16853
16854+ pax_open_kernel();
16855 memcpy(&gdt[entry], desc, size);
16856+ pax_close_kernel();
16857 }
16858
16859 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
16860@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
16861
16862 static inline void native_load_tr_desc(void)
16863 {
16864+ pax_open_kernel();
16865 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
16866+ pax_close_kernel();
16867 }
16868
16869 static inline void native_load_gdt(const struct desc_ptr *dtr)
16870@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
16871 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
16872 unsigned int i;
16873
16874+ pax_open_kernel();
16875 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
16876 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
16877+ pax_close_kernel();
16878 }
16879
16880 #define _LDT_empty(info) \
16881@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
16882 preempt_enable();
16883 }
16884
16885-static inline unsigned long get_desc_base(const struct desc_struct *desc)
16886+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
16887 {
16888 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
16889 }
16890@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
16891 }
16892
16893 #ifdef CONFIG_X86_64
16894-static inline void set_nmi_gate(int gate, void *addr)
16895+static inline void set_nmi_gate(int gate, const void *addr)
16896 {
16897 gate_desc s;
16898
16899@@ -321,14 +334,14 @@ static inline void set_nmi_gate(int gate, void *addr)
16900 #endif
16901
16902 #ifdef CONFIG_TRACING
16903-extern struct desc_ptr trace_idt_descr;
16904-extern gate_desc trace_idt_table[];
16905+extern const struct desc_ptr trace_idt_descr;
16906+extern gate_desc trace_idt_table[IDT_ENTRIES];
16907 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16908 {
16909 write_idt_entry(trace_idt_table, entry, gate);
16910 }
16911
16912-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
16913+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
16914 unsigned dpl, unsigned ist, unsigned seg)
16915 {
16916 gate_desc s;
16917@@ -348,7 +361,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16918 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
16919 #endif
16920
16921-static inline void _set_gate(int gate, unsigned type, void *addr,
16922+static inline void _set_gate(int gate, unsigned type, const void *addr,
16923 unsigned dpl, unsigned ist, unsigned seg)
16924 {
16925 gate_desc s;
16926@@ -371,9 +384,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
16927 #define set_intr_gate(n, addr) \
16928 do { \
16929 BUG_ON((unsigned)n > 0xFF); \
16930- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
16931+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
16932 __KERNEL_CS); \
16933- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
16934+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
16935 0, 0, __KERNEL_CS); \
16936 } while (0)
16937
16938@@ -401,19 +414,19 @@ static inline void alloc_system_vector(int vector)
16939 /*
16940 * This routine sets up an interrupt gate at directory privilege level 3.
16941 */
16942-static inline void set_system_intr_gate(unsigned int n, void *addr)
16943+static inline void set_system_intr_gate(unsigned int n, const void *addr)
16944 {
16945 BUG_ON((unsigned)n > 0xFF);
16946 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
16947 }
16948
16949-static inline void set_system_trap_gate(unsigned int n, void *addr)
16950+static inline void set_system_trap_gate(unsigned int n, const void *addr)
16951 {
16952 BUG_ON((unsigned)n > 0xFF);
16953 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
16954 }
16955
16956-static inline void set_trap_gate(unsigned int n, void *addr)
16957+static inline void set_trap_gate(unsigned int n, const void *addr)
16958 {
16959 BUG_ON((unsigned)n > 0xFF);
16960 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
16961@@ -422,16 +435,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
16962 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
16963 {
16964 BUG_ON((unsigned)n > 0xFF);
16965- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
16966+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
16967 }
16968
16969-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
16970+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
16971 {
16972 BUG_ON((unsigned)n > 0xFF);
16973 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
16974 }
16975
16976-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
16977+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
16978 {
16979 BUG_ON((unsigned)n > 0xFF);
16980 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
16981@@ -503,4 +516,17 @@ static inline void load_current_idt(void)
16982 else
16983 load_idt((const struct desc_ptr *)&idt_descr);
16984 }
16985+
16986+#ifdef CONFIG_X86_32
16987+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
16988+{
16989+ struct desc_struct d;
16990+
16991+ if (likely(limit))
16992+ limit = (limit - 1UL) >> PAGE_SHIFT;
16993+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
16994+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
16995+}
16996+#endif
16997+
16998 #endif /* _ASM_X86_DESC_H */
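
Every descriptor-table store in the hunk above is now bracketed by
pax_open_kernel()/pax_close_kernel(): under KERNEXEC these toggle CR0.WP so
the otherwise read-only GDT/IDT/LDT pages accept the write (the native
implementation appears in the pgtable.h hunk further down; paravirt.h gains
PVOP variants). A hedged sketch of the idiom in kernel style; write_ro_object()
is a made-up helper, not code from this patch:

static inline void write_ro_object(void *dst, const void *src, size_t len)
{
	pax_open_kernel();	/* lift write protection (CR0.WP cleared) */
	memcpy(dst, src, len);	/* store into otherwise r/o kernel data   */
	pax_close_kernel();	/* restore write protection               */
}
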
16999diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
17000index 278441f..b95a174 100644
17001--- a/arch/x86/include/asm/desc_defs.h
17002+++ b/arch/x86/include/asm/desc_defs.h
17003@@ -31,6 +31,12 @@ struct desc_struct {
17004 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
17005 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
17006 };
17007+ struct {
17008+ u16 offset_low;
17009+ u16 seg;
17010+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
17011+ unsigned offset_high: 16;
17012+ } gate;
17013 };
17014 } __attribute__((packed));
17015
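
The new `gate` view is a bitfield spelling of exactly the words that
pack_gate() used to assemble by shift and mask (see the desc.h hunk above).
A standalone check that the two encodings agree; gate32 mirrors the added
struct, and the layout assumption is GCC on little-endian x86:

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct gate32 {		/* mirrors the `gate` member added above */
	uint16_t offset_low;
	uint16_t seg;
	unsigned int reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
	unsigned int offset_high : 16;
} __attribute__((packed));

int main(void)
{
	uint32_t base = 0xc0101234, seg = 0x60, type = 0xe, dpl = 0;
	struct gate32 g = {
		.offset_low = base & 0xffff, .seg = seg, .type = type,
		.dpl = dpl, .p = 1, .offset_high = base >> 16,
	};
	uint32_t words[2];

	memcpy(words, &g, sizeof(words));
	assert(words[0] == ((seg << 16) | (base & 0xffff)));
	assert(words[1] == ((base & 0xffff0000) |
			    (((0x80 | type | (dpl << 5)) & 0xff) << 8)));
	return 0;
}
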
17016diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
17017index ced283a..ffe04cc 100644
17018--- a/arch/x86/include/asm/div64.h
17019+++ b/arch/x86/include/asm/div64.h
17020@@ -39,7 +39,7 @@
17021 __mod; \
17022 })
17023
17024-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
17025+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
17026 {
17027 union {
17028 u64 v64;
17029diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
17030index 1a055c8..a1701de 100644
17031--- a/arch/x86/include/asm/elf.h
17032+++ b/arch/x86/include/asm/elf.h
17033@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
17034
17035 #include <asm/vdso.h>
17036
17037-#ifdef CONFIG_X86_64
17038-extern unsigned int vdso64_enabled;
17039-#endif
17040 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
17041 extern unsigned int vdso32_enabled;
17042 #endif
17043@@ -248,7 +245,25 @@ extern int force_personality32;
17044 the loader. We need to make sure that it is out of the way of the program
17045 that it will "exec", and that there is sufficient room for the brk. */
17046
17047+#ifdef CONFIG_PAX_SEGMEXEC
17048+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
17049+#else
17050 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
17051+#endif
17052+
17053+#ifdef CONFIG_PAX_ASLR
17054+#ifdef CONFIG_X86_32
17055+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
17056+
17057+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
17058+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
17059+#else
17060+#define PAX_ELF_ET_DYN_BASE 0x400000UL
17061+
17062+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
17063+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
17064+#endif
17065+#endif
17066
17067 /* This yields a mask that user programs can use to figure out what
17068 instruction set this CPU supports. This could be done in user space,
17069@@ -297,17 +312,13 @@ do { \
17070
17071 #define ARCH_DLINFO \
17072 do { \
17073- if (vdso64_enabled) \
17074- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
17075- (unsigned long __force)current->mm->context.vdso); \
17076+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
17077 } while (0)
17078
17079 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
17080 #define ARCH_DLINFO_X32 \
17081 do { \
17082- if (vdso64_enabled) \
17083- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
17084- (unsigned long __force)current->mm->context.vdso); \
17085+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
17086 } while (0)
17087
17088 #define AT_SYSINFO 32
17089@@ -322,10 +333,10 @@ else \
17090
17091 #endif /* !CONFIG_X86_32 */
17092
17093-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
17094+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
17095
17096 #define VDSO_ENTRY \
17097- ((unsigned long)current->mm->context.vdso + \
17098+ (current->mm->context.vdso + \
17099 selected_vdso32->sym___kernel_vsyscall)
17100
17101 struct linux_binprm;
17102@@ -337,9 +348,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
17103 int uses_interp);
17104 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
17105
17106-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
17107-#define arch_randomize_brk arch_randomize_brk
17108-
17109 /*
17110 * True on X86_32 or when emulating IA32 on X86_64
17111 */
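
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN above set how many random bits PaX
mixes into the mmap and stack bases: 15 or 16 on 32-bit depending on SEGMEXEC,
and TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3 for native 64-bit tasks. A hedged
sketch of how such a width is typically consumed; the real logic lives in the
ELF loader outside this excerpt, and pax_get_random_long() is assumed to be
the entropy helper the full patch provides elsewhere:

/* Illustrative only: add `len` bits of page-granular randomization. */
static unsigned long randomize_base(unsigned long base, unsigned int len)
{
	unsigned long delta = pax_get_random_long() & ((1UL << len) - 1);

	return base + (delta << PAGE_SHIFT);	/* keeps page alignment */
}
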
17112diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
17113index 77a99ac..39ff7f5 100644
17114--- a/arch/x86/include/asm/emergency-restart.h
17115+++ b/arch/x86/include/asm/emergency-restart.h
17116@@ -1,6 +1,6 @@
17117 #ifndef _ASM_X86_EMERGENCY_RESTART_H
17118 #define _ASM_X86_EMERGENCY_RESTART_H
17119
17120-extern void machine_emergency_restart(void);
17121+extern void machine_emergency_restart(void) __noreturn;
17122
17123 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
17124diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
17125index 1c7eefe..d0e4702 100644
17126--- a/arch/x86/include/asm/floppy.h
17127+++ b/arch/x86/include/asm/floppy.h
17128@@ -229,18 +229,18 @@ static struct fd_routine_l {
17129 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
17130 } fd_routine[] = {
17131 {
17132- request_dma,
17133- free_dma,
17134- get_dma_residue,
17135- dma_mem_alloc,
17136- hard_dma_setup
17137+ ._request_dma = request_dma,
17138+ ._free_dma = free_dma,
17139+ ._get_dma_residue = get_dma_residue,
17140+ ._dma_mem_alloc = dma_mem_alloc,
17141+ ._dma_setup = hard_dma_setup
17142 },
17143 {
17144- vdma_request_dma,
17145- vdma_nop,
17146- vdma_get_dma_residue,
17147- vdma_mem_alloc,
17148- vdma_dma_setup
17149+ ._request_dma = vdma_request_dma,
17150+ ._free_dma = vdma_nop,
17151+ ._get_dma_residue = vdma_get_dma_residue,
17152+ ._dma_mem_alloc = vdma_mem_alloc,
17153+ ._dma_setup = vdma_dma_setup
17154 }
17155 };
17156
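
The fd_routine[] conversion from positional to designated initializers recurs
throughout this patch: with the .field = value form the table stays correct
even when a structure-layout-randomization plugin reorders the members. In
miniature (demo_* names are invented):

struct demo_ops {
	int (*open)(void);
	int (*close)(void);
};

static int demo_open(void)  { return 0; }
static int demo_close(void) { return 0; }

static const struct demo_ops demo = {
	.close = demo_close,	/* member order no longer significant */
	.open  = demo_open,
};
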
17157diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
17158index 115e368..76ecf6c 100644
17159--- a/arch/x86/include/asm/fpu-internal.h
17160+++ b/arch/x86/include/asm/fpu-internal.h
17161@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
17162 #define user_insn(insn, output, input...) \
17163 ({ \
17164 int err; \
17165+ pax_open_userland(); \
17166 asm volatile(ASM_STAC "\n" \
17167- "1:" #insn "\n\t" \
17168+ "1:" \
17169+ __copyuser_seg \
17170+ #insn "\n\t" \
17171 "2: " ASM_CLAC "\n" \
17172 ".section .fixup,\"ax\"\n" \
17173 "3: movl $-1,%[err]\n" \
17174@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
17175 _ASM_EXTABLE(1b, 3b) \
17176 : [err] "=r" (err), output \
17177 : "0"(0), input); \
17178+ pax_close_userland(); \
17179 err; \
17180 })
17181
17182@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
17183 "fnclex\n\t"
17184 "emms\n\t"
17185 "fildl %P[addr]" /* set F?P to defined value */
17186- : : [addr] "m" (tsk->thread.fpu.has_fpu));
17187+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
17188 }
17189
17190 return fpu_restore_checking(&tsk->thread.fpu);
17191diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
17192index b4c1f54..e290c08 100644
17193--- a/arch/x86/include/asm/futex.h
17194+++ b/arch/x86/include/asm/futex.h
17195@@ -12,6 +12,7 @@
17196 #include <asm/smap.h>
17197
17198 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
17199+ typecheck(u32 __user *, uaddr); \
17200 asm volatile("\t" ASM_STAC "\n" \
17201 "1:\t" insn "\n" \
17202 "2:\t" ASM_CLAC "\n" \
17203@@ -20,15 +21,16 @@
17204 "\tjmp\t2b\n" \
17205 "\t.previous\n" \
17206 _ASM_EXTABLE(1b, 3b) \
17207- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
17208+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
17209 : "i" (-EFAULT), "0" (oparg), "1" (0))
17210
17211 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
17212+ typecheck(u32 __user *, uaddr); \
17213 asm volatile("\t" ASM_STAC "\n" \
17214 "1:\tmovl %2, %0\n" \
17215 "\tmovl\t%0, %3\n" \
17216 "\t" insn "\n" \
17217- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
17218+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
17219 "\tjnz\t1b\n" \
17220 "3:\t" ASM_CLAC "\n" \
17221 "\t.section .fixup,\"ax\"\n" \
17222@@ -38,7 +40,7 @@
17223 _ASM_EXTABLE(1b, 4b) \
17224 _ASM_EXTABLE(2b, 4b) \
17225 : "=&a" (oldval), "=&r" (ret), \
17226- "+m" (*uaddr), "=&r" (tem) \
17227+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
17228 : "r" (oparg), "i" (-EFAULT), "1" (0))
17229
17230 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17231@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17232
17233 pagefault_disable();
17234
17235+ pax_open_userland();
17236 switch (op) {
17237 case FUTEX_OP_SET:
17238- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
17239+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
17240 break;
17241 case FUTEX_OP_ADD:
17242- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
17243+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
17244 uaddr, oparg);
17245 break;
17246 case FUTEX_OP_OR:
17247@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17248 default:
17249 ret = -ENOSYS;
17250 }
17251+ pax_close_userland();
17252
17253 pagefault_enable();
17254
17255diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
17256index 4615906..788c817 100644
17257--- a/arch/x86/include/asm/hw_irq.h
17258+++ b/arch/x86/include/asm/hw_irq.h
17259@@ -164,8 +164,8 @@ extern void setup_ioapic_dest(void);
17260 extern void enable_IO_APIC(void);
17261
17262 /* Statistics */
17263-extern atomic_t irq_err_count;
17264-extern atomic_t irq_mis_count;
17265+extern atomic_unchecked_t irq_err_count;
17266+extern atomic_unchecked_t irq_mis_count;
17267
17268 /* EISA */
17269 extern void eisa_set_level_irq(unsigned int irq);
17270diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
17271index a203659..9889f1c 100644
17272--- a/arch/x86/include/asm/i8259.h
17273+++ b/arch/x86/include/asm/i8259.h
17274@@ -62,7 +62,7 @@ struct legacy_pic {
17275 void (*init)(int auto_eoi);
17276 int (*irq_pending)(unsigned int irq);
17277 void (*make_irq)(unsigned int irq);
17278-};
17279+} __do_const;
17280
17281 extern struct legacy_pic *legacy_pic;
17282 extern struct legacy_pic null_legacy_pic;
17283diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
17284index b8237d8..3e8864e 100644
17285--- a/arch/x86/include/asm/io.h
17286+++ b/arch/x86/include/asm/io.h
17287@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
17288 "m" (*(volatile type __force *)addr) barrier); }
17289
17290 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
17291-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
17292-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
17293+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
17294+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
17295
17296 build_mmio_read(__readb, "b", unsigned char, "=q", )
17297-build_mmio_read(__readw, "w", unsigned short, "=r", )
17298-build_mmio_read(__readl, "l", unsigned int, "=r", )
17299+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
17300+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
17301
17302 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
17303 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
17304@@ -109,7 +109,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
17305 * this function
17306 */
17307
17308-static inline phys_addr_t virt_to_phys(volatile void *address)
17309+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
17310 {
17311 return __pa(address);
17312 }
17313@@ -185,7 +185,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
17314 return ioremap_nocache(offset, size);
17315 }
17316
17317-extern void iounmap(volatile void __iomem *addr);
17318+extern void iounmap(const volatile void __iomem *addr);
17319
17320 extern void set_iounmap_nonlazy(void);
17321
17322@@ -195,6 +195,17 @@ extern void set_iounmap_nonlazy(void);
17323
17324 #include <linux/vmalloc.h>
17325
17326+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17327+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17328+{
17329+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17330+}
17331+
17332+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17333+{
17334+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17335+}
17336+
17337 /*
17338 * Convert a virtual cached pointer to an uncached pointer
17339 */
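
The two range checks added to io.h above reject /dev/mem style accesses that
would run past the CPU's physical address width: x86_phys_bits - PAGE_SHIFT is
the number of valid PFN bits, so any access whose last page frame reaches
1 << (x86_phys_bits - PAGE_SHIFT) is refused. The arithmetic, worked as a
standalone example with an assumed 36-bit part:

#include <stdio.h>

int main(void)
{
	unsigned int x86_phys_bits = 36, page_shift = 12;	/* assumed CPU */
	unsigned long long bound = 1ULL << (x86_phys_bits - page_shift);

	unsigned long long addr = 0xffffff000ULL, count = 0x2000;
	unsigned long long last = (addr + count + (1ULL << page_shift) - 1)
				  >> page_shift;

	/* last = 0x1000001 > bound = 0x1000000: this access is rejected */
	printf("bound=%#llx last_pfn=%#llx ok=%d\n", bound, last, last < bound);
	return 0;
}
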
17340diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17341index 0a8b519..80e7d5b 100644
17342--- a/arch/x86/include/asm/irqflags.h
17343+++ b/arch/x86/include/asm/irqflags.h
17344@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17345 sti; \
17346 sysexit
17347
17348+#define GET_CR0_INTO_RDI mov %cr0, %rdi
17349+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17350+#define GET_CR3_INTO_RDI mov %cr3, %rdi
17351+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17352+
17353 #else
17354 #define INTERRUPT_RETURN iret
17355 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17356diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17357index 53cdfb2..d1369e6 100644
17358--- a/arch/x86/include/asm/kprobes.h
17359+++ b/arch/x86/include/asm/kprobes.h
17360@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
17361 #define RELATIVEJUMP_SIZE 5
17362 #define RELATIVECALL_OPCODE 0xe8
17363 #define RELATIVE_ADDR_SIZE 4
17364-#define MAX_STACK_SIZE 64
17365-#define MIN_STACK_SIZE(ADDR) \
17366- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17367- THREAD_SIZE - (unsigned long)(ADDR))) \
17368- ? (MAX_STACK_SIZE) \
17369- : (((unsigned long)current_thread_info()) + \
17370- THREAD_SIZE - (unsigned long)(ADDR)))
17371+#define MAX_STACK_SIZE 64UL
17372+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17373
17374 #define flush_insn_slot(p) do { } while (0)
17375
17376diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17377index 4ad6560..75c7bdd 100644
17378--- a/arch/x86/include/asm/local.h
17379+++ b/arch/x86/include/asm/local.h
17380@@ -10,33 +10,97 @@ typedef struct {
17381 atomic_long_t a;
17382 } local_t;
17383
17384+typedef struct {
17385+ atomic_long_unchecked_t a;
17386+} local_unchecked_t;
17387+
17388 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17389
17390 #define local_read(l) atomic_long_read(&(l)->a)
17391+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17392 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17393+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17394
17395 static inline void local_inc(local_t *l)
17396 {
17397- asm volatile(_ASM_INC "%0"
17398+ asm volatile(_ASM_INC "%0\n"
17399+
17400+#ifdef CONFIG_PAX_REFCOUNT
17401+ "jno 0f\n"
17402+ _ASM_DEC "%0\n"
17403+ "int $4\n0:\n"
17404+ _ASM_EXTABLE(0b, 0b)
17405+#endif
17406+
17407+ : "+m" (l->a.counter));
17408+}
17409+
17410+static inline void local_inc_unchecked(local_unchecked_t *l)
17411+{
17412+ asm volatile(_ASM_INC "%0\n"
17413 : "+m" (l->a.counter));
17414 }
17415
17416 static inline void local_dec(local_t *l)
17417 {
17418- asm volatile(_ASM_DEC "%0"
17419+ asm volatile(_ASM_DEC "%0\n"
17420+
17421+#ifdef CONFIG_PAX_REFCOUNT
17422+ "jno 0f\n"
17423+ _ASM_INC "%0\n"
17424+ "int $4\n0:\n"
17425+ _ASM_EXTABLE(0b, 0b)
17426+#endif
17427+
17428+ : "+m" (l->a.counter));
17429+}
17430+
17431+static inline void local_dec_unchecked(local_unchecked_t *l)
17432+{
17433+ asm volatile(_ASM_DEC "%0\n"
17434 : "+m" (l->a.counter));
17435 }
17436
17437 static inline void local_add(long i, local_t *l)
17438 {
17439- asm volatile(_ASM_ADD "%1,%0"
17440+ asm volatile(_ASM_ADD "%1,%0\n"
17441+
17442+#ifdef CONFIG_PAX_REFCOUNT
17443+ "jno 0f\n"
17444+ _ASM_SUB "%1,%0\n"
17445+ "int $4\n0:\n"
17446+ _ASM_EXTABLE(0b, 0b)
17447+#endif
17448+
17449+ : "+m" (l->a.counter)
17450+ : "ir" (i));
17451+}
17452+
17453+static inline void local_add_unchecked(long i, local_unchecked_t *l)
17454+{
17455+ asm volatile(_ASM_ADD "%1,%0\n"
17456 : "+m" (l->a.counter)
17457 : "ir" (i));
17458 }
17459
17460 static inline void local_sub(long i, local_t *l)
17461 {
17462- asm volatile(_ASM_SUB "%1,%0"
17463+ asm volatile(_ASM_SUB "%1,%0\n"
17464+
17465+#ifdef CONFIG_PAX_REFCOUNT
17466+ "jno 0f\n"
17467+ _ASM_ADD "%1,%0\n"
17468+ "int $4\n0:\n"
17469+ _ASM_EXTABLE(0b, 0b)
17470+#endif
17471+
17472+ : "+m" (l->a.counter)
17473+ : "ir" (i));
17474+}
17475+
17476+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17477+{
17478+ asm volatile(_ASM_SUB "%1,%0\n"
17479 : "+m" (l->a.counter)
17480 : "ir" (i));
17481 }
17482@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17483 */
17484 static inline int local_sub_and_test(long i, local_t *l)
17485 {
17486- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17487+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17488 }
17489
17490 /**
17491@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17492 */
17493 static inline int local_dec_and_test(local_t *l)
17494 {
17495- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17496+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17497 }
17498
17499 /**
17500@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17501 */
17502 static inline int local_inc_and_test(local_t *l)
17503 {
17504- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17505+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17506 }
17507
17508 /**
17509@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17510 */
17511 static inline int local_add_negative(long i, local_t *l)
17512 {
17513- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17514+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17515 }
17516
17517 /**
17518@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17519 static inline long local_add_return(long i, local_t *l)
17520 {
17521 long __i = i;
17522+ asm volatile(_ASM_XADD "%0, %1\n"
17523+
17524+#ifdef CONFIG_PAX_REFCOUNT
17525+ "jno 0f\n"
17526+ _ASM_MOV "%0,%1\n"
17527+ "int $4\n0:\n"
17528+ _ASM_EXTABLE(0b, 0b)
17529+#endif
17530+
17531+ : "+r" (i), "+m" (l->a.counter)
17532+ : : "memory");
17533+ return i + __i;
17534+}
17535+
17536+/**
17537+ * local_add_return_unchecked - add and return
17538+ * @i: integer value to add
17539+ * @l: pointer to type local_unchecked_t
17540+ *
17541+ * Atomically adds @i to @l and returns @i + @l
17542+ */
17543+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
17544+{
17545+ long __i = i;
17546 asm volatile(_ASM_XADD "%0, %1;"
17547 : "+r" (i), "+m" (l->a.counter)
17548 : : "memory");
17549@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
17550
17551 #define local_cmpxchg(l, o, n) \
17552 (cmpxchg_local(&((l)->a.counter), (o), (n)))
17553+#define local_cmpxchg_unchecked(l, o, n) \
17554+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
17555 /* Always has a lock prefix */
17556 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
17557
17558diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
17559new file mode 100644
17560index 0000000..2bfd3ba
17561--- /dev/null
17562+++ b/arch/x86/include/asm/mman.h
17563@@ -0,0 +1,15 @@
17564+#ifndef _X86_MMAN_H
17565+#define _X86_MMAN_H
17566+
17567+#include <uapi/asm/mman.h>
17568+
17569+#ifdef __KERNEL__
17570+#ifndef __ASSEMBLY__
17571+#ifdef CONFIG_X86_32
17572+#define arch_mmap_check i386_mmap_check
17573+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
17574+#endif
17575+#endif
17576+#endif
17577+
17578+#endif /* X86_MMAN_H */
17579diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
17580index 876e74e..e20bfb1 100644
17581--- a/arch/x86/include/asm/mmu.h
17582+++ b/arch/x86/include/asm/mmu.h
17583@@ -9,7 +9,7 @@
17584 * we put the segment information here.
17585 */
17586 typedef struct {
17587- void *ldt;
17588+ struct desc_struct *ldt;
17589 int size;
17590
17591 #ifdef CONFIG_X86_64
17592@@ -18,7 +18,19 @@ typedef struct {
17593 #endif
17594
17595 struct mutex lock;
17596- void __user *vdso;
17597+ unsigned long vdso;
17598+
17599+#ifdef CONFIG_X86_32
17600+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17601+ unsigned long user_cs_base;
17602+ unsigned long user_cs_limit;
17603+
17604+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17605+ cpumask_t cpu_user_cs_mask;
17606+#endif
17607+
17608+#endif
17609+#endif
17610 } mm_context_t;
17611
17612 #ifdef CONFIG_SMP
17613diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
17614index be12c53..07fd3ca 100644
17615--- a/arch/x86/include/asm/mmu_context.h
17616+++ b/arch/x86/include/asm/mmu_context.h
17617@@ -24,6 +24,20 @@ void destroy_context(struct mm_struct *mm);
17618
17619 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
17620 {
17621+
17622+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17623+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
17624+ unsigned int i;
17625+ pgd_t *pgd;
17626+
17627+ pax_open_kernel();
17628+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
17629+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
17630+ set_pgd_batched(pgd+i, native_make_pgd(0));
17631+ pax_close_kernel();
17632+ }
17633+#endif
17634+
17635 #ifdef CONFIG_SMP
17636 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
17637 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
17638@@ -34,16 +48,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17639 struct task_struct *tsk)
17640 {
17641 unsigned cpu = smp_processor_id();
17642+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17643+ int tlbstate = TLBSTATE_OK;
17644+#endif
17645
17646 if (likely(prev != next)) {
17647 #ifdef CONFIG_SMP
17648+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17649+ tlbstate = this_cpu_read(cpu_tlbstate.state);
17650+#endif
17651 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17652 this_cpu_write(cpu_tlbstate.active_mm, next);
17653 #endif
17654 cpumask_set_cpu(cpu, mm_cpumask(next));
17655
17656 /* Re-load page tables */
17657+#ifdef CONFIG_PAX_PER_CPU_PGD
17658+ pax_open_kernel();
17659+
17660+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17661+ if (static_cpu_has(X86_FEATURE_PCID))
17662+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17663+ else
17664+#endif
17665+
17666+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17667+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17668+ pax_close_kernel();
17669+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17670+
17671+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17672+ if (static_cpu_has(X86_FEATURE_PCID)) {
17673+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17674+ u64 descriptor[2];
17675+ descriptor[0] = PCID_USER;
17676+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17677+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17678+ descriptor[0] = PCID_KERNEL;
17679+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17680+ }
17681+ } else {
17682+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17683+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17684+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17685+ else
17686+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17687+ }
17688+ } else
17689+#endif
17690+
17691+ load_cr3(get_cpu_pgd(cpu, kernel));
17692+#else
17693 load_cr3(next->pgd);
17694+#endif
17695
17696 /* Stop flush ipis for the previous mm */
17697 cpumask_clear_cpu(cpu, mm_cpumask(prev));
17698@@ -51,9 +108,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17699 /* Load the LDT, if the LDT is different: */
17700 if (unlikely(prev->context.ldt != next->context.ldt))
17701 load_LDT_nolock(&next->context);
17702+
17703+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17704+ if (!(__supported_pte_mask & _PAGE_NX)) {
17705+ smp_mb__before_atomic();
17706+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
17707+ smp_mb__after_atomic();
17708+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17709+ }
17710+#endif
17711+
17712+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17713+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
17714+ prev->context.user_cs_limit != next->context.user_cs_limit))
17715+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17716+#ifdef CONFIG_SMP
17717+ else if (unlikely(tlbstate != TLBSTATE_OK))
17718+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17719+#endif
17720+#endif
17721+
17722 }
17723+ else {
17724+
17725+#ifdef CONFIG_PAX_PER_CPU_PGD
17726+ pax_open_kernel();
17727+
17728+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17729+ if (static_cpu_has(X86_FEATURE_PCID))
17730+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17731+ else
17732+#endif
17733+
17734+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17735+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17736+ pax_close_kernel();
17737+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17738+
17739+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17740+ if (static_cpu_has(X86_FEATURE_PCID)) {
17741+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17742+ u64 descriptor[2];
17743+ descriptor[0] = PCID_USER;
17744+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17745+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17746+ descriptor[0] = PCID_KERNEL;
17747+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17748+ }
17749+ } else {
17750+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17751+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17752+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17753+ else
17754+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17755+ }
17756+ } else
17757+#endif
17758+
17759+ load_cr3(get_cpu_pgd(cpu, kernel));
17760+#endif
17761+
17762 #ifdef CONFIG_SMP
17763- else {
17764 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17765 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
17766
17767@@ -70,11 +185,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17768 * tlb flush IPI delivery. We must reload CR3
17769 * to make sure to use no freed page tables.
17770 */
17771+
17772+#ifndef CONFIG_PAX_PER_CPU_PGD
17773 load_cr3(next->pgd);
17774+#endif
17775+
17776 load_LDT_nolock(&next->context);
17777+
17778+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17779+ if (!(__supported_pte_mask & _PAGE_NX))
17780+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17781+#endif
17782+
17783+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17784+#ifdef CONFIG_PAX_PAGEEXEC
17785+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
17786+#endif
17787+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17788+#endif
17789+
17790 }
17791+#endif
17792 }
17793-#endif
17794 }
17795
17796 #define activate_mm(prev, next) \
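
Both switch_mm() paths above flush the user address space identically: with
INVPCID available, one single-context invalidation per PCID; without it, CR3
reloads (using PCID_NOFLUSH where STRONGUDEREF permits) do the flushing. The
repeated INVPCID sequence, factored into a helper for readability;
__ASM_INVPCID, INVPCID_SINGLE_CONTEXT and the PCID_* values come from
elsewhere in this patch:

/* Sketch only: descriptor[0] carries the PCID, descriptor[1] the
 * (ignored) linear address for single-context invalidations. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
	u64 descriptor[2] = { pcid, 0 };

	asm volatile(__ASM_INVPCID
		     : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT)
		     : "memory");
}
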
17797diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
17798index e3b7819..b257c64 100644
17799--- a/arch/x86/include/asm/module.h
17800+++ b/arch/x86/include/asm/module.h
17801@@ -5,6 +5,7 @@
17802
17803 #ifdef CONFIG_X86_64
17804 /* X86_64 does not define MODULE_PROC_FAMILY */
17805+#define MODULE_PROC_FAMILY ""
17806 #elif defined CONFIG_M486
17807 #define MODULE_PROC_FAMILY "486 "
17808 #elif defined CONFIG_M586
17809@@ -57,8 +58,20 @@
17810 #error unknown processor family
17811 #endif
17812
17813-#ifdef CONFIG_X86_32
17814-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
17815+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
17816+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
17817+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
17818+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
17819+#else
17820+#define MODULE_PAX_KERNEXEC ""
17821 #endif
17822
17823+#ifdef CONFIG_PAX_MEMORY_UDEREF
17824+#define MODULE_PAX_UDEREF "UDEREF "
17825+#else
17826+#define MODULE_PAX_UDEREF ""
17827+#endif
17828+
17829+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
17830+
17831 #endif /* _ASM_X86_MODULE_H */
17832diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
17833index 5f2fc44..106caa6 100644
17834--- a/arch/x86/include/asm/nmi.h
17835+++ b/arch/x86/include/asm/nmi.h
17836@@ -36,26 +36,35 @@ enum {
17837
17838 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
17839
17840+struct nmiaction;
17841+
17842+struct nmiwork {
17843+ const struct nmiaction *action;
17844+ u64 max_duration;
17845+ struct irq_work irq_work;
17846+};
17847+
17848 struct nmiaction {
17849 struct list_head list;
17850 nmi_handler_t handler;
17851- u64 max_duration;
17852- struct irq_work irq_work;
17853 unsigned long flags;
17854 const char *name;
17855-};
17856+ struct nmiwork *work;
17857+} __do_const;
17858
17859 #define register_nmi_handler(t, fn, fg, n, init...) \
17860 ({ \
17861- static struct nmiaction init fn##_na = { \
17862+ static struct nmiwork fn##_nw; \
17863+ static const struct nmiaction init fn##_na = { \
17864 .handler = (fn), \
17865 .name = (n), \
17866 .flags = (fg), \
17867+ .work = &fn##_nw, \
17868 }; \
17869 __register_nmi_handler((t), &fn##_na); \
17870 })
17871
17872-int __register_nmi_handler(unsigned int, struct nmiaction *);
17873+int __register_nmi_handler(unsigned int, const struct nmiaction *);
17874
17875 void unregister_nmi_handler(unsigned int, const char *);
17876
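
The nmiaction rework is this patch's standard "constify" recipe: the mutable
bookkeeping (max_duration, irq_work) moves into a companion struct nmiwork so
the registration record itself can be const/__do_const and placed in
read-only memory, linked to its state by a single pointer. The shape of the
split, with invented demo_* names:

struct demo_state {			/* mutable side, ordinary .data    */
	unsigned long long max_duration;
};

struct demo_action {			/* immutable side, may live in r/o */
	int (*handler)(void);
	const char *name;
	struct demo_state *work;	/* sole link to mutable state      */
};

static int demo_handler(void) { return 0; }

static struct demo_state demo_state;
static const struct demo_action demo_action = {
	.handler = demo_handler,
	.name	 = "demo",
	.work	 = &demo_state,
};
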
17877diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
17878index 775873d..04cd306 100644
17879--- a/arch/x86/include/asm/page.h
17880+++ b/arch/x86/include/asm/page.h
17881@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17882 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
17883
17884 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
17885+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
17886
17887 #define __boot_va(x) __va(x)
17888 #define __boot_pa(x) __pa(x)
17889@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17890 * virt_to_page(kaddr) returns a valid pointer if and only if
17891 * virt_addr_valid(kaddr) returns true.
17892 */
17893-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17894 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
17895 extern bool __virt_addr_valid(unsigned long kaddr);
17896 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
17897
17898+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
17899+#define virt_to_page(kaddr) \
17900+ ({ \
17901+ const void *__kaddr = (const void *)(kaddr); \
17902+ BUG_ON(!virt_addr_valid(__kaddr)); \
17903+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
17904+ })
17905+#else
17906+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17907+#endif
17908+
17909 #endif /* __ASSEMBLY__ */
17910
17911 #include <asm-generic/memory_model.h>
17912diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
17913index 0f1ddee..e2fc3d1 100644
17914--- a/arch/x86/include/asm/page_64.h
17915+++ b/arch/x86/include/asm/page_64.h
17916@@ -7,9 +7,9 @@
17917
17918 /* duplicated to the one in bootmem.h */
17919 extern unsigned long max_pfn;
17920-extern unsigned long phys_base;
17921+extern const unsigned long phys_base;
17922
17923-static inline unsigned long __phys_addr_nodebug(unsigned long x)
17924+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
17925 {
17926 unsigned long y = x - __START_KERNEL_map;
17927
17928diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
17929index cd6e1610..70f4418 100644
17930--- a/arch/x86/include/asm/paravirt.h
17931+++ b/arch/x86/include/asm/paravirt.h
17932@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
17933 return (pmd_t) { ret };
17934 }
17935
17936-static inline pmdval_t pmd_val(pmd_t pmd)
17937+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
17938 {
17939 pmdval_t ret;
17940
17941@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
17942 val);
17943 }
17944
17945+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17946+{
17947+ pgdval_t val = native_pgd_val(pgd);
17948+
17949+ if (sizeof(pgdval_t) > sizeof(long))
17950+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
17951+ val, (u64)val >> 32);
17952+ else
17953+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
17954+ val);
17955+}
17956+
17957 static inline void pgd_clear(pgd_t *pgdp)
17958 {
17959 set_pgd(pgdp, __pgd(0));
17960@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
17961 pv_mmu_ops.set_fixmap(idx, phys, flags);
17962 }
17963
17964+#ifdef CONFIG_PAX_KERNEXEC
17965+static inline unsigned long pax_open_kernel(void)
17966+{
17967+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
17968+}
17969+
17970+static inline unsigned long pax_close_kernel(void)
17971+{
17972+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
17973+}
17974+#else
17975+static inline unsigned long pax_open_kernel(void) { return 0; }
17976+static inline unsigned long pax_close_kernel(void) { return 0; }
17977+#endif
17978+
17979 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
17980
17981 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
17982@@ -906,7 +933,7 @@ extern void default_banner(void);
17983
17984 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
17985 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
17986-#define PARA_INDIRECT(addr) *%cs:addr
17987+#define PARA_INDIRECT(addr) *%ss:addr
17988 #endif
17989
17990 #define INTERRUPT_RETURN \
17991@@ -981,6 +1008,21 @@ extern void default_banner(void);
17992 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
17993 CLBR_NONE, \
17994 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
17995+
17996+#define GET_CR0_INTO_RDI \
17997+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
17998+ mov %rax,%rdi
17999+
18000+#define SET_RDI_INTO_CR0 \
18001+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
18002+
18003+#define GET_CR3_INTO_RDI \
18004+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
18005+ mov %rax,%rdi
18006+
18007+#define SET_RDI_INTO_CR3 \
18008+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
18009+
18010 #endif /* CONFIG_X86_32 */
18011
18012 #endif /* __ASSEMBLY__ */
18013diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
18014index 7549b8b..f0edfda 100644
18015--- a/arch/x86/include/asm/paravirt_types.h
18016+++ b/arch/x86/include/asm/paravirt_types.h
18017@@ -84,7 +84,7 @@ struct pv_init_ops {
18018 */
18019 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
18020 unsigned long addr, unsigned len);
18021-};
18022+} __no_const __no_randomize_layout;
18023
18024
18025 struct pv_lazy_ops {
18026@@ -92,13 +92,13 @@ struct pv_lazy_ops {
18027 void (*enter)(void);
18028 void (*leave)(void);
18029 void (*flush)(void);
18030-};
18031+} __no_randomize_layout;
18032
18033 struct pv_time_ops {
18034 unsigned long long (*sched_clock)(void);
18035 unsigned long long (*steal_clock)(int cpu);
18036 unsigned long (*get_tsc_khz)(void);
18037-};
18038+} __no_const __no_randomize_layout;
18039
18040 struct pv_cpu_ops {
18041 /* hooks for various privileged instructions */
18042@@ -192,7 +192,7 @@ struct pv_cpu_ops {
18043
18044 void (*start_context_switch)(struct task_struct *prev);
18045 void (*end_context_switch)(struct task_struct *next);
18046-};
18047+} __no_const __no_randomize_layout;
18048
18049 struct pv_irq_ops {
18050 /*
18051@@ -215,7 +215,7 @@ struct pv_irq_ops {
18052 #ifdef CONFIG_X86_64
18053 void (*adjust_exception_frame)(void);
18054 #endif
18055-};
18056+} __no_randomize_layout;
18057
18058 struct pv_apic_ops {
18059 #ifdef CONFIG_X86_LOCAL_APIC
18060@@ -223,7 +223,7 @@ struct pv_apic_ops {
18061 unsigned long start_eip,
18062 unsigned long start_esp);
18063 #endif
18064-};
18065+} __no_const __no_randomize_layout;
18066
18067 struct pv_mmu_ops {
18068 unsigned long (*read_cr2)(void);
18069@@ -313,6 +313,7 @@ struct pv_mmu_ops {
18070 struct paravirt_callee_save make_pud;
18071
18072 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
18073+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
18074 #endif /* PAGETABLE_LEVELS == 4 */
18075 #endif /* PAGETABLE_LEVELS >= 3 */
18076
18077@@ -324,7 +325,13 @@ struct pv_mmu_ops {
18078 an mfn. We can tell which is which from the index. */
18079 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
18080 phys_addr_t phys, pgprot_t flags);
18081-};
18082+
18083+#ifdef CONFIG_PAX_KERNEXEC
18084+ unsigned long (*pax_open_kernel)(void);
18085+ unsigned long (*pax_close_kernel)(void);
18086+#endif
18087+
18088+} __no_randomize_layout;
18089
18090 struct arch_spinlock;
18091 #ifdef CONFIG_SMP
18092@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
18093 struct pv_lock_ops {
18094 struct paravirt_callee_save lock_spinning;
18095 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
18096-};
18097+} __no_randomize_layout;
18098
18099 /* This contains all the paravirt structures: we get a convenient
18100 * number for each function using the offset which we use to indicate
18101- * what to patch. */
18102+ * what to patch.
18103+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
18104+ */
18105+
18106 struct paravirt_patch_template {
18107 struct pv_init_ops pv_init_ops;
18108 struct pv_time_ops pv_time_ops;
18109@@ -349,7 +359,7 @@ struct paravirt_patch_template {
18110 struct pv_apic_ops pv_apic_ops;
18111 struct pv_mmu_ops pv_mmu_ops;
18112 struct pv_lock_ops pv_lock_ops;
18113-};
18114+} __no_randomize_layout;
18115
18116 extern struct pv_info pv_info;
18117 extern struct pv_init_ops pv_init_ops;
18118diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
18119index c4412e9..90e88c5 100644
18120--- a/arch/x86/include/asm/pgalloc.h
18121+++ b/arch/x86/include/asm/pgalloc.h
18122@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
18123 pmd_t *pmd, pte_t *pte)
18124 {
18125 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
18126+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
18127+}
18128+
18129+static inline void pmd_populate_user(struct mm_struct *mm,
18130+ pmd_t *pmd, pte_t *pte)
18131+{
18132+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
18133 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
18134 }
18135
18136@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
18137
18138 #ifdef CONFIG_X86_PAE
18139 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
18140+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
18141+{
18142+ pud_populate(mm, pudp, pmd);
18143+}
18144 #else /* !CONFIG_X86_PAE */
18145 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
18146 {
18147 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
18148 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
18149 }
18150+
18151+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
18152+{
18153+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
18154+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
18155+}
18156 #endif /* CONFIG_X86_PAE */
18157
18158 #if PAGETABLE_LEVELS > 3
18159@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
18160 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
18161 }
18162
18163+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
18164+{
18165+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
18166+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
18167+}
18168+
18169 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
18170 {
18171 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
18172diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
18173index 206a87f..1623b06 100644
18174--- a/arch/x86/include/asm/pgtable-2level.h
18175+++ b/arch/x86/include/asm/pgtable-2level.h
18176@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
18177
18178 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18179 {
18180+ pax_open_kernel();
18181 *pmdp = pmd;
18182+ pax_close_kernel();
18183 }
18184
18185 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18186diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
18187index 81bb91b..9392125 100644
18188--- a/arch/x86/include/asm/pgtable-3level.h
18189+++ b/arch/x86/include/asm/pgtable-3level.h
18190@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18191
18192 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18193 {
18194+ pax_open_kernel();
18195 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
18196+ pax_close_kernel();
18197 }
18198
18199 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18200 {
18201+ pax_open_kernel();
18202 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
18203+ pax_close_kernel();
18204 }
18205
18206 /*
18207diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
18208index aa97a07..f169e5b 100644
18209--- a/arch/x86/include/asm/pgtable.h
18210+++ b/arch/x86/include/asm/pgtable.h
18211@@ -46,6 +46,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18212
18213 #ifndef __PAGETABLE_PUD_FOLDED
18214 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
18215+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
18216 #define pgd_clear(pgd) native_pgd_clear(pgd)
18217 #endif
18218
18219@@ -83,12 +84,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18220
18221 #define arch_end_context_switch(prev) do {} while(0)
18222
18223+#define pax_open_kernel() native_pax_open_kernel()
18224+#define pax_close_kernel() native_pax_close_kernel()
18225 #endif /* CONFIG_PARAVIRT */
18226
18227+#define __HAVE_ARCH_PAX_OPEN_KERNEL
18228+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
18229+
18230+#ifdef CONFIG_PAX_KERNEXEC
18231+static inline unsigned long native_pax_open_kernel(void)
18232+{
18233+ unsigned long cr0;
18234+
18235+ preempt_disable();
18236+ barrier();
18237+ cr0 = read_cr0() ^ X86_CR0_WP;
18238+ BUG_ON(cr0 & X86_CR0_WP);
18239+ write_cr0(cr0);
18240+ return cr0 ^ X86_CR0_WP;
18241+}
18242+
18243+static inline unsigned long native_pax_close_kernel(void)
18244+{
18245+ unsigned long cr0;
18246+
18247+ cr0 = read_cr0() ^ X86_CR0_WP;
18248+ BUG_ON(!(cr0 & X86_CR0_WP));
18249+ write_cr0(cr0);
18250+ barrier();
18251+ preempt_enable_no_resched();
18252+ return cr0 ^ X86_CR0_WP;
18253+}
18254+#else
18255+static inline unsigned long native_pax_open_kernel(void) { return 0; }
18256+static inline unsigned long native_pax_close_kernel(void) { return 0; }
18257+#endif
18258+
18259 /*
18260 * The following only work if pte_present() is true.
18261 * Undefined behaviour if not..
18262 */
18263+static inline int pte_user(pte_t pte)
18264+{
18265+ return pte_val(pte) & _PAGE_USER;
18266+}
18267+
18268 static inline int pte_dirty(pte_t pte)
18269 {
18270 return pte_flags(pte) & _PAGE_DIRTY;
18271@@ -155,6 +195,11 @@ static inline unsigned long pud_pfn(pud_t pud)
18272 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
18273 }
18274
18275+static inline unsigned long pgd_pfn(pgd_t pgd)
18276+{
18277+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
18278+}
18279+
18280 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
18281
18282 static inline int pmd_large(pmd_t pte)
18283@@ -208,9 +253,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
18284 return pte_clear_flags(pte, _PAGE_RW);
18285 }
18286
18287+static inline pte_t pte_mkread(pte_t pte)
18288+{
18289+ return __pte(pte_val(pte) | _PAGE_USER);
18290+}
18291+
18292 static inline pte_t pte_mkexec(pte_t pte)
18293 {
18294- return pte_clear_flags(pte, _PAGE_NX);
18295+#ifdef CONFIG_X86_PAE
18296+ if (__supported_pte_mask & _PAGE_NX)
18297+ return pte_clear_flags(pte, _PAGE_NX);
18298+ else
18299+#endif
18300+ return pte_set_flags(pte, _PAGE_USER);
18301+}
18302+
18303+static inline pte_t pte_exprotect(pte_t pte)
18304+{
18305+#ifdef CONFIG_X86_PAE
18306+ if (__supported_pte_mask & _PAGE_NX)
18307+ return pte_set_flags(pte, _PAGE_NX);
18308+ else
18309+#endif
18310+ return pte_clear_flags(pte, _PAGE_USER);
18311 }
18312
18313 static inline pte_t pte_mkdirty(pte_t pte)
18314@@ -440,6 +505,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18315 #endif
18316
18317 #ifndef __ASSEMBLY__
18318+
18319+#ifdef CONFIG_PAX_PER_CPU_PGD
18320+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18321+enum cpu_pgd_type {kernel = 0, user = 1};
18322+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18323+{
18324+ return cpu_pgd[cpu][type];
18325+}
18326+#endif
18327+
18328 #include <linux/mm_types.h>
18329 #include <linux/mmdebug.h>
18330 #include <linux/log2.h>
18331@@ -586,7 +661,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18332 * Currently stuck as a macro due to indirect forward reference to
18333 * linux/mmzone.h's __section_mem_map_addr() definition:
18334 */
18335-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18336+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18337
18338 /* Find an entry in the second-level page table.. */
18339 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18340@@ -626,7 +701,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18341 * Currently stuck as a macro due to indirect forward reference to
18342 * linux/mmzone.h's __section_mem_map_addr() definition:
18343 */
18344-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18345+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18346
18347 /* to find an entry in a page-table-directory. */
18348 static inline unsigned long pud_index(unsigned long address)
18349@@ -641,7 +716,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18350
18351 static inline int pgd_bad(pgd_t pgd)
18352 {
18353- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18354+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18355 }
18356
18357 static inline int pgd_none(pgd_t pgd)
18358@@ -664,7 +739,12 @@ static inline int pgd_none(pgd_t pgd)
18359 * pgd_offset() returns a (pgd_t *)
18360 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18361 */
18362-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18363+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18364+
18365+#ifdef CONFIG_PAX_PER_CPU_PGD
18366+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18367+#endif
18368+
18369 /*
18370 * a shortcut which implies the use of the kernel's pgd, instead
18371 * of a process's
18372@@ -675,6 +755,23 @@ static inline int pgd_none(pgd_t pgd)
18373 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18374 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18375
18376+#ifdef CONFIG_X86_32
18377+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18378+#else
18379+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18380+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18381+
18382+#ifdef CONFIG_PAX_MEMORY_UDEREF
18383+#ifdef __ASSEMBLY__
18384+#define pax_user_shadow_base pax_user_shadow_base(%rip)
18385+#else
18386+extern unsigned long pax_user_shadow_base;
18387+extern pgdval_t clone_pgd_mask;
18388+#endif
18389+#endif
18390+
18391+#endif
18392+
18393 #ifndef __ASSEMBLY__
18394
18395 extern int direct_gbpages;
18396@@ -841,11 +938,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18397 * dst and src can be on the same page, but the range must not overlap,
18398 * and must not cross a page boundary.
18399 */
18400-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18401+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18402 {
18403- memcpy(dst, src, count * sizeof(pgd_t));
18404+ pax_open_kernel();
18405+ while (count--)
18406+ *dst++ = *src++;
18407+ pax_close_kernel();
18408 }
18409
18410+#ifdef CONFIG_PAX_PER_CPU_PGD
18411+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18412+#endif
18413+
18414+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18415+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18416+#else
18417+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18418+#endif
18419+
18420 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18421 static inline int page_level_shift(enum pg_level level)
18422 {
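[Editor's note] The pgtable.h hunks above do three related things: pte_mkexec()/pte_exprotect() fall back to toggling _PAGE_USER when the CPU has no usable NX bit (pairing with PaX's segmentation-based exec protection), CONFIG_PAX_PER_CPU_PGD gives every CPU its own kernel/user PGD pair reachable through get_cpu_pgd(), and clone_pgd_range() becomes an element-wise copy bracketed by pax_open_kernel()/pax_close_kernel() so it works on read-only page tables. A minimal userspace sketch of just the NX-fallback flag logic; the bit positions mirror x86 and __supported_pte_mask is modeled as a plain global (both are assumptions for illustration, not taken from the patch):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t pteval_t;

    #define _PAGE_USER ((pteval_t)1 << 2)   /* assumed x86 bit position */
    #define _PAGE_NX   ((pteval_t)1 << 63)  /* assumed x86 bit position */

    static pteval_t supported_pte_mask = _PAGE_NX; /* pretend the CPU has NX */

    static pteval_t pte_mkexec(pteval_t pte)
    {
        if (supported_pte_mask & _PAGE_NX)
            return pte & ~_PAGE_NX;   /* real NX: clear the bit */
        return pte | _PAGE_USER;      /* no NX: fall back to USER */
    }

    static pteval_t pte_exprotect(pteval_t pte)
    {
        if (supported_pte_mask & _PAGE_NX)
            return pte | _PAGE_NX;    /* real NX: set the bit */
        return pte & ~_PAGE_USER;     /* no NX: drop USER instead */
    }

    int main(void)
    {
        pteval_t pte = _PAGE_USER | _PAGE_NX;
        printf("exec:   %016llx\n", (unsigned long long)pte_mkexec(pte));
        printf("noexec: %016llx\n", (unsigned long long)pte_exprotect(pte));
        return 0;
    }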
18423diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18424index 9ee3221..b979c6b 100644
18425--- a/arch/x86/include/asm/pgtable_32.h
18426+++ b/arch/x86/include/asm/pgtable_32.h
18427@@ -25,9 +25,6 @@
18428 struct mm_struct;
18429 struct vm_area_struct;
18430
18431-extern pgd_t swapper_pg_dir[1024];
18432-extern pgd_t initial_page_table[1024];
18433-
18434 static inline void pgtable_cache_init(void) { }
18435 static inline void check_pgt_cache(void) { }
18436 void paging_init(void);
18437@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
18438 # include <asm/pgtable-2level.h>
18439 #endif
18440
18441+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18442+extern pgd_t initial_page_table[PTRS_PER_PGD];
18443+#ifdef CONFIG_X86_PAE
18444+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18445+#endif
18446+
18447 #if defined(CONFIG_HIGHPTE)
18448 #define pte_offset_map(dir, address) \
18449 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18450@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
18451 /* Clear a kernel PTE and flush it from the TLB */
18452 #define kpte_clear_flush(ptep, vaddr) \
18453 do { \
18454+ pax_open_kernel(); \
18455 pte_clear(&init_mm, (vaddr), (ptep)); \
18456+ pax_close_kernel(); \
18457 __flush_tlb_one((vaddr)); \
18458 } while (0)
18459
18460 #endif /* !__ASSEMBLY__ */
18461
18462+#define HAVE_ARCH_UNMAPPED_AREA
18463+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18464+
18465 /*
18466 * kern_addr_valid() is (1) for FLATMEM and (0) for
18467 * SPARSEMEM and DISCONTIGMEM
18468diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18469index ed5903b..c7fe163 100644
18470--- a/arch/x86/include/asm/pgtable_32_types.h
18471+++ b/arch/x86/include/asm/pgtable_32_types.h
18472@@ -8,7 +8,7 @@
18473 */
18474 #ifdef CONFIG_X86_PAE
18475 # include <asm/pgtable-3level_types.h>
18476-# define PMD_SIZE (1UL << PMD_SHIFT)
18477+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18478 # define PMD_MASK (~(PMD_SIZE - 1))
18479 #else
18480 # include <asm/pgtable-2level_types.h>
18481@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18482 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18483 #endif
18484
18485+#ifdef CONFIG_PAX_KERNEXEC
18486+#ifndef __ASSEMBLY__
18487+extern unsigned char MODULES_EXEC_VADDR[];
18488+extern unsigned char MODULES_EXEC_END[];
18489+#endif
18490+#include <asm/boot.h>
18491+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18492+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18493+#else
18494+#define ktla_ktva(addr) (addr)
18495+#define ktva_ktla(addr) (addr)
18496+#endif
18497+
18498 #define MODULES_VADDR VMALLOC_START
18499 #define MODULES_END VMALLOC_END
18500 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
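[Editor's note] Under CONFIG_PAX_KERNEXEC on i386 the kernel text is reachable at two addresses, and the ktla_ktva()/ktva_ktla() pair added above converts between them by adding or subtracting LOAD_PHYSICAL_ADDR + PAGE_OFFSET (the names suggest kernel-text linear address vs. kernel-text virtual address). A sketch of the round trip under assumed values LOAD_PHYSICAL_ADDR = 16 MiB and PAGE_OFFSET = 0xc0000000, using uint32_t so the mod-2^32 wraparound of a 32-bit kernel is preserved:

    #include <inttypes.h>
    #include <stdio.h>

    #define LOAD_PHYSICAL_ADDR ((uint32_t)0x1000000)   /* assumed: 16 MiB */
    #define PAGE_OFFSET        ((uint32_t)0xc0000000)  /* assumed: 3G/1G split */

    #define ktla_ktva(a) ((uint32_t)((a) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET))
    #define ktva_ktla(a) ((uint32_t)((a) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET))

    int main(void)
    {
        uint32_t ktla = 0xc1000000;      /* a made-up kernel text address */
        uint32_t ktva = ktla_ktva(ktla); /* wraps mod 2^32, as on i386 */

        printf("ktla %#" PRIx32 " -> ktva %#" PRIx32 " -> back %#" PRIx32 "\n",
               ktla, ktva, ktva_ktla(ktva));
        return 0;
    }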
18501diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18502index 5be9063..d62185b 100644
18503--- a/arch/x86/include/asm/pgtable_64.h
18504+++ b/arch/x86/include/asm/pgtable_64.h
18505@@ -16,10 +16,14 @@
18506
18507 extern pud_t level3_kernel_pgt[512];
18508 extern pud_t level3_ident_pgt[512];
18509+extern pud_t level3_vmalloc_start_pgt[512];
18510+extern pud_t level3_vmalloc_end_pgt[512];
18511+extern pud_t level3_vmemmap_pgt[512];
18512+extern pud_t level2_vmemmap_pgt[512];
18513 extern pmd_t level2_kernel_pgt[512];
18514 extern pmd_t level2_fixmap_pgt[512];
18515-extern pmd_t level2_ident_pgt[512];
18516-extern pgd_t init_level4_pgt[];
18517+extern pmd_t level2_ident_pgt[512*2];
18518+extern pgd_t init_level4_pgt[512];
18519
18520 #define swapper_pg_dir init_level4_pgt
18521
18522@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18523
18524 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18525 {
18526+ pax_open_kernel();
18527 *pmdp = pmd;
18528+ pax_close_kernel();
18529 }
18530
18531 static inline void native_pmd_clear(pmd_t *pmd)
18532@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
18533
18534 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18535 {
18536+ pax_open_kernel();
18537 *pudp = pud;
18538+ pax_close_kernel();
18539 }
18540
18541 static inline void native_pud_clear(pud_t *pud)
18542@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
18543
18544 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
18545 {
18546+ pax_open_kernel();
18547+ *pgdp = pgd;
18548+ pax_close_kernel();
18549+}
18550+
18551+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18552+{
18553 *pgdp = pgd;
18554 }
18555
18556diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
18557index 7166e25..baaa6fe 100644
18558--- a/arch/x86/include/asm/pgtable_64_types.h
18559+++ b/arch/x86/include/asm/pgtable_64_types.h
18560@@ -61,9 +61,14 @@ typedef struct { pteval_t pte; } pte_t;
18561 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
18562 #define MODULES_END _AC(0xffffffffff000000, UL)
18563 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
18564+#define MODULES_EXEC_VADDR MODULES_VADDR
18565+#define MODULES_EXEC_END MODULES_END
18566 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
18567 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
18568
18569+#define ktla_ktva(addr) (addr)
18570+#define ktva_ktla(addr) (addr)
18571+
18572 #define EARLY_DYNAMIC_PAGE_TABLES 64
18573
18574 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
18575diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
18576index f216963..6bd7c21 100644
18577--- a/arch/x86/include/asm/pgtable_types.h
18578+++ b/arch/x86/include/asm/pgtable_types.h
18579@@ -111,8 +111,10 @@
18580
18581 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
18582 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
18583-#else
18584+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
18585 #define _PAGE_NX (_AT(pteval_t, 0))
18586+#else
18587+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
18588 #endif
18589
18590 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
18591@@ -151,6 +153,9 @@
18592 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
18593 _PAGE_ACCESSED)
18594
18595+#define PAGE_READONLY_NOEXEC PAGE_READONLY
18596+#define PAGE_SHARED_NOEXEC PAGE_SHARED
18597+
18598 #define __PAGE_KERNEL_EXEC \
18599 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
18600 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
18601@@ -161,7 +166,7 @@
18602 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
18603 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
18604 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
18605-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
18606+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
18607 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
18608 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
18609 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
18610@@ -218,7 +223,7 @@
18611 #ifdef CONFIG_X86_64
18612 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
18613 #else
18614-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
18615+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18616 #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18617 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
18618 #endif
18619@@ -257,7 +262,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
18620 {
18621 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
18622 }
18623+#endif
18624
18625+#if PAGETABLE_LEVELS == 3
18626+#include <asm-generic/pgtable-nopud.h>
18627+#endif
18628+
18629+#if PAGETABLE_LEVELS == 2
18630+#include <asm-generic/pgtable-nopmd.h>
18631+#endif
18632+
18633+#ifndef __ASSEMBLY__
18634 #if PAGETABLE_LEVELS > 3
18635 typedef struct { pudval_t pud; } pud_t;
18636
18637@@ -271,8 +286,6 @@ static inline pudval_t native_pud_val(pud_t pud)
18638 return pud.pud;
18639 }
18640 #else
18641-#include <asm-generic/pgtable-nopud.h>
18642-
18643 static inline pudval_t native_pud_val(pud_t pud)
18644 {
18645 return native_pgd_val(pud.pgd);
18646@@ -292,8 +305,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
18647 return pmd.pmd;
18648 }
18649 #else
18650-#include <asm-generic/pgtable-nopmd.h>
18651-
18652 static inline pmdval_t native_pmd_val(pmd_t pmd)
18653 {
18654 return native_pgd_val(pmd.pud.pgd);
18655@@ -333,7 +344,6 @@ typedef struct page *pgtable_t;
18656
18657 extern pteval_t __supported_pte_mask;
18658 extern void set_nx(void);
18659-extern int nx_enabled;
18660
18661 #define pgprot_writecombine pgprot_writecombine
18662 extern pgprot_t pgprot_writecombine(pgprot_t prot);
18663diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
18664index 7024c12..71c46b9 100644
18665--- a/arch/x86/include/asm/preempt.h
18666+++ b/arch/x86/include/asm/preempt.h
18667@@ -87,7 +87,7 @@ static __always_inline void __preempt_count_sub(int val)
18668 */
18669 static __always_inline bool __preempt_count_dec_and_test(void)
18670 {
18671- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
18672+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
18673 }
18674
18675 /*
18676diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18677index a4ea023..2ff3cb8 100644
18678--- a/arch/x86/include/asm/processor.h
18679+++ b/arch/x86/include/asm/processor.h
18680@@ -128,7 +128,7 @@ struct cpuinfo_x86 {
18681 /* Index into per_cpu list: */
18682 u16 cpu_index;
18683 u32 microcode;
18684-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
18685+} __attribute__((__aligned__(SMP_CACHE_BYTES))) __randomize_layout;
18686
18687 #define X86_VENDOR_INTEL 0
18688 #define X86_VENDOR_CYRIX 1
18689@@ -199,9 +199,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
18690 : "memory");
18691 }
18692
18693+/* invpcid (%rdx),%rax */
18694+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
18695+
18696+#define INVPCID_SINGLE_ADDRESS 0UL
18697+#define INVPCID_SINGLE_CONTEXT 1UL
18698+#define INVPCID_ALL_GLOBAL 2UL
18699+#define INVPCID_ALL_NONGLOBAL 3UL
18700+
18701+#define PCID_KERNEL 0UL
18702+#define PCID_USER 1UL
18703+#define PCID_NOFLUSH (1UL << 63)
18704+
18705 static inline void load_cr3(pgd_t *pgdir)
18706 {
18707- write_cr3(__pa(pgdir));
18708+ write_cr3(__pa(pgdir) | PCID_KERNEL);
18709 }
18710
18711 #ifdef CONFIG_X86_32
18712@@ -283,7 +295,7 @@ struct tss_struct {
18713
18714 } ____cacheline_aligned;
18715
18716-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
18717+extern struct tss_struct init_tss[NR_CPUS];
18718
18719 /*
18720 * Save the original ist values for checking stack pointers during debugging
18721@@ -479,6 +491,7 @@ struct thread_struct {
18722 unsigned short ds;
18723 unsigned short fsindex;
18724 unsigned short gsindex;
18725+ unsigned short ss;
18726 #endif
18727 #ifdef CONFIG_X86_32
18728 unsigned long ip;
18729@@ -588,29 +601,8 @@ static inline void load_sp0(struct tss_struct *tss,
18730 extern unsigned long mmu_cr4_features;
18731 extern u32 *trampoline_cr4_features;
18732
18733-static inline void set_in_cr4(unsigned long mask)
18734-{
18735- unsigned long cr4;
18736-
18737- mmu_cr4_features |= mask;
18738- if (trampoline_cr4_features)
18739- *trampoline_cr4_features = mmu_cr4_features;
18740- cr4 = read_cr4();
18741- cr4 |= mask;
18742- write_cr4(cr4);
18743-}
18744-
18745-static inline void clear_in_cr4(unsigned long mask)
18746-{
18747- unsigned long cr4;
18748-
18749- mmu_cr4_features &= ~mask;
18750- if (trampoline_cr4_features)
18751- *trampoline_cr4_features = mmu_cr4_features;
18752- cr4 = read_cr4();
18753- cr4 &= ~mask;
18754- write_cr4(cr4);
18755-}
18756+extern void set_in_cr4(unsigned long mask);
18757+extern void clear_in_cr4(unsigned long mask);
18758
18759 typedef struct {
18760 unsigned long seg;
18761@@ -836,11 +828,18 @@ static inline void spin_lock_prefetch(const void *x)
18762 */
18763 #define TASK_SIZE PAGE_OFFSET
18764 #define TASK_SIZE_MAX TASK_SIZE
18765+
18766+#ifdef CONFIG_PAX_SEGMEXEC
18767+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
18768+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
18769+#else
18770 #define STACK_TOP TASK_SIZE
18771-#define STACK_TOP_MAX STACK_TOP
18772+#endif
18773+
18774+#define STACK_TOP_MAX TASK_SIZE
18775
18776 #define INIT_THREAD { \
18777- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18778+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18779 .vm86_info = NULL, \
18780 .sysenter_cs = __KERNEL_CS, \
18781 .io_bitmap_ptr = NULL, \
18782@@ -854,7 +853,7 @@ static inline void spin_lock_prefetch(const void *x)
18783 */
18784 #define INIT_TSS { \
18785 .x86_tss = { \
18786- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18787+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18788 .ss0 = __KERNEL_DS, \
18789 .ss1 = __KERNEL_CS, \
18790 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
18791@@ -865,11 +864,7 @@ static inline void spin_lock_prefetch(const void *x)
18792 extern unsigned long thread_saved_pc(struct task_struct *tsk);
18793
18794 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
18795-#define KSTK_TOP(info) \
18796-({ \
18797- unsigned long *__ptr = (unsigned long *)(info); \
18798- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
18799-})
18800+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
18801
18802 /*
18803 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
18804@@ -884,7 +879,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18805 #define task_pt_regs(task) \
18806 ({ \
18807 struct pt_regs *__regs__; \
18808- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
18809+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
18810 __regs__ - 1; \
18811 })
18812
18813@@ -894,13 +889,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18814 /*
18815 * User space process size. 47bits minus one guard page.
18816 */
18817-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
18818+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
18819
18820 /* This decides where the kernel will search for a free chunk of vm
18821 * space during mmap's.
18822 */
18823 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
18824- 0xc0000000 : 0xFFFFe000)
18825+ 0xc0000000 : 0xFFFFf000)
18826
18827 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
18828 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
18829@@ -911,11 +906,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18830 #define STACK_TOP_MAX TASK_SIZE_MAX
18831
18832 #define INIT_THREAD { \
18833- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18834+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18835 }
18836
18837 #define INIT_TSS { \
18838- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18839+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18840 }
18841
18842 /*
18843@@ -943,6 +938,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
18844 */
18845 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
18846
18847+#ifdef CONFIG_PAX_SEGMEXEC
18848+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
18849+#endif
18850+
18851 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
18852
18853 /* Get/set a process' ability to use the timestamp counter instruction */
18854@@ -969,7 +968,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18855 return 0;
18856 }
18857
18858-extern unsigned long arch_align_stack(unsigned long sp);
18859+#define arch_align_stack(x) ((x) & ~0xfUL)
18860 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
18861
18862 void default_idle(void);
18863@@ -979,6 +978,6 @@ bool xen_set_default_idle(void);
18864 #define xen_set_default_idle 0
18865 #endif
18866
18867-void stop_this_cpu(void *dummy);
18868+void stop_this_cpu(void *dummy) __noreturn;
18869 void df_debug(struct pt_regs *regs, long error_code);
18870 #endif /* _ASM_X86_PROCESSOR_H */
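[Editor's note] The PCID block above starts tagging TLB entries: CR3 writes now carry an address-space ID in the low bits (PCID_KERNEL = 0, PCID_USER = 1 under UDEREF) and can set bit 63 (PCID_NOFLUSH) to suppress the implicit flush, which is what lets the per-CPU user/kernel PGD switching elsewhere in the patch stay cheap. A sketch of the value composition only, with a hypothetical pgd physical address:

    #include <inttypes.h>
    #include <stdio.h>

    #define PCID_KERNEL  ((uint64_t)0)
    #define PCID_USER    ((uint64_t)1)
    #define PCID_NOFLUSH ((uint64_t)1 << 63)

    static uint64_t make_cr3(uint64_t pgd_pa, uint64_t pcid, int noflush)
    {
        /* physical address fills the middle; PCID lives in the low 12 bits */
        return pgd_pa | pcid | (noflush ? PCID_NOFLUSH : 0);
    }

    int main(void)
    {
        uint64_t pa = 0x1b4000;  /* hypothetical page-aligned pgd address */

        printf("kernel cr3: %#018" PRIx64 "\n", make_cr3(pa, PCID_KERNEL, 0));
        printf("user cr3:   %#018" PRIx64 "\n", make_cr3(pa, PCID_USER, 1));
        return 0;
    }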
18871diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
18872index 6205f0c..688a3a9 100644
18873--- a/arch/x86/include/asm/ptrace.h
18874+++ b/arch/x86/include/asm/ptrace.h
18875@@ -84,28 +84,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
18876 }
18877
18878 /*
18879- * user_mode_vm(regs) determines whether a register set came from user mode.
18880+ * user_mode(regs) determines whether a register set came from user mode.
18881 * This is true if V8086 mode was enabled OR if the register set was from
18882 * protected mode with RPL-3 CS value. This tricky test checks that with
18883 * one comparison. Many places in the kernel can bypass this full check
18884- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
18885+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
18886+ * be used.
18887 */
18888-static inline int user_mode(struct pt_regs *regs)
18889+static inline int user_mode_novm(struct pt_regs *regs)
18890 {
18891 #ifdef CONFIG_X86_32
18892 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
18893 #else
18894- return !!(regs->cs & 3);
18895+ return !!(regs->cs & SEGMENT_RPL_MASK);
18896 #endif
18897 }
18898
18899-static inline int user_mode_vm(struct pt_regs *regs)
18900+static inline int user_mode(struct pt_regs *regs)
18901 {
18902 #ifdef CONFIG_X86_32
18903 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
18904 USER_RPL;
18905 #else
18906- return user_mode(regs);
18907+ return user_mode_novm(regs);
18908 #endif
18909 }
18910
18911@@ -121,15 +122,16 @@ static inline int v8086_mode(struct pt_regs *regs)
18912 #ifdef CONFIG_X86_64
18913 static inline bool user_64bit_mode(struct pt_regs *regs)
18914 {
18915+ unsigned long cs = regs->cs & 0xffff;
18916 #ifndef CONFIG_PARAVIRT
18917 /*
18918 * On non-paravirt systems, this is the only long mode CPL 3
18919 * selector. We do not allow long mode selectors in the LDT.
18920 */
18921- return regs->cs == __USER_CS;
18922+ return cs == __USER_CS;
18923 #else
18924 /* Headers are too twisted for this to go in paravirt.h. */
18925- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
18926+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
18927 #endif
18928 }
18929
18930@@ -180,9 +182,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
18931 * Traps from the kernel do not save sp and ss.
18932 * Use the helper function to retrieve sp.
18933 */
18934- if (offset == offsetof(struct pt_regs, sp) &&
18935- regs->cs == __KERNEL_CS)
18936- return kernel_stack_pointer(regs);
18937+ if (offset == offsetof(struct pt_regs, sp)) {
18938+ unsigned long cs = regs->cs & 0xffff;
18939+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
18940+ return kernel_stack_pointer(regs);
18941+ }
18942 #endif
18943 return *(unsigned long *)((unsigned long)regs + offset);
18944 }
18945diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
18946index 70f46f0..adfbdb4 100644
18947--- a/arch/x86/include/asm/qrwlock.h
18948+++ b/arch/x86/include/asm/qrwlock.h
18949@@ -7,8 +7,8 @@
18950 #define queue_write_unlock queue_write_unlock
18951 static inline void queue_write_unlock(struct qrwlock *lock)
18952 {
18953- barrier();
18954- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
18955+ barrier();
18956+ ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
18957 }
18958 #endif
18959
18960diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
18961index 9c6b890..5305f53 100644
18962--- a/arch/x86/include/asm/realmode.h
18963+++ b/arch/x86/include/asm/realmode.h
18964@@ -22,16 +22,14 @@ struct real_mode_header {
18965 #endif
18966 /* APM/BIOS reboot */
18967 u32 machine_real_restart_asm;
18968-#ifdef CONFIG_X86_64
18969 u32 machine_real_restart_seg;
18970-#endif
18971 };
18972
18973 /* This must match data at trampoline_32/64.S */
18974 struct trampoline_header {
18975 #ifdef CONFIG_X86_32
18976 u32 start;
18977- u16 gdt_pad;
18978+ u16 boot_cs;
18979 u16 gdt_limit;
18980 u32 gdt_base;
18981 #else
18982diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
18983index a82c4f1..ac45053 100644
18984--- a/arch/x86/include/asm/reboot.h
18985+++ b/arch/x86/include/asm/reboot.h
18986@@ -6,13 +6,13 @@
18987 struct pt_regs;
18988
18989 struct machine_ops {
18990- void (*restart)(char *cmd);
18991- void (*halt)(void);
18992- void (*power_off)(void);
18993+ void (* __noreturn restart)(char *cmd);
18994+ void (* __noreturn halt)(void);
18995+ void (* __noreturn power_off)(void);
18996 void (*shutdown)(void);
18997 void (*crash_shutdown)(struct pt_regs *);
18998- void (*emergency_restart)(void);
18999-};
19000+ void (* __noreturn emergency_restart)(void);
19001+} __no_const;
19002
19003 extern struct machine_ops machine_ops;
19004
19005diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
19006index 8f7866a..e442f20 100644
19007--- a/arch/x86/include/asm/rmwcc.h
19008+++ b/arch/x86/include/asm/rmwcc.h
19009@@ -3,7 +3,34 @@
19010
19011 #ifdef CC_HAVE_ASM_GOTO
19012
19013-#define __GEN_RMWcc(fullop, var, cc, ...) \
19014+#ifdef CONFIG_PAX_REFCOUNT
19015+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19016+do { \
19017+ asm_volatile_goto (fullop \
19018+ ";jno 0f\n" \
19019+ fullantiop \
19020+ ";int $4\n0:\n" \
19021+ _ASM_EXTABLE(0b, 0b) \
19022+ ";j" cc " %l[cc_label]" \
19023+ : : "m" (var), ## __VA_ARGS__ \
19024+ : "memory" : cc_label); \
19025+ return 0; \
19026+cc_label: \
19027+ return 1; \
19028+} while (0)
19029+#else
19030+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19031+do { \
19032+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
19033+ : : "m" (var), ## __VA_ARGS__ \
19034+ : "memory" : cc_label); \
19035+ return 0; \
19036+cc_label: \
19037+ return 1; \
19038+} while (0)
19039+#endif
19040+
19041+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
19042 do { \
19043 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
19044 : : "m" (var), ## __VA_ARGS__ \
19045@@ -13,15 +40,46 @@ cc_label: \
19046 return 1; \
19047 } while (0)
19048
19049-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
19050- __GEN_RMWcc(op " " arg0, var, cc)
19051+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
19052+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
19053
19054-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
19055- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
19056+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
19057+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
19058+
19059+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
19060+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
19061+
19062+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
19063+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
19064
19065 #else /* !CC_HAVE_ASM_GOTO */
19066
19067-#define __GEN_RMWcc(fullop, var, cc, ...) \
19068+#ifdef CONFIG_PAX_REFCOUNT
19069+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19070+do { \
19071+ char c; \
19072+ asm volatile (fullop \
19073+ ";jno 0f\n" \
19074+ fullantiop \
19075+ ";int $4\n0:\n" \
19076+ _ASM_EXTABLE(0b, 0b) \
19077+ "; set" cc " %1" \
19078+ : "+m" (var), "=qm" (c) \
19079+ : __VA_ARGS__ : "memory"); \
19080+ return c != 0; \
19081+} while (0)
19082+#else
19083+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19084+do { \
19085+ char c; \
19086+ asm volatile (fullop "; set" cc " %1" \
19087+ : "+m" (var), "=qm" (c) \
19088+ : __VA_ARGS__ : "memory"); \
19089+ return c != 0; \
19090+} while (0)
19091+#endif
19092+
19093+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
19094 do { \
19095 char c; \
19096 asm volatile (fullop "; set" cc " %1" \
19097@@ -30,11 +88,17 @@ do { \
19098 return c != 0; \
19099 } while (0)
19100
19101-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
19102- __GEN_RMWcc(op " " arg0, var, cc)
19103+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
19104+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
19105+
19106+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
19107+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
19108+
19109+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
19110+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
19111
19112-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
19113- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
19114+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
19115+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
19116
19117 #endif /* CC_HAVE_ASM_GOTO */
19118
19119diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
19120index cad82c9..2e5c5c1 100644
19121--- a/arch/x86/include/asm/rwsem.h
19122+++ b/arch/x86/include/asm/rwsem.h
19123@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
19124 {
19125 asm volatile("# beginning down_read\n\t"
19126 LOCK_PREFIX _ASM_INC "(%1)\n\t"
19127+
19128+#ifdef CONFIG_PAX_REFCOUNT
19129+ "jno 0f\n"
19130+ LOCK_PREFIX _ASM_DEC "(%1)\n"
19131+ "int $4\n0:\n"
19132+ _ASM_EXTABLE(0b, 0b)
19133+#endif
19134+
19135 /* adds 0x00000001 */
19136 " jns 1f\n"
19137 " call call_rwsem_down_read_failed\n"
19138@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
19139 "1:\n\t"
19140 " mov %1,%2\n\t"
19141 " add %3,%2\n\t"
19142+
19143+#ifdef CONFIG_PAX_REFCOUNT
19144+ "jno 0f\n"
19145+ "sub %3,%2\n"
19146+ "int $4\n0:\n"
19147+ _ASM_EXTABLE(0b, 0b)
19148+#endif
19149+
19150 " jle 2f\n\t"
19151 LOCK_PREFIX " cmpxchg %2,%0\n\t"
19152 " jnz 1b\n\t"
19153@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
19154 long tmp;
19155 asm volatile("# beginning down_write\n\t"
19156 LOCK_PREFIX " xadd %1,(%2)\n\t"
19157+
19158+#ifdef CONFIG_PAX_REFCOUNT
19159+ "jno 0f\n"
19160+ "mov %1,(%2)\n"
19161+ "int $4\n0:\n"
19162+ _ASM_EXTABLE(0b, 0b)
19163+#endif
19164+
19165 /* adds 0xffff0001, returns the old value */
19166 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
19167 /* was the active mask 0 before? */
19168@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
19169 long tmp;
19170 asm volatile("# beginning __up_read\n\t"
19171 LOCK_PREFIX " xadd %1,(%2)\n\t"
19172+
19173+#ifdef CONFIG_PAX_REFCOUNT
19174+ "jno 0f\n"
19175+ "mov %1,(%2)\n"
19176+ "int $4\n0:\n"
19177+ _ASM_EXTABLE(0b, 0b)
19178+#endif
19179+
19180 /* subtracts 1, returns the old value */
19181 " jns 1f\n\t"
19182 " call call_rwsem_wake\n" /* expects old value in %edx */
19183@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
19184 long tmp;
19185 asm volatile("# beginning __up_write\n\t"
19186 LOCK_PREFIX " xadd %1,(%2)\n\t"
19187+
19188+#ifdef CONFIG_PAX_REFCOUNT
19189+ "jno 0f\n"
19190+ "mov %1,(%2)\n"
19191+ "int $4\n0:\n"
19192+ _ASM_EXTABLE(0b, 0b)
19193+#endif
19194+
19195 /* subtracts 0xffff0001, returns the old value */
19196 " jns 1f\n\t"
19197 " call call_rwsem_wake\n" /* expects old value in %edx */
19198@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
19199 {
19200 asm volatile("# beginning __downgrade_write\n\t"
19201 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
19202+
19203+#ifdef CONFIG_PAX_REFCOUNT
19204+ "jno 0f\n"
19205+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
19206+ "int $4\n0:\n"
19207+ _ASM_EXTABLE(0b, 0b)
19208+#endif
19209+
19210 /*
19211 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
19212 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
19213@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
19214 */
19215 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19216 {
19217- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
19218+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
19219+
19220+#ifdef CONFIG_PAX_REFCOUNT
19221+ "jno 0f\n"
19222+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
19223+ "int $4\n0:\n"
19224+ _ASM_EXTABLE(0b, 0b)
19225+#endif
19226+
19227 : "+m" (sem->count)
19228 : "er" (delta));
19229 }
19230@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19231 */
19232 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
19233 {
19234- return delta + xadd(&sem->count, delta);
19235+ return delta + xadd_check_overflow(&sem->count, delta);
19236 }
19237
19238 #endif /* __KERNEL__ */
19239diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
19240index 6f1c3a8..7744f19 100644
19241--- a/arch/x86/include/asm/segment.h
19242+++ b/arch/x86/include/asm/segment.h
19243@@ -64,10 +64,15 @@
19244 * 26 - ESPFIX small SS
19245 * 27 - per-cpu [ offset to per-cpu data area ]
19246 * 28 - stack_canary-20 [ for stack protector ]
19247- * 29 - unused
19248- * 30 - unused
19249+ * 29 - PCI BIOS CS
19250+ * 30 - PCI BIOS DS
19251 * 31 - TSS for double fault handler
19252 */
19253+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
19254+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
19255+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
19256+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
19257+
19258 #define GDT_ENTRY_TLS_MIN 6
19259 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
19260
19261@@ -79,6 +84,8 @@
19262
19263 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
19264
19265+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
19266+
19267 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
19268
19269 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
19270@@ -104,6 +111,12 @@
19271 #define __KERNEL_STACK_CANARY 0
19272 #endif
19273
19274+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
19275+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
19276+
19277+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
19278+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
19279+
19280 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
19281
19282 /*
19283@@ -141,7 +154,7 @@
19284 */
19285
19286 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
19287-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
19288+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
19289
19290
19291 #else
19292@@ -165,6 +178,8 @@
19293 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
19294 #define __USER32_DS __USER_DS
19295
19296+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
19297+
19298 #define GDT_ENTRY_TSS 8 /* needs two entries */
19299 #define GDT_ENTRY_LDT 10 /* needs two entries */
19300 #define GDT_ENTRY_TLS_MIN 12
19301@@ -173,6 +188,8 @@
19302 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
19303 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
19304
19305+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19306+
19307 /* TLS indexes for 64bit - hardcoded in arch_prctl */
19308 #define FS_TLS 0
19309 #define GS_TLS 1
19310@@ -180,12 +197,14 @@
19311 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
19312 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
19313
19314-#define GDT_ENTRIES 16
19315+#define GDT_ENTRIES 17
19316
19317 #endif
19318
19319 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19320+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19321 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19322+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19323 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
19324 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
19325 #ifndef CONFIG_PARAVIRT
19326@@ -268,7 +287,7 @@ static inline unsigned long get_limit(unsigned long segment)
19327 {
19328 unsigned long __limit;
19329 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
19330- return __limit + 1;
19331+ return __limit;
19332 }
19333
19334 #endif /* !__ASSEMBLY__ */
19335diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19336index 8d3120f..352b440 100644
19337--- a/arch/x86/include/asm/smap.h
19338+++ b/arch/x86/include/asm/smap.h
19339@@ -25,11 +25,40 @@
19340
19341 #include <asm/alternative-asm.h>
19342
19343+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19344+#define ASM_PAX_OPEN_USERLAND \
19345+ 661: jmp 663f; \
19346+ .pushsection .altinstr_replacement, "a" ; \
19347+ 662: pushq %rax; nop; \
19348+ .popsection ; \
19349+ .pushsection .altinstructions, "a" ; \
19350+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19351+ .popsection ; \
19352+ call __pax_open_userland; \
19353+ popq %rax; \
19354+ 663:
19355+
19356+#define ASM_PAX_CLOSE_USERLAND \
19357+ 661: jmp 663f; \
19358+ .pushsection .altinstr_replacement, "a" ; \
19359+ 662: pushq %rax; nop; \
19360+ .popsection; \
19361+ .pushsection .altinstructions, "a" ; \
19362+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19363+ .popsection; \
19364+ call __pax_close_userland; \
19365+ popq %rax; \
19366+ 663:
19367+#else
19368+#define ASM_PAX_OPEN_USERLAND
19369+#define ASM_PAX_CLOSE_USERLAND
19370+#endif
19371+
19372 #ifdef CONFIG_X86_SMAP
19373
19374 #define ASM_CLAC \
19375 661: ASM_NOP3 ; \
19376- .pushsection .altinstr_replacement, "ax" ; \
19377+ .pushsection .altinstr_replacement, "a" ; \
19378 662: __ASM_CLAC ; \
19379 .popsection ; \
19380 .pushsection .altinstructions, "a" ; \
19381@@ -38,7 +67,7 @@
19382
19383 #define ASM_STAC \
19384 661: ASM_NOP3 ; \
19385- .pushsection .altinstr_replacement, "ax" ; \
19386+ .pushsection .altinstr_replacement, "a" ; \
19387 662: __ASM_STAC ; \
19388 .popsection ; \
19389 .pushsection .altinstructions, "a" ; \
19390@@ -56,6 +85,37 @@
19391
19392 #include <asm/alternative.h>
19393
19394+#define __HAVE_ARCH_PAX_OPEN_USERLAND
19395+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19396+
19397+extern void __pax_open_userland(void);
19398+static __always_inline unsigned long pax_open_userland(void)
19399+{
19400+
19401+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19402+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
19403+ :
19404+ : [open] "i" (__pax_open_userland)
19405+ : "memory", "rax");
19406+#endif
19407+
19408+ return 0;
19409+}
19410+
19411+extern void __pax_close_userland(void);
19412+static __always_inline unsigned long pax_close_userland(void)
19413+{
19414+
19415+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19416+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
19417+ :
19418+ : [close] "i" (__pax_close_userland)
19419+ : "memory", "rax");
19420+#endif
19421+
19422+ return 0;
19423+}
19424+
19425 #ifdef CONFIG_X86_SMAP
19426
19427 static __always_inline void clac(void)
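[Editor's note] The smap.h additions lean on the kernel's alternatives mechanism: at boot, the 5-byte NOP at each pax_open_userland()/pax_close_userland() site is patched into a call instruction only if the CPU reports the grsecurity-defined X86_FEATURE_STRONGUDEREF bit, so hardware without the feature pays nothing. A crude userspace analogy using a function pointer selected once at startup (the real mechanism rewrites instruction bytes in place):

    #include <stdio.h>

    static void open_userland_nop(void)  { /* patched-out: do nothing */ }
    static void open_userland_real(void) { puts("switch to user PGD/PCID"); }

    static void (*pax_open_userland)(void) = open_userland_nop;

    static int cpu_has_stronguderef(void)
    {
        return 1;  /* pretend the feature bit is set */
    }

    int main(void)
    {
        /* boot-time "alternatives" pass: pick the variant once */
        if (cpu_has_stronguderef())
            pax_open_userland = open_userland_real;

        pax_open_userland();  /* every later call takes the chosen path */
        return 0;
    }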
19428diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19429index 8cd27e0..7f05ec8 100644
19430--- a/arch/x86/include/asm/smp.h
19431+++ b/arch/x86/include/asm/smp.h
19432@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19433 /* cpus sharing the last level cache: */
19434 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19435 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19436-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19437+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19438
19439 static inline struct cpumask *cpu_sibling_mask(int cpu)
19440 {
19441@@ -78,7 +78,7 @@ struct smp_ops {
19442
19443 void (*send_call_func_ipi)(const struct cpumask *mask);
19444 void (*send_call_func_single_ipi)(int cpu);
19445-};
19446+} __no_const;
19447
19448 /* Globals due to paravirt */
19449 extern void set_cpu_sibling_map(int cpu);
19450@@ -190,14 +190,8 @@ extern unsigned disabled_cpus;
19451 extern int safe_smp_processor_id(void);
19452
19453 #elif defined(CONFIG_X86_64_SMP)
19454-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19455-
19456-#define stack_smp_processor_id() \
19457-({ \
19458- struct thread_info *ti; \
19459- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19460- ti->cpu; \
19461-})
19462+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19463+#define stack_smp_processor_id() raw_smp_processor_id()
19464 #define safe_smp_processor_id() smp_processor_id()
19465
19466 #endif
19467diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
19468index 54f1c80..39362a5 100644
19469--- a/arch/x86/include/asm/spinlock.h
19470+++ b/arch/x86/include/asm/spinlock.h
19471@@ -223,6 +223,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
19472 static inline void arch_read_lock(arch_rwlock_t *rw)
19473 {
19474 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
19475+
19476+#ifdef CONFIG_PAX_REFCOUNT
19477+ "jno 0f\n"
19478+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
19479+ "int $4\n0:\n"
19480+ _ASM_EXTABLE(0b, 0b)
19481+#endif
19482+
19483 "jns 1f\n"
19484 "call __read_lock_failed\n\t"
19485 "1:\n"
19486@@ -232,6 +240,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
19487 static inline void arch_write_lock(arch_rwlock_t *rw)
19488 {
19489 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
19490+
19491+#ifdef CONFIG_PAX_REFCOUNT
19492+ "jno 0f\n"
19493+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
19494+ "int $4\n0:\n"
19495+ _ASM_EXTABLE(0b, 0b)
19496+#endif
19497+
19498 "jz 1f\n"
19499 "call __write_lock_failed\n\t"
19500 "1:\n"
19501@@ -261,13 +277,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
19502
19503 static inline void arch_read_unlock(arch_rwlock_t *rw)
19504 {
19505- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
19506+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
19507+
19508+#ifdef CONFIG_PAX_REFCOUNT
19509+ "jno 0f\n"
19510+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
19511+ "int $4\n0:\n"
19512+ _ASM_EXTABLE(0b, 0b)
19513+#endif
19514+
19515 :"+m" (rw->lock) : : "memory");
19516 }
19517
19518 static inline void arch_write_unlock(arch_rwlock_t *rw)
19519 {
19520- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
19521+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
19522+
19523+#ifdef CONFIG_PAX_REFCOUNT
19524+ "jno 0f\n"
19525+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
19526+ "int $4\n0:\n"
19527+ _ASM_EXTABLE(0b, 0b)
19528+#endif
19529+
19530 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
19531 }
19532 #else
19533diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
19534index 6a99859..03cb807 100644
19535--- a/arch/x86/include/asm/stackprotector.h
19536+++ b/arch/x86/include/asm/stackprotector.h
19537@@ -47,7 +47,7 @@
19538 * head_32 for boot CPU and setup_per_cpu_areas() for others.
19539 */
19540 #define GDT_STACK_CANARY_INIT \
19541- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
19542+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
19543
19544 /*
19545 * Initialize the stackprotector canary value.
19546@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
19547
19548 static inline void load_stack_canary_segment(void)
19549 {
19550-#ifdef CONFIG_X86_32
19551+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19552 asm volatile ("mov %0, %%gs" : : "r" (0));
19553 #endif
19554 }
19555diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
19556index 70bbe39..4ae2bd4 100644
19557--- a/arch/x86/include/asm/stacktrace.h
19558+++ b/arch/x86/include/asm/stacktrace.h
19559@@ -11,28 +11,20 @@
19560
19561 extern int kstack_depth_to_print;
19562
19563-struct thread_info;
19564+struct task_struct;
19565 struct stacktrace_ops;
19566
19567-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
19568- unsigned long *stack,
19569- unsigned long bp,
19570- const struct stacktrace_ops *ops,
19571- void *data,
19572- unsigned long *end,
19573- int *graph);
19574+typedef unsigned long walk_stack_t(struct task_struct *task,
19575+ void *stack_start,
19576+ unsigned long *stack,
19577+ unsigned long bp,
19578+ const struct stacktrace_ops *ops,
19579+ void *data,
19580+ unsigned long *end,
19581+ int *graph);
19582
19583-extern unsigned long
19584-print_context_stack(struct thread_info *tinfo,
19585- unsigned long *stack, unsigned long bp,
19586- const struct stacktrace_ops *ops, void *data,
19587- unsigned long *end, int *graph);
19588-
19589-extern unsigned long
19590-print_context_stack_bp(struct thread_info *tinfo,
19591- unsigned long *stack, unsigned long bp,
19592- const struct stacktrace_ops *ops, void *data,
19593- unsigned long *end, int *graph);
19594+extern walk_stack_t print_context_stack;
19595+extern walk_stack_t print_context_stack_bp;
19596
19597 /* Generic stack tracer with callbacks */
19598
19599@@ -40,7 +32,7 @@ struct stacktrace_ops {
19600 void (*address)(void *data, unsigned long address, int reliable);
19601 /* On negative return stop dumping */
19602 int (*stack)(void *data, char *name);
19603- walk_stack_t walk_stack;
19604+ walk_stack_t *walk_stack;
19605 };
19606
19607 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
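[Editor's note] The stacktrace.h hunk swaps a function-pointer typedef for a function-type typedef: "extern walk_stack_t print_context_stack;" then declares the implementations themselves, while the ops field spells out its pointer nature as "walk_stack_t *". A self-contained sketch of that C idiom:

    #include <stdio.h>

    /* a function *type*, not a pointer type */
    typedef unsigned long walk_stack_t(unsigned long *stack);

    static walk_stack_t print_context_stack;  /* declares the function itself */

    struct stacktrace_ops {
        walk_stack_t *walk_stack;  /* the pointer is now spelled explicitly */
    };

    static unsigned long print_context_stack(unsigned long *stack)
    {
        return *stack + 1;
    }

    int main(void)
    {
        unsigned long word = 41;
        struct stacktrace_ops ops = { .walk_stack = print_context_stack };

        printf("%lu\n", ops.walk_stack(&word));  /* prints 42 */
        return 0;
    }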
19608diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
19609index d7f3b3b..3cc39f1 100644
19610--- a/arch/x86/include/asm/switch_to.h
19611+++ b/arch/x86/include/asm/switch_to.h
19612@@ -108,7 +108,7 @@ do { \
19613 "call __switch_to\n\t" \
19614 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
19615 __switch_canary \
19616- "movq %P[thread_info](%%rsi),%%r8\n\t" \
19617+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
19618 "movq %%rax,%%rdi\n\t" \
19619 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
19620 "jnz ret_from_fork\n\t" \
19621@@ -119,7 +119,7 @@ do { \
19622 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
19623 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
19624 [_tif_fork] "i" (_TIF_FORK), \
19625- [thread_info] "i" (offsetof(struct task_struct, stack)), \
19626+ [thread_info] "m" (current_tinfo), \
19627 [current_task] "m" (current_task) \
19628 __switch_canary_iparam \
19629 : "memory", "cc" __EXTRA_CLOBBER)
19630diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
19631index 8540538..4b0b5e9 100644
19632--- a/arch/x86/include/asm/thread_info.h
19633+++ b/arch/x86/include/asm/thread_info.h
19634@@ -24,7 +24,6 @@ struct exec_domain;
19635 #include <linux/atomic.h>
19636
19637 struct thread_info {
19638- struct task_struct *task; /* main task structure */
19639 struct exec_domain *exec_domain; /* execution domain */
19640 __u32 flags; /* low level flags */
19641 __u32 status; /* thread synchronous flags */
19642@@ -33,13 +32,13 @@ struct thread_info {
19643 mm_segment_t addr_limit;
19644 struct restart_block restart_block;
19645 void __user *sysenter_return;
19646+ unsigned long lowest_stack;
19647 unsigned int sig_on_uaccess_error:1;
19648 unsigned int uaccess_err:1; /* uaccess failed */
19649 };
19650
19651-#define INIT_THREAD_INFO(tsk) \
19652+#define INIT_THREAD_INFO \
19653 { \
19654- .task = &tsk, \
19655 .exec_domain = &default_exec_domain, \
19656 .flags = 0, \
19657 .cpu = 0, \
19658@@ -50,7 +49,7 @@ struct thread_info {
19659 }, \
19660 }
19661
19662-#define init_thread_info (init_thread_union.thread_info)
19663+#define init_thread_info (init_thread_union.stack)
19664 #define init_stack (init_thread_union.stack)
19665
19666 #else /* !__ASSEMBLY__ */
19667@@ -91,6 +90,7 @@ struct thread_info {
19668 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
19669 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
19670 #define TIF_X32 30 /* 32-bit native x86-64 binary */
19671+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
19672
19673 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
19674 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
19675@@ -115,17 +115,18 @@ struct thread_info {
19676 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
19677 #define _TIF_ADDR32 (1 << TIF_ADDR32)
19678 #define _TIF_X32 (1 << TIF_X32)
19679+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
19680
19681 /* work to do in syscall_trace_enter() */
19682 #define _TIF_WORK_SYSCALL_ENTRY \
19683 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
19684 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
19685- _TIF_NOHZ)
19686+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19687
19688 /* work to do in syscall_trace_leave() */
19689 #define _TIF_WORK_SYSCALL_EXIT \
19690 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
19691- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
19692+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
19693
19694 /* work to do on interrupt/exception return */
19695 #define _TIF_WORK_MASK \
19696@@ -136,7 +137,7 @@ struct thread_info {
19697 /* work to do on any return to user space */
19698 #define _TIF_ALLWORK_MASK \
19699 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
19700- _TIF_NOHZ)
19701+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19702
19703 /* Only used for 64 bit */
19704 #define _TIF_DO_NOTIFY_MASK \
19705@@ -151,7 +152,6 @@ struct thread_info {
19706 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
19707
19708 #define STACK_WARN (THREAD_SIZE/8)
19709-#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
19710
19711 /*
19712 * macros/functions for gaining access to the thread information structure
19713@@ -162,26 +162,18 @@ struct thread_info {
19714
19715 DECLARE_PER_CPU(unsigned long, kernel_stack);
19716
19717+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
19718+
19719 static inline struct thread_info *current_thread_info(void)
19720 {
19721- struct thread_info *ti;
19722- ti = (void *)(this_cpu_read_stable(kernel_stack) +
19723- KERNEL_STACK_OFFSET - THREAD_SIZE);
19724- return ti;
19725+ return this_cpu_read_stable(current_tinfo);
19726 }
19727
19728 #else /* !__ASSEMBLY__ */
19729
19730 /* how to get the thread information struct from ASM */
19731 #define GET_THREAD_INFO(reg) \
19732- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
19733- _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
19734-
19735-/*
19736- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
19737- * a certain register (to be used in assembler memory operands).
19738- */
19739-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
19740+ _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
19741
19742 #endif
19743
19744@@ -237,5 +229,12 @@ static inline bool is_ia32_task(void)
19745 extern void arch_task_cache_init(void);
19746 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
19747 extern void arch_release_task_struct(struct task_struct *tsk);
19748+
19749+#define __HAVE_THREAD_FUNCTIONS
19750+#define task_thread_info(task) (&(task)->tinfo)
19751+#define task_stack_page(task) ((task)->stack)
19752+#define setup_thread_stack(p, org) do {} while (0)
19753+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
19754+
19755 #endif
19756 #endif /* _ASM_X86_THREAD_INFO_H */
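[Editor's note] With thread_info moved into task_struct, current_thread_info() above stops deriving the pointer from the stack and reads a per-CPU variable instead, which is why KERNEL_STACK_OFFSET and the THREAD_INFO() assembler helper disappear. A sketch of the old mask-the-stack-pointer lookup this replaces (THREAD_SIZE assumed 8 KiB; the address is made up):

    #include <stdio.h>

    #define THREAD_SIZE (8UL * 1024)  /* assumed; the real value is per-arch */

    static unsigned long stack_base(unsigned long sp)
    {
        /* power-of-two THREAD_SIZE: clearing low bits yields the stack base */
        return sp & ~(THREAD_SIZE - 1);
    }

    int main(void)
    {
        unsigned long sp = 0xffff8800331d7e48UL;  /* made-up in-stack address */

        printf("thread_info was at %#lx\n", stack_base(sp));
        return 0;
    }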
19757diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
19758index 04905bf..1178cdf 100644
19759--- a/arch/x86/include/asm/tlbflush.h
19760+++ b/arch/x86/include/asm/tlbflush.h
19761@@ -17,18 +17,44 @@
19762
19763 static inline void __native_flush_tlb(void)
19764 {
19765+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19766+ u64 descriptor[2];
19767+
19768+ descriptor[0] = PCID_KERNEL;
19769+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory");
19770+ return;
19771+ }
19772+
19773+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19774+ if (static_cpu_has(X86_FEATURE_PCID)) {
19775+ unsigned int cpu = raw_get_cpu();
19776+
19777+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
19778+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
19779+ raw_put_cpu_no_resched();
19780+ return;
19781+ }
19782+#endif
19783+
19784 native_write_cr3(native_read_cr3());
19785 }
19786
19787 static inline void __native_flush_tlb_global_irq_disabled(void)
19788 {
19789- unsigned long cr4;
19790+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19791+ u64 descriptor[2];
19792
19793- cr4 = native_read_cr4();
19794- /* clear PGE */
19795- native_write_cr4(cr4 & ~X86_CR4_PGE);
19796- /* write old PGE again and flush TLBs */
19797- native_write_cr4(cr4);
19798+ descriptor[0] = PCID_KERNEL;
19799+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
19800+ } else {
19801+ unsigned long cr4;
19802+
19803+ cr4 = native_read_cr4();
19804+ /* clear PGE */
19805+ native_write_cr4(cr4 & ~X86_CR4_PGE);
19806+ /* write old PGE again and flush TLBs */
19807+ native_write_cr4(cr4);
19808+ }
19809 }
19810
19811 static inline void __native_flush_tlb_global(void)
19812@@ -49,6 +75,41 @@ static inline void __native_flush_tlb_global(void)
19813
19814 static inline void __native_flush_tlb_single(unsigned long addr)
19815 {
19816+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19817+ u64 descriptor[2];
19818+
19819+ descriptor[0] = PCID_KERNEL;
19820+ descriptor[1] = addr;
19821+
19822+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19823+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
19824+ if (addr < TASK_SIZE_MAX)
19825+ descriptor[1] += pax_user_shadow_base;
19826+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19827+ }
19828+
19829+ descriptor[0] = PCID_USER;
19830+ descriptor[1] = addr;
19831+#endif
19832+
19833+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19834+ return;
19835+ }
19836+
19837+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19838+ if (static_cpu_has(X86_FEATURE_PCID)) {
19839+ unsigned int cpu = raw_get_cpu();
19840+
19841+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
19842+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19843+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
19844+ raw_put_cpu_no_resched();
19845+
19846+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
19847+ addr += pax_user_shadow_base;
19848+ }
19849+#endif
19850+
19851 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19852 }
19853
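[Editor's note] The rewritten flush helpers above prefer INVPCID when the CPU has it; __ASM_INVPCID in processor.h hand-encodes "invpcid (%rdx),%rax" as raw bytes for older assemblers. The memory operand is a 16-byte descriptor (PCID in the low quadword, linear address in the high one) and the register operand selects one of four invalidation types. A layout-only sketch; the instruction itself is privileged, so this just fills and prints a descriptor:

    #include <inttypes.h>
    #include <stdio.h>

    #define INVPCID_SINGLE_ADDRESS 0UL  /* one address within one PCID */
    #define INVPCID_SINGLE_CONTEXT 1UL  /* everything tagged with one PCID */
    #define INVPCID_ALL_GLOBAL     2UL  /* all PCIDs, global entries too */
    #define INVPCID_ALL_NONGLOBAL  3UL  /* all PCIDs, except global entries */

    int main(void)
    {
        uint64_t descriptor[2];

        descriptor[0] = 1;                 /* PCID_USER */
        descriptor[1] = 0x7f0000001000ULL; /* linear address to invalidate */

        printf("type=%lu pcid=%" PRIu64 " addr=%#" PRIx64 "\n",
               INVPCID_SINGLE_ADDRESS, descriptor[0], descriptor[1]);
        return 0;
    }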
19854diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
19855index 0d592e0..526f797 100644
19856--- a/arch/x86/include/asm/uaccess.h
19857+++ b/arch/x86/include/asm/uaccess.h
19858@@ -7,6 +7,7 @@
19859 #include <linux/compiler.h>
19860 #include <linux/thread_info.h>
19861 #include <linux/string.h>
19862+#include <linux/spinlock.h>
19863 #include <asm/asm.h>
19864 #include <asm/page.h>
19865 #include <asm/smap.h>
19866@@ -29,7 +30,12 @@
19867
19868 #define get_ds() (KERNEL_DS)
19869 #define get_fs() (current_thread_info()->addr_limit)
19870+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19871+void __set_fs(mm_segment_t x);
19872+void set_fs(mm_segment_t x);
19873+#else
19874 #define set_fs(x) (current_thread_info()->addr_limit = (x))
19875+#endif
19876
19877 #define segment_eq(a, b) ((a).seg == (b).seg)
19878
19879@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
19880 * checks that the pointer is in the user space range - after calling
19881 * this function, memory access functions may still return -EFAULT.
19882 */
19883-#define access_ok(type, addr, size) \
19884- likely(!__range_not_ok(addr, size, user_addr_max()))
19885+extern int _cond_resched(void);
19886+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
19887+#define access_ok(type, addr, size) \
19888+({ \
19889+ unsigned long __size = size; \
19890+ unsigned long __addr = (unsigned long)addr; \
19891+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
19892+ if (__ret_ao && __size) { \
19893+ unsigned long __addr_ao = __addr & PAGE_MASK; \
19894+ unsigned long __end_ao = __addr + __size - 1; \
19895+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
19896+ while (__addr_ao <= __end_ao) { \
19897+ char __c_ao; \
19898+ __addr_ao += PAGE_SIZE; \
19899+ if (__size > PAGE_SIZE) \
19900+ _cond_resched(); \
19901+ if (__get_user(__c_ao, (char __user *)__addr)) \
19902+ break; \
19903+ if (type != VERIFY_WRITE) { \
19904+ __addr = __addr_ao; \
19905+ continue; \
19906+ } \
19907+ if (__put_user(__c_ao, (char __user *)__addr)) \
19908+ break; \
19909+ __addr = __addr_ao; \
19910+ } \
19911+ } \
19912+ } \
19913+ __ret_ao; \
19914+})
19915
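[Editor's note] The access_ok() above goes beyond a range check: when the range crosses a page boundary it walks it a page at a time, prefaulting one byte per page with __get_user() (and writing it back with __put_user() for VERIFY_WRITE) so faults surface eagerly, with _cond_resched() keeping long walks preemptible. A userspace sketch of just the walk arithmetic (PAGE_SIZE assumed 4096; plain volatile reads stand in for the user accessors):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    static void prefault(const char *addr, unsigned long size)
    {
        unsigned long end = (unsigned long)addr + size - 1;
        unsigned long a = ((unsigned long)addr & PAGE_MASK) + PAGE_SIZE;

        (void)*(volatile const char *)addr;  /* first page */
        for (; a <= end; a += PAGE_SIZE)     /* one byte per later page */
            (void)*(volatile const char *)a;
    }

    int main(void)
    {
        char *buf = calloc(3, PAGE_SIZE);

        prefault(buf + 100, 2 * PAGE_SIZE);  /* typically spans three pages */
        puts("prefaulted");
        free(buf);
        return 0;
    }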
19916 /*
19917 * The exception table consists of pairs of addresses relative to the
19918@@ -176,10 +210,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19919 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
19920 __chk_user_ptr(ptr); \
19921 might_fault(); \
19922+ pax_open_userland(); \
19923 asm volatile("call __get_user_%P3" \
19924 : "=a" (__ret_gu), "=r" (__val_gu) \
19925 : "0" (ptr), "i" (sizeof(*(ptr)))); \
19926 (x) = (__typeof__(*(ptr))) __val_gu; \
19927+ pax_close_userland(); \
19928 __ret_gu; \
19929 })
19930
19931@@ -187,13 +223,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19932 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
19933 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
19934
19935-
19936+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19937+#define __copyuser_seg "gs;"
19938+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
19939+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
19940+#else
19941+#define __copyuser_seg
19942+#define __COPYUSER_SET_ES
19943+#define __COPYUSER_RESTORE_ES
19944+#endif
19945
19946 #ifdef CONFIG_X86_32
19947 #define __put_user_asm_u64(x, addr, err, errret) \
19948 asm volatile(ASM_STAC "\n" \
19949- "1: movl %%eax,0(%2)\n" \
19950- "2: movl %%edx,4(%2)\n" \
19951+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
19952+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
19953 "3: " ASM_CLAC "\n" \
19954 ".section .fixup,\"ax\"\n" \
19955 "4: movl %3,%0\n" \
19956@@ -206,8 +250,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19957
19958 #define __put_user_asm_ex_u64(x, addr) \
19959 asm volatile(ASM_STAC "\n" \
19960- "1: movl %%eax,0(%1)\n" \
19961- "2: movl %%edx,4(%1)\n" \
19962+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
19963+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
19964 "3: " ASM_CLAC "\n" \
19965 _ASM_EXTABLE_EX(1b, 2b) \
19966 _ASM_EXTABLE_EX(2b, 3b) \
19967@@ -257,7 +301,8 @@ extern void __put_user_8(void);
19968 __typeof__(*(ptr)) __pu_val; \
19969 __chk_user_ptr(ptr); \
19970 might_fault(); \
19971- __pu_val = x; \
19972+ __pu_val = (x); \
19973+ pax_open_userland(); \
19974 switch (sizeof(*(ptr))) { \
19975 case 1: \
19976 __put_user_x(1, __pu_val, ptr, __ret_pu); \
19977@@ -275,6 +320,7 @@ extern void __put_user_8(void);
19978 __put_user_x(X, __pu_val, ptr, __ret_pu); \
19979 break; \
19980 } \
19981+ pax_close_userland(); \
19982 __ret_pu; \
19983 })
19984
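
Each generic accessor above now brackets the actual user access with pax_open_userland()/pax_close_userland(), which switch the user mapping in on UDEREF kernels and compile away otherwise. The bracketing pattern in isolation, with the two helpers as empty stand-ins (a GNU statement expression, as in the kernel macros):

/* Empty stand-ins; the real helpers flip the user/kernel mapping
 * state on UDEREF kernels and are no-ops elsewhere. */
static inline void pax_open_userland(void) { }
static inline void pax_close_userland(void) { }

/* open, perform the access, close, yield the access's result */
#define with_userland(access)        \
({                                   \
    long __ret;                      \
    pax_open_userland();             \
    __ret = (access);                \
    pax_close_userland();            \
    __ret;                           \
})
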
19985@@ -355,8 +401,10 @@ do { \
19986 } while (0)
19987
19988 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19989+do { \
19990+ pax_open_userland(); \
19991 asm volatile(ASM_STAC "\n" \
19992- "1: mov"itype" %2,%"rtype"1\n" \
19993+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
19994 "2: " ASM_CLAC "\n" \
19995 ".section .fixup,\"ax\"\n" \
19996 "3: mov %3,%0\n" \
19997@@ -364,8 +412,10 @@ do { \
19998 " jmp 2b\n" \
19999 ".previous\n" \
20000 _ASM_EXTABLE(1b, 3b) \
20001- : "=r" (err), ltype(x) \
20002- : "m" (__m(addr)), "i" (errret), "0" (err))
20003+ : "=r" (err), ltype (x) \
20004+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
20005+ pax_close_userland(); \
20006+} while (0)
20007
20008 #define __get_user_size_ex(x, ptr, size) \
20009 do { \
20010@@ -389,7 +439,7 @@ do { \
20011 } while (0)
20012
20013 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
20014- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
20015+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
20016 "2:\n" \
20017 _ASM_EXTABLE_EX(1b, 2b) \
20018 : ltype(x) : "m" (__m(addr)))
20019@@ -406,13 +456,24 @@ do { \
20020 int __gu_err; \
20021 unsigned long __gu_val; \
20022 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
20023- (x) = (__force __typeof__(*(ptr)))__gu_val; \
20024+ (x) = (__typeof__(*(ptr)))__gu_val; \
20025 __gu_err; \
20026 })
20027
20028 /* FIXME: this hack is definitely wrong -AK */
20029 struct __large_struct { unsigned long buf[100]; };
20030-#define __m(x) (*(struct __large_struct __user *)(x))
20031+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20032+#define ____m(x) \
20033+({ \
20034+ unsigned long ____x = (unsigned long)(x); \
20035+ if (____x < pax_user_shadow_base) \
20036+ ____x += pax_user_shadow_base; \
20037+ (typeof(x))____x; \
20038+})
20039+#else
20040+#define ____m(x) (x)
20041+#endif
20042+#define __m(x) (*(struct __large_struct __user *)____m(x))
20043
20044 /*
20045 * Tell gcc we read from memory instead of writing: this is because
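
On x86-64 UDEREF kernels, ____m() keeps raw user pointers from ever being dereferenced directly: anything below pax_user_shadow_base is rebased into a shadow mapping of user space, while genuine kernel addresses pass through unchanged. The same arithmetic as a self-contained sketch (user_shadow_base is an arbitrary illustrative constant):

#include <stdint.h>

static const uintptr_t user_shadow_base = 1UL << 47;   /* illustrative */

/* Mirror of ____m(): rebase user pointers into the shadow area,
 * leave kernel pointers (at or above the base) alone. */
static inline void *shadow_rebase(void *p)
{
    uintptr_t x = (uintptr_t)p;

    if (x < user_shadow_base)
        x += user_shadow_base;
    return (void *)x;
}
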
20046@@ -420,8 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
20047 * aliasing issues.
20048 */
20049 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
20050+do { \
20051+ pax_open_userland(); \
20052 asm volatile(ASM_STAC "\n" \
20053- "1: mov"itype" %"rtype"1,%2\n" \
20054+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
20055 "2: " ASM_CLAC "\n" \
20056 ".section .fixup,\"ax\"\n" \
20057 "3: mov %3,%0\n" \
20058@@ -429,10 +492,12 @@ struct __large_struct { unsigned long buf[100]; };
20059 ".previous\n" \
20060 _ASM_EXTABLE(1b, 3b) \
20061 : "=r"(err) \
20062- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
20063+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
20064+ pax_close_userland(); \
20065+} while (0)
20066
20067 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
20068- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
20069+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
20070 "2:\n" \
20071 _ASM_EXTABLE_EX(1b, 2b) \
20072 : : ltype(x), "m" (__m(addr)))
20073@@ -442,11 +507,13 @@ struct __large_struct { unsigned long buf[100]; };
20074 */
20075 #define uaccess_try do { \
20076 current_thread_info()->uaccess_err = 0; \
20077+ pax_open_userland(); \
20078 stac(); \
20079 barrier();
20080
20081 #define uaccess_catch(err) \
20082 clac(); \
20083+ pax_close_userland(); \
20084 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
20085 } while (0)
20086
20087@@ -471,8 +538,12 @@ struct __large_struct { unsigned long buf[100]; };
20088 * On error, the variable @x is set to zero.
20089 */
20090
20091+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20092+#define __get_user(x, ptr) get_user((x), (ptr))
20093+#else
20094 #define __get_user(x, ptr) \
20095 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
20096+#endif
20097
20098 /**
20099 * __put_user: - Write a simple value into user space, with less checking.
20100@@ -494,8 +565,12 @@ struct __large_struct { unsigned long buf[100]; };
20101 * Returns zero on success, or -EFAULT on error.
20102 */
20103
20104+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20105+#define __put_user(x, ptr) put_user((x), (ptr))
20106+#else
20107 #define __put_user(x, ptr) \
20108 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
20109+#endif
20110
20111 #define __get_user_unaligned __get_user
20112 #define __put_user_unaligned __put_user
20113@@ -513,7 +588,7 @@ struct __large_struct { unsigned long buf[100]; };
20114 #define get_user_ex(x, ptr) do { \
20115 unsigned long __gue_val; \
20116 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
20117- (x) = (__force __typeof__(*(ptr)))__gue_val; \
20118+ (x) = (__typeof__(*(ptr)))__gue_val; \
20119 } while (0)
20120
20121 #define put_user_try uaccess_try
20122@@ -542,18 +617,19 @@ extern void __cmpxchg_wrong_size(void)
20123 __typeof__(ptr) __uval = (uval); \
20124 __typeof__(*(ptr)) __old = (old); \
20125 __typeof__(*(ptr)) __new = (new); \
20126+ pax_open_userland(); \
20127 switch (size) { \
20128 case 1: \
20129 { \
20130 asm volatile("\t" ASM_STAC "\n" \
20131- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
20132+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
20133 "2:\t" ASM_CLAC "\n" \
20134 "\t.section .fixup, \"ax\"\n" \
20135 "3:\tmov %3, %0\n" \
20136 "\tjmp 2b\n" \
20137 "\t.previous\n" \
20138 _ASM_EXTABLE(1b, 3b) \
20139- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20140+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20141 : "i" (-EFAULT), "q" (__new), "1" (__old) \
20142 : "memory" \
20143 ); \
20144@@ -562,14 +638,14 @@ extern void __cmpxchg_wrong_size(void)
20145 case 2: \
20146 { \
20147 asm volatile("\t" ASM_STAC "\n" \
20148- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
20149+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
20150 "2:\t" ASM_CLAC "\n" \
20151 "\t.section .fixup, \"ax\"\n" \
20152 "3:\tmov %3, %0\n" \
20153 "\tjmp 2b\n" \
20154 "\t.previous\n" \
20155 _ASM_EXTABLE(1b, 3b) \
20156- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20157+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20158 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20159 : "memory" \
20160 ); \
20161@@ -578,14 +654,14 @@ extern void __cmpxchg_wrong_size(void)
20162 case 4: \
20163 { \
20164 asm volatile("\t" ASM_STAC "\n" \
20165- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
20166+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
20167 "2:\t" ASM_CLAC "\n" \
20168 "\t.section .fixup, \"ax\"\n" \
20169 "3:\tmov %3, %0\n" \
20170 "\tjmp 2b\n" \
20171 "\t.previous\n" \
20172 _ASM_EXTABLE(1b, 3b) \
20173- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20174+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20175 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20176 : "memory" \
20177 ); \
20178@@ -597,14 +673,14 @@ extern void __cmpxchg_wrong_size(void)
20179 __cmpxchg_wrong_size(); \
20180 \
20181 asm volatile("\t" ASM_STAC "\n" \
20182- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
20183+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
20184 "2:\t" ASM_CLAC "\n" \
20185 "\t.section .fixup, \"ax\"\n" \
20186 "3:\tmov %3, %0\n" \
20187 "\tjmp 2b\n" \
20188 "\t.previous\n" \
20189 _ASM_EXTABLE(1b, 3b) \
20190- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20191+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20192 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20193 : "memory" \
20194 ); \
20195@@ -613,6 +689,7 @@ extern void __cmpxchg_wrong_size(void)
20196 default: \
20197 __cmpxchg_wrong_size(); \
20198 } \
20199+ pax_close_userland(); \
20200 *__uval = __old; \
20201 __ret; \
20202 })
20203@@ -636,17 +713,6 @@ extern struct movsl_mask {
20204
20205 #define ARCH_HAS_NOCACHE_UACCESS 1
20206
20207-#ifdef CONFIG_X86_32
20208-# include <asm/uaccess_32.h>
20209-#else
20210-# include <asm/uaccess_64.h>
20211-#endif
20212-
20213-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
20214- unsigned n);
20215-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
20216- unsigned n);
20217-
20218 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
20219 # define copy_user_diag __compiletime_error
20220 #else
20221@@ -656,7 +722,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
20222 extern void copy_user_diag("copy_from_user() buffer size is too small")
20223 copy_from_user_overflow(void);
20224 extern void copy_user_diag("copy_to_user() buffer size is too small")
20225-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
20226+copy_to_user_overflow(void);
20227
20228 #undef copy_user_diag
20229
20230@@ -669,7 +735,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
20231
20232 extern void
20233 __compiletime_warning("copy_to_user() buffer size is not provably correct")
20234-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
20235+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
20236 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
20237
20238 #else
20239@@ -684,10 +750,16 @@ __copy_from_user_overflow(int size, unsigned long count)
20240
20241 #endif
20242
20243+#ifdef CONFIG_X86_32
20244+# include <asm/uaccess_32.h>
20245+#else
20246+# include <asm/uaccess_64.h>
20247+#endif
20248+
20249 static inline unsigned long __must_check
20250 copy_from_user(void *to, const void __user *from, unsigned long n)
20251 {
20252- int sz = __compiletime_object_size(to);
20253+ size_t sz = __compiletime_object_size(to);
20254
20255 might_fault();
20256
20257@@ -709,12 +781,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
20258 * case, and do only runtime checking for non-constant sizes.
20259 */
20260
20261- if (likely(sz < 0 || sz >= n))
20262- n = _copy_from_user(to, from, n);
20263- else if(__builtin_constant_p(n))
20264- copy_from_user_overflow();
20265- else
20266- __copy_from_user_overflow(sz, n);
20267+ if (likely(sz != (size_t)-1 && sz < n)) {
20268+ if(__builtin_constant_p(n))
20269+ copy_from_user_overflow();
20270+ else
20271+ __copy_from_user_overflow(sz, n);
20272+ } else if (access_ok(VERIFY_READ, from, n))
20273+ n = __copy_from_user(to, from, n);
20274+ else if ((long)n > 0)
20275+ memset(to, 0, n);
20276
20277 return n;
20278 }
20279@@ -722,17 +797,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
20280 static inline unsigned long __must_check
20281 copy_to_user(void __user *to, const void *from, unsigned long n)
20282 {
20283- int sz = __compiletime_object_size(from);
20284+ size_t sz = __compiletime_object_size(from);
20285
20286 might_fault();
20287
20288 /* See the comment in copy_from_user() above. */
20289- if (likely(sz < 0 || sz >= n))
20290- n = _copy_to_user(to, from, n);
20291- else if(__builtin_constant_p(n))
20292- copy_to_user_overflow();
20293- else
20294- __copy_to_user_overflow(sz, n);
20295+ if (likely(sz != (size_t)-1 && sz < n)) {
20296+ if(__builtin_constant_p(n))
20297+ copy_to_user_overflow();
20298+ else
20299+ __copy_to_user_overflow(sz, n);
20300+ } else if (access_ok(VERIFY_WRITE, to, n))
20301+ n = __copy_to_user(to, from, n);
20302
20303 return n;
20304 }
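
The rewritten copy_from_user() inverts the old logic: a provable compile-time overflow of the destination now fails the copy outright instead of merely diagnosing it, the runtime path goes through access_ok(), and when even that fails the destination is cleared so stale kernel memory cannot leak to the caller. A compilable sketch of the control flow; access_ok_read() and raw_copy_from() are stand-ins for the corresponding kernel primitives, and objsz models __compiletime_object_size(), which yields (size_t)-1 when the size is unknown:

#include <stddef.h>
#include <string.h>

static int access_ok_read(const void *from, unsigned long n)
{
    (void)from; (void)n;
    return 1;                       /* stand-in: assume the range is valid */
}

static unsigned long raw_copy_from(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);            /* stand-in for __copy_from_user() */
    return 0;                       /* 0 bytes left uncopied */
}

static unsigned long checked_copy_from(void *to, const void *from,
                                       unsigned long n, size_t objsz)
{
    if (objsz != (size_t)-1 && objsz < n)
        return n;                   /* provable overflow: copy nothing */
    if (access_ok_read(from, n))
        return raw_copy_from(to, from, n);
    if ((long)n > 0)
        memset(to, 0, n);           /* a failed copy must not leak old data */
    return n;
}
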
20305diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
20306index 3c03a5d..1071638 100644
20307--- a/arch/x86/include/asm/uaccess_32.h
20308+++ b/arch/x86/include/asm/uaccess_32.h
20309@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
20310 static __always_inline unsigned long __must_check
20311 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
20312 {
20313+ if ((long)n < 0)
20314+ return n;
20315+
20316+ check_object_size(from, n, true);
20317+
20318 if (__builtin_constant_p(n)) {
20319 unsigned long ret;
20320
20321@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
20322 __copy_to_user(void __user *to, const void *from, unsigned long n)
20323 {
20324 might_fault();
20325+
20326 return __copy_to_user_inatomic(to, from, n);
20327 }
20328
20329 static __always_inline unsigned long
20330 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
20331 {
20332+ if ((long)n < 0)
20333+ return n;
20334+
20335 /* Avoid zeroing the tail if the copy fails..
20336 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
20337 * but as the zeroing behaviour is only significant when n is not
20338@@ -137,6 +146,12 @@ static __always_inline unsigned long
20339 __copy_from_user(void *to, const void __user *from, unsigned long n)
20340 {
20341 might_fault();
20342+
20343+ if ((long)n < 0)
20344+ return n;
20345+
20346+ check_object_size(to, n, false);
20347+
20348 if (__builtin_constant_p(n)) {
20349 unsigned long ret;
20350
20351@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
20352 const void __user *from, unsigned long n)
20353 {
20354 might_fault();
20355+
20356+ if ((long)n < 0)
20357+ return n;
20358+
20359 if (__builtin_constant_p(n)) {
20360 unsigned long ret;
20361
20362@@ -181,7 +200,10 @@ static __always_inline unsigned long
20363 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20364 unsigned long n)
20365 {
20366- return __copy_from_user_ll_nocache_nozero(to, from, n);
20367+ if ((long)n < 0)
20368+ return n;
20369+
20370+ return __copy_from_user_ll_nocache_nozero(to, from, n);
20371 }
20372
20373 #endif /* _ASM_X86_UACCESS_32_H */
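
Every 32-bit accessor above now opens with the same one-line guard: a size whose top bit is set is almost certainly a negative length converted to unsigned, and returning it unchanged reports the whole request as uncopied before the low-level copy routines ever see it. The guard in isolation (memcpy stands in for __copy_from_user_ll()):

#include <string.h>

static unsigned long guarded_copy(void *to, const void *from, unsigned long n)
{
    if ((long)n < 0)
        return n;                   /* poisoned length: fail closed */
    memcpy(to, from, n);            /* stand-in for the real copy */
    return 0;
}
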
20374diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20375index 12a26b9..206c200 100644
20376--- a/arch/x86/include/asm/uaccess_64.h
20377+++ b/arch/x86/include/asm/uaccess_64.h
20378@@ -10,6 +10,9 @@
20379 #include <asm/alternative.h>
20380 #include <asm/cpufeature.h>
20381 #include <asm/page.h>
20382+#include <asm/pgtable.h>
20383+
20384+#define set_fs(x) (current_thread_info()->addr_limit = (x))
20385
20386 /*
20387 * Copy To/From Userspace
20388@@ -17,14 +20,14 @@
20389
20390 /* Handles exceptions in both to and from, but doesn't do access_ok */
20391 __must_check unsigned long
20392-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
20393+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
20394 __must_check unsigned long
20395-copy_user_generic_string(void *to, const void *from, unsigned len);
20396+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
20397 __must_check unsigned long
20398-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20399+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
20400
20401 static __always_inline __must_check unsigned long
20402-copy_user_generic(void *to, const void *from, unsigned len)
20403+copy_user_generic(void *to, const void *from, unsigned long len)
20404 {
20405 unsigned ret;
20406
20407@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20408 }
20409
20410 __must_check unsigned long
20411-copy_in_user(void __user *to, const void __user *from, unsigned len);
20412+copy_in_user(void __user *to, const void __user *from, unsigned long len);
20413
20414 static __always_inline __must_check
20415-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20416+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20417 {
20418- int ret = 0;
20419+ size_t sz = __compiletime_object_size(dst);
20420+ unsigned ret = 0;
20421+
20422+ if (size > INT_MAX)
20423+ return size;
20424+
20425+ check_object_size(dst, size, false);
20426+
20427+#ifdef CONFIG_PAX_MEMORY_UDEREF
20428+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20429+ return size;
20430+#endif
20431+
20432+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20433+ if(__builtin_constant_p(size))
20434+ copy_from_user_overflow();
20435+ else
20436+ __copy_from_user_overflow(sz, size);
20437+ return size;
20438+ }
20439
20440 if (!__builtin_constant_p(size))
20441- return copy_user_generic(dst, (__force void *)src, size);
20442+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20443 switch (size) {
20444- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20445+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20446 ret, "b", "b", "=q", 1);
20447 return ret;
20448- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20449+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20450 ret, "w", "w", "=r", 2);
20451 return ret;
20452- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20453+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20454 ret, "l", "k", "=r", 4);
20455 return ret;
20456- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20457+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20458 ret, "q", "", "=r", 8);
20459 return ret;
20460 case 10:
20461- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20462+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20463 ret, "q", "", "=r", 10);
20464 if (unlikely(ret))
20465 return ret;
20466 __get_user_asm(*(u16 *)(8 + (char *)dst),
20467- (u16 __user *)(8 + (char __user *)src),
20468+ (const u16 __user *)(8 + (const char __user *)src),
20469 ret, "w", "w", "=r", 2);
20470 return ret;
20471 case 16:
20472- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20473+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20474 ret, "q", "", "=r", 16);
20475 if (unlikely(ret))
20476 return ret;
20477 __get_user_asm(*(u64 *)(8 + (char *)dst),
20478- (u64 __user *)(8 + (char __user *)src),
20479+ (const u64 __user *)(8 + (const char __user *)src),
20480 ret, "q", "", "=r", 8);
20481 return ret;
20482 default:
20483- return copy_user_generic(dst, (__force void *)src, size);
20484+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20485 }
20486 }
20487
20488 static __always_inline __must_check
20489-int __copy_from_user(void *dst, const void __user *src, unsigned size)
20490+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20491 {
20492 might_fault();
20493 return __copy_from_user_nocheck(dst, src, size);
20494 }
20495
20496 static __always_inline __must_check
20497-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20498+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20499 {
20500- int ret = 0;
20501+ size_t sz = __compiletime_object_size(src);
20502+ unsigned ret = 0;
20503+
20504+ if (size > INT_MAX)
20505+ return size;
20506+
20507+ check_object_size(src, size, true);
20508+
20509+#ifdef CONFIG_PAX_MEMORY_UDEREF
20510+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20511+ return size;
20512+#endif
20513+
20514+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20515+ if(__builtin_constant_p(size))
20516+ copy_to_user_overflow();
20517+ else
20518+ __copy_to_user_overflow(sz, size);
20519+ return size;
20520+ }
20521
20522 if (!__builtin_constant_p(size))
20523- return copy_user_generic((__force void *)dst, src, size);
20524+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20525 switch (size) {
20526- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
20527+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
20528 ret, "b", "b", "iq", 1);
20529 return ret;
20530- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
20531+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
20532 ret, "w", "w", "ir", 2);
20533 return ret;
20534- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
20535+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
20536 ret, "l", "k", "ir", 4);
20537 return ret;
20538- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
20539+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20540 ret, "q", "", "er", 8);
20541 return ret;
20542 case 10:
20543- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20544+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20545 ret, "q", "", "er", 10);
20546 if (unlikely(ret))
20547 return ret;
20548 asm("":::"memory");
20549- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
20550+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
20551 ret, "w", "w", "ir", 2);
20552 return ret;
20553 case 16:
20554- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20555+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20556 ret, "q", "", "er", 16);
20557 if (unlikely(ret))
20558 return ret;
20559 asm("":::"memory");
20560- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
20561+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
20562 ret, "q", "", "er", 8);
20563 return ret;
20564 default:
20565- return copy_user_generic((__force void *)dst, src, size);
20566+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20567 }
20568 }
20569
20570 static __always_inline __must_check
20571-int __copy_to_user(void __user *dst, const void *src, unsigned size)
20572+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
20573 {
20574 might_fault();
20575 return __copy_to_user_nocheck(dst, src, size);
20576 }
20577
20578 static __always_inline __must_check
20579-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20580+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20581 {
20582- int ret = 0;
20583+ unsigned ret = 0;
20584
20585 might_fault();
20586+
20587+ if (size > INT_MAX)
20588+ return size;
20589+
20590+#ifdef CONFIG_PAX_MEMORY_UDEREF
20591+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20592+ return size;
20593+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20594+ return size;
20595+#endif
20596+
20597 if (!__builtin_constant_p(size))
20598- return copy_user_generic((__force void *)dst,
20599- (__force void *)src, size);
20600+ return copy_user_generic((__force_kernel void *)____m(dst),
20601+ (__force_kernel const void *)____m(src), size);
20602 switch (size) {
20603 case 1: {
20604 u8 tmp;
20605- __get_user_asm(tmp, (u8 __user *)src,
20606+ __get_user_asm(tmp, (const u8 __user *)src,
20607 ret, "b", "b", "=q", 1);
20608 if (likely(!ret))
20609 __put_user_asm(tmp, (u8 __user *)dst,
20610@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20611 }
20612 case 2: {
20613 u16 tmp;
20614- __get_user_asm(tmp, (u16 __user *)src,
20615+ __get_user_asm(tmp, (const u16 __user *)src,
20616 ret, "w", "w", "=r", 2);
20617 if (likely(!ret))
20618 __put_user_asm(tmp, (u16 __user *)dst,
20619@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20620
20621 case 4: {
20622 u32 tmp;
20623- __get_user_asm(tmp, (u32 __user *)src,
20624+ __get_user_asm(tmp, (const u32 __user *)src,
20625 ret, "l", "k", "=r", 4);
20626 if (likely(!ret))
20627 __put_user_asm(tmp, (u32 __user *)dst,
20628@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20629 }
20630 case 8: {
20631 u64 tmp;
20632- __get_user_asm(tmp, (u64 __user *)src,
20633+ __get_user_asm(tmp, (const u64 __user *)src,
20634 ret, "q", "", "=r", 8);
20635 if (likely(!ret))
20636 __put_user_asm(tmp, (u64 __user *)dst,
20637@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20638 return ret;
20639 }
20640 default:
20641- return copy_user_generic((__force void *)dst,
20642- (__force void *)src, size);
20643+ return copy_user_generic((__force_kernel void *)____m(dst),
20644+ (__force_kernel const void *)____m(src), size);
20645 }
20646 }
20647
20648-static __must_check __always_inline int
20649-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
20650+static __must_check __always_inline unsigned long
20651+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
20652 {
20653 return __copy_from_user_nocheck(dst, src, size);
20654 }
20655
20656-static __must_check __always_inline int
20657-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
20658+static __must_check __always_inline unsigned long
20659+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
20660 {
20661 return __copy_to_user_nocheck(dst, src, size);
20662 }
20663
20664-extern long __copy_user_nocache(void *dst, const void __user *src,
20665- unsigned size, int zerorest);
20666+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
20667+ unsigned long size, int zerorest);
20668
20669-static inline int
20670-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
20671+static inline unsigned long
20672+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
20673 {
20674 might_fault();
20675+
20676+ if (size > INT_MAX)
20677+ return size;
20678+
20679+#ifdef CONFIG_PAX_MEMORY_UDEREF
20680+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20681+ return size;
20682+#endif
20683+
20684 return __copy_user_nocache(dst, src, size, 1);
20685 }
20686
20687-static inline int
20688+static inline unsigned long
20689 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
20690- unsigned size)
20691+ unsigned long size)
20692 {
20693+ if (size > INT_MAX)
20694+ return size;
20695+
20696+#ifdef CONFIG_PAX_MEMORY_UDEREF
20697+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20698+ return size;
20699+#endif
20700+
20701 return __copy_user_nocache(dst, src, size, 0);
20702 }
20703
20704 unsigned long
20705-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
20706+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
20707
20708 #endif /* _ASM_X86_UACCESS_64_H */
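
The 64-bit accessors switch from int to unsigned long sizes, cap every request at INT_MAX, and fold the object-size diagnostics that previously lived only in copy_{from,to}_user() into the nocheck paths as hard failures. The vetting order as a sketch; objsz again models __compiletime_object_size():

#include <limits.h>
#include <stddef.h>

/* Returns the byte count to report as uncopied, or 0 to proceed. */
static unsigned long vet_copy(unsigned long size, size_t objsz)
{
    if (size > INT_MAX)
        return size;                /* absurd length: refuse outright */
    if (objsz != (size_t)-1 && objsz < size)
        return size;                /* would overflow the kernel object */
    return 0;
}
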
20709diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
20710index 5b238981..77fdd78 100644
20711--- a/arch/x86/include/asm/word-at-a-time.h
20712+++ b/arch/x86/include/asm/word-at-a-time.h
20713@@ -11,7 +11,7 @@
20714 * and shift, for example.
20715 */
20716 struct word_at_a_time {
20717- const unsigned long one_bits, high_bits;
20718+ unsigned long one_bits, high_bits;
20719 };
20720
20721 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
20722diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
20723index e45e4da..44e8572 100644
20724--- a/arch/x86/include/asm/x86_init.h
20725+++ b/arch/x86/include/asm/x86_init.h
20726@@ -129,7 +129,7 @@ struct x86_init_ops {
20727 struct x86_init_timers timers;
20728 struct x86_init_iommu iommu;
20729 struct x86_init_pci pci;
20730-};
20731+} __no_const;
20732
20733 /**
20734 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
20735@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
20736 void (*setup_percpu_clockev)(void);
20737 void (*early_percpu_clock_init)(void);
20738 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
20739-};
20740+} __no_const;
20741
20742 struct timespec;
20743
20744@@ -168,7 +168,7 @@ struct x86_platform_ops {
20745 void (*save_sched_clock_state)(void);
20746 void (*restore_sched_clock_state)(void);
20747 void (*apic_post_init)(void);
20748-};
20749+} __no_const;
20750
20751 struct pci_dev;
20752 struct msi_msg;
20753@@ -185,7 +185,7 @@ struct x86_msi_ops {
20754 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
20755 u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag);
20756 u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag);
20757-};
20758+} __no_const;
20759
20760 struct IO_APIC_route_entry;
20761 struct io_apic_irq_attr;
20762@@ -206,7 +206,7 @@ struct x86_io_apic_ops {
20763 unsigned int destination, int vector,
20764 struct io_apic_irq_attr *attr);
20765 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
20766-};
20767+} __no_const;
20768
20769 extern struct x86_init_ops x86_init;
20770 extern struct x86_cpuinit_ops x86_cpuinit;
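
Tagging these ops structures __no_const is the flip side of PaX constification: function-pointer tables are made const wherever possible so they land in read-only memory, and the annotation marks the few that genuinely need runtime rewrites, as x86_init and friends do. Plain C shows the payoff for the common, never-rewritten case; io_ops and its members are purely illustrative:

#include <stdio.h>

struct io_ops {
    int  (*setup)(unsigned int irq);
    void (*teardown)(unsigned int irq);
};

static int setup_stub(unsigned int irq)
{
    printf("setup %u\n", irq);
    return 0;
}

static void teardown_stub(unsigned int irq)
{
    printf("teardown %u\n", irq);
}

/* const puts the table in .rodata: a stray or attacker-controlled
 * kernel write can no longer redirect these pointers. */
static const struct io_ops io_ops = {
    .setup    = setup_stub,
    .teardown = teardown_stub,
};

int main(void)
{
    return io_ops.setup(9);
}
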
20771diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
20772index c949923..c22bfa4 100644
20773--- a/arch/x86/include/asm/xen/page.h
20774+++ b/arch/x86/include/asm/xen/page.h
20775@@ -63,7 +63,7 @@ extern int m2p_remove_override(struct page *page,
20776 extern struct page *m2p_find_override(unsigned long mfn);
20777 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
20778
20779-static inline unsigned long pfn_to_mfn(unsigned long pfn)
20780+static inline unsigned long __intentional_overflow(-1) pfn_to_mfn(unsigned long pfn)
20781 {
20782 unsigned long mfn;
20783
20784diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
20785index d949ef2..479b9d1 100644
20786--- a/arch/x86/include/asm/xsave.h
20787+++ b/arch/x86/include/asm/xsave.h
20788@@ -82,8 +82,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20789 if (unlikely(err))
20790 return -EFAULT;
20791
20792+ pax_open_userland();
20793 __asm__ __volatile__(ASM_STAC "\n"
20794- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
20795+ "1:"
20796+ __copyuser_seg
20797+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
20798 "2: " ASM_CLAC "\n"
20799 ".section .fixup,\"ax\"\n"
20800 "3: movl $-1,%[err]\n"
20801@@ -93,18 +96,22 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20802 : [err] "=r" (err)
20803 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
20804 : "memory");
20805+ pax_close_userland();
20806 return err;
20807 }
20808
20809 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20810 {
20811 int err;
20812- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
20813+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
20814 u32 lmask = mask;
20815 u32 hmask = mask >> 32;
20816
20817+ pax_open_userland();
20818 __asm__ __volatile__(ASM_STAC "\n"
20819- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
20820+ "1:"
20821+ __copyuser_seg
20822+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
20823 "2: " ASM_CLAC "\n"
20824 ".section .fixup,\"ax\"\n"
20825 "3: movl $-1,%[err]\n"
20826@@ -114,6 +121,7 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20827 : [err] "=r" (err)
20828 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
20829 : "memory"); /* memory required? */
20830+ pax_close_userland();
20831 return err;
20832 }
20833
20834diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
20835index bbae024..e1528f9 100644
20836--- a/arch/x86/include/uapi/asm/e820.h
20837+++ b/arch/x86/include/uapi/asm/e820.h
20838@@ -63,7 +63,7 @@ struct e820map {
20839 #define ISA_START_ADDRESS 0xa0000
20840 #define ISA_END_ADDRESS 0x100000
20841
20842-#define BIOS_BEGIN 0x000a0000
20843+#define BIOS_BEGIN 0x000c0000
20844 #define BIOS_END 0x00100000
20845
20846 #define BIOS_ROM_BASE 0xffe00000
20847diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
20848index 7b0a55a..ad115bf 100644
20849--- a/arch/x86/include/uapi/asm/ptrace-abi.h
20850+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
20851@@ -49,7 +49,6 @@
20852 #define EFLAGS 144
20853 #define RSP 152
20854 #define SS 160
20855-#define ARGOFFSET R11
20856 #endif /* __ASSEMBLY__ */
20857
20858 /* top of stack page */
20859diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
20860index 047f9ff..4ba5ea6 100644
20861--- a/arch/x86/kernel/Makefile
20862+++ b/arch/x86/kernel/Makefile
20863@@ -24,7 +24,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
20864 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
20865 obj-$(CONFIG_IRQ_WORK) += irq_work.o
20866 obj-y += probe_roms.o
20867-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
20868+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
20869 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
20870 obj-$(CONFIG_X86_64) += mcount_64.o
20871 obj-y += syscall_$(BITS).o vsyscall_gtod.o
20872diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
20873index 86281ff..e046fc2 100644
20874--- a/arch/x86/kernel/acpi/boot.c
20875+++ b/arch/x86/kernel/acpi/boot.c
20876@@ -1296,7 +1296,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
20877 * If your system is blacklisted here, but you find that acpi=force
20878 * works for you, please contact linux-acpi@vger.kernel.org
20879 */
20880-static struct dmi_system_id __initdata acpi_dmi_table[] = {
20881+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
20882 /*
20883 * Boxes that need ACPI disabled
20884 */
20885@@ -1371,7 +1371,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
20886 };
20887
20888 /* second table for DMI checks that should run after early-quirks */
20889-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
20890+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
20891 /*
20892 * HP laptops which use a DSDT reporting as HP/SB400/10000,
20893 * which includes some code which overrides all temperature
20894diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
20895index 3136820..e2c6577 100644
20896--- a/arch/x86/kernel/acpi/sleep.c
20897+++ b/arch/x86/kernel/acpi/sleep.c
20898@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
20899 #else /* CONFIG_64BIT */
20900 #ifdef CONFIG_SMP
20901 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
20902+
20903+ pax_open_kernel();
20904 early_gdt_descr.address =
20905 (unsigned long)get_cpu_gdt_table(smp_processor_id());
20906+ pax_close_kernel();
20907+
20908 initial_gs = per_cpu_offset(smp_processor_id());
20909 #endif
20910 initial_code = (unsigned long)wakeup_long64;
20911diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
20912index 665c6b7..eae4d56 100644
20913--- a/arch/x86/kernel/acpi/wakeup_32.S
20914+++ b/arch/x86/kernel/acpi/wakeup_32.S
20915@@ -29,13 +29,11 @@ wakeup_pmode_return:
20916 # and restore the stack ... but you need gdt for this to work
20917 movl saved_context_esp, %esp
20918
20919- movl %cs:saved_magic, %eax
20920- cmpl $0x12345678, %eax
20921+ cmpl $0x12345678, saved_magic
20922 jne bogus_magic
20923
20924 # jump to place where we left off
20925- movl saved_eip, %eax
20926- jmp *%eax
20927+ jmp *(saved_eip)
20928
20929 bogus_magic:
20930 jmp bogus_magic
20931diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
20932index 703130f..27a155d 100644
20933--- a/arch/x86/kernel/alternative.c
20934+++ b/arch/x86/kernel/alternative.c
20935@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20936 */
20937 for (a = start; a < end; a++) {
20938 instr = (u8 *)&a->instr_offset + a->instr_offset;
20939+
20940+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20941+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20942+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20943+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20944+#endif
20945+
20946 replacement = (u8 *)&a->repl_offset + a->repl_offset;
20947 BUG_ON(a->replacementlen > a->instrlen);
20948 BUG_ON(a->instrlen > sizeof(insnbuf));
20949@@ -284,6 +291,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20950 add_nops(insnbuf + a->replacementlen,
20951 a->instrlen - a->replacementlen);
20952
20953+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20954+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20955+ instr = ktva_ktla(instr);
20956+#endif
20957+
20958 text_poke_early(instr, insnbuf, a->instrlen);
20959 }
20960 }
20961@@ -299,10 +311,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
20962 for (poff = start; poff < end; poff++) {
20963 u8 *ptr = (u8 *)poff + *poff;
20964
20965+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20966+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20967+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20968+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20969+#endif
20970+
20971 if (!*poff || ptr < text || ptr >= text_end)
20972 continue;
20973 /* turn DS segment override prefix into lock prefix */
20974- if (*ptr == 0x3e)
20975+ if (*ktla_ktva(ptr) == 0x3e)
20976 text_poke(ptr, ((unsigned char []){0xf0}), 1);
20977 }
20978 mutex_unlock(&text_mutex);
20979@@ -317,10 +335,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
20980 for (poff = start; poff < end; poff++) {
20981 u8 *ptr = (u8 *)poff + *poff;
20982
20983+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20984+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20985+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20986+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20987+#endif
20988+
20989 if (!*poff || ptr < text || ptr >= text_end)
20990 continue;
20991 /* turn lock prefix into DS segment override prefix */
20992- if (*ptr == 0xf0)
20993+ if (*ktla_ktva(ptr) == 0xf0)
20994 text_poke(ptr, ((unsigned char []){0x3E}), 1);
20995 }
20996 mutex_unlock(&text_mutex);
20997@@ -457,7 +481,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
20998
20999 BUG_ON(p->len > MAX_PATCH_LEN);
21000 /* prep the buffer with the original instructions */
21001- memcpy(insnbuf, p->instr, p->len);
21002+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
21003 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
21004 (unsigned long)p->instr, p->len);
21005
21006@@ -504,7 +528,7 @@ void __init alternative_instructions(void)
21007 if (!uniproc_patched || num_possible_cpus() == 1)
21008 free_init_pages("SMP alternatives",
21009 (unsigned long)__smp_locks,
21010- (unsigned long)__smp_locks_end);
21011+ PAGE_ALIGN((unsigned long)__smp_locks_end));
21012 #endif
21013
21014 apply_paravirt(__parainstructions, __parainstructions_end);
21015@@ -524,13 +548,17 @@ void __init alternative_instructions(void)
21016 * instructions. And on the local CPU you need to be protected against NMI or MCE
21017 * handlers seeing an inconsistent instruction while you patch.
21018 */
21019-void *__init_or_module text_poke_early(void *addr, const void *opcode,
21020+void *__kprobes text_poke_early(void *addr, const void *opcode,
21021 size_t len)
21022 {
21023 unsigned long flags;
21024 local_irq_save(flags);
21025- memcpy(addr, opcode, len);
21026+
21027+ pax_open_kernel();
21028+ memcpy(ktla_ktva(addr), opcode, len);
21029 sync_core();
21030+ pax_close_kernel();
21031+
21032 local_irq_restore(flags);
21033 /* Could also do a CLFLUSH here to speed up CPU recovery; but
21034 that causes hangs on some VIA CPUs. */
21035@@ -552,36 +580,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
21036 */
21037 void *text_poke(void *addr, const void *opcode, size_t len)
21038 {
21039- unsigned long flags;
21040- char *vaddr;
21041+ unsigned char *vaddr = ktla_ktva(addr);
21042 struct page *pages[2];
21043- int i;
21044+ size_t i;
21045
21046 if (!core_kernel_text((unsigned long)addr)) {
21047- pages[0] = vmalloc_to_page(addr);
21048- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
21049+ pages[0] = vmalloc_to_page(vaddr);
21050+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
21051 } else {
21052- pages[0] = virt_to_page(addr);
21053+ pages[0] = virt_to_page(vaddr);
21054 WARN_ON(!PageReserved(pages[0]));
21055- pages[1] = virt_to_page(addr + PAGE_SIZE);
21056+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
21057 }
21058 BUG_ON(!pages[0]);
21059- local_irq_save(flags);
21060- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
21061- if (pages[1])
21062- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
21063- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
21064- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
21065- clear_fixmap(FIX_TEXT_POKE0);
21066- if (pages[1])
21067- clear_fixmap(FIX_TEXT_POKE1);
21068- local_flush_tlb();
21069- sync_core();
21070- /* Could also do a CLFLUSH here to speed up CPU recovery; but
21071- that causes hangs on some VIA CPUs. */
21072+ text_poke_early(addr, opcode, len);
21073 for (i = 0; i < len; i++)
21074- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
21075- local_irq_restore(flags);
21076+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
21077 return addr;
21078 }
21079
21080@@ -601,7 +615,7 @@ int poke_int3_handler(struct pt_regs *regs)
21081 if (likely(!bp_patching_in_progress))
21082 return 0;
21083
21084- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
21085+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
21086 return 0;
21087
21088 /* set up the specified breakpoint handler */
21089@@ -635,7 +649,7 @@ int poke_int3_handler(struct pt_regs *regs)
21090 */
21091 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
21092 {
21093- unsigned char int3 = 0xcc;
21094+ const unsigned char int3 = 0xcc;
21095
21096 bp_int3_handler = handler;
21097 bp_int3_addr = (u8 *)addr + sizeof(int3);
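
text_poke() above is restructured to delegate the write to text_poke_early() (which in this patch briefly opens the kernel for writing via pax_open_kernel() instead of remapping the target page through fixmaps) and to keep the byte-for-byte verification afterwards. The resulting shape, with memcpy standing in for the protected write:

#include <assert.h>
#include <stddef.h>
#include <string.h>

static void poke_early(void *addr, const void *opcode, size_t len)
{
    /* the kernel version brackets this with pax_open_kernel()/
     * pax_close_kernel() and follows it with sync_core() */
    memcpy(addr, opcode, len);
}

static void *poke_and_verify(void *addr, const void *opcode, size_t len)
{
    size_t i;

    poke_early(addr, opcode, len);
    for (i = 0; i < len; i++)       /* the BUG_ON() loop above */
        assert(((unsigned char *)addr)[i] ==
               ((const unsigned char *)opcode)[i]);
    return addr;
}
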
21098diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
21099index ad28db7..c538b2c 100644
21100--- a/arch/x86/kernel/apic/apic.c
21101+++ b/arch/x86/kernel/apic/apic.c
21102@@ -201,7 +201,7 @@ int first_system_vector = 0xfe;
21103 /*
21104 * Debug level, exported for io_apic.c
21105 */
21106-unsigned int apic_verbosity;
21107+int apic_verbosity;
21108
21109 int pic_mode;
21110
21111@@ -2000,7 +2000,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
21112 apic_write(APIC_ESR, 0);
21113 v = apic_read(APIC_ESR);
21114 ack_APIC_irq();
21115- atomic_inc(&irq_err_count);
21116+ atomic_inc_unchecked(&irq_err_count);
21117
21118 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
21119 smp_processor_id(), v);
21120diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
21121index 7c1b294..e71d27f 100644
21122--- a/arch/x86/kernel/apic/apic_flat_64.c
21123+++ b/arch/x86/kernel/apic/apic_flat_64.c
21124@@ -154,7 +154,7 @@ static int flat_probe(void)
21125 return 1;
21126 }
21127
21128-static struct apic apic_flat = {
21129+static struct apic apic_flat __read_only = {
21130 .name = "flat",
21131 .probe = flat_probe,
21132 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
21133@@ -268,7 +268,7 @@ static int physflat_probe(void)
21134 return 0;
21135 }
21136
21137-static struct apic apic_physflat = {
21138+static struct apic apic_physflat __read_only = {
21139
21140 .name = "physical flat",
21141 .probe = physflat_probe,
21142diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
21143index 8c7c982..a225910 100644
21144--- a/arch/x86/kernel/apic/apic_noop.c
21145+++ b/arch/x86/kernel/apic/apic_noop.c
21146@@ -118,7 +118,7 @@ static void noop_apic_write(u32 reg, u32 v)
21147 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
21148 }
21149
21150-struct apic apic_noop = {
21151+struct apic apic_noop __read_only = {
21152 .name = "noop",
21153 .probe = noop_probe,
21154 .acpi_madt_oem_check = NULL,
21155diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
21156index e4840aa..e7d9dac 100644
21157--- a/arch/x86/kernel/apic/bigsmp_32.c
21158+++ b/arch/x86/kernel/apic/bigsmp_32.c
21159@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
21160 return dmi_bigsmp;
21161 }
21162
21163-static struct apic apic_bigsmp = {
21164+static struct apic apic_bigsmp __read_only = {
21165
21166 .name = "bigsmp",
21167 .probe = probe_bigsmp,
21168diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
21169index 81e08ef..abc77e5 100644
21170--- a/arch/x86/kernel/apic/io_apic.c
21171+++ b/arch/x86/kernel/apic/io_apic.c
21172@@ -1042,7 +1042,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
21173 }
21174 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
21175
21176-void lock_vector_lock(void)
21177+void lock_vector_lock(void) __acquires(vector_lock)
21178 {
21179 /* Used to ensure the online set of cpus does not change
21180 * during assign_irq_vector.
21181@@ -1050,7 +1050,7 @@ void lock_vector_lock(void)
21182 raw_spin_lock(&vector_lock);
21183 }
21184
21185-void unlock_vector_lock(void)
21186+void unlock_vector_lock(void) __releases(vector_lock)
21187 {
21188 raw_spin_unlock(&vector_lock);
21189 }
21190@@ -2349,7 +2349,7 @@ static void ack_apic_edge(struct irq_data *data)
21191 ack_APIC_irq();
21192 }
21193
21194-atomic_t irq_mis_count;
21195+atomic_unchecked_t irq_mis_count;
21196
21197 #ifdef CONFIG_GENERIC_PENDING_IRQ
21198 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
21199@@ -2490,7 +2490,7 @@ static void ack_apic_level(struct irq_data *data)
21200 * at the cpu.
21201 */
21202 if (!(v & (1 << (i & 0x1f)))) {
21203- atomic_inc(&irq_mis_count);
21204+ atomic_inc_unchecked(&irq_mis_count);
21205
21206 eoi_ioapic_irq(irq, cfg);
21207 }
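
irq_err_count and irq_mis_count are pure statistics, so they move to atomic_unchecked_t: under the PaX REFCOUNT feature, plain atomic_inc() traps on overflow, and the unchecked flavour opts wrap-tolerant counters out of that check. The same idea in standard C is a relaxed atomic that is simply allowed to wrap:

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong irq_err_count;  /* wrapping is harmless here */

static void note_apic_error(void)
{
    /* no overflow trap wanted for a statistics counter */
    atomic_fetch_add_explicit(&irq_err_count, 1, memory_order_relaxed);
}

int main(void)
{
    note_apic_error();
    printf("%lu\n", atomic_load(&irq_err_count));
    return 0;
}
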
21208diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
21209index cceb352..a635fd8 100644
21210--- a/arch/x86/kernel/apic/probe_32.c
21211+++ b/arch/x86/kernel/apic/probe_32.c
21212@@ -72,7 +72,7 @@ static int probe_default(void)
21213 return 1;
21214 }
21215
21216-static struct apic apic_default = {
21217+static struct apic apic_default __read_only = {
21218
21219 .name = "default",
21220 .probe = probe_default,
21221diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
21222index e66766b..1c008ba 100644
21223--- a/arch/x86/kernel/apic/x2apic_cluster.c
21224+++ b/arch/x86/kernel/apic/x2apic_cluster.c
21225@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
21226 return notifier_from_errno(err);
21227 }
21228
21229-static struct notifier_block __refdata x2apic_cpu_notifier = {
21230+static struct notifier_block x2apic_cpu_notifier = {
21231 .notifier_call = update_clusterinfo,
21232 };
21233
21234@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
21235 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
21236 }
21237
21238-static struct apic apic_x2apic_cluster = {
21239+static struct apic apic_x2apic_cluster __read_only = {
21240
21241 .name = "cluster x2apic",
21242 .probe = x2apic_cluster_probe,
21243diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
21244index 6d600eb..0300c00 100644
21245--- a/arch/x86/kernel/apic/x2apic_phys.c
21246+++ b/arch/x86/kernel/apic/x2apic_phys.c
21247@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
21248 return apic == &apic_x2apic_phys;
21249 }
21250
21251-static struct apic apic_x2apic_phys = {
21252+static struct apic apic_x2apic_phys __read_only = {
21253
21254 .name = "physical x2apic",
21255 .probe = x2apic_phys_probe,
21256diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
21257index 293b41d..4df25fd 100644
21258--- a/arch/x86/kernel/apic/x2apic_uv_x.c
21259+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
21260@@ -350,7 +350,7 @@ static int uv_probe(void)
21261 return apic == &apic_x2apic_uv_x;
21262 }
21263
21264-static struct apic __refdata apic_x2apic_uv_x = {
21265+static struct apic apic_x2apic_uv_x __read_only = {
21266
21267 .name = "UV large system",
21268 .probe = uv_probe,
21269diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
21270index 5848744..56cb598 100644
21271--- a/arch/x86/kernel/apm_32.c
21272+++ b/arch/x86/kernel/apm_32.c
21273@@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex);
21274 * This is for buggy BIOS's that refer to (real mode) segment 0x40
21275 * even though they are called in protected mode.
21276 */
21277-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
21278+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
21279 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
21280
21281 static const char driver_version[] = "1.16ac"; /* no spaces */
21282@@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call)
21283 BUG_ON(cpu != 0);
21284 gdt = get_cpu_gdt_table(cpu);
21285 save_desc_40 = gdt[0x40 / 8];
21286+
21287+ pax_open_kernel();
21288 gdt[0x40 / 8] = bad_bios_desc;
21289+ pax_close_kernel();
21290
21291 apm_irq_save(flags);
21292 APM_DO_SAVE_SEGS;
21293@@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call)
21294 &call->esi);
21295 APM_DO_RESTORE_SEGS;
21296 apm_irq_restore(flags);
21297+
21298+ pax_open_kernel();
21299 gdt[0x40 / 8] = save_desc_40;
21300+ pax_close_kernel();
21301+
21302 put_cpu();
21303
21304 return call->eax & 0xff;
21305@@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void *_call)
21306 BUG_ON(cpu != 0);
21307 gdt = get_cpu_gdt_table(cpu);
21308 save_desc_40 = gdt[0x40 / 8];
21309+
21310+ pax_open_kernel();
21311 gdt[0x40 / 8] = bad_bios_desc;
21312+ pax_close_kernel();
21313
21314 apm_irq_save(flags);
21315 APM_DO_SAVE_SEGS;
21316@@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void *_call)
21317 &call->eax);
21318 APM_DO_RESTORE_SEGS;
21319 apm_irq_restore(flags);
21320+
21321+ pax_open_kernel();
21322 gdt[0x40 / 8] = save_desc_40;
21323+ pax_close_kernel();
21324+
21325 put_cpu();
21326 return error;
21327 }
21328@@ -2350,12 +2364,15 @@ static int __init apm_init(void)
21329 * code to that CPU.
21330 */
21331 gdt = get_cpu_gdt_table(0);
21332+
21333+ pax_open_kernel();
21334 set_desc_base(&gdt[APM_CS >> 3],
21335 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
21336 set_desc_base(&gdt[APM_CS_16 >> 3],
21337 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
21338 set_desc_base(&gdt[APM_DS >> 3],
21339 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
21340+ pax_close_kernel();
21341
21342 proc_create("apm", 0, NULL, &apm_file_ops);
21343
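
Every transient GDT edit in apm_32.c is now bracketed by pax_open_kernel()/pax_close_kernel(), because under KERNEXEC the GDT lives in read-only memory and must be opened for the duration of the update only. A user-space analogy using mprotect(); the kernel achieves the same effect with CR0.WP and page-table bits, not mprotect:

#include <sys/mman.h>

static long *table;                 /* plays the role of the read-only GDT */

static void open_kernel(void)  { mprotect(table, 4096, PROT_READ | PROT_WRITE); }
static void close_kernel(void) { mprotect(table, 4096, PROT_READ); }

int main(void)
{
    table = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (table == MAP_FAILED)
        return 1;
    table[0x40 / 8] = 0x4093;       /* initial setup while still writable */
    close_kernel();                 /* steady state: read-only */

    open_kernel();                  /* pax_open_kernel() */
    table[0x40 / 8] = 0;            /* the bracketed update */
    close_kernel();                 /* pax_close_kernel() */
    return 0;
}
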
21344diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
21345index 9f6b934..cf5ffb3 100644
21346--- a/arch/x86/kernel/asm-offsets.c
21347+++ b/arch/x86/kernel/asm-offsets.c
21348@@ -32,6 +32,8 @@ void common(void) {
21349 OFFSET(TI_flags, thread_info, flags);
21350 OFFSET(TI_status, thread_info, status);
21351 OFFSET(TI_addr_limit, thread_info, addr_limit);
21352+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
21353+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
21354
21355 BLANK();
21356 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
21357@@ -52,8 +54,26 @@ void common(void) {
21358 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
21359 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
21360 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
21361+
21362+#ifdef CONFIG_PAX_KERNEXEC
21363+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21364 #endif
21365
21366+#ifdef CONFIG_PAX_MEMORY_UDEREF
21367+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21368+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21369+#ifdef CONFIG_X86_64
21370+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21371+#endif
21372+#endif
21373+
21374+#endif
21375+
21376+ BLANK();
21377+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21378+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21379+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21380+
21381 #ifdef CONFIG_XEN
21382 BLANK();
21383 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
21384diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21385index e7c798b..2b2019b 100644
21386--- a/arch/x86/kernel/asm-offsets_64.c
21387+++ b/arch/x86/kernel/asm-offsets_64.c
21388@@ -77,6 +77,7 @@ int main(void)
21389 BLANK();
21390 #undef ENTRY
21391
21392+ DEFINE(TSS_size, sizeof(struct tss_struct));
21393 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21394 BLANK();
21395
21396diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21397index 7fd54f0..0691410 100644
21398--- a/arch/x86/kernel/cpu/Makefile
21399+++ b/arch/x86/kernel/cpu/Makefile
21400@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21401 CFLAGS_REMOVE_perf_event.o = -pg
21402 endif
21403
21404-# Make sure load_percpu_segment has no stackprotector
21405-nostackp := $(call cc-option, -fno-stack-protector)
21406-CFLAGS_common.o := $(nostackp)
21407-
21408 obj-y := intel_cacheinfo.o scattered.o topology.o
21409 obj-y += proc.o capflags.o powerflags.o common.o
21410 obj-y += rdrand.o
21411diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21412index ce8b8ff..d7d8851 100644
21413--- a/arch/x86/kernel/cpu/amd.c
21414+++ b/arch/x86/kernel/cpu/amd.c
21415@@ -728,7 +728,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21416 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21417 {
21418 /* AMD errata T13 (order #21922) */
21419- if ((c->x86 == 6)) {
21420+ if (c->x86 == 6) {
21421 /* Duron Rev A0 */
21422 if (c->x86_model == 3 && c->x86_mask == 0)
21423 size = 64;
21424diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21425index ef1b93f..150db65 100644
21426--- a/arch/x86/kernel/cpu/common.c
21427+++ b/arch/x86/kernel/cpu/common.c
21428@@ -90,60 +90,6 @@ static const struct cpu_dev default_cpu = {
21429
21430 static const struct cpu_dev *this_cpu = &default_cpu;
21431
21432-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21433-#ifdef CONFIG_X86_64
21434- /*
21435- * We need valid kernel segments for data and code in long mode too
21436- * IRET will check the segment types kkeil 2000/10/28
21437- * Also sysret mandates a special GDT layout
21438- *
21439- * TLS descriptors are currently at a different place compared to i386.
21440- * Hopefully nobody expects them at a fixed place (Wine?)
21441- */
21442- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21443- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21444- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21445- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21446- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21447- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21448-#else
21449- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21450- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21451- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21452- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21453- /*
21454- * Segments used for calling PnP BIOS have byte granularity.
21455- * They code segments and data segments have fixed 64k limits,
21456- * the transfer segment sizes are set at run time.
21457- */
21458- /* 32-bit code */
21459- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21460- /* 16-bit code */
21461- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21462- /* 16-bit data */
21463- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21464- /* 16-bit data */
21465- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21466- /* 16-bit data */
21467- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21468- /*
21469- * The APM segments have byte granularity and their bases
21470- * are set at run time. All have 64k limits.
21471- */
21472- /* 32-bit code */
21473- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21474- /* 16-bit code */
21475- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21476- /* data */
21477- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21478-
21479- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21480- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21481- GDT_STACK_CANARY_INIT
21482-#endif
21483-} };
21484-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21485-
21486 static int __init x86_xsave_setup(char *s)
21487 {
21488 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
21489@@ -295,6 +241,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21490 }
21491 }
21492
21493+#ifdef CONFIG_X86_64
21494+static __init int setup_disable_pcid(char *arg)
21495+{
21496+ setup_clear_cpu_cap(X86_FEATURE_PCID);
21497+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21498+
21499+#ifdef CONFIG_PAX_MEMORY_UDEREF
21500+ if (clone_pgd_mask != ~(pgdval_t)0UL)
21501+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21502+#endif
21503+
21504+ return 1;
21505+}
21506+__setup("nopcid", setup_disable_pcid);
21507+
21508+static void setup_pcid(struct cpuinfo_x86 *c)
21509+{
21510+ if (!cpu_has(c, X86_FEATURE_PCID)) {
21511+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
21512+
21513+#ifdef CONFIG_PAX_MEMORY_UDEREF
21514+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
21515+ pax_open_kernel();
21516+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21517+ pax_close_kernel();
21518+ printk("PAX: slow and weak UDEREF enabled\n");
21519+ } else
21520+ printk("PAX: UDEREF disabled\n");
21521+#endif
21522+
21523+ return;
21524+ }
21525+
21526+ printk("PAX: PCID detected\n");
21527+ set_in_cr4(X86_CR4_PCIDE);
21528+
21529+#ifdef CONFIG_PAX_MEMORY_UDEREF
21530+ pax_open_kernel();
21531+ clone_pgd_mask = ~(pgdval_t)0UL;
21532+ pax_close_kernel();
21533+ if (pax_user_shadow_base)
21534+ printk("PAX: weak UDEREF enabled\n");
21535+ else {
21536+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
21537+ printk("PAX: strong UDEREF enabled\n");
21538+ }
21539+#endif
21540+
21541+ if (cpu_has(c, X86_FEATURE_INVPCID))
21542+ printk("PAX: INVPCID detected\n");
21543+}
21544+#endif
21545+
21546 /*
21547 * Some CPU features depend on higher CPUID levels, which may not always
21548 * be available due to CPUID level capping or broken virtualization
21549@@ -395,7 +394,7 @@ void switch_to_new_gdt(int cpu)
21550 {
21551 struct desc_ptr gdt_descr;
21552
21553- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
21554+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
21555 gdt_descr.size = GDT_SIZE - 1;
21556 load_gdt(&gdt_descr);
21557 /* Reload the per-cpu base */
21558@@ -885,6 +884,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21559 setup_smep(c);
21560 setup_smap(c);
21561
21562+#ifdef CONFIG_X86_64
21563+ setup_pcid(c);
21564+#endif
21565+
21566 /*
21567 * The vendor-specific functions might have changed features.
21568 * Now we do "generic changes."
21569@@ -893,6 +896,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21570 /* Filter out anything that depends on CPUID levels we don't have */
21571 filter_cpuid_features(c, true);
21572
21573+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
21574+ setup_clear_cpu_cap(X86_FEATURE_SEP);
21575+#endif
21576+
21577 /* If the model name is still unset, do table lookup. */
21578 if (!c->x86_model_id[0]) {
21579 const char *p;
21580@@ -973,7 +980,7 @@ static void syscall32_cpu_init(void)
21581 void enable_sep_cpu(void)
21582 {
21583 int cpu = get_cpu();
21584- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21585+ struct tss_struct *tss = init_tss + cpu;
21586
21587 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21588 put_cpu();
21589@@ -1113,14 +1120,16 @@ static __init int setup_disablecpuid(char *arg)
21590 }
21591 __setup("clearcpuid=", setup_disablecpuid);
21592
21593+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
21594+EXPORT_PER_CPU_SYMBOL(current_tinfo);
21595+
21596 DEFINE_PER_CPU(unsigned long, kernel_stack) =
21597- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
21598+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
21599 EXPORT_PER_CPU_SYMBOL(kernel_stack);
21600
21601 #ifdef CONFIG_X86_64
21602-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21603-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
21604- (unsigned long) debug_idt_table };
21605+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21606+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
21607
21608 DEFINE_PER_CPU_FIRST(union irq_stack_union,
21609 irq_stack_union) __aligned(PAGE_SIZE) __visible;
21610@@ -1283,7 +1292,7 @@ void cpu_init(void)
21611 load_ucode_ap();
21612
21613 cpu = stack_smp_processor_id();
21614- t = &per_cpu(init_tss, cpu);
21615+ t = init_tss + cpu;
21616 oist = &per_cpu(orig_ist, cpu);
21617
21618 #ifdef CONFIG_NUMA
21619@@ -1318,7 +1327,6 @@ void cpu_init(void)
21620 wrmsrl(MSR_KERNEL_GS_BASE, 0);
21621 barrier();
21622
21623- x86_configure_nx();
21624 enable_x2apic();
21625
21626 /*
21627@@ -1370,7 +1378,7 @@ void cpu_init(void)
21628 {
21629 int cpu = smp_processor_id();
21630 struct task_struct *curr = current;
21631- struct tss_struct *t = &per_cpu(init_tss, cpu);
21632+ struct tss_struct *t = init_tss + cpu;
21633 struct thread_struct *thread = &curr->thread;
21634
21635 show_ucode_info_early();
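The cpu/common.c hunks above wire up a "nopcid" kernel parameter and a
setup_pcid() helper that chooses between strong and weak UDEREF depending on
the PCID/INVPCID feature bits. A minimal sketch of the early-parameter
mechanism involved, assuming only the stock __setup() machinery (the handler
name is illustrative; setup_clear_cpu_cap() is the real API):

    #include <linux/init.h>
    #include <asm/cpufeature.h>

    static int __init sketch_disable_pcid(char *arg)
    {
            /* clear the bits before secondary CPUs sample them */
            setup_clear_cpu_cap(X86_FEATURE_PCID);
            setup_clear_cpu_cap(X86_FEATURE_INVPCID);
            return 1;       /* non-zero: parameter was consumed */
    }
    __setup("nopcid", sketch_disable_pcid);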
21636diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
21637index 9c8f739..902a9c5 100644
21638--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
21639+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
21640@@ -1026,6 +1026,22 @@ static struct attribute *default_attrs[] = {
21641 };
21642
21643 #ifdef CONFIG_AMD_NB
21644+static struct attribute *default_attrs_amd_nb[] = {
21645+ &type.attr,
21646+ &level.attr,
21647+ &coherency_line_size.attr,
21648+ &physical_line_partition.attr,
21649+ &ways_of_associativity.attr,
21650+ &number_of_sets.attr,
21651+ &size.attr,
21652+ &shared_cpu_map.attr,
21653+ &shared_cpu_list.attr,
21654+ NULL,
21655+ NULL,
21656+ NULL,
21657+ NULL
21658+};
21659+
21660 static struct attribute **amd_l3_attrs(void)
21661 {
21662 static struct attribute **attrs;
21663@@ -1036,18 +1052,7 @@ static struct attribute **amd_l3_attrs(void)
21664
21665 n = ARRAY_SIZE(default_attrs);
21666
21667- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
21668- n += 2;
21669-
21670- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
21671- n += 1;
21672-
21673- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
21674- if (attrs == NULL)
21675- return attrs = default_attrs;
21676-
21677- for (n = 0; default_attrs[n]; n++)
21678- attrs[n] = default_attrs[n];
21679+ attrs = default_attrs_amd_nb;
21680
21681 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
21682 attrs[n++] = &cache_disable_0.attr;
21683@@ -1098,6 +1103,13 @@ static struct kobj_type ktype_cache = {
21684 .default_attrs = default_attrs,
21685 };
21686
21687+#ifdef CONFIG_AMD_NB
21688+static struct kobj_type ktype_cache_amd_nb = {
21689+ .sysfs_ops = &sysfs_ops,
21690+ .default_attrs = default_attrs_amd_nb,
21691+};
21692+#endif
21693+
21694 static struct kobj_type ktype_percpu_entry = {
21695 .sysfs_ops = &sysfs_ops,
21696 };
21697@@ -1163,20 +1175,26 @@ static int cache_add_dev(struct device *dev)
21698 return retval;
21699 }
21700
21701+#ifdef CONFIG_AMD_NB
21702+ amd_l3_attrs();
21703+#endif
21704+
21705 for (i = 0; i < num_cache_leaves; i++) {
21706+ struct kobj_type *ktype;
21707+
21708 this_object = INDEX_KOBJECT_PTR(cpu, i);
21709 this_object->cpu = cpu;
21710 this_object->index = i;
21711
21712 this_leaf = CPUID4_INFO_IDX(cpu, i);
21713
21714- ktype_cache.default_attrs = default_attrs;
21715+ ktype = &ktype_cache;
21716 #ifdef CONFIG_AMD_NB
21717 if (this_leaf->base.nb)
21718- ktype_cache.default_attrs = amd_l3_attrs();
21719+ ktype = &ktype_cache_amd_nb;
21720 #endif
21721 retval = kobject_init_and_add(&(this_object->kobj),
21722- &ktype_cache,
21723+ ktype,
21724 per_cpu(ici_cache_kobject, cpu),
21725 "index%1lu", i);
21726 if (unlikely(retval)) {
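The intel_cacheinfo.c change above trades a kzalloc()ed copy of default_attrs
for a static default_attrs_amd_nb[] whose trailing NULL entries double as
spare slots for the optional L3 attributes, so no attribute array has to be
built at run time. A sketch of the pattern, with hypothetical attribute names
standing in for the real ones:

    #include <linux/sysfs.h>

    static struct attribute *example_attrs[] = {
            &example_base_attr.attr,  /* always present (hypothetical) */
            NULL,                     /* spare slot: optional attr #1 */
            NULL,                     /* spare slot: optional attr #2 */
            NULL                      /* list terminator, stays NULL */
    };

    static void __init example_patch_attrs(void)
    {
            int n = 1;                /* index of the first spare slot */

            if (example_l3_feature())         /* hypothetical probe */
                    example_attrs[n++] = &example_extra_attr.attr;
            /* at most two optional attrs, so a terminator survives */
    }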
21727diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
21728index 9a79c8d..158c2f1 100644
21729--- a/arch/x86/kernel/cpu/mcheck/mce.c
21730+++ b/arch/x86/kernel/cpu/mcheck/mce.c
21731@@ -45,6 +45,7 @@
21732 #include <asm/processor.h>
21733 #include <asm/mce.h>
21734 #include <asm/msr.h>
21735+#include <asm/local.h>
21736
21737 #include "mce-internal.h"
21738
21739@@ -259,7 +260,7 @@ static void print_mce(struct mce *m)
21740 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
21741 m->cs, m->ip);
21742
21743- if (m->cs == __KERNEL_CS)
21744+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
21745 print_symbol("{%s}", m->ip);
21746 pr_cont("\n");
21747 }
21748@@ -292,10 +293,10 @@ static void print_mce(struct mce *m)
21749
21750 #define PANIC_TIMEOUT 5 /* 5 seconds */
21751
21752-static atomic_t mce_paniced;
21753+static atomic_unchecked_t mce_paniced;
21754
21755 static int fake_panic;
21756-static atomic_t mce_fake_paniced;
21757+static atomic_unchecked_t mce_fake_paniced;
21758
21759 /* Panic in progress. Enable interrupts and wait for final IPI */
21760 static void wait_for_panic(void)
21761@@ -319,7 +320,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21762 /*
21763 * Make sure only one CPU runs in machine check panic
21764 */
21765- if (atomic_inc_return(&mce_paniced) > 1)
21766+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
21767 wait_for_panic();
21768 barrier();
21769
21770@@ -327,7 +328,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21771 console_verbose();
21772 } else {
21773 /* Don't log too much for fake panic */
21774- if (atomic_inc_return(&mce_fake_paniced) > 1)
21775+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
21776 return;
21777 }
21778 /* First print corrected ones that are still unlogged */
21779@@ -366,7 +367,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21780 if (!fake_panic) {
21781 if (panic_timeout == 0)
21782 panic_timeout = mca_cfg.panic_timeout;
21783- panic(msg);
21784+ panic("%s", msg);
21785 } else
21786 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
21787 }
21788@@ -697,7 +698,7 @@ static int mce_timed_out(u64 *t)
21789 * might have been modified by someone else.
21790 */
21791 rmb();
21792- if (atomic_read(&mce_paniced))
21793+ if (atomic_read_unchecked(&mce_paniced))
21794 wait_for_panic();
21795 if (!mca_cfg.monarch_timeout)
21796 goto out;
21797@@ -1674,7 +1675,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
21798 }
21799
21800 /* Call the installed machine check handler for this CPU setup. */
21801-void (*machine_check_vector)(struct pt_regs *, long error_code) =
21802+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
21803 unexpected_machine_check;
21804
21805 /*
21806@@ -1697,7 +1698,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21807 return;
21808 }
21809
21810+ pax_open_kernel();
21811 machine_check_vector = do_machine_check;
21812+ pax_close_kernel();
21813
21814 __mcheck_cpu_init_generic();
21815 __mcheck_cpu_init_vendor(c);
21816@@ -1711,7 +1714,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21817 */
21818
21819 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
21820-static int mce_chrdev_open_count; /* #times opened */
21821+static local_t mce_chrdev_open_count; /* #times opened */
21822 static int mce_chrdev_open_exclu; /* already open exclusive? */
21823
21824 static int mce_chrdev_open(struct inode *inode, struct file *file)
21825@@ -1719,7 +1722,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21826 spin_lock(&mce_chrdev_state_lock);
21827
21828 if (mce_chrdev_open_exclu ||
21829- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
21830+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
21831 spin_unlock(&mce_chrdev_state_lock);
21832
21833 return -EBUSY;
21834@@ -1727,7 +1730,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21835
21836 if (file->f_flags & O_EXCL)
21837 mce_chrdev_open_exclu = 1;
21838- mce_chrdev_open_count++;
21839+ local_inc(&mce_chrdev_open_count);
21840
21841 spin_unlock(&mce_chrdev_state_lock);
21842
21843@@ -1738,7 +1741,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
21844 {
21845 spin_lock(&mce_chrdev_state_lock);
21846
21847- mce_chrdev_open_count--;
21848+ local_dec(&mce_chrdev_open_count);
21849 mce_chrdev_open_exclu = 0;
21850
21851 spin_unlock(&mce_chrdev_state_lock);
21852@@ -2414,7 +2417,7 @@ static __init void mce_init_banks(void)
21853
21854 for (i = 0; i < mca_cfg.banks; i++) {
21855 struct mce_bank *b = &mce_banks[i];
21856- struct device_attribute *a = &b->attr;
21857+ device_attribute_no_const *a = &b->attr;
21858
21859 sysfs_attr_init(&a->attr);
21860 a->attr.name = b->attrname;
21861@@ -2521,7 +2524,7 @@ struct dentry *mce_get_debugfs_dir(void)
21862 static void mce_reset(void)
21863 {
21864 cpu_missing = 0;
21865- atomic_set(&mce_fake_paniced, 0);
21866+ atomic_set_unchecked(&mce_fake_paniced, 0);
21867 atomic_set(&mce_executing, 0);
21868 atomic_set(&mce_callin, 0);
21869 atomic_set(&global_nwo, 0);
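In the mce.c hunks above, counters that are only ever incremented for
bookkeeping (mce_paniced, the chrdev open count) move to atomic_unchecked_t
and local_t so PaX's REFCOUNT overflow trap does not fire on them; the chrdev
path is already serialized by mce_chrdev_state_lock, so no extra ordering is
needed. A sketch of the open-count shape using the real local_t API from
<asm/local.h> (everything else here is hypothetical):

    #include <linux/spinlock.h>
    #include <linux/errno.h>
    #include <asm/local.h>

    static DEFINE_SPINLOCK(example_state_lock);
    static local_t example_open_count;      /* #times opened */
    static int example_open_exclu;          /* already open exclusive? */

    static int example_open(int want_excl)
    {
            spin_lock(&example_state_lock);
            if (example_open_exclu ||
                (local_read(&example_open_count) && want_excl)) {
                    spin_unlock(&example_state_lock);
                    return -EBUSY;
            }
            if (want_excl)
                    example_open_exclu = 1;
            local_inc(&example_open_count);
            spin_unlock(&example_state_lock);
            return 0;
    }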
21870diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
21871index a304298..49b6d06 100644
21872--- a/arch/x86/kernel/cpu/mcheck/p5.c
21873+++ b/arch/x86/kernel/cpu/mcheck/p5.c
21874@@ -10,6 +10,7 @@
21875 #include <asm/processor.h>
21876 #include <asm/mce.h>
21877 #include <asm/msr.h>
21878+#include <asm/pgtable.h>
21879
21880 /* By default disabled */
21881 int mce_p5_enabled __read_mostly;
21882@@ -48,7 +49,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
21883 if (!cpu_has(c, X86_FEATURE_MCE))
21884 return;
21885
21886+ pax_open_kernel();
21887 machine_check_vector = pentium_machine_check;
21888+ pax_close_kernel();
21889 /* Make sure the vector pointer is visible before we enable MCEs: */
21890 wmb();
21891
21892diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
21893index 7dc5564..1273569 100644
21894--- a/arch/x86/kernel/cpu/mcheck/winchip.c
21895+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
21896@@ -9,6 +9,7 @@
21897 #include <asm/processor.h>
21898 #include <asm/mce.h>
21899 #include <asm/msr.h>
21900+#include <asm/pgtable.h>
21901
21902 /* Machine check handler for WinChip C6: */
21903 static void winchip_machine_check(struct pt_regs *regs, long error_code)
21904@@ -22,7 +23,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
21905 {
21906 u32 lo, hi;
21907
21908+ pax_open_kernel();
21909 machine_check_vector = winchip_machine_check;
21910+ pax_close_kernel();
21911 /* Make sure the vector pointer is visible before we enable MCEs: */
21912 wmb();
21913
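The p5.c and winchip.c hunks above both bracket the machine_check_vector
assignment with pax_open_kernel()/pax_close_kernel(), PaX-specific primitives
that briefly lift kernel write protection so a pointer marked __read_only can
be updated. A sketch of the usage pattern, assuming the PaX patch is applied
(the helper name is illustrative):

    static void example_install_mce_handler(
            void (*handler)(struct pt_regs *, long))
    {
            pax_open_kernel();                /* make rodata writable */
            machine_check_vector = handler;   /* single pointer store */
            pax_close_kernel();               /* restore protection */
            /* publish before MCEs are enabled, as the code above does */
            wmb();
    }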
21914diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
21915index dd9d619..86e1d81 100644
21916--- a/arch/x86/kernel/cpu/microcode/core.c
21917+++ b/arch/x86/kernel/cpu/microcode/core.c
21918@@ -516,7 +516,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21919 return NOTIFY_OK;
21920 }
21921
21922-static struct notifier_block __refdata mc_cpu_notifier = {
21923+static struct notifier_block mc_cpu_notifier = {
21924 .notifier_call = mc_cpu_callback,
21925 };
21926
21927diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
21928index a276fa7..e66810f 100644
21929--- a/arch/x86/kernel/cpu/microcode/intel.c
21930+++ b/arch/x86/kernel/cpu/microcode/intel.c
21931@@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21932
21933 static int get_ucode_user(void *to, const void *from, size_t n)
21934 {
21935- return copy_from_user(to, from, n);
21936+ return copy_from_user(to, (const void __force_user *)from, n);
21937 }
21938
21939 static enum ucode_state
21940 request_microcode_user(int cpu, const void __user *buf, size_t size)
21941 {
21942- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21943+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21944 }
21945
21946 static void microcode_fini_cpu(int cpu)
21947diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
21948index f961de9..8a9d332 100644
21949--- a/arch/x86/kernel/cpu/mtrr/main.c
21950+++ b/arch/x86/kernel/cpu/mtrr/main.c
21951@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
21952 u64 size_or_mask, size_and_mask;
21953 static bool mtrr_aps_delayed_init;
21954
21955-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
21956+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
21957
21958 const struct mtrr_ops *mtrr_if;
21959
21960diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
21961index df5e41f..816c719 100644
21962--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
21963+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
21964@@ -25,7 +25,7 @@ struct mtrr_ops {
21965 int (*validate_add_page)(unsigned long base, unsigned long size,
21966 unsigned int type);
21967 int (*have_wrcomb)(void);
21968-};
21969+} __do_const;
21970
21971 extern int generic_get_free_region(unsigned long base, unsigned long size,
21972 int replace_reg);
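The mtrr.h hunk above tags struct mtrr_ops with __do_const, a PaX annotation
(enforced by a gcc plugin) that forces every instance of the structure into
read-only memory so its function pointers cannot be overwritten at run time.
The plain-C equivalent of the pattern is simply a const instance:

    struct example_ops {
            int (*validate)(unsigned long base, unsigned long size);
            int (*have_wrcomb)(void);
    };

    /* both callees are hypothetical */
    static const struct example_ops generic_example_ops = {
            .validate     = example_validate,
            .have_wrcomb  = example_have_wrcomb,
    };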
21973diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
21974index 2879ecd..bb8c80b 100644
21975--- a/arch/x86/kernel/cpu/perf_event.c
21976+++ b/arch/x86/kernel/cpu/perf_event.c
21977@@ -1372,7 +1372,7 @@ static void __init pmu_check_apic(void)
21978
21979 }
21980
21981-static struct attribute_group x86_pmu_format_group = {
21982+static attribute_group_no_const x86_pmu_format_group = {
21983 .name = "format",
21984 .attrs = NULL,
21985 };
21986@@ -1471,7 +1471,7 @@ static struct attribute *events_attr[] = {
21987 NULL,
21988 };
21989
21990-static struct attribute_group x86_pmu_events_group = {
21991+static attribute_group_no_const x86_pmu_events_group = {
21992 .name = "events",
21993 .attrs = events_attr,
21994 };
21995@@ -1995,7 +1995,7 @@ static unsigned long get_segment_base(unsigned int segment)
21996 if (idx > GDT_ENTRIES)
21997 return 0;
21998
21999- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
22000+ desc = get_cpu_gdt_table(smp_processor_id());
22001 }
22002
22003 return get_desc_base(desc + idx);
22004@@ -2085,7 +2085,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
22005 break;
22006
22007 perf_callchain_store(entry, frame.return_address);
22008- fp = frame.next_frame;
22009+ fp = (const void __force_user *)frame.next_frame;
22010 }
22011 }
22012
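The get_segment_base() hunk above stops dereferencing the gdt_page per-cpu
variable and goes through get_cpu_gdt_table() instead, because the PaX patch
relocates the GDT out of the per-cpu area. Once a descriptor pointer is in
hand, the base is reassembled from three fields, mirroring the kernel's
get_desc_base():

    #include <asm/desc_defs.h>

    static unsigned long example_desc_base(const struct desc_struct *d)
    {
            /* bits 0-15, 16-23 and 24-31 of the base live apart */
            return d->base0 |
                   ((unsigned long)d->base1 << 16) |
                   ((unsigned long)d->base2 << 24);
    }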
22013diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
22014index 639d128..e92d7e5 100644
22015--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
22016+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
22017@@ -405,7 +405,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
22018 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
22019 {
22020 struct attribute **attrs;
22021- struct attribute_group *attr_group;
22022+ attribute_group_no_const *attr_group;
22023 int i = 0, j;
22024
22025 while (amd_iommu_v2_event_descs[i].attr.attr.name)
22026diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
22027index 2502d0d..e5cc05c 100644
22028--- a/arch/x86/kernel/cpu/perf_event_intel.c
22029+++ b/arch/x86/kernel/cpu/perf_event_intel.c
22030@@ -2353,10 +2353,10 @@ __init int intel_pmu_init(void)
22031 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
22032
22033 if (boot_cpu_has(X86_FEATURE_PDCM)) {
22034- u64 capabilities;
22035+ u64 capabilities = x86_pmu.intel_cap.capabilities;
22036
22037- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
22038- x86_pmu.intel_cap.capabilities = capabilities;
22039+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
22040+ x86_pmu.intel_cap.capabilities = capabilities;
22041 }
22042
22043 intel_ds_init();
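The perf_event_intel.c hunk above converts a plain rdmsrl() into
rdmsrl_safe() so a #GP from a hypervisor that refuses
MSR_IA32_PERF_CAPABILITIES no longer kills the boot; on fault, the
pre-initialized capabilities value is kept. The pattern, with the real
rdmsrl_safe() API and a hypothetical wrapper:

    static void example_read_caps(u64 *caps)
    {
            u64 saved = *caps;      /* remember the default */

            /* rdmsrl_safe() returns non-zero if the read faulted */
            if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, caps))
                    *caps = saved;  /* fall back to the default */
    }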
22044diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
22045index 619f769..d510008 100644
22046--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
22047+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
22048@@ -449,7 +449,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
22049 NULL,
22050 };
22051
22052-static struct attribute_group rapl_pmu_events_group = {
22053+static attribute_group_no_const rapl_pmu_events_group __read_only = {
22054 .name = "events",
22055 .attrs = NULL, /* patched at runtime */
22056 };
22057diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
22058index ae6552a..b5be2d3 100644
22059--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
22060+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
22061@@ -3694,7 +3694,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
22062 static int __init uncore_type_init(struct intel_uncore_type *type)
22063 {
22064 struct intel_uncore_pmu *pmus;
22065- struct attribute_group *attr_group;
22066+ attribute_group_no_const *attr_group;
22067 struct attribute **attrs;
22068 int i, j;
22069
22070diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
22071index 90236f0..54cb20d 100644
22072--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
22073+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
22074@@ -503,7 +503,7 @@ struct intel_uncore_box {
22075 struct uncore_event_desc {
22076 struct kobj_attribute attr;
22077 const char *config;
22078-};
22079+} __do_const;
22080
22081 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
22082 { \
22083diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
22084index 3225ae6c..ee3c6db 100644
22085--- a/arch/x86/kernel/cpuid.c
22086+++ b/arch/x86/kernel/cpuid.c
22087@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
22088 return notifier_from_errno(err);
22089 }
22090
22091-static struct notifier_block __refdata cpuid_class_cpu_notifier =
22092+static struct notifier_block cpuid_class_cpu_notifier =
22093 {
22094 .notifier_call = cpuid_class_cpu_callback,
22095 };
22096diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
22097index 507de80..ebaae2a 100644
22098--- a/arch/x86/kernel/crash.c
22099+++ b/arch/x86/kernel/crash.c
22100@@ -58,7 +58,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
22101 #ifdef CONFIG_X86_32
22102 struct pt_regs fixed_regs;
22103
22104- if (!user_mode_vm(regs)) {
22105+ if (!user_mode(regs)) {
22106 crash_fixup_ss_esp(&fixed_regs, regs);
22107 regs = &fixed_regs;
22108 }
22109diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
22110index afa64ad..dce67dd 100644
22111--- a/arch/x86/kernel/crash_dump_64.c
22112+++ b/arch/x86/kernel/crash_dump_64.c
22113@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
22114 return -ENOMEM;
22115
22116 if (userbuf) {
22117- if (copy_to_user(buf, vaddr + offset, csize)) {
22118+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
22119 iounmap(vaddr);
22120 return -EFAULT;
22121 }
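copy_oldmem_page() above serves both user and kernel destinations, selected
by the userbuf flag; the __force_user cast exists because PaX places user and
kernel pointers in distinct sparse address spaces. A simplified sketch of the
dual-destination shape (the helper name is hypothetical):

    #include <linux/uaccess.h>
    #include <linux/string.h>
    #include <linux/errno.h>

    static ssize_t example_copy_out(char *buf, const void *src,
                                    size_t csize, int userbuf)
    {
            if (userbuf) {
                    if (copy_to_user((char __user *)buf, src, csize))
                            return -EFAULT;
            } else {
                    memcpy(buf, src, csize);
            }
            return csize;
    }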
22122diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
22123index f6dfd93..892ade4 100644
22124--- a/arch/x86/kernel/doublefault.c
22125+++ b/arch/x86/kernel/doublefault.c
22126@@ -12,7 +12,7 @@
22127
22128 #define DOUBLEFAULT_STACKSIZE (1024)
22129 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
22130-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
22131+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
22132
22133 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
22134
22135@@ -22,7 +22,7 @@ static void doublefault_fn(void)
22136 unsigned long gdt, tss;
22137
22138 native_store_gdt(&gdt_desc);
22139- gdt = gdt_desc.address;
22140+ gdt = (unsigned long)gdt_desc.address;
22141
22142 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
22143
22144@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
22145 /* 0x2 bit is always set */
22146 .flags = X86_EFLAGS_SF | 0x2,
22147 .sp = STACK_START,
22148- .es = __USER_DS,
22149+ .es = __KERNEL_DS,
22150 .cs = __KERNEL_CS,
22151 .ss = __KERNEL_DS,
22152- .ds = __USER_DS,
22153+ .ds = __KERNEL_DS,
22154 .fs = __KERNEL_PERCPU,
22155
22156 .__cr3 = __pa_nodebug(swapper_pg_dir),
22157diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
22158index b74ebc7..6dbb0c5 100644
22159--- a/arch/x86/kernel/dumpstack.c
22160+++ b/arch/x86/kernel/dumpstack.c
22161@@ -2,6 +2,9 @@
22162 * Copyright (C) 1991, 1992 Linus Torvalds
22163 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
22164 */
22165+#ifdef CONFIG_GRKERNSEC_HIDESYM
22166+#define __INCLUDED_BY_HIDESYM 1
22167+#endif
22168 #include <linux/kallsyms.h>
22169 #include <linux/kprobes.h>
22170 #include <linux/uaccess.h>
22171@@ -40,16 +43,14 @@ void printk_address(unsigned long address)
22172 static void
22173 print_ftrace_graph_addr(unsigned long addr, void *data,
22174 const struct stacktrace_ops *ops,
22175- struct thread_info *tinfo, int *graph)
22176+ struct task_struct *task, int *graph)
22177 {
22178- struct task_struct *task;
22179 unsigned long ret_addr;
22180 int index;
22181
22182 if (addr != (unsigned long)return_to_handler)
22183 return;
22184
22185- task = tinfo->task;
22186 index = task->curr_ret_stack;
22187
22188 if (!task->ret_stack || index < *graph)
22189@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
22190 static inline void
22191 print_ftrace_graph_addr(unsigned long addr, void *data,
22192 const struct stacktrace_ops *ops,
22193- struct thread_info *tinfo, int *graph)
22194+ struct task_struct *task, int *graph)
22195 { }
22196 #endif
22197
22198@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
22199 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
22200 */
22201
22202-static inline int valid_stack_ptr(struct thread_info *tinfo,
22203- void *p, unsigned int size, void *end)
22204+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
22205 {
22206- void *t = tinfo;
22207 if (end) {
22208 if (p < end && p >= (end-THREAD_SIZE))
22209 return 1;
22210@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
22211 }
22212
22213 unsigned long
22214-print_context_stack(struct thread_info *tinfo,
22215+print_context_stack(struct task_struct *task, void *stack_start,
22216 unsigned long *stack, unsigned long bp,
22217 const struct stacktrace_ops *ops, void *data,
22218 unsigned long *end, int *graph)
22219 {
22220 struct stack_frame *frame = (struct stack_frame *)bp;
22221
22222- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
22223+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
22224 unsigned long addr;
22225
22226 addr = *stack;
22227@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
22228 } else {
22229 ops->address(data, addr, 0);
22230 }
22231- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22232+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22233 }
22234 stack++;
22235 }
22236@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
22237 EXPORT_SYMBOL_GPL(print_context_stack);
22238
22239 unsigned long
22240-print_context_stack_bp(struct thread_info *tinfo,
22241+print_context_stack_bp(struct task_struct *task, void *stack_start,
22242 unsigned long *stack, unsigned long bp,
22243 const struct stacktrace_ops *ops, void *data,
22244 unsigned long *end, int *graph)
22245@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22246 struct stack_frame *frame = (struct stack_frame *)bp;
22247 unsigned long *ret_addr = &frame->return_address;
22248
22249- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
22250+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
22251 unsigned long addr = *ret_addr;
22252
22253 if (!__kernel_text_address(addr))
22254@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22255 ops->address(data, addr, 1);
22256 frame = frame->next_frame;
22257 ret_addr = &frame->return_address;
22258- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22259+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22260 }
22261
22262 return (unsigned long)frame;
22263@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
22264 static void print_trace_address(void *data, unsigned long addr, int reliable)
22265 {
22266 touch_nmi_watchdog();
22267- printk(data);
22268+ printk("%s", (char *)data);
22269 printk_stack_address(addr, reliable);
22270 }
22271
22272@@ -225,6 +224,8 @@ unsigned long oops_begin(void)
22273 EXPORT_SYMBOL_GPL(oops_begin);
22274 NOKPROBE_SYMBOL(oops_begin);
22275
22276+extern void gr_handle_kernel_exploit(void);
22277+
22278 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22279 {
22280 if (regs && kexec_should_crash(current))
22281@@ -246,7 +247,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22282 panic("Fatal exception in interrupt");
22283 if (panic_on_oops)
22284 panic("Fatal exception");
22285- do_exit(signr);
22286+
22287+ gr_handle_kernel_exploit();
22288+
22289+ do_group_exit(signr);
22290 }
22291 NOKPROBE_SYMBOL(oops_end);
22292
22293@@ -275,7 +279,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
22294 print_modules();
22295 show_regs(regs);
22296 #ifdef CONFIG_X86_32
22297- if (user_mode_vm(regs)) {
22298+ if (user_mode(regs)) {
22299 sp = regs->sp;
22300 ss = regs->ss & 0xffff;
22301 } else {
22302@@ -304,7 +308,7 @@ void die(const char *str, struct pt_regs *regs, long err)
22303 unsigned long flags = oops_begin();
22304 int sig = SIGSEGV;
22305
22306- if (!user_mode_vm(regs))
22307+ if (!user_mode(regs))
22308 report_bug(regs->ip, regs);
22309
22310 if (__die(str, regs, err))
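The dumpstack.c rework above passes the stack's base address (stack_start)
instead of a thread_info pointer, since with grsecurity thread_info no longer
sits at the bottom of the stack. The bounds test itself is unchanged;
reconstructed in full from the mainline function it derives from (the tail
elided by the hunk follows mainline):

    static inline int example_valid_stack_ptr(void *t, void *p,
                                              unsigned int size, void *end)
    {
            if (end) {
                    /* stack bounded above by an explicit 'end' */
                    if (p < end && p >= (end - THREAD_SIZE))
                            return 1;
                    return 0;
            }
            /* process stack: the object must fit below the top */
            return p > t && p < t + THREAD_SIZE - size;
    }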
22311diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
22312index 5abd4cd..c65733b 100644
22313--- a/arch/x86/kernel/dumpstack_32.c
22314+++ b/arch/x86/kernel/dumpstack_32.c
22315@@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22316 bp = stack_frame(task, regs);
22317
22318 for (;;) {
22319- struct thread_info *context;
22320+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22321 void *end_stack;
22322
22323 end_stack = is_hardirq_stack(stack, cpu);
22324 if (!end_stack)
22325 end_stack = is_softirq_stack(stack, cpu);
22326
22327- context = task_thread_info(task);
22328- bp = ops->walk_stack(context, stack, bp, ops, data,
22329+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
22330 end_stack, &graph);
22331
22332 /* Stop if not on irq stack */
22333@@ -123,27 +122,28 @@ void show_regs(struct pt_regs *regs)
22334 int i;
22335
22336 show_regs_print_info(KERN_EMERG);
22337- __show_regs(regs, !user_mode_vm(regs));
22338+ __show_regs(regs, !user_mode(regs));
22339
22340 /*
22341 * When in-kernel, we also print out the stack and code at the
22342 * time of the fault..
22343 */
22344- if (!user_mode_vm(regs)) {
22345+ if (!user_mode(regs)) {
22346 unsigned int code_prologue = code_bytes * 43 / 64;
22347 unsigned int code_len = code_bytes;
22348 unsigned char c;
22349 u8 *ip;
22350+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22351
22352 pr_emerg("Stack:\n");
22353 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
22354
22355 pr_emerg("Code:");
22356
22357- ip = (u8 *)regs->ip - code_prologue;
22358+ ip = (u8 *)regs->ip - code_prologue + cs_base;
22359 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22360 /* try starting at IP */
22361- ip = (u8 *)regs->ip;
22362+ ip = (u8 *)regs->ip + cs_base;
22363 code_len = code_len - code_prologue + 1;
22364 }
22365 for (i = 0; i < code_len; i++, ip++) {
22366@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
22367 pr_cont(" Bad EIP value.");
22368 break;
22369 }
22370- if (ip == (u8 *)regs->ip)
22371+ if (ip == (u8 *)regs->ip + cs_base)
22372 pr_cont(" <%02x>", c);
22373 else
22374 pr_cont(" %02x", c);
22375@@ -165,6 +165,7 @@ int is_valid_bugaddr(unsigned long ip)
22376 {
22377 unsigned short ud2;
22378
22379+ ip = ktla_ktva(ip);
22380 if (ip < PAGE_OFFSET)
22381 return 0;
22382 if (probe_kernel_address((unsigned short *)ip, ud2))
22383@@ -172,3 +173,15 @@ int is_valid_bugaddr(unsigned long ip)
22384
22385 return ud2 == 0x0b0f;
22386 }
22387+
22388+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22389+void pax_check_alloca(unsigned long size)
22390+{
22391+ unsigned long sp = (unsigned long)&sp, stack_left;
22392+
22393+ /* all kernel stacks are of the same size */
22394+ stack_left = sp & (THREAD_SIZE - 1);
22395+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22396+}
22397+EXPORT_SYMBOL(pax_check_alloca);
22398+#endif
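The 32-bit pax_check_alloca() added above leans on every kernel stack being
THREAD_SIZE-aligned, so the free space below the stack pointer falls out of a
single mask. Worked example with a hypothetical address, assuming
THREAD_SIZE is 8192:

    /*
     * sp        = 0xf60b5e40            (address of a local variable)
     * sp & 8191 = 0x1e40 = 7744 bytes   still unused below sp
     *
     * BUG_ON(stack_left < 256 || size >= stack_left - 256) then
     * demands the alloca leave at least 256 bytes of headroom.
     */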
22399diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22400index 1abcb50..6c8d702 100644
22401--- a/arch/x86/kernel/dumpstack_64.c
22402+++ b/arch/x86/kernel/dumpstack_64.c
22403@@ -154,12 +154,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22404 const struct stacktrace_ops *ops, void *data)
22405 {
22406 const unsigned cpu = get_cpu();
22407- struct thread_info *tinfo;
22408 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22409 unsigned long dummy;
22410 unsigned used = 0;
22411 int graph = 0;
22412 int done = 0;
22413+ void *stack_start;
22414
22415 if (!task)
22416 task = current;
22417@@ -180,7 +180,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22418 * current stack address. If the stacks consist of nested
22419 * exceptions
22420 */
22421- tinfo = task_thread_info(task);
22422 while (!done) {
22423 unsigned long *stack_end;
22424 enum stack_type stype;
22425@@ -203,7 +202,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22426 if (ops->stack(data, id) < 0)
22427 break;
22428
22429- bp = ops->walk_stack(tinfo, stack, bp, ops,
22430+ bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
22431 data, stack_end, &graph);
22432 ops->stack(data, "<EOE>");
22433 /*
22434@@ -211,6 +210,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22435 * second-to-last pointer (index -2 to end) in the
22436 * exception stack:
22437 */
22438+ if ((u16)stack_end[-1] != __KERNEL_DS)
22439+ goto out;
22440 stack = (unsigned long *) stack_end[-2];
22441 done = 0;
22442 break;
22443@@ -219,7 +220,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22444
22445 if (ops->stack(data, "IRQ") < 0)
22446 break;
22447- bp = ops->walk_stack(tinfo, stack, bp,
22448+ bp = ops->walk_stack(task, irq_stack, stack, bp,
22449 ops, data, stack_end, &graph);
22450 /*
22451 * We link to the next stack (which would be
22452@@ -241,7 +242,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22453 /*
22454 * This handles the process stack:
22455 */
22456- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22457+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22458+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22459+out:
22460 put_cpu();
22461 }
22462 EXPORT_SYMBOL(dump_trace);
22463@@ -350,3 +353,50 @@ int is_valid_bugaddr(unsigned long ip)
22464
22465 return ud2 == 0x0b0f;
22466 }
22467+
22468+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22469+void pax_check_alloca(unsigned long size)
22470+{
22471+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22472+ unsigned cpu, used;
22473+ char *id;
22474+
22475+ /* check the process stack first */
22476+ stack_start = (unsigned long)task_stack_page(current);
22477+ stack_end = stack_start + THREAD_SIZE;
22478+ if (likely(stack_start <= sp && sp < stack_end)) {
22479+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
22480+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22481+ return;
22482+ }
22483+
22484+ cpu = get_cpu();
22485+
22486+ /* check the irq stacks */
22487+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22488+ stack_start = stack_end - IRQ_STACK_SIZE;
22489+ if (stack_start <= sp && sp < stack_end) {
22490+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22491+ put_cpu();
22492+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22493+ return;
22494+ }
22495+
22496+ /* check the exception stacks */
22497+ used = 0;
22498+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22499+ stack_start = stack_end - EXCEPTION_STKSZ;
22500+ if (stack_end && stack_start <= sp && sp < stack_end) {
22501+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22502+ put_cpu();
22503+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22504+ return;
22505+ }
22506+
22507+ put_cpu();
22508+
22509+ /* unknown stack */
22510+ BUG();
22511+}
22512+EXPORT_SYMBOL(pax_check_alloca);
22513+#endif
22514diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
22515index 988c00a..4f673b6 100644
22516--- a/arch/x86/kernel/e820.c
22517+++ b/arch/x86/kernel/e820.c
22518@@ -803,8 +803,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
22519
22520 static void early_panic(char *msg)
22521 {
22522- early_printk(msg);
22523- panic(msg);
22524+ early_printk("%s", msg);
22525+ panic("%s", msg);
22526 }
22527
22528 static int userdef __initdata;
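The early_panic() fix above is a classic format-string hardening: e820 map
text that reaches panic() must be passed as data, never as the format. A
minimal illustration (the helper is hypothetical; printk() semantics are
standard):

    static void example_report(const char *msg)
    {
            /* BAD:  printk(msg);  any %-directives in msg are parsed */
            printk("%s", msg);     /* msg is treated as pure data */
    }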
22529diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
22530index 01d1c18..8073693 100644
22531--- a/arch/x86/kernel/early_printk.c
22532+++ b/arch/x86/kernel/early_printk.c
22533@@ -7,6 +7,7 @@
22534 #include <linux/pci_regs.h>
22535 #include <linux/pci_ids.h>
22536 #include <linux/errno.h>
22537+#include <linux/sched.h>
22538 #include <asm/io.h>
22539 #include <asm/processor.h>
22540 #include <asm/fcntl.h>
22541diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
22542index 0d0c9d4..f65b4f6 100644
22543--- a/arch/x86/kernel/entry_32.S
22544+++ b/arch/x86/kernel/entry_32.S
22545@@ -177,13 +177,153 @@
22546 /*CFI_REL_OFFSET gs, PT_GS*/
22547 .endm
22548 .macro SET_KERNEL_GS reg
22549+
22550+#ifdef CONFIG_CC_STACKPROTECTOR
22551 movl $(__KERNEL_STACK_CANARY), \reg
22552+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22553+ movl $(__USER_DS), \reg
22554+#else
22555+ xorl \reg, \reg
22556+#endif
22557+
22558 movl \reg, %gs
22559 .endm
22560
22561 #endif /* CONFIG_X86_32_LAZY_GS */
22562
22563-.macro SAVE_ALL
22564+.macro pax_enter_kernel
22565+#ifdef CONFIG_PAX_KERNEXEC
22566+ call pax_enter_kernel
22567+#endif
22568+.endm
22569+
22570+.macro pax_exit_kernel
22571+#ifdef CONFIG_PAX_KERNEXEC
22572+ call pax_exit_kernel
22573+#endif
22574+.endm
22575+
22576+#ifdef CONFIG_PAX_KERNEXEC
22577+ENTRY(pax_enter_kernel)
22578+#ifdef CONFIG_PARAVIRT
22579+ pushl %eax
22580+ pushl %ecx
22581+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
22582+ mov %eax, %esi
22583+#else
22584+ mov %cr0, %esi
22585+#endif
22586+ bts $16, %esi
22587+ jnc 1f
22588+ mov %cs, %esi
22589+ cmp $__KERNEL_CS, %esi
22590+ jz 3f
22591+ ljmp $__KERNEL_CS, $3f
22592+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
22593+2:
22594+#ifdef CONFIG_PARAVIRT
22595+ mov %esi, %eax
22596+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
22597+#else
22598+ mov %esi, %cr0
22599+#endif
22600+3:
22601+#ifdef CONFIG_PARAVIRT
22602+ popl %ecx
22603+ popl %eax
22604+#endif
22605+ ret
22606+ENDPROC(pax_enter_kernel)
22607+
22608+ENTRY(pax_exit_kernel)
22609+#ifdef CONFIG_PARAVIRT
22610+ pushl %eax
22611+ pushl %ecx
22612+#endif
22613+ mov %cs, %esi
22614+ cmp $__KERNEXEC_KERNEL_CS, %esi
22615+ jnz 2f
22616+#ifdef CONFIG_PARAVIRT
22617+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
22618+ mov %eax, %esi
22619+#else
22620+ mov %cr0, %esi
22621+#endif
22622+ btr $16, %esi
22623+ ljmp $__KERNEL_CS, $1f
22624+1:
22625+#ifdef CONFIG_PARAVIRT
22626+ mov %esi, %eax
22627+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
22628+#else
22629+ mov %esi, %cr0
22630+#endif
22631+2:
22632+#ifdef CONFIG_PARAVIRT
22633+ popl %ecx
22634+ popl %eax
22635+#endif
22636+ ret
22637+ENDPROC(pax_exit_kernel)
22638+#endif
22639+
22640+ .macro pax_erase_kstack
22641+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22642+ call pax_erase_kstack
22643+#endif
22644+ .endm
22645+
22646+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22647+/*
22648+ * ebp: thread_info
22649+ */
22650+ENTRY(pax_erase_kstack)
22651+ pushl %edi
22652+ pushl %ecx
22653+ pushl %eax
22654+
22655+ mov TI_lowest_stack(%ebp), %edi
22656+ mov $-0xBEEF, %eax
22657+ std
22658+
22659+1: mov %edi, %ecx
22660+ and $THREAD_SIZE_asm - 1, %ecx
22661+ shr $2, %ecx
22662+ repne scasl
22663+ jecxz 2f
22664+
22665+ cmp $2*16, %ecx
22666+ jc 2f
22667+
22668+ mov $2*16, %ecx
22669+ repe scasl
22670+ jecxz 2f
22671+ jne 1b
22672+
22673+2: cld
22674+ mov %esp, %ecx
22675+ sub %edi, %ecx
22676+
22677+ cmp $THREAD_SIZE_asm, %ecx
22678+ jb 3f
22679+ ud2
22680+3:
22681+
22682+ shr $2, %ecx
22683+ rep stosl
22684+
22685+ mov TI_task_thread_sp0(%ebp), %edi
22686+ sub $128, %edi
22687+ mov %edi, TI_lowest_stack(%ebp)
22688+
22689+ popl %eax
22690+ popl %ecx
22691+ popl %edi
22692+ ret
22693+ENDPROC(pax_erase_kstack)
22694+#endif
22695+
22696+.macro __SAVE_ALL _DS
22697 cld
22698 PUSH_GS
22699 pushl_cfi %fs
22700@@ -206,7 +346,7 @@
22701 CFI_REL_OFFSET ecx, 0
22702 pushl_cfi %ebx
22703 CFI_REL_OFFSET ebx, 0
22704- movl $(__USER_DS), %edx
22705+ movl $\_DS, %edx
22706 movl %edx, %ds
22707 movl %edx, %es
22708 movl $(__KERNEL_PERCPU), %edx
22709@@ -214,6 +354,15 @@
22710 SET_KERNEL_GS %edx
22711 .endm
22712
22713+.macro SAVE_ALL
22714+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22715+ __SAVE_ALL __KERNEL_DS
22716+ pax_enter_kernel
22717+#else
22718+ __SAVE_ALL __USER_DS
22719+#endif
22720+.endm
22721+
22722 .macro RESTORE_INT_REGS
22723 popl_cfi %ebx
22724 CFI_RESTORE ebx
22725@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
22726 popfl_cfi
22727 jmp syscall_exit
22728 CFI_ENDPROC
22729-END(ret_from_fork)
22730+ENDPROC(ret_from_fork)
22731
22732 ENTRY(ret_from_kernel_thread)
22733 CFI_STARTPROC
22734@@ -340,7 +489,15 @@ ret_from_intr:
22735 andl $SEGMENT_RPL_MASK, %eax
22736 #endif
22737 cmpl $USER_RPL, %eax
22738+
22739+#ifdef CONFIG_PAX_KERNEXEC
22740+ jae resume_userspace
22741+
22742+ pax_exit_kernel
22743+ jmp resume_kernel
22744+#else
22745 jb resume_kernel # not returning to v8086 or userspace
22746+#endif
22747
22748 ENTRY(resume_userspace)
22749 LOCKDEP_SYS_EXIT
22750@@ -352,8 +509,8 @@ ENTRY(resume_userspace)
22751 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
22752 # int/exception return?
22753 jne work_pending
22754- jmp restore_all
22755-END(ret_from_exception)
22756+ jmp restore_all_pax
22757+ENDPROC(ret_from_exception)
22758
22759 #ifdef CONFIG_PREEMPT
22760 ENTRY(resume_kernel)
22761@@ -365,7 +522,7 @@ need_resched:
22762 jz restore_all
22763 call preempt_schedule_irq
22764 jmp need_resched
22765-END(resume_kernel)
22766+ENDPROC(resume_kernel)
22767 #endif
22768 CFI_ENDPROC
22769
22770@@ -395,30 +552,45 @@ sysenter_past_esp:
22771 /*CFI_REL_OFFSET cs, 0*/
22772 /*
22773 * Push current_thread_info()->sysenter_return to the stack.
22774- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
22775- * pushed above; +8 corresponds to copy_thread's esp0 setting.
22776 */
22777- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
22778+ pushl_cfi $0
22779 CFI_REL_OFFSET eip, 0
22780
22781 pushl_cfi %eax
22782 SAVE_ALL
22783+ GET_THREAD_INFO(%ebp)
22784+ movl TI_sysenter_return(%ebp),%ebp
22785+ movl %ebp,PT_EIP(%esp)
22786 ENABLE_INTERRUPTS(CLBR_NONE)
22787
22788 /*
22789 * Load the potential sixth argument from user stack.
22790 * Careful about security.
22791 */
22792+ movl PT_OLDESP(%esp),%ebp
22793+
22794+#ifdef CONFIG_PAX_MEMORY_UDEREF
22795+ mov PT_OLDSS(%esp),%ds
22796+1: movl %ds:(%ebp),%ebp
22797+ push %ss
22798+ pop %ds
22799+#else
22800 cmpl $__PAGE_OFFSET-3,%ebp
22801 jae syscall_fault
22802 ASM_STAC
22803 1: movl (%ebp),%ebp
22804 ASM_CLAC
22805+#endif
22806+
22807 movl %ebp,PT_EBP(%esp)
22808 _ASM_EXTABLE(1b,syscall_fault)
22809
22810 GET_THREAD_INFO(%ebp)
22811
22812+#ifdef CONFIG_PAX_RANDKSTACK
22813+ pax_erase_kstack
22814+#endif
22815+
22816 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22817 jnz sysenter_audit
22818 sysenter_do_call:
22819@@ -434,12 +606,24 @@ sysenter_after_call:
22820 testl $_TIF_ALLWORK_MASK, %ecx
22821 jne sysexit_audit
22822 sysenter_exit:
22823+
22824+#ifdef CONFIG_PAX_RANDKSTACK
22825+ pushl_cfi %eax
22826+ movl %esp, %eax
22827+ call pax_randomize_kstack
22828+ popl_cfi %eax
22829+#endif
22830+
22831+ pax_erase_kstack
22832+
22833 /* if something modifies registers it must also disable sysexit */
22834 movl PT_EIP(%esp), %edx
22835 movl PT_OLDESP(%esp), %ecx
22836 xorl %ebp,%ebp
22837 TRACE_IRQS_ON
22838 1: mov PT_FS(%esp), %fs
22839+2: mov PT_DS(%esp), %ds
22840+3: mov PT_ES(%esp), %es
22841 PTGS_TO_GS
22842 ENABLE_INTERRUPTS_SYSEXIT
22843
22844@@ -456,6 +640,9 @@ sysenter_audit:
22845 movl %eax,%edx /* 2nd arg: syscall number */
22846 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
22847 call __audit_syscall_entry
22848+
22849+ pax_erase_kstack
22850+
22851 pushl_cfi %ebx
22852 movl PT_EAX(%esp),%eax /* reload syscall number */
22853 jmp sysenter_do_call
22854@@ -481,10 +668,16 @@ sysexit_audit:
22855
22856 CFI_ENDPROC
22857 .pushsection .fixup,"ax"
22858-2: movl $0,PT_FS(%esp)
22859+4: movl $0,PT_FS(%esp)
22860+ jmp 1b
22861+5: movl $0,PT_DS(%esp)
22862+ jmp 1b
22863+6: movl $0,PT_ES(%esp)
22864 jmp 1b
22865 .popsection
22866- _ASM_EXTABLE(1b,2b)
22867+ _ASM_EXTABLE(1b,4b)
22868+ _ASM_EXTABLE(2b,5b)
22869+ _ASM_EXTABLE(3b,6b)
22870 PTGS_TO_GS_EX
22871 ENDPROC(ia32_sysenter_target)
22872
22873@@ -495,6 +688,11 @@ ENTRY(system_call)
22874 pushl_cfi %eax # save orig_eax
22875 SAVE_ALL
22876 GET_THREAD_INFO(%ebp)
22877+
22878+#ifdef CONFIG_PAX_RANDKSTACK
22879+ pax_erase_kstack
22880+#endif
22881+
22882 # system call tracing in operation / emulation
22883 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22884 jnz syscall_trace_entry
22885@@ -514,6 +712,15 @@ syscall_exit:
22886 testl $_TIF_ALLWORK_MASK, %ecx # current->work
22887 jne syscall_exit_work
22888
22889+restore_all_pax:
22890+
22891+#ifdef CONFIG_PAX_RANDKSTACK
22892+ movl %esp, %eax
22893+ call pax_randomize_kstack
22894+#endif
22895+
22896+ pax_erase_kstack
22897+
22898 restore_all:
22899 TRACE_IRQS_IRET
22900 restore_all_notrace:
22901@@ -568,14 +775,34 @@ ldt_ss:
22902 * compensating for the offset by changing to the ESPFIX segment with
22903 * a base address that matches for the difference.
22904 */
22905-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
22906+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
22907 mov %esp, %edx /* load kernel esp */
22908 mov PT_OLDESP(%esp), %eax /* load userspace esp */
22909 mov %dx, %ax /* eax: new kernel esp */
22910 sub %eax, %edx /* offset (low word is 0) */
22911+#ifdef CONFIG_SMP
22912+ movl PER_CPU_VAR(cpu_number), %ebx
22913+ shll $PAGE_SHIFT_asm, %ebx
22914+ addl $cpu_gdt_table, %ebx
22915+#else
22916+ movl $cpu_gdt_table, %ebx
22917+#endif
22918 shr $16, %edx
22919- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
22920- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
22921+
22922+#ifdef CONFIG_PAX_KERNEXEC
22923+ mov %cr0, %esi
22924+ btr $16, %esi
22925+ mov %esi, %cr0
22926+#endif
22927+
22928+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
22929+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
22930+
22931+#ifdef CONFIG_PAX_KERNEXEC
22932+ bts $16, %esi
22933+ mov %esi, %cr0
22934+#endif
22935+
22936 pushl_cfi $__ESPFIX_SS
22937 pushl_cfi %eax /* new kernel esp */
22938 /* Disable interrupts, but do not irqtrace this section: we
22939@@ -605,20 +832,18 @@ work_resched:
22940 movl TI_flags(%ebp), %ecx
22941 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
22942 # than syscall tracing?
22943- jz restore_all
22944+ jz restore_all_pax
22945 testb $_TIF_NEED_RESCHED, %cl
22946 jnz work_resched
22947
22948 work_notifysig: # deal with pending signals and
22949 # notify-resume requests
22950+ movl %esp, %eax
22951 #ifdef CONFIG_VM86
22952 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
22953- movl %esp, %eax
22954 jne work_notifysig_v86 # returning to kernel-space or
22955 # vm86-space
22956 1:
22957-#else
22958- movl %esp, %eax
22959 #endif
22960 TRACE_IRQS_ON
22961 ENABLE_INTERRUPTS(CLBR_NONE)
22962@@ -639,7 +864,7 @@ work_notifysig_v86:
22963 movl %eax, %esp
22964 jmp 1b
22965 #endif
22966-END(work_pending)
22967+ENDPROC(work_pending)
22968
22969 # perform syscall exit tracing
22970 ALIGN
22971@@ -647,11 +872,14 @@ syscall_trace_entry:
22972 movl $-ENOSYS,PT_EAX(%esp)
22973 movl %esp, %eax
22974 call syscall_trace_enter
22975+
22976+ pax_erase_kstack
22977+
22978 /* What it returned is what we'll actually use. */
22979 cmpl $(NR_syscalls), %eax
22980 jnae syscall_call
22981 jmp syscall_exit
22982-END(syscall_trace_entry)
22983+ENDPROC(syscall_trace_entry)
22984
22985 # perform syscall exit tracing
22986 ALIGN
22987@@ -664,26 +892,30 @@ syscall_exit_work:
22988 movl %esp, %eax
22989 call syscall_trace_leave
22990 jmp resume_userspace
22991-END(syscall_exit_work)
22992+ENDPROC(syscall_exit_work)
22993 CFI_ENDPROC
22994
22995 RING0_INT_FRAME # can't unwind into user space anyway
22996 syscall_fault:
22997+#ifdef CONFIG_PAX_MEMORY_UDEREF
22998+ push %ss
22999+ pop %ds
23000+#endif
23001 ASM_CLAC
23002 GET_THREAD_INFO(%ebp)
23003 movl $-EFAULT,PT_EAX(%esp)
23004 jmp resume_userspace
23005-END(syscall_fault)
23006+ENDPROC(syscall_fault)
23007
23008 syscall_badsys:
23009 movl $-ENOSYS,%eax
23010 jmp syscall_after_call
23011-END(syscall_badsys)
23012+ENDPROC(syscall_badsys)
23013
23014 sysenter_badsys:
23015 movl $-ENOSYS,%eax
23016 jmp sysenter_after_call
23017-END(syscall_badsys)
23018+ENDPROC(sysenter_badsys)
23019 CFI_ENDPROC
23020
23021 .macro FIXUP_ESPFIX_STACK
23022@@ -696,8 +928,15 @@ END(syscall_badsys)
23023 */
23024 #ifdef CONFIG_X86_ESPFIX32
23025 /* fixup the stack */
23026- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
23027- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
23028+#ifdef CONFIG_SMP
23029+ movl PER_CPU_VAR(cpu_number), %ebx
23030+ shll $PAGE_SHIFT_asm, %ebx
23031+ addl $cpu_gdt_table, %ebx
23032+#else
23033+ movl $cpu_gdt_table, %ebx
23034+#endif
23035+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
23036+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
23037 shl $16, %eax
23038 addl %esp, %eax /* the adjusted stack pointer */
23039 pushl_cfi $__KERNEL_DS
23040@@ -753,7 +992,7 @@ vector=vector+1
23041 .endr
23042 2: jmp common_interrupt
23043 .endr
23044-END(irq_entries_start)
23045+ENDPROC(irq_entries_start)
23046
23047 .previous
23048 END(interrupt)
23049@@ -810,7 +1049,7 @@ ENTRY(coprocessor_error)
23050 pushl_cfi $do_coprocessor_error
23051 jmp error_code
23052 CFI_ENDPROC
23053-END(coprocessor_error)
23054+ENDPROC(coprocessor_error)
23055
23056 ENTRY(simd_coprocessor_error)
23057 RING0_INT_FRAME
23058@@ -823,7 +1062,7 @@ ENTRY(simd_coprocessor_error)
23059 .section .altinstructions,"a"
23060 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
23061 .previous
23062-.section .altinstr_replacement,"ax"
23063+.section .altinstr_replacement,"a"
23064 663: pushl $do_simd_coprocessor_error
23065 664:
23066 .previous
23067@@ -832,7 +1071,7 @@ ENTRY(simd_coprocessor_error)
23068 #endif
23069 jmp error_code
23070 CFI_ENDPROC
23071-END(simd_coprocessor_error)
23072+ENDPROC(simd_coprocessor_error)
23073
23074 ENTRY(device_not_available)
23075 RING0_INT_FRAME
23076@@ -841,18 +1080,18 @@ ENTRY(device_not_available)
23077 pushl_cfi $do_device_not_available
23078 jmp error_code
23079 CFI_ENDPROC
23080-END(device_not_available)
23081+ENDPROC(device_not_available)
23082
23083 #ifdef CONFIG_PARAVIRT
23084 ENTRY(native_iret)
23085 iret
23086 _ASM_EXTABLE(native_iret, iret_exc)
23087-END(native_iret)
23088+ENDPROC(native_iret)
23089
23090 ENTRY(native_irq_enable_sysexit)
23091 sti
23092 sysexit
23093-END(native_irq_enable_sysexit)
23094+ENDPROC(native_irq_enable_sysexit)
23095 #endif
23096
23097 ENTRY(overflow)
23098@@ -862,7 +1101,7 @@ ENTRY(overflow)
23099 pushl_cfi $do_overflow
23100 jmp error_code
23101 CFI_ENDPROC
23102-END(overflow)
23103+ENDPROC(overflow)
23104
23105 ENTRY(bounds)
23106 RING0_INT_FRAME
23107@@ -871,7 +1110,7 @@ ENTRY(bounds)
23108 pushl_cfi $do_bounds
23109 jmp error_code
23110 CFI_ENDPROC
23111-END(bounds)
23112+ENDPROC(bounds)
23113
23114 ENTRY(invalid_op)
23115 RING0_INT_FRAME
23116@@ -880,7 +1119,7 @@ ENTRY(invalid_op)
23117 pushl_cfi $do_invalid_op
23118 jmp error_code
23119 CFI_ENDPROC
23120-END(invalid_op)
23121+ENDPROC(invalid_op)
23122
23123 ENTRY(coprocessor_segment_overrun)
23124 RING0_INT_FRAME
23125@@ -889,7 +1128,7 @@ ENTRY(coprocessor_segment_overrun)
23126 pushl_cfi $do_coprocessor_segment_overrun
23127 jmp error_code
23128 CFI_ENDPROC
23129-END(coprocessor_segment_overrun)
23130+ENDPROC(coprocessor_segment_overrun)
23131
23132 ENTRY(invalid_TSS)
23133 RING0_EC_FRAME
23134@@ -897,7 +1136,7 @@ ENTRY(invalid_TSS)
23135 pushl_cfi $do_invalid_TSS
23136 jmp error_code
23137 CFI_ENDPROC
23138-END(invalid_TSS)
23139+ENDPROC(invalid_TSS)
23140
23141 ENTRY(segment_not_present)
23142 RING0_EC_FRAME
23143@@ -905,7 +1144,7 @@ ENTRY(segment_not_present)
23144 pushl_cfi $do_segment_not_present
23145 jmp error_code
23146 CFI_ENDPROC
23147-END(segment_not_present)
23148+ENDPROC(segment_not_present)
23149
23150 ENTRY(stack_segment)
23151 RING0_EC_FRAME
23152@@ -913,7 +1152,7 @@ ENTRY(stack_segment)
23153 pushl_cfi $do_stack_segment
23154 jmp error_code
23155 CFI_ENDPROC
23156-END(stack_segment)
23157+ENDPROC(stack_segment)
23158
23159 ENTRY(alignment_check)
23160 RING0_EC_FRAME
23161@@ -921,7 +1160,7 @@ ENTRY(alignment_check)
23162 pushl_cfi $do_alignment_check
23163 jmp error_code
23164 CFI_ENDPROC
23165-END(alignment_check)
23166+ENDPROC(alignment_check)
23167
23168 ENTRY(divide_error)
23169 RING0_INT_FRAME
23170@@ -930,7 +1169,7 @@ ENTRY(divide_error)
23171 pushl_cfi $do_divide_error
23172 jmp error_code
23173 CFI_ENDPROC
23174-END(divide_error)
23175+ENDPROC(divide_error)
23176
23177 #ifdef CONFIG_X86_MCE
23178 ENTRY(machine_check)
23179@@ -940,7 +1179,7 @@ ENTRY(machine_check)
23180 pushl_cfi machine_check_vector
23181 jmp error_code
23182 CFI_ENDPROC
23183-END(machine_check)
23184+ENDPROC(machine_check)
23185 #endif
23186
23187 ENTRY(spurious_interrupt_bug)
23188@@ -950,7 +1189,7 @@ ENTRY(spurious_interrupt_bug)
23189 pushl_cfi $do_spurious_interrupt_bug
23190 jmp error_code
23191 CFI_ENDPROC
23192-END(spurious_interrupt_bug)
23193+ENDPROC(spurious_interrupt_bug)
23194
23195 #ifdef CONFIG_XEN
23196 /* Xen doesn't set %esp to be precisely what the normal sysenter
23197@@ -1056,7 +1295,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
23198
23199 ENTRY(mcount)
23200 ret
23201-END(mcount)
23202+ENDPROC(mcount)
23203
23204 ENTRY(ftrace_caller)
23205 cmpl $0, function_trace_stop
23206@@ -1089,7 +1328,7 @@ ftrace_graph_call:
23207 .globl ftrace_stub
23208 ftrace_stub:
23209 ret
23210-END(ftrace_caller)
23211+ENDPROC(ftrace_caller)
23212
23213 ENTRY(ftrace_regs_caller)
23214 pushf /* push flags before compare (in cs location) */
23215@@ -1193,7 +1432,7 @@ trace:
23216 popl %ecx
23217 popl %eax
23218 jmp ftrace_stub
23219-END(mcount)
23220+ENDPROC(mcount)
23221 #endif /* CONFIG_DYNAMIC_FTRACE */
23222 #endif /* CONFIG_FUNCTION_TRACER */
23223
23224@@ -1211,7 +1450,7 @@ ENTRY(ftrace_graph_caller)
23225 popl %ecx
23226 popl %eax
23227 ret
23228-END(ftrace_graph_caller)
23229+ENDPROC(ftrace_graph_caller)
23230
23231 .globl return_to_handler
23232 return_to_handler:
23233@@ -1272,15 +1511,18 @@ error_code:
23234 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
23235 REG_TO_PTGS %ecx
23236 SET_KERNEL_GS %ecx
23237- movl $(__USER_DS), %ecx
23238+ movl $(__KERNEL_DS), %ecx
23239 movl %ecx, %ds
23240 movl %ecx, %es
23241+
23242+ pax_enter_kernel
23243+
23244 TRACE_IRQS_OFF
23245 movl %esp,%eax # pt_regs pointer
23246 call *%edi
23247 jmp ret_from_exception
23248 CFI_ENDPROC
23249-END(page_fault)
23250+ENDPROC(page_fault)
23251
23252 /*
23253 * Debug traps and NMI can happen at the one SYSENTER instruction
23254@@ -1323,7 +1565,7 @@ debug_stack_correct:
23255 call do_debug
23256 jmp ret_from_exception
23257 CFI_ENDPROC
23258-END(debug)
23259+ENDPROC(debug)
23260
23261 /*
23262 * NMI is doubly nasty. It can happen _while_ we're handling
23263@@ -1363,6 +1605,9 @@ nmi_stack_correct:
23264 xorl %edx,%edx # zero error code
23265 movl %esp,%eax # pt_regs pointer
23266 call do_nmi
23267+
23268+ pax_exit_kernel
23269+
23270 jmp restore_all_notrace
23271 CFI_ENDPROC
23272
23273@@ -1400,13 +1645,16 @@ nmi_espfix_stack:
23274 FIXUP_ESPFIX_STACK # %eax == %esp
23275 xorl %edx,%edx # zero error code
23276 call do_nmi
23277+
23278+ pax_exit_kernel
23279+
23280 RESTORE_REGS
23281 lss 12+4(%esp), %esp # back to espfix stack
23282 CFI_ADJUST_CFA_OFFSET -24
23283 jmp irq_return
23284 #endif
23285 CFI_ENDPROC
23286-END(nmi)
23287+ENDPROC(nmi)
23288
23289 ENTRY(int3)
23290 RING0_INT_FRAME
23291@@ -1419,14 +1667,14 @@ ENTRY(int3)
23292 call do_int3
23293 jmp ret_from_exception
23294 CFI_ENDPROC
23295-END(int3)
23296+ENDPROC(int3)
23297
23298 ENTRY(general_protection)
23299 RING0_EC_FRAME
23300 pushl_cfi $do_general_protection
23301 jmp error_code
23302 CFI_ENDPROC
23303-END(general_protection)
23304+ENDPROC(general_protection)
23305
23306 #ifdef CONFIG_KVM_GUEST
23307 ENTRY(async_page_fault)
23308@@ -1435,6 +1683,6 @@ ENTRY(async_page_fault)
23309 pushl_cfi $do_async_page_fault
23310 jmp error_code
23311 CFI_ENDPROC
23312-END(async_page_fault)
23313+ENDPROC(async_page_fault)
23314 #endif
23315
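The entry_32.S additions above include pax_erase_kstack, which scans down
from the recorded low-water mark for a run of the 0xffff4111 (-0xBEEF)
sentinel and then refills the used stack region with it, so stale kernel data
cannot leak into later syscalls. A deliberately simplified C rendering of the
refill step only; the real asm additionally locates the true low-water mark
with repne/repe scasl and re-arms TI_lowest_stack:

    #define EXAMPLE_STACK_POISON  (-0xBEEF)   /* matches the asm */

    static void example_erase_kstack(unsigned long *lowest,
                                     unsigned long *sp)
    {
            unsigned long *p;

            /* poison everything between the low-water mark and sp */
            for (p = lowest; p < sp; p++)
                    *p = EXAMPLE_STACK_POISON;
    }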
23316diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
23317index c844f08..966a50e 100644
23318--- a/arch/x86/kernel/entry_64.S
23319+++ b/arch/x86/kernel/entry_64.S
23320@@ -59,6 +59,8 @@
23321 #include <asm/smap.h>
23322 #include <asm/pgtable_types.h>
23323 #include <linux/err.h>
23324+#include <asm/pgtable.h>
23325+#include <asm/alternative-asm.h>
23326
23327 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23328 #include <linux/elf-em.h>
23329@@ -81,6 +83,430 @@ ENTRY(native_usergs_sysret64)
23330 ENDPROC(native_usergs_sysret64)
23331 #endif /* CONFIG_PARAVIRT */
23332
23333+ .macro ljmpq sel, off
23334+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23335+ .byte 0x48; ljmp *1234f(%rip)
23336+ .pushsection .rodata
23337+ .align 16
23338+ 1234: .quad \off; .word \sel
23339+ .popsection
23340+#else
23341+ pushq $\sel
23342+ pushq $\off
23343+ lretq
23344+#endif
23345+ .endm
23346+
23347+ .macro pax_enter_kernel
23348+ pax_set_fptr_mask
23349+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23350+ call pax_enter_kernel
23351+#endif
23352+ .endm
23353+
23354+ .macro pax_exit_kernel
23355+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23356+ call pax_exit_kernel
23357+#endif
23358+
23359+ .endm
23360+
23361+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23362+ENTRY(pax_enter_kernel)
23363+ pushq %rdi
23364+
23365+#ifdef CONFIG_PARAVIRT
23366+ PV_SAVE_REGS(CLBR_RDI)
23367+#endif
23368+
23369+#ifdef CONFIG_PAX_KERNEXEC
23370+ GET_CR0_INTO_RDI
23371+ bts $16,%rdi
23372+ jnc 3f
23373+ mov %cs,%edi
23374+ cmp $__KERNEL_CS,%edi
23375+ jnz 2f
23376+1:
23377+#endif
23378+
23379+#ifdef CONFIG_PAX_MEMORY_UDEREF
23380+ 661: jmp 111f
23381+ .pushsection .altinstr_replacement, "a"
23382+ 662: ASM_NOP2
23383+ .popsection
23384+ .pushsection .altinstructions, "a"
23385+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23386+ .popsection
23387+ GET_CR3_INTO_RDI
23388+ cmp $0,%dil
23389+ jnz 112f
23390+ mov $__KERNEL_DS,%edi
23391+ mov %edi,%ss
23392+ jmp 111f
23393+112: cmp $1,%dil
23394+ jz 113f
23395+ ud2
23396+113: sub $4097,%rdi
23397+ bts $63,%rdi
23398+ SET_RDI_INTO_CR3
23399+ mov $__UDEREF_KERNEL_DS,%edi
23400+ mov %edi,%ss
23401+111:
23402+#endif
23403+
23404+#ifdef CONFIG_PARAVIRT
23405+ PV_RESTORE_REGS(CLBR_RDI)
23406+#endif
23407+
23408+ popq %rdi
23409+ pax_force_retaddr
23410+ retq
23411+
23412+#ifdef CONFIG_PAX_KERNEXEC
23413+2: ljmpq __KERNEL_CS,1b
23414+3: ljmpq __KERNEXEC_KERNEL_CS,4f
23415+4: SET_RDI_INTO_CR0
23416+ jmp 1b
23417+#endif
23418+ENDPROC(pax_enter_kernel)
23419+
23420+ENTRY(pax_exit_kernel)
23421+ pushq %rdi
23422+
23423+#ifdef CONFIG_PARAVIRT
23424+ PV_SAVE_REGS(CLBR_RDI)
23425+#endif
23426+
23427+#ifdef CONFIG_PAX_KERNEXEC
23428+ mov %cs,%rdi
23429+ cmp $__KERNEXEC_KERNEL_CS,%edi
23430+ jz 2f
23431+ GET_CR0_INTO_RDI
23432+ bts $16,%rdi
23433+ jnc 4f
23434+1:
23435+#endif
23436+
23437+#ifdef CONFIG_PAX_MEMORY_UDEREF
23438+ 661: jmp 111f
23439+ .pushsection .altinstr_replacement, "a"
23440+ 662: ASM_NOP2
23441+ .popsection
23442+ .pushsection .altinstructions, "a"
23443+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23444+ .popsection
23445+ mov %ss,%edi
23446+ cmp $__UDEREF_KERNEL_DS,%edi
23447+ jnz 111f
23448+ GET_CR3_INTO_RDI
23449+ cmp $0,%dil
23450+ jz 112f
23451+ ud2
23452+112: add $4097,%rdi
23453+ bts $63,%rdi
23454+ SET_RDI_INTO_CR3
23455+ mov $__KERNEL_DS,%edi
23456+ mov %edi,%ss
23457+111:
23458+#endif
23459+
23460+#ifdef CONFIG_PARAVIRT
23461+ PV_RESTORE_REGS(CLBR_RDI);
23462+#endif
23463+
23464+ popq %rdi
23465+ pax_force_retaddr
23466+ retq
23467+
23468+#ifdef CONFIG_PAX_KERNEXEC
23469+2: GET_CR0_INTO_RDI
23470+ btr $16,%rdi
23471+ jnc 4f
23472+ ljmpq __KERNEL_CS,3f
23473+3: SET_RDI_INTO_CR0
23474+ jmp 1b
23475+4: ud2
23476+ jmp 4b
23477+#endif
23478+ENDPROC(pax_exit_kernel)
23479+#endif
23480+
23481+ .macro pax_enter_kernel_user
23482+ pax_set_fptr_mask
23483+#ifdef CONFIG_PAX_MEMORY_UDEREF
23484+ call pax_enter_kernel_user
23485+#endif
23486+ .endm
23487+
23488+ .macro pax_exit_kernel_user
23489+#ifdef CONFIG_PAX_MEMORY_UDEREF
23490+ call pax_exit_kernel_user
23491+#endif
23492+#ifdef CONFIG_PAX_RANDKSTACK
23493+ pushq %rax
23494+ pushq %r11
23495+ call pax_randomize_kstack
23496+ popq %r11
23497+ popq %rax
23498+#endif
23499+ .endm
23500+
23501+#ifdef CONFIG_PAX_MEMORY_UDEREF
23502+ENTRY(pax_enter_kernel_user)
23503+ pushq %rdi
23504+ pushq %rbx
23505+
23506+#ifdef CONFIG_PARAVIRT
23507+ PV_SAVE_REGS(CLBR_RDI)
23508+#endif
23509+
23510+ 661: jmp 111f
23511+ .pushsection .altinstr_replacement, "a"
23512+ 662: ASM_NOP2
23513+ .popsection
23514+ .pushsection .altinstructions, "a"
23515+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23516+ .popsection
23517+ GET_CR3_INTO_RDI
23518+ cmp $1,%dil
23519+ jnz 4f
23520+ sub $4097,%rdi
23521+ bts $63,%rdi
23522+ SET_RDI_INTO_CR3
23523+ jmp 3f
23524+111:
23525+
23526+ GET_CR3_INTO_RDI
23527+ mov %rdi,%rbx
23528+ add $__START_KERNEL_map,%rbx
23529+ sub phys_base(%rip),%rbx
23530+
23531+#ifdef CONFIG_PARAVIRT
23532+ cmpl $0, pv_info+PARAVIRT_enabled
23533+ jz 1f
23534+ pushq %rdi
23535+ i = 0
23536+ .rept USER_PGD_PTRS
23537+ mov i*8(%rbx),%rsi
23538+ mov $0,%sil
23539+ lea i*8(%rbx),%rdi
23540+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23541+ i = i + 1
23542+ .endr
23543+ popq %rdi
23544+ jmp 2f
23545+1:
23546+#endif
23547+
23548+ i = 0
23549+ .rept USER_PGD_PTRS
23550+ movb $0,i*8(%rbx)
23551+ i = i + 1
23552+ .endr
23553+
23554+2: SET_RDI_INTO_CR3
23555+
23556+#ifdef CONFIG_PAX_KERNEXEC
23557+ GET_CR0_INTO_RDI
23558+ bts $16,%rdi
23559+ SET_RDI_INTO_CR0
23560+#endif
23561+
23562+3:
23563+
23564+#ifdef CONFIG_PARAVIRT
23565+ PV_RESTORE_REGS(CLBR_RDI)
23566+#endif
23567+
23568+ popq %rbx
23569+ popq %rdi
23570+ pax_force_retaddr
23571+ retq
23572+4: ud2
23573+ENDPROC(pax_enter_kernel_user)
23574+
23575+ENTRY(pax_exit_kernel_user)
23576+ pushq %rdi
23577+ pushq %rbx
23578+
23579+#ifdef CONFIG_PARAVIRT
23580+ PV_SAVE_REGS(CLBR_RDI)
23581+#endif
23582+
23583+ GET_CR3_INTO_RDI
23584+ 661: jmp 1f
23585+ .pushsection .altinstr_replacement, "a"
23586+ 662: ASM_NOP2
23587+ .popsection
23588+ .pushsection .altinstructions, "a"
23589+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23590+ .popsection
23591+ cmp $0,%dil
23592+ jnz 3f
23593+ add $4097,%rdi
23594+ bts $63,%rdi
23595+ SET_RDI_INTO_CR3
23596+ jmp 2f
23597+1:
23598+
23599+ mov %rdi,%rbx
23600+
23601+#ifdef CONFIG_PAX_KERNEXEC
23602+ GET_CR0_INTO_RDI
23603+ btr $16,%rdi
23604+ jnc 3f
23605+ SET_RDI_INTO_CR0
23606+#endif
23607+
23608+ add $__START_KERNEL_map,%rbx
23609+ sub phys_base(%rip),%rbx
23610+
23611+#ifdef CONFIG_PARAVIRT
23612+ cmpl $0, pv_info+PARAVIRT_enabled
23613+ jz 1f
23614+ i = 0
23615+ .rept USER_PGD_PTRS
23616+ mov i*8(%rbx),%rsi
23617+ mov $0x67,%sil
23618+ lea i*8(%rbx),%rdi
23619+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23620+ i = i + 1
23621+ .endr
23622+ jmp 2f
23623+1:
23624+#endif
23625+
23626+ i = 0
23627+ .rept USER_PGD_PTRS
23628+ movb $0x67,i*8(%rbx)
23629+ i = i + 1
23630+ .endr
23631+2:
23632+
23633+#ifdef CONFIG_PARAVIRT
23634+ PV_RESTORE_REGS(CLBR_RDI)
23635+#endif
23636+
23637+ popq %rbx
23638+ popq %rdi
23639+ pax_force_retaddr
23640+ retq
23641+3: ud2
23642+ENDPROC(pax_exit_kernel_user)
23643+#endif
23644+
23645+ .macro pax_enter_kernel_nmi
23646+ pax_set_fptr_mask
23647+
23648+#ifdef CONFIG_PAX_KERNEXEC
23649+ GET_CR0_INTO_RDI
23650+ bts $16,%rdi
23651+ jc 110f
23652+ SET_RDI_INTO_CR0
23653+ or $2,%ebx
23654+110:
23655+#endif
23656+
23657+#ifdef CONFIG_PAX_MEMORY_UDEREF
23658+ 661: jmp 111f
23659+ .pushsection .altinstr_replacement, "a"
23660+ 662: ASM_NOP2
23661+ .popsection
23662+ .pushsection .altinstructions, "a"
23663+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23664+ .popsection
23665+ GET_CR3_INTO_RDI
23666+ cmp $0,%dil
23667+ jz 111f
23668+ sub $4097,%rdi
23669+ or $4,%ebx
23670+ bts $63,%rdi
23671+ SET_RDI_INTO_CR3
23672+ mov $__UDEREF_KERNEL_DS,%edi
23673+ mov %edi,%ss
23674+111:
23675+#endif
23676+ .endm
23677+
23678+ .macro pax_exit_kernel_nmi
23679+#ifdef CONFIG_PAX_KERNEXEC
23680+ btr $1,%ebx
23681+ jnc 110f
23682+ GET_CR0_INTO_RDI
23683+ btr $16,%rdi
23684+ SET_RDI_INTO_CR0
23685+110:
23686+#endif
23687+
23688+#ifdef CONFIG_PAX_MEMORY_UDEREF
23689+ btr $2,%ebx
23690+ jnc 111f
23691+ GET_CR3_INTO_RDI
23692+ add $4097,%rdi
23693+ bts $63,%rdi
23694+ SET_RDI_INTO_CR3
23695+ mov $__KERNEL_DS,%edi
23696+ mov %edi,%ss
23697+111:
23698+#endif
23699+ .endm
23700+
23701+ .macro pax_erase_kstack
23702+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23703+ call pax_erase_kstack
23704+#endif
23705+ .endm
23706+
23707+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23708+ENTRY(pax_erase_kstack)
23709+ pushq %rdi
23710+ pushq %rcx
23711+ pushq %rax
23712+ pushq %r11
23713+
23714+ GET_THREAD_INFO(%r11)
23715+ mov TI_lowest_stack(%r11), %rdi
23716+ mov $-0xBEEF, %rax
23717+ std
23718+
23719+1: mov %edi, %ecx
23720+ and $THREAD_SIZE_asm - 1, %ecx
23721+ shr $3, %ecx
23722+ repne scasq
23723+ jecxz 2f
23724+
23725+ cmp $2*8, %ecx
23726+ jc 2f
23727+
23728+ mov $2*8, %ecx
23729+ repe scasq
23730+ jecxz 2f
23731+ jne 1b
23732+
23733+2: cld
23734+ mov %esp, %ecx
23735+ sub %edi, %ecx
23736+
23737+ cmp $THREAD_SIZE_asm, %rcx
23738+ jb 3f
23739+ ud2
23740+3:
23741+
23742+ shr $3, %ecx
23743+ rep stosq
23744+
23745+ mov TI_task_thread_sp0(%r11), %rdi
23746+ sub $256, %rdi
23747+ mov %rdi, TI_lowest_stack(%r11)
23748+
23749+ popq %r11
23750+ popq %rax
23751+ popq %rcx
23752+ popq %rdi
23753+ pax_force_retaddr
23754+ ret
23755+ENDPROC(pax_erase_kstack)
23756+#endif
23757
23758 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
23759 #ifdef CONFIG_TRACE_IRQFLAGS
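
The pax_enter_kernel/pax_exit_kernel pair above implements UDEREF's page-table switch on PCID-capable CPUs: the user shadow PGD sits in the page immediately after the kernel PGD and its CR3 value carries PCID 1, so "sub $4097,%rdi" turns a user CR3 (shadow page, PCID 1) into the kernel CR3 (one page lower, PCID 0), "add $4097" reverses it, and "bts $63,%rdi" sets CR3's no-flush bit so the reload keeps the tagged TLB entries. The "cmp $0,%dil"/"cmp $1,%dil" tests (ud2 on mismatch) merely assert that CR3 carries the PCID the code expects before converting. The arithmetic, checked standalone with an invented PGD address:

#include <assert.h>
#include <stdint.h>

#define CR3_NOFLUSH (1ULL << 63)   /* bit 63: keep TLB on CR3 load */
#define PAGE_SIZE   4096ULL

int main(void)
{
    uint64_t kernel_pgd = 0x1000000ULL;                  /* illustrative */
    uint64_t user_cr3   = (kernel_pgd + PAGE_SIZE) | 1;  /* PCID 1 */

    /* pax_enter_kernel: sub $4097, then bts $63 */
    uint64_t kernel_cr3 = (user_cr3 - (PAGE_SIZE + 1)) | CR3_NOFLUSH;
    assert((kernel_cr3 & ~CR3_NOFLUSH) == kernel_pgd);   /* PCID 0 */

    /* pax_exit_kernel: add $4097, then bts $63 */
    uint64_t back = ((kernel_cr3 & ~CR3_NOFLUSH) + (PAGE_SIZE + 1))
                    | CR3_NOFLUSH;
    assert((back & ~CR3_NOFLUSH) == user_cr3);
    return 0;
}
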
23760@@ -117,7 +543,7 @@ ENDPROC(native_usergs_sysret64)
23761 .endm
23762
23763 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
23764- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
23765+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
23766 jnc 1f
23767 TRACE_IRQS_ON_DEBUG
23768 1:
23769@@ -155,27 +581,6 @@ ENDPROC(native_usergs_sysret64)
23770 movq \tmp,R11+\offset(%rsp)
23771 .endm
23772
23773- .macro FAKE_STACK_FRAME child_rip
23774- /* push in order ss, rsp, eflags, cs, rip */
23775- xorl %eax, %eax
23776- pushq_cfi $__KERNEL_DS /* ss */
23777- /*CFI_REL_OFFSET ss,0*/
23778- pushq_cfi %rax /* rsp */
23779- CFI_REL_OFFSET rsp,0
23780- pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
23781- /*CFI_REL_OFFSET rflags,0*/
23782- pushq_cfi $__KERNEL_CS /* cs */
23783- /*CFI_REL_OFFSET cs,0*/
23784- pushq_cfi \child_rip /* rip */
23785- CFI_REL_OFFSET rip,0
23786- pushq_cfi %rax /* orig rax */
23787- .endm
23788-
23789- .macro UNFAKE_STACK_FRAME
23790- addq $8*6, %rsp
23791- CFI_ADJUST_CFA_OFFSET -(6*8)
23792- .endm
23793-
23794 /*
23795 * initial frame state for interrupts (and exceptions without error code)
23796 */
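
pax_erase_kstack, defined in the large block added above, is the STACKLEAK pass: after a syscall the used portion of the kernel stack is refilled with a poison pattern ("mov $-0xBEEF, %rax") so the next syscall cannot read the previous one's leftovers, and TI_lowest_stack caches how deep the next scan has to look. A much-simplified model of the scan-then-refill logic; the real routine scans downward with repne/repe scasq and insists on a short run of poison words, not a single match, before trusting the boundary:

#include <assert.h>
#include <stdint.h>

#define POISON 0xffffffffffff4111ULL   /* the sign-extended -0xBEEF */
#define DEPTH  64                      /* toy kernel stack, in qwords */

int main(void)
{
    uint64_t stack[DEPTH];
    int i, top;

    for (i = 0; i < DEPTH; i++)         /* freshly poisoned stack */
        stack[i] = POISON;

    for (i = 0; i < 20; i++)            /* a "syscall" dirties the deepest */
        stack[i] = 0xdeadbeef00ULL + i; /* 20 slots (index 0 = deepest)    */

    /* Walk up from the bottom until two consecutive poison words are
     * seen (two, so live data that happens to equal the poison doesn't
     * stop the scan early), then re-poison everything below. */
    top = 0;
    while (top + 1 < DEPTH &&
           !(stack[top] == POISON && stack[top + 1] == POISON))
        top++;
    for (i = 0; i < top; i++)
        stack[i] = POISON;

    for (i = 0; i < DEPTH; i++)         /* nothing leaks to the next call */
        assert(stack[i] == POISON);
    return 0;
}
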
23797@@ -242,25 +647,26 @@ ENDPROC(native_usergs_sysret64)
23798 /* save partial stack frame */
23799 .macro SAVE_ARGS_IRQ
23800 cld
23801- /* start from rbp in pt_regs and jump over */
23802- movq_cfi rdi, (RDI-RBP)
23803- movq_cfi rsi, (RSI-RBP)
23804- movq_cfi rdx, (RDX-RBP)
23805- movq_cfi rcx, (RCX-RBP)
23806- movq_cfi rax, (RAX-RBP)
23807- movq_cfi r8, (R8-RBP)
23808- movq_cfi r9, (R9-RBP)
23809- movq_cfi r10, (R10-RBP)
23810- movq_cfi r11, (R11-RBP)
23811+ /* start from r15 in pt_regs and jump over */
23812+ movq_cfi rdi, RDI
23813+ movq_cfi rsi, RSI
23814+ movq_cfi rdx, RDX
23815+ movq_cfi rcx, RCX
23816+ movq_cfi rax, RAX
23817+ movq_cfi r8, R8
23818+ movq_cfi r9, R9
23819+ movq_cfi r10, R10
23820+ movq_cfi r11, R11
23821+ movq_cfi r12, R12
23822
23823 /* Save rbp so that we can unwind from get_irq_regs() */
23824- movq_cfi rbp, 0
23825+ movq_cfi rbp, RBP
23826
23827 /* Save previous stack value */
23828 movq %rsp, %rsi
23829
23830- leaq -RBP(%rsp),%rdi /* arg1 for handler */
23831- testl $3, CS-RBP(%rsi)
23832+ movq %rsp,%rdi /* arg1 for handler */
23833+ testb $3, CS(%rsi)
23834 je 1f
23835 SWAPGS
23836 /*
23837@@ -280,6 +686,18 @@ ENDPROC(native_usergs_sysret64)
23838 0x06 /* DW_OP_deref */, \
23839 0x08 /* DW_OP_const1u */, SS+8-RBP, \
23840 0x22 /* DW_OP_plus */
23841+
23842+#ifdef CONFIG_PAX_MEMORY_UDEREF
23843+ testb $3, CS(%rdi)
23844+ jnz 1f
23845+ pax_enter_kernel
23846+ jmp 2f
23847+1: pax_enter_kernel_user
23848+2:
23849+#else
23850+ pax_enter_kernel
23851+#endif
23852+
23853 /* We entered an interrupt context - irqs are off: */
23854 TRACE_IRQS_OFF
23855 .endm
23856@@ -309,9 +727,52 @@ ENTRY(save_paranoid)
23857 js 1f /* negative -> in kernel */
23858 SWAPGS
23859 xorl %ebx,%ebx
23860-1: ret
23861+1:
23862+#ifdef CONFIG_PAX_MEMORY_UDEREF
23863+ testb $3, CS+8(%rsp)
23864+ jnz 1f
23865+ pax_enter_kernel
23866+ jmp 2f
23867+1: pax_enter_kernel_user
23868+2:
23869+#else
23870+ pax_enter_kernel
23871+#endif
23872+ pax_force_retaddr
23873+ ret
23874 CFI_ENDPROC
23875-END(save_paranoid)
23876+ENDPROC(save_paranoid)
23877+
23878+ENTRY(save_paranoid_nmi)
23879+ XCPT_FRAME 1 RDI+8
23880+ cld
23881+ movq_cfi rdi, RDI+8
23882+ movq_cfi rsi, RSI+8
23883+ movq_cfi rdx, RDX+8
23884+ movq_cfi rcx, RCX+8
23885+ movq_cfi rax, RAX+8
23886+ movq_cfi r8, R8+8
23887+ movq_cfi r9, R9+8
23888+ movq_cfi r10, R10+8
23889+ movq_cfi r11, R11+8
23890+ movq_cfi rbx, RBX+8
23891+ movq_cfi rbp, RBP+8
23892+ movq_cfi r12, R12+8
23893+ movq_cfi r13, R13+8
23894+ movq_cfi r14, R14+8
23895+ movq_cfi r15, R15+8
23896+ movl $1,%ebx
23897+ movl $MSR_GS_BASE,%ecx
23898+ rdmsr
23899+ testl %edx,%edx
23900+ js 1f /* negative -> in kernel */
23901+ SWAPGS
23902+ xorl %ebx,%ebx
23903+1: pax_enter_kernel_nmi
23904+ pax_force_retaddr
23905+ ret
23906+ CFI_ENDPROC
23907+ENDPROC(save_paranoid_nmi)
23908
23909 /*
23910 * A newly forked process directly context switches into this address.
23911@@ -332,7 +793,7 @@ ENTRY(ret_from_fork)
23912
23913 RESTORE_REST
23914
23915- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23916+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23917 jz 1f
23918
23919 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
23920@@ -342,15 +803,13 @@ ENTRY(ret_from_fork)
23921 jmp ret_from_sys_call # go to the SYSRET fastpath
23922
23923 1:
23924- subq $REST_SKIP, %rsp # leave space for volatiles
23925- CFI_ADJUST_CFA_OFFSET REST_SKIP
23926 movq %rbp, %rdi
23927 call *%rbx
23928 movl $0, RAX(%rsp)
23929 RESTORE_REST
23930 jmp int_ret_from_sys_call
23931 CFI_ENDPROC
23932-END(ret_from_fork)
23933+ENDPROC(ret_from_fork)
23934
23935 /*
23936 * System call entry. Up to 6 arguments in registers are supported.
23937@@ -387,7 +846,7 @@ END(ret_from_fork)
23938 ENTRY(system_call)
23939 CFI_STARTPROC simple
23940 CFI_SIGNAL_FRAME
23941- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
23942+ CFI_DEF_CFA rsp,0
23943 CFI_REGISTER rip,rcx
23944 /*CFI_REGISTER rflags,r11*/
23945 SWAPGS_UNSAFE_STACK
23946@@ -400,16 +859,23 @@ GLOBAL(system_call_after_swapgs)
23947
23948 movq %rsp,PER_CPU_VAR(old_rsp)
23949 movq PER_CPU_VAR(kernel_stack),%rsp
23950+ SAVE_ARGS 8*6,0
23951+ pax_enter_kernel_user
23952+
23953+#ifdef CONFIG_PAX_RANDKSTACK
23954+ pax_erase_kstack
23955+#endif
23956+
23957 /*
23958 * No need to follow this irqs off/on section - it's straight
23959 * and short:
23960 */
23961 ENABLE_INTERRUPTS(CLBR_NONE)
23962- SAVE_ARGS 8,0
23963 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
23964 movq %rcx,RIP-ARGOFFSET(%rsp)
23965 CFI_REL_OFFSET rip,RIP-ARGOFFSET
23966- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23967+ GET_THREAD_INFO(%rcx)
23968+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
23969 jnz tracesys
23970 system_call_fastpath:
23971 #if __SYSCALL_MASK == ~0
23972@@ -433,10 +899,13 @@ sysret_check:
23973 LOCKDEP_SYS_EXIT
23974 DISABLE_INTERRUPTS(CLBR_NONE)
23975 TRACE_IRQS_OFF
23976- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
23977+ GET_THREAD_INFO(%rcx)
23978+ movl TI_flags(%rcx),%edx
23979 andl %edi,%edx
23980 jnz sysret_careful
23981 CFI_REMEMBER_STATE
23982+ pax_exit_kernel_user
23983+ pax_erase_kstack
23984 /*
23985 * sysretq will re-enable interrupts:
23986 */
23987@@ -495,6 +964,9 @@ auditsys:
23988 movq %rax,%rsi /* 2nd arg: syscall number */
23989 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
23990 call __audit_syscall_entry
23991+
23992+ pax_erase_kstack
23993+
23994 LOAD_ARGS 0 /* reload call-clobbered registers */
23995 jmp system_call_fastpath
23996
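
The system_call hunks above replace rsp-relative TI_flags accesses with GET_THREAD_INFO(%rcx): with KERNEL_STACK_OFFSET gone (CFI_DEF_CFA rsp,0; SAVE_ARGS 8*6,0) the flags no longer sit at a fixed offset from %rsp, so the thread_info pointer is recomputed instead. On x86-64 of this vintage thread_info lives at the low end of the kernel stack, making the lookup "top of stack minus THREAD_SIZE"; a sketch with an illustrative struct layout:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define THREAD_SIZE (16 * 1024)   /* x86-64 here; config-dependent */

struct thread_info {              /* illustrative subset */
    uint32_t flags;
    uint32_t status;
};

/* GET_THREAD_INFO in spirit: thread_info is allocated at the base of
 * the stack area, so it can be recovered from the stack top without
 * any per-frame offset bookkeeping. */
static struct thread_info *thread_info_of(void *stack_top)
{
    return (struct thread_info *)((char *)stack_top - THREAD_SIZE);
}

int main(void)
{
    void *base = aligned_alloc(THREAD_SIZE, THREAD_SIZE);
    void *top  = (char *)base + THREAD_SIZE;  /* what kernel_stack holds */
    assert((void *)thread_info_of(top) == base);
    free(base);
    return 0;
}
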
23997@@ -516,7 +988,7 @@ sysret_audit:
23998 /* Do syscall tracing */
23999 tracesys:
24000 #ifdef CONFIG_AUDITSYSCALL
24001- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
24002+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
24003 jz auditsys
24004 #endif
24005 SAVE_REST
24006@@ -524,12 +996,15 @@ tracesys:
24007 FIXUP_TOP_OF_STACK %rdi
24008 movq %rsp,%rdi
24009 call syscall_trace_enter
24010+
24011+ pax_erase_kstack
24012+
24013 /*
24014 * Reload arg registers from stack in case ptrace changed them.
24015 * We don't reload %rax because syscall_trace_enter() returned
24016 * the value it wants us to use in the table lookup.
24017 */
24018- LOAD_ARGS ARGOFFSET, 1
24019+ LOAD_ARGS 1
24020 RESTORE_REST
24021 #if __SYSCALL_MASK == ~0
24022 cmpq $__NR_syscall_max,%rax
24023@@ -559,7 +1034,9 @@ GLOBAL(int_with_check)
24024 andl %edi,%edx
24025 jnz int_careful
24026 andl $~TS_COMPAT,TI_status(%rcx)
24027- jmp retint_swapgs
24028+ pax_exit_kernel_user
24029+ pax_erase_kstack
24030+ jmp retint_swapgs_pax
24031
24032 /* Either reschedule or signal or syscall exit tracking needed. */
24033 /* First do a reschedule test. */
24034@@ -605,7 +1082,7 @@ int_restore_rest:
24035 TRACE_IRQS_OFF
24036 jmp int_with_check
24037 CFI_ENDPROC
24038-END(system_call)
24039+ENDPROC(system_call)
24040
24041 .macro FORK_LIKE func
24042 ENTRY(stub_\func)
24043@@ -618,9 +1095,10 @@ ENTRY(stub_\func)
24044 DEFAULT_FRAME 0 8 /* offset 8: return address */
24045 call sys_\func
24046 RESTORE_TOP_OF_STACK %r11, 8
24047- ret $REST_SKIP /* pop extended registers */
24048+ pax_force_retaddr
24049+ ret
24050 CFI_ENDPROC
24051-END(stub_\func)
24052+ENDPROC(stub_\func)
24053 .endm
24054
24055 .macro FIXED_FRAME label,func
24056@@ -630,9 +1108,10 @@ ENTRY(\label)
24057 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
24058 call \func
24059 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
24060+ pax_force_retaddr
24061 ret
24062 CFI_ENDPROC
24063-END(\label)
24064+ENDPROC(\label)
24065 .endm
24066
24067 FORK_LIKE clone
24068@@ -640,19 +1119,6 @@ END(\label)
24069 FORK_LIKE vfork
24070 FIXED_FRAME stub_iopl, sys_iopl
24071
24072-ENTRY(ptregscall_common)
24073- DEFAULT_FRAME 1 8 /* offset 8: return address */
24074- RESTORE_TOP_OF_STACK %r11, 8
24075- movq_cfi_restore R15+8, r15
24076- movq_cfi_restore R14+8, r14
24077- movq_cfi_restore R13+8, r13
24078- movq_cfi_restore R12+8, r12
24079- movq_cfi_restore RBP+8, rbp
24080- movq_cfi_restore RBX+8, rbx
24081- ret $REST_SKIP /* pop extended registers */
24082- CFI_ENDPROC
24083-END(ptregscall_common)
24084-
24085 ENTRY(stub_execve)
24086 CFI_STARTPROC
24087 addq $8, %rsp
24088@@ -664,7 +1130,7 @@ ENTRY(stub_execve)
24089 RESTORE_REST
24090 jmp int_ret_from_sys_call
24091 CFI_ENDPROC
24092-END(stub_execve)
24093+ENDPROC(stub_execve)
24094
24095 /*
24096 * sigreturn is special because it needs to restore all registers on return.
24097@@ -681,7 +1147,7 @@ ENTRY(stub_rt_sigreturn)
24098 RESTORE_REST
24099 jmp int_ret_from_sys_call
24100 CFI_ENDPROC
24101-END(stub_rt_sigreturn)
24102+ENDPROC(stub_rt_sigreturn)
24103
24104 #ifdef CONFIG_X86_X32_ABI
24105 ENTRY(stub_x32_rt_sigreturn)
24106@@ -695,7 +1161,7 @@ ENTRY(stub_x32_rt_sigreturn)
24107 RESTORE_REST
24108 jmp int_ret_from_sys_call
24109 CFI_ENDPROC
24110-END(stub_x32_rt_sigreturn)
24111+ENDPROC(stub_x32_rt_sigreturn)
24112
24113 ENTRY(stub_x32_execve)
24114 CFI_STARTPROC
24115@@ -709,7 +1175,7 @@ ENTRY(stub_x32_execve)
24116 RESTORE_REST
24117 jmp int_ret_from_sys_call
24118 CFI_ENDPROC
24119-END(stub_x32_execve)
24120+ENDPROC(stub_x32_execve)
24121
24122 #endif
24123
24124@@ -746,7 +1212,7 @@ vector=vector+1
24125 2: jmp common_interrupt
24126 .endr
24127 CFI_ENDPROC
24128-END(irq_entries_start)
24129+ENDPROC(irq_entries_start)
24130
24131 .previous
24132 END(interrupt)
24133@@ -763,8 +1229,8 @@ END(interrupt)
24134 /* 0(%rsp): ~(interrupt number) */
24135 .macro interrupt func
24136 /* reserve pt_regs for scratch regs and rbp */
24137- subq $ORIG_RAX-RBP, %rsp
24138- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
24139+ subq $ORIG_RAX, %rsp
24140+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
24141 SAVE_ARGS_IRQ
24142 call \func
24143 .endm
24144@@ -787,14 +1253,14 @@ ret_from_intr:
24145
24146 /* Restore saved previous stack */
24147 popq %rsi
24148- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
24149- leaq ARGOFFSET-RBP(%rsi), %rsp
24150+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
24151+ movq %rsi, %rsp
24152 CFI_DEF_CFA_REGISTER rsp
24153- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
24154+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
24155
24156 exit_intr:
24157 GET_THREAD_INFO(%rcx)
24158- testl $3,CS-ARGOFFSET(%rsp)
24159+ testb $3,CS-ARGOFFSET(%rsp)
24160 je retint_kernel
24161
24162 /* Interrupt came from user space */
24163@@ -816,12 +1282,35 @@ retint_swapgs: /* return to user-space */
24164 * The iretq could re-enable interrupts:
24165 */
24166 DISABLE_INTERRUPTS(CLBR_ANY)
24167+ pax_exit_kernel_user
24168+retint_swapgs_pax:
24169 TRACE_IRQS_IRETQ
24170 SWAPGS
24171 jmp restore_args
24172
24173 retint_restore_args: /* return to kernel space */
24174 DISABLE_INTERRUPTS(CLBR_ANY)
24175+ pax_exit_kernel
24176+
24177+#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
24178+ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
24179+ * namely calling EFI runtime services with a phys mapping. We start
24180+ * off with NOPs and patch in the real instrumentation (BTS/OR)
24181+ * before starting any userland process, even before starting up
24182+ * the APs.
24183+ */
24184+ .pushsection .altinstr_replacement, "a"
24185+ 601: pax_force_retaddr (RIP-ARGOFFSET)
24186+ 602:
24187+ .popsection
24188+ 603: .fill 602b-601b, 1, 0x90
24189+ .pushsection .altinstructions, "a"
24190+ altinstruction_entry 603b, 601b, X86_FEATURE_ALWAYS, 602b-601b, 602b-601b
24191+ .popsection
24192+#else
24193+ pax_force_retaddr (RIP-ARGOFFSET)
24194+#endif
24195+
24196 /*
24197 * The iretq could re-enable interrupts:
24198 */
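
The EFI/KERNEXEC quirk above leans on the kernel's alternatives machinery, the same mechanism behind the 661:/662: + altinstruction_entry pairs at the PCID sites: the assembled code initially holds the conservative sequence (NOPs here, a jmp over the PCID block earlier), and a table entry records which bytes to overwrite with the real sequence once CPU features are known. A toy, data-only model of that table-driven patch step:

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct alt_entry {            /* cf. altinstruction_entry */
    uint8_t *site;            /* bytes to patch */
    const uint8_t *repl;      /* replacement bytes */
    uint8_t len;              /* how many to copy */
    int feature;              /* patch only if the CPU has this */
};

static void apply_alternatives(struct alt_entry *e, int n,
                               int (*has)(int feature))
{
    for (int i = 0; i < n; i++)
        if (has(e[i].feature))
            memcpy(e[i].site, e[i].repl, e[i].len);
}

static int has_feature(int f) { return f == 1; }   /* pretend-CPUID */

int main(void)
{
    uint8_t code[4] = { 0x90, 0x90, 0x90, 0x90 };  /* NOPs at "boot" */
    const uint8_t real[2] = { 0xeb, 0x02 };        /* patched in later */
    struct alt_entry table[] = {
        { code,     real, 2, 1 },   /* applied: feature 1 present */
        { code + 2, real, 2, 7 },   /* skipped: feature 7 absent  */
    };
    apply_alternatives(table, 2, has_feature);
    assert(code[0] == 0xeb && code[2] == 0x90);
    return 0;
}
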
24199@@ -934,7 +1423,7 @@ ENTRY(retint_kernel)
24200 jmp exit_intr
24201 #endif
24202 CFI_ENDPROC
24203-END(common_interrupt)
24204+ENDPROC(common_interrupt)
24205
24206 /*
24207 * If IRET takes a fault on the espfix stack, then we
24208@@ -956,13 +1445,13 @@ __do_double_fault:
24209 cmpq $native_irq_return_iret,%rax
24210 jne do_double_fault /* This shouldn't happen... */
24211 movq PER_CPU_VAR(kernel_stack),%rax
24212- subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */
24213+ subq $(6*8),%rax /* Reset to original stack */
24214 movq %rax,RSP(%rdi)
24215 movq $0,(%rax) /* Missing (lost) #GP error code */
24216 movq $general_protection,RIP(%rdi)
24217 retq
24218 CFI_ENDPROC
24219-END(__do_double_fault)
24220+ENDPROC(__do_double_fault)
24221 #else
24222 # define __do_double_fault do_double_fault
24223 #endif
24224@@ -979,7 +1468,7 @@ ENTRY(\sym)
24225 interrupt \do_sym
24226 jmp ret_from_intr
24227 CFI_ENDPROC
24228-END(\sym)
24229+ENDPROC(\sym)
24230 .endm
24231
24232 #ifdef CONFIG_TRACING
24233@@ -1052,7 +1541,7 @@ apicinterrupt IRQ_WORK_VECTOR \
24234 /*
24235 * Exception entry points.
24236 */
24237-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
24238+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
24239
24240 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
24241 ENTRY(\sym)
24242@@ -1103,6 +1592,12 @@ ENTRY(\sym)
24243 .endif
24244
24245 .if \shift_ist != -1
24246+#ifdef CONFIG_SMP
24247+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
24248+ lea init_tss(%r13), %r13
24249+#else
24250+ lea init_tss(%rip), %r13
24251+#endif
24252 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
24253 .endif
24254
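
The shift_ist change just above swaps a percpu-relocated init_tss reference for explicit indexing: on SMP, "imul $TSS_size" on cpu_number plus an lea computes the address of this CPU's TSS by hand, and INIT_TSS_IST then indexes off %r13. That is plain array arithmetic, as the sketch below checks (struct contents are illustrative):

#include <assert.h>
#include <stdint.h>

struct tss { uint64_t ist[7]; uint64_t other[9]; };  /* illustrative */

int main(void)
{
    struct tss init_tss[4];        /* one per CPU, as in the SMP case */
    unsigned cpu = 2;

    /* imul $TSS_size, cpu_number, %r13d ; lea init_tss(%r13), %r13 */
    uintptr_t by_hand = (uintptr_t)init_tss + cpu * sizeof(struct tss);
    assert(by_hand == (uintptr_t)&init_tss[cpu]);
    return 0;
}
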
24255@@ -1119,7 +1614,7 @@ ENTRY(\sym)
24256 .endif
24257
24258 CFI_ENDPROC
24259-END(\sym)
24260+ENDPROC(\sym)
24261 .endm
24262
24263 #ifdef CONFIG_TRACING
24264@@ -1160,9 +1655,10 @@ gs_change:
24265 2: mfence /* workaround */
24266 SWAPGS
24267 popfq_cfi
24268+ pax_force_retaddr
24269 ret
24270 CFI_ENDPROC
24271-END(native_load_gs_index)
24272+ENDPROC(native_load_gs_index)
24273
24274 _ASM_EXTABLE(gs_change,bad_gs)
24275 .section .fixup,"ax"
24276@@ -1190,9 +1686,10 @@ ENTRY(do_softirq_own_stack)
24277 CFI_DEF_CFA_REGISTER rsp
24278 CFI_ADJUST_CFA_OFFSET -8
24279 decl PER_CPU_VAR(irq_count)
24280+ pax_force_retaddr
24281 ret
24282 CFI_ENDPROC
24283-END(do_softirq_own_stack)
24284+ENDPROC(do_softirq_own_stack)
24285
24286 #ifdef CONFIG_XEN
24287 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
24288@@ -1230,7 +1727,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
24289 decl PER_CPU_VAR(irq_count)
24290 jmp error_exit
24291 CFI_ENDPROC
24292-END(xen_do_hypervisor_callback)
24293+ENDPROC(xen_do_hypervisor_callback)
24294
24295 /*
24296 * Hypervisor uses this for application faults while it executes.
24297@@ -1289,7 +1786,7 @@ ENTRY(xen_failsafe_callback)
24298 SAVE_ALL
24299 jmp error_exit
24300 CFI_ENDPROC
24301-END(xen_failsafe_callback)
24302+ENDPROC(xen_failsafe_callback)
24303
24304 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
24305 xen_hvm_callback_vector xen_evtchn_do_upcall
24306@@ -1336,18 +1833,33 @@ ENTRY(paranoid_exit)
24307 DEFAULT_FRAME
24308 DISABLE_INTERRUPTS(CLBR_NONE)
24309 TRACE_IRQS_OFF_DEBUG
24310- testl %ebx,%ebx /* swapgs needed? */
24311+ testl $1,%ebx /* swapgs needed? */
24312 jnz paranoid_restore
24313- testl $3,CS(%rsp)
24314+ testb $3,CS(%rsp)
24315 jnz paranoid_userspace
24316+#ifdef CONFIG_PAX_MEMORY_UDEREF
24317+ pax_exit_kernel
24318+ TRACE_IRQS_IRETQ 0
24319+ SWAPGS_UNSAFE_STACK
24320+ RESTORE_ALL 8
24321+ pax_force_retaddr_bts
24322+ jmp irq_return
24323+#endif
24324 paranoid_swapgs:
24325+#ifdef CONFIG_PAX_MEMORY_UDEREF
24326+ pax_exit_kernel_user
24327+#else
24328+ pax_exit_kernel
24329+#endif
24330 TRACE_IRQS_IRETQ 0
24331 SWAPGS_UNSAFE_STACK
24332 RESTORE_ALL 8
24333 jmp irq_return
24334 paranoid_restore:
24335+ pax_exit_kernel
24336 TRACE_IRQS_IRETQ_DEBUG 0
24337 RESTORE_ALL 8
24338+ pax_force_retaddr_bts
24339 jmp irq_return
24340 paranoid_userspace:
24341 GET_THREAD_INFO(%rcx)
24342@@ -1376,7 +1888,7 @@ paranoid_schedule:
24343 TRACE_IRQS_OFF
24344 jmp paranoid_userspace
24345 CFI_ENDPROC
24346-END(paranoid_exit)
24347+ENDPROC(paranoid_exit)
24348
24349 /*
24350 * Exception entry point. This expects an error code/orig_rax on the stack.
24351@@ -1403,12 +1915,23 @@ ENTRY(error_entry)
24352 movq_cfi r14, R14+8
24353 movq_cfi r15, R15+8
24354 xorl %ebx,%ebx
24355- testl $3,CS+8(%rsp)
24356+ testb $3,CS+8(%rsp)
24357 je error_kernelspace
24358 error_swapgs:
24359 SWAPGS
24360 error_sti:
24361+#ifdef CONFIG_PAX_MEMORY_UDEREF
24362+ testb $3, CS+8(%rsp)
24363+ jnz 1f
24364+ pax_enter_kernel
24365+ jmp 2f
24366+1: pax_enter_kernel_user
24367+2:
24368+#else
24369+ pax_enter_kernel
24370+#endif
24371 TRACE_IRQS_OFF
24372+ pax_force_retaddr
24373 ret
24374
24375 /*
24376@@ -1435,7 +1958,7 @@ bstep_iret:
24377 movq %rcx,RIP+8(%rsp)
24378 jmp error_swapgs
24379 CFI_ENDPROC
24380-END(error_entry)
24381+ENDPROC(error_entry)
24382
24383
24384 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
24385@@ -1446,7 +1969,7 @@ ENTRY(error_exit)
24386 DISABLE_INTERRUPTS(CLBR_NONE)
24387 TRACE_IRQS_OFF
24388 GET_THREAD_INFO(%rcx)
24389- testl %eax,%eax
24390+ testl $1,%eax
24391 jne retint_kernel
24392 LOCKDEP_SYS_EXIT_IRQ
24393 movl TI_flags(%rcx),%edx
24394@@ -1455,7 +1978,7 @@ ENTRY(error_exit)
24395 jnz retint_careful
24396 jmp retint_swapgs
24397 CFI_ENDPROC
24398-END(error_exit)
24399+ENDPROC(error_exit)
24400
24401 /*
24402 * Test if a given stack is an NMI stack or not.
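
With KERNEXEC enabled, the nmi entry below must accept two code segments as "kernel": the ordinary __KERNEL_CS and the alternate __KERNEXEC_KERNEL_CS that the 64-bit cpu_gdt_table in head_64.S places at slot 7, hence the extra cmpl ahead of the first_nmi decision. The selector values fall straight out of the table's slot order (index * 8, with RPL 3 OR'd into user selectors):

#include <stdio.h>

enum {                      /* slot order of the cpu_gdt_table below */
    GDT_KERNEL32_CS = 1, GDT_KERNEL_CS = 2, GDT_KERNEL_DS = 3,
    GDT_USER32_CS   = 4, GDT_USER_DS   = 5, GDT_USER_CS   = 6,
    GDT_KERNEXEC_KERNEL_CS = 7,    /* the PaX alternate kernel CS */
};

int main(void)
{
    printf("__KERNEL_CS          = 0x%02x\n", GDT_KERNEL_CS * 8);
    printf("__KERNEXEC_KERNEL_CS = 0x%02x\n", GDT_KERNEXEC_KERNEL_CS * 8);
    printf("__USER_CS            = 0x%02x\n", GDT_USER_CS * 8 | 3);
    /* An NMI arriving from either kernel CS value must be treated as
     * "from kernel", which is why the entry path compares both. */
    return 0;
}
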
24403@@ -1513,9 +2036,11 @@ ENTRY(nmi)
24404 * If %cs was not the kernel segment, then the NMI triggered in user
24405 * space, which means it is definitely not nested.
24406 */
24407+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
24408+ je 1f
24409 cmpl $__KERNEL_CS, 16(%rsp)
24410 jne first_nmi
24411-
24412+1:
24413 /*
24414 * Check the special variable on the stack to see if NMIs are
24415 * executing.
24416@@ -1549,8 +2074,7 @@ nested_nmi:
24417
24418 1:
24419 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
24420- leaq -1*8(%rsp), %rdx
24421- movq %rdx, %rsp
24422+ subq $8, %rsp
24423 CFI_ADJUST_CFA_OFFSET 1*8
24424 leaq -10*8(%rsp), %rdx
24425 pushq_cfi $__KERNEL_DS
24426@@ -1568,6 +2092,7 @@ nested_nmi_out:
24427 CFI_RESTORE rdx
24428
24429 /* No need to check faults here */
24430+# pax_force_retaddr_bts
24431 INTERRUPT_RETURN
24432
24433 CFI_RESTORE_STATE
24434@@ -1664,13 +2189,13 @@ end_repeat_nmi:
24435 subq $ORIG_RAX-R15, %rsp
24436 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
24437 /*
24438- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
24439+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
24440 * as we should not be calling schedule in NMI context.
24441 * Even with normal interrupts enabled. An NMI should not be
24442 * setting NEED_RESCHED or anything that normal interrupts and
24443 * exceptions might do.
24444 */
24445- call save_paranoid
24446+ call save_paranoid_nmi
24447 DEFAULT_FRAME 0
24448
24449 /*
24450@@ -1680,9 +2205,9 @@ end_repeat_nmi:
24451 * NMI itself takes a page fault, the page fault that was preempted
24452 * will read the information from the NMI page fault and not the
24453 * origin fault. Save it off and restore it if it changes.
24454- * Use the r12 callee-saved register.
24455+ * Use the r13 callee-saved register.
24456 */
24457- movq %cr2, %r12
24458+ movq %cr2, %r13
24459
24460 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24461 movq %rsp,%rdi
24462@@ -1691,29 +2216,34 @@ end_repeat_nmi:
24463
24464 /* Did the NMI take a page fault? Restore cr2 if it did */
24465 movq %cr2, %rcx
24466- cmpq %rcx, %r12
24467+ cmpq %rcx, %r13
24468 je 1f
24469- movq %r12, %cr2
24470+ movq %r13, %cr2
24471 1:
24472
24473- testl %ebx,%ebx /* swapgs needed? */
24474+ testl $1,%ebx /* swapgs needed? */
24475 jnz nmi_restore
24476 nmi_swapgs:
24477 SWAPGS_UNSAFE_STACK
24478 nmi_restore:
24479+ pax_exit_kernel_nmi
24480 /* Pop the extra iret frame at once */
24481 RESTORE_ALL 6*8
24482+ testb $3, 8(%rsp)
24483+ jnz 1f
24484+ pax_force_retaddr_bts
24485+1:
24486
24487 /* Clear the NMI executing stack variable */
24488 movq $0, 5*8(%rsp)
24489 jmp irq_return
24490 CFI_ENDPROC
24491-END(nmi)
24492+ENDPROC(nmi)
24493
24494 ENTRY(ignore_sysret)
24495 CFI_STARTPROC
24496 mov $-ENOSYS,%eax
24497 sysret
24498 CFI_ENDPROC
24499-END(ignore_sysret)
24500+ENDPROC(ignore_sysret)
24501
24502diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
24503index 94d857f..bf1f0bf 100644
24504--- a/arch/x86/kernel/espfix_64.c
24505+++ b/arch/x86/kernel/espfix_64.c
24506@@ -197,7 +197,7 @@ void init_espfix_ap(void)
24507 set_pte(&pte_p[n*PTE_STRIDE], pte);
24508
24509 /* Job is done for this CPU and any CPU which shares this page */
24510- ACCESS_ONCE(espfix_pages[page]) = stack_page;
24511+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
24512
24513 unlock_done:
24514 mutex_unlock(&espfix_init_mutex);
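
grsecurity's constification makes the stock ACCESS_ONCE() effectively read-only (its volatile cast gains a const qualifier), so the few genuine store sites, like this espfix_pages update, move to an explicitly writable variant. A sketch of the pair, assuming the definitions follow the mainline macro's shape:

#include <assert.h>

#define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))  /* loads */
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))  /* loads+stores */

int main(void)
{
    int page = 0;
    ACCESS_ONCE_RW(page) = 42;           /* volatile store */
    assert(ACCESS_ONCE(page) == 42);     /* volatile load  */
    /* ACCESS_ONCE(page) = 1;  would no longer compile here:
     * assignment through a const-qualified lvalue. */
    return 0;
}

(typeof is the GCC extension the kernel builds with.)
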
24515diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
24516index cbc4a91..b38ee45 100644
24517--- a/arch/x86/kernel/ftrace.c
24518+++ b/arch/x86/kernel/ftrace.c
24519@@ -88,7 +88,7 @@ static unsigned long text_ip_addr(unsigned long ip)
24520 * kernel identity mapping to modify code.
24521 */
24522 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
24523- ip = (unsigned long)__va(__pa_symbol(ip));
24524+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
24525
24526 return ip;
24527 }
24528@@ -104,6 +104,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
24529 {
24530 unsigned char replaced[MCOUNT_INSN_SIZE];
24531
24532+ ip = ktla_ktva(ip);
24533+
24534 /*
24535 * Note: Due to modules and __init, code can
24536 * disappear and change, we need to protect against faulting
24537@@ -229,7 +231,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
24538 unsigned char old[MCOUNT_INSN_SIZE];
24539 int ret;
24540
24541- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
24542+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
24543
24544 ftrace_update_func = ip;
24545 /* Make sure the breakpoints see the ftrace_update_func update */
24546@@ -310,7 +312,7 @@ static int add_break(unsigned long ip, const char *old)
24547 unsigned char replaced[MCOUNT_INSN_SIZE];
24548 unsigned char brk = BREAKPOINT_INSTRUCTION;
24549
24550- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
24551+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
24552 return -EFAULT;
24553
24554 /* Make sure it is what we expect it to be */
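
All three ftrace.c hunks route instruction-pointer values through ktla_ktva() before reading or translating them. Under 32-bit KERNEXEC the kernel text is reachable at two addresses, the one code executes from and the ordinary mapping that code-patching reads and writes must use, and ktla_ktva() converts the former to the latter by a constant offset fixed by the KERNEXEC link-time layout. The shape of that conversion, with invented bases:

#include <assert.h>
#include <stdint.h>

/* Two hypothetical views of the same physical text bytes. */
#define KTEXT_EXEC_BASE   0xc1000000UL  /* where code runs (invented) */
#define KTEXT_ACCESS_BASE 0xc2000000UL  /* where it is patched (invented) */

static uintptr_t ktla_ktva_sketch(uintptr_t ip)
{
    return ip - KTEXT_EXEC_BASE + KTEXT_ACCESS_BASE;
}

int main(void)
{
    uintptr_t ip = KTEXT_EXEC_BASE + 0x1234;
    assert(ktla_ktva_sketch(ip) == KTEXT_ACCESS_BASE + 0x1234);
    return 0;
}
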
24555diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
24556index eda1a86..8f6df48 100644
24557--- a/arch/x86/kernel/head64.c
24558+++ b/arch/x86/kernel/head64.c
24559@@ -67,12 +67,12 @@ again:
24560 pgd = *pgd_p;
24561
24562 /*
24563- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
24564- * critical -- __PAGE_OFFSET would point us back into the dynamic
24565+ * The use of __early_va rather than __va here is critical:
24566+ * __va would point us back into the dynamic
24567 * range and we might end up looping forever...
24568 */
24569 if (pgd)
24570- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24571+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
24572 else {
24573 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24574 reset_early_page_tables();
24575@@ -82,13 +82,13 @@ again:
24576 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
24577 for (i = 0; i < PTRS_PER_PUD; i++)
24578 pud_p[i] = 0;
24579- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24580+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
24581 }
24582 pud_p += pud_index(address);
24583 pud = *pud_p;
24584
24585 if (pud)
24586- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24587+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
24588 else {
24589 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24590 reset_early_page_tables();
24591@@ -98,7 +98,7 @@ again:
24592 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
24593 for (i = 0; i < PTRS_PER_PMD; i++)
24594 pmd_p[i] = 0;
24595- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24596+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
24597 }
24598 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
24599 pmd_p[pmd_index(address)] = pmd;
24600@@ -175,7 +175,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
24601 if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
24602 early_printk("Kernel alive\n");
24603
24604- clear_page(init_level4_pgt);
24605 /* set init_level4_pgt kernel high mapping*/
24606 init_level4_pgt[511] = early_level4_pgt[511];
24607
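
The head64.c rewrite trades open-coded "phys + __START_KERNEL_map - phys_base" translations for __early_va()/__pa(), but the arithmetic underneath is unchanged: x86-64 keeps two linear windows onto physical memory, the direct map at __PAGE_OFFSET and the kernel-image map at __START_KERNEL_map (biased by phys_base when the kernel is loaded away from its link address). Both translations cross-checked with the 3.16-era constants:

#include <assert.h>
#include <stdint.h>

#define PAGE_OFFSET      0xffff880000000000ULL  /* direct map base */
#define START_KERNEL_MAP 0xffffffff80000000ULL  /* kernel image base */

int main(void)
{
    uint64_t phys_base = 0x1000000;   /* illustrative load offset */
    uint64_t pa        = 0x1234000;   /* some page-table page */

    uint64_t va_direct = pa + PAGE_OFFSET;                  /* __va() */
    uint64_t va_image  = pa + START_KERNEL_MAP - phys_base; /* old form */

    /* Two virtual aliases of one physical page: */
    assert(va_direct - PAGE_OFFSET ==
           va_image - START_KERNEL_MAP + phys_base);
    return 0;
}
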
24608diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
24609index f36bd42..0ab4474 100644
24610--- a/arch/x86/kernel/head_32.S
24611+++ b/arch/x86/kernel/head_32.S
24612@@ -26,6 +26,12 @@
24613 /* Physical address */
24614 #define pa(X) ((X) - __PAGE_OFFSET)
24615
24616+#ifdef CONFIG_PAX_KERNEXEC
24617+#define ta(X) (X)
24618+#else
24619+#define ta(X) ((X) - __PAGE_OFFSET)
24620+#endif
24621+
24622 /*
24623 * References to members of the new_cpu_data structure.
24624 */
24625@@ -55,11 +61,7 @@
24626 * and smaller than max_low_pfn, otherwise it will waste some page table entries
24627 */
24628
24629-#if PTRS_PER_PMD > 1
24630-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
24631-#else
24632-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
24633-#endif
24634+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
24635
24636 /* Number of possible pages in the lowmem region */
24637 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
24638@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
24639 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24640
24641 /*
24642+ * Real beginning of normal "text" segment
24643+ */
24644+ENTRY(stext)
24645+ENTRY(_stext)
24646+
24647+/*
24648 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
24649 * %esi points to the real-mode code as a 32-bit pointer.
24650 * CS and DS must be 4 GB flat segments, but we don't depend on
24651@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24652 * can.
24653 */
24654 __HEAD
24655+
24656+#ifdef CONFIG_PAX_KERNEXEC
24657+ jmp startup_32
24658+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
24659+.fill PAGE_SIZE-5,1,0xcc
24660+#endif
24661+
24662 ENTRY(startup_32)
24663 movl pa(stack_start),%ecx
24664
24665@@ -106,6 +121,59 @@ ENTRY(startup_32)
24666 2:
24667 leal -__PAGE_OFFSET(%ecx),%esp
24668
24669+#ifdef CONFIG_SMP
24670+ movl $pa(cpu_gdt_table),%edi
24671+ movl $__per_cpu_load,%eax
24672+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
24673+ rorl $16,%eax
24674+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
24675+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
24676+ movl $__per_cpu_end - 1,%eax
24677+ subl $__per_cpu_start,%eax
24678+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
24679+#endif
24680+
24681+#ifdef CONFIG_PAX_MEMORY_UDEREF
24682+ movl $NR_CPUS,%ecx
24683+ movl $pa(cpu_gdt_table),%edi
24684+1:
24685+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
24686+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
24687+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
24688+ addl $PAGE_SIZE_asm,%edi
24689+ loop 1b
24690+#endif
24691+
24692+#ifdef CONFIG_PAX_KERNEXEC
24693+ movl $pa(boot_gdt),%edi
24694+ movl $__LOAD_PHYSICAL_ADDR,%eax
24695+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
24696+ rorl $16,%eax
24697+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
24698+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
24699+ rorl $16,%eax
24700+
24701+ ljmp $(__BOOT_CS),$1f
24702+1:
24703+
24704+ movl $NR_CPUS,%ecx
24705+ movl $pa(cpu_gdt_table),%edi
24706+ addl $__PAGE_OFFSET,%eax
24707+1:
24708+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
24709+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
24710+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
24711+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
24712+ rorl $16,%eax
24713+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
24714+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
24715+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
24716+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
24717+ rorl $16,%eax
24718+ addl $PAGE_SIZE_asm,%edi
24719+ loop 1b
24720+#endif
24721+
24722 /*
24723 * Clear BSS first so that there are no surprises...
24724 */
24725@@ -201,8 +269,11 @@ ENTRY(startup_32)
24726 movl %eax, pa(max_pfn_mapped)
24727
24728 /* Do early initialization of the fixmap area */
24729- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24730- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
24731+#ifdef CONFIG_COMPAT_VDSO
24732+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
24733+#else
24734+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
24735+#endif
24736 #else /* Not PAE */
24737
24738 page_pde_offset = (__PAGE_OFFSET >> 20);
24739@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24740 movl %eax, pa(max_pfn_mapped)
24741
24742 /* Do early initialization of the fixmap area */
24743- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24744- movl %eax,pa(initial_page_table+0xffc)
24745+#ifdef CONFIG_COMPAT_VDSO
24746+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
24747+#else
24748+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
24749+#endif
24750 #endif
24751
24752 #ifdef CONFIG_PARAVIRT
24753@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24754 cmpl $num_subarch_entries, %eax
24755 jae bad_subarch
24756
24757- movl pa(subarch_entries)(,%eax,4), %eax
24758- subl $__PAGE_OFFSET, %eax
24759- jmp *%eax
24760+ jmp *pa(subarch_entries)(,%eax,4)
24761
24762 bad_subarch:
24763 WEAK(lguest_entry)
24764@@ -261,10 +333,10 @@ WEAK(xen_entry)
24765 __INITDATA
24766
24767 subarch_entries:
24768- .long default_entry /* normal x86/PC */
24769- .long lguest_entry /* lguest hypervisor */
24770- .long xen_entry /* Xen hypervisor */
24771- .long default_entry /* Moorestown MID */
24772+ .long ta(default_entry) /* normal x86/PC */
24773+ .long ta(lguest_entry) /* lguest hypervisor */
24774+ .long ta(xen_entry) /* Xen hypervisor */
24775+ .long ta(default_entry) /* Moorestown MID */
24776 num_subarch_entries = (. - subarch_entries) / 4
24777 .previous
24778 #else
24779@@ -354,6 +426,7 @@ default_entry:
24780 movl pa(mmu_cr4_features),%eax
24781 movl %eax,%cr4
24782
24783+#ifdef CONFIG_X86_PAE
24784 testb $X86_CR4_PAE, %al # check if PAE is enabled
24785 jz enable_paging
24786
24787@@ -382,6 +455,9 @@ default_entry:
24788 /* Make changes effective */
24789 wrmsr
24790
24791+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
24792+#endif
24793+
24794 enable_paging:
24795
24796 /*
24797@@ -449,14 +525,20 @@ is486:
24798 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
24799 movl %eax,%ss # after changing gdt.
24800
24801- movl $(__USER_DS),%eax # DS/ES contains default USER segment
24802+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
24803 movl %eax,%ds
24804 movl %eax,%es
24805
24806 movl $(__KERNEL_PERCPU), %eax
24807 movl %eax,%fs # set this cpu's percpu
24808
24809+#ifdef CONFIG_CC_STACKPROTECTOR
24810 movl $(__KERNEL_STACK_CANARY),%eax
24811+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
24812+ movl $(__USER_DS),%eax
24813+#else
24814+ xorl %eax,%eax
24815+#endif
24816 movl %eax,%gs
24817
24818 xorl %eax,%eax # Clear LDT
24819@@ -512,8 +594,11 @@ setup_once:
24820 * relocation. Manually set base address in stack canary
24821 * segment descriptor.
24822 */
24823- movl $gdt_page,%eax
24824+ movl $cpu_gdt_table,%eax
24825 movl $stack_canary,%ecx
24826+#ifdef CONFIG_SMP
24827+ addl $__per_cpu_load,%ecx
24828+#endif
24829 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
24830 shrl $16, %ecx
24831 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
24832@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
24833 cmpl $2,(%esp) # X86_TRAP_NMI
24834 je is_nmi # Ignore NMI
24835
24836- cmpl $2,%ss:early_recursion_flag
24837+ cmpl $1,%ss:early_recursion_flag
24838 je hlt_loop
24839 incl %ss:early_recursion_flag
24840
24841@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
24842 pushl (20+6*4)(%esp) /* trapno */
24843 pushl $fault_msg
24844 call printk
24845-#endif
24846 call dump_stack
24847+#endif
24848 hlt_loop:
24849 hlt
24850 jmp hlt_loop
24851@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
24852 /* This is the default interrupt "handler" :-) */
24853 ALIGN
24854 ignore_int:
24855- cld
24856 #ifdef CONFIG_PRINTK
24857+ cmpl $2,%ss:early_recursion_flag
24858+ je hlt_loop
24859+ incl %ss:early_recursion_flag
24860+ cld
24861 pushl %eax
24862 pushl %ecx
24863 pushl %edx
24864@@ -617,9 +705,6 @@ ignore_int:
24865 movl $(__KERNEL_DS),%eax
24866 movl %eax,%ds
24867 movl %eax,%es
24868- cmpl $2,early_recursion_flag
24869- je hlt_loop
24870- incl early_recursion_flag
24871 pushl 16(%esp)
24872 pushl 24(%esp)
24873 pushl 32(%esp)
24874@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
24875 /*
24876 * BSS section
24877 */
24878-__PAGE_ALIGNED_BSS
24879- .align PAGE_SIZE
24880 #ifdef CONFIG_X86_PAE
24881+.section .initial_pg_pmd,"a",@progbits
24882 initial_pg_pmd:
24883 .fill 1024*KPMDS,4,0
24884 #else
24885+.section .initial_page_table,"a",@progbits
24886 ENTRY(initial_page_table)
24887 .fill 1024,4,0
24888 #endif
24889+.section .initial_pg_fixmap,"a",@progbits
24890 initial_pg_fixmap:
24891 .fill 1024,4,0
24892+.section .empty_zero_page,"a",@progbits
24893 ENTRY(empty_zero_page)
24894 .fill 4096,1,0
24895+.section .swapper_pg_dir,"a",@progbits
24896 ENTRY(swapper_pg_dir)
24897+#ifdef CONFIG_X86_PAE
24898+ .fill 4,8,0
24899+#else
24900 .fill 1024,4,0
24901+#endif
24902
24903 /*
24904 * This starts the data section.
24905 */
24906 #ifdef CONFIG_X86_PAE
24907-__PAGE_ALIGNED_DATA
24908- /* Page-aligned for the benefit of paravirt? */
24909- .align PAGE_SIZE
24910+.section .initial_page_table,"a",@progbits
24911 ENTRY(initial_page_table)
24912 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
24913 # if KPMDS == 3
24914@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
24915 # error "Kernel PMDs should be 1, 2 or 3"
24916 # endif
24917 .align PAGE_SIZE /* needs to be page-sized too */
24918+
24919+#ifdef CONFIG_PAX_PER_CPU_PGD
24920+ENTRY(cpu_pgd)
24921+ .rept 2*NR_CPUS
24922+ .fill 4,8,0
24923+ .endr
24924+#endif
24925+
24926 #endif
24927
24928 .data
24929 .balign 4
24930 ENTRY(stack_start)
24931- .long init_thread_union+THREAD_SIZE
24932+ .long init_thread_union+THREAD_SIZE-8
24933
24934 __INITRODATA
24935 int_msg:
24936@@ -727,7 +825,7 @@ fault_msg:
24937 * segment size, and 32-bit linear address value:
24938 */
24939
24940- .data
24941+.section .rodata,"a",@progbits
24942 .globl boot_gdt_descr
24943 .globl idt_descr
24944
24945@@ -736,7 +834,7 @@ fault_msg:
24946 .word 0 # 32 bit align gdt_desc.address
24947 boot_gdt_descr:
24948 .word __BOOT_DS+7
24949- .long boot_gdt - __PAGE_OFFSET
24950+ .long pa(boot_gdt)
24951
24952 .word 0 # 32-bit align idt_desc.address
24953 idt_descr:
24954@@ -747,7 +845,7 @@ idt_descr:
24955 .word 0 # 32 bit align gdt_desc.address
24956 ENTRY(early_gdt_descr)
24957 .word GDT_ENTRIES*8-1
24958- .long gdt_page /* Overwritten for secondary CPUs */
24959+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
24960
24961 /*
24962 * The boot_gdt must mirror the equivalent in setup.S and is
24963@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
24964 .align L1_CACHE_BYTES
24965 ENTRY(boot_gdt)
24966 .fill GDT_ENTRY_BOOT_CS,8,0
24967- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
24968- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
24969+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
24970+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
24971+
24972+ .align PAGE_SIZE_asm
24973+ENTRY(cpu_gdt_table)
24974+ .rept NR_CPUS
24975+ .quad 0x0000000000000000 /* NULL descriptor */
24976+ .quad 0x0000000000000000 /* 0x0b reserved */
24977+ .quad 0x0000000000000000 /* 0x13 reserved */
24978+ .quad 0x0000000000000000 /* 0x1b reserved */
24979+
24980+#ifdef CONFIG_PAX_KERNEXEC
24981+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
24982+#else
24983+ .quad 0x0000000000000000 /* 0x20 unused */
24984+#endif
24985+
24986+ .quad 0x0000000000000000 /* 0x28 unused */
24987+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
24988+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
24989+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
24990+ .quad 0x0000000000000000 /* 0x4b reserved */
24991+ .quad 0x0000000000000000 /* 0x53 reserved */
24992+ .quad 0x0000000000000000 /* 0x5b reserved */
24993+
24994+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
24995+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
24996+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
24997+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
24998+
24999+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
25000+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
25001+
25002+ /*
25003+ * Segments used for calling PnP BIOS have byte granularity.
25004+ * The code segments and data segments have fixed 64k limits,
25005+ * the transfer segment sizes are set at run time.
25006+ */
25007+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
25008+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
25009+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
25010+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
25011+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
25012+
25013+ /*
25014+ * The APM segments have byte granularity and their bases
25015+ * are set at run time. All have 64k limits.
25016+ */
25017+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
25018+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
25019+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
25020+
25021+ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
25022+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
25023+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
25024+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
25025+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
25026+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
25027+
25028+ /* Be sure this is zeroed to avoid false validations in Xen */
25029+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
25030+ .endr
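
The per-cpu GDT and KERNEXEC blocks added to startup_32 program a descriptor's base with a movw at byte offset 2 and movb at offsets 4 and 7, rorl $16 rotating the halves through %eax in between, because a legacy descriptor scatters the 32-bit base across bits 16-39 and 56-63. An encode/decode round trip over exactly those byte positions (the base value is illustrative):

#include <assert.h>
#include <stdint.h>

static void set_base(uint8_t d[8], uint32_t base)
{
    d[2] = base & 0xff;           /* movw %ax, 2(%edi): d[2], d[3] */
    d[3] = (base >> 8) & 0xff;
    d[4] = (base >> 16) & 0xff;   /* movb %al, 4(%edi) */
    d[7] = (base >> 24) & 0xff;   /* movb %ah, 7(%edi) */
}

static uint32_t get_base(const uint8_t d[8])
{
    return d[2] | (d[3] << 8) | (d[4] << 16) | ((uint32_t)d[7] << 24);
}

int main(void)
{
    uint8_t desc[8] = {0};
    set_base(desc, 0xc1428000);   /* e.g. some __per_cpu_load value */
    assert(get_base(desc) == 0xc1428000);
    return 0;
}
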
25031diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
25032index a468c0a..8b5a879 100644
25033--- a/arch/x86/kernel/head_64.S
25034+++ b/arch/x86/kernel/head_64.S
25035@@ -20,6 +20,8 @@
25036 #include <asm/processor-flags.h>
25037 #include <asm/percpu.h>
25038 #include <asm/nops.h>
25039+#include <asm/cpufeature.h>
25040+#include <asm/alternative-asm.h>
25041
25042 #ifdef CONFIG_PARAVIRT
25043 #include <asm/asm-offsets.h>
25044@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
25045 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
25046 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
25047 L3_START_KERNEL = pud_index(__START_KERNEL_map)
25048+L4_VMALLOC_START = pgd_index(VMALLOC_START)
25049+L3_VMALLOC_START = pud_index(VMALLOC_START)
25050+L4_VMALLOC_END = pgd_index(VMALLOC_END)
25051+L3_VMALLOC_END = pud_index(VMALLOC_END)
25052+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
25053+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
25054
25055 .text
25056 __HEAD
25057@@ -89,11 +97,24 @@ startup_64:
25058 * Fixup the physical addresses in the page table
25059 */
25060 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
25061+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
25062+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
25063+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
25064+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
25065+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
25066
25067- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
25068- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
25069+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
25070+#ifndef CONFIG_XEN
25071+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
25072+#endif
25073+
25074+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
25075+
25076+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
25077+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
25078
25079 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
25080+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
25081
25082 /*
25083 * Set up the identity mapping for the switchover. These
25084@@ -174,11 +195,12 @@ ENTRY(secondary_startup_64)
25085 * after the boot processor executes this code.
25086 */
25087
25088+ orq $-1, %rbp
25089 movq $(init_level4_pgt - __START_KERNEL_map), %rax
25090 1:
25091
25092- /* Enable PAE mode and PGE */
25093- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
25094+ /* Enable PAE mode and PSE/PGE */
25095+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
25096 movq %rcx, %cr4
25097
25098 /* Setup early boot stage 4 level pagetables. */
25099@@ -199,10 +221,19 @@ ENTRY(secondary_startup_64)
25100 movl $MSR_EFER, %ecx
25101 rdmsr
25102 btsl $_EFER_SCE, %eax /* Enable System Call */
25103- btl $20,%edi /* No Execute supported? */
25104+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
25105 jnc 1f
25106 btsl $_EFER_NX, %eax
25107+ cmpq $-1, %rbp
25108+ je 1f
25109 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
25110+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
25111+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
25112+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
25113+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
25114+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
25115+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
25116+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
25117 1: wrmsr /* Make changes effective */
25118
25119 /* Setup cr0 */
25120@@ -282,6 +313,7 @@ ENTRY(secondary_startup_64)
25121 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
25122 * address given in m16:64.
25123 */
25124+ pax_set_fptr_mask
25125 movq initial_code(%rip),%rax
25126 pushq $0 # fake return address to stop unwinder
25127 pushq $__KERNEL_CS # set correct cs
25128@@ -313,7 +345,7 @@ ENDPROC(start_cpu0)
25129 .quad INIT_PER_CPU_VAR(irq_stack_union)
25130
25131 GLOBAL(stack_start)
25132- .quad init_thread_union+THREAD_SIZE-8
25133+ .quad init_thread_union+THREAD_SIZE-16
25134 .word 0
25135 __FINITDATA
25136
25137@@ -391,7 +423,7 @@ ENTRY(early_idt_handler)
25138 call dump_stack
25139 #ifdef CONFIG_KALLSYMS
25140 leaq early_idt_ripmsg(%rip),%rdi
25141- movq 40(%rsp),%rsi # %rip again
25142+ movq 88(%rsp),%rsi # %rip again
25143 call __print_symbol
25144 #endif
25145 #endif /* EARLY_PRINTK */
25146@@ -420,6 +452,7 @@ ENDPROC(early_idt_handler)
25147 early_recursion_flag:
25148 .long 0
25149
25150+ .section .rodata,"a",@progbits
25151 #ifdef CONFIG_EARLY_PRINTK
25152 early_idt_msg:
25153 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
25154@@ -447,29 +480,52 @@ NEXT_PAGE(early_level4_pgt)
25155 NEXT_PAGE(early_dynamic_pgts)
25156 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
25157
25158- .data
25159+ .section .rodata,"a",@progbits
25160
25161-#ifndef CONFIG_XEN
25162 NEXT_PAGE(init_level4_pgt)
25163- .fill 512,8,0
25164-#else
25165-NEXT_PAGE(init_level4_pgt)
25166- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25167 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
25168 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25169+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
25170+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
25171+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
25172+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
25173+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
25174+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25175 .org init_level4_pgt + L4_START_KERNEL*8, 0
25176 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
25177 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
25178
25179+#ifdef CONFIG_PAX_PER_CPU_PGD
25180+NEXT_PAGE(cpu_pgd)
25181+ .rept 2*NR_CPUS
25182+ .fill 512,8,0
25183+ .endr
25184+#endif
25185+
25186 NEXT_PAGE(level3_ident_pgt)
25187 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25188+#ifdef CONFIG_XEN
25189 .fill 511, 8, 0
25190+#else
25191+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
25192+ .fill 510,8,0
25193+#endif
25194+
25195+NEXT_PAGE(level3_vmalloc_start_pgt)
25196+ .fill 512,8,0
25197+
25198+NEXT_PAGE(level3_vmalloc_end_pgt)
25199+ .fill 512,8,0
25200+
25201+NEXT_PAGE(level3_vmemmap_pgt)
25202+ .fill L3_VMEMMAP_START,8,0
25203+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25204+
25205 NEXT_PAGE(level2_ident_pgt)
25206- /* Since I easily can, map the first 1G.
25207+ /* Since I easily can, map the first 2G.
25208 * Don't set NX because code runs from these pages.
25209 */
25210- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
25211-#endif
25212+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
25213
25214 NEXT_PAGE(level3_kernel_pgt)
25215 .fill L3_START_KERNEL,8,0
25216@@ -477,6 +533,9 @@ NEXT_PAGE(level3_kernel_pgt)
25217 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
25218 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25219
25220+NEXT_PAGE(level2_vmemmap_pgt)
25221+ .fill 512,8,0
25222+
25223 NEXT_PAGE(level2_kernel_pgt)
25224 /*
25225 * 512 MB kernel mapping. We spend a full page on this pagetable
25226@@ -494,28 +553,64 @@ NEXT_PAGE(level2_kernel_pgt)
25227 NEXT_PAGE(level2_fixmap_pgt)
25228 .fill 506,8,0
25229 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25230- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
25231- .fill 5,8,0
25232+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
25233+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
25234+ .fill 4,8,0
25235
25236 NEXT_PAGE(level1_fixmap_pgt)
25237 .fill 512,8,0
25238
25239+NEXT_PAGE(level1_vsyscall_pgt)
25240+ .fill 512,8,0
25241+
25242 #undef PMDS
25243
25244- .data
25245+ .align PAGE_SIZE
25246+ENTRY(cpu_gdt_table)
25247+ .rept NR_CPUS
25248+ .quad 0x0000000000000000 /* NULL descriptor */
25249+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
25250+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
25251+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
25252+ .quad 0x00cffb000000ffff /* __USER32_CS */
25253+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
25254+ .quad 0x00affb000000ffff /* __USER_CS */
25255+
25256+#ifdef CONFIG_PAX_KERNEXEC
25257+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
25258+#else
25259+ .quad 0x0 /* unused */
25260+#endif
25261+
25262+ .quad 0,0 /* TSS */
25263+ .quad 0,0 /* LDT */
25264+ .quad 0,0,0 /* three TLS descriptors */
25265+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
25266+ /* asm/segment.h:GDT_ENTRIES must match this */
25267+
25268+#ifdef CONFIG_PAX_MEMORY_UDEREF
25269+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
25270+#else
25271+ .quad 0x0 /* unused */
25272+#endif
25273+
25274+ /* zero the remaining page */
25275+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
25276+ .endr
25277+
25278 .align 16
25279 .globl early_gdt_descr
25280 early_gdt_descr:
25281 .word GDT_ENTRIES*8-1
25282 early_gdt_descr_base:
25283- .quad INIT_PER_CPU_VAR(gdt_page)
25284+ .quad cpu_gdt_table
25285
25286 ENTRY(phys_base)
25287 /* This must match the first entry in level2_kernel_pgt */
25288 .quad 0x0000000000000000
25289
25290 #include "../../x86/xen/xen-head.S"
25291-
25292- __PAGE_ALIGNED_BSS
25293+
25294+ .section .rodata,"a",@progbits
25295 NEXT_PAGE(empty_zero_page)
25296 .skip PAGE_SIZE
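
The cpu_gdt_table introduced above replaces the per-cpu gdt_page with a statically initialized array of raw descriptor quads, one GDT per CPU, placed in .rodata. Those 0x00cf9b... constants follow the standard x86 segment descriptor encoding; the following standalone C decoder is a worked illustration only (segment names taken from the table's own comments):

#include <stdio.h>
#include <stdint.h>

/* Decode a raw x86 segment descriptor quad (layout per the Intel SDM). */
static void decode(const char *name, uint64_t d)
{
	uint32_t base   = (uint32_t)(((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24));
	uint32_t limit  = (uint32_t)((d & 0xffff) | (((d >> 48) & 0xf) << 16));
	unsigned access = (unsigned)((d >> 40) & 0xff);
	unsigned flags  = (unsigned)((d >> 52) & 0xf);

	if (flags & 0x8)	/* G bit: limit counted in 4 KiB pages */
		limit = (limit << 12) | 0xfff;

	printf("%-14s base=%#010x limit=%#010x dpl=%u %s %s\n",
	       name, base, limit, (access >> 5) & 3,
	       (access & 0x8) ? "code" : "data",
	       (flags & 0x2) ? "64-bit" : (flags & 0x4) ? "32-bit" : "16-bit");
}

int main(void)
{
	decode("__KERNEL32_CS", 0x00cf9b000000ffffULL);
	decode("__KERNEL_CS",   0x00af9b000000ffffULL);
	decode("__KERNEL_DS",   0x00cf93000000ffffULL);
	decode("__USER_CS",     0x00affb000000ffffULL);
	return 0;
}

Running it shows, for example, that __KERNEL_CS and __KERNEXEC_KERNEL_CS share the same 64-bit ring-0 code encoding, which is why the #else branch can substitute an unused slot.
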
25297diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25298index 05fd74f..c3548b1 100644
25299--- a/arch/x86/kernel/i386_ksyms_32.c
25300+++ b/arch/x86/kernel/i386_ksyms_32.c
25301@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25302 EXPORT_SYMBOL(cmpxchg8b_emu);
25303 #endif
25304
25305+EXPORT_SYMBOL_GPL(cpu_gdt_table);
25306+
25307 /* Networking helper routines. */
25308 EXPORT_SYMBOL(csum_partial_copy_generic);
25309+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25310+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25311
25312 EXPORT_SYMBOL(__get_user_1);
25313 EXPORT_SYMBOL(__get_user_2);
25314@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25315 EXPORT_SYMBOL(___preempt_schedule_context);
25316 #endif
25317 #endif
25318+
25319+#ifdef CONFIG_PAX_KERNEXEC
25320+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25321+#endif
25322+
25323+#ifdef CONFIG_PAX_PER_CPU_PGD
25324+EXPORT_SYMBOL(cpu_pgd);
25325+#endif
25326diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
25327index d5dd808..b6432cf 100644
25328--- a/arch/x86/kernel/i387.c
25329+++ b/arch/x86/kernel/i387.c
25330@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
25331 static inline bool interrupted_user_mode(void)
25332 {
25333 struct pt_regs *regs = get_irq_regs();
25334- return regs && user_mode_vm(regs);
25335+ return regs && user_mode(regs);
25336 }
25337
25338 /*
25339diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25340index 8af8171..f8c1169 100644
25341--- a/arch/x86/kernel/i8259.c
25342+++ b/arch/x86/kernel/i8259.c
25343@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25344 static void make_8259A_irq(unsigned int irq)
25345 {
25346 disable_irq_nosync(irq);
25347- io_apic_irqs &= ~(1<<irq);
25348+ io_apic_irqs &= ~(1UL<<irq);
25349 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
25350 i8259A_chip.name);
25351 enable_irq(irq);
25352@@ -209,7 +209,7 @@ spurious_8259A_irq:
25353 "spurious 8259A interrupt: IRQ%d.\n", irq);
25354 spurious_irq_mask |= irqmask;
25355 }
25356- atomic_inc(&irq_err_count);
25357+ atomic_inc_unchecked(&irq_err_count);
25358 /*
25359 * Theoretically we do not have to handle this IRQ,
25360 * but in Linux this does not cause problems and is
25361@@ -350,14 +350,16 @@ static void init_8259A(int auto_eoi)
25362 /* (slave's support for AEOI in flat mode is to be investigated) */
25363 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25364
25365+ pax_open_kernel();
25366 if (auto_eoi)
25367 /*
25368 * In AEOI mode we just have to mask the interrupt
25369 * when acking.
25370 */
25371- i8259A_chip.irq_mask_ack = disable_8259A_irq;
25372+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25373 else
25374- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25375+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25376+ pax_close_kernel();
25377
25378 udelay(100); /* wait for 8259A to initialize */
25379
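
The init_8259A() hunk stores a function pointer into i8259A_chip, which the patch elsewhere makes read-only, so the write is bracketed by pax_open_kernel()/pax_close_kernel() and forced through a void ** cast. A minimal compile-and-run model of that pattern, with the open/close primitives stubbed out (in the patch they temporarily lift KERNEXEC's write protection, e.g. by toggling CR0.WP):

#include <stdio.h>

struct irq_chip {
	void (*irq_mask_ack)(unsigned int irq);
};

/* Stubs: the real primitives lift/restore write protection around the store. */
static void pax_open_kernel(void)  { }
static void pax_close_kernel(void) { }

static void disable_irq_stub(unsigned int irq)  { printf("mask %u\n", irq); }
static void mask_and_ack_stub(unsigned int irq) { printf("mask+ack %u\n", irq); }

static struct irq_chip i8259A_chip;	/* __read_only in the patched kernel */

static void init_chip(int auto_eoi)
{
	pax_open_kernel();
	if (auto_eoi)
		*(void **)&i8259A_chip.irq_mask_ack = disable_irq_stub;
	else
		*(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_stub;
	pax_close_kernel();
}

int main(void)
{
	init_chip(0);
	i8259A_chip.irq_mask_ack(3);
	return 0;
}
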
25380diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25381index a979b5b..1d6db75 100644
25382--- a/arch/x86/kernel/io_delay.c
25383+++ b/arch/x86/kernel/io_delay.c
25384@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25385 * Quirk table for systems that misbehave (lock up, etc.) if port
25386 * 0x80 is used:
25387 */
25388-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25389+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25390 {
25391 .callback = dmi_io_delay_0xed_port,
25392 .ident = "Compaq Presario V6000",
25393diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25394index 4ddaf66..49d5c18 100644
25395--- a/arch/x86/kernel/ioport.c
25396+++ b/arch/x86/kernel/ioport.c
25397@@ -6,6 +6,7 @@
25398 #include <linux/sched.h>
25399 #include <linux/kernel.h>
25400 #include <linux/capability.h>
25401+#include <linux/security.h>
25402 #include <linux/errno.h>
25403 #include <linux/types.h>
25404 #include <linux/ioport.h>
25405@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25406 return -EINVAL;
25407 if (turn_on && !capable(CAP_SYS_RAWIO))
25408 return -EPERM;
25409+#ifdef CONFIG_GRKERNSEC_IO
25410+ if (turn_on && grsec_disable_privio) {
25411+ gr_handle_ioperm();
25412+ return -ENODEV;
25413+ }
25414+#endif
25415
25416 /*
25417 * If it's the first ioperm() call in this thread's lifetime, set the
25418@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25419 * because the ->io_bitmap_max value must match the bitmap
25420 * contents:
25421 */
25422- tss = &per_cpu(init_tss, get_cpu());
25423+ tss = init_tss + get_cpu();
25424
25425 if (turn_on)
25426 bitmap_clear(t->io_bitmap_ptr, from, num);
25427@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25428 if (level > old) {
25429 if (!capable(CAP_SYS_RAWIO))
25430 return -EPERM;
25431+#ifdef CONFIG_GRKERNSEC_IO
25432+ if (grsec_disable_privio) {
25433+ gr_handle_iopl();
25434+ return -ENODEV;
25435+ }
25436+#endif
25437 }
25438 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25439 t->iopl = level << 12;
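
With CONFIG_GRKERNSEC_IO enabled and grsec_disable_privio set, the two hunks above make ioperm() and iopl() fail with -ENODEV even for CAP_SYS_RAWIO holders, after logging through gr_handle_ioperm()/gr_handle_iopl(). A small userspace probe of that gate (Linux/x86 only, run as root; on a kernel without the gate the call simply succeeds or returns EPERM):

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/io.h>

int main(void)
{
	/* Ask for access to port 0x80; needs CAP_SYS_RAWIO. */
	if (ioperm(0x80, 1, 1) != 0)
		printf("ioperm: %s\n", strerror(errno));	/* ENODEV when gated */
	else
		puts("ioperm granted");
	return 0;
}
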
25440diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25441index 922d285..6d20692 100644
25442--- a/arch/x86/kernel/irq.c
25443+++ b/arch/x86/kernel/irq.c
25444@@ -22,7 +22,7 @@
25445 #define CREATE_TRACE_POINTS
25446 #include <asm/trace/irq_vectors.h>
25447
25448-atomic_t irq_err_count;
25449+atomic_unchecked_t irq_err_count;
25450
25451 /* Function pointer for generic interrupt vector handling */
25452 void (*x86_platform_ipi_callback)(void) = NULL;
25453@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25454 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
25455 seq_printf(p, " Hypervisor callback interrupts\n");
25456 #endif
25457- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25458+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25459 #if defined(CONFIG_X86_IO_APIC)
25460- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25461+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25462 #endif
25463 return 0;
25464 }
25465@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25466
25467 u64 arch_irq_stat(void)
25468 {
25469- u64 sum = atomic_read(&irq_err_count);
25470+ u64 sum = atomic_read_unchecked(&irq_err_count);
25471 return sum;
25472 }
25473
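
irq_err_count becomes atomic_unchecked_t because it is a plain statistics counter that may legitimately wrap: under PAX_REFCOUNT the normal atomic_t operations are instrumented to trap on signed overflow, and the _unchecked variants opt out of that check. A sketch of the intended semantics using GCC atomic builtins (the kernel's implementation is arch-specific asm, not reproduced here):

#include <stdio.h>

typedef struct { volatile int counter; } atomic_unchecked_t;

/* Plain wrapping increment: no PAX_REFCOUNT overflow trap. */
static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

int main(void)
{
	atomic_unchecked_t irq_err_count = { 0 };

	atomic_inc_unchecked(&irq_err_count);
	printf("ERR: %d\n", atomic_read_unchecked(&irq_err_count));
	return 0;
}
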
25474diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25475index 63ce838..2ea3e06 100644
25476--- a/arch/x86/kernel/irq_32.c
25477+++ b/arch/x86/kernel/irq_32.c
25478@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25479
25480 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25481
25482+extern void gr_handle_kernel_exploit(void);
25483+
25484 int sysctl_panic_on_stackoverflow __read_mostly;
25485
25486 /* Debugging check for stack overflow: is there less than 1KB free? */
25487@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25488 __asm__ __volatile__("andl %%esp,%0" :
25489 "=r" (sp) : "0" (THREAD_SIZE - 1));
25490
25491- return sp < (sizeof(struct thread_info) + STACK_WARN);
25492+ return sp < STACK_WARN;
25493 }
25494
25495 static void print_stack_overflow(void)
25496 {
25497 printk(KERN_WARNING "low stack detected by irq handler\n");
25498 dump_stack();
25499+ gr_handle_kernel_exploit();
25500 if (sysctl_panic_on_stackoverflow)
25501 panic("low stack detected by irq handler - check messages\n");
25502 }
25503@@ -84,10 +87,9 @@ static inline void *current_stack(void)
25504 static inline int
25505 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25506 {
25507- struct irq_stack *curstk, *irqstk;
25508+ struct irq_stack *irqstk;
25509 u32 *isp, *prev_esp, arg1, arg2;
25510
25511- curstk = (struct irq_stack *) current_stack();
25512 irqstk = __this_cpu_read(hardirq_stack);
25513
25514 /*
25515@@ -96,15 +98,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25516 * handler) we can't do that and just have to keep using the
25517 * current stack (which is the irq stack already after all)
25518 */
25519- if (unlikely(curstk == irqstk))
25520+ if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
25521 return 0;
25522
25523- isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
25524+ isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
25525
25526 /* Save the next esp at the bottom of the stack */
25527 prev_esp = (u32 *)irqstk;
25528 *prev_esp = current_stack_pointer;
25529
25530+#ifdef CONFIG_PAX_MEMORY_UDEREF
25531+ __set_fs(MAKE_MM_SEG(0));
25532+#endif
25533+
25534 if (unlikely(overflow))
25535 call_on_stack(print_stack_overflow, isp);
25536
25537@@ -115,6 +121,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25538 : "0" (irq), "1" (desc), "2" (isp),
25539 "D" (desc->handle_irq)
25540 : "memory", "cc", "ecx");
25541+
25542+#ifdef CONFIG_PAX_MEMORY_UDEREF
25543+ __set_fs(current_thread_info()->addr_limit);
25544+#endif
25545+
25546 return 1;
25547 }
25548
25549@@ -123,32 +134,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25550 */
25551 void irq_ctx_init(int cpu)
25552 {
25553- struct irq_stack *irqstk;
25554-
25555 if (per_cpu(hardirq_stack, cpu))
25556 return;
25557
25558- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25559- THREADINFO_GFP,
25560- THREAD_SIZE_ORDER));
25561- per_cpu(hardirq_stack, cpu) = irqstk;
25562-
25563- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25564- THREADINFO_GFP,
25565- THREAD_SIZE_ORDER));
25566- per_cpu(softirq_stack, cpu) = irqstk;
25567-
25568- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
25569- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
25570+ per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25571+ per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25572 }
25573
25574 void do_softirq_own_stack(void)
25575 {
25576- struct thread_info *curstk;
25577 struct irq_stack *irqstk;
25578 u32 *isp, *prev_esp;
25579
25580- curstk = current_stack();
25581 irqstk = __this_cpu_read(softirq_stack);
25582
25583 /* build the stack frame on the softirq stack */
25584@@ -158,7 +155,16 @@ void do_softirq_own_stack(void)
25585 prev_esp = (u32 *)irqstk;
25586 *prev_esp = current_stack_pointer;
25587
25588+#ifdef CONFIG_PAX_MEMORY_UDEREF
25589+ __set_fs(MAKE_MM_SEG(0));
25590+#endif
25591+
25592 call_on_stack(__do_softirq, isp);
25593+
25594+#ifdef CONFIG_PAX_MEMORY_UDEREF
25595+ __set_fs(current_thread_info()->addr_limit);
25596+#endif
25597+
25598 }
25599
25600 bool handle_irq(unsigned irq, struct pt_regs *regs)
25601@@ -172,7 +178,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
25602 if (unlikely(!desc))
25603 return false;
25604
25605- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25606+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25607 if (unlikely(overflow))
25608 print_stack_overflow();
25609 desc->handle_irq(irq, desc);
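
The rewritten execute_on_irq_stack() no longer compares thread_info pointers to decide whether it is already running on the hardirq stack; it checks whether the current stack pointer lies within THREAD_SIZE bytes above the irq stack base. The unsigned subtraction handles both directions at once, as this small model shows:

#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 8192	/* illustrative */

static int on_irq_stack(uintptr_t sp, uintptr_t irqstk)
{
	/* Unsigned subtraction: sp below the base wraps to a huge value,
	 * so only sp in [irqstk, irqstk + THREAD_SIZE) returns true. */
	return sp - irqstk < THREAD_SIZE;
}

int main(void)
{
	uintptr_t base = 0x100000;

	printf("%d %d %d\n",
	       on_irq_stack(base + 0x400,  base),	/* 1: on the irq stack */
	       on_irq_stack(base + 0x4000, base),	/* 0: past its top */
	       on_irq_stack(base - 0x10,   base));	/* 0: below its base */
	return 0;
}
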
25610diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
25611index 4d1c746..55a22d6 100644
25612--- a/arch/x86/kernel/irq_64.c
25613+++ b/arch/x86/kernel/irq_64.c
25614@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
25615 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
25616 EXPORT_PER_CPU_SYMBOL(irq_regs);
25617
25618+extern void gr_handle_kernel_exploit(void);
25619+
25620 int sysctl_panic_on_stackoverflow;
25621
25622 /*
25623@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25624 u64 estack_top, estack_bottom;
25625 u64 curbase = (u64)task_stack_page(current);
25626
25627- if (user_mode_vm(regs))
25628+ if (user_mode(regs))
25629 return;
25630
25631 if (regs->sp >= curbase + sizeof(struct thread_info) +
25632@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25633 irq_stack_top, irq_stack_bottom,
25634 estack_top, estack_bottom);
25635
25636+ gr_handle_kernel_exploit();
25637+
25638 if (sysctl_panic_on_stackoverflow)
25639 panic("low stack detected by irq handler - check messages\n");
25640 #endif
25641diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
25642index 26d5a55..a01160a 100644
25643--- a/arch/x86/kernel/jump_label.c
25644+++ b/arch/x86/kernel/jump_label.c
25645@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25646 * Jump label is enabled for the first time.
25647 * So we expect a default_nop...
25648 */
25649- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
25650+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
25651 != 0))
25652 bug_at((void *)entry->code, __LINE__);
25653 } else {
25654@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25655 * ...otherwise expect an ideal_nop. Otherwise
25656 * something went horribly wrong.
25657 */
25658- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
25659+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
25660 != 0))
25661 bug_at((void *)entry->code, __LINE__);
25662 }
25663@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
25664 * are converting the default nop to the ideal nop.
25665 */
25666 if (init) {
25667- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
25668+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
25669 bug_at((void *)entry->code, __LINE__);
25670 } else {
25671 code.jump = 0xe9;
25672 code.offset = entry->target -
25673 (entry->code + JUMP_LABEL_NOP_SIZE);
25674- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
25675+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
25676 bug_at((void *)entry->code, __LINE__);
25677 }
25678 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
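
ktla_ktva() and its inverse ktva_ktla(), used in this hunk and throughout the rest of the patch, translate between a kernel text (linear) address and its alias: under i386 KERNEXEC the kernel image is reachable at two addresses, and reads or writes of instruction bytes must go through the alias while the hardware keeps executing the original address. A hedged sketch with a placeholder offset (the real constant is defined by the patch's KERNEXEC layout; without KERNEXEC both helpers collapse to the identity):

#include <stdio.h>

#define __KERNEL_TEXT_OFFSET	0x1000000UL	/* assumption, for illustration only */

#define ktla_ktva(addr)	((addr) + __KERNEL_TEXT_OFFSET)	/* text -> accessible alias */
#define ktva_ktla(addr)	((addr) - __KERNEL_TEXT_OFFSET)	/* alias -> text */

int main(void)
{
	unsigned long entry_code = 0xc1234560UL;

	printf("execute at %#lx, read/compare bytes at %#lx\n",
	       entry_code, ktla_ktva(entry_code));
	return 0;
}
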
25679diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
25680index 7ec1d5f..5a7d130 100644
25681--- a/arch/x86/kernel/kgdb.c
25682+++ b/arch/x86/kernel/kgdb.c
25683@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
25684 #ifdef CONFIG_X86_32
25685 switch (regno) {
25686 case GDB_SS:
25687- if (!user_mode_vm(regs))
25688+ if (!user_mode(regs))
25689 *(unsigned long *)mem = __KERNEL_DS;
25690 break;
25691 case GDB_SP:
25692- if (!user_mode_vm(regs))
25693+ if (!user_mode(regs))
25694 *(unsigned long *)mem = kernel_stack_pointer(regs);
25695 break;
25696 case GDB_GS:
25697@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
25698 bp->attr.bp_addr = breakinfo[breakno].addr;
25699 bp->attr.bp_len = breakinfo[breakno].len;
25700 bp->attr.bp_type = breakinfo[breakno].type;
25701- info->address = breakinfo[breakno].addr;
25702+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
25703+ info->address = ktla_ktva(breakinfo[breakno].addr);
25704+ else
25705+ info->address = breakinfo[breakno].addr;
25706 info->len = breakinfo[breakno].len;
25707 info->type = breakinfo[breakno].type;
25708 val = arch_install_hw_breakpoint(bp);
25709@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
25710 case 'k':
25711 /* clear the trace bit */
25712 linux_regs->flags &= ~X86_EFLAGS_TF;
25713- atomic_set(&kgdb_cpu_doing_single_step, -1);
25714+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
25715
25716 /* set the trace bit if we're stepping */
25717 if (remcomInBuffer[0] == 's') {
25718 linux_regs->flags |= X86_EFLAGS_TF;
25719- atomic_set(&kgdb_cpu_doing_single_step,
25720+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
25721 raw_smp_processor_id());
25722 }
25723
25724@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
25725
25726 switch (cmd) {
25727 case DIE_DEBUG:
25728- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
25729+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
25730 if (user_mode(regs))
25731 return single_step_cont(regs, args);
25732 break;
25733@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25734 #endif /* CONFIG_DEBUG_RODATA */
25735
25736 bpt->type = BP_BREAKPOINT;
25737- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
25738+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
25739 BREAK_INSTR_SIZE);
25740 if (err)
25741 return err;
25742- err = probe_kernel_write((char *)bpt->bpt_addr,
25743+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25744 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
25745 #ifdef CONFIG_DEBUG_RODATA
25746 if (!err)
25747@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25748 return -EBUSY;
25749 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
25750 BREAK_INSTR_SIZE);
25751- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25752+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25753 if (err)
25754 return err;
25755 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
25756@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
25757 if (mutex_is_locked(&text_mutex))
25758 goto knl_write;
25759 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
25760- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25761+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25762 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
25763 goto knl_write;
25764 return err;
25765 knl_write:
25766 #endif /* CONFIG_DEBUG_RODATA */
25767- return probe_kernel_write((char *)bpt->bpt_addr,
25768+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25769 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
25770 }
25771
25772diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
25773index 67e6d19..731ed28 100644
25774--- a/arch/x86/kernel/kprobes/core.c
25775+++ b/arch/x86/kernel/kprobes/core.c
25776@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
25777 s32 raddr;
25778 } __packed *insn;
25779
25780- insn = (struct __arch_relative_insn *)from;
25781+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
25782+
25783+ pax_open_kernel();
25784 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
25785 insn->op = op;
25786+ pax_close_kernel();
25787 }
25788
25789 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
25790@@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
25791 kprobe_opcode_t opcode;
25792 kprobe_opcode_t *orig_opcodes = opcodes;
25793
25794- if (search_exception_tables((unsigned long)opcodes))
25795+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
25796 return 0; /* Page fault may occur on this address. */
25797
25798 retry:
25799@@ -242,9 +245,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
25800 * for the first byte, we can recover the original instruction
25801 * from it and kp->opcode.
25802 */
25803- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25804+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25805 buf[0] = kp->opcode;
25806- return (unsigned long)buf;
25807+ return ktva_ktla((unsigned long)buf);
25808 }
25809
25810 /*
25811@@ -336,7 +339,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25812 /* Another subsystem puts a breakpoint, failed to recover */
25813 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
25814 return 0;
25815+ pax_open_kernel();
25816 memcpy(dest, insn.kaddr, insn.length);
25817+ pax_close_kernel();
25818
25819 #ifdef CONFIG_X86_64
25820 if (insn_rip_relative(&insn)) {
25821@@ -363,7 +368,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25822 return 0;
25823 }
25824 disp = (u8 *) dest + insn_offset_displacement(&insn);
25825+ pax_open_kernel();
25826 *(s32 *) disp = (s32) newdisp;
25827+ pax_close_kernel();
25828 }
25829 #endif
25830 return insn.length;
25831@@ -505,7 +512,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25832 * nor set current_kprobe, because it doesn't use single
25833 * stepping.
25834 */
25835- regs->ip = (unsigned long)p->ainsn.insn;
25836+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25837 preempt_enable_no_resched();
25838 return;
25839 }
25840@@ -522,9 +529,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25841 regs->flags &= ~X86_EFLAGS_IF;
25842 /* single step inline if the instruction is an int3 */
25843 if (p->opcode == BREAKPOINT_INSTRUCTION)
25844- regs->ip = (unsigned long)p->addr;
25845+ regs->ip = ktla_ktva((unsigned long)p->addr);
25846 else
25847- regs->ip = (unsigned long)p->ainsn.insn;
25848+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25849 }
25850 NOKPROBE_SYMBOL(setup_singlestep);
25851
25852@@ -574,7 +581,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25853 struct kprobe *p;
25854 struct kprobe_ctlblk *kcb;
25855
25856- if (user_mode_vm(regs))
25857+ if (user_mode(regs))
25858 return 0;
25859
25860 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
25861@@ -609,7 +616,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25862 setup_singlestep(p, regs, kcb, 0);
25863 return 1;
25864 }
25865- } else if (*addr != BREAKPOINT_INSTRUCTION) {
25866+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
25867 /*
25868 * The breakpoint instruction was removed right
25869 * after we hit it. Another cpu has removed
25870@@ -656,6 +663,9 @@ static void __used kretprobe_trampoline_holder(void)
25871 " movq %rax, 152(%rsp)\n"
25872 RESTORE_REGS_STRING
25873 " popfq\n"
25874+#ifdef KERNEXEC_PLUGIN
25875+ " btsq $63,(%rsp)\n"
25876+#endif
25877 #else
25878 " pushf\n"
25879 SAVE_REGS_STRING
25880@@ -796,7 +806,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
25881 struct kprobe_ctlblk *kcb)
25882 {
25883 unsigned long *tos = stack_addr(regs);
25884- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
25885+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
25886 unsigned long orig_ip = (unsigned long)p->addr;
25887 kprobe_opcode_t *insn = p->ainsn.insn;
25888
25889@@ -979,7 +989,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
25890 struct die_args *args = data;
25891 int ret = NOTIFY_DONE;
25892
25893- if (args->regs && user_mode_vm(args->regs))
25894+ if (args->regs && user_mode(args->regs))
25895 return ret;
25896
25897 if (val == DIE_GPF) {
25898diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
25899index f304773..551e63c 100644
25900--- a/arch/x86/kernel/kprobes/opt.c
25901+++ b/arch/x86/kernel/kprobes/opt.c
25902@@ -79,6 +79,7 @@ found:
25903 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
25904 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25905 {
25906+ pax_open_kernel();
25907 #ifdef CONFIG_X86_64
25908 *addr++ = 0x48;
25909 *addr++ = 0xbf;
25910@@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25911 *addr++ = 0xb8;
25912 #endif
25913 *(unsigned long *)addr = val;
25914+ pax_close_kernel();
25915 }
25916
25917 asm (
25918@@ -337,7 +339,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25919 * Verify if the address gap is in 2GB range, because this uses
25920 * a relative jump.
25921 */
25922- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
25923+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
25924 if (abs(rel) > 0x7fffffff)
25925 return -ERANGE;
25926
25927@@ -352,16 +354,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25928 op->optinsn.size = ret;
25929
25930 /* Copy arch-dep-instance from template */
25931- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
25932+ pax_open_kernel();
25933+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
25934+ pax_close_kernel();
25935
25936 /* Set probe information */
25937 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
25938
25939 /* Set probe function call */
25940- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
25941+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
25942
25943 /* Set returning jmp instruction at the tail of out-of-line buffer */
25944- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
25945+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
25946 (u8 *)op->kp.addr + op->optinsn.size);
25947
25948 flush_icache_range((unsigned long) buf,
25949@@ -386,7 +390,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
25950 WARN_ON(kprobe_disabled(&op->kp));
25951
25952 /* Backup instructions which will be replaced by jump address */
25953- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
25954+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
25955 RELATIVE_ADDR_SIZE);
25956
25957 insn_buf[0] = RELATIVEJUMP_OPCODE;
25958@@ -434,7 +438,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
25959 /* This kprobe is really able to run optimized path. */
25960 op = container_of(p, struct optimized_kprobe, kp);
25961 /* Detour through copied instructions */
25962- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
25963+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
25964 if (!reenter)
25965 reset_current_kprobe();
25966 preempt_enable_no_resched();
25967diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
25968index c2bedae..25e7ab60 100644
25969--- a/arch/x86/kernel/ksysfs.c
25970+++ b/arch/x86/kernel/ksysfs.c
25971@@ -184,7 +184,7 @@ out:
25972
25973 static struct kobj_attribute type_attr = __ATTR_RO(type);
25974
25975-static struct bin_attribute data_attr = {
25976+static bin_attribute_no_const data_attr __read_only = {
25977 .attr = {
25978 .name = "data",
25979 .mode = S_IRUGO,
25980diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
25981index c37886d..d851d32 100644
25982--- a/arch/x86/kernel/ldt.c
25983+++ b/arch/x86/kernel/ldt.c
25984@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
25985 if (reload) {
25986 #ifdef CONFIG_SMP
25987 preempt_disable();
25988- load_LDT(pc);
25989+ load_LDT_nolock(pc);
25990 if (!cpumask_equal(mm_cpumask(current->mm),
25991 cpumask_of(smp_processor_id())))
25992 smp_call_function(flush_ldt, current->mm, 1);
25993 preempt_enable();
25994 #else
25995- load_LDT(pc);
25996+ load_LDT_nolock(pc);
25997 #endif
25998 }
25999 if (oldsize) {
26000@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
26001 return err;
26002
26003 for (i = 0; i < old->size; i++)
26004- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
26005+ write_ldt_entry(new->ldt, i, old->ldt + i);
26006 return 0;
26007 }
26008
26009@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
26010 retval = copy_ldt(&mm->context, &old_mm->context);
26011 mutex_unlock(&old_mm->context.lock);
26012 }
26013+
26014+ if (tsk == current) {
26015+ mm->context.vdso = 0;
26016+
26017+#ifdef CONFIG_X86_32
26018+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
26019+ mm->context.user_cs_base = 0UL;
26020+ mm->context.user_cs_limit = ~0UL;
26021+
26022+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
26023+ cpus_clear(mm->context.cpu_user_cs_mask);
26024+#endif
26025+
26026+#endif
26027+#endif
26028+
26029+ }
26030+
26031 return retval;
26032 }
26033
26034@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
26035 }
26036 }
26037
26038+#ifdef CONFIG_PAX_SEGMEXEC
26039+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
26040+ error = -EINVAL;
26041+ goto out_unlock;
26042+ }
26043+#endif
26044+
26045 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
26046 error = -EINVAL;
26047 goto out_unlock;
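
The SEGMEXEC hunk rejects write_ldt() requests that would install a code descriptor (MODIFY_LDT_CONTENTS_CODE) for tasks running under segmentation-based non-executable pages, since an attacker-controlled code segment would bypass the code/data split. A userspace probe of the new -EINVAL path (Linux/x86 only; on kernels without SEGMEXEC the call normally succeeds):

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>

int main(void)
{
	struct user_desc d = {
		.entry_number   = 0,
		.base_addr      = 0,
		.limit          = 0xfffff,
		.seg_32bit      = 1,
		.contents       = MODIFY_LDT_CONTENTS_CODE,
		.limit_in_pages = 1,
		.useable        = 1,
	};

	if (syscall(SYS_modify_ldt, 1, &d, sizeof(d)) != 0)
		printf("modify_ldt: %s\n", strerror(errno));	/* EINVAL under SEGMEXEC */
	else
		puts("code LDT entry installed");
	return 0;
}
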
26048diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
26049index 1667b1d..16492c5 100644
26050--- a/arch/x86/kernel/machine_kexec_32.c
26051+++ b/arch/x86/kernel/machine_kexec_32.c
26052@@ -25,7 +25,7 @@
26053 #include <asm/cacheflush.h>
26054 #include <asm/debugreg.h>
26055
26056-static void set_idt(void *newidt, __u16 limit)
26057+static void set_idt(struct desc_struct *newidt, __u16 limit)
26058 {
26059 struct desc_ptr curidt;
26060
26061@@ -37,7 +37,7 @@ static void set_idt(void *newidt, __u16 limit)
26062 }
26063
26064
26065-static void set_gdt(void *newgdt, __u16 limit)
26066+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
26067 {
26068 struct desc_ptr curgdt;
26069
26070@@ -215,7 +215,7 @@ void machine_kexec(struct kimage *image)
26071 }
26072
26073 control_page = page_address(image->control_code_page);
26074- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
26075+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
26076
26077 relocate_kernel_ptr = control_page;
26078 page_list[PA_CONTROL_PAGE] = __pa(control_page);
26079diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
26080index c050a01..5774072 100644
26081--- a/arch/x86/kernel/mcount_64.S
26082+++ b/arch/x86/kernel/mcount_64.S
26083@@ -7,7 +7,7 @@
26084 #include <linux/linkage.h>
26085 #include <asm/ptrace.h>
26086 #include <asm/ftrace.h>
26087-
26088+#include <asm/alternative-asm.h>
26089
26090 .code64
26091 .section .entry.text, "ax"
26092@@ -24,8 +24,9 @@
26093 #ifdef CONFIG_DYNAMIC_FTRACE
26094
26095 ENTRY(function_hook)
26096+ pax_force_retaddr
26097 retq
26098-END(function_hook)
26099+ENDPROC(function_hook)
26100
26101 /* skip is set if stack has been adjusted */
26102 .macro ftrace_caller_setup skip=0
26103@@ -66,8 +67,9 @@ GLOBAL(ftrace_graph_call)
26104 #endif
26105
26106 GLOBAL(ftrace_stub)
26107+ pax_force_retaddr
26108 retq
26109-END(ftrace_caller)
26110+ENDPROC(ftrace_caller)
26111
26112 ENTRY(ftrace_regs_caller)
26113 /* Save the current flags before compare (in SS location)*/
26114@@ -135,7 +137,7 @@ ftrace_restore_flags:
26115 popfq
26116 jmp ftrace_stub
26117
26118-END(ftrace_regs_caller)
26119+ENDPROC(ftrace_regs_caller)
26120
26121
26122 #else /* ! CONFIG_DYNAMIC_FTRACE */
26123@@ -156,6 +158,7 @@ ENTRY(function_hook)
26124 #endif
26125
26126 GLOBAL(ftrace_stub)
26127+ pax_force_retaddr
26128 retq
26129
26130 trace:
26131@@ -169,12 +172,13 @@ trace:
26132 #endif
26133 subq $MCOUNT_INSN_SIZE, %rdi
26134
26135+ pax_force_fptr ftrace_trace_function
26136 call *ftrace_trace_function
26137
26138 MCOUNT_RESTORE_FRAME
26139
26140 jmp ftrace_stub
26141-END(function_hook)
26142+ENDPROC(function_hook)
26143 #endif /* CONFIG_DYNAMIC_FTRACE */
26144 #endif /* CONFIG_FUNCTION_TRACER */
26145
26146@@ -196,8 +200,9 @@ ENTRY(ftrace_graph_caller)
26147
26148 MCOUNT_RESTORE_FRAME
26149
26150+ pax_force_retaddr
26151 retq
26152-END(ftrace_graph_caller)
26153+ENDPROC(ftrace_graph_caller)
26154
26155 GLOBAL(return_to_handler)
26156 subq $24, %rsp
26157@@ -213,5 +218,7 @@ GLOBAL(return_to_handler)
26158 movq 8(%rsp), %rdx
26159 movq (%rsp), %rax
26160 addq $24, %rsp
26161+ pax_force_fptr %rdi
26162 jmp *%rdi
26163+ENDPROC(return_to_handler)
26164 #endif
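
pax_force_retaddr, inserted before every retq above, is KERNEXEC's return-address protection: on amd64 it ORs bit 63 back into the saved return address (the explicit btsq $63,(%rsp) in the kretprobe trampoline earlier in the patch is the same operation), so a return address corrupted to a userland-range value no longer returns there. A numeric illustration of the transform (this sketches the rationale, not the exact plugin output):

#include <stdio.h>
#include <inttypes.h>

/* The btsq $63,(%rsp) above, as plain arithmetic. */
static uint64_t force_retaddr(uint64_t ret)
{
	return ret | (1ULL << 63);
}

int main(void)
{
	uint64_t user = 0x00007f0012345678ULL;	/* attacker-supplied address */
	uint64_t kern = 0xffffffff81000000ULL;	/* legitimate kernel text */

	printf("%#018" PRIx64 " -> %#018" PRIx64 "\n", user, force_retaddr(user));
	printf("%#018" PRIx64 " -> %#018" PRIx64 "\n", kern, force_retaddr(kern));
	return 0;
}
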
26165diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
26166index e69f988..da078ea 100644
26167--- a/arch/x86/kernel/module.c
26168+++ b/arch/x86/kernel/module.c
26169@@ -81,17 +81,62 @@ static unsigned long int get_module_load_offset(void)
26170 }
26171 #endif
26172
26173-void *module_alloc(unsigned long size)
26174+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
26175 {
26176- if (PAGE_ALIGN(size) > MODULES_LEN)
26177+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
26178 return NULL;
26179 return __vmalloc_node_range(size, 1,
26180 MODULES_VADDR + get_module_load_offset(),
26181- MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
26182- PAGE_KERNEL_EXEC, NUMA_NO_NODE,
26183+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
26184+ prot, NUMA_NO_NODE,
26185 __builtin_return_address(0));
26186 }
26187
26188+void *module_alloc(unsigned long size)
26189+{
26190+
26191+#ifdef CONFIG_PAX_KERNEXEC
26192+ return __module_alloc(size, PAGE_KERNEL);
26193+#else
26194+ return __module_alloc(size, PAGE_KERNEL_EXEC);
26195+#endif
26196+
26197+}
26198+
26199+#ifdef CONFIG_PAX_KERNEXEC
26200+#ifdef CONFIG_X86_32
26201+void *module_alloc_exec(unsigned long size)
26202+{
26203+ struct vm_struct *area;
26204+
26205+ if (size == 0)
26206+ return NULL;
26207+
26208+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
25209+ return area ? area->addr : NULL;
26210+}
26211+EXPORT_SYMBOL(module_alloc_exec);
26212+
26213+void module_free_exec(struct module *mod, void *module_region)
26214+{
26215+ vunmap(module_region);
26216+}
26217+EXPORT_SYMBOL(module_free_exec);
26218+#else
26219+void module_free_exec(struct module *mod, void *module_region)
26220+{
26221+ module_free(mod, module_region);
26222+}
26223+EXPORT_SYMBOL(module_free_exec);
26224+
26225+void *module_alloc_exec(unsigned long size)
26226+{
26227+ return __module_alloc(size, PAGE_KERNEL_RX);
26228+}
26229+EXPORT_SYMBOL(module_alloc_exec);
26230+#endif
26231+#endif
26232+
26233 #ifdef CONFIG_X86_32
26234 int apply_relocate(Elf32_Shdr *sechdrs,
26235 const char *strtab,
26236@@ -102,14 +147,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26237 unsigned int i;
26238 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
26239 Elf32_Sym *sym;
26240- uint32_t *location;
26241+ uint32_t *plocation, location;
26242
26243 DEBUGP("Applying relocate section %u to %u\n",
26244 relsec, sechdrs[relsec].sh_info);
26245 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
26246 /* This is where to make the change */
26247- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
26248- + rel[i].r_offset;
26249+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
26250+ location = (uint32_t)plocation;
26251+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
26252+ plocation = ktla_ktva((void *)plocation);
26253 /* This is the symbol it is referring to. Note that all
26254 undefined symbols have been resolved. */
26255 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
26256@@ -118,11 +165,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26257 switch (ELF32_R_TYPE(rel[i].r_info)) {
26258 case R_386_32:
26259 /* We add the value into the location given */
26260- *location += sym->st_value;
26261+ pax_open_kernel();
26262+ *plocation += sym->st_value;
26263+ pax_close_kernel();
26264 break;
26265 case R_386_PC32:
26266 /* Add the value, subtract its position */
26267- *location += sym->st_value - (uint32_t)location;
26268+ pax_open_kernel();
26269+ *plocation += sym->st_value - location;
26270+ pax_close_kernel();
26271 break;
26272 default:
26273 pr_err("%s: Unknown relocation: %u\n",
26274@@ -167,21 +218,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
26275 case R_X86_64_NONE:
26276 break;
26277 case R_X86_64_64:
26278+ pax_open_kernel();
26279 *(u64 *)loc = val;
26280+ pax_close_kernel();
26281 break;
26282 case R_X86_64_32:
26283+ pax_open_kernel();
26284 *(u32 *)loc = val;
26285+ pax_close_kernel();
26286 if (val != *(u32 *)loc)
26287 goto overflow;
26288 break;
26289 case R_X86_64_32S:
26290+ pax_open_kernel();
26291 *(s32 *)loc = val;
26292+ pax_close_kernel();
26293 if ((s64)val != *(s32 *)loc)
26294 goto overflow;
26295 break;
26296 case R_X86_64_PC32:
26297 val -= (u64)loc;
26298+ pax_open_kernel();
26299 *(u32 *)loc = val;
26300+ pax_close_kernel();
26301+
26302 #if 0
26303 if ((s64)val != *(s32 *)loc)
26304 goto overflow;
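
Under PAX_KERNEXEC the module loader stops handing out writable-and-executable memory in a single allocation: module_alloc() now returns PAGE_KERNEL (writable, non-executable) for data, while the new module_alloc_exec()/module_free_exec() pair manages a separate read-execute region for code (a dedicated MODULES_EXEC_VADDR window on i386, PAGE_KERNEL_RX vmalloc on amd64). A userspace analogy of the two-pool idea using mmap, purely illustrative:

#include <stdio.h>
#include <sys/mman.h>

/* Data pool: writable, never executable (PAGE_KERNEL in the patch). */
static void *module_alloc_data(size_t size)
{
	return mmap(NULL, size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

/* Code pool: executable, never writable (PAGE_KERNEL_RX in the patch). */
static void *module_alloc_exec(size_t size)
{
	return mmap(NULL, size, PROT_READ | PROT_EXEC,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

int main(void)
{
	void *data = module_alloc_data(4096);
	void *code = module_alloc_exec(4096);

	if (data == MAP_FAILED || code == MAP_FAILED)
		return 1;
	printf("data=%p (rw-), code=%p (r-x)\n", data, code);
	return 0;
}
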
26305diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26306index c9603ac..9f88728 100644
26307--- a/arch/x86/kernel/msr.c
26308+++ b/arch/x86/kernel/msr.c
26309@@ -37,6 +37,7 @@
26310 #include <linux/notifier.h>
26311 #include <linux/uaccess.h>
26312 #include <linux/gfp.h>
26313+#include <linux/grsecurity.h>
26314
26315 #include <asm/processor.h>
26316 #include <asm/msr.h>
26317@@ -103,6 +104,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26318 int err = 0;
26319 ssize_t bytes = 0;
26320
26321+#ifdef CONFIG_GRKERNSEC_KMEM
26322+ gr_handle_msr_write();
26323+ return -EPERM;
26324+#endif
26325+
26326 if (count % 8)
26327 return -EINVAL; /* Invalid chunk size */
26328
26329@@ -150,6 +156,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26330 err = -EBADF;
26331 break;
26332 }
26333+#ifdef CONFIG_GRKERNSEC_KMEM
26334+ gr_handle_msr_write();
26335+ return -EPERM;
26336+#endif
26337 if (copy_from_user(&regs, uregs, sizeof regs)) {
26338 err = -EFAULT;
26339 break;
26340@@ -233,7 +243,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26341 return notifier_from_errno(err);
26342 }
26343
26344-static struct notifier_block __refdata msr_class_cpu_notifier = {
26345+static struct notifier_block msr_class_cpu_notifier = {
26346 .notifier_call = msr_class_cpu_callback,
26347 };
26348
26349diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26350index c3e985d..110a36a 100644
26351--- a/arch/x86/kernel/nmi.c
26352+++ b/arch/x86/kernel/nmi.c
26353@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
26354
26355 static void nmi_max_handler(struct irq_work *w)
26356 {
26357- struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
26358+ struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
26359 int remainder_ns, decimal_msecs;
26360- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
26361+ u64 whole_msecs = ACCESS_ONCE(n->max_duration);
26362
26363 remainder_ns = do_div(whole_msecs, (1000 * 1000));
26364 decimal_msecs = remainder_ns / 1000;
26365
26366 printk_ratelimited(KERN_INFO
26367 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
26368- a->handler, whole_msecs, decimal_msecs);
26369+ n->action->handler, whole_msecs, decimal_msecs);
26370 }
26371
26372 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26373@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26374 delta = sched_clock() - delta;
26375 trace_nmi_handler(a->handler, (int)delta, thishandled);
26376
26377- if (delta < nmi_longest_ns || delta < a->max_duration)
26378+ if (delta < nmi_longest_ns || delta < a->work->max_duration)
26379 continue;
26380
26381- a->max_duration = delta;
26382- irq_work_queue(&a->irq_work);
26383+ a->work->max_duration = delta;
26384+ irq_work_queue(&a->work->irq_work);
26385 }
26386
26387 rcu_read_unlock();
26388@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26389 }
26390 NOKPROBE_SYMBOL(nmi_handle);
26391
26392-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26393+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26394 {
26395 struct nmi_desc *desc = nmi_to_desc(type);
26396 unsigned long flags;
26397@@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26398 if (!action->handler)
26399 return -EINVAL;
26400
26401- init_irq_work(&action->irq_work, nmi_max_handler);
26402+ action->work->action = action;
26403+ init_irq_work(&action->work->irq_work, nmi_max_handler);
26404
26405 spin_lock_irqsave(&desc->lock, flags);
26406
26407@@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26408 * event confuses some handlers (kdump uses this flag)
26409 */
26410 if (action->flags & NMI_FLAG_FIRST)
26411- list_add_rcu(&action->list, &desc->head);
26412+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26413 else
26414- list_add_tail_rcu(&action->list, &desc->head);
26415+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26416
26417 spin_unlock_irqrestore(&desc->lock, flags);
26418 return 0;
26419@@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26420 if (!strcmp(n->name, name)) {
26421 WARN(in_nmi(),
26422 "Trying to free NMI (%s) from NMI context!\n", n->name);
26423- list_del_rcu(&n->list);
26424+ pax_list_del_rcu((struct list_head *)&n->list);
26425 break;
26426 }
26427 }
26428@@ -528,6 +529,17 @@ static inline void nmi_nesting_postprocess(void)
26429 dotraplinkage notrace void
26430 do_nmi(struct pt_regs *regs, long error_code)
26431 {
26432+
26433+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26434+ if (!user_mode(regs)) {
26435+ unsigned long cs = regs->cs & 0xFFFF;
26436+ unsigned long ip = ktva_ktla(regs->ip);
26437+
26438+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26439+ regs->ip = ip;
26440+ }
26441+#endif
26442+
26443 nmi_nesting_preprocess(regs);
26444
26445 nmi_enter();
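
The nmi.c hunks split the mutable profiling state (max_duration plus its irq_work) out of struct nmiaction into a companion struct nmiwork that the action points at; with the mutable fields gone, __register_nmi_handler() can take the action as const and registrations can live in read-only memory. The resulting shape, reduced to a compilable sketch (field types are simplified stand-ins):

#include <stdio.h>

struct nmiaction;

struct nmiwork {
	const struct nmiaction *action;		/* back-pointer for reporting */
	unsigned long long max_duration;	/* mutable profiling state */
	/* struct irq_work irq_work; lives here in the real layout */
};

struct nmiaction {
	int (*handler)(unsigned int type, void *regs);	/* simplified signature */
	const char *name;
	struct nmiwork *work;	/* the only mutable part the action touches */
};

static int dummy_handler(unsigned int type, void *regs)
{
	(void)type; (void)regs;
	return 1;
}

static struct nmiwork dummy_work;
static const struct nmiaction dummy_action = {
	.handler = dummy_handler,
	.name    = "dummy",
	.work    = &dummy_work,
};

int main(void)
{
	dummy_work.action = &dummy_action;	/* __register_nmi_handler() does this */
	dummy_work.max_duration = 42;
	printf("%s: max %llu\n", dummy_work.action->name, dummy_work.max_duration);
	return 0;
}
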
26446diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26447index 6d9582e..f746287 100644
26448--- a/arch/x86/kernel/nmi_selftest.c
26449+++ b/arch/x86/kernel/nmi_selftest.c
26450@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26451 {
26452 /* trap all the unknown NMIs we may generate */
26453 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26454- __initdata);
26455+ __initconst);
26456 }
26457
26458 static void __init cleanup_nmi_testsuite(void)
26459@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26460 unsigned long timeout;
26461
26462 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26463- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26464+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26465 nmi_fail = FAILURE;
26466 return;
26467 }
26468diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26469index bbb6c73..24a58ef 100644
26470--- a/arch/x86/kernel/paravirt-spinlocks.c
26471+++ b/arch/x86/kernel/paravirt-spinlocks.c
26472@@ -8,7 +8,7 @@
26473
26474 #include <asm/paravirt.h>
26475
26476-struct pv_lock_ops pv_lock_ops = {
26477+struct pv_lock_ops pv_lock_ops __read_only = {
26478 #ifdef CONFIG_SMP
26479 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26480 .unlock_kick = paravirt_nop,
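
pv_lock_ops, like the other paravirt op tables in the next file, is annotated __read_only so the function-pointer table cannot be retargeted after boot. The annotation itself is defined elsewhere in the patch; one plausible shape, shown here as an assumption rather than the actual definition, is a dedicated section that the kernel write-protects once init completes:

/* Assumed definition, for illustration only. */
#define __read_only __attribute__((__section__(".data..read_only")))

struct pv_lock_ops_demo {
	void (*unlock_kick)(void);
};

static void nop_kick(void) { }

/* Initialized at build time; in the kernel, never legitimately written
 * again once the section is made read-only. */
static struct pv_lock_ops_demo demo_lock_ops __read_only = {
	.unlock_kick = nop_kick,
};

int main(void)
{
	demo_lock_ops.unlock_kick();
	return 0;
}
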
26481diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26482index 548d25f..f8fb99c 100644
26483--- a/arch/x86/kernel/paravirt.c
26484+++ b/arch/x86/kernel/paravirt.c
26485@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
26486 {
26487 return x;
26488 }
26489+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26490+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26491+#endif
26492
26493 void __init default_banner(void)
26494 {
26495@@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26496
26497 if (opfunc == NULL)
26498 /* If there's no function, patch it with a ud2a (BUG) */
26499- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26500- else if (opfunc == _paravirt_nop)
26501+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26502+ else if (opfunc == (void *)_paravirt_nop)
26503 /* If the operation is a nop, then nop the callsite */
26504 ret = paravirt_patch_nop();
26505
26506 /* identity functions just return their single argument */
26507- else if (opfunc == _paravirt_ident_32)
26508+ else if (opfunc == (void *)_paravirt_ident_32)
26509 ret = paravirt_patch_ident_32(insnbuf, len);
26510- else if (opfunc == _paravirt_ident_64)
26511+ else if (opfunc == (void *)_paravirt_ident_64)
26512 ret = paravirt_patch_ident_64(insnbuf, len);
26513+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26514+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
26515+ ret = paravirt_patch_ident_64(insnbuf, len);
26516+#endif
26517
26518 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
26519 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
26520@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
26521 if (insn_len > len || start == NULL)
26522 insn_len = len;
26523 else
26524- memcpy(insnbuf, start, insn_len);
26525+ memcpy(insnbuf, ktla_ktva(start), insn_len);
26526
26527 return insn_len;
26528 }
26529@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
26530 return this_cpu_read(paravirt_lazy_mode);
26531 }
26532
26533-struct pv_info pv_info = {
26534+struct pv_info pv_info __read_only = {
26535 .name = "bare hardware",
26536 .paravirt_enabled = 0,
26537 .kernel_rpl = 0,
26538@@ -311,16 +318,16 @@ struct pv_info pv_info = {
26539 #endif
26540 };
26541
26542-struct pv_init_ops pv_init_ops = {
26543+struct pv_init_ops pv_init_ops __read_only = {
26544 .patch = native_patch,
26545 };
26546
26547-struct pv_time_ops pv_time_ops = {
26548+struct pv_time_ops pv_time_ops __read_only = {
26549 .sched_clock = native_sched_clock,
26550 .steal_clock = native_steal_clock,
26551 };
26552
26553-__visible struct pv_irq_ops pv_irq_ops = {
26554+__visible struct pv_irq_ops pv_irq_ops __read_only = {
26555 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
26556 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
26557 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
26558@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
26559 #endif
26560 };
26561
26562-__visible struct pv_cpu_ops pv_cpu_ops = {
26563+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
26564 .cpuid = native_cpuid,
26565 .get_debugreg = native_get_debugreg,
26566 .set_debugreg = native_set_debugreg,
26567@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
26568 NOKPROBE_SYMBOL(native_set_debugreg);
26569 NOKPROBE_SYMBOL(native_load_idt);
26570
26571-struct pv_apic_ops pv_apic_ops = {
26572+struct pv_apic_ops pv_apic_ops __read_only = {
26573 #ifdef CONFIG_X86_LOCAL_APIC
26574 .startup_ipi_hook = paravirt_nop,
26575 #endif
26576 };
26577
26578-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
26579+#ifdef CONFIG_X86_32
26580+#ifdef CONFIG_X86_PAE
26581+/* 64-bit pagetable entries */
26582+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
26583+#else
26584 /* 32-bit pagetable entries */
26585 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
26586+#endif
26587 #else
26588 /* 64-bit pagetable entries */
26589 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
26590 #endif
26591
26592-struct pv_mmu_ops pv_mmu_ops = {
26593+struct pv_mmu_ops pv_mmu_ops __read_only = {
26594
26595 .read_cr2 = native_read_cr2,
26596 .write_cr2 = native_write_cr2,
26597@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
26598 .make_pud = PTE_IDENT,
26599
26600 .set_pgd = native_set_pgd,
26601+ .set_pgd_batched = native_set_pgd_batched,
26602 #endif
26603 #endif /* PAGETABLE_LEVELS >= 3 */
26604
26605@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
26606 },
26607
26608 .set_fixmap = native_set_fixmap,
26609+
26610+#ifdef CONFIG_PAX_KERNEXEC
26611+ .pax_open_kernel = native_pax_open_kernel,
26612+ .pax_close_kernel = native_pax_close_kernel,
26613+#endif
26614+
26615 };
26616
26617 EXPORT_SYMBOL_GPL(pv_time_ops);
26618diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
26619index 0497f71..7186c0d 100644
26620--- a/arch/x86/kernel/pci-calgary_64.c
26621+++ b/arch/x86/kernel/pci-calgary_64.c
26622@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
26623 tce_space = be64_to_cpu(readq(target));
26624 tce_space = tce_space & TAR_SW_BITS;
26625
26626- tce_space = tce_space & (~specified_table_size);
26627+ tce_space = tce_space & (~(unsigned long)specified_table_size);
26628 info->tce_space = (u64 *)__va(tce_space);
26629 }
26630 }
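
The calgary change fixes a classic integer-promotion bug: specified_table_size is narrower than tce_space, so ~specified_table_size is computed in 32 bits and zero-extends, and the AND silently clears the upper half of the 64-bit table address. Casting before the complement keeps the high bits set. A two-line demonstration (values are stand-ins):

#include <stdio.h>

int main(void)
{
	unsigned long long tce_space = 0x123456789abc0000ULL;
	unsigned int tbl = 0x10000;	/* stand-in for specified_table_size */

	printf("buggy: %#llx\n", tce_space & ~tbl);	/* upper 32 bits cleared */
	printf("fixed: %#llx\n", tce_space & ~(unsigned long long)tbl);
	return 0;
}
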
26631diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
26632index 35ccf75..7a15747 100644
26633--- a/arch/x86/kernel/pci-iommu_table.c
26634+++ b/arch/x86/kernel/pci-iommu_table.c
26635@@ -2,7 +2,7 @@
26636 #include <asm/iommu_table.h>
26637 #include <linux/string.h>
26638 #include <linux/kallsyms.h>
26639-
26640+#include <linux/sched.h>
26641
26642 #define DEBUG 1
26643
26644diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
26645index 77dd0ad..9ec4723 100644
26646--- a/arch/x86/kernel/pci-swiotlb.c
26647+++ b/arch/x86/kernel/pci-swiotlb.c
26648@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
26649 struct dma_attrs *attrs)
26650 {
26651 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
26652- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
26653+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
26654 else
26655 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
26656 }
26657diff --git a/arch/x86/kernel/preempt.S b/arch/x86/kernel/preempt.S
26658index ca7f0d5..8996469 100644
26659--- a/arch/x86/kernel/preempt.S
26660+++ b/arch/x86/kernel/preempt.S
26661@@ -3,12 +3,14 @@
26662 #include <asm/dwarf2.h>
26663 #include <asm/asm.h>
26664 #include <asm/calling.h>
26665+#include <asm/alternative-asm.h>
26666
26667 ENTRY(___preempt_schedule)
26668 CFI_STARTPROC
26669 SAVE_ALL
26670 call preempt_schedule
26671 RESTORE_ALL
26672+ pax_force_retaddr
26673 ret
26674 CFI_ENDPROC
26675
26676@@ -19,6 +21,7 @@ ENTRY(___preempt_schedule_context)
26677 SAVE_ALL
26678 call preempt_schedule_context
26679 RESTORE_ALL
26680+ pax_force_retaddr
26681 ret
26682 CFI_ENDPROC
26683
26684diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
26685index 4505e2a..ae28b0d 100644
26686--- a/arch/x86/kernel/process.c
26687+++ b/arch/x86/kernel/process.c
26688@@ -36,7 +36,8 @@
26689 * section. Since TSS's are completely CPU-local, we want them
26690 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
26691 */
26692-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
26693+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
26694+EXPORT_SYMBOL(init_tss);
26695
26696 #ifdef CONFIG_X86_64
26697 static DEFINE_PER_CPU(unsigned char, is_idle);
26698@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
26699 task_xstate_cachep =
26700 kmem_cache_create("task_xstate", xstate_size,
26701 __alignof__(union thread_xstate),
26702- SLAB_PANIC | SLAB_NOTRACK, NULL);
26703+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
26704 }
26705
26706 /*
26707@@ -105,7 +106,7 @@ void exit_thread(void)
26708 unsigned long *bp = t->io_bitmap_ptr;
26709
26710 if (bp) {
26711- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
26712+ struct tss_struct *tss = init_tss + get_cpu();
26713
26714 t->io_bitmap_ptr = NULL;
26715 clear_thread_flag(TIF_IO_BITMAP);
26716@@ -125,6 +126,9 @@ void flush_thread(void)
26717 {
26718 struct task_struct *tsk = current;
26719
26720+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
26721+ loadsegment(gs, 0);
26722+#endif
26723 flush_ptrace_hw_breakpoint(tsk);
26724 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
26725 drop_init_fpu(tsk);
26726@@ -271,7 +275,7 @@ static void __exit_idle(void)
26727 void exit_idle(void)
26728 {
26729 /* idle loop has pid 0 */
26730- if (current->pid)
26731+ if (task_pid_nr(current))
26732 return;
26733 __exit_idle();
26734 }
26735@@ -324,7 +328,7 @@ bool xen_set_default_idle(void)
26736 return ret;
26737 }
26738 #endif
26739-void stop_this_cpu(void *dummy)
26740+__noreturn void stop_this_cpu(void *dummy)
26741 {
26742 local_irq_disable();
26743 /*
26744@@ -453,16 +457,37 @@ static int __init idle_setup(char *str)
26745 }
26746 early_param("idle", idle_setup);
26747
26748-unsigned long arch_align_stack(unsigned long sp)
26749+#ifdef CONFIG_PAX_RANDKSTACK
26750+void pax_randomize_kstack(struct pt_regs *regs)
26751 {
26752- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
26753- sp -= get_random_int() % 8192;
26754- return sp & ~0xf;
26755-}
26756+ struct thread_struct *thread = &current->thread;
26757+ unsigned long time;
26758
26759-unsigned long arch_randomize_brk(struct mm_struct *mm)
26760-{
26761- unsigned long range_end = mm->brk + 0x02000000;
26762- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
26763-}
26764+ if (!randomize_va_space)
26765+ return;
26766+
26767+ if (v8086_mode(regs))
26768+ return;
26769
26770+ rdtscl(time);
26771+
26772+ /* P4 seems to return a 0 LSB, ignore it */
26773+#ifdef CONFIG_MPENTIUM4
26774+ time &= 0x3EUL;
26775+ time <<= 2;
26776+#elif defined(CONFIG_X86_64)
26777+ time &= 0xFUL;
26778+ time <<= 4;
26779+#else
26780+ time &= 0x1FUL;
26781+ time <<= 3;
26782+#endif
26783+
26784+ thread->sp0 ^= time;
26785+ load_sp0(init_tss + smp_processor_id(), thread);
26786+
26787+#ifdef CONFIG_X86_64
26788+ this_cpu_write(kernel_stack, thread->sp0);
26789+#endif
26790+}
26791+#endif
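
pax_randomize_kstack() re-randomizes the kernel stack top by XORing masked TSC low bits into thread->sp0: 4 bits shifted into 16-byte steps on x86_64, 5 bits in 8-byte steps on most 32-bit CPUs, and a P4 variant that skips the stuck least significant bit. The mask arithmetic, worked through in isolation:

#include <stdio.h>

int main(void)
{
	unsigned long time = 0x1b7;	/* pretend rdtscl() output */
	unsigned long p4, x64, x32;

	p4  = (time & 0x3EUL) << 2;	/* P4: skip the stuck LSB, 8-byte steps */
	x64 = (time & 0xFUL)  << 4;	/* x86_64: 4 bits, 16-byte steps */
	x32 = (time & 0x1FUL) << 3;	/* other i386: 5 bits, 8-byte steps */

	printf("p4=%lu x86_64=%lu i386=%lu\n", p4, x64, x32);
	return 0;
}
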
26792diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
26793index 7bc86bb..0ea06e8 100644
26794--- a/arch/x86/kernel/process_32.c
26795+++ b/arch/x86/kernel/process_32.c
26796@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
26797 unsigned long thread_saved_pc(struct task_struct *tsk)
26798 {
26799 return ((unsigned long *)tsk->thread.sp)[3];
26800+//XXX return tsk->thread.eip;
26801 }
26802
26803 void __show_regs(struct pt_regs *regs, int all)
26804@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
26805 unsigned long sp;
26806 unsigned short ss, gs;
26807
26808- if (user_mode_vm(regs)) {
26809+ if (user_mode(regs)) {
26810 sp = regs->sp;
26811 ss = regs->ss & 0xffff;
26812- gs = get_user_gs(regs);
26813 } else {
26814 sp = kernel_stack_pointer(regs);
26815 savesegment(ss, ss);
26816- savesegment(gs, gs);
26817 }
26818+ gs = get_user_gs(regs);
26819
26820 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
26821 (u16)regs->cs, regs->ip, regs->flags,
26822- smp_processor_id());
26823+ raw_smp_processor_id());
26824 print_symbol("EIP is at %s\n", regs->ip);
26825
26826 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
26827@@ -132,20 +132,21 @@ void release_thread(struct task_struct *dead_task)
26828 int copy_thread(unsigned long clone_flags, unsigned long sp,
26829 unsigned long arg, struct task_struct *p)
26830 {
26831- struct pt_regs *childregs = task_pt_regs(p);
26832+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
26833 struct task_struct *tsk;
26834 int err;
26835
26836 p->thread.sp = (unsigned long) childregs;
26837 p->thread.sp0 = (unsigned long) (childregs+1);
26838+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
26839
26840 if (unlikely(p->flags & PF_KTHREAD)) {
26841 /* kernel thread */
26842 memset(childregs, 0, sizeof(struct pt_regs));
26843 p->thread.ip = (unsigned long) ret_from_kernel_thread;
26844- task_user_gs(p) = __KERNEL_STACK_CANARY;
26845- childregs->ds = __USER_DS;
26846- childregs->es = __USER_DS;
26847+ savesegment(gs, childregs->gs);
26848+ childregs->ds = __KERNEL_DS;
26849+ childregs->es = __KERNEL_DS;
26850 childregs->fs = __KERNEL_PERCPU;
26851 childregs->bx = sp; /* function */
26852 childregs->bp = arg;
26853@@ -252,7 +253,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26854 struct thread_struct *prev = &prev_p->thread,
26855 *next = &next_p->thread;
26856 int cpu = smp_processor_id();
26857- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26858+ struct tss_struct *tss = init_tss + cpu;
26859 fpu_switch_t fpu;
26860
26861 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
26862@@ -276,6 +277,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26863 */
26864 lazy_save_gs(prev->gs);
26865
26866+#ifdef CONFIG_PAX_MEMORY_UDEREF
26867+ __set_fs(task_thread_info(next_p)->addr_limit);
26868+#endif
26869+
26870 /*
26871 * Load the per-thread Thread-Local Storage descriptor.
26872 */
26873@@ -314,9 +319,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26874 */
26875 arch_end_context_switch(next_p);
26876
26877- this_cpu_write(kernel_stack,
26878- (unsigned long)task_stack_page(next_p) +
26879- THREAD_SIZE - KERNEL_STACK_OFFSET);
26880+ this_cpu_write(current_task, next_p);
26881+ this_cpu_write(current_tinfo, &next_p->tinfo);
26882+ this_cpu_write(kernel_stack, next->sp0);
26883
26884 /*
26885 * Restore %gs if needed (which is common)
26886@@ -326,8 +331,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26887
26888 switch_fpu_finish(next_p, fpu);
26889
26890- this_cpu_write(current_task, next_p);
26891-
26892 return prev_p;
26893 }
26894
26895@@ -357,4 +360,3 @@ unsigned long get_wchan(struct task_struct *p)
26896 } while (count++ < 16);
26897 return 0;
26898 }
26899-
26900diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
26901index ca5b02d..c0b2f6a 100644
26902--- a/arch/x86/kernel/process_64.c
26903+++ b/arch/x86/kernel/process_64.c
26904@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26905 struct pt_regs *childregs;
26906 struct task_struct *me = current;
26907
26908- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
26909+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
26910 childregs = task_pt_regs(p);
26911 p->thread.sp = (unsigned long) childregs;
26912 p->thread.usersp = me->thread.usersp;
26913+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
26914 set_tsk_thread_flag(p, TIF_FORK);
26915 p->thread.fpu_counter = 0;
26916 p->thread.io_bitmap_ptr = NULL;
26917@@ -172,6 +173,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26918 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
26919 savesegment(es, p->thread.es);
26920 savesegment(ds, p->thread.ds);
26921+ savesegment(ss, p->thread.ss);
26922+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
26923 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26924
26925 if (unlikely(p->flags & PF_KTHREAD)) {
26926@@ -280,7 +283,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26927 struct thread_struct *prev = &prev_p->thread;
26928 struct thread_struct *next = &next_p->thread;
26929 int cpu = smp_processor_id();
26930- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26931+ struct tss_struct *tss = init_tss + cpu;
26932 unsigned fsindex, gsindex;
26933 fpu_switch_t fpu;
26934
26935@@ -303,6 +306,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26936 if (unlikely(next->ds | prev->ds))
26937 loadsegment(ds, next->ds);
26938
26939+ savesegment(ss, prev->ss);
26940+ if (unlikely(next->ss != prev->ss))
26941+ loadsegment(ss, next->ss);
26942
26943 /* We must save %fs and %gs before load_TLS() because
26944 * %fs and %gs may be cleared by load_TLS().
26945@@ -362,6 +368,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26946 prev->usersp = this_cpu_read(old_rsp);
26947 this_cpu_write(old_rsp, next->usersp);
26948 this_cpu_write(current_task, next_p);
26949+ this_cpu_write(current_tinfo, &next_p->tinfo);
26950
26951 /*
26952 * If it were not for PREEMPT_ACTIVE we could guarantee that the
26953@@ -371,9 +378,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26954 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
26955 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
26956
26957- this_cpu_write(kernel_stack,
26958- (unsigned long)task_stack_page(next_p) +
26959- THREAD_SIZE - KERNEL_STACK_OFFSET);
26960+ this_cpu_write(kernel_stack, next->sp0);
26961
26962 /*
26963 * Now maybe reload the debug registers and handle I/O bitmaps
26964@@ -443,12 +448,11 @@ unsigned long get_wchan(struct task_struct *p)
26965 if (!p || p == current || p->state == TASK_RUNNING)
26966 return 0;
26967 stack = (unsigned long)task_stack_page(p);
26968- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
26969+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
26970 return 0;
26971 fp = *(u64 *)(p->thread.sp);
26972 do {
26973- if (fp < (unsigned long)stack ||
26974- fp >= (unsigned long)stack+THREAD_SIZE)
26975+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
26976 return 0;
26977 ip = *(u64 *)(fp+8);
26978 if (!in_sched_functions(ip))
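
The tightened bounds in get_wchan() above account for the 16 bytes this patch reserves at the top of the stack (sp0 = stack + THREAD_SIZE - 16). A self-contained sketch of the same saved-frame-pointer walk over a synthetic stack; THREAD_SIZE and the "interesting ip" test are stand-ins for the kernel's values:

    #include <stdint.h>
    #include <stdio.h>

    #define THREAD_SIZE 16384UL       /* assumed 16KB stack, as on x86_64 */

    /* Each frame stores the previous frame pointer at fp[0] and a return
     * address at fp[1]; every fp must stay inside
     * [stack, stack + THREAD_SIZE - 16 - 8], mirroring the hunk above. */
    static uint64_t walk(uint64_t stack, uint64_t sp)
    {
        if (sp < stack || sp > stack + THREAD_SIZE - 16 - sizeof(uint64_t))
            return 0;
        uint64_t fp = *(const uint64_t *)(uintptr_t)sp;
        for (int count = 0; count < 16; count++) {
            if (fp < stack || fp > stack + THREAD_SIZE - 16 - sizeof(uint64_t))
                return 0;
            uint64_t ip = *(const uint64_t *)(uintptr_t)(fp + 8);
            if (ip != 0)              /* stand-in for !in_sched_functions(ip) */
                return ip;
            fp = *(const uint64_t *)(uintptr_t)fp;
        }
        return 0;
    }

    int main(void)
    {
        static uint64_t stack[THREAD_SIZE / 8];
        uint64_t base = (uint64_t)(uintptr_t)stack;
        stack[100] = base + 200 * 8;          /* first saved frame pointer */
        stack[200] = 0;                       /* end of chain */
        stack[201] = 0xffffffff81000000ULL;   /* return address to report */
        printf("wchan = %#llx\n",
               (unsigned long long)walk(base, base + 100 * 8));
        return 0;
    }
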
26979diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
26980index 678c0ad..2fc2a7b 100644
26981--- a/arch/x86/kernel/ptrace.c
26982+++ b/arch/x86/kernel/ptrace.c
26983@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
26984 unsigned long sp = (unsigned long)&regs->sp;
26985 u32 *prev_esp;
26986
26987- if (context == (sp & ~(THREAD_SIZE - 1)))
26988+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
26989 return sp;
26990
26991- prev_esp = (u32 *)(context);
26992+ prev_esp = *(u32 **)(context);
26993 if (prev_esp)
26994 return (unsigned long)prev_esp;
26995
26996@@ -452,6 +452,20 @@ static int putreg(struct task_struct *child,
26997 if (child->thread.gs != value)
26998 return do_arch_prctl(child, ARCH_SET_GS, value);
26999 return 0;
27000+
27001+ case offsetof(struct user_regs_struct,ip):
27002+ /*
27003+ * Protect against any attempt to set ip to an
27004+ * impossible address. There are dragons lurking if the
27005+ * address is noncanonical. (This explicitly allows
27006+ * setting ip to TASK_SIZE_MAX, because user code can do
27007+ * that all by itself by running off the end of its
27008+ * address space.
27009+ */
27010+ if (value > TASK_SIZE_MAX)
27011+ return -EIO;
27012+ break;
27013+
27014 #endif
27015 }
27016
27017@@ -588,7 +602,7 @@ static void ptrace_triggered(struct perf_event *bp,
27018 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
27019 {
27020 int i;
27021- int dr7 = 0;
27022+ unsigned long dr7 = 0;
27023 struct arch_hw_breakpoint *info;
27024
27025 for (i = 0; i < HBP_NUM; i++) {
27026@@ -822,7 +836,7 @@ long arch_ptrace(struct task_struct *child, long request,
27027 unsigned long addr, unsigned long data)
27028 {
27029 int ret;
27030- unsigned long __user *datap = (unsigned long __user *)data;
27031+ unsigned long __user *datap = (__force unsigned long __user *)data;
27032
27033 switch (request) {
27034 /* read the word at location addr in the USER area. */
27035@@ -907,14 +921,14 @@ long arch_ptrace(struct task_struct *child, long request,
27036 if ((int) addr < 0)
27037 return -EIO;
27038 ret = do_get_thread_area(child, addr,
27039- (struct user_desc __user *)data);
27040+ (__force struct user_desc __user *) data);
27041 break;
27042
27043 case PTRACE_SET_THREAD_AREA:
27044 if ((int) addr < 0)
27045 return -EIO;
27046 ret = do_set_thread_area(child, addr,
27047- (struct user_desc __user *)data, 0);
27048+ (__force struct user_desc __user *) data, 0);
27049 break;
27050 #endif
27051
27052@@ -1292,7 +1306,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
27053
27054 #ifdef CONFIG_X86_64
27055
27056-static struct user_regset x86_64_regsets[] __read_mostly = {
27057+static user_regset_no_const x86_64_regsets[] __read_only = {
27058 [REGSET_GENERAL] = {
27059 .core_note_type = NT_PRSTATUS,
27060 .n = sizeof(struct user_regs_struct) / sizeof(long),
27061@@ -1333,7 +1347,7 @@ static const struct user_regset_view user_x86_64_view = {
27062 #endif /* CONFIG_X86_64 */
27063
27064 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
27065-static struct user_regset x86_32_regsets[] __read_mostly = {
27066+static user_regset_no_const x86_32_regsets[] __read_only = {
27067 [REGSET_GENERAL] = {
27068 .core_note_type = NT_PRSTATUS,
27069 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
27070@@ -1386,7 +1400,7 @@ static const struct user_regset_view user_x86_32_view = {
27071 */
27072 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
27073
27074-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
27075+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
27076 {
27077 #ifdef CONFIG_X86_64
27078 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
27079@@ -1421,7 +1435,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
27080 memset(info, 0, sizeof(*info));
27081 info->si_signo = SIGTRAP;
27082 info->si_code = si_code;
27083- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
27084+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
27085 }
27086
27087 void user_single_step_siginfo(struct task_struct *tsk,
27088@@ -1450,6 +1464,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
27089 # define IS_IA32 0
27090 #endif
27091
27092+#ifdef CONFIG_GRKERNSEC_SETXID
27093+extern void gr_delayed_cred_worker(void);
27094+#endif
27095+
27096 /*
27097 * We must return the syscall number to actually look up in the table.
27098 * This can be -1L to skip running any syscall at all.
27099@@ -1460,6 +1478,11 @@ long syscall_trace_enter(struct pt_regs *regs)
27100
27101 user_exit();
27102
27103+#ifdef CONFIG_GRKERNSEC_SETXID
27104+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
27105+ gr_delayed_cred_worker();
27106+#endif
27107+
27108 /*
27109 * If we stepped into a sysenter/syscall insn, it trapped in
27110 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
27111@@ -1515,6 +1538,11 @@ void syscall_trace_leave(struct pt_regs *regs)
27112 */
27113 user_exit();
27114
27115+#ifdef CONFIG_GRKERNSEC_SETXID
27116+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
27117+ gr_delayed_cred_worker();
27118+#endif
27119+
27120 audit_syscall_exit(regs);
27121
27122 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
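
Besides the grsec hooks, the ptrace hunks add a guard in putreg() so a tracer cannot plant a noncanonical ip that would fault on the way back to userspace. The check in isolation (the TASK_SIZE_MAX value here assumes 47-bit user addresses and 4KB pages):

    #include <stdint.h>
    #include <stdio.h>
    #include <errno.h>

    #define PAGE_SIZE     4096ULL
    #define TASK_SIZE_MAX ((1ULL << 47) - PAGE_SIZE)

    /* Mirror of the putreg() guard above: refuse any ip beyond the end
     * of user address space; TASK_SIZE_MAX itself stays allowed, since
     * user code can reach it by running off the end of its mappings. */
    static int check_ip(uint64_t value)
    {
        if (value > TASK_SIZE_MAX)
            return -EIO;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_ip(0x400000));              /* typical text: 0 */
        printf("%d\n", check_ip(TASK_SIZE_MAX));         /* allowed edge: 0 */
        printf("%d\n", check_ip(0xffff800000000000ULL)); /* noncanonical: -EIO */
        return 0;
    }
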
27123diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
27124index 2f355d2..e75ed0a 100644
27125--- a/arch/x86/kernel/pvclock.c
27126+++ b/arch/x86/kernel/pvclock.c
27127@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
27128 reset_hung_task_detector();
27129 }
27130
27131-static atomic64_t last_value = ATOMIC64_INIT(0);
27132+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
27133
27134 void pvclock_resume(void)
27135 {
27136- atomic64_set(&last_value, 0);
27137+ atomic64_set_unchecked(&last_value, 0);
27138 }
27139
27140 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
27141@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
27142 * updating at the same time, and one of them could be slightly behind,
27143 * making the assumption that last_value always go forward fail to hold.
27144 */
27145- last = atomic64_read(&last_value);
27146+ last = atomic64_read_unchecked(&last_value);
27147 do {
27148 if (ret < last)
27149 return last;
27150- last = atomic64_cmpxchg(&last_value, last, ret);
27151+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
27152 } while (unlikely(last != ret));
27153
27154 return ret;
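
The atomic64_unchecked_t conversion above only opts last_value out of PAX_REFCOUNT's overflow trap; the monotonicity protocol itself is untouched. A C11 sketch of that compare-and-swap clamp, which keeps readers from ever seeing the paravirt clock run backwards when per-CPU timestamps disagree slightly:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t last_value;   /* mirrors last_value above */

    /* Publish ret only if it is not older than what another CPU already
     * published; otherwise return that later value instead. */
    static uint64_t monotonic(uint64_t ret)
    {
        uint64_t last = atomic_load(&last_value);
        do {
            if (ret < last)
                return last;   /* someone already published a later time */
        } while (!atomic_compare_exchange_weak(&last_value, &last, ret));
        return ret;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)monotonic(100)); /* 100 */
        printf("%llu\n", (unsigned long long)monotonic(90));  /* clamped: 100 */
        printf("%llu\n", (unsigned long long)monotonic(110)); /* 110 */
        return 0;
    }
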
27155diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
27156index 52b1157..c6e67c4 100644
27157--- a/arch/x86/kernel/reboot.c
27158+++ b/arch/x86/kernel/reboot.c
27159@@ -68,6 +68,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
27160
27161 void __noreturn machine_real_restart(unsigned int type)
27162 {
27163+
27164+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
27165+ struct desc_struct *gdt;
27166+#endif
27167+
27168 local_irq_disable();
27169
27170 /*
27171@@ -95,7 +100,29 @@ void __noreturn machine_real_restart(unsigned int type)
27172
27173 /* Jump to the identity-mapped low memory code */
27174 #ifdef CONFIG_X86_32
27175- asm volatile("jmpl *%0" : :
27176+
27177+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
27178+ gdt = get_cpu_gdt_table(smp_processor_id());
27179+ pax_open_kernel();
27180+#ifdef CONFIG_PAX_MEMORY_UDEREF
27181+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
27182+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
27183+ loadsegment(ds, __KERNEL_DS);
27184+ loadsegment(es, __KERNEL_DS);
27185+ loadsegment(ss, __KERNEL_DS);
27186+#endif
27187+#ifdef CONFIG_PAX_KERNEXEC
27188+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
27189+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
27190+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
27191+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
27192+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
27193+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
27194+#endif
27195+ pax_close_kernel();
27196+#endif
27197+
27198+ asm volatile("ljmpl *%0" : :
27199 "rm" (real_mode_header->machine_real_restart_asm),
27200 "a" (type));
27201 #else
27202@@ -486,7 +513,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
27203 * This means that this function can never return, it can misbehave
27204 * by not rebooting properly and hanging.
27205 */
27206-static void native_machine_emergency_restart(void)
27207+static void __noreturn native_machine_emergency_restart(void)
27208 {
27209 int i;
27210 int attempt = 0;
27211@@ -610,13 +637,13 @@ void native_machine_shutdown(void)
27212 #endif
27213 }
27214
27215-static void __machine_emergency_restart(int emergency)
27216+static void __noreturn __machine_emergency_restart(int emergency)
27217 {
27218 reboot_emergency = emergency;
27219 machine_ops.emergency_restart();
27220 }
27221
27222-static void native_machine_restart(char *__unused)
27223+static void __noreturn native_machine_restart(char *__unused)
27224 {
27225 pr_notice("machine restart\n");
27226
27227@@ -625,7 +652,7 @@ static void native_machine_restart(char *__unused)
27228 __machine_emergency_restart(0);
27229 }
27230
27231-static void native_machine_halt(void)
27232+static void __noreturn native_machine_halt(void)
27233 {
27234 /* Stop other cpus and apics */
27235 machine_shutdown();
27236@@ -635,7 +662,7 @@ static void native_machine_halt(void)
27237 stop_this_cpu(NULL);
27238 }
27239
27240-static void native_machine_power_off(void)
27241+static void __noreturn native_machine_power_off(void)
27242 {
27243 if (pm_power_off) {
27244 if (!reboot_force)
27245@@ -644,9 +671,10 @@ static void native_machine_power_off(void)
27246 }
27247 /* A fallback in case there is no PM info available */
27248 tboot_shutdown(TB_SHUTDOWN_HALT);
27249+ unreachable();
27250 }
27251
27252-struct machine_ops machine_ops = {
27253+struct machine_ops machine_ops __read_only = {
27254 .power_off = native_machine_power_off,
27255 .shutdown = native_machine_shutdown,
27256 .emergency_restart = native_machine_emergency_restart,
27257diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
27258index c8e41e9..64049ef 100644
27259--- a/arch/x86/kernel/reboot_fixups_32.c
27260+++ b/arch/x86/kernel/reboot_fixups_32.c
27261@@ -57,7 +57,7 @@ struct device_fixup {
27262 unsigned int vendor;
27263 unsigned int device;
27264 void (*reboot_fixup)(struct pci_dev *);
27265-};
27266+} __do_const;
27267
27268 /*
27269 * PCI ids solely used for fixups_table go here
27270diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
27271index 3fd2c69..a444264 100644
27272--- a/arch/x86/kernel/relocate_kernel_64.S
27273+++ b/arch/x86/kernel/relocate_kernel_64.S
27274@@ -96,8 +96,7 @@ relocate_kernel:
27275
27276 /* jump to identity mapped page */
27277 addq $(identity_mapped - relocate_kernel), %r8
27278- pushq %r8
27279- ret
27280+ jmp *%r8
27281
27282 identity_mapped:
27283 /* set return address to 0 if not preserving context */
27284diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27285index 78a0e62..5c2e510 100644
27286--- a/arch/x86/kernel/setup.c
27287+++ b/arch/x86/kernel/setup.c
27288@@ -110,6 +110,7 @@
27289 #include <asm/mce.h>
27290 #include <asm/alternative.h>
27291 #include <asm/prom.h>
27292+#include <asm/boot.h>
27293
27294 /*
27295 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27296@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
27297 #endif
27298
27299
27300-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27301-__visible unsigned long mmu_cr4_features;
27302+#ifdef CONFIG_X86_64
27303+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27304+#elif defined(CONFIG_X86_PAE)
27305+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27306 #else
27307-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27308+__visible unsigned long mmu_cr4_features __read_only;
27309 #endif
27310
27311+void set_in_cr4(unsigned long mask)
27312+{
27313+ unsigned long cr4 = read_cr4();
27314+
27315+ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
27316+ return;
27317+
27318+ pax_open_kernel();
27319+ mmu_cr4_features |= mask;
27320+ pax_close_kernel();
27321+
27322+ if (trampoline_cr4_features)
27323+ *trampoline_cr4_features = mmu_cr4_features;
27324+ cr4 |= mask;
27325+ write_cr4(cr4);
27326+}
27327+EXPORT_SYMBOL(set_in_cr4);
27328+
27329+void clear_in_cr4(unsigned long mask)
27330+{
27331+ unsigned long cr4 = read_cr4();
27332+
27333+ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
27334+ return;
27335+
27336+ pax_open_kernel();
27337+ mmu_cr4_features &= ~mask;
27338+ pax_close_kernel();
27339+
27340+ if (trampoline_cr4_features)
27341+ *trampoline_cr4_features = mmu_cr4_features;
27342+ cr4 &= ~mask;
27343+ write_cr4(cr4);
27344+}
27345+EXPORT_SYMBOL(clear_in_cr4);
27346+
27347 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27348 int bootloader_type, bootloader_version;
27349
27350@@ -772,7 +811,7 @@ static void __init trim_bios_range(void)
27351 * area (640->1Mb) as ram even though it is not.
27352 * take them out.
27353 */
27354- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27355+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27356
27357 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27358 }
27359@@ -780,7 +819,7 @@ static void __init trim_bios_range(void)
27360 /* called before trim_bios_range() to spare extra sanitize */
27361 static void __init e820_add_kernel_range(void)
27362 {
27363- u64 start = __pa_symbol(_text);
27364+ u64 start = __pa_symbol(ktla_ktva(_text));
27365 u64 size = __pa_symbol(_end) - start;
27366
27367 /*
27368@@ -856,8 +895,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27369
27370 void __init setup_arch(char **cmdline_p)
27371 {
27372+#ifdef CONFIG_X86_32
27373+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27374+#else
27375 memblock_reserve(__pa_symbol(_text),
27376 (unsigned long)__bss_stop - (unsigned long)_text);
27377+#endif
27378
27379 early_reserve_initrd();
27380
27381@@ -946,14 +989,14 @@ void __init setup_arch(char **cmdline_p)
27382
27383 if (!boot_params.hdr.root_flags)
27384 root_mountflags &= ~MS_RDONLY;
27385- init_mm.start_code = (unsigned long) _text;
27386- init_mm.end_code = (unsigned long) _etext;
27387+ init_mm.start_code = ktla_ktva((unsigned long) _text);
27388+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
27389 init_mm.end_data = (unsigned long) _edata;
27390 init_mm.brk = _brk_end;
27391
27392- code_resource.start = __pa_symbol(_text);
27393- code_resource.end = __pa_symbol(_etext)-1;
27394- data_resource.start = __pa_symbol(_etext);
27395+ code_resource.start = __pa_symbol(ktla_ktva(_text));
27396+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27397+ data_resource.start = __pa_symbol(_sdata);
27398 data_resource.end = __pa_symbol(_edata)-1;
27399 bss_resource.start = __pa_symbol(__bss_start);
27400 bss_resource.end = __pa_symbol(__bss_stop)-1;
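
The new set_in_cr4()/clear_in_cr4() above keep the mmu_cr4_features shadow in __read_only data and open a short write window around each update with pax_open_kernel()/pax_close_kernel(). A user-space analogy using mprotect() on a page-backed variable (the 4KB page size and all helper names are assumptions of this sketch, not the kernel mechanism itself):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    static unsigned long *cr4_shadow;     /* lives in a read-only page */

    static void open_window(void)  { mprotect(cr4_shadow, 4096, PROT_READ | PROT_WRITE); }
    static void close_window(void) { mprotect(cr4_shadow, 4096, PROT_READ); }

    /* Analogue of set_in_cr4(): flip bits in the protected shadow only
     * while the window is open (the kernel also writes CR4 itself). */
    static void set_in_shadow(unsigned long mask)
    {
        open_window();
        *cr4_shadow |= mask;
        close_window();
    }

    int main(void)
    {
        cr4_shadow = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (cr4_shadow == MAP_FAILED)
            return 1;
        *cr4_shadow = 0;
        mprotect(cr4_shadow, 4096, PROT_READ);

        set_in_shadow(1UL << 5);          /* e.g. X86_CR4_PAE */
        printf("shadow = %#lx\n", *cr4_shadow);
        return 0;
    }
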
27401diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27402index 5cdff03..80fa283 100644
27403--- a/arch/x86/kernel/setup_percpu.c
27404+++ b/arch/x86/kernel/setup_percpu.c
27405@@ -21,19 +21,17 @@
27406 #include <asm/cpu.h>
27407 #include <asm/stackprotector.h>
27408
27409-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27410+#ifdef CONFIG_SMP
27411+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27412 EXPORT_PER_CPU_SYMBOL(cpu_number);
27413+#endif
27414
27415-#ifdef CONFIG_X86_64
27416 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27417-#else
27418-#define BOOT_PERCPU_OFFSET 0
27419-#endif
27420
27421 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27422 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27423
27424-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27425+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27426 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27427 };
27428 EXPORT_SYMBOL(__per_cpu_offset);
27429@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27430 {
27431 #ifdef CONFIG_NEED_MULTIPLE_NODES
27432 pg_data_t *last = NULL;
27433- unsigned int cpu;
27434+ int cpu;
27435
27436 for_each_possible_cpu(cpu) {
27437 int node = early_cpu_to_node(cpu);
27438@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27439 {
27440 #ifdef CONFIG_X86_32
27441 struct desc_struct gdt;
27442+ unsigned long base = per_cpu_offset(cpu);
27443
27444- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27445- 0x2 | DESCTYPE_S, 0x8);
27446- gdt.s = 1;
27447+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27448+ 0x83 | DESCTYPE_S, 0xC);
27449 write_gdt_entry(get_cpu_gdt_table(cpu),
27450 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27451 #endif
27452@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27453 /* alrighty, percpu areas up and running */
27454 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27455 for_each_possible_cpu(cpu) {
27456+#ifdef CONFIG_CC_STACKPROTECTOR
27457+#ifdef CONFIG_X86_32
27458+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
27459+#endif
27460+#endif
27461 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27462 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27463 per_cpu(cpu_number, cpu) = cpu;
27464@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27465 */
27466 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27467 #endif
27468+#ifdef CONFIG_CC_STACKPROTECTOR
27469+#ifdef CONFIG_X86_32
27470+ if (!cpu)
27471+ per_cpu(stack_canary.canary, cpu) = canary;
27472+#endif
27473+#endif
27474 /*
27475 * Up to this point, the boot CPU has been using .init.data
27476 * area. Reload any changed state for the boot CPU.
27477diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27478index 2851d63..83bf567 100644
27479--- a/arch/x86/kernel/signal.c
27480+++ b/arch/x86/kernel/signal.c
27481@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
27482 * Align the stack pointer according to the i386 ABI,
27483 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27484 */
27485- sp = ((sp + 4) & -16ul) - 4;
27486+ sp = ((sp - 12) & -16ul) - 4;
27487 #else /* !CONFIG_X86_32 */
27488 sp = round_down(sp, 16) - 8;
27489 #endif
27490@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27491 }
27492
27493 if (current->mm->context.vdso)
27494- restorer = current->mm->context.vdso +
27495- selected_vdso32->sym___kernel_sigreturn;
27496+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
27497 else
27498- restorer = &frame->retcode;
27499+ restorer = (void __user *)&frame->retcode;
27500 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27501 restorer = ksig->ka.sa.sa_restorer;
27502
27503@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27504 * reasons and because gdb uses it as a signature to notice
27505 * signal handler stack frames.
27506 */
27507- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
27508+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
27509
27510 if (err)
27511 return -EFAULT;
27512@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27513 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
27514
27515 /* Set up to return from userspace. */
27516- restorer = current->mm->context.vdso +
27517- selected_vdso32->sym___kernel_rt_sigreturn;
27518+ if (current->mm->context.vdso)
27519+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
27520+ else
27521+ restorer = (void __user *)&frame->retcode;
27522 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27523 restorer = ksig->ka.sa.sa_restorer;
27524 put_user_ex(restorer, &frame->pretcode);
27525@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27526 * reasons and because gdb uses it as a signature to notice
27527 * signal handler stack frames.
27528 */
27529- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
27530+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
27531 } put_user_catch(err);
27532
27533 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
27534@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27535 {
27536 int usig = signr_convert(ksig->sig);
27537 sigset_t *set = sigmask_to_save();
27538- compat_sigset_t *cset = (compat_sigset_t *) set;
27539+ sigset_t sigcopy;
27540+ compat_sigset_t *cset;
27541+
27542+ sigcopy = *set;
27543+
27544+ cset = (compat_sigset_t *) &sigcopy;
27545
27546 /* Set up the stack frame */
27547 if (is_ia32_frame()) {
27548@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27549 } else if (is_x32_frame()) {
27550 return x32_setup_rt_frame(ksig, cset, regs);
27551 } else {
27552- return __setup_rt_frame(ksig->sig, ksig, set, regs);
27553+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
27554 }
27555 }
27556
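
The align_sigframe() change above swaps ((sp + 4) & -16ul) - 4 for ((sp - 12) & -16ul) - 4. Since sp - 12 = (sp + 4) - 16, the new expression yields exactly 16 bytes less than the old one, so it still satisfies the i386 ABI rule ((sp + 4) & 15) == 0 but can never hand back sp unchanged the way the old form could when sp + 4 was already aligned. A quick check over a few illustrative stack pointers:

    #include <stdio.h>

    int main(void)
    {
        for (unsigned long sp = 0xbffff3f0; sp < 0xbffff400; sp += 4) {
            unsigned long old_sp = ((sp + 4) & -16ul) - 4;
            unsigned long new_sp = ((sp - 12) & -16ul) - 4;
            printf("sp=%#lx old=%#lx new=%#lx abi_ok=%d strictly_below=%d\n",
                   sp, old_sp, new_sp,
                   ((new_sp + 4) & 15) == 0, new_sp < sp);
        }
        return 0;
    }
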
27557diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
27558index be8e1bd..a3d93fa 100644
27559--- a/arch/x86/kernel/smp.c
27560+++ b/arch/x86/kernel/smp.c
27561@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
27562
27563 __setup("nonmi_ipi", nonmi_ipi_setup);
27564
27565-struct smp_ops smp_ops = {
27566+struct smp_ops smp_ops __read_only = {
27567 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
27568 .smp_prepare_cpus = native_smp_prepare_cpus,
27569 .smp_cpus_done = native_smp_cpus_done,
27570diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
27571index 5492798..a3bd4f2 100644
27572--- a/arch/x86/kernel/smpboot.c
27573+++ b/arch/x86/kernel/smpboot.c
27574@@ -230,14 +230,17 @@ static void notrace start_secondary(void *unused)
27575
27576 enable_start_cpu0 = 0;
27577
27578-#ifdef CONFIG_X86_32
27579+ /* otherwise gcc will move up smp_processor_id before the cpu_init */
27580+ barrier();
27581+
27582 /* switch away from the initial page table */
27583+#ifdef CONFIG_PAX_PER_CPU_PGD
27584+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
27585+#else
27586 load_cr3(swapper_pg_dir);
27587+#endif
27588 __flush_tlb_all();
27589-#endif
27590
27591- /* otherwise gcc will move up smp_processor_id before the cpu_init */
27592- barrier();
27593 /*
27594 * Check TSC synchronization with the BP:
27595 */
27596@@ -764,8 +767,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27597 alternatives_enable_smp();
27598
27599 idle->thread.sp = (unsigned long) (((struct pt_regs *)
27600- (THREAD_SIZE + task_stack_page(idle))) - 1);
27601+ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
27602 per_cpu(current_task, cpu) = idle;
27603+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27604
27605 #ifdef CONFIG_X86_32
27606 /* Stack for startup_32 can be just as for start_secondary onwards */
27607@@ -774,10 +778,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27608 clear_tsk_thread_flag(idle, TIF_FORK);
27609 initial_gs = per_cpu_offset(cpu);
27610 #endif
27611- per_cpu(kernel_stack, cpu) =
27612- (unsigned long)task_stack_page(idle) -
27613- KERNEL_STACK_OFFSET + THREAD_SIZE;
27614+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27615+ pax_open_kernel();
27616 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
27617+ pax_close_kernel();
27618 initial_code = (unsigned long)start_secondary;
27619 stack_start = idle->thread.sp;
27620
27621@@ -923,6 +927,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
27622 /* the FPU context is blank, nobody can own it */
27623 __cpu_disable_lazy_restore(cpu);
27624
27625+#ifdef CONFIG_PAX_PER_CPU_PGD
27626+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
27627+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27628+ KERNEL_PGD_PTRS);
27629+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
27630+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27631+ KERNEL_PGD_PTRS);
27632+#endif
27633+
27634 err = do_boot_cpu(apicid, cpu, tidle);
27635 if (err) {
27636 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
27637diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
27638index 9b4d51d..5d28b58 100644
27639--- a/arch/x86/kernel/step.c
27640+++ b/arch/x86/kernel/step.c
27641@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27642 struct desc_struct *desc;
27643 unsigned long base;
27644
27645- seg &= ~7UL;
27646+ seg >>= 3;
27647
27648 mutex_lock(&child->mm->context.lock);
27649- if (unlikely((seg >> 3) >= child->mm->context.size))
27650+ if (unlikely(seg >= child->mm->context.size))
27651 addr = -1L; /* bogus selector, access would fault */
27652 else {
27653 desc = child->mm->context.ldt + seg;
27654@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27655 addr += base;
27656 }
27657 mutex_unlock(&child->mm->context.lock);
27658- }
27659+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
27660+ addr = ktla_ktva(addr);
27661
27662 return addr;
27663 }
27664@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
27665 unsigned char opcode[15];
27666 unsigned long addr = convert_ip_to_linear(child, regs);
27667
27668+ if (addr == -EINVAL)
27669+ return 0;
27670+
27671 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
27672 for (i = 0; i < copied; i++) {
27673 switch (opcode[i]) {
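
The step.c hunk switches from masking the selector's low bits (seg &= ~7UL) to shifting them out (seg >>= 3), so seg becomes a ready-to-use LDT array index rather than a byte offset. For reference, how an x86 selector decomposes (0x73 is just an example value):

    #include <stdio.h>

    /* A segment selector packs a descriptor-table index in its top 13
     * bits, a table indicator (GDT vs LDT) in bit 2, and the requested
     * privilege level in bits 0-1. */
    int main(void)
    {
        unsigned int sel   = 0x73;           /* example selector */
        unsigned int index = sel >> 3;       /* descriptor index */
        unsigned int ti    = (sel >> 2) & 1; /* 0 = GDT, 1 = LDT */
        unsigned int rpl   = sel & 3;        /* requested privilege level */
        printf("selector %#x -> index=%u ti=%u rpl=%u\n", sel, index, ti, rpl);
        return 0;
    }
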
27674diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
27675new file mode 100644
27676index 0000000..5877189
27677--- /dev/null
27678+++ b/arch/x86/kernel/sys_i386_32.c
27679@@ -0,0 +1,189 @@
27680+/*
27681+ * This file contains various random system calls that
27682+ * have a non-standard calling sequence on the Linux/i386
27683+ * platform.
27684+ */
27685+
27686+#include <linux/errno.h>
27687+#include <linux/sched.h>
27688+#include <linux/mm.h>
27689+#include <linux/fs.h>
27690+#include <linux/smp.h>
27691+#include <linux/sem.h>
27692+#include <linux/msg.h>
27693+#include <linux/shm.h>
27694+#include <linux/stat.h>
27695+#include <linux/syscalls.h>
27696+#include <linux/mman.h>
27697+#include <linux/file.h>
27698+#include <linux/utsname.h>
27699+#include <linux/ipc.h>
27700+#include <linux/elf.h>
27701+
27702+#include <linux/uaccess.h>
27703+#include <linux/unistd.h>
27704+
27705+#include <asm/syscalls.h>
27706+
27707+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
27708+{
27709+ unsigned long pax_task_size = TASK_SIZE;
27710+
27711+#ifdef CONFIG_PAX_SEGMEXEC
27712+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
27713+ pax_task_size = SEGMEXEC_TASK_SIZE;
27714+#endif
27715+
27716+ if (flags & MAP_FIXED)
27717+ if (len > pax_task_size || addr > pax_task_size - len)
27718+ return -EINVAL;
27719+
27720+ return 0;
27721+}
27722+
27723+/*
27724+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
27725+ */
27726+static unsigned long get_align_mask(void)
27727+{
27728+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
27729+ return 0;
27730+
27731+ if (!(current->flags & PF_RANDOMIZE))
27732+ return 0;
27733+
27734+ return va_align.mask;
27735+}
27736+
27737+unsigned long
27738+arch_get_unmapped_area(struct file *filp, unsigned long addr,
27739+ unsigned long len, unsigned long pgoff, unsigned long flags)
27740+{
27741+ struct mm_struct *mm = current->mm;
27742+ struct vm_area_struct *vma;
27743+ unsigned long pax_task_size = TASK_SIZE;
27744+ struct vm_unmapped_area_info info;
27745+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27746+
27747+#ifdef CONFIG_PAX_SEGMEXEC
27748+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27749+ pax_task_size = SEGMEXEC_TASK_SIZE;
27750+#endif
27751+
27752+ pax_task_size -= PAGE_SIZE;
27753+
27754+ if (len > pax_task_size)
27755+ return -ENOMEM;
27756+
27757+ if (flags & MAP_FIXED)
27758+ return addr;
27759+
27760+#ifdef CONFIG_PAX_RANDMMAP
27761+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27762+#endif
27763+
27764+ if (addr) {
27765+ addr = PAGE_ALIGN(addr);
27766+ if (pax_task_size - len >= addr) {
27767+ vma = find_vma(mm, addr);
27768+ if (check_heap_stack_gap(vma, addr, len, offset))
27769+ return addr;
27770+ }
27771+ }
27772+
27773+ info.flags = 0;
27774+ info.length = len;
27775+ info.align_mask = filp ? get_align_mask() : 0;
27776+ info.align_offset = pgoff << PAGE_SHIFT;
27777+ info.threadstack_offset = offset;
27778+
27779+#ifdef CONFIG_PAX_PAGEEXEC
27780+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
27781+ info.low_limit = 0x00110000UL;
27782+ info.high_limit = mm->start_code;
27783+
27784+#ifdef CONFIG_PAX_RANDMMAP
27785+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27786+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
27787+#endif
27788+
27789+ if (info.low_limit < info.high_limit) {
27790+ addr = vm_unmapped_area(&info);
27791+ if (!IS_ERR_VALUE(addr))
27792+ return addr;
27793+ }
27794+ } else
27795+#endif
27796+
27797+ info.low_limit = mm->mmap_base;
27798+ info.high_limit = pax_task_size;
27799+
27800+ return vm_unmapped_area(&info);
27801+}
27802+
27803+unsigned long
27804+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27805+ const unsigned long len, const unsigned long pgoff,
27806+ const unsigned long flags)
27807+{
27808+ struct vm_area_struct *vma;
27809+ struct mm_struct *mm = current->mm;
27810+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
27811+ struct vm_unmapped_area_info info;
27812+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27813+
27814+#ifdef CONFIG_PAX_SEGMEXEC
27815+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27816+ pax_task_size = SEGMEXEC_TASK_SIZE;
27817+#endif
27818+
27819+ pax_task_size -= PAGE_SIZE;
27820+
27821+ /* requested length too big for entire address space */
27822+ if (len > pax_task_size)
27823+ return -ENOMEM;
27824+
27825+ if (flags & MAP_FIXED)
27826+ return addr;
27827+
27828+#ifdef CONFIG_PAX_PAGEEXEC
27829+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
27830+ goto bottomup;
27831+#endif
27832+
27833+#ifdef CONFIG_PAX_RANDMMAP
27834+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27835+#endif
27836+
27837+ /* requesting a specific address */
27838+ if (addr) {
27839+ addr = PAGE_ALIGN(addr);
27840+ if (pax_task_size - len >= addr) {
27841+ vma = find_vma(mm, addr);
27842+ if (check_heap_stack_gap(vma, addr, len, offset))
27843+ return addr;
27844+ }
27845+ }
27846+
27847+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
27848+ info.length = len;
27849+ info.low_limit = PAGE_SIZE;
27850+ info.high_limit = mm->mmap_base;
27851+ info.align_mask = filp ? get_align_mask() : 0;
27852+ info.align_offset = pgoff << PAGE_SHIFT;
27853+ info.threadstack_offset = offset;
27854+
27855+ addr = vm_unmapped_area(&info);
27856+ if (!(addr & ~PAGE_MASK))
27857+ return addr;
27858+ VM_BUG_ON(addr != -ENOMEM);
27859+
27860+bottomup:
27861+ /*
27862+ * A failed mmap() very likely causes application failure,
27863+ * so fall back to the bottom-up function here. This scenario
27864+ * can happen with large stack limits and large mmap()
27865+ * allocations.
27866+ */
27867+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
27868+}
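
The new sys_i386_32.c validates mapping hints as pax_task_size - len >= addr rather than addr + len <= pax_task_size: once len is known to be within the task size, the subtraction form cannot wrap, while on a 32-bit kernel the addition can. A minimal illustration of the difference, using uint32_t to model 32-bit unsigned long (the TASK_SIZE value is assumed):

    #include <stdint.h>
    #include <stdio.h>

    /* Overflow-safe hint check, as used twice in the file above. */
    static int hint_ok(uint32_t addr, uint32_t len, uint32_t task_size)
    {
        if (len > task_size)
            return 0;
        return task_size - len >= addr;
    }

    int main(void)
    {
        uint32_t task = 0xc0000000u - 4096;  /* 32-bit TASK_SIZE minus a page */
        uint32_t addr = 0xfffff000u, len = 0x2000u;
        printf("naive addr+len<=task: %d (wrapped sum %#x)\n",
               (uint32_t)(addr + len) <= task, addr + len);  /* bogus accept */
        printf("safe  task-len>=addr: %d\n",
               hint_ok(addr, len, task));                    /* rejected */
        return 0;
    }
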
27869diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
27870index 30277e2..5664a29 100644
27871--- a/arch/x86/kernel/sys_x86_64.c
27872+++ b/arch/x86/kernel/sys_x86_64.c
27873@@ -81,8 +81,8 @@ out:
27874 return error;
27875 }
27876
27877-static void find_start_end(unsigned long flags, unsigned long *begin,
27878- unsigned long *end)
27879+static void find_start_end(struct mm_struct *mm, unsigned long flags,
27880+ unsigned long *begin, unsigned long *end)
27881 {
27882 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
27883 unsigned long new_begin;
27884@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
27885 *begin = new_begin;
27886 }
27887 } else {
27888- *begin = current->mm->mmap_legacy_base;
27889+ *begin = mm->mmap_legacy_base;
27890 *end = TASK_SIZE;
27891 }
27892 }
27893@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27894 struct vm_area_struct *vma;
27895 struct vm_unmapped_area_info info;
27896 unsigned long begin, end;
27897+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27898
27899 if (flags & MAP_FIXED)
27900 return addr;
27901
27902- find_start_end(flags, &begin, &end);
27903+ find_start_end(mm, flags, &begin, &end);
27904
27905 if (len > end)
27906 return -ENOMEM;
27907
27908+#ifdef CONFIG_PAX_RANDMMAP
27909+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27910+#endif
27911+
27912 if (addr) {
27913 addr = PAGE_ALIGN(addr);
27914 vma = find_vma(mm, addr);
27915- if (end - len >= addr &&
27916- (!vma || addr + len <= vma->vm_start))
27917+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27918 return addr;
27919 }
27920
27921@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27922 info.high_limit = end;
27923 info.align_mask = filp ? get_align_mask() : 0;
27924 info.align_offset = pgoff << PAGE_SHIFT;
27925+ info.threadstack_offset = offset;
27926 return vm_unmapped_area(&info);
27927 }
27928
27929@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27930 struct mm_struct *mm = current->mm;
27931 unsigned long addr = addr0;
27932 struct vm_unmapped_area_info info;
27933+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27934
27935 /* requested length too big for entire address space */
27936 if (len > TASK_SIZE)
27937@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27938 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
27939 goto bottomup;
27940
27941+#ifdef CONFIG_PAX_RANDMMAP
27942+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27943+#endif
27944+
27945 /* requesting a specific address */
27946 if (addr) {
27947 addr = PAGE_ALIGN(addr);
27948 vma = find_vma(mm, addr);
27949- if (TASK_SIZE - len >= addr &&
27950- (!vma || addr + len <= vma->vm_start))
27951+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27952 return addr;
27953 }
27954
27955@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27956 info.high_limit = mm->mmap_base;
27957 info.align_mask = filp ? get_align_mask() : 0;
27958 info.align_offset = pgoff << PAGE_SHIFT;
27959+ info.threadstack_offset = offset;
27960 addr = vm_unmapped_area(&info);
27961 if (!(addr & ~PAGE_MASK))
27962 return addr;
27963diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
27964index 91a4496..bb87552 100644
27965--- a/arch/x86/kernel/tboot.c
27966+++ b/arch/x86/kernel/tboot.c
27967@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
27968
27969 void tboot_shutdown(u32 shutdown_type)
27970 {
27971- void (*shutdown)(void);
27972+ void (* __noreturn shutdown)(void);
27973
27974 if (!tboot_enabled())
27975 return;
27976@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
27977
27978 switch_to_tboot_pt();
27979
27980- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
27981+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
27982 shutdown();
27983
27984 /* should not reach here */
27985@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
27986 return -ENODEV;
27987 }
27988
27989-static atomic_t ap_wfs_count;
27990+static atomic_unchecked_t ap_wfs_count;
27991
27992 static int tboot_wait_for_aps(int num_aps)
27993 {
27994@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
27995 {
27996 switch (action) {
27997 case CPU_DYING:
27998- atomic_inc(&ap_wfs_count);
27999+ atomic_inc_unchecked(&ap_wfs_count);
28000 if (num_online_cpus() == 1)
28001- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
28002+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
28003 return NOTIFY_BAD;
28004 break;
28005 }
28006@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
28007
28008 tboot_create_trampoline();
28009
28010- atomic_set(&ap_wfs_count, 0);
28011+ atomic_set_unchecked(&ap_wfs_count, 0);
28012 register_hotcpu_notifier(&tboot_cpu_notifier);
28013
28014 #ifdef CONFIG_DEBUG_FS
28015diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
28016index bf7ef5c..59d0ac9 100644
28017--- a/arch/x86/kernel/time.c
28018+++ b/arch/x86/kernel/time.c
28019@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
28020 {
28021 unsigned long pc = instruction_pointer(regs);
28022
28023- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
28024+ if (!user_mode(regs) && in_lock_functions(pc)) {
28025 #ifdef CONFIG_FRAME_POINTER
28026- return *(unsigned long *)(regs->bp + sizeof(long));
28027+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
28028 #else
28029 unsigned long *sp =
28030 (unsigned long *)kernel_stack_pointer(regs);
28031@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
28032 * or above a saved flags. Eflags has bits 22-31 zero,
28033 * kernel addresses don't.
28034 */
28035+
28036+#ifdef CONFIG_PAX_KERNEXEC
28037+ return ktla_ktva(sp[0]);
28038+#else
28039 if (sp[0] >> 22)
28040 return sp[0];
28041 if (sp[1] >> 22)
28042 return sp[1];
28043 #endif
28044+
28045+#endif
28046 }
28047 return pc;
28048 }
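
The PAX_KERNEXEC branch above replaces a heuristic worth spelling out: on 32-bit x86 a saved EFLAGS word always has bits 22-31 clear, while a kernel return address (at or above PAGE_OFFSET, typically 0xc0000000) never does, so "value >> 22" distinguishes the two candidate stack slots. In isolation:

    #include <stdio.h>

    static int looks_like_kernel_address(unsigned long value)
    {
        return (value >> 22) != 0;
    }

    int main(void)
    {
        printf("%d\n", looks_like_kernel_address(0x00000246UL)); /* EFLAGS: 0 */
        printf("%d\n", looks_like_kernel_address(0xc10032f0UL)); /* kernel text: 1 */
        return 0;
    }
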
28049diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
28050index f7fec09..9991981 100644
28051--- a/arch/x86/kernel/tls.c
28052+++ b/arch/x86/kernel/tls.c
28053@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
28054 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
28055 return -EINVAL;
28056
28057+#ifdef CONFIG_PAX_SEGMEXEC
28058+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
28059+ return -EINVAL;
28060+#endif
28061+
28062 set_tls_desc(p, idx, &info, 1);
28063
28064 return 0;
28065@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
28066
28067 if (kbuf)
28068 info = kbuf;
28069- else if (__copy_from_user(infobuf, ubuf, count))
28070+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
28071 return -EFAULT;
28072 else
28073 info = infobuf;
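
The tls.c guard above bounds a caller-supplied count before it can drive __copy_from_user() past the fixed on-stack infobuf. The same pattern in user-space form (the struct layout is a stand-in, and the kernel returns -EFAULT rather than -1):

    #include <stdio.h>
    #include <string.h>

    #define GDT_ENTRY_TLS_ENTRIES 3
    struct user_desc { unsigned int a, b, c, d; };   /* stand-in layout */

    /* Reject an oversized count before copying into the fixed buffer. */
    static int copy_tls_args(struct user_desc *dst, size_t buf_size,
                             const void *src, size_t count)
    {
        if (count > buf_size)
            return -1;
        memcpy(dst, src, count);
        return 0;
    }

    int main(void)
    {
        struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
        char payload[sizeof(infobuf) + 64] = { 0 };
        printf("%d\n", copy_tls_args(infobuf, sizeof(infobuf),
                                     payload, sizeof(infobuf))); /* 0: fits */
        printf("%d\n", copy_tls_args(infobuf, sizeof(infobuf),
                                     payload, sizeof(payload))); /* -1: too big */
        return 0;
    }
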
28074diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
28075index 1c113db..287b42e 100644
28076--- a/arch/x86/kernel/tracepoint.c
28077+++ b/arch/x86/kernel/tracepoint.c
28078@@ -9,11 +9,11 @@
28079 #include <linux/atomic.h>
28080
28081 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
28082-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
28083+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
28084 (unsigned long) trace_idt_table };
28085
28086 /* No need to be aligned, but done to keep all IDTs defined the same way. */
28087-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
28088+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
28089
28090 static int trace_irq_vector_refcount;
28091 static DEFINE_MUTEX(irq_vector_mutex);
28092diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
28093index 0d0e922..0886373 100644
28094--- a/arch/x86/kernel/traps.c
28095+++ b/arch/x86/kernel/traps.c
28096@@ -67,7 +67,7 @@
28097 #include <asm/proto.h>
28098
28099 /* No need to be aligned, but done to keep all IDTs defined the same way. */
28100-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
28101+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
28102 #else
28103 #include <asm/processor-flags.h>
28104 #include <asm/setup.h>
28105@@ -76,7 +76,7 @@ asmlinkage int system_call(void);
28106 #endif
28107
28108 /* Must be page-aligned because the real IDT is used in a fixmap. */
28109-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
28110+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
28111
28112 DECLARE_BITMAP(used_vectors, NR_VECTORS);
28113 EXPORT_SYMBOL_GPL(used_vectors);
28114@@ -108,11 +108,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
28115 }
28116
28117 static nokprobe_inline int
28118-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
28119+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
28120 struct pt_regs *regs, long error_code)
28121 {
28122 #ifdef CONFIG_X86_32
28123- if (regs->flags & X86_VM_MASK) {
28124+ if (v8086_mode(regs)) {
28125 /*
28126 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
28127 * On nmi (interrupt 2), do_trap should not be called.
28128@@ -125,12 +125,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
28129 return -1;
28130 }
28131 #endif
28132- if (!user_mode(regs)) {
28133+ if (!user_mode_novm(regs)) {
28134 if (!fixup_exception(regs)) {
28135 tsk->thread.error_code = error_code;
28136 tsk->thread.trap_nr = trapnr;
28137+
28138+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28139+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
28140+ str = "PAX: suspicious stack segment fault";
28141+#endif
28142+
28143 die(str, regs, error_code);
28144 }
28145+
28146+#ifdef CONFIG_PAX_REFCOUNT
28147+ if (trapnr == X86_TRAP_OF)
28148+ pax_report_refcount_overflow(regs);
28149+#endif
28150+
28151 return 0;
28152 }
28153
28154@@ -169,7 +181,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
28155 }
28156
28157 static void
28158-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28159+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
28160 long error_code, siginfo_t *info)
28161 {
28162 struct task_struct *tsk = current;
28163@@ -193,7 +205,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28164 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
28165 printk_ratelimit()) {
28166 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
28167- tsk->comm, tsk->pid, str,
28168+ tsk->comm, task_pid_nr(tsk), str,
28169 regs->ip, regs->sp, error_code);
28170 print_vma_addr(" in ", regs->ip);
28171 pr_cont("\n");
28172@@ -266,6 +278,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
28173 tsk->thread.error_code = error_code;
28174 tsk->thread.trap_nr = X86_TRAP_DF;
28175
28176+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
28177+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
28178+ die("grsec: kernel stack overflow detected", regs, error_code);
28179+#endif
28180+
28181 #ifdef CONFIG_DOUBLEFAULT
28182 df_debug(regs, error_code);
28183 #endif
28184@@ -288,7 +305,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
28185 conditional_sti(regs);
28186
28187 #ifdef CONFIG_X86_32
28188- if (regs->flags & X86_VM_MASK) {
28189+ if (v8086_mode(regs)) {
28190 local_irq_enable();
28191 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
28192 goto exit;
28193@@ -296,18 +313,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
28194 #endif
28195
28196 tsk = current;
28197- if (!user_mode(regs)) {
28198+ if (!user_mode_novm(regs)) {
28199 if (fixup_exception(regs))
28200 goto exit;
28201
28202 tsk->thread.error_code = error_code;
28203 tsk->thread.trap_nr = X86_TRAP_GP;
28204 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
28205- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
28206+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
28207+
28208+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28209+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
28210+ die("PAX: suspicious general protection fault", regs, error_code);
28211+ else
28212+#endif
28213+
28214 die("general protection fault", regs, error_code);
28215+ }
28216 goto exit;
28217 }
28218
28219+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28220+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
28221+ struct mm_struct *mm = tsk->mm;
28222+ unsigned long limit;
28223+
28224+ down_write(&mm->mmap_sem);
28225+ limit = mm->context.user_cs_limit;
28226+ if (limit < TASK_SIZE) {
28227+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
28228+ up_write(&mm->mmap_sem);
28229+ return;
28230+ }
28231+ up_write(&mm->mmap_sem);
28232+ }
28233+#endif
28234+
28235 tsk->thread.error_code = error_code;
28236 tsk->thread.trap_nr = X86_TRAP_GP;
28237
28238@@ -481,7 +522,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28239 /* It's safe to allow irq's after DR6 has been saved */
28240 preempt_conditional_sti(regs);
28241
28242- if (regs->flags & X86_VM_MASK) {
28243+ if (v8086_mode(regs)) {
28244 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
28245 X86_TRAP_DB);
28246 preempt_conditional_cli(regs);
28247@@ -496,7 +537,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28248 * We already checked v86 mode above, so we can check for kernel mode
28249 * by just checking the CPL of CS.
28250 */
28251- if ((dr6 & DR_STEP) && !user_mode(regs)) {
28252+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
28253 tsk->thread.debugreg6 &= ~DR_STEP;
28254 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
28255 regs->flags &= ~X86_EFLAGS_TF;
28256@@ -529,7 +570,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
28257 return;
28258 conditional_sti(regs);
28259
28260- if (!user_mode_vm(regs))
28261+ if (!user_mode(regs))
28262 {
28263 if (!fixup_exception(regs)) {
28264 task->thread.error_code = error_code;
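
The GRKERNSEC_KSTACKOVERFLOW check added to do_double_fault() above relies on tsk->stack being the lowest address of the kernel stack area: the unsigned difference tsk->stack - regs->sp is small only once sp has dropped to, or up to one page below, that base, i.e. after the stack has actually overflowed. A sketch of the predicate on its own (the base address is illustrative):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Small unsigned difference => sp ran off the low end of the stack;
     * any sp still inside the stack wraps to a huge value and passes. */
    static int is_stack_overflow(unsigned long stack_base, unsigned long sp)
    {
        return stack_base - sp <= PAGE_SIZE;
    }

    int main(void)
    {
        unsigned long base = 0x10000000UL;
        printf("%d\n", is_stack_overflow(base, base - 128));  /* 1: past base */
        printf("%d\n", is_stack_overflow(base, base + 8192)); /* 0: inside */
        return 0;
    }
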
28265diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28266index ea03031..34a5cdda 100644
28267--- a/arch/x86/kernel/tsc.c
28268+++ b/arch/x86/kernel/tsc.c
28269@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28270 */
28271 smp_wmb();
28272
28273- ACCESS_ONCE(c2n->head) = data;
28274+ ACCESS_ONCE_RW(c2n->head) = data;
28275 }
28276
28277 /*
28278diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28279index 5d1cbfe..2a21feb 100644
28280--- a/arch/x86/kernel/uprobes.c
28281+++ b/arch/x86/kernel/uprobes.c
28282@@ -845,7 +845,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
28283 int ret = NOTIFY_DONE;
28284
28285 /* We are only interested in userspace traps */
28286- if (regs && !user_mode_vm(regs))
28287+ if (regs && !user_mode(regs))
28288 return NOTIFY_DONE;
28289
28290 switch (val) {
28291@@ -919,7 +919,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28292
28293 if (nleft != rasize) {
28294 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28295- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28296+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28297
28298 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28299 }
28300diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28301index b9242ba..50c5edd 100644
28302--- a/arch/x86/kernel/verify_cpu.S
28303+++ b/arch/x86/kernel/verify_cpu.S
28304@@ -20,6 +20,7 @@
28305 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28306 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28307 * arch/x86/kernel/head_32.S: processor startup
28308+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28309 *
28310 * verify_cpu, returns the status of longmode and SSE in register %eax.
28311 * 0: Success 1: Failure
28312diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28313index e8edcf5..27f9344 100644
28314--- a/arch/x86/kernel/vm86_32.c
28315+++ b/arch/x86/kernel/vm86_32.c
28316@@ -44,6 +44,7 @@
28317 #include <linux/ptrace.h>
28318 #include <linux/audit.h>
28319 #include <linux/stddef.h>
28320+#include <linux/grsecurity.h>
28321
28322 #include <asm/uaccess.h>
28323 #include <asm/io.h>
28324@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28325 do_exit(SIGSEGV);
28326 }
28327
28328- tss = &per_cpu(init_tss, get_cpu());
28329+ tss = init_tss + get_cpu();
28330 current->thread.sp0 = current->thread.saved_sp0;
28331 current->thread.sysenter_cs = __KERNEL_CS;
28332 load_sp0(tss, &current->thread);
28333@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28334
28335 if (tsk->thread.saved_sp0)
28336 return -EPERM;
28337+
28338+#ifdef CONFIG_GRKERNSEC_VM86
28339+ if (!capable(CAP_SYS_RAWIO)) {
28340+ gr_handle_vm86();
28341+ return -EPERM;
28342+ }
28343+#endif
28344+
28345 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28346 offsetof(struct kernel_vm86_struct, vm86plus) -
28347 sizeof(info.regs));
28348@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28349 int tmp;
28350 struct vm86plus_struct __user *v86;
28351
28352+#ifdef CONFIG_GRKERNSEC_VM86
28353+ if (!capable(CAP_SYS_RAWIO)) {
28354+ gr_handle_vm86();
28355+ return -EPERM;
28356+ }
28357+#endif
28358+
28359 tsk = current;
28360 switch (cmd) {
28361 case VM86_REQUEST_IRQ:
28362@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28363 tsk->thread.saved_fs = info->regs32->fs;
28364 tsk->thread.saved_gs = get_user_gs(info->regs32);
28365
28366- tss = &per_cpu(init_tss, get_cpu());
28367+ tss = init_tss + get_cpu();
28368 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28369 if (cpu_has_sep)
28370 tsk->thread.sysenter_cs = 0;
28371@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28372 goto cannot_handle;
28373 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28374 goto cannot_handle;
28375- intr_ptr = (unsigned long __user *) (i << 2);
28376+ intr_ptr = (__force unsigned long __user *) (i << 2);
28377 if (get_user(segoffs, intr_ptr))
28378 goto cannot_handle;
28379 if ((segoffs >> 16) == BIOSSEG)
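
Both vm86 entry points above gain the same GRKERNSEC_VM86 gate: the capability check runs before any user-supplied register image is copied in, and the denial is logged via gr_handle_vm86() before -EPERM is returned. A skeletal sketch of that shape (all names here are stand-ins, not kernel API):

    #include <stdio.h>
    #include <stdbool.h>

    static bool capable_sys_rawio;   /* stand-in for capable(CAP_SYS_RAWIO) */

    static void log_denial(void) { fprintf(stderr, "denied vm86 request\n"); }

    /* Check privilege first, log, then bail -- only privileged callers
     * ever reach the code that copies and acts on vm86 state. */
    static int sys_vm86_stub(void)
    {
        if (!capable_sys_rawio) {
            log_denial();
            return -1;               /* kernel returns -EPERM */
        }
        /* ... copy_vm86_regs_from_user() and the real work go here ... */
        return 0;
    }

    int main(void)
    {
        capable_sys_rawio = false;
        printf("unprivileged: %d\n", sys_vm86_stub());
        capable_sys_rawio = true;
        printf("privileged:   %d\n", sys_vm86_stub());
        return 0;
    }
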
28380diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28381index 49edf2d..c0d1362 100644
28382--- a/arch/x86/kernel/vmlinux.lds.S
28383+++ b/arch/x86/kernel/vmlinux.lds.S
28384@@ -26,6 +26,13 @@
28385 #include <asm/page_types.h>
28386 #include <asm/cache.h>
28387 #include <asm/boot.h>
28388+#include <asm/segment.h>
28389+
28390+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28391+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28392+#else
28393+#define __KERNEL_TEXT_OFFSET 0
28394+#endif
28395
28396 #undef i386 /* in case the preprocessor is a 32bit one */
28397
28398@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28399
28400 PHDRS {
28401 text PT_LOAD FLAGS(5); /* R_E */
28402+#ifdef CONFIG_X86_32
28403+ module PT_LOAD FLAGS(5); /* R_E */
28404+#endif
28405+#ifdef CONFIG_XEN
28406+ rodata PT_LOAD FLAGS(5); /* R_E */
28407+#else
28408+ rodata PT_LOAD FLAGS(4); /* R__ */
28409+#endif
28410 data PT_LOAD FLAGS(6); /* RW_ */
28411-#ifdef CONFIG_X86_64
28412+ init.begin PT_LOAD FLAGS(6); /* RW_ */
28413 #ifdef CONFIG_SMP
28414 percpu PT_LOAD FLAGS(6); /* RW_ */
28415 #endif
28416+ text.init PT_LOAD FLAGS(5); /* R_E */
28417+ text.exit PT_LOAD FLAGS(5); /* R_E */
28418 init PT_LOAD FLAGS(7); /* RWE */
28419-#endif
28420 note PT_NOTE FLAGS(0); /* ___ */
28421 }
28422
28423 SECTIONS
28424 {
28425 #ifdef CONFIG_X86_32
28426- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28427- phys_startup_32 = startup_32 - LOAD_OFFSET;
28428+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28429 #else
28430- . = __START_KERNEL;
28431- phys_startup_64 = startup_64 - LOAD_OFFSET;
28432+ . = __START_KERNEL;
28433 #endif
28434
28435 /* Text and read-only data */
28436- .text : AT(ADDR(.text) - LOAD_OFFSET) {
28437- _text = .;
28438+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28439 /* bootstrapping code */
28440+#ifdef CONFIG_X86_32
28441+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28442+#else
28443+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28444+#endif
28445+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28446+ _text = .;
28447 HEAD_TEXT
28448 . = ALIGN(8);
28449 _stext = .;
28450@@ -104,13 +124,47 @@ SECTIONS
28451 IRQENTRY_TEXT
28452 *(.fixup)
28453 *(.gnu.warning)
28454- /* End of text section */
28455- _etext = .;
28456 } :text = 0x9090
28457
28458- NOTES :text :note
28459+ . += __KERNEL_TEXT_OFFSET;
28460
28461- EXCEPTION_TABLE(16) :text = 0x9090
28462+#ifdef CONFIG_X86_32
28463+ . = ALIGN(PAGE_SIZE);
28464+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28465+
28466+#ifdef CONFIG_PAX_KERNEXEC
28467+ MODULES_EXEC_VADDR = .;
28468+ BYTE(0)
28469+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28470+ . = ALIGN(HPAGE_SIZE) - 1;
28471+ MODULES_EXEC_END = .;
28472+#endif
28473+
28474+ } :module
28475+#endif
28476+
28477+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28478+ /* End of text section */
28479+ BYTE(0)
28480+ _etext = . - __KERNEL_TEXT_OFFSET;
28481+ }
28482+
28483+#ifdef CONFIG_X86_32
28484+ . = ALIGN(PAGE_SIZE);
28485+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28486+ . = ALIGN(PAGE_SIZE);
28487+ *(.empty_zero_page)
28488+ *(.initial_pg_fixmap)
28489+ *(.initial_pg_pmd)
28490+ *(.initial_page_table)
28491+ *(.swapper_pg_dir)
28492+ } :rodata
28493+#endif
28494+
28495+ . = ALIGN(PAGE_SIZE);
28496+ NOTES :rodata :note
28497+
28498+ EXCEPTION_TABLE(16) :rodata
28499
28500 #if defined(CONFIG_DEBUG_RODATA)
28501 /* .text should occupy whole number of pages */
28502@@ -122,16 +176,20 @@ SECTIONS
28503
28504 /* Data */
28505 .data : AT(ADDR(.data) - LOAD_OFFSET) {
28506+
28507+#ifdef CONFIG_PAX_KERNEXEC
28508+ . = ALIGN(HPAGE_SIZE);
28509+#else
28510+ . = ALIGN(PAGE_SIZE);
28511+#endif
28512+
28513 /* Start of data section */
28514 _sdata = .;
28515
28516 /* init_task */
28517 INIT_TASK_DATA(THREAD_SIZE)
28518
28519-#ifdef CONFIG_X86_32
28520- /* 32 bit has nosave before _edata */
28521 NOSAVE_DATA
28522-#endif
28523
28524 PAGE_ALIGNED_DATA(PAGE_SIZE)
28525
28526@@ -174,12 +232,19 @@ SECTIONS
28527 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
28528
28529 /* Init code and data - will be freed after init */
28530- . = ALIGN(PAGE_SIZE);
28531 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
28532+ BYTE(0)
28533+
28534+#ifdef CONFIG_PAX_KERNEXEC
28535+ . = ALIGN(HPAGE_SIZE);
28536+#else
28537+ . = ALIGN(PAGE_SIZE);
28538+#endif
28539+
28540 __init_begin = .; /* paired with __init_end */
28541- }
28542+ } :init.begin
28543
28544-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
28545+#ifdef CONFIG_SMP
28546 /*
28547 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
28548 * output PHDR, so the next output section - .init.text - should
28549@@ -188,12 +253,27 @@ SECTIONS
28550 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
28551 #endif
28552
28553- INIT_TEXT_SECTION(PAGE_SIZE)
28554-#ifdef CONFIG_X86_64
28555- :init
28556-#endif
28557+ . = ALIGN(PAGE_SIZE);
28558+ init_begin = .;
28559+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
28560+ VMLINUX_SYMBOL(_sinittext) = .;
28561+ INIT_TEXT
28562+ VMLINUX_SYMBOL(_einittext) = .;
28563+ . = ALIGN(PAGE_SIZE);
28564+ } :text.init
28565
28566- INIT_DATA_SECTION(16)
28567+ /*
28568+	 * .exit.text is discarded at runtime, not link time, to deal with
28569+ * references from .altinstructions and .eh_frame
28570+ */
28571+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28572+ EXIT_TEXT
28573+ . = ALIGN(16);
28574+ } :text.exit
28575+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
28576+
28577+ . = ALIGN(PAGE_SIZE);
28578+ INIT_DATA_SECTION(16) :init
28579
28580 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
28581 __x86_cpu_dev_start = .;
28582@@ -264,19 +344,12 @@ SECTIONS
28583 }
28584
28585 . = ALIGN(8);
28586- /*
28587- * .exit.text is discard at runtime, not link time, to deal with
28588- * references from .altinstructions and .eh_frame
28589- */
28590- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
28591- EXIT_TEXT
28592- }
28593
28594 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
28595 EXIT_DATA
28596 }
28597
28598-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
28599+#ifndef CONFIG_SMP
28600 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
28601 #endif
28602
28603@@ -295,16 +368,10 @@ SECTIONS
28604 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
28605 __smp_locks = .;
28606 *(.smp_locks)
28607- . = ALIGN(PAGE_SIZE);
28608 __smp_locks_end = .;
28609+ . = ALIGN(PAGE_SIZE);
28610 }
28611
28612-#ifdef CONFIG_X86_64
28613- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
28614- NOSAVE_DATA
28615- }
28616-#endif
28617-
28618 /* BSS */
28619 . = ALIGN(PAGE_SIZE);
28620 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
28621@@ -320,6 +387,7 @@ SECTIONS
28622 __brk_base = .;
28623 . += 64 * 1024; /* 64k alignment slop space */
28624 *(.brk_reservation) /* areas brk users have reserved */
28625+ . = ALIGN(HPAGE_SIZE);
28626 __brk_limit = .;
28627 }
28628
28629@@ -346,13 +414,12 @@ SECTIONS
28630 * for the boot processor.
28631 */
28632 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
28633-INIT_PER_CPU(gdt_page);
28634 INIT_PER_CPU(irq_stack_union);
28635
28636 /*
28637 * Build-time check on the image size:
28638 */
28639-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
28640+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
28641 "kernel image bigger than KERNEL_IMAGE_SIZE");
28642
28643 #ifdef CONFIG_SMP
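
The linker-script rework above is the heart of 32-bit KERNEXEC: the single executable text PHDR is split so that rodata, NOTES and the exception table move into a read-only segment (executable only on Xen), a dedicated .module.text region is carved out for module code, and the text VMA is shifted by __KERNEL_TEXT_OFFSET. The FLAGS() arguments in the PHDRS block are ordinary ELF p_flags bit masks; as a reading aid (standard ELF constants, not patch code):

	/* ELF program-header permission bits behind FLAGS() */
	#define PF_X 0x1			/* execute */
	#define PF_W 0x2			/* write   */
	#define PF_R 0x4			/* read    */
	/* FLAGS(5) = PF_R|PF_X      -> R_E  (text, text.init, text.exit)
	 * FLAGS(4) = PF_R           -> R__  (rodata when !CONFIG_XEN)
	 * FLAGS(6) = PF_R|PF_W      -> RW_  (data, init.begin, percpu)
	 * FLAGS(7) = PF_R|PF_W|PF_X -> RWE  (init)                      */
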
28644diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
28645index e1e1e80..1400089 100644
28646--- a/arch/x86/kernel/vsyscall_64.c
28647+++ b/arch/x86/kernel/vsyscall_64.c
28648@@ -54,15 +54,13 @@
28649
28650 DEFINE_VVAR(int, vgetcpu_mode);
28651
28652-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
28653+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
28654
28655 static int __init vsyscall_setup(char *str)
28656 {
28657 if (str) {
28658 if (!strcmp("emulate", str))
28659 vsyscall_mode = EMULATE;
28660- else if (!strcmp("native", str))
28661- vsyscall_mode = NATIVE;
28662 else if (!strcmp("none", str))
28663 vsyscall_mode = NONE;
28664 else
28665@@ -279,8 +277,7 @@ do_ret:
28666 return true;
28667
28668 sigsegv:
28669- force_sig(SIGSEGV, current);
28670- return true;
28671+ do_group_exit(SIGKILL);
28672 }
28673
28674 /*
28675@@ -331,10 +328,7 @@ void __init map_vsyscall(void)
28676 extern char __vsyscall_page;
28677 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
28678
28679- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
28680- vsyscall_mode == NATIVE
28681- ? PAGE_KERNEL_VSYSCALL
28682- : PAGE_KERNEL_VVAR);
28683+ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
28684 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
28685 (unsigned long)VSYSCALL_ADDR);
28686 }
28687diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
28688index 04068192..4d75aa6 100644
28689--- a/arch/x86/kernel/x8664_ksyms_64.c
28690+++ b/arch/x86/kernel/x8664_ksyms_64.c
28691@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
28692 EXPORT_SYMBOL(copy_user_generic_unrolled);
28693 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
28694 EXPORT_SYMBOL(__copy_user_nocache);
28695-EXPORT_SYMBOL(_copy_from_user);
28696-EXPORT_SYMBOL(_copy_to_user);
28697
28698 EXPORT_SYMBOL(copy_page);
28699 EXPORT_SYMBOL(clear_page);
28700@@ -73,3 +71,7 @@ EXPORT_SYMBOL(___preempt_schedule);
28701 EXPORT_SYMBOL(___preempt_schedule_context);
28702 #endif
28703 #endif
28704+
28705+#ifdef CONFIG_PAX_PER_CPU_PGD
28706+EXPORT_SYMBOL(cpu_pgd);
28707+#endif
28708diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
28709index e48b674..a451dd9 100644
28710--- a/arch/x86/kernel/x86_init.c
28711+++ b/arch/x86/kernel/x86_init.c
28712@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
28713 static void default_nmi_init(void) { };
28714 static int default_i8042_detect(void) { return 1; };
28715
28716-struct x86_platform_ops x86_platform = {
28717+struct x86_platform_ops x86_platform __read_only = {
28718 .calibrate_tsc = native_calibrate_tsc,
28719 .get_wallclock = mach_get_cmos_time,
28720 .set_wallclock = mach_set_rtc_mmss,
28721@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
28722 EXPORT_SYMBOL_GPL(x86_platform);
28723
28724 #if defined(CONFIG_PCI_MSI)
28725-struct x86_msi_ops x86_msi = {
28726+struct x86_msi_ops x86_msi __read_only = {
28727 .setup_msi_irqs = native_setup_msi_irqs,
28728 .compose_msi_msg = native_compose_msi_msg,
28729 .teardown_msi_irq = native_teardown_msi_irq,
28730@@ -150,7 +150,7 @@ u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
28731 }
28732 #endif
28733
28734-struct x86_io_apic_ops x86_io_apic_ops = {
28735+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
28736 .init = native_io_apic_init_mappings,
28737 .read = native_io_apic_read,
28738 .write = native_io_apic_write,
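
Tagging x86_platform, x86_msi and x86_io_apic_ops __read_only moves these function-pointer tables into memory that KERNEXEC write-protects after boot, closing off a classic hijack target. Any legitimate late rebinding must then open a short write window, the same pattern the kvm hunks below use; a hedged sketch, with pax_open_kernel()/pax_close_kernel() being the PaX primitives visible throughout this patch:

	/* sketch: rebinding one slot of a __read_only ops table */
	static void disable_cr8_intercept(struct kvm_x86_ops *ops)
	{
		pax_open_kernel();				/* lift kernel W^X briefly */
		*(void **)&ops->update_cr8_intercept = NULL;	/* bypass read-only typing */
		pax_close_kernel();				/* re-arm protection */
	}
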
28739diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
28740index a4b451c..8dfe1ad 100644
28741--- a/arch/x86/kernel/xsave.c
28742+++ b/arch/x86/kernel/xsave.c
28743@@ -164,18 +164,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28744
28745 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
28746 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
28747- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28748+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28749
28750 if (!use_xsave())
28751 return err;
28752
28753- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
28754+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
28755
28756 /*
28757 * Read the xstate_bv which we copied (directly from the cpu or
28758 * from the state in task struct) to the user buffers.
28759 */
28760- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28761+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28762
28763 /*
28764 * For legacy compatible, we always set FP/SSE bits in the bit
28765@@ -190,7 +190,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28766 */
28767 xstate_bv |= XSTATE_FPSSE;
28768
28769- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28770+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28771
28772 return err;
28773 }
28774@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
28775 {
28776 int err;
28777
28778+ buf = (struct xsave_struct __user *)____m(buf);
28779 if (use_xsave())
28780 err = xsave_user(buf);
28781 else if (use_fxsr())
28782@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
28783 */
28784 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
28785 {
28786+ buf = (void __user *)____m(buf);
28787 if (use_xsave()) {
28788 if ((unsigned long)buf % 64 || fx_only) {
28789 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
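
The xsave.c hunks are sparse hygiene plus UDEREF plumbing: every pointer handed to __put_user()/__get_user() gains the __user address-space tag it was missing, and ____m() (a PaX helper) rebases the buffer into the shadowed userland mapping before the raw accessors dereference it. The annotation discipline in brief (sketch only; the function name is made up):

	/* __user tells sparse this pointer targets userspace; crossing
	 * address spaces without a __force cast is reported as an error */
	static int put_xstate_magic(void __user *buf, unsigned int xstate_size)
	{
		__u32 __user *p = (__u32 __user *)(buf + xstate_size);

		return __put_user(FP_XSTATE_MAGIC2, p);	/* checked user store */
	}
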
28790diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
28791index 38a0afe..94421a9 100644
28792--- a/arch/x86/kvm/cpuid.c
28793+++ b/arch/x86/kvm/cpuid.c
28794@@ -166,15 +166,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
28795 struct kvm_cpuid2 *cpuid,
28796 struct kvm_cpuid_entry2 __user *entries)
28797 {
28798- int r;
28799+ int r, i;
28800
28801 r = -E2BIG;
28802 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
28803 goto out;
28804 r = -EFAULT;
28805- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
28806- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28807+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28808 goto out;
28809+ for (i = 0; i < cpuid->nent; ++i) {
28810+ struct kvm_cpuid_entry2 cpuid_entry;
28811+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
28812+ goto out;
28813+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
28814+ }
28815 vcpu->arch.cpuid_nent = cpuid->nent;
28816 kvm_apic_set_version(vcpu);
28817 kvm_x86_ops->cpuid_update(vcpu);
28818@@ -189,15 +194,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
28819 struct kvm_cpuid2 *cpuid,
28820 struct kvm_cpuid_entry2 __user *entries)
28821 {
28822- int r;
28823+ int r, i;
28824
28825 r = -E2BIG;
28826 if (cpuid->nent < vcpu->arch.cpuid_nent)
28827 goto out;
28828 r = -EFAULT;
28829- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
28830- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28831+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28832 goto out;
28833+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
28834+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
28835+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
28836+ goto out;
28837+ }
28838 return 0;
28839
28840 out:
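
Both cpuid ioctl paths above stop moving all nent entries with one bulk copy and instead validate the whole range once with access_ok(), then transfer one fixed-size kvm_cpuid_entry2 at a time through a stack bounce buffer. The plausible motivation is PaX's usercopy/size-overflow checking, which can verify a copy into a fixed-size object but not an arbitrary nent-scaled one. The pattern, as a standalone sketch:

	/* sketch: chunked, bounds-checked copy of cpuid entries from userspace */
	static int copy_cpuid_entries(struct kvm_cpuid_entry2 *dst,
				      const struct kvm_cpuid_entry2 __user *src,
				      unsigned int nent)
	{
		unsigned int i;

		if (!access_ok(VERIFY_READ, src, nent * sizeof(*src)))
			return -EFAULT;
		for (i = 0; i < nent; ++i) {
			struct kvm_cpuid_entry2 tmp;	/* fixed-size bounce buffer */

			if (__copy_from_user(&tmp, src + i, sizeof(tmp)))
				return -EFAULT;
			dst[i] = tmp;
		}
		return 0;
	}
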
28841diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
28842index 453e5fb..214168f 100644
28843--- a/arch/x86/kvm/lapic.c
28844+++ b/arch/x86/kvm/lapic.c
28845@@ -55,7 +55,7 @@
28846 #define APIC_BUS_CYCLE_NS 1
28847
28848 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
28849-#define apic_debug(fmt, arg...)
28850+#define apic_debug(fmt, arg...) do {} while (0)
28851
28852 #define APIC_LVT_NUM 6
28853 /* 14 is the version for Xeon and Pentium 8.4.8*/
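
Redefining the disabled apic_debug() as do {} while (0) rather than an empty expansion makes the macro a single complete statement, so it nests safely under if/else and no longer produces empty-body warnings:

	#define apic_debug(fmt, arg...) do {} while (0)

	static void demo(int enabled)
	{
		if (enabled)
			apic_debug("enabled\n");	/* expands to one complete statement */
		else
			apic_debug("disabled\n");	/* else still binds to the right if */
	}
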
28854diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
28855index 4107765..d9eb358 100644
28856--- a/arch/x86/kvm/paging_tmpl.h
28857+++ b/arch/x86/kvm/paging_tmpl.h
28858@@ -331,7 +331,7 @@ retry_walk:
28859 if (unlikely(kvm_is_error_hva(host_addr)))
28860 goto error;
28861
28862- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
28863+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
28864 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
28865 goto error;
28866 walker->ptep_user[walker->level - 1] = ptep_user;
28867diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
28868index b5e994a..35b5866 100644
28869--- a/arch/x86/kvm/svm.c
28870+++ b/arch/x86/kvm/svm.c
28871@@ -3541,7 +3541,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
28872 int cpu = raw_smp_processor_id();
28873
28874 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
28875+
28876+ pax_open_kernel();
28877 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
28878+ pax_close_kernel();
28879+
28880 load_TR_desc();
28881 }
28882
28883@@ -3942,6 +3946,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
28884 #endif
28885 #endif
28886
28887+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28888+ __set_fs(current_thread_info()->addr_limit);
28889+#endif
28890+
28891 reload_tss(vcpu);
28892
28893 local_irq_disable();
28894diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
28895index 801332e..eeff1cc 100644
28896--- a/arch/x86/kvm/vmx.c
28897+++ b/arch/x86/kvm/vmx.c
28898@@ -1339,12 +1339,12 @@ static void vmcs_write64(unsigned long field, u64 value)
28899 #endif
28900 }
28901
28902-static void vmcs_clear_bits(unsigned long field, u32 mask)
28903+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
28904 {
28905 vmcs_writel(field, vmcs_readl(field) & ~mask);
28906 }
28907
28908-static void vmcs_set_bits(unsigned long field, u32 mask)
28909+static void vmcs_set_bits(unsigned long field, unsigned long mask)
28910 {
28911 vmcs_writel(field, vmcs_readl(field) | mask);
28912 }
28913@@ -1604,7 +1604,11 @@ static void reload_tss(void)
28914 struct desc_struct *descs;
28915
28916 descs = (void *)gdt->address;
28917+
28918+ pax_open_kernel();
28919 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
28920+ pax_close_kernel();
28921+
28922 load_TR_desc();
28923 }
28924
28925@@ -1832,6 +1836,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
28926 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
28927 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
28928
28929+#ifdef CONFIG_PAX_PER_CPU_PGD
28930+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28931+#endif
28932+
28933 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
28934 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
28935 vmx->loaded_vmcs->cpu = cpu;
28936@@ -2121,7 +2129,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
28937 * reads and returns guest's timestamp counter "register"
28938 * guest_tsc = host_tsc + tsc_offset -- 21.3
28939 */
28940-static u64 guest_read_tsc(void)
28941+static u64 __intentional_overflow(-1) guest_read_tsc(void)
28942 {
28943 u64 host_tsc, tsc_offset;
28944
28945@@ -3093,8 +3101,11 @@ static __init int hardware_setup(void)
28946 if (!cpu_has_vmx_flexpriority())
28947 flexpriority_enabled = 0;
28948
28949- if (!cpu_has_vmx_tpr_shadow())
28950- kvm_x86_ops->update_cr8_intercept = NULL;
28951+ if (!cpu_has_vmx_tpr_shadow()) {
28952+ pax_open_kernel();
28953+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28954+ pax_close_kernel();
28955+ }
28956
28957 if (enable_ept && !cpu_has_vmx_ept_2m_page())
28958 kvm_disable_largepages();
28959@@ -3105,13 +3116,15 @@ static __init int hardware_setup(void)
28960 if (!cpu_has_vmx_apicv())
28961 enable_apicv = 0;
28962
28963+ pax_open_kernel();
28964 if (enable_apicv)
28965- kvm_x86_ops->update_cr8_intercept = NULL;
28966+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28967 else {
28968- kvm_x86_ops->hwapic_irr_update = NULL;
28969- kvm_x86_ops->deliver_posted_interrupt = NULL;
28970- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28971+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
28972+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
28973+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28974 }
28975+ pax_close_kernel();
28976
28977 if (nested)
28978 nested_vmx_setup_ctls_msrs();
28979@@ -4221,7 +4234,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28980
28981 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
28982 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
28983+
28984+#ifndef CONFIG_PAX_PER_CPU_PGD
28985 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28986+#endif
28987
28988 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
28989 #ifdef CONFIG_X86_64
28990@@ -4243,7 +4259,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28991 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
28992 vmx->host_idt_base = dt.address;
28993
28994- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
28995+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
28996
28997 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
28998 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
28999@@ -7413,6 +7429,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
29000 "jmp 2f \n\t"
29001 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
29002 "2: "
29003+
29004+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29005+ "ljmp %[cs],$3f\n\t"
29006+ "3: "
29007+#endif
29008+
29009 /* Save guest registers, load host registers, keep flags */
29010 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
29011 "pop %0 \n\t"
29012@@ -7465,6 +7487,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
29013 #endif
29014 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
29015 [wordsize]"i"(sizeof(ulong))
29016+
29017+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29018+ ,[cs]"i"(__KERNEL_CS)
29019+#endif
29020+
29021 : "cc", "memory"
29022 #ifdef CONFIG_X86_64
29023 , "rax", "rbx", "rdi", "rsi"
29024@@ -7478,7 +7505,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
29025 if (debugctlmsr)
29026 update_debugctlmsr(debugctlmsr);
29027
29028-#ifndef CONFIG_X86_64
29029+#ifdef CONFIG_X86_32
29030 /*
29031 * The sysexit path does not restore ds/es, so we must set them to
29032 * a reasonable value ourselves.
29033@@ -7487,8 +7514,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
29034 	 * may be executed in interrupt context, which saves and restores segments
29035 * around it, nullifying its effect.
29036 */
29037- loadsegment(ds, __USER_DS);
29038- loadsegment(es, __USER_DS);
29039+ loadsegment(ds, __KERNEL_DS);
29040+ loadsegment(es, __KERNEL_DS);
29041+ loadsegment(ss, __KERNEL_DS);
29042+
29043+#ifdef CONFIG_PAX_KERNEXEC
29044+ loadsegment(fs, __KERNEL_PERCPU);
29045+#endif
29046+
29047+#ifdef CONFIG_PAX_MEMORY_UDEREF
29048+ __set_fs(current_thread_info()->addr_limit);
29049+#endif
29050+
29051 #endif
29052
29053 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
29054diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
29055index ef432f8..a630659 100644
29056--- a/arch/x86/kvm/x86.c
29057+++ b/arch/x86/kvm/x86.c
29058@@ -1808,8 +1808,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
29059 {
29060 struct kvm *kvm = vcpu->kvm;
29061 int lm = is_long_mode(vcpu);
29062- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
29063- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
29064+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
29065+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
29066 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
29067 : kvm->arch.xen_hvm_config.blob_size_32;
29068 u32 page_num = data & ~PAGE_MASK;
29069@@ -2729,6 +2729,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
29070 if (n < msr_list.nmsrs)
29071 goto out;
29072 r = -EFAULT;
29073+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
29074+ goto out;
29075 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
29076 num_msrs_to_save * sizeof(u32)))
29077 goto out;
29078@@ -5567,7 +5569,7 @@ static struct notifier_block pvclock_gtod_notifier = {
29079 };
29080 #endif
29081
29082-int kvm_arch_init(void *opaque)
29083+int kvm_arch_init(const void *opaque)
29084 {
29085 int r;
29086 struct kvm_x86_ops *ops = opaque;
29087diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
29088index aae9413..d11e829 100644
29089--- a/arch/x86/lguest/boot.c
29090+++ b/arch/x86/lguest/boot.c
29091@@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
29092 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
29093 * Launcher to reboot us.
29094 */
29095-static void lguest_restart(char *reason)
29096+static __noreturn void lguest_restart(char *reason)
29097 {
29098 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
29099+ BUG();
29100 }
29101
29102 /*G:050
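
lguest_restart() issues a shutdown hypercall that should never return, so it is now annotated __noreturn; the added BUG() backstops that promise if the Host ever ignores the request, keeping the attribute honest for the optimizer. The idiom in isolation (names illustrative):

	/* sketch: a __noreturn wrapper must guarantee it cannot fall through */
	static __noreturn void die_by_hypercall(char *reason)
	{
		hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
		BUG();	/* unreachable while the Host honours the shutdown */
	}
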
29103diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
29104index 00933d5..3a64af9 100644
29105--- a/arch/x86/lib/atomic64_386_32.S
29106+++ b/arch/x86/lib/atomic64_386_32.S
29107@@ -48,6 +48,10 @@ BEGIN(read)
29108 movl (v), %eax
29109 movl 4(v), %edx
29110 RET_ENDP
29111+BEGIN(read_unchecked)
29112+ movl (v), %eax
29113+ movl 4(v), %edx
29114+RET_ENDP
29115 #undef v
29116
29117 #define v %esi
29118@@ -55,6 +59,10 @@ BEGIN(set)
29119 movl %ebx, (v)
29120 movl %ecx, 4(v)
29121 RET_ENDP
29122+BEGIN(set_unchecked)
29123+ movl %ebx, (v)
29124+ movl %ecx, 4(v)
29125+RET_ENDP
29126 #undef v
29127
29128 #define v %esi
29129@@ -70,6 +78,20 @@ RET_ENDP
29130 BEGIN(add)
29131 addl %eax, (v)
29132 adcl %edx, 4(v)
29133+
29134+#ifdef CONFIG_PAX_REFCOUNT
29135+ jno 0f
29136+ subl %eax, (v)
29137+ sbbl %edx, 4(v)
29138+ int $4
29139+0:
29140+ _ASM_EXTABLE(0b, 0b)
29141+#endif
29142+
29143+RET_ENDP
29144+BEGIN(add_unchecked)
29145+ addl %eax, (v)
29146+ adcl %edx, 4(v)
29147 RET_ENDP
29148 #undef v
29149
29150@@ -77,6 +99,24 @@ RET_ENDP
29151 BEGIN(add_return)
29152 addl (v), %eax
29153 adcl 4(v), %edx
29154+
29155+#ifdef CONFIG_PAX_REFCOUNT
29156+ into
29157+1234:
29158+ _ASM_EXTABLE(1234b, 2f)
29159+#endif
29160+
29161+ movl %eax, (v)
29162+ movl %edx, 4(v)
29163+
29164+#ifdef CONFIG_PAX_REFCOUNT
29165+2:
29166+#endif
29167+
29168+RET_ENDP
29169+BEGIN(add_return_unchecked)
29170+ addl (v), %eax
29171+ adcl 4(v), %edx
29172 movl %eax, (v)
29173 movl %edx, 4(v)
29174 RET_ENDP
29175@@ -86,6 +126,20 @@ RET_ENDP
29176 BEGIN(sub)
29177 subl %eax, (v)
29178 sbbl %edx, 4(v)
29179+
29180+#ifdef CONFIG_PAX_REFCOUNT
29181+ jno 0f
29182+ addl %eax, (v)
29183+ adcl %edx, 4(v)
29184+ int $4
29185+0:
29186+ _ASM_EXTABLE(0b, 0b)
29187+#endif
29188+
29189+RET_ENDP
29190+BEGIN(sub_unchecked)
29191+ subl %eax, (v)
29192+ sbbl %edx, 4(v)
29193 RET_ENDP
29194 #undef v
29195
29196@@ -96,6 +150,27 @@ BEGIN(sub_return)
29197 sbbl $0, %edx
29198 addl (v), %eax
29199 adcl 4(v), %edx
29200+
29201+#ifdef CONFIG_PAX_REFCOUNT
29202+ into
29203+1234:
29204+ _ASM_EXTABLE(1234b, 2f)
29205+#endif
29206+
29207+ movl %eax, (v)
29208+ movl %edx, 4(v)
29209+
29210+#ifdef CONFIG_PAX_REFCOUNT
29211+2:
29212+#endif
29213+
29214+RET_ENDP
29215+BEGIN(sub_return_unchecked)
29216+ negl %edx
29217+ negl %eax
29218+ sbbl $0, %edx
29219+ addl (v), %eax
29220+ adcl 4(v), %edx
29221 movl %eax, (v)
29222 movl %edx, 4(v)
29223 RET_ENDP
29224@@ -105,6 +180,20 @@ RET_ENDP
29225 BEGIN(inc)
29226 addl $1, (v)
29227 adcl $0, 4(v)
29228+
29229+#ifdef CONFIG_PAX_REFCOUNT
29230+ jno 0f
29231+ subl $1, (v)
29232+ sbbl $0, 4(v)
29233+ int $4
29234+0:
29235+ _ASM_EXTABLE(0b, 0b)
29236+#endif
29237+
29238+RET_ENDP
29239+BEGIN(inc_unchecked)
29240+ addl $1, (v)
29241+ adcl $0, 4(v)
29242 RET_ENDP
29243 #undef v
29244
29245@@ -114,6 +203,26 @@ BEGIN(inc_return)
29246 movl 4(v), %edx
29247 addl $1, %eax
29248 adcl $0, %edx
29249+
29250+#ifdef CONFIG_PAX_REFCOUNT
29251+ into
29252+1234:
29253+ _ASM_EXTABLE(1234b, 2f)
29254+#endif
29255+
29256+ movl %eax, (v)
29257+ movl %edx, 4(v)
29258+
29259+#ifdef CONFIG_PAX_REFCOUNT
29260+2:
29261+#endif
29262+
29263+RET_ENDP
29264+BEGIN(inc_return_unchecked)
29265+ movl (v), %eax
29266+ movl 4(v), %edx
29267+ addl $1, %eax
29268+ adcl $0, %edx
29269 movl %eax, (v)
29270 movl %edx, 4(v)
29271 RET_ENDP
29272@@ -123,6 +232,20 @@ RET_ENDP
29273 BEGIN(dec)
29274 subl $1, (v)
29275 sbbl $0, 4(v)
29276+
29277+#ifdef CONFIG_PAX_REFCOUNT
29278+ jno 0f
29279+ addl $1, (v)
29280+ adcl $0, 4(v)
29281+ int $4
29282+0:
29283+ _ASM_EXTABLE(0b, 0b)
29284+#endif
29285+
29286+RET_ENDP
29287+BEGIN(dec_unchecked)
29288+ subl $1, (v)
29289+ sbbl $0, 4(v)
29290 RET_ENDP
29291 #undef v
29292
29293@@ -132,6 +255,26 @@ BEGIN(dec_return)
29294 movl 4(v), %edx
29295 subl $1, %eax
29296 sbbl $0, %edx
29297+
29298+#ifdef CONFIG_PAX_REFCOUNT
29299+ into
29300+1234:
29301+ _ASM_EXTABLE(1234b, 2f)
29302+#endif
29303+
29304+ movl %eax, (v)
29305+ movl %edx, 4(v)
29306+
29307+#ifdef CONFIG_PAX_REFCOUNT
29308+2:
29309+#endif
29310+
29311+RET_ENDP
29312+BEGIN(dec_return_unchecked)
29313+ movl (v), %eax
29314+ movl 4(v), %edx
29315+ subl $1, %eax
29316+ sbbl $0, %edx
29317 movl %eax, (v)
29318 movl %edx, 4(v)
29319 RET_ENDP
29320@@ -143,6 +286,13 @@ BEGIN(add_unless)
29321 adcl %edx, %edi
29322 addl (v), %eax
29323 adcl 4(v), %edx
29324+
29325+#ifdef CONFIG_PAX_REFCOUNT
29326+ into
29327+1234:
29328+ _ASM_EXTABLE(1234b, 2f)
29329+#endif
29330+
29331 cmpl %eax, %ecx
29332 je 3f
29333 1:
29334@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
29335 1:
29336 addl $1, %eax
29337 adcl $0, %edx
29338+
29339+#ifdef CONFIG_PAX_REFCOUNT
29340+ into
29341+1234:
29342+ _ASM_EXTABLE(1234b, 2f)
29343+#endif
29344+
29345 movl %eax, (v)
29346 movl %edx, 4(v)
29347 movl $1, %eax
29348@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
29349 movl 4(v), %edx
29350 subl $1, %eax
29351 sbbl $0, %edx
29352+
29353+#ifdef CONFIG_PAX_REFCOUNT
29354+ into
29355+1234:
29356+ _ASM_EXTABLE(1234b, 1f)
29357+#endif
29358+
29359 js 1f
29360 movl %eax, (v)
29361 movl %edx, 4(v)
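
Every PAX_REFCOUNT block above follows one template: perform the 64-bit operation, test the overflow flag (jno for the in-place forms, into for the value-returning ones), and on signed overflow roll the memory value back and raise int $4, the overflow exception, whose PaX handler kills the offending task; the _ASM_EXTABLE entries route the resulting fault to the recovery label. Each primitive also gains an _unchecked twin for counters allowed to wrap. Approximately the same logic in C, using a compiler builtin in place of the flag test (an approximation only; the reporting hook is hypothetical):

	extern void refcount_overflow_report(void);	/* hypothetical reporting hook */

	/* sketch: checked 64-bit add in the spirit of PAX_REFCOUNT (gcc >= 5) */
	static void refcount64_add_checked(long long i, volatile long long *v)
	{
		long long sum;

		if (__builtin_add_overflow(*v, i, &sum)) {
			refcount_overflow_report();	/* real code traps via int $4 */
			return;				/* leave *v at its pre-overflow value */
		}
		*v = sum;
	}
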
29362diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
29363index f5cc9eb..51fa319 100644
29364--- a/arch/x86/lib/atomic64_cx8_32.S
29365+++ b/arch/x86/lib/atomic64_cx8_32.S
29366@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
29367 CFI_STARTPROC
29368
29369 read64 %ecx
29370+ pax_force_retaddr
29371 ret
29372 CFI_ENDPROC
29373 ENDPROC(atomic64_read_cx8)
29374
29375+ENTRY(atomic64_read_unchecked_cx8)
29376+ CFI_STARTPROC
29377+
29378+ read64 %ecx
29379+ pax_force_retaddr
29380+ ret
29381+ CFI_ENDPROC
29382+ENDPROC(atomic64_read_unchecked_cx8)
29383+
29384 ENTRY(atomic64_set_cx8)
29385 CFI_STARTPROC
29386
29387@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
29388 cmpxchg8b (%esi)
29389 jne 1b
29390
29391+ pax_force_retaddr
29392 ret
29393 CFI_ENDPROC
29394 ENDPROC(atomic64_set_cx8)
29395
29396+ENTRY(atomic64_set_unchecked_cx8)
29397+ CFI_STARTPROC
29398+
29399+1:
29400+/* we don't need LOCK_PREFIX since aligned 64-bit writes
29401+ * are atomic on 586 and newer */
29402+ cmpxchg8b (%esi)
29403+ jne 1b
29404+
29405+ pax_force_retaddr
29406+ ret
29407+ CFI_ENDPROC
29408+ENDPROC(atomic64_set_unchecked_cx8)
29409+
29410 ENTRY(atomic64_xchg_cx8)
29411 CFI_STARTPROC
29412
29413@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
29414 cmpxchg8b (%esi)
29415 jne 1b
29416
29417+ pax_force_retaddr
29418 ret
29419 CFI_ENDPROC
29420 ENDPROC(atomic64_xchg_cx8)
29421
29422-.macro addsub_return func ins insc
29423-ENTRY(atomic64_\func\()_return_cx8)
29424+.macro addsub_return func ins insc unchecked=""
29425+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29426 CFI_STARTPROC
29427 SAVE ebp
29428 SAVE ebx
29429@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
29430 movl %edx, %ecx
29431 \ins\()l %esi, %ebx
29432 \insc\()l %edi, %ecx
29433+
29434+.ifb \unchecked
29435+#ifdef CONFIG_PAX_REFCOUNT
29436+ into
29437+2:
29438+ _ASM_EXTABLE(2b, 3f)
29439+#endif
29440+.endif
29441+
29442 LOCK_PREFIX
29443 cmpxchg8b (%ebp)
29444 jne 1b
29445-
29446-10:
29447 movl %ebx, %eax
29448 movl %ecx, %edx
29449+
29450+.ifb \unchecked
29451+#ifdef CONFIG_PAX_REFCOUNT
29452+3:
29453+#endif
29454+.endif
29455+
29456 RESTORE edi
29457 RESTORE esi
29458 RESTORE ebx
29459 RESTORE ebp
29460+ pax_force_retaddr
29461 ret
29462 CFI_ENDPROC
29463-ENDPROC(atomic64_\func\()_return_cx8)
29464+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29465 .endm
29466
29467 addsub_return add add adc
29468 addsub_return sub sub sbb
29469+addsub_return add add adc _unchecked
29470+addsub_return sub sub sbb _unchecked
29471
29472-.macro incdec_return func ins insc
29473-ENTRY(atomic64_\func\()_return_cx8)
29474+.macro incdec_return func ins insc unchecked=""
29475+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29476 CFI_STARTPROC
29477 SAVE ebx
29478
29479@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
29480 movl %edx, %ecx
29481 \ins\()l $1, %ebx
29482 \insc\()l $0, %ecx
29483+
29484+.ifb \unchecked
29485+#ifdef CONFIG_PAX_REFCOUNT
29486+ into
29487+2:
29488+ _ASM_EXTABLE(2b, 3f)
29489+#endif
29490+.endif
29491+
29492 LOCK_PREFIX
29493 cmpxchg8b (%esi)
29494 jne 1b
29495
29496-10:
29497 movl %ebx, %eax
29498 movl %ecx, %edx
29499+
29500+.ifb \unchecked
29501+#ifdef CONFIG_PAX_REFCOUNT
29502+3:
29503+#endif
29504+.endif
29505+
29506 RESTORE ebx
29507+ pax_force_retaddr
29508 ret
29509 CFI_ENDPROC
29510-ENDPROC(atomic64_\func\()_return_cx8)
29511+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29512 .endm
29513
29514 incdec_return inc add adc
29515 incdec_return dec sub sbb
29516+incdec_return inc add adc _unchecked
29517+incdec_return dec sub sbb _unchecked
29518
29519 ENTRY(atomic64_dec_if_positive_cx8)
29520 CFI_STARTPROC
29521@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
29522 movl %edx, %ecx
29523 subl $1, %ebx
29524 sbb $0, %ecx
29525+
29526+#ifdef CONFIG_PAX_REFCOUNT
29527+ into
29528+1234:
29529+ _ASM_EXTABLE(1234b, 2f)
29530+#endif
29531+
29532 js 2f
29533 LOCK_PREFIX
29534 cmpxchg8b (%esi)
29535@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
29536 movl %ebx, %eax
29537 movl %ecx, %edx
29538 RESTORE ebx
29539+ pax_force_retaddr
29540 ret
29541 CFI_ENDPROC
29542 ENDPROC(atomic64_dec_if_positive_cx8)
29543@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
29544 movl %edx, %ecx
29545 addl %ebp, %ebx
29546 adcl %edi, %ecx
29547+
29548+#ifdef CONFIG_PAX_REFCOUNT
29549+ into
29550+1234:
29551+ _ASM_EXTABLE(1234b, 3f)
29552+#endif
29553+
29554 LOCK_PREFIX
29555 cmpxchg8b (%esi)
29556 jne 1b
29557@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
29558 CFI_ADJUST_CFA_OFFSET -8
29559 RESTORE ebx
29560 RESTORE ebp
29561+ pax_force_retaddr
29562 ret
29563 4:
29564 cmpl %edx, 4(%esp)
29565@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
29566 xorl %ecx, %ecx
29567 addl $1, %ebx
29568 adcl %edx, %ecx
29569+
29570+#ifdef CONFIG_PAX_REFCOUNT
29571+ into
29572+1234:
29573+ _ASM_EXTABLE(1234b, 3f)
29574+#endif
29575+
29576 LOCK_PREFIX
29577 cmpxchg8b (%esi)
29578 jne 1b
29579@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
29580 movl $1, %eax
29581 3:
29582 RESTORE ebx
29583+ pax_force_retaddr
29584 ret
29585 CFI_ENDPROC
29586 ENDPROC(atomic64_inc_not_zero_cx8)
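
The cmpxchg8b file generates both flavours from one macro body: an empty unchecked argument (tested with .ifb) emits the overflow check, while passing _unchecked emits the plain operation under a _unchecked symbol. On the C side the two families are kept apart by type, so only wrap-tolerant counters opt out of checking (atomic64_unchecked_t is the PaX-side type, assumed here):

	/* sketch: the two counter families under PAX_REFCOUNT */
	static atomic64_t refs;			/* checked: overflow traps      */
	static atomic64_unchecked_t hits;	/* unchecked: permitted to wrap */

	static void bump_counters(void)
	{
		atomic64_inc(&refs);		/* may raise the overflow trap */
		atomic64_inc_unchecked(&hits);	/* never traps                 */
	}
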
29587diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
29588index e78b8eee..7e173a8 100644
29589--- a/arch/x86/lib/checksum_32.S
29590+++ b/arch/x86/lib/checksum_32.S
29591@@ -29,7 +29,8 @@
29592 #include <asm/dwarf2.h>
29593 #include <asm/errno.h>
29594 #include <asm/asm.h>
29595-
29596+#include <asm/segment.h>
29597+
29598 /*
29599 * computes a partial checksum, e.g. for TCP/UDP fragments
29600 */
29601@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
29602
29603 #define ARGBASE 16
29604 #define FP 12
29605-
29606-ENTRY(csum_partial_copy_generic)
29607+
29608+ENTRY(csum_partial_copy_generic_to_user)
29609 CFI_STARTPROC
29610+
29611+#ifdef CONFIG_PAX_MEMORY_UDEREF
29612+ pushl_cfi %gs
29613+ popl_cfi %es
29614+ jmp csum_partial_copy_generic
29615+#endif
29616+
29617+ENTRY(csum_partial_copy_generic_from_user)
29618+
29619+#ifdef CONFIG_PAX_MEMORY_UDEREF
29620+ pushl_cfi %gs
29621+ popl_cfi %ds
29622+#endif
29623+
29624+ENTRY(csum_partial_copy_generic)
29625 subl $4,%esp
29626 CFI_ADJUST_CFA_OFFSET 4
29627 pushl_cfi %edi
29628@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
29629 jmp 4f
29630 SRC(1: movw (%esi), %bx )
29631 addl $2, %esi
29632-DST( movw %bx, (%edi) )
29633+DST( movw %bx, %es:(%edi) )
29634 addl $2, %edi
29635 addw %bx, %ax
29636 adcl $0, %eax
29637@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
29638 SRC(1: movl (%esi), %ebx )
29639 SRC( movl 4(%esi), %edx )
29640 adcl %ebx, %eax
29641-DST( movl %ebx, (%edi) )
29642+DST( movl %ebx, %es:(%edi) )
29643 adcl %edx, %eax
29644-DST( movl %edx, 4(%edi) )
29645+DST( movl %edx, %es:4(%edi) )
29646
29647 SRC( movl 8(%esi), %ebx )
29648 SRC( movl 12(%esi), %edx )
29649 adcl %ebx, %eax
29650-DST( movl %ebx, 8(%edi) )
29651+DST( movl %ebx, %es:8(%edi) )
29652 adcl %edx, %eax
29653-DST( movl %edx, 12(%edi) )
29654+DST( movl %edx, %es:12(%edi) )
29655
29656 SRC( movl 16(%esi), %ebx )
29657 SRC( movl 20(%esi), %edx )
29658 adcl %ebx, %eax
29659-DST( movl %ebx, 16(%edi) )
29660+DST( movl %ebx, %es:16(%edi) )
29661 adcl %edx, %eax
29662-DST( movl %edx, 20(%edi) )
29663+DST( movl %edx, %es:20(%edi) )
29664
29665 SRC( movl 24(%esi), %ebx )
29666 SRC( movl 28(%esi), %edx )
29667 adcl %ebx, %eax
29668-DST( movl %ebx, 24(%edi) )
29669+DST( movl %ebx, %es:24(%edi) )
29670 adcl %edx, %eax
29671-DST( movl %edx, 28(%edi) )
29672+DST( movl %edx, %es:28(%edi) )
29673
29674 lea 32(%esi), %esi
29675 lea 32(%edi), %edi
29676@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
29677 shrl $2, %edx # This clears CF
29678 SRC(3: movl (%esi), %ebx )
29679 adcl %ebx, %eax
29680-DST( movl %ebx, (%edi) )
29681+DST( movl %ebx, %es:(%edi) )
29682 lea 4(%esi), %esi
29683 lea 4(%edi), %edi
29684 dec %edx
29685@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
29686 jb 5f
29687 SRC( movw (%esi), %cx )
29688 leal 2(%esi), %esi
29689-DST( movw %cx, (%edi) )
29690+DST( movw %cx, %es:(%edi) )
29691 leal 2(%edi), %edi
29692 je 6f
29693 shll $16,%ecx
29694 SRC(5: movb (%esi), %cl )
29695-DST( movb %cl, (%edi) )
29696+DST( movb %cl, %es:(%edi) )
29697 6: addl %ecx, %eax
29698 adcl $0, %eax
29699 7:
29700@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
29701
29702 6001:
29703 movl ARGBASE+20(%esp), %ebx # src_err_ptr
29704- movl $-EFAULT, (%ebx)
29705+ movl $-EFAULT, %ss:(%ebx)
29706
29707 # zero the complete destination - computing the rest
29708 # is too much work
29709@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
29710
29711 6002:
29712 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29713- movl $-EFAULT,(%ebx)
29714+ movl $-EFAULT,%ss:(%ebx)
29715 jmp 5000b
29716
29717 .previous
29718
29719+ pushl_cfi %ss
29720+ popl_cfi %ds
29721+ pushl_cfi %ss
29722+ popl_cfi %es
29723 popl_cfi %ebx
29724 CFI_RESTORE ebx
29725 popl_cfi %esi
29726@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
29727 popl_cfi %ecx # equivalent to addl $4,%esp
29728 ret
29729 CFI_ENDPROC
29730-ENDPROC(csum_partial_copy_generic)
29731+ENDPROC(csum_partial_copy_generic_to_user)
29732
29733 #else
29734
29735 /* Version for PentiumII/PPro */
29736
29737 #define ROUND1(x) \
29738+ nop; nop; nop; \
29739 SRC(movl x(%esi), %ebx ) ; \
29740 addl %ebx, %eax ; \
29741- DST(movl %ebx, x(%edi) ) ;
29742+ DST(movl %ebx, %es:x(%edi)) ;
29743
29744 #define ROUND(x) \
29745+ nop; nop; nop; \
29746 SRC(movl x(%esi), %ebx ) ; \
29747 adcl %ebx, %eax ; \
29748- DST(movl %ebx, x(%edi) ) ;
29749+ DST(movl %ebx, %es:x(%edi)) ;
29750
29751 #define ARGBASE 12
29752-
29753-ENTRY(csum_partial_copy_generic)
29754+
29755+ENTRY(csum_partial_copy_generic_to_user)
29756 CFI_STARTPROC
29757+
29758+#ifdef CONFIG_PAX_MEMORY_UDEREF
29759+ pushl_cfi %gs
29760+ popl_cfi %es
29761+ jmp csum_partial_copy_generic
29762+#endif
29763+
29764+ENTRY(csum_partial_copy_generic_from_user)
29765+
29766+#ifdef CONFIG_PAX_MEMORY_UDEREF
29767+ pushl_cfi %gs
29768+ popl_cfi %ds
29769+#endif
29770+
29771+ENTRY(csum_partial_copy_generic)
29772 pushl_cfi %ebx
29773 CFI_REL_OFFSET ebx, 0
29774 pushl_cfi %edi
29775@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
29776 subl %ebx, %edi
29777 lea -1(%esi),%edx
29778 andl $-32,%edx
29779- lea 3f(%ebx,%ebx), %ebx
29780+ lea 3f(%ebx,%ebx,2), %ebx
29781 testl %esi, %esi
29782 jmp *%ebx
29783 1: addl $64,%esi
29784@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
29785 jb 5f
29786 SRC( movw (%esi), %dx )
29787 leal 2(%esi), %esi
29788-DST( movw %dx, (%edi) )
29789+DST( movw %dx, %es:(%edi) )
29790 leal 2(%edi), %edi
29791 je 6f
29792 shll $16,%edx
29793 5:
29794 SRC( movb (%esi), %dl )
29795-DST( movb %dl, (%edi) )
29796+DST( movb %dl, %es:(%edi) )
29797 6: addl %edx, %eax
29798 adcl $0, %eax
29799 7:
29800 .section .fixup, "ax"
29801 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
29802- movl $-EFAULT, (%ebx)
29803+ movl $-EFAULT, %ss:(%ebx)
29804 # zero the complete destination (computing the rest is too much work)
29805 movl ARGBASE+8(%esp),%edi # dst
29806 movl ARGBASE+12(%esp),%ecx # len
29807@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
29808 rep; stosb
29809 jmp 7b
29810 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29811- movl $-EFAULT, (%ebx)
29812+ movl $-EFAULT, %ss:(%ebx)
29813 jmp 7b
29814 .previous
29815
29816+#ifdef CONFIG_PAX_MEMORY_UDEREF
29817+ pushl_cfi %ss
29818+ popl_cfi %ds
29819+ pushl_cfi %ss
29820+ popl_cfi %es
29821+#endif
29822+
29823 popl_cfi %esi
29824 CFI_RESTORE esi
29825 popl_cfi %edi
29826@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
29827 CFI_RESTORE ebx
29828 ret
29829 CFI_ENDPROC
29830-ENDPROC(csum_partial_copy_generic)
29831+ENDPROC(csum_partial_copy_generic_to_user)
29832
29833 #undef ROUND
29834 #undef ROUND1
29835diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
29836index f2145cf..cea889d 100644
29837--- a/arch/x86/lib/clear_page_64.S
29838+++ b/arch/x86/lib/clear_page_64.S
29839@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
29840 movl $4096/8,%ecx
29841 xorl %eax,%eax
29842 rep stosq
29843+ pax_force_retaddr
29844 ret
29845 CFI_ENDPROC
29846 ENDPROC(clear_page_c)
29847@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
29848 movl $4096,%ecx
29849 xorl %eax,%eax
29850 rep stosb
29851+ pax_force_retaddr
29852 ret
29853 CFI_ENDPROC
29854 ENDPROC(clear_page_c_e)
29855@@ -43,6 +45,7 @@ ENTRY(clear_page)
29856 leaq 64(%rdi),%rdi
29857 jnz .Lloop
29858 nop
29859+ pax_force_retaddr
29860 ret
29861 CFI_ENDPROC
29862 .Lclear_page_end:
29863@@ -58,7 +61,7 @@ ENDPROC(clear_page)
29864
29865 #include <asm/cpufeature.h>
29866
29867- .section .altinstr_replacement,"ax"
29868+ .section .altinstr_replacement,"a"
29869 1: .byte 0xeb /* jmp <disp8> */
29870 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
29871 2: .byte 0xeb /* jmp <disp8> */
29872diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
29873index 1e572c5..2a162cd 100644
29874--- a/arch/x86/lib/cmpxchg16b_emu.S
29875+++ b/arch/x86/lib/cmpxchg16b_emu.S
29876@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
29877
29878 popf
29879 mov $1, %al
29880+ pax_force_retaddr
29881 ret
29882
29883 not_same:
29884 popf
29885 xor %al,%al
29886+ pax_force_retaddr
29887 ret
29888
29889 CFI_ENDPROC
29890diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
29891index 176cca6..e0d658e 100644
29892--- a/arch/x86/lib/copy_page_64.S
29893+++ b/arch/x86/lib/copy_page_64.S
29894@@ -9,6 +9,7 @@ copy_page_rep:
29895 CFI_STARTPROC
29896 movl $4096/8, %ecx
29897 rep movsq
29898+ pax_force_retaddr
29899 ret
29900 CFI_ENDPROC
29901 ENDPROC(copy_page_rep)
29902@@ -24,8 +25,8 @@ ENTRY(copy_page)
29903 CFI_ADJUST_CFA_OFFSET 2*8
29904 movq %rbx, (%rsp)
29905 CFI_REL_OFFSET rbx, 0
29906- movq %r12, 1*8(%rsp)
29907- CFI_REL_OFFSET r12, 1*8
29908+ movq %r13, 1*8(%rsp)
29909+ CFI_REL_OFFSET r13, 1*8
29910
29911 movl $(4096/64)-5, %ecx
29912 .p2align 4
29913@@ -38,7 +39,7 @@ ENTRY(copy_page)
29914 movq 0x8*4(%rsi), %r9
29915 movq 0x8*5(%rsi), %r10
29916 movq 0x8*6(%rsi), %r11
29917- movq 0x8*7(%rsi), %r12
29918+ movq 0x8*7(%rsi), %r13
29919
29920 prefetcht0 5*64(%rsi)
29921
29922@@ -49,7 +50,7 @@ ENTRY(copy_page)
29923 movq %r9, 0x8*4(%rdi)
29924 movq %r10, 0x8*5(%rdi)
29925 movq %r11, 0x8*6(%rdi)
29926- movq %r12, 0x8*7(%rdi)
29927+ movq %r13, 0x8*7(%rdi)
29928
29929 leaq 64 (%rsi), %rsi
29930 leaq 64 (%rdi), %rdi
29931@@ -68,7 +69,7 @@ ENTRY(copy_page)
29932 movq 0x8*4(%rsi), %r9
29933 movq 0x8*5(%rsi), %r10
29934 movq 0x8*6(%rsi), %r11
29935- movq 0x8*7(%rsi), %r12
29936+ movq 0x8*7(%rsi), %r13
29937
29938 movq %rax, 0x8*0(%rdi)
29939 movq %rbx, 0x8*1(%rdi)
29940@@ -77,7 +78,7 @@ ENTRY(copy_page)
29941 movq %r9, 0x8*4(%rdi)
29942 movq %r10, 0x8*5(%rdi)
29943 movq %r11, 0x8*6(%rdi)
29944- movq %r12, 0x8*7(%rdi)
29945+ movq %r13, 0x8*7(%rdi)
29946
29947 leaq 64(%rdi), %rdi
29948 leaq 64(%rsi), %rsi
29949@@ -85,10 +86,11 @@ ENTRY(copy_page)
29950
29951 movq (%rsp), %rbx
29952 CFI_RESTORE rbx
29953- movq 1*8(%rsp), %r12
29954- CFI_RESTORE r12
29955+ movq 1*8(%rsp), %r13
29956+ CFI_RESTORE r13
29957 addq $2*8, %rsp
29958 CFI_ADJUST_CFA_OFFSET -2*8
29959+ pax_force_retaddr
29960 ret
29961 .Lcopy_page_end:
29962 CFI_ENDPROC
29963@@ -99,7 +101,7 @@ ENDPROC(copy_page)
29964
29965 #include <asm/cpufeature.h>
29966
29967- .section .altinstr_replacement,"ax"
29968+ .section .altinstr_replacement,"a"
29969 1: .byte 0xeb /* jmp <disp8> */
29970 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
29971 2:
29972diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
29973index dee945d..a84067b 100644
29974--- a/arch/x86/lib/copy_user_64.S
29975+++ b/arch/x86/lib/copy_user_64.S
29976@@ -18,31 +18,7 @@
29977 #include <asm/alternative-asm.h>
29978 #include <asm/asm.h>
29979 #include <asm/smap.h>
29980-
29981-/*
29982- * By placing feature2 after feature1 in altinstructions section, we logically
29983- * implement:
29984- * If CPU has feature2, jmp to alt2 is used
29985- * else if CPU has feature1, jmp to alt1 is used
29986- * else jmp to orig is used.
29987- */
29988- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
29989-0:
29990- .byte 0xe9 /* 32bit jump */
29991- .long \orig-1f /* by default jump to orig */
29992-1:
29993- .section .altinstr_replacement,"ax"
29994-2: .byte 0xe9 /* near jump with 32bit immediate */
29995- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
29996-3: .byte 0xe9 /* near jump with 32bit immediate */
29997- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
29998- .previous
29999-
30000- .section .altinstructions,"a"
30001- altinstruction_entry 0b,2b,\feature1,5,5
30002- altinstruction_entry 0b,3b,\feature2,5,5
30003- .previous
30004- .endm
30005+#include <asm/pgtable.h>
30006
30007 .macro ALIGN_DESTINATION
30008 #ifdef FIX_ALIGNMENT
30009@@ -70,52 +46,6 @@
30010 #endif
30011 .endm
30012
30013-/* Standard copy_to_user with segment limit checking */
30014-ENTRY(_copy_to_user)
30015- CFI_STARTPROC
30016- GET_THREAD_INFO(%rax)
30017- movq %rdi,%rcx
30018- addq %rdx,%rcx
30019- jc bad_to_user
30020- cmpq TI_addr_limit(%rax),%rcx
30021- ja bad_to_user
30022- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
30023- copy_user_generic_unrolled,copy_user_generic_string, \
30024- copy_user_enhanced_fast_string
30025- CFI_ENDPROC
30026-ENDPROC(_copy_to_user)
30027-
30028-/* Standard copy_from_user with segment limit checking */
30029-ENTRY(_copy_from_user)
30030- CFI_STARTPROC
30031- GET_THREAD_INFO(%rax)
30032- movq %rsi,%rcx
30033- addq %rdx,%rcx
30034- jc bad_from_user
30035- cmpq TI_addr_limit(%rax),%rcx
30036- ja bad_from_user
30037- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
30038- copy_user_generic_unrolled,copy_user_generic_string, \
30039- copy_user_enhanced_fast_string
30040- CFI_ENDPROC
30041-ENDPROC(_copy_from_user)
30042-
30043- .section .fixup,"ax"
30044- /* must zero dest */
30045-ENTRY(bad_from_user)
30046-bad_from_user:
30047- CFI_STARTPROC
30048- movl %edx,%ecx
30049- xorl %eax,%eax
30050- rep
30051- stosb
30052-bad_to_user:
30053- movl %edx,%eax
30054- ret
30055- CFI_ENDPROC
30056-ENDPROC(bad_from_user)
30057- .previous
30058-
30059 /*
30060 * copy_user_generic_unrolled - memory copy with exception handling.
30061 * This version is for CPUs like P4 that don't have efficient micro
30062@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
30063 */
30064 ENTRY(copy_user_generic_unrolled)
30065 CFI_STARTPROC
30066+ ASM_PAX_OPEN_USERLAND
30067 ASM_STAC
30068 cmpl $8,%edx
30069 	jb 20f /* less than 8 bytes, go to byte copy loop */
30070@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
30071 jnz 21b
30072 23: xor %eax,%eax
30073 ASM_CLAC
30074+ ASM_PAX_CLOSE_USERLAND
30075+ pax_force_retaddr
30076 ret
30077
30078 .section .fixup,"ax"
30079@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
30080 */
30081 ENTRY(copy_user_generic_string)
30082 CFI_STARTPROC
30083+ ASM_PAX_OPEN_USERLAND
30084 ASM_STAC
30085 cmpl $8,%edx
30086 jb 2f /* less than 8 bytes, go to byte copy loop */
30087@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
30088 movsb
30089 xorl %eax,%eax
30090 ASM_CLAC
30091+ ASM_PAX_CLOSE_USERLAND
30092+ pax_force_retaddr
30093 ret
30094
30095 .section .fixup,"ax"
30096@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
30097 */
30098 ENTRY(copy_user_enhanced_fast_string)
30099 CFI_STARTPROC
30100+ ASM_PAX_OPEN_USERLAND
30101 ASM_STAC
30102 movl %edx,%ecx
30103 1: rep
30104 movsb
30105 xorl %eax,%eax
30106 ASM_CLAC
30107+ ASM_PAX_CLOSE_USERLAND
30108+ pax_force_retaddr
30109 ret
30110
30111 .section .fixup,"ax"
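
The deleted _copy_to_user/_copy_from_user entry points did their addr_limit checks in assembly; with this patch the range checking moves into C wrappers, where PaX can layer its size instrumentation on top, and the surviving asm fast paths are bracketed by ASM_PAX_OPEN_USERLAND/ASM_PAX_CLOSE_USERLAND, the UDEREF analogue of the SMAP STAC/CLAC pair alongside them. A sketch of the C-side replacement, preserving the removed bad_from_user behaviour of zero-filling the destination (assumed shape; the real wrapper lives in the uaccess headers):

	/* sketch: range-checked copy_from_user replacing the asm stub */
	static inline unsigned long
	checked_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		if (!access_ok(VERIFY_READ, from, n)) {
			memset(to, 0, n);	/* as the old bad_from_user did */
			return n;		/* report everything uncopied */
		}
		return copy_user_generic(to, (__force const void *)from, n);
	}
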
30112diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
30113index 6a4f43c..c70fb52 100644
30114--- a/arch/x86/lib/copy_user_nocache_64.S
30115+++ b/arch/x86/lib/copy_user_nocache_64.S
30116@@ -8,6 +8,7 @@
30117
30118 #include <linux/linkage.h>
30119 #include <asm/dwarf2.h>
30120+#include <asm/alternative-asm.h>
30121
30122 #define FIX_ALIGNMENT 1
30123
30124@@ -16,6 +17,7 @@
30125 #include <asm/thread_info.h>
30126 #include <asm/asm.h>
30127 #include <asm/smap.h>
30128+#include <asm/pgtable.h>
30129
30130 .macro ALIGN_DESTINATION
30131 #ifdef FIX_ALIGNMENT
30132@@ -49,6 +51,16 @@
30133 */
30134 ENTRY(__copy_user_nocache)
30135 CFI_STARTPROC
30136+
30137+#ifdef CONFIG_PAX_MEMORY_UDEREF
30138+ mov pax_user_shadow_base,%rcx
30139+ cmp %rcx,%rsi
30140+ jae 1f
30141+ add %rcx,%rsi
30142+1:
30143+#endif
30144+
30145+ ASM_PAX_OPEN_USERLAND
30146 ASM_STAC
30147 cmpl $8,%edx
30148 jb 20f /* less then 8 bytes, go to byte copy loop */
30149 	jb 20f /* less than 8 bytes, go to byte copy loop */
30150 jnz 21b
30151 23: xorl %eax,%eax
30152 ASM_CLAC
30153+ ASM_PAX_CLOSE_USERLAND
30154 sfence
30155+ pax_force_retaddr
30156 ret
30157
30158 .section .fixup,"ax"
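
The new prologue performs the amd64 UDEREF rebase: with PAX_MEMORY_UDEREF, userland is reachable from kernel mode only at an offset of pax_user_shadow_base, so a raw user address below the base is shifted up before the nocache copy dereferences it. The same compare-and-add appears again in getuser.S below. In C terms (sketch; the helper name is invented):

	extern unsigned long pax_user_shadow_base;	/* provided by PaX, see the hunk above */

	/* sketch: rebase a user pointer into the UDEREF shadow mapping */
	static inline const void __user *uderef_rebase(const void __user *p)
	{
		unsigned long addr = (unsigned long)p;

		if (addr < pax_user_shadow_base)	/* not yet shadowed */
			addr += pax_user_shadow_base;
		return (__force const void __user *)addr;
	}
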
30159diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
30160index 2419d5f..fe52d0e 100644
30161--- a/arch/x86/lib/csum-copy_64.S
30162+++ b/arch/x86/lib/csum-copy_64.S
30163@@ -9,6 +9,7 @@
30164 #include <asm/dwarf2.h>
30165 #include <asm/errno.h>
30166 #include <asm/asm.h>
30167+#include <asm/alternative-asm.h>
30168
30169 /*
30170 * Checksum copy with exception handling.
30171@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
30172 CFI_ADJUST_CFA_OFFSET 7*8
30173 movq %rbx, 2*8(%rsp)
30174 CFI_REL_OFFSET rbx, 2*8
30175- movq %r12, 3*8(%rsp)
30176- CFI_REL_OFFSET r12, 3*8
30177+ movq %r15, 3*8(%rsp)
30178+ CFI_REL_OFFSET r15, 3*8
30179 movq %r14, 4*8(%rsp)
30180 CFI_REL_OFFSET r14, 4*8
30181 movq %r13, 5*8(%rsp)
30182@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
30183 movl %edx, %ecx
30184
30185 xorl %r9d, %r9d
30186- movq %rcx, %r12
30187+ movq %rcx, %r15
30188
30189- shrq $6, %r12
30190+ shrq $6, %r15
30191 jz .Lhandle_tail /* < 64 */
30192
30193 clc
30194
30195 /* main loop. clear in 64 byte blocks */
30196 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
30197- /* r11: temp3, rdx: temp4, r12 loopcnt */
30198+ /* r11: temp3, rdx: temp4, r15 loopcnt */
30199 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
30200 .p2align 4
30201 .Lloop:
30202@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
30203 adcq %r14, %rax
30204 adcq %r13, %rax
30205
30206- decl %r12d
30207+ decl %r15d
30208
30209 dest
30210 movq %rbx, (%rsi)
30211@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
30212 .Lende:
30213 movq 2*8(%rsp), %rbx
30214 CFI_RESTORE rbx
30215- movq 3*8(%rsp), %r12
30216- CFI_RESTORE r12
30217+ movq 3*8(%rsp), %r15
30218+ CFI_RESTORE r15
30219 movq 4*8(%rsp), %r14
30220 CFI_RESTORE r14
30221 movq 5*8(%rsp), %r13
30222@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
30223 CFI_RESTORE rbp
30224 addq $7*8, %rsp
30225 CFI_ADJUST_CFA_OFFSET -7*8
30226+ pax_force_retaddr
30227 ret
30228 CFI_RESTORE_STATE
30229
30230diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
30231index 7609e0e..b449b98 100644
30232--- a/arch/x86/lib/csum-wrappers_64.c
30233+++ b/arch/x86/lib/csum-wrappers_64.c
30234@@ -53,10 +53,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
30235 len -= 2;
30236 }
30237 }
30238+ pax_open_userland();
30239 stac();
30240- isum = csum_partial_copy_generic((__force const void *)src,
30241+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
30242 dst, len, isum, errp, NULL);
30243 clac();
30244+ pax_close_userland();
30245 if (unlikely(*errp))
30246 goto out_err;
30247
30248@@ -110,10 +112,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
30249 }
30250
30251 *errp = 0;
30252+ pax_open_userland();
30253 stac();
30254- ret = csum_partial_copy_generic(src, (void __force *)dst,
30255+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
30256 len, isum, NULL, errp);
30257 clac();
30258+ pax_close_userland();
30259 return ret;
30260 }
30261 EXPORT_SYMBOL(csum_partial_copy_to_user);
30262diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
30263index a451235..1daa956 100644
30264--- a/arch/x86/lib/getuser.S
30265+++ b/arch/x86/lib/getuser.S
30266@@ -33,17 +33,40 @@
30267 #include <asm/thread_info.h>
30268 #include <asm/asm.h>
30269 #include <asm/smap.h>
30270+#include <asm/segment.h>
30271+#include <asm/pgtable.h>
30272+#include <asm/alternative-asm.h>
30273+
30274+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30275+#define __copyuser_seg gs;
30276+#else
30277+#define __copyuser_seg
30278+#endif
30279
30280 .text
30281 ENTRY(__get_user_1)
30282 CFI_STARTPROC
30283+
30284+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30285 GET_THREAD_INFO(%_ASM_DX)
30286 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30287 jae bad_get_user
30288 ASM_STAC
30289-1: movzbl (%_ASM_AX),%edx
30290+
30291+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30292+ mov pax_user_shadow_base,%_ASM_DX
30293+ cmp %_ASM_DX,%_ASM_AX
30294+ jae 1234f
30295+ add %_ASM_DX,%_ASM_AX
30296+1234:
30297+#endif
30298+
30299+#endif
30300+
30301+1: __copyuser_seg movzbl (%_ASM_AX),%edx
30302 xor %eax,%eax
30303 ASM_CLAC
30304+ pax_force_retaddr
30305 ret
30306 CFI_ENDPROC
30307 ENDPROC(__get_user_1)
30308@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
30309 ENTRY(__get_user_2)
30310 CFI_STARTPROC
30311 add $1,%_ASM_AX
30312+
30313+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30314 jc bad_get_user
30315 GET_THREAD_INFO(%_ASM_DX)
30316 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30317 jae bad_get_user
30318 ASM_STAC
30319-2: movzwl -1(%_ASM_AX),%edx
30320+
30321+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30322+ mov pax_user_shadow_base,%_ASM_DX
30323+ cmp %_ASM_DX,%_ASM_AX
30324+ jae 1234f
30325+ add %_ASM_DX,%_ASM_AX
30326+1234:
30327+#endif
30328+
30329+#endif
30330+
30331+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
30332 xor %eax,%eax
30333 ASM_CLAC
30334+ pax_force_retaddr
30335 ret
30336 CFI_ENDPROC
30337 ENDPROC(__get_user_2)
30338@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
30339 ENTRY(__get_user_4)
30340 CFI_STARTPROC
30341 add $3,%_ASM_AX
30342+
30343+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30344 jc bad_get_user
30345 GET_THREAD_INFO(%_ASM_DX)
30346 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30347 jae bad_get_user
30348 ASM_STAC
30349-3: movl -3(%_ASM_AX),%edx
30350+
30351+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30352+ mov pax_user_shadow_base,%_ASM_DX
30353+ cmp %_ASM_DX,%_ASM_AX
30354+ jae 1234f
30355+ add %_ASM_DX,%_ASM_AX
30356+1234:
30357+#endif
30358+
30359+#endif
30360+
30361+3: __copyuser_seg movl -3(%_ASM_AX),%edx
30362 xor %eax,%eax
30363 ASM_CLAC
30364+ pax_force_retaddr
30365 ret
30366 CFI_ENDPROC
30367 ENDPROC(__get_user_4)
30368@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
30369 GET_THREAD_INFO(%_ASM_DX)
30370 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30371 jae bad_get_user
30372+
30373+#ifdef CONFIG_PAX_MEMORY_UDEREF
30374+ mov pax_user_shadow_base,%_ASM_DX
30375+ cmp %_ASM_DX,%_ASM_AX
30376+ jae 1234f
30377+ add %_ASM_DX,%_ASM_AX
30378+1234:
30379+#endif
30380+
30381 ASM_STAC
30382 4: movq -7(%_ASM_AX),%rdx
30383 xor %eax,%eax
30384 ASM_CLAC
30385+ pax_force_retaddr
30386 ret
30387 #else
30388 add $7,%_ASM_AX
30389@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
30390 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30391 jae bad_get_user_8
30392 ASM_STAC
30393-4: movl -7(%_ASM_AX),%edx
30394-5: movl -3(%_ASM_AX),%ecx
30395+4: __copyuser_seg movl -7(%_ASM_AX),%edx
30396+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
30397 xor %eax,%eax
30398 ASM_CLAC
30399+ pax_force_retaddr
30400 ret
30401 #endif
30402 CFI_ENDPROC
30403@@ -113,6 +175,7 @@ bad_get_user:
30404 xor %edx,%edx
30405 mov $(-EFAULT),%_ASM_AX
30406 ASM_CLAC
30407+ pax_force_retaddr
30408 ret
30409 CFI_ENDPROC
30410 END(bad_get_user)
30411@@ -124,6 +187,7 @@ bad_get_user_8:
30412 xor %ecx,%ecx
30413 mov $(-EFAULT),%_ASM_AX
30414 ASM_CLAC
30415+ pax_force_retaddr
30416 ret
30417 CFI_ENDPROC
30418 END(bad_get_user_8)
30419diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
30420index 54fcffe..7be149e 100644
30421--- a/arch/x86/lib/insn.c
30422+++ b/arch/x86/lib/insn.c
30423@@ -20,8 +20,10 @@
30424
30425 #ifdef __KERNEL__
30426 #include <linux/string.h>
30427+#include <asm/pgtable_types.h>
30428 #else
30429 #include <string.h>
30430+#define ktla_ktva(addr) addr
30431 #endif
30432 #include <asm/inat.h>
30433 #include <asm/insn.h>
30434@@ -53,8 +55,8 @@
30435 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
30436 {
30437 memset(insn, 0, sizeof(*insn));
30438- insn->kaddr = kaddr;
30439- insn->next_byte = kaddr;
30440+ insn->kaddr = ktla_ktva(kaddr);
30441+ insn->next_byte = ktla_ktva(kaddr);
30442 insn->x86_64 = x86_64 ? 1 : 0;
30443 insn->opnd_bytes = 4;
30444 if (x86_64)
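
insn_init() now canonicalises kaddr through ktla_ktva() so the decoder always reads instruction bytes via the executable text alias that KERNEXEC maintains; when this file is compiled into userspace tooling that shares the decoder, the added #else branch makes the translation an identity. The dual definition at a glance:

	#ifdef __KERNEL__
	#include <asm/pgtable_types.h>	/* supplies ktla_ktva() under KERNEXEC */
	#else
	#define ktla_ktva(addr) addr	/* userspace build: identity mapping */
	#endif
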
30445diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
30446index 05a95e7..326f2fa 100644
30447--- a/arch/x86/lib/iomap_copy_64.S
30448+++ b/arch/x86/lib/iomap_copy_64.S
30449@@ -17,6 +17,7 @@
30450
30451 #include <linux/linkage.h>
30452 #include <asm/dwarf2.h>
30453+#include <asm/alternative-asm.h>
30454
30455 /*
30456 * override generic version in lib/iomap_copy.c
30457@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
30458 CFI_STARTPROC
30459 movl %edx,%ecx
30460 rep movsd
30461+ pax_force_retaddr
30462 ret
30463 CFI_ENDPROC
30464 ENDPROC(__iowrite32_copy)
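This is the simplest instance of a pattern repeated throughout these .S diffs: pax_force_retaddr immediately before every ret. Under the KERNEXEC gcc plugin it expands to a single bts or or instruction on (%rsp) that forces the top bit of the saved return address, so even a corrupted slot can only return into the kernel half of the address space. A hedged C model of the masking step; the exact expansion depends on the configured KERNEXEC method:

    #include <stdint.h>

    /* Model of what pax_force_retaddr does to the saved return address
     * just before 'ret' (one bts/or on (%rsp) in the real expansion). */
    static inline uint64_t kernexec_mask_retaddr(uint64_t retaddr)
    {
        return retaddr | (1ULL << 63);   /* force a kernel-space target */
    }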
30465diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
30466index 56313a3..0db417e 100644
30467--- a/arch/x86/lib/memcpy_64.S
30468+++ b/arch/x86/lib/memcpy_64.S
30469@@ -24,7 +24,7 @@
30470 * This gets patched over the unrolled variant (below) via the
30471 * alternative instructions framework:
30472 */
30473- .section .altinstr_replacement, "ax", @progbits
30474+ .section .altinstr_replacement, "a", @progbits
30475 .Lmemcpy_c:
30476 movq %rdi, %rax
30477 movq %rdx, %rcx
30478@@ -33,6 +33,7 @@
30479 rep movsq
30480 movl %edx, %ecx
30481 rep movsb
30482+ pax_force_retaddr
30483 ret
30484 .Lmemcpy_e:
30485 .previous
30486@@ -44,11 +45,12 @@
30487 * This gets patched over the unrolled variant (below) via the
30488 * alternative instructions framework:
30489 */
30490- .section .altinstr_replacement, "ax", @progbits
30491+ .section .altinstr_replacement, "a", @progbits
30492 .Lmemcpy_c_e:
30493 movq %rdi, %rax
30494 movq %rdx, %rcx
30495 rep movsb
30496+ pax_force_retaddr
30497 ret
30498 .Lmemcpy_e_e:
30499 .previous
30500@@ -136,6 +138,7 @@ ENTRY(memcpy)
30501 movq %r9, 1*8(%rdi)
30502 movq %r10, -2*8(%rdi, %rdx)
30503 movq %r11, -1*8(%rdi, %rdx)
30504+ pax_force_retaddr
30505 retq
30506 .p2align 4
30507 .Lless_16bytes:
30508@@ -148,6 +151,7 @@ ENTRY(memcpy)
30509 movq -1*8(%rsi, %rdx), %r9
30510 movq %r8, 0*8(%rdi)
30511 movq %r9, -1*8(%rdi, %rdx)
30512+ pax_force_retaddr
30513 retq
30514 .p2align 4
30515 .Lless_8bytes:
30516@@ -161,6 +165,7 @@ ENTRY(memcpy)
30517 movl -4(%rsi, %rdx), %r8d
30518 movl %ecx, (%rdi)
30519 movl %r8d, -4(%rdi, %rdx)
30520+ pax_force_retaddr
30521 retq
30522 .p2align 4
30523 .Lless_3bytes:
30524@@ -179,6 +184,7 @@ ENTRY(memcpy)
30525 movb %cl, (%rdi)
30526
30527 .Lend:
30528+ pax_force_retaddr
30529 retq
30530 CFI_ENDPROC
30531 ENDPROC(memcpy)
30532diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
30533index 65268a6..dd1de11 100644
30534--- a/arch/x86/lib/memmove_64.S
30535+++ b/arch/x86/lib/memmove_64.S
30536@@ -202,14 +202,16 @@ ENTRY(memmove)
30537 movb (%rsi), %r11b
30538 movb %r11b, (%rdi)
30539 13:
30540+ pax_force_retaddr
30541 retq
30542 CFI_ENDPROC
30543
30544- .section .altinstr_replacement,"ax"
30545+ .section .altinstr_replacement,"a"
30546 .Lmemmove_begin_forward_efs:
30547 /* Forward moving data. */
30548 movq %rdx, %rcx
30549 rep movsb
30550+ pax_force_retaddr
30551 retq
30552 .Lmemmove_end_forward_efs:
30553 .previous
30554diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
30555index 2dcb380..2eb79fe 100644
30556--- a/arch/x86/lib/memset_64.S
30557+++ b/arch/x86/lib/memset_64.S
30558@@ -16,7 +16,7 @@
30559 *
30560 * rax original destination
30561 */
30562- .section .altinstr_replacement, "ax", @progbits
30563+ .section .altinstr_replacement, "a", @progbits
30564 .Lmemset_c:
30565 movq %rdi,%r9
30566 movq %rdx,%rcx
30567@@ -30,6 +30,7 @@
30568 movl %edx,%ecx
30569 rep stosb
30570 movq %r9,%rax
30571+ pax_force_retaddr
30572 ret
30573 .Lmemset_e:
30574 .previous
30575@@ -45,13 +46,14 @@
30576 *
30577 * rax original destination
30578 */
30579- .section .altinstr_replacement, "ax", @progbits
30580+ .section .altinstr_replacement, "a", @progbits
30581 .Lmemset_c_e:
30582 movq %rdi,%r9
30583 movb %sil,%al
30584 movq %rdx,%rcx
30585 rep stosb
30586 movq %r9,%rax
30587+ pax_force_retaddr
30588 ret
30589 .Lmemset_e_e:
30590 .previous
30591@@ -118,6 +120,7 @@ ENTRY(__memset)
30592
30593 .Lende:
30594 movq %r10,%rax
30595+ pax_force_retaddr
30596 ret
30597
30598 CFI_RESTORE_STATE
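The memcpy, memmove, and memset hunks above all drop the executable flag from .altinstr_replacement ("ax" becomes "a"). The replacement bytes are only ever read by the alternatives patcher and copied over the live code, never executed in place, so under KERNEXEC the section can stay non-executable. A rough model of the copy step; apply_alternatives() is the real kernel routine, the function below is only an illustrative stand-in:

    #include <string.h>

    /* The patcher reads from .altinstr_replacement and writes over the
     * original instructions; execution only ever happens at 'instr'. */
    static void apply_one_alternative(void *instr, const void *replacement,
                                      size_t len)
    {
        memcpy(instr, replacement, len);
    }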
30599diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
30600index c9f2d9b..e7fd2c0 100644
30601--- a/arch/x86/lib/mmx_32.c
30602+++ b/arch/x86/lib/mmx_32.c
30603@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30604 {
30605 void *p;
30606 int i;
30607+ unsigned long cr0;
30608
30609 if (unlikely(in_interrupt()))
30610 return __memcpy(to, from, len);
30611@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30612 kernel_fpu_begin();
30613
30614 __asm__ __volatile__ (
30615- "1: prefetch (%0)\n" /* This set is 28 bytes */
30616- " prefetch 64(%0)\n"
30617- " prefetch 128(%0)\n"
30618- " prefetch 192(%0)\n"
30619- " prefetch 256(%0)\n"
30620+ "1: prefetch (%1)\n" /* This set is 28 bytes */
30621+ " prefetch 64(%1)\n"
30622+ " prefetch 128(%1)\n"
30623+ " prefetch 192(%1)\n"
30624+ " prefetch 256(%1)\n"
30625 "2: \n"
30626 ".section .fixup, \"ax\"\n"
30627- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30628+ "3: \n"
30629+
30630+#ifdef CONFIG_PAX_KERNEXEC
30631+ " movl %%cr0, %0\n"
30632+ " movl %0, %%eax\n"
30633+ " andl $0xFFFEFFFF, %%eax\n"
30634+ " movl %%eax, %%cr0\n"
30635+#endif
30636+
30637+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30638+
30639+#ifdef CONFIG_PAX_KERNEXEC
30640+ " movl %0, %%cr0\n"
30641+#endif
30642+
30643 " jmp 2b\n"
30644 ".previous\n"
30645 _ASM_EXTABLE(1b, 3b)
30646- : : "r" (from));
30647+ : "=&r" (cr0) : "r" (from) : "ax");
30648
30649 for ( ; i > 5; i--) {
30650 __asm__ __volatile__ (
30651- "1: prefetch 320(%0)\n"
30652- "2: movq (%0), %%mm0\n"
30653- " movq 8(%0), %%mm1\n"
30654- " movq 16(%0), %%mm2\n"
30655- " movq 24(%0), %%mm3\n"
30656- " movq %%mm0, (%1)\n"
30657- " movq %%mm1, 8(%1)\n"
30658- " movq %%mm2, 16(%1)\n"
30659- " movq %%mm3, 24(%1)\n"
30660- " movq 32(%0), %%mm0\n"
30661- " movq 40(%0), %%mm1\n"
30662- " movq 48(%0), %%mm2\n"
30663- " movq 56(%0), %%mm3\n"
30664- " movq %%mm0, 32(%1)\n"
30665- " movq %%mm1, 40(%1)\n"
30666- " movq %%mm2, 48(%1)\n"
30667- " movq %%mm3, 56(%1)\n"
30668+ "1: prefetch 320(%1)\n"
30669+ "2: movq (%1), %%mm0\n"
30670+ " movq 8(%1), %%mm1\n"
30671+ " movq 16(%1), %%mm2\n"
30672+ " movq 24(%1), %%mm3\n"
30673+ " movq %%mm0, (%2)\n"
30674+ " movq %%mm1, 8(%2)\n"
30675+ " movq %%mm2, 16(%2)\n"
30676+ " movq %%mm3, 24(%2)\n"
30677+ " movq 32(%1), %%mm0\n"
30678+ " movq 40(%1), %%mm1\n"
30679+ " movq 48(%1), %%mm2\n"
30680+ " movq 56(%1), %%mm3\n"
30681+ " movq %%mm0, 32(%2)\n"
30682+ " movq %%mm1, 40(%2)\n"
30683+ " movq %%mm2, 48(%2)\n"
30684+ " movq %%mm3, 56(%2)\n"
30685 ".section .fixup, \"ax\"\n"
30686- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30687+ "3:\n"
30688+
30689+#ifdef CONFIG_PAX_KERNEXEC
30690+ " movl %%cr0, %0\n"
30691+ " movl %0, %%eax\n"
30692+ " andl $0xFFFEFFFF, %%eax\n"
30693+ " movl %%eax, %%cr0\n"
30694+#endif
30695+
30696+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30697+
30698+#ifdef CONFIG_PAX_KERNEXEC
30699+ " movl %0, %%cr0\n"
30700+#endif
30701+
30702 " jmp 2b\n"
30703 ".previous\n"
30704 _ASM_EXTABLE(1b, 3b)
30705- : : "r" (from), "r" (to) : "memory");
30706+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30707
30708 from += 64;
30709 to += 64;
30710@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
30711 static void fast_copy_page(void *to, void *from)
30712 {
30713 int i;
30714+ unsigned long cr0;
30715
30716 kernel_fpu_begin();
30717
30718@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
30719 * but that is for later. -AV
30720 */
30721 __asm__ __volatile__(
30722- "1: prefetch (%0)\n"
30723- " prefetch 64(%0)\n"
30724- " prefetch 128(%0)\n"
30725- " prefetch 192(%0)\n"
30726- " prefetch 256(%0)\n"
30727+ "1: prefetch (%1)\n"
30728+ " prefetch 64(%1)\n"
30729+ " prefetch 128(%1)\n"
30730+ " prefetch 192(%1)\n"
30731+ " prefetch 256(%1)\n"
30732 "2: \n"
30733 ".section .fixup, \"ax\"\n"
30734- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30735+ "3: \n"
30736+
30737+#ifdef CONFIG_PAX_KERNEXEC
30738+ " movl %%cr0, %0\n"
30739+ " movl %0, %%eax\n"
30740+ " andl $0xFFFEFFFF, %%eax\n"
30741+ " movl %%eax, %%cr0\n"
30742+#endif
30743+
30744+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30745+
30746+#ifdef CONFIG_PAX_KERNEXEC
30747+ " movl %0, %%cr0\n"
30748+#endif
30749+
30750 " jmp 2b\n"
30751 ".previous\n"
30752- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30753+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30754
30755 for (i = 0; i < (4096-320)/64; i++) {
30756 __asm__ __volatile__ (
30757- "1: prefetch 320(%0)\n"
30758- "2: movq (%0), %%mm0\n"
30759- " movntq %%mm0, (%1)\n"
30760- " movq 8(%0), %%mm1\n"
30761- " movntq %%mm1, 8(%1)\n"
30762- " movq 16(%0), %%mm2\n"
30763- " movntq %%mm2, 16(%1)\n"
30764- " movq 24(%0), %%mm3\n"
30765- " movntq %%mm3, 24(%1)\n"
30766- " movq 32(%0), %%mm4\n"
30767- " movntq %%mm4, 32(%1)\n"
30768- " movq 40(%0), %%mm5\n"
30769- " movntq %%mm5, 40(%1)\n"
30770- " movq 48(%0), %%mm6\n"
30771- " movntq %%mm6, 48(%1)\n"
30772- " movq 56(%0), %%mm7\n"
30773- " movntq %%mm7, 56(%1)\n"
30774+ "1: prefetch 320(%1)\n"
30775+ "2: movq (%1), %%mm0\n"
30776+ " movntq %%mm0, (%2)\n"
30777+ " movq 8(%1), %%mm1\n"
30778+ " movntq %%mm1, 8(%2)\n"
30779+ " movq 16(%1), %%mm2\n"
30780+ " movntq %%mm2, 16(%2)\n"
30781+ " movq 24(%1), %%mm3\n"
30782+ " movntq %%mm3, 24(%2)\n"
30783+ " movq 32(%1), %%mm4\n"
30784+ " movntq %%mm4, 32(%2)\n"
30785+ " movq 40(%1), %%mm5\n"
30786+ " movntq %%mm5, 40(%2)\n"
30787+ " movq 48(%1), %%mm6\n"
30788+ " movntq %%mm6, 48(%2)\n"
30789+ " movq 56(%1), %%mm7\n"
30790+ " movntq %%mm7, 56(%2)\n"
30791 ".section .fixup, \"ax\"\n"
30792- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30793+ "3:\n"
30794+
30795+#ifdef CONFIG_PAX_KERNEXEC
30796+ " movl %%cr0, %0\n"
30797+ " movl %0, %%eax\n"
30798+ " andl $0xFFFEFFFF, %%eax\n"
30799+ " movl %%eax, %%cr0\n"
30800+#endif
30801+
30802+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30803+
30804+#ifdef CONFIG_PAX_KERNEXEC
30805+ " movl %0, %%cr0\n"
30806+#endif
30807+
30808 " jmp 2b\n"
30809 ".previous\n"
30810- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
30811+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30812
30813 from += 64;
30814 to += 64;
30815@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
30816 static void fast_copy_page(void *to, void *from)
30817 {
30818 int i;
30819+ unsigned long cr0;
30820
30821 kernel_fpu_begin();
30822
30823 __asm__ __volatile__ (
30824- "1: prefetch (%0)\n"
30825- " prefetch 64(%0)\n"
30826- " prefetch 128(%0)\n"
30827- " prefetch 192(%0)\n"
30828- " prefetch 256(%0)\n"
30829+ "1: prefetch (%1)\n"
30830+ " prefetch 64(%1)\n"
30831+ " prefetch 128(%1)\n"
30832+ " prefetch 192(%1)\n"
30833+ " prefetch 256(%1)\n"
30834 "2: \n"
30835 ".section .fixup, \"ax\"\n"
30836- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30837+ "3: \n"
30838+
30839+#ifdef CONFIG_PAX_KERNEXEC
30840+ " movl %%cr0, %0\n"
30841+ " movl %0, %%eax\n"
30842+ " andl $0xFFFEFFFF, %%eax\n"
30843+ " movl %%eax, %%cr0\n"
30844+#endif
30845+
30846+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30847+
30848+#ifdef CONFIG_PAX_KERNEXEC
30849+ " movl %0, %%cr0\n"
30850+#endif
30851+
30852 " jmp 2b\n"
30853 ".previous\n"
30854- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30855+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30856
30857 for (i = 0; i < 4096/64; i++) {
30858 __asm__ __volatile__ (
30859- "1: prefetch 320(%0)\n"
30860- "2: movq (%0), %%mm0\n"
30861- " movq 8(%0), %%mm1\n"
30862- " movq 16(%0), %%mm2\n"
30863- " movq 24(%0), %%mm3\n"
30864- " movq %%mm0, (%1)\n"
30865- " movq %%mm1, 8(%1)\n"
30866- " movq %%mm2, 16(%1)\n"
30867- " movq %%mm3, 24(%1)\n"
30868- " movq 32(%0), %%mm0\n"
30869- " movq 40(%0), %%mm1\n"
30870- " movq 48(%0), %%mm2\n"
30871- " movq 56(%0), %%mm3\n"
30872- " movq %%mm0, 32(%1)\n"
30873- " movq %%mm1, 40(%1)\n"
30874- " movq %%mm2, 48(%1)\n"
30875- " movq %%mm3, 56(%1)\n"
30876+ "1: prefetch 320(%1)\n"
30877+ "2: movq (%1), %%mm0\n"
30878+ " movq 8(%1), %%mm1\n"
30879+ " movq 16(%1), %%mm2\n"
30880+ " movq 24(%1), %%mm3\n"
30881+ " movq %%mm0, (%2)\n"
30882+ " movq %%mm1, 8(%2)\n"
30883+ " movq %%mm2, 16(%2)\n"
30884+ " movq %%mm3, 24(%2)\n"
30885+ " movq 32(%1), %%mm0\n"
30886+ " movq 40(%1), %%mm1\n"
30887+ " movq 48(%1), %%mm2\n"
30888+ " movq 56(%1), %%mm3\n"
30889+ " movq %%mm0, 32(%2)\n"
30890+ " movq %%mm1, 40(%2)\n"
30891+ " movq %%mm2, 48(%2)\n"
30892+ " movq %%mm3, 56(%2)\n"
30893 ".section .fixup, \"ax\"\n"
30894- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30895+ "3:\n"
30896+
30897+#ifdef CONFIG_PAX_KERNEXEC
30898+ " movl %%cr0, %0\n"
30899+ " movl %0, %%eax\n"
30900+ " andl $0xFFFEFFFF, %%eax\n"
30901+ " movl %%eax, %%cr0\n"
30902+#endif
30903+
30904+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30905+
30906+#ifdef CONFIG_PAX_KERNEXEC
30907+ " movl %0, %%cr0\n"
30908+#endif
30909+
30910 " jmp 2b\n"
30911 ".previous\n"
30912 _ASM_EXTABLE(1b, 3b)
30913- : : "r" (from), "r" (to) : "memory");
30914+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30915
30916 from += 64;
30917 to += 64;
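The mmx_32.c fixup paths self-patch: on a faulting prefetch they store a two-byte short jmp (0x1AEB is "jmp +26" little-endian, 0x05EB is "jmp +5") over the instruction at label 1. That is a write into kernel text, which KERNEXEC maps read-only, so the patch brackets the store with a CR0.WP toggle: 0xFFFEFFFF clears bit 16 (WP). The new "=&r"(cr0) output and the "ax" clobber in the constraints exist for exactly this sequence. A sketch of the bracketing, with stand-in cr0 accessors since CR0 is unreachable from user space:

    #include <stdint.h>

    #define X86_CR0_WP (1UL << 16)

    /* Stand-ins for the kernel's read_cr0()/write_cr0(), for illustration. */
    static unsigned long model_cr0 = X86_CR0_WP;
    static unsigned long read_cr0(void)             { return model_cr0; }
    static void          write_cr0(unsigned long v) { model_cr0 = v; }

    static void patch_text_with_wp_toggle(void (*write_short_jmp)(void))
    {
        unsigned long cr0 = read_cr0();

        write_cr0(cr0 & 0xFFFEFFFFUL);  /* andl $0xFFFEFFFF: clear CR0.WP */
        write_short_jmp();              /* movw $0x1AEB, 1b */
        write_cr0(cr0);                 /* restore WP */
    }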
30918diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
30919index f6d13ee..d789440 100644
30920--- a/arch/x86/lib/msr-reg.S
30921+++ b/arch/x86/lib/msr-reg.S
30922@@ -3,6 +3,7 @@
30923 #include <asm/dwarf2.h>
30924 #include <asm/asm.h>
30925 #include <asm/msr.h>
30926+#include <asm/alternative-asm.h>
30927
30928 #ifdef CONFIG_X86_64
30929 /*
30930@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
30931 movl %edi, 28(%r10)
30932 popq_cfi %rbp
30933 popq_cfi %rbx
30934+ pax_force_retaddr
30935 ret
30936 3:
30937 CFI_RESTORE_STATE
30938diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
30939index fc6ba17..d4d989d 100644
30940--- a/arch/x86/lib/putuser.S
30941+++ b/arch/x86/lib/putuser.S
30942@@ -16,7 +16,9 @@
30943 #include <asm/errno.h>
30944 #include <asm/asm.h>
30945 #include <asm/smap.h>
30946-
30947+#include <asm/segment.h>
30948+#include <asm/pgtable.h>
30949+#include <asm/alternative-asm.h>
30950
30951 /*
30952 * __put_user_X
30953@@ -30,57 +32,125 @@
30954 * as they get called from within inline assembly.
30955 */
30956
30957-#define ENTER CFI_STARTPROC ; \
30958- GET_THREAD_INFO(%_ASM_BX)
30959-#define EXIT ASM_CLAC ; \
30960- ret ; \
30961+#define ENTER CFI_STARTPROC
30962+#define EXIT ASM_CLAC ; \
30963+ pax_force_retaddr ; \
30964+ ret ; \
30965 CFI_ENDPROC
30966
30967+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30968+#define _DEST %_ASM_CX,%_ASM_BX
30969+#else
30970+#define _DEST %_ASM_CX
30971+#endif
30972+
30973+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30974+#define __copyuser_seg gs;
30975+#else
30976+#define __copyuser_seg
30977+#endif
30978+
30979 .text
30980 ENTRY(__put_user_1)
30981 ENTER
30982+
30983+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30984+ GET_THREAD_INFO(%_ASM_BX)
30985 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
30986 jae bad_put_user
30987 ASM_STAC
30988-1: movb %al,(%_ASM_CX)
30989+
30990+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30991+ mov pax_user_shadow_base,%_ASM_BX
30992+ cmp %_ASM_BX,%_ASM_CX
30993+ jb 1234f
30994+ xor %ebx,%ebx
30995+1234:
30996+#endif
30997+
30998+#endif
30999+
31000+1: __copyuser_seg movb %al,(_DEST)
31001 xor %eax,%eax
31002 EXIT
31003 ENDPROC(__put_user_1)
31004
31005 ENTRY(__put_user_2)
31006 ENTER
31007+
31008+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31009+ GET_THREAD_INFO(%_ASM_BX)
31010 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
31011 sub $1,%_ASM_BX
31012 cmp %_ASM_BX,%_ASM_CX
31013 jae bad_put_user
31014 ASM_STAC
31015-2: movw %ax,(%_ASM_CX)
31016+
31017+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31018+ mov pax_user_shadow_base,%_ASM_BX
31019+ cmp %_ASM_BX,%_ASM_CX
31020+ jb 1234f
31021+ xor %ebx,%ebx
31022+1234:
31023+#endif
31024+
31025+#endif
31026+
31027+2: __copyuser_seg movw %ax,(_DEST)
31028 xor %eax,%eax
31029 EXIT
31030 ENDPROC(__put_user_2)
31031
31032 ENTRY(__put_user_4)
31033 ENTER
31034+
31035+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31036+ GET_THREAD_INFO(%_ASM_BX)
31037 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
31038 sub $3,%_ASM_BX
31039 cmp %_ASM_BX,%_ASM_CX
31040 jae bad_put_user
31041 ASM_STAC
31042-3: movl %eax,(%_ASM_CX)
31043+
31044+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31045+ mov pax_user_shadow_base,%_ASM_BX
31046+ cmp %_ASM_BX,%_ASM_CX
31047+ jb 1234f
31048+ xor %ebx,%ebx
31049+1234:
31050+#endif
31051+
31052+#endif
31053+
31054+3: __copyuser_seg movl %eax,(_DEST)
31055 xor %eax,%eax
31056 EXIT
31057 ENDPROC(__put_user_4)
31058
31059 ENTRY(__put_user_8)
31060 ENTER
31061+
31062+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31063+ GET_THREAD_INFO(%_ASM_BX)
31064 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
31065 sub $7,%_ASM_BX
31066 cmp %_ASM_BX,%_ASM_CX
31067 jae bad_put_user
31068 ASM_STAC
31069-4: mov %_ASM_AX,(%_ASM_CX)
31070+
31071+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31072+ mov pax_user_shadow_base,%_ASM_BX
31073+ cmp %_ASM_BX,%_ASM_CX
31074+ jb 1234f
31075+ xor %ebx,%ebx
31076+1234:
31077+#endif
31078+
31079+#endif
31080+
31081+4: __copyuser_seg mov %_ASM_AX,(_DEST)
31082 #ifdef CONFIG_X86_32
31083-5: movl %edx,4(%_ASM_CX)
31084+5: __copyuser_seg movl %edx,4(_DEST)
31085 #endif
31086 xor %eax,%eax
31087 EXIT
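__put_user_* gets the same UDEREF treatment as __get_user_*, but expressed through addressing rather than an add: _DEST is (%_ASM_CX,%_ASM_BX), %_ASM_BX holds pax_user_shadow_base, and the jb/xor pair zeroes it when the pointer is already at or above the base, so the store lands either in the shadow mapping or at the original address. Equivalent C, reusing the placeholder base from the earlier sketch:

    #include <stdint.h>

    static const uint64_t pax_user_shadow_base = 0xffff800000000000ULL; /* placeholder */

    /* mov base,%_ASM_BX ; cmp %_ASM_BX,%_ASM_CX ; jb 1234f ; xor %ebx,%ebx ; 1234: */
    static uint64_t uderef_put_dest(uint64_t uaddr)
    {
        uint64_t base = pax_user_shadow_base;

        if (uaddr >= base)        /* jb not taken */
            base = 0;             /* xor %ebx,%ebx */
        return uaddr + base;      /* (_DEST) == (%_ASM_CX,%_ASM_BX) */
    }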
31088diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
31089index 1cad221..de671ee 100644
31090--- a/arch/x86/lib/rwlock.S
31091+++ b/arch/x86/lib/rwlock.S
31092@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
31093 FRAME
31094 0: LOCK_PREFIX
31095 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
31096+
31097+#ifdef CONFIG_PAX_REFCOUNT
31098+ jno 1234f
31099+ LOCK_PREFIX
31100+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
31101+ int $4
31102+1234:
31103+ _ASM_EXTABLE(1234b, 1234b)
31104+#endif
31105+
31106 1: rep; nop
31107 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
31108 jne 1b
31109 LOCK_PREFIX
31110 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
31111+
31112+#ifdef CONFIG_PAX_REFCOUNT
31113+ jno 1234f
31114+ LOCK_PREFIX
31115+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
31116+ int $4
31117+1234:
31118+ _ASM_EXTABLE(1234b, 1234b)
31119+#endif
31120+
31121 jnz 0b
31122 ENDFRAME
31123+ pax_force_retaddr
31124 ret
31125 CFI_ENDPROC
31126 END(__write_lock_failed)
31127@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
31128 FRAME
31129 0: LOCK_PREFIX
31130 READ_LOCK_SIZE(inc) (%__lock_ptr)
31131+
31132+#ifdef CONFIG_PAX_REFCOUNT
31133+ jno 1234f
31134+ LOCK_PREFIX
31135+ READ_LOCK_SIZE(dec) (%__lock_ptr)
31136+ int $4
31137+1234:
31138+ _ASM_EXTABLE(1234b, 1234b)
31139+#endif
31140+
31141 1: rep; nop
31142 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
31143 js 1b
31144 LOCK_PREFIX
31145 READ_LOCK_SIZE(dec) (%__lock_ptr)
31146+
31147+#ifdef CONFIG_PAX_REFCOUNT
31148+ jno 1234f
31149+ LOCK_PREFIX
31150+ READ_LOCK_SIZE(inc) (%__lock_ptr)
31151+ int $4
31152+1234:
31153+ _ASM_EXTABLE(1234b, 1234b)
31154+#endif
31155+
31156 js 0b
31157 ENDFRAME
31158+ pax_force_retaddr
31159 ret
31160 CFI_ENDPROC
31161 END(__read_lock_failed)
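The rwlock hunks show the PAX_REFCOUNT idiom used throughout the patch: after each LOCK'd add/sub/inc/dec, jno falls through when there is no signed overflow; on overflow the operation is undone and int $4 raises #OF, which the PaX trap handler turns into a controlled kill, the _ASM_EXTABLE(1234b, 1234b) entry letting execution resume in place. A C model of the check-undo-trap shape, computing the result in a wider type since signed overflow is undefined in C:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <limits.h>

    static int refcount_op_checked(int *counter, int delta)
    {
        int64_t wide = (int64_t)*counter + delta;   /* LOCK add/sub/inc/dec */

        if (wide > INT_MAX || wide < INT_MIN) {     /* 'jno' path not taken */
            /* the asm re-applies the inverse op here before trapping */
            fprintf(stderr, "PAX: refcount overflow\n");
            abort();                                /* stand-in for 'int $4' */
        }
        *counter = (int)wide;
        return *counter;
    }

    int main(void)
    {
        int lock = 0;
        printf("%d\n", refcount_op_checked(&lock, 1));
        return 0;
    }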
31162diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
31163index 5dff5f0..cadebf4 100644
31164--- a/arch/x86/lib/rwsem.S
31165+++ b/arch/x86/lib/rwsem.S
31166@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
31167 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31168 CFI_RESTORE __ASM_REG(dx)
31169 restore_common_regs
31170+ pax_force_retaddr
31171 ret
31172 CFI_ENDPROC
31173 ENDPROC(call_rwsem_down_read_failed)
31174@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
31175 movq %rax,%rdi
31176 call rwsem_down_write_failed
31177 restore_common_regs
31178+ pax_force_retaddr
31179 ret
31180 CFI_ENDPROC
31181 ENDPROC(call_rwsem_down_write_failed)
31182@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
31183 movq %rax,%rdi
31184 call rwsem_wake
31185 restore_common_regs
31186-1: ret
31187+1: pax_force_retaddr
31188+ ret
31189 CFI_ENDPROC
31190 ENDPROC(call_rwsem_wake)
31191
31192@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
31193 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31194 CFI_RESTORE __ASM_REG(dx)
31195 restore_common_regs
31196+ pax_force_retaddr
31197 ret
31198 CFI_ENDPROC
31199 ENDPROC(call_rwsem_downgrade_wake)
31200diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
31201index 92d9fea..b2762c8 100644
31202--- a/arch/x86/lib/thunk_64.S
31203+++ b/arch/x86/lib/thunk_64.S
31204@@ -9,6 +9,7 @@
31205 #include <asm/dwarf2.h>
31206 #include <asm/calling.h>
31207 #include <asm/asm.h>
31208+#include <asm/alternative-asm.h>
31209
31210 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
31211 .macro THUNK name, func, put_ret_addr_in_rdi=0
31212@@ -16,11 +17,11 @@
31213 \name:
31214 CFI_STARTPROC
31215
31216- /* this one pushes 9 elems, the next one would be %rIP */
31217- SAVE_ARGS
31218+ /* this one pushes 15+1 elems, the next one would be %rIP */
31219+ SAVE_ARGS 8
31220
31221 .if \put_ret_addr_in_rdi
31222- movq_cfi_restore 9*8, rdi
31223+ movq_cfi_restore RIP, rdi
31224 .endif
31225
31226 call \func
31227@@ -40,9 +41,10 @@
31228
31229 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
31230 CFI_STARTPROC
31231- SAVE_ARGS
31232+ SAVE_ARGS 8
31233 restore:
31234- RESTORE_ARGS
31235+ RESTORE_ARGS 1,8
31236+ pax_force_retaddr
31237 ret
31238 CFI_ENDPROC
31239 _ASM_NOKPROBE(restore)
31240diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
31241index e2f5e21..4b22130 100644
31242--- a/arch/x86/lib/usercopy_32.c
31243+++ b/arch/x86/lib/usercopy_32.c
31244@@ -42,11 +42,13 @@ do { \
31245 int __d0; \
31246 might_fault(); \
31247 __asm__ __volatile__( \
31248+ __COPYUSER_SET_ES \
31249 ASM_STAC "\n" \
31250 "0: rep; stosl\n" \
31251 " movl %2,%0\n" \
31252 "1: rep; stosb\n" \
31253 "2: " ASM_CLAC "\n" \
31254+ __COPYUSER_RESTORE_ES \
31255 ".section .fixup,\"ax\"\n" \
31256 "3: lea 0(%2,%0,4),%0\n" \
31257 " jmp 2b\n" \
31258@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
31259
31260 #ifdef CONFIG_X86_INTEL_USERCOPY
31261 static unsigned long
31262-__copy_user_intel(void __user *to, const void *from, unsigned long size)
31263+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
31264 {
31265 int d0, d1;
31266 __asm__ __volatile__(
31267@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31268 " .align 2,0x90\n"
31269 "3: movl 0(%4), %%eax\n"
31270 "4: movl 4(%4), %%edx\n"
31271- "5: movl %%eax, 0(%3)\n"
31272- "6: movl %%edx, 4(%3)\n"
31273+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
31274+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
31275 "7: movl 8(%4), %%eax\n"
31276 "8: movl 12(%4),%%edx\n"
31277- "9: movl %%eax, 8(%3)\n"
31278- "10: movl %%edx, 12(%3)\n"
31279+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
31280+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
31281 "11: movl 16(%4), %%eax\n"
31282 "12: movl 20(%4), %%edx\n"
31283- "13: movl %%eax, 16(%3)\n"
31284- "14: movl %%edx, 20(%3)\n"
31285+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
31286+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
31287 "15: movl 24(%4), %%eax\n"
31288 "16: movl 28(%4), %%edx\n"
31289- "17: movl %%eax, 24(%3)\n"
31290- "18: movl %%edx, 28(%3)\n"
31291+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
31292+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
31293 "19: movl 32(%4), %%eax\n"
31294 "20: movl 36(%4), %%edx\n"
31295- "21: movl %%eax, 32(%3)\n"
31296- "22: movl %%edx, 36(%3)\n"
31297+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
31298+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
31299 "23: movl 40(%4), %%eax\n"
31300 "24: movl 44(%4), %%edx\n"
31301- "25: movl %%eax, 40(%3)\n"
31302- "26: movl %%edx, 44(%3)\n"
31303+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
31304+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
31305 "27: movl 48(%4), %%eax\n"
31306 "28: movl 52(%4), %%edx\n"
31307- "29: movl %%eax, 48(%3)\n"
31308- "30: movl %%edx, 52(%3)\n"
31309+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
31310+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
31311 "31: movl 56(%4), %%eax\n"
31312 "32: movl 60(%4), %%edx\n"
31313- "33: movl %%eax, 56(%3)\n"
31314- "34: movl %%edx, 60(%3)\n"
31315+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
31316+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
31317 " addl $-64, %0\n"
31318 " addl $64, %4\n"
31319 " addl $64, %3\n"
31320@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31321 " shrl $2, %0\n"
31322 " andl $3, %%eax\n"
31323 " cld\n"
31324+ __COPYUSER_SET_ES
31325 "99: rep; movsl\n"
31326 "36: movl %%eax, %0\n"
31327 "37: rep; movsb\n"
31328 "100:\n"
31329+ __COPYUSER_RESTORE_ES
31330+ ".section .fixup,\"ax\"\n"
31331+ "101: lea 0(%%eax,%0,4),%0\n"
31332+ " jmp 100b\n"
31333+ ".previous\n"
31334+ _ASM_EXTABLE(1b,100b)
31335+ _ASM_EXTABLE(2b,100b)
31336+ _ASM_EXTABLE(3b,100b)
31337+ _ASM_EXTABLE(4b,100b)
31338+ _ASM_EXTABLE(5b,100b)
31339+ _ASM_EXTABLE(6b,100b)
31340+ _ASM_EXTABLE(7b,100b)
31341+ _ASM_EXTABLE(8b,100b)
31342+ _ASM_EXTABLE(9b,100b)
31343+ _ASM_EXTABLE(10b,100b)
31344+ _ASM_EXTABLE(11b,100b)
31345+ _ASM_EXTABLE(12b,100b)
31346+ _ASM_EXTABLE(13b,100b)
31347+ _ASM_EXTABLE(14b,100b)
31348+ _ASM_EXTABLE(15b,100b)
31349+ _ASM_EXTABLE(16b,100b)
31350+ _ASM_EXTABLE(17b,100b)
31351+ _ASM_EXTABLE(18b,100b)
31352+ _ASM_EXTABLE(19b,100b)
31353+ _ASM_EXTABLE(20b,100b)
31354+ _ASM_EXTABLE(21b,100b)
31355+ _ASM_EXTABLE(22b,100b)
31356+ _ASM_EXTABLE(23b,100b)
31357+ _ASM_EXTABLE(24b,100b)
31358+ _ASM_EXTABLE(25b,100b)
31359+ _ASM_EXTABLE(26b,100b)
31360+ _ASM_EXTABLE(27b,100b)
31361+ _ASM_EXTABLE(28b,100b)
31362+ _ASM_EXTABLE(29b,100b)
31363+ _ASM_EXTABLE(30b,100b)
31364+ _ASM_EXTABLE(31b,100b)
31365+ _ASM_EXTABLE(32b,100b)
31366+ _ASM_EXTABLE(33b,100b)
31367+ _ASM_EXTABLE(34b,100b)
31368+ _ASM_EXTABLE(35b,100b)
31369+ _ASM_EXTABLE(36b,100b)
31370+ _ASM_EXTABLE(37b,100b)
31371+ _ASM_EXTABLE(99b,101b)
31372+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
31373+ : "1"(to), "2"(from), "0"(size)
31374+ : "eax", "edx", "memory");
31375+ return size;
31376+}
31377+
31378+static unsigned long
31379+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
31380+{
31381+ int d0, d1;
31382+ __asm__ __volatile__(
31383+ " .align 2,0x90\n"
31384+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
31385+ " cmpl $67, %0\n"
31386+ " jbe 3f\n"
31387+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
31388+ " .align 2,0x90\n"
31389+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
31390+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
31391+ "5: movl %%eax, 0(%3)\n"
31392+ "6: movl %%edx, 4(%3)\n"
31393+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
31394+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
31395+ "9: movl %%eax, 8(%3)\n"
31396+ "10: movl %%edx, 12(%3)\n"
31397+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
31398+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
31399+ "13: movl %%eax, 16(%3)\n"
31400+ "14: movl %%edx, 20(%3)\n"
31401+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
31402+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
31403+ "17: movl %%eax, 24(%3)\n"
31404+ "18: movl %%edx, 28(%3)\n"
31405+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
31406+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
31407+ "21: movl %%eax, 32(%3)\n"
31408+ "22: movl %%edx, 36(%3)\n"
31409+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
31410+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
31411+ "25: movl %%eax, 40(%3)\n"
31412+ "26: movl %%edx, 44(%3)\n"
31413+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
31414+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
31415+ "29: movl %%eax, 48(%3)\n"
31416+ "30: movl %%edx, 52(%3)\n"
31417+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
31418+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
31419+ "33: movl %%eax, 56(%3)\n"
31420+ "34: movl %%edx, 60(%3)\n"
31421+ " addl $-64, %0\n"
31422+ " addl $64, %4\n"
31423+ " addl $64, %3\n"
31424+ " cmpl $63, %0\n"
31425+ " ja 1b\n"
31426+ "35: movl %0, %%eax\n"
31427+ " shrl $2, %0\n"
31428+ " andl $3, %%eax\n"
31429+ " cld\n"
31430+ "99: rep; "__copyuser_seg" movsl\n"
31431+ "36: movl %%eax, %0\n"
31432+ "37: rep; "__copyuser_seg" movsb\n"
31433+ "100:\n"
31434 ".section .fixup,\"ax\"\n"
31435 "101: lea 0(%%eax,%0,4),%0\n"
31436 " jmp 100b\n"
31437@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31438 int d0, d1;
31439 __asm__ __volatile__(
31440 " .align 2,0x90\n"
31441- "0: movl 32(%4), %%eax\n"
31442+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31443 " cmpl $67, %0\n"
31444 " jbe 2f\n"
31445- "1: movl 64(%4), %%eax\n"
31446+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31447 " .align 2,0x90\n"
31448- "2: movl 0(%4), %%eax\n"
31449- "21: movl 4(%4), %%edx\n"
31450+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31451+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31452 " movl %%eax, 0(%3)\n"
31453 " movl %%edx, 4(%3)\n"
31454- "3: movl 8(%4), %%eax\n"
31455- "31: movl 12(%4),%%edx\n"
31456+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31457+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31458 " movl %%eax, 8(%3)\n"
31459 " movl %%edx, 12(%3)\n"
31460- "4: movl 16(%4), %%eax\n"
31461- "41: movl 20(%4), %%edx\n"
31462+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31463+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31464 " movl %%eax, 16(%3)\n"
31465 " movl %%edx, 20(%3)\n"
31466- "10: movl 24(%4), %%eax\n"
31467- "51: movl 28(%4), %%edx\n"
31468+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31469+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31470 " movl %%eax, 24(%3)\n"
31471 " movl %%edx, 28(%3)\n"
31472- "11: movl 32(%4), %%eax\n"
31473- "61: movl 36(%4), %%edx\n"
31474+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31475+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31476 " movl %%eax, 32(%3)\n"
31477 " movl %%edx, 36(%3)\n"
31478- "12: movl 40(%4), %%eax\n"
31479- "71: movl 44(%4), %%edx\n"
31480+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31481+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31482 " movl %%eax, 40(%3)\n"
31483 " movl %%edx, 44(%3)\n"
31484- "13: movl 48(%4), %%eax\n"
31485- "81: movl 52(%4), %%edx\n"
31486+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31487+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31488 " movl %%eax, 48(%3)\n"
31489 " movl %%edx, 52(%3)\n"
31490- "14: movl 56(%4), %%eax\n"
31491- "91: movl 60(%4), %%edx\n"
31492+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31493+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31494 " movl %%eax, 56(%3)\n"
31495 " movl %%edx, 60(%3)\n"
31496 " addl $-64, %0\n"
31497@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31498 " shrl $2, %0\n"
31499 " andl $3, %%eax\n"
31500 " cld\n"
31501- "6: rep; movsl\n"
31502+ "6: rep; "__copyuser_seg" movsl\n"
31503 " movl %%eax,%0\n"
31504- "7: rep; movsb\n"
31505+ "7: rep; "__copyuser_seg" movsb\n"
31506 "8:\n"
31507 ".section .fixup,\"ax\"\n"
31508 "9: lea 0(%%eax,%0,4),%0\n"
31509@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31510
31511 __asm__ __volatile__(
31512 " .align 2,0x90\n"
31513- "0: movl 32(%4), %%eax\n"
31514+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31515 " cmpl $67, %0\n"
31516 " jbe 2f\n"
31517- "1: movl 64(%4), %%eax\n"
31518+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31519 " .align 2,0x90\n"
31520- "2: movl 0(%4), %%eax\n"
31521- "21: movl 4(%4), %%edx\n"
31522+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31523+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31524 " movnti %%eax, 0(%3)\n"
31525 " movnti %%edx, 4(%3)\n"
31526- "3: movl 8(%4), %%eax\n"
31527- "31: movl 12(%4),%%edx\n"
31528+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31529+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31530 " movnti %%eax, 8(%3)\n"
31531 " movnti %%edx, 12(%3)\n"
31532- "4: movl 16(%4), %%eax\n"
31533- "41: movl 20(%4), %%edx\n"
31534+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31535+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31536 " movnti %%eax, 16(%3)\n"
31537 " movnti %%edx, 20(%3)\n"
31538- "10: movl 24(%4), %%eax\n"
31539- "51: movl 28(%4), %%edx\n"
31540+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31541+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31542 " movnti %%eax, 24(%3)\n"
31543 " movnti %%edx, 28(%3)\n"
31544- "11: movl 32(%4), %%eax\n"
31545- "61: movl 36(%4), %%edx\n"
31546+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31547+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31548 " movnti %%eax, 32(%3)\n"
31549 " movnti %%edx, 36(%3)\n"
31550- "12: movl 40(%4), %%eax\n"
31551- "71: movl 44(%4), %%edx\n"
31552+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31553+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31554 " movnti %%eax, 40(%3)\n"
31555 " movnti %%edx, 44(%3)\n"
31556- "13: movl 48(%4), %%eax\n"
31557- "81: movl 52(%4), %%edx\n"
31558+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31559+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31560 " movnti %%eax, 48(%3)\n"
31561 " movnti %%edx, 52(%3)\n"
31562- "14: movl 56(%4), %%eax\n"
31563- "91: movl 60(%4), %%edx\n"
31564+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31565+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31566 " movnti %%eax, 56(%3)\n"
31567 " movnti %%edx, 60(%3)\n"
31568 " addl $-64, %0\n"
31569@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31570 " shrl $2, %0\n"
31571 " andl $3, %%eax\n"
31572 " cld\n"
31573- "6: rep; movsl\n"
31574+ "6: rep; "__copyuser_seg" movsl\n"
31575 " movl %%eax,%0\n"
31576- "7: rep; movsb\n"
31577+ "7: rep; "__copyuser_seg" movsb\n"
31578 "8:\n"
31579 ".section .fixup,\"ax\"\n"
31580 "9: lea 0(%%eax,%0,4),%0\n"
31581@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
31582
31583 __asm__ __volatile__(
31584 " .align 2,0x90\n"
31585- "0: movl 32(%4), %%eax\n"
31586+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31587 " cmpl $67, %0\n"
31588 " jbe 2f\n"
31589- "1: movl 64(%4), %%eax\n"
31590+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31591 " .align 2,0x90\n"
31592- "2: movl 0(%4), %%eax\n"
31593- "21: movl 4(%4), %%edx\n"
31594+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31595+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31596 " movnti %%eax, 0(%3)\n"
31597 " movnti %%edx, 4(%3)\n"
31598- "3: movl 8(%4), %%eax\n"
31599- "31: movl 12(%4),%%edx\n"
31600+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31601+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31602 " movnti %%eax, 8(%3)\n"
31603 " movnti %%edx, 12(%3)\n"
31604- "4: movl 16(%4), %%eax\n"
31605- "41: movl 20(%4), %%edx\n"
31606+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31607+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31608 " movnti %%eax, 16(%3)\n"
31609 " movnti %%edx, 20(%3)\n"
31610- "10: movl 24(%4), %%eax\n"
31611- "51: movl 28(%4), %%edx\n"
31612+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31613+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31614 " movnti %%eax, 24(%3)\n"
31615 " movnti %%edx, 28(%3)\n"
31616- "11: movl 32(%4), %%eax\n"
31617- "61: movl 36(%4), %%edx\n"
31618+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31619+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31620 " movnti %%eax, 32(%3)\n"
31621 " movnti %%edx, 36(%3)\n"
31622- "12: movl 40(%4), %%eax\n"
31623- "71: movl 44(%4), %%edx\n"
31624+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31625+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31626 " movnti %%eax, 40(%3)\n"
31627 " movnti %%edx, 44(%3)\n"
31628- "13: movl 48(%4), %%eax\n"
31629- "81: movl 52(%4), %%edx\n"
31630+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31631+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31632 " movnti %%eax, 48(%3)\n"
31633 " movnti %%edx, 52(%3)\n"
31634- "14: movl 56(%4), %%eax\n"
31635- "91: movl 60(%4), %%edx\n"
31636+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31637+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31638 " movnti %%eax, 56(%3)\n"
31639 " movnti %%edx, 60(%3)\n"
31640 " addl $-64, %0\n"
31641@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
31642 " shrl $2, %0\n"
31643 " andl $3, %%eax\n"
31644 " cld\n"
31645- "6: rep; movsl\n"
31646+ "6: rep; "__copyuser_seg" movsl\n"
31647 " movl %%eax,%0\n"
31648- "7: rep; movsb\n"
31649+ "7: rep; "__copyuser_seg" movsb\n"
31650 "8:\n"
31651 ".section .fixup,\"ax\"\n"
31652 "9: lea 0(%%eax,%0,4),%0\n"
31653@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
31654 */
31655 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
31656 unsigned long size);
31657-unsigned long __copy_user_intel(void __user *to, const void *from,
31658+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
31659+ unsigned long size);
31660+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
31661 unsigned long size);
31662 unsigned long __copy_user_zeroing_intel_nocache(void *to,
31663 const void __user *from, unsigned long size);
31664 #endif /* CONFIG_X86_INTEL_USERCOPY */
31665
31666 /* Generic arbitrary sized copy. */
31667-#define __copy_user(to, from, size) \
31668+#define __copy_user(to, from, size, prefix, set, restore) \
31669 do { \
31670 int __d0, __d1, __d2; \
31671 __asm__ __volatile__( \
31672+ set \
31673 " cmp $7,%0\n" \
31674 " jbe 1f\n" \
31675 " movl %1,%0\n" \
31676 " negl %0\n" \
31677 " andl $7,%0\n" \
31678 " subl %0,%3\n" \
31679- "4: rep; movsb\n" \
31680+ "4: rep; "prefix"movsb\n" \
31681 " movl %3,%0\n" \
31682 " shrl $2,%0\n" \
31683 " andl $3,%3\n" \
31684 " .align 2,0x90\n" \
31685- "0: rep; movsl\n" \
31686+ "0: rep; "prefix"movsl\n" \
31687 " movl %3,%0\n" \
31688- "1: rep; movsb\n" \
31689+ "1: rep; "prefix"movsb\n" \
31690 "2:\n" \
31691+ restore \
31692 ".section .fixup,\"ax\"\n" \
31693 "5: addl %3,%0\n" \
31694 " jmp 2b\n" \
31695@@ -538,14 +650,14 @@ do { \
31696 " negl %0\n" \
31697 " andl $7,%0\n" \
31698 " subl %0,%3\n" \
31699- "4: rep; movsb\n" \
31700+ "4: rep; "__copyuser_seg"movsb\n" \
31701 " movl %3,%0\n" \
31702 " shrl $2,%0\n" \
31703 " andl $3,%3\n" \
31704 " .align 2,0x90\n" \
31705- "0: rep; movsl\n" \
31706+ "0: rep; "__copyuser_seg"movsl\n" \
31707 " movl %3,%0\n" \
31708- "1: rep; movsb\n" \
31709+ "1: rep; "__copyuser_seg"movsb\n" \
31710 "2:\n" \
31711 ".section .fixup,\"ax\"\n" \
31712 "5: addl %3,%0\n" \
31713@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
31714 {
31715 stac();
31716 if (movsl_is_ok(to, from, n))
31717- __copy_user(to, from, n);
31718+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
31719 else
31720- n = __copy_user_intel(to, from, n);
31721+ n = __generic_copy_to_user_intel(to, from, n);
31722 clac();
31723 return n;
31724 }
31725@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
31726 {
31727 stac();
31728 if (movsl_is_ok(to, from, n))
31729- __copy_user(to, from, n);
31730+ __copy_user(to, from, n, __copyuser_seg, "", "");
31731 else
31732- n = __copy_user_intel((void __user *)to,
31733- (const void *)from, n);
31734+ n = __generic_copy_from_user_intel(to, from, n);
31735 clac();
31736 return n;
31737 }
31738@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
31739 if (n > 64 && cpu_has_xmm2)
31740 n = __copy_user_intel_nocache(to, from, n);
31741 else
31742- __copy_user(to, from, n);
31743+ __copy_user(to, from, n, __copyuser_seg, "", "");
31744 #else
31745- __copy_user(to, from, n);
31746+ __copy_user(to, from, n, __copyuser_seg, "", "");
31747 #endif
31748 clac();
31749 return n;
31750 }
31751 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
31752
31753-/**
31754- * copy_to_user: - Copy a block of data into user space.
31755- * @to: Destination address, in user space.
31756- * @from: Source address, in kernel space.
31757- * @n: Number of bytes to copy.
31758- *
31759- * Context: User context only. This function may sleep.
31760- *
31761- * Copy data from kernel space to user space.
31762- *
31763- * Returns number of bytes that could not be copied.
31764- * On success, this will be zero.
31765- */
31766-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
31767+#ifdef CONFIG_PAX_MEMORY_UDEREF
31768+void __set_fs(mm_segment_t x)
31769 {
31770- if (access_ok(VERIFY_WRITE, to, n))
31771- n = __copy_to_user(to, from, n);
31772- return n;
31773+ switch (x.seg) {
31774+ case 0:
31775+ loadsegment(gs, 0);
31776+ break;
31777+ case TASK_SIZE_MAX:
31778+ loadsegment(gs, __USER_DS);
31779+ break;
31780+ case -1UL:
31781+ loadsegment(gs, __KERNEL_DS);
31782+ break;
31783+ default:
31784+ BUG();
31785+ }
31786 }
31787-EXPORT_SYMBOL(_copy_to_user);
31788+EXPORT_SYMBOL(__set_fs);
31789
31790-/**
31791- * copy_from_user: - Copy a block of data from user space.
31792- * @to: Destination address, in kernel space.
31793- * @from: Source address, in user space.
31794- * @n: Number of bytes to copy.
31795- *
31796- * Context: User context only. This function may sleep.
31797- *
31798- * Copy data from user space to kernel space.
31799- *
31800- * Returns number of bytes that could not be copied.
31801- * On success, this will be zero.
31802- *
31803- * If some data could not be copied, this function will pad the copied
31804- * data to the requested size using zero bytes.
31805- */
31806-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
31807+void set_fs(mm_segment_t x)
31808 {
31809- if (access_ok(VERIFY_READ, from, n))
31810- n = __copy_from_user(to, from, n);
31811- else
31812- memset(to, 0, n);
31813- return n;
31814+ current_thread_info()->addr_limit = x;
31815+ __set_fs(x);
31816 }
31817-EXPORT_SYMBOL(_copy_from_user);
31818+EXPORT_SYMBOL(set_fs);
31819+#endif
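The usercopy_32.c diff splices __copyuser_seg into every userland access, splits __copy_user_intel into explicit to/from variants (the prefix sits on the load side in the from variant and on the store side in the to variant), and replaces _copy_to_user/_copy_from_user with a gs-backed set_fs/__set_fs for i386 UDEREF: addr_limit 0, TASK_SIZE_MAX, and -1 map to a null, __USER_DS, and __KERNEL_DS %gs respectively. A sketch of the classic set_fs pattern the patched implementation must keep working; kernel context assumed, and probe_read_kernel_buf is an illustrative name, not from the patch:

    static long probe_read_kernel_buf(void *dst, const void __user *src, size_t n)
    {
        mm_segment_t old_fs = get_fs();
        long left;

        set_fs(KERNEL_DS);   /* patched set_fs() also reloads %gs via __set_fs() */
        left = __copy_from_user(dst, src, n);
        set_fs(old_fs);
        return left;
    }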
31820diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
31821index c905e89..01ab928 100644
31822--- a/arch/x86/lib/usercopy_64.c
31823+++ b/arch/x86/lib/usercopy_64.c
31824@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31825 might_fault();
31826 /* no memory constraint because it doesn't change any memory gcc knows
31827 about */
31828+ pax_open_userland();
31829 stac();
31830 asm volatile(
31831 " testq %[size8],%[size8]\n"
31832@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31833 _ASM_EXTABLE(0b,3b)
31834 _ASM_EXTABLE(1b,2b)
31835 : [size8] "=&c"(size), [dst] "=&D" (__d0)
31836- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
31837+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
31838 [zero] "r" (0UL), [eight] "r" (8UL));
31839 clac();
31840+ pax_close_userland();
31841 return size;
31842 }
31843 EXPORT_SYMBOL(__clear_user);
31844@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
31845 }
31846 EXPORT_SYMBOL(clear_user);
31847
31848-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
31849+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
31850 {
31851- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
31852- return copy_user_generic((__force void *)to, (__force void *)from, len);
31853- }
31854- return len;
31855+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
31856+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
31857+ return len;
31858 }
31859 EXPORT_SYMBOL(copy_in_user);
31860
31861@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
31862 * it is not necessary to optimize tail handling.
31863 */
31864 __visible unsigned long
31865-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31866+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
31867 {
31868 char c;
31869 unsigned zero_len;
31870
31871+ clac();
31872+ pax_close_userland();
31873 for (; len; --len, to++) {
31874 if (__get_user_nocheck(c, from++, sizeof(char)))
31875 break;
31876@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31877 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
31878 if (__put_user_nocheck(c, to++, sizeof(char)))
31879 break;
31880- clac();
31881 return len;
31882 }
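Two shapes to note in usercopy_64.c: every raw user-buffer asm block is now bracketed by pax_open_userland()/pax_close_userland() in addition to stac()/clac(), and copy_user_handle_tail() closes the window up front because the __get_user_nocheck/__put_user_nocheck calls it makes manage their own. The bracketing, shown as a shape only; touch_user_buffer is a hypothetical name and the body comment marks where the inline asm sits in the real __clear_user():

    static unsigned long touch_user_buffer(void __user *addr, unsigned long size)
    {
        pax_open_userland();   /* UDEREF: open the userland window */
        stac();                /* SMAP: permit user accesses */
        /* ... inline asm clearing/copying the user buffer ... */
        clac();
        pax_close_userland();
        return size;
    }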
31883diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
31884index 6a19ad9..1c48f9a 100644
31885--- a/arch/x86/mm/Makefile
31886+++ b/arch/x86/mm/Makefile
31887@@ -30,3 +30,7 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o
31888 obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
31889
31890 obj-$(CONFIG_MEMTEST) += memtest.o
31891+
31892+quote:="
31893+obj-$(CONFIG_X86_64) += uderef_64.o
31894+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
31895diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
31896index 903ec1e..c4166b2 100644
31897--- a/arch/x86/mm/extable.c
31898+++ b/arch/x86/mm/extable.c
31899@@ -6,12 +6,24 @@
31900 static inline unsigned long
31901 ex_insn_addr(const struct exception_table_entry *x)
31902 {
31903- return (unsigned long)&x->insn + x->insn;
31904+ unsigned long reloc = 0;
31905+
31906+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31907+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31908+#endif
31909+
31910+ return (unsigned long)&x->insn + x->insn + reloc;
31911 }
31912 static inline unsigned long
31913 ex_fixup_addr(const struct exception_table_entry *x)
31914 {
31915- return (unsigned long)&x->fixup + x->fixup;
31916+ unsigned long reloc = 0;
31917+
31918+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31919+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31920+#endif
31921+
31922+ return (unsigned long)&x->fixup + x->fixup + reloc;
31923 }
31924
31925 int fixup_exception(struct pt_regs *regs)
31926@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
31927 unsigned long new_ip;
31928
31929 #ifdef CONFIG_PNPBIOS
31930- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
31931+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
31932 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
31933 extern u32 pnp_bios_is_utter_crap;
31934 pnp_bios_is_utter_crap = 1;
31935@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
31936 i += 4;
31937 p->fixup -= i;
31938 i += 4;
31939+
31940+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31941+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
31942+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31943+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31944+#endif
31945+
31946 }
31947 }
31948
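The extable.c hunks deal with the table's 32-bit self-relative offsets: under i386 KERNEXEC the kernel text runs shifted by a constant delta (____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR), so decoding an entry adds the delta and sort_extable() subtracts it back out of the stored fields so later decodes stay correct. A model of the decode side, with the delta passed in (zero on configurations without the relocation):

    /* 32-bit self-relative extable entries, rebased by the KERNEXEC
     * relocation delta on i386, as in the patched ex_insn_addr(). */
    struct extable_entry_model { int insn, fixup; };

    static unsigned long ex_insn_addr_model(const struct extable_entry_model *x,
                                            long kernexec_reloc)
    {
        return (unsigned long)&x->insn + x->insn + kernexec_reloc;
    }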
31949diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
31950index 3664279..c6a7830 100644
31951--- a/arch/x86/mm/fault.c
31952+++ b/arch/x86/mm/fault.c
31953@@ -14,12 +14,19 @@
31954 #include <linux/hugetlb.h> /* hstate_index_to_shift */
31955 #include <linux/prefetch.h> /* prefetchw */
31956 #include <linux/context_tracking.h> /* exception_enter(), ... */
31957+#include <linux/unistd.h>
31958+#include <linux/compiler.h>
31959
31960 #include <asm/traps.h> /* dotraplinkage, ... */
31961 #include <asm/pgalloc.h> /* pgd_*(), ... */
31962 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
31963 #include <asm/fixmap.h> /* VSYSCALL_ADDR */
31964 #include <asm/vsyscall.h> /* emulate_vsyscall */
31965+#include <asm/tlbflush.h>
31966+
31967+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31968+#include <asm/stacktrace.h>
31969+#endif
31970
31971 #define CREATE_TRACE_POINTS
31972 #include <asm/trace/exceptions.h>
31973@@ -60,7 +67,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
31974 int ret = 0;
31975
31976 /* kprobe_running() needs smp_processor_id() */
31977- if (kprobes_built_in() && !user_mode_vm(regs)) {
31978+ if (kprobes_built_in() && !user_mode(regs)) {
31979 preempt_disable();
31980 if (kprobe_running() && kprobe_fault_handler(regs, 14))
31981 ret = 1;
31982@@ -121,7 +128,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
31983 return !instr_lo || (instr_lo>>1) == 1;
31984 case 0x00:
31985 /* Prefetch instruction is 0x0F0D or 0x0F18 */
31986- if (probe_kernel_address(instr, opcode))
31987+ if (user_mode(regs)) {
31988+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31989+ return 0;
31990+ } else if (probe_kernel_address(instr, opcode))
31991 return 0;
31992
31993 *prefetch = (instr_lo == 0xF) &&
31994@@ -155,7 +165,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
31995 while (instr < max_instr) {
31996 unsigned char opcode;
31997
31998- if (probe_kernel_address(instr, opcode))
31999+ if (user_mode(regs)) {
32000+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
32001+ break;
32002+ } else if (probe_kernel_address(instr, opcode))
32003 break;
32004
32005 instr++;
32006@@ -186,6 +199,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
32007 force_sig_info(si_signo, &info, tsk);
32008 }
32009
32010+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32011+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
32012+#endif
32013+
32014+#ifdef CONFIG_PAX_EMUTRAMP
32015+static int pax_handle_fetch_fault(struct pt_regs *regs);
32016+#endif
32017+
32018+#ifdef CONFIG_PAX_PAGEEXEC
32019+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
32020+{
32021+ pgd_t *pgd;
32022+ pud_t *pud;
32023+ pmd_t *pmd;
32024+
32025+ pgd = pgd_offset(mm, address);
32026+ if (!pgd_present(*pgd))
32027+ return NULL;
32028+ pud = pud_offset(pgd, address);
32029+ if (!pud_present(*pud))
32030+ return NULL;
32031+ pmd = pmd_offset(pud, address);
32032+ if (!pmd_present(*pmd))
32033+ return NULL;
32034+ return pmd;
32035+}
32036+#endif
32037+
32038 DEFINE_SPINLOCK(pgd_lock);
32039 LIST_HEAD(pgd_list);
32040
32041@@ -236,10 +277,27 @@ void vmalloc_sync_all(void)
32042 for (address = VMALLOC_START & PMD_MASK;
32043 address >= TASK_SIZE && address < FIXADDR_TOP;
32044 address += PMD_SIZE) {
32045+
32046+#ifdef CONFIG_PAX_PER_CPU_PGD
32047+ unsigned long cpu;
32048+#else
32049 struct page *page;
32050+#endif
32051
32052 spin_lock(&pgd_lock);
32053+
32054+#ifdef CONFIG_PAX_PER_CPU_PGD
32055+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
32056+ pgd_t *pgd = get_cpu_pgd(cpu, user);
32057+ pmd_t *ret;
32058+
32059+ ret = vmalloc_sync_one(pgd, address);
32060+ if (!ret)
32061+ break;
32062+ pgd = get_cpu_pgd(cpu, kernel);
32063+#else
32064 list_for_each_entry(page, &pgd_list, lru) {
32065+ pgd_t *pgd;
32066 spinlock_t *pgt_lock;
32067 pmd_t *ret;
32068
32069@@ -247,8 +305,14 @@ void vmalloc_sync_all(void)
32070 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
32071
32072 spin_lock(pgt_lock);
32073- ret = vmalloc_sync_one(page_address(page), address);
32074+ pgd = page_address(page);
32075+#endif
32076+
32077+ ret = vmalloc_sync_one(pgd, address);
32078+
32079+#ifndef CONFIG_PAX_PER_CPU_PGD
32080 spin_unlock(pgt_lock);
32081+#endif
32082
32083 if (!ret)
32084 break;
32085@@ -282,6 +346,12 @@ static noinline int vmalloc_fault(unsigned long address)
32086 * an interrupt in the middle of a task switch..
32087 */
32088 pgd_paddr = read_cr3();
32089+
32090+#ifdef CONFIG_PAX_PER_CPU_PGD
32091+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
32092+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
32093+#endif
32094+
32095 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
32096 if (!pmd_k)
32097 return -1;
32098@@ -378,11 +448,25 @@ static noinline int vmalloc_fault(unsigned long address)
32099 * happen within a race in page table update. In the later
32100 * case just flush:
32101 */
32102- pgd = pgd_offset(current->active_mm, address);
32103+
32104 pgd_ref = pgd_offset_k(address);
32105 if (pgd_none(*pgd_ref))
32106 return -1;
32107
32108+#ifdef CONFIG_PAX_PER_CPU_PGD
32109+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
32110+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
32111+ if (pgd_none(*pgd)) {
32112+ set_pgd(pgd, *pgd_ref);
32113+ arch_flush_lazy_mmu_mode();
32114+ } else {
32115+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
32116+ }
32117+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
32118+#else
32119+ pgd = pgd_offset(current->active_mm, address);
32120+#endif
32121+
32122 if (pgd_none(*pgd)) {
32123 set_pgd(pgd, *pgd_ref);
32124 arch_flush_lazy_mmu_mode();
32125@@ -549,7 +633,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
32126 static int is_errata100(struct pt_regs *regs, unsigned long address)
32127 {
32128 #ifdef CONFIG_X86_64
32129- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
32130+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
32131 return 1;
32132 #endif
32133 return 0;
32134@@ -576,7 +660,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
32135 }
32136
32137 static const char nx_warning[] = KERN_CRIT
32138-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
32139+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
32140
32141 static void
32142 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32143@@ -585,7 +669,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32144 if (!oops_may_print())
32145 return;
32146
32147- if (error_code & PF_INSTR) {
32148+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
32149 unsigned int level;
32150 pgd_t *pgd;
32151 pte_t *pte;
32152@@ -596,9 +680,21 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32153 pte = lookup_address_in_pgd(pgd, address, &level);
32154
32155 if (pte && pte_present(*pte) && !pte_exec(*pte))
32156- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
32157+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
32158 }
32159
32160+#ifdef CONFIG_PAX_KERNEXEC
32161+ if (init_mm.start_code <= address && address < init_mm.end_code) {
32162+ if (current->signal->curr_ip)
32163+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
32164+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
32165+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32166+ else
32167+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
32168+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32169+ }
32170+#endif
32171+
32172 printk(KERN_ALERT "BUG: unable to handle kernel ");
32173 if (address < PAGE_SIZE)
32174 printk(KERN_CONT "NULL pointer dereference");
32175@@ -779,6 +875,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
32176 return;
32177 }
32178 #endif
32179+
32180+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32181+ if (pax_is_fetch_fault(regs, error_code, address)) {
32182+
32183+#ifdef CONFIG_PAX_EMUTRAMP
32184+ switch (pax_handle_fetch_fault(regs)) {
32185+ case 2:
32186+ return;
32187+ }
32188+#endif
32189+
32190+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32191+ do_group_exit(SIGKILL);
32192+ }
32193+#endif
32194+
32195 /* Kernel addresses are always protection faults: */
32196 if (address >= TASK_SIZE)
32197 error_code |= PF_PROT;
32198@@ -864,7 +976,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
32199 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
32200 printk(KERN_ERR
32201 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
32202- tsk->comm, tsk->pid, address);
32203+ tsk->comm, task_pid_nr(tsk), address);
32204 code = BUS_MCEERR_AR;
32205 }
32206 #endif
32207@@ -918,6 +1030,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
32208 return 1;
32209 }
32210
32211+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32212+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
32213+{
32214+ pte_t *pte;
32215+ pmd_t *pmd;
32216+ spinlock_t *ptl;
32217+ unsigned char pte_mask;
32218+
32219+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
32220+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
32221+ return 0;
32222+
32223+ /* PaX: it's our fault, let's handle it if we can */
32224+
32225+ /* PaX: take a look at read faults before acquiring any locks */
32226+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
32227+ /* instruction fetch attempt from a protected page in user mode */
32228+ up_read(&mm->mmap_sem);
32229+
32230+#ifdef CONFIG_PAX_EMUTRAMP
32231+ switch (pax_handle_fetch_fault(regs)) {
32232+ case 2:
32233+ return 1;
32234+ }
32235+#endif
32236+
32237+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32238+ do_group_exit(SIGKILL);
32239+ }
32240+
32241+ pmd = pax_get_pmd(mm, address);
32242+ if (unlikely(!pmd))
32243+ return 0;
32244+
32245+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
32246+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
32247+ pte_unmap_unlock(pte, ptl);
32248+ return 0;
32249+ }
32250+
32251+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
32252+ /* write attempt to a protected page in user mode */
32253+ pte_unmap_unlock(pte, ptl);
32254+ return 0;
32255+ }
32256+
32257+#ifdef CONFIG_SMP
32258+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
32259+#else
32260+ if (likely(address > get_limit(regs->cs)))
32261+#endif
32262+ {
32263+ set_pte(pte, pte_mkread(*pte));
32264+ __flush_tlb_one(address);
32265+ pte_unmap_unlock(pte, ptl);
32266+ up_read(&mm->mmap_sem);
32267+ return 1;
32268+ }
32269+
32270+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
32271+
32272+ /*
32273+ * PaX: fill DTLB with user rights and retry
32274+ */
32275+ __asm__ __volatile__ (
32276+ "orb %2,(%1)\n"
32277+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
32278+/*
32279+ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
32280+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
32281+ * page fault when examined during a TLB load attempt. this is true not only
32282+ * for PTEs holding a non-present entry but also present entries that will
32283+ * raise a page fault (such as those set up by PaX, or the copy-on-write
32284+ * mechanism). in effect it means that we do *not* need to flush the TLBs
32285+ * for our target pages since their PTEs are simply not in the TLBs at all.
32286+
32287+ *
32288+ * fast path of the page fault handler and can get rid of tracing since we
32289+ * can no longer flush unintended entries.
32290+ */
32291+ "invlpg (%0)\n"
32292+#endif
32293+ __copyuser_seg"testb $0,(%0)\n"
32294+ "xorb %3,(%1)\n"
32295+ :
32296+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
32297+ : "memory", "cc");
32298+ pte_unmap_unlock(pte, ptl);
32299+ up_read(&mm->mmap_sem);
32300+ return 1;
32301+}
32302+#endif
32303+
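/*
 * A minimal user-space sketch of the pte_mask arithmetic used by
 * pax_handle_pageexec_fault() above, assuming the standard x86 bit
 * positions (PF_WRITE = 1 << 1, _PAGE_BIT_DIRTY = 6, _PAGE_ACCESSED =
 * 1 << 5, _PAGE_USER = 1 << 2); an illustrative model, not kernel code:
 */
#include <stdio.h>

#define PF_WRITE        (1u << 1)
#define _PAGE_BIT_DIRTY 6
#define _PAGE_DIRTY     (1u << _PAGE_BIT_DIRTY)
#define _PAGE_ACCESSED  (1u << 5)
#define _PAGE_USER      (1u << 2)

int main(void)
{
	unsigned int error_code;

	for (error_code = 0; error_code <= PF_WRITE; error_code += PF_WRITE) {
		/* PF_WRITE (2) << (_PAGE_BIT_DIRTY - 1) (5) == _PAGE_DIRTY (0x40) */
		unsigned char pte_mask = _PAGE_ACCESSED | _PAGE_USER |
					 ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY - 1));
		printf("error_code=%u -> pte_mask=%#x\n", error_code, pte_mask);
	}
	return 0;	/* prints 0x24 for reads, 0x64 (dirty bit added) for writes */
}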
32304 /*
32305 * Handle a spurious fault caused by a stale TLB entry.
32306 *
32307@@ -985,6 +1190,9 @@ int show_unhandled_signals = 1;
32308 static inline int
32309 access_error(unsigned long error_code, struct vm_area_struct *vma)
32310 {
32311+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
32312+ return 1;
32313+
32314 if (error_code & PF_WRITE) {
32315 /* write, present and write, not present: */
32316 if (unlikely(!(vma->vm_flags & VM_WRITE)))
32317@@ -1019,7 +1227,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
32318 if (error_code & PF_USER)
32319 return false;
32320
32321- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
32322+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
32323 return false;
32324
32325 return true;
32326@@ -1047,6 +1255,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32327 tsk = current;
32328 mm = tsk->mm;
32329
32330+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32331+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
32332+ if (!search_exception_tables(regs->ip)) {
32333+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32334+ bad_area_nosemaphore(regs, error_code, address);
32335+ return;
32336+ }
32337+ if (address < pax_user_shadow_base) {
32338+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32339+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
32340+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
32341+ } else
32342+ address -= pax_user_shadow_base;
32343+ }
32344+#endif
32345+
32346 /*
32347 * Detect and handle instructions that would cause a page fault for
32348 * both a tracked kernel page and a userspace page.
32349@@ -1124,7 +1348,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32350 * User-mode registers count as a user access even for any
32351 * potential system fault or CPU buglet:
32352 */
32353- if (user_mode_vm(regs)) {
32354+ if (user_mode(regs)) {
32355 local_irq_enable();
32356 error_code |= PF_USER;
32357 flags |= FAULT_FLAG_USER;
32358@@ -1171,6 +1395,11 @@ retry:
32359 might_sleep();
32360 }
32361
32362+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32363+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
32364+ return;
32365+#endif
32366+
32367 vma = find_vma(mm, address);
32368 if (unlikely(!vma)) {
32369 bad_area(regs, error_code, address);
32370@@ -1182,18 +1411,24 @@ retry:
32371 bad_area(regs, error_code, address);
32372 return;
32373 }
32374- if (error_code & PF_USER) {
32375- /*
32376- * Accessing the stack below %sp is always a bug.
32377- * The large cushion allows instructions like enter
32378- * and pusha to work. ("enter $65535, $31" pushes
32379- * 32 pointers and then decrements %sp by 65535.)
32380- */
32381- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
32382- bad_area(regs, error_code, address);
32383- return;
32384- }
32385+ /*
32386+ * Accessing the stack below %sp is always a bug.
32387+ * The large cushion allows instructions like enter
32388+ * and pusha to work. ("enter $65535, $31" pushes
32389+ * 32 pointers and then decrements %sp by 65535.)
32390+ */
32391+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
32392+ bad_area(regs, error_code, address);
32393+ return;
32394 }
32395+
32396+#ifdef CONFIG_PAX_SEGMEXEC
32397+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
32398+ bad_area(regs, error_code, address);
32399+ return;
32400+ }
32401+#endif
32402+
32403 if (unlikely(expand_stack(vma, address))) {
32404 bad_area(regs, error_code, address);
32405 return;
32406@@ -1309,3 +1544,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
32407 }
32408 NOKPROBE_SYMBOL(trace_do_page_fault);
32409 #endif /* CONFIG_TRACING */
32410+
32411+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32412+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
32413+{
32414+ struct mm_struct *mm = current->mm;
32415+ unsigned long ip = regs->ip;
32416+
32417+ if (v8086_mode(regs))
32418+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
32419+
32420+#ifdef CONFIG_PAX_PAGEEXEC
32421+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
32422+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
32423+ return true;
32424+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
32425+ return true;
32426+ return false;
32427+ }
32428+#endif
32429+
32430+#ifdef CONFIG_PAX_SEGMEXEC
32431+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
32432+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
32433+ return true;
32434+ return false;
32435+ }
32436+#endif
32437+
32438+ return false;
32439+}
32440+#endif
32441+
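/*
 * A user-space model of the PAGEEXEC branch of pax_is_fetch_fault() above,
 * assuming the usual x86 page-fault error-code bits (PF_PROT = 1 << 0,
 * PF_WRITE = 1 << 1, PF_INSTR = 1 << 4); a sketch for illustration only:
 */
#include <stdbool.h>
#include <stdio.h>

#define PF_PROT  (1ul << 0)
#define PF_WRITE (1ul << 1)
#define PF_INSTR (1ul << 4)

static bool is_fetch_fault(unsigned long error_code, unsigned long ip,
			   unsigned long address, bool have_nx)
{
	if (have_nx && (error_code & PF_INSTR))
		return true;	/* hardware NX reported the fetch directly */
	/* without NX: a non-write miss at the faulting IP itself is a fetch */
	return !(error_code & (PF_PROT | PF_WRITE)) && ip == address;
}

int main(void)
{
	printf("%d\n", is_fetch_fault(PF_INSTR, 0x1000, 0x2000, true));	/* 1 */
	printf("%d\n", is_fetch_fault(0, 0x1000, 0x1000, false));		/* 1 */
	printf("%d\n", is_fetch_fault(PF_WRITE, 0x1000, 0x1000, false));	/* 0 */
	return 0;
}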
32442+#ifdef CONFIG_PAX_EMUTRAMP
32443+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
32444+{
32445+ int err;
32446+
32447+ do { /* PaX: libffi trampoline emulation */
32448+ unsigned char mov, jmp;
32449+ unsigned int addr1, addr2;
32450+
32451+#ifdef CONFIG_X86_64
32452+ if ((regs->ip + 9) >> 32)
32453+ break;
32454+#endif
32455+
32456+ err = get_user(mov, (unsigned char __user *)regs->ip);
32457+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32458+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32459+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32460+
32461+ if (err)
32462+ break;
32463+
32464+ if (mov == 0xB8 && jmp == 0xE9) {
32465+ regs->ax = addr1;
32466+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32467+ return 2;
32468+ }
32469+ } while (0);
32470+
32471+ do { /* PaX: gcc trampoline emulation #1 */
32472+ unsigned char mov1, mov2;
32473+ unsigned short jmp;
32474+ unsigned int addr1, addr2;
32475+
32476+#ifdef CONFIG_X86_64
32477+ if ((regs->ip + 11) >> 32)
32478+ break;
32479+#endif
32480+
32481+ err = get_user(mov1, (unsigned char __user *)regs->ip);
32482+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32483+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
32484+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32485+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
32486+
32487+ if (err)
32488+ break;
32489+
32490+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
32491+ regs->cx = addr1;
32492+ regs->ax = addr2;
32493+ regs->ip = addr2;
32494+ return 2;
32495+ }
32496+ } while (0);
32497+
32498+ do { /* PaX: gcc trampoline emulation #2 */
32499+ unsigned char mov, jmp;
32500+ unsigned int addr1, addr2;
32501+
32502+#ifdef CONFIG_X86_64
32503+ if ((regs->ip + 9) >> 32)
32504+ break;
32505+#endif
32506+
32507+ err = get_user(mov, (unsigned char __user *)regs->ip);
32508+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32509+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32510+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32511+
32512+ if (err)
32513+ break;
32514+
32515+ if (mov == 0xB9 && jmp == 0xE9) {
32516+ regs->cx = addr1;
32517+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32518+ return 2;
32519+ }
32520+ } while (0);
32521+
32522+ return 1; /* PaX in action */
32523+}
32524+
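/*
 * A runnable user-space sketch of the 10-byte libffi trampoline matched by
 * the first case above ("mov $imm32,%eax; jmp rel32"), encoded and then
 * decoded the same way the emulation does; addresses are illustrative:
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t tramp[10];
	uint32_t data = 0xdeadbeef;	/* ends up in %eax */
	uint32_t rel  = 0x1000;		/* jmp displacement */
	uint32_t imm, disp;
	uint32_t ip = 0x08048000;	/* pretend fault address (regs->ip) */

	tramp[0] = 0xB8;		/* mov $imm32,%eax */
	memcpy(&tramp[1], &data, 4);
	tramp[5] = 0xE9;		/* jmp rel32 */
	memcpy(&tramp[6], &rel, 4);

	if (tramp[0] == 0xB8 && tramp[5] == 0xE9) {
		memcpy(&imm, &tramp[1], 4);
		memcpy(&disp, &tramp[6], 4);
		printf("ax <- %#x\n", imm);
		/* rel32 is relative to the end of the 10-byte sequence */
		printf("ip <- %#x\n", ip + disp + 10);
	}
	return 0;
}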
32525+#ifdef CONFIG_X86_64
32526+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
32527+{
32528+ int err;
32529+
32530+ do { /* PaX: libffi trampoline emulation */
32531+ unsigned short mov1, mov2, jmp1;
32532+ unsigned char stcclc, jmp2;
32533+ unsigned long addr1, addr2;
32534+
32535+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32536+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32537+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32538+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32539+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
32540+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
32541+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
32542+
32543+ if (err)
32544+ break;
32545+
32546+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32547+ regs->r11 = addr1;
32548+ regs->r10 = addr2;
32549+ if (stcclc == 0xF8)
32550+ regs->flags &= ~X86_EFLAGS_CF;
32551+ else
32552+ regs->flags |= X86_EFLAGS_CF;
32553+ regs->ip = addr1;
32554+ return 2;
32555+ }
32556+ } while (0);
32557+
32558+ do { /* PaX: gcc trampoline emulation #1 */
32559+ unsigned short mov1, mov2, jmp1;
32560+ unsigned char jmp2;
32561+ unsigned int addr1;
32562+ unsigned long addr2;
32563+
32564+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32565+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
32566+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
32567+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
32568+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
32569+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
32570+
32571+ if (err)
32572+ break;
32573+
32574+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32575+ regs->r11 = addr1;
32576+ regs->r10 = addr2;
32577+ regs->ip = addr1;
32578+ return 2;
32579+ }
32580+ } while (0);
32581+
32582+ do { /* PaX: gcc trampoline emulation #2 */
32583+ unsigned short mov1, mov2, jmp1;
32584+ unsigned char jmp2;
32585+ unsigned long addr1, addr2;
32586+
32587+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32588+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32589+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32590+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32591+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
32592+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
32593+
32594+ if (err)
32595+ break;
32596+
32597+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32598+ regs->r11 = addr1;
32599+ regs->r10 = addr2;
32600+ regs->ip = addr1;
32601+ return 2;
32602+ }
32603+ } while (0);
32604+
32605+ return 1; /* PaX in action */
32606+}
32607+#endif
32608+
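/*
 * The 24-byte libffi trampoline matched by the 64-bit case above, spelled
 * out as a sketch (offsets relative to regs->ip; the 16-bit opcode fields
 * are fetched little-endian, hence 0xBB49 for the byte pair 49 BB):
 *
 *   ip+ 0: 49 BB <addr1:8>   movabs $addr1, %r11
 *   ip+10: 49 BA <addr2:8>   movabs $addr2, %r10
 *   ip+20: F8 | F9           clc / stc (carry flag handed to the target)
 *   ip+21: 49 FF E3          jmp *%r11
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t t[24] = { 0x49, 0xBB, [10] = 0x49, 0xBA,
			  [20] = 0xF8, 0x49, 0xFF, 0xE3 };
	uint16_t mov1, mov2, jmp1;

	memcpy(&mov1, &t[0], 2);
	memcpy(&mov2, &t[10], 2);
	memcpy(&jmp1, &t[21], 2);
	printf("match: %d\n", mov1 == 0xBB49 && mov2 == 0xBA49 &&
	       (t[20] == 0xF8 || t[20] == 0xF9) &&
	       jmp1 == 0xFF49 && t[23] == 0xE3);	/* prints: match: 1 */
	return 0;
}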
32609+/*
32610+ * PaX: decide what to do with offenders (regs->ip = fault address)
32611+ *
32612+ * returns 1 when the task should be killed
32613+ *         2 when a gcc trampoline was detected
32614+ */
32615+static int pax_handle_fetch_fault(struct pt_regs *regs)
32616+{
32617+ if (v8086_mode(regs))
32618+ return 1;
32619+
32620+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
32621+ return 1;
32622+
32623+#ifdef CONFIG_X86_32
32624+ return pax_handle_fetch_fault_32(regs);
32625+#else
32626+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
32627+ return pax_handle_fetch_fault_32(regs);
32628+ else
32629+ return pax_handle_fetch_fault_64(regs);
32630+#endif
32631+}
32632+#endif
32633+
32634+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32635+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
32636+{
32637+ long i;
32638+
32639+ printk(KERN_ERR "PAX: bytes at PC: ");
32640+ for (i = 0; i < 20; i++) {
32641+ unsigned char c;
32642+ if (get_user(c, (unsigned char __force_user *)pc+i))
32643+ printk(KERN_CONT "?? ");
32644+ else
32645+ printk(KERN_CONT "%02x ", c);
32646+ }
32647+ printk("\n");
32648+
32649+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
32650+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
32651+ unsigned long c;
32652+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
32653+#ifdef CONFIG_X86_32
32654+ printk(KERN_CONT "???????? ");
32655+#else
32656+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
32657+ printk(KERN_CONT "???????? ???????? ");
32658+ else
32659+ printk(KERN_CONT "???????????????? ");
32660+#endif
32661+ } else {
32662+#ifdef CONFIG_X86_64
32663+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
32664+ printk(KERN_CONT "%08x ", (unsigned int)c);
32665+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
32666+ } else
32667+#endif
32668+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
32669+ }
32670+ }
32671+ printk("\n");
32672+}
32673+#endif
32674+
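/*
 * A user-space model of the "PAX: bytes at PC" loop above; dumping 20
 * bytes at a function pointer works here because code pages are normally
 * readable from the owning process (the function-to-object pointer cast
 * is a common extension; illustrative only):
 */
#include <stdio.h>

static void dump_bytes_at_pc(const unsigned char *pc)
{
	int i;

	printf("PAX: bytes at PC: ");
	for (i = 0; i < 20; i++)
		printf("%02x ", pc[i]);
	printf("\n");
}

int main(void)
{
	dump_bytes_at_pc((const unsigned char *)(void *)main);
	return 0;
}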
32675+/**
32676+ * probe_kernel_write(): safely attempt to write to a location
32677+ * @dst: address to write to
32678+ * @src: pointer to the data that shall be written
32679+ * @size: size of the data chunk
32680+ *
32681+ * Safely write to address @dst from the buffer at @src. If a kernel fault
32682+ * happens, handle that and return -EFAULT.
32683+ */
32684+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
32685+{
32686+ long ret;
32687+ mm_segment_t old_fs = get_fs();
32688+
32689+ set_fs(KERNEL_DS);
32690+ pagefault_disable();
32691+ pax_open_kernel();
32692+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
32693+ pax_close_kernel();
32694+ pagefault_enable();
32695+ set_fs(old_fs);
32696+
32697+ return ret ? -EFAULT : 0;
32698+}
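/*
 * A hedged usage sketch for probe_kernel_write() above; kernel context is
 * assumed and patch_one_byte() is a hypothetical helper, not part of this
 * patch:
 */
static int patch_one_byte(void *dst, unsigned char val)
{
	long err = probe_kernel_write(dst, &val, sizeof(val));

	if (err)
		printk(KERN_ERR "patch_one_byte: %p is not writable\n", dst);
	return err;	/* 0 on success, -EFAULT if the write faulted */
}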
32699diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
32700index 207d9aef..69030980 100644
32701--- a/arch/x86/mm/gup.c
32702+++ b/arch/x86/mm/gup.c
32703@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
32704 addr = start;
32705 len = (unsigned long) nr_pages << PAGE_SHIFT;
32706 end = start + len;
32707- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
32708+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32709 (void __user *)start, len)))
32710 return 0;
32711
32712@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
32713 goto slow_irqon;
32714 #endif
32715
32716+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32717+ (void __user *)start, len)))
32718+ return 0;
32719+
32720 /*
32721 * XXX: batch / limit 'nr', to avoid large irq off latency
32722 * needs some instrumenting to determine the common sizes used by
32723diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
32724index 4500142..53a363c 100644
32725--- a/arch/x86/mm/highmem_32.c
32726+++ b/arch/x86/mm/highmem_32.c
32727@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
32728 idx = type + KM_TYPE_NR*smp_processor_id();
32729 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32730 BUG_ON(!pte_none(*(kmap_pte-idx)));
32731+
32732+ pax_open_kernel();
32733 set_pte(kmap_pte-idx, mk_pte(page, prot));
32734+ pax_close_kernel();
32735+
32736 arch_flush_lazy_mmu_mode();
32737
32738 return (void *)vaddr;
32739diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
32740index 8b977eb..4732c33 100644
32741--- a/arch/x86/mm/hugetlbpage.c
32742+++ b/arch/x86/mm/hugetlbpage.c
32743@@ -80,23 +80,24 @@ int pud_huge(pud_t pud)
32744 #ifdef CONFIG_HUGETLB_PAGE
32745 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
32746 unsigned long addr, unsigned long len,
32747- unsigned long pgoff, unsigned long flags)
32748+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32749 {
32750 struct hstate *h = hstate_file(file);
32751 struct vm_unmapped_area_info info;
32752-
32753+
32754 info.flags = 0;
32755 info.length = len;
32756 info.low_limit = current->mm->mmap_legacy_base;
32757 info.high_limit = TASK_SIZE;
32758 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32759 info.align_offset = 0;
32760+ info.threadstack_offset = offset;
32761 return vm_unmapped_area(&info);
32762 }
32763
32764 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32765 unsigned long addr0, unsigned long len,
32766- unsigned long pgoff, unsigned long flags)
32767+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32768 {
32769 struct hstate *h = hstate_file(file);
32770 struct vm_unmapped_area_info info;
32771@@ -108,6 +109,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32772 info.high_limit = current->mm->mmap_base;
32773 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32774 info.align_offset = 0;
32775+ info.threadstack_offset = offset;
32776 addr = vm_unmapped_area(&info);
32777
32778 /*
32779@@ -120,6 +122,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32780 VM_BUG_ON(addr != -ENOMEM);
32781 info.flags = 0;
32782 info.low_limit = TASK_UNMAPPED_BASE;
32783+
32784+#ifdef CONFIG_PAX_RANDMMAP
32785+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
32786+ info.low_limit += current->mm->delta_mmap;
32787+#endif
32788+
32789 info.high_limit = TASK_SIZE;
32790 addr = vm_unmapped_area(&info);
32791 }
32792@@ -134,10 +142,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32793 struct hstate *h = hstate_file(file);
32794 struct mm_struct *mm = current->mm;
32795 struct vm_area_struct *vma;
32796+ unsigned long pax_task_size = TASK_SIZE;
32797+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
32798
32799 if (len & ~huge_page_mask(h))
32800 return -EINVAL;
32801- if (len > TASK_SIZE)
32802+
32803+#ifdef CONFIG_PAX_SEGMEXEC
32804+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32805+ pax_task_size = SEGMEXEC_TASK_SIZE;
32806+#endif
32807+
32808+ pax_task_size -= PAGE_SIZE;
32809+
32810+ if (len > pax_task_size)
32811 return -ENOMEM;
32812
32813 if (flags & MAP_FIXED) {
32814@@ -146,19 +164,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32815 return addr;
32816 }
32817
32818+#ifdef CONFIG_PAX_RANDMMAP
32819+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
32820+#endif
32821+
32822 if (addr) {
32823 addr = ALIGN(addr, huge_page_size(h));
32824 vma = find_vma(mm, addr);
32825- if (TASK_SIZE - len >= addr &&
32826- (!vma || addr + len <= vma->vm_start))
32827+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
32828 return addr;
32829 }
32830 if (mm->get_unmapped_area == arch_get_unmapped_area)
32831 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
32832- pgoff, flags);
32833+ pgoff, flags, offset);
32834 else
32835 return hugetlb_get_unmapped_area_topdown(file, addr, len,
32836- pgoff, flags);
32837+ pgoff, flags, offset);
32838 }
32839 #endif /* CONFIG_HUGETLB_PAGE */
32840
32841diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
32842index f971306..e83e0f6 100644
32843--- a/arch/x86/mm/init.c
32844+++ b/arch/x86/mm/init.c
32845@@ -4,6 +4,7 @@
32846 #include <linux/swap.h>
32847 #include <linux/memblock.h>
32848 #include <linux/bootmem.h> /* for max_low_pfn */
32849+#include <linux/tboot.h>
32850
32851 #include <asm/cacheflush.h>
32852 #include <asm/e820.h>
32853@@ -17,6 +18,8 @@
32854 #include <asm/proto.h>
32855 #include <asm/dma.h> /* for MAX_DMA_PFN */
32856 #include <asm/microcode.h>
32857+#include <asm/desc.h>
32858+#include <asm/bios_ebda.h>
32859
32860 #include "mm_internal.h"
32861
32862@@ -563,7 +566,18 @@ void __init init_mem_mapping(void)
32863 early_ioremap_page_table_range_init();
32864 #endif
32865
32866+#ifdef CONFIG_PAX_PER_CPU_PGD
32867+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
32868+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32869+ KERNEL_PGD_PTRS);
32870+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
32871+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32872+ KERNEL_PGD_PTRS);
32873+ load_cr3(get_cpu_pgd(0, kernel));
32874+#else
32875 load_cr3(swapper_pg_dir);
32876+#endif
32877+
32878 __flush_tlb_all();
32879
32880 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
32881@@ -579,10 +593,40 @@ void __init init_mem_mapping(void)
32882 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
32883 * mmio resources as well as potential bios/acpi data regions.
32884 */
32885+
32886+#ifdef CONFIG_GRKERNSEC_KMEM
32887+static unsigned int ebda_start __read_only;
32888+static unsigned int ebda_end __read_only;
32889+#endif
32890+
32891 int devmem_is_allowed(unsigned long pagenr)
32892 {
32893- if (pagenr < 256)
32894+#ifdef CONFIG_GRKERNSEC_KMEM
32895+ /* allow BDA */
32896+ if (!pagenr)
32897 return 1;
32898+ /* allow EBDA */
32899+ if (pagenr >= ebda_start && pagenr < ebda_end)
32900+ return 1;
32901+ /* if tboot is in use, allow access to its hardcoded serial log range */
32902+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
32903+ return 1;
32904+#else
32905+ if (!pagenr)
32906+ return 1;
32907+#ifdef CONFIG_VM86
32908+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
32909+ return 1;
32910+#endif
32911+#endif
32912+
32913+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
32914+ return 1;
32915+#ifdef CONFIG_GRKERNSEC_KMEM
32916+ /* throw out everything else below 1MB */
32917+ if (pagenr <= 256)
32918+ return 0;
32919+#endif
32920 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
32921 return 0;
32922 if (!page_is_ram(pagenr))
32923@@ -628,8 +672,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
32924 #endif
32925 }
32926
32927+#ifdef CONFIG_GRKERNSEC_KMEM
32928+static inline void gr_init_ebda(void)
32929+{
32930+ unsigned int ebda_addr;
32931+ unsigned int ebda_size = 0;
32932+
32933+ ebda_addr = get_bios_ebda();
32934+ if (ebda_addr) {
32935+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
32936+ ebda_size <<= 10;
32937+ }
32938+ if (ebda_addr && ebda_size) {
32939+ ebda_start = ebda_addr >> PAGE_SHIFT;
32940+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
32941+ } else {
32942+ ebda_start = 0x9f000 >> PAGE_SHIFT;
32943+ ebda_end = 0xa0000 >> PAGE_SHIFT;
32944+ }
32945+}
32946+#else
32947+static inline void gr_init_ebda(void) { }
32948+#endif
32949+
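/*
 * A worked user-space example of the EBDA page-range math in gr_init_ebda()
 * above, with illustrative firmware values (EBDA at 0x9fc00, size byte 1,
 * i.e. 1 KiB); the 0xa0000 cap is the start of legacy video memory:
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_ALIGN(x) (((x) + ((1u << PAGE_SHIFT) - 1)) & ~((1u << PAGE_SHIFT) - 1))

int main(void)
{
	unsigned int ebda_addr = 0x9fc00;	/* as returned by get_bios_ebda() */
	unsigned int ebda_size = 1u << 10;	/* first EBDA byte (in KiB) << 10 */
	unsigned int end = PAGE_ALIGN(ebda_addr + ebda_size);

	if (end > 0xa0000)
		end = 0xa0000;
	printf("EBDA pages: %#x-%#x\n", ebda_addr >> PAGE_SHIFT, end >> PAGE_SHIFT);
	/* prints: EBDA pages: 0x9f-0xa0 */
	return 0;
}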
32950 void free_initmem(void)
32951 {
32952+#ifdef CONFIG_PAX_KERNEXEC
32953+#ifdef CONFIG_X86_32
32954+ /* PaX: limit KERNEL_CS to actual size */
32955+ unsigned long addr, limit;
32956+ struct desc_struct d;
32957+ int cpu;
32958+#else
32959+ pgd_t *pgd;
32960+ pud_t *pud;
32961+ pmd_t *pmd;
32962+ unsigned long addr, end;
32963+#endif
32964+#endif
32965+
32966+ gr_init_ebda();
32967+
32968+#ifdef CONFIG_PAX_KERNEXEC
32969+#ifdef CONFIG_X86_32
32970+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
32971+ limit = (limit - 1UL) >> PAGE_SHIFT;
32972+
32973+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
32974+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32975+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
32976+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
32977+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
32978+ }
32979+
32980+ /* PaX: make KERNEL_CS read-only */
32981+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
32982+ if (!paravirt_enabled())
32983+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
32984+/*
32985+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
32986+ pgd = pgd_offset_k(addr);
32987+ pud = pud_offset(pgd, addr);
32988+ pmd = pmd_offset(pud, addr);
32989+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32990+ }
32991+*/
32992+#ifdef CONFIG_X86_PAE
32993+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
32994+/*
32995+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
32996+ pgd = pgd_offset_k(addr);
32997+ pud = pud_offset(pgd, addr);
32998+ pmd = pmd_offset(pud, addr);
32999+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
33000+ }
33001+*/
33002+#endif
33003+
33004+#ifdef CONFIG_MODULES
33005+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
33006+#endif
33007+
33008+#else
33009+ /* PaX: make kernel code/rodata read-only, rest non-executable */
33010+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
33011+ pgd = pgd_offset_k(addr);
33012+ pud = pud_offset(pgd, addr);
33013+ pmd = pmd_offset(pud, addr);
33014+ if (!pmd_present(*pmd))
33015+ continue;
33016+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
33017+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
33018+ else
33019+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
33020+ }
33021+
33022+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
33023+ end = addr + KERNEL_IMAGE_SIZE;
33024+ for (; addr < end; addr += PMD_SIZE) {
33025+ pgd = pgd_offset_k(addr);
33026+ pud = pud_offset(pgd, addr);
33027+ pmd = pmd_offset(pud, addr);
33028+ if (!pmd_present(*pmd))
33029+ continue;
33030+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
33031+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
33032+ }
33033+#endif
33034+
33035+ flush_tlb_all();
33036+#endif
33037+
33038 free_init_pages("unused kernel",
33039 (unsigned long)(&__init_begin),
33040 (unsigned long)(&__init_end));
33041diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
33042index e395048..cd38278 100644
33043--- a/arch/x86/mm/init_32.c
33044+++ b/arch/x86/mm/init_32.c
33045@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
33046 bool __read_mostly __vmalloc_start_set = false;
33047
33048 /*
33049- * Creates a middle page table and puts a pointer to it in the
33050- * given global directory entry. This only returns the gd entry
33051- * in non-PAE compilation mode, since the middle layer is folded.
33052- */
33053-static pmd_t * __init one_md_table_init(pgd_t *pgd)
33054-{
33055- pud_t *pud;
33056- pmd_t *pmd_table;
33057-
33058-#ifdef CONFIG_X86_PAE
33059- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
33060- pmd_table = (pmd_t *)alloc_low_page();
33061- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
33062- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
33063- pud = pud_offset(pgd, 0);
33064- BUG_ON(pmd_table != pmd_offset(pud, 0));
33065-
33066- return pmd_table;
33067- }
33068-#endif
33069- pud = pud_offset(pgd, 0);
33070- pmd_table = pmd_offset(pud, 0);
33071-
33072- return pmd_table;
33073-}
33074-
33075-/*
33076 * Create a page table and place a pointer to it in a middle page
33077 * directory entry:
33078 */
33079@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
33080 pte_t *page_table = (pte_t *)alloc_low_page();
33081
33082 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
33083+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
33084+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
33085+#else
33086 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
33087+#endif
33088 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
33089 }
33090
33091 return pte_offset_kernel(pmd, 0);
33092 }
33093
33094+static pmd_t * __init one_md_table_init(pgd_t *pgd)
33095+{
33096+ pud_t *pud;
33097+ pmd_t *pmd_table;
33098+
33099+ pud = pud_offset(pgd, 0);
33100+ pmd_table = pmd_offset(pud, 0);
33101+
33102+ return pmd_table;
33103+}
33104+
33105 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
33106 {
33107 int pgd_idx = pgd_index(vaddr);
33108@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33109 int pgd_idx, pmd_idx;
33110 unsigned long vaddr;
33111 pgd_t *pgd;
33112+ pud_t *pud;
33113 pmd_t *pmd;
33114 pte_t *pte = NULL;
33115 unsigned long count = page_table_range_init_count(start, end);
33116@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33117 pgd = pgd_base + pgd_idx;
33118
33119 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
33120- pmd = one_md_table_init(pgd);
33121- pmd = pmd + pmd_index(vaddr);
33122+ pud = pud_offset(pgd, vaddr);
33123+ pmd = pmd_offset(pud, vaddr);
33124+
33125+#ifdef CONFIG_X86_PAE
33126+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33127+#endif
33128+
33129 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
33130 pmd++, pmd_idx++) {
33131 pte = page_table_kmap_check(one_page_table_init(pmd),
33132@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33133 }
33134 }
33135
33136-static inline int is_kernel_text(unsigned long addr)
33137+static inline int is_kernel_text(unsigned long start, unsigned long end)
33138 {
33139- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
33140- return 1;
33141- return 0;
33142+ if ((start >= ktla_ktva((unsigned long)_etext) ||
33143+ end <= ktla_ktva((unsigned long)_stext)) &&
33144+ (start >= ktla_ktva((unsigned long)_einittext) ||
33145+ end <= ktla_ktva((unsigned long)_sinittext)) &&
33146+
33147+#ifdef CONFIG_ACPI_SLEEP
33148+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
33149+#endif
33150+
33151+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
33152+ return 0;
33153+ return 1;
33154 }
33155
33156 /*
33157@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
33158 unsigned long last_map_addr = end;
33159 unsigned long start_pfn, end_pfn;
33160 pgd_t *pgd_base = swapper_pg_dir;
33161- int pgd_idx, pmd_idx, pte_ofs;
33162+ unsigned int pgd_idx, pmd_idx, pte_ofs;
33163 unsigned long pfn;
33164 pgd_t *pgd;
33165+ pud_t *pud;
33166 pmd_t *pmd;
33167 pte_t *pte;
33168 unsigned pages_2m, pages_4k;
33169@@ -291,8 +295,13 @@ repeat:
33170 pfn = start_pfn;
33171 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33172 pgd = pgd_base + pgd_idx;
33173- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
33174- pmd = one_md_table_init(pgd);
33175+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
33176+ pud = pud_offset(pgd, 0);
33177+ pmd = pmd_offset(pud, 0);
33178+
33179+#ifdef CONFIG_X86_PAE
33180+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33181+#endif
33182
33183 if (pfn >= end_pfn)
33184 continue;
33185@@ -304,14 +313,13 @@ repeat:
33186 #endif
33187 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
33188 pmd++, pmd_idx++) {
33189- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
33190+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
33191
33192 /*
33193 * Map with big pages if possible, otherwise
33194 * create normal page tables:
33195 */
33196 if (use_pse) {
33197- unsigned int addr2;
33198 pgprot_t prot = PAGE_KERNEL_LARGE;
33199 /*
33200 * first pass will use the same initial
33201@@ -322,11 +330,7 @@ repeat:
33202 _PAGE_PSE);
33203
33204 pfn &= PMD_MASK >> PAGE_SHIFT;
33205- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
33206- PAGE_OFFSET + PAGE_SIZE-1;
33207-
33208- if (is_kernel_text(addr) ||
33209- is_kernel_text(addr2))
33210+ if (is_kernel_text(address, address + PMD_SIZE))
33211 prot = PAGE_KERNEL_LARGE_EXEC;
33212
33213 pages_2m++;
33214@@ -343,7 +347,7 @@ repeat:
33215 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33216 pte += pte_ofs;
33217 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
33218- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
33219+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
33220 pgprot_t prot = PAGE_KERNEL;
33221 /*
33222 * first pass will use the same initial
33223@@ -351,7 +355,7 @@ repeat:
33224 */
33225 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
33226
33227- if (is_kernel_text(addr))
33228+ if (is_kernel_text(address, address + PAGE_SIZE))
33229 prot = PAGE_KERNEL_EXEC;
33230
33231 pages_4k++;
33232@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
33233
33234 pud = pud_offset(pgd, va);
33235 pmd = pmd_offset(pud, va);
33236- if (!pmd_present(*pmd))
33237+ if (!pmd_present(*pmd)) /* PAX TODO: also stop on pmd_large(*pmd) */
33238 break;
33239
33240 /* should not be large page here */
33241@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
33242
33243 static void __init pagetable_init(void)
33244 {
33245- pgd_t *pgd_base = swapper_pg_dir;
33246-
33247- permanent_kmaps_init(pgd_base);
33248+ permanent_kmaps_init(swapper_pg_dir);
33249 }
33250
33251-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
33252+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
33253 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33254
33255 /* user-defined highmem size */
33256@@ -787,10 +789,10 @@ void __init mem_init(void)
33257 ((unsigned long)&__init_end -
33258 (unsigned long)&__init_begin) >> 10,
33259
33260- (unsigned long)&_etext, (unsigned long)&_edata,
33261- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
33262+ (unsigned long)&_sdata, (unsigned long)&_edata,
33263+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
33264
33265- (unsigned long)&_text, (unsigned long)&_etext,
33266+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
33267 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
33268
33269 /*
33270@@ -883,6 +885,7 @@ void set_kernel_text_rw(void)
33271 if (!kernel_set_to_readonly)
33272 return;
33273
33274+ start = ktla_ktva(start);
33275 pr_debug("Set kernel text: %lx - %lx for read write\n",
33276 start, start+size);
33277
33278@@ -897,6 +900,7 @@ void set_kernel_text_ro(void)
33279 if (!kernel_set_to_readonly)
33280 return;
33281
33282+ start = ktla_ktva(start);
33283 pr_debug("Set kernel text: %lx - %lx for read only\n",
33284 start, start+size);
33285
33286@@ -925,6 +929,7 @@ void mark_rodata_ro(void)
33287 unsigned long start = PFN_ALIGN(_text);
33288 unsigned long size = PFN_ALIGN(_etext) - start;
33289
33290+ start = ktla_ktva(start);
33291 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
33292 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
33293 size >> 10);
33294diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
33295index df1a992..94c272c 100644
33296--- a/arch/x86/mm/init_64.c
33297+++ b/arch/x86/mm/init_64.c
33298@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
33299 * around without checking the pgd every time.
33300 */
33301
33302-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
33303+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
33304 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33305
33306 int force_personality32;
33307@@ -184,12 +184,29 @@ void sync_global_pgds(unsigned long start, unsigned long end)
33308
33309 for (address = start; address <= end; address += PGDIR_SIZE) {
33310 const pgd_t *pgd_ref = pgd_offset_k(address);
33311+
33312+#ifdef CONFIG_PAX_PER_CPU_PGD
33313+ unsigned long cpu;
33314+#else
33315 struct page *page;
33316+#endif
33317
33318 if (pgd_none(*pgd_ref))
33319 continue;
33320
33321 spin_lock(&pgd_lock);
33322+
33323+#ifdef CONFIG_PAX_PER_CPU_PGD
33324+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33325+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
33326+
33327+ if (pgd_none(*pgd))
33328+ set_pgd(pgd, *pgd_ref);
33329+ else
33330+ BUG_ON(pgd_page_vaddr(*pgd)
33331+ != pgd_page_vaddr(*pgd_ref));
33332+ pgd = pgd_offset_cpu(cpu, kernel, address);
33333+#else
33334 list_for_each_entry(page, &pgd_list, lru) {
33335 pgd_t *pgd;
33336 spinlock_t *pgt_lock;
33337@@ -198,6 +215,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
33338 /* the pgt_lock only for Xen */
33339 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33340 spin_lock(pgt_lock);
33341+#endif
33342
33343 if (pgd_none(*pgd))
33344 set_pgd(pgd, *pgd_ref);
33345@@ -205,7 +223,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
33346 BUG_ON(pgd_page_vaddr(*pgd)
33347 != pgd_page_vaddr(*pgd_ref));
33348
33349+#ifndef CONFIG_PAX_PER_CPU_PGD
33350 spin_unlock(pgt_lock);
33351+#endif
33352+
33353 }
33354 spin_unlock(&pgd_lock);
33355 }
33356@@ -238,7 +259,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
33357 {
33358 if (pgd_none(*pgd)) {
33359 pud_t *pud = (pud_t *)spp_getpage();
33360- pgd_populate(&init_mm, pgd, pud);
33361+ pgd_populate_kernel(&init_mm, pgd, pud);
33362 if (pud != pud_offset(pgd, 0))
33363 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
33364 pud, pud_offset(pgd, 0));
33365@@ -250,7 +271,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
33366 {
33367 if (pud_none(*pud)) {
33368 pmd_t *pmd = (pmd_t *) spp_getpage();
33369- pud_populate(&init_mm, pud, pmd);
33370+ pud_populate_kernel(&init_mm, pud, pmd);
33371 if (pmd != pmd_offset(pud, 0))
33372 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
33373 pmd, pmd_offset(pud, 0));
33374@@ -279,7 +300,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
33375 pmd = fill_pmd(pud, vaddr);
33376 pte = fill_pte(pmd, vaddr);
33377
33378+ pax_open_kernel();
33379 set_pte(pte, new_pte);
33380+ pax_close_kernel();
33381
33382 /*
33383 * It's enough to flush this one mapping.
33384@@ -338,14 +361,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
33385 pgd = pgd_offset_k((unsigned long)__va(phys));
33386 if (pgd_none(*pgd)) {
33387 pud = (pud_t *) spp_getpage();
33388- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
33389- _PAGE_USER));
33390+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
33391 }
33392 pud = pud_offset(pgd, (unsigned long)__va(phys));
33393 if (pud_none(*pud)) {
33394 pmd = (pmd_t *) spp_getpage();
33395- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
33396- _PAGE_USER));
33397+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
33398 }
33399 pmd = pmd_offset(pud, phys);
33400 BUG_ON(!pmd_none(*pmd));
33401@@ -586,7 +607,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
33402 prot);
33403
33404 spin_lock(&init_mm.page_table_lock);
33405- pud_populate(&init_mm, pud, pmd);
33406+ pud_populate_kernel(&init_mm, pud, pmd);
33407 spin_unlock(&init_mm.page_table_lock);
33408 }
33409 __flush_tlb_all();
33410@@ -627,7 +648,7 @@ kernel_physical_mapping_init(unsigned long start,
33411 page_size_mask);
33412
33413 spin_lock(&init_mm.page_table_lock);
33414- pgd_populate(&init_mm, pgd, pud);
33415+ pgd_populate_kernel(&init_mm, pgd, pud);
33416 spin_unlock(&init_mm.page_table_lock);
33417 pgd_changed = true;
33418 }
33419@@ -1195,8 +1216,8 @@ static struct vm_operations_struct gate_vma_ops = {
33420 static struct vm_area_struct gate_vma = {
33421 .vm_start = VSYSCALL_ADDR,
33422 .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
33423- .vm_page_prot = PAGE_READONLY_EXEC,
33424- .vm_flags = VM_READ | VM_EXEC,
33425+ .vm_page_prot = PAGE_READONLY,
33426+ .vm_flags = VM_READ,
33427 .vm_ops = &gate_vma_ops,
33428 };
33429
33430diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
33431index 7b179b4..6bd17777 100644
33432--- a/arch/x86/mm/iomap_32.c
33433+++ b/arch/x86/mm/iomap_32.c
33434@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
33435 type = kmap_atomic_idx_push();
33436 idx = type + KM_TYPE_NR * smp_processor_id();
33437 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33438+
33439+ pax_open_kernel();
33440 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
33441+ pax_close_kernel();
33442+
33443 arch_flush_lazy_mmu_mode();
33444
33445 return (void *)vaddr;
33446diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
33447index baff1da..2816ef4 100644
33448--- a/arch/x86/mm/ioremap.c
33449+++ b/arch/x86/mm/ioremap.c
33450@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
33451 unsigned long i;
33452
33453 for (i = 0; i < nr_pages; ++i)
33454- if (pfn_valid(start_pfn + i) &&
33455- !PageReserved(pfn_to_page(start_pfn + i)))
33456+ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
33457+ !PageReserved(pfn_to_page(start_pfn + i))))
33458 return 1;
33459
33460 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
33461@@ -268,7 +268,7 @@ EXPORT_SYMBOL(ioremap_prot);
33462 *
33463 * Caller must ensure there is only one unmapping for the same pointer.
33464 */
33465-void iounmap(volatile void __iomem *addr)
33466+void iounmap(const volatile void __iomem *addr)
33467 {
33468 struct vm_struct *p, *o;
33469
33470@@ -322,6 +322,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
33471
33472 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
33473 if (page_is_ram(start >> PAGE_SHIFT))
33474+#ifdef CONFIG_HIGHMEM
33475+ if ((start >> PAGE_SHIFT) < max_low_pfn)
33476+#endif
33477 return __va(phys);
33478
33479 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
33480@@ -334,13 +337,16 @@ void *xlate_dev_mem_ptr(unsigned long phys)
33481 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
33482 {
33483 if (page_is_ram(phys >> PAGE_SHIFT))
33484+#ifdef CONFIG_HIGHMEM
33485+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33486+#endif
33487 return;
33488
33489 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
33490 return;
33491 }
33492
33493-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
33494+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
33495
33496 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
33497 {
33498@@ -376,8 +382,7 @@ void __init early_ioremap_init(void)
33499 early_ioremap_setup();
33500
33501 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
33502- memset(bm_pte, 0, sizeof(bm_pte));
33503- pmd_populate_kernel(&init_mm, pmd, bm_pte);
33504+ pmd_populate_user(&init_mm, pmd, bm_pte);
33505
33506 /*
33507 * The boot-ioremap range spans multiple pmds, for which
33508diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
33509index dd89a13..d77bdcc 100644
33510--- a/arch/x86/mm/kmemcheck/kmemcheck.c
33511+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
33512@@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
33513 * memory (e.g. tracked pages)? For now, we need this to avoid
33514 * invoking kmemcheck for PnP BIOS calls.
33515 */
33516- if (regs->flags & X86_VM_MASK)
33517+ if (v8086_mode(regs))
33518 return false;
33519- if (regs->cs != __KERNEL_CS)
33520+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
33521 return false;
33522
33523 pte = kmemcheck_pte_lookup(address);
33524diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
33525index 25e7e13..1964579 100644
33526--- a/arch/x86/mm/mmap.c
33527+++ b/arch/x86/mm/mmap.c
33528@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
33529 * Leave an at least ~128 MB hole with possible stack randomization.
33530 */
33531 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
33532-#define MAX_GAP (TASK_SIZE/6*5)
33533+#define MAX_GAP (pax_task_size/6*5)
33534
33535 static int mmap_is_legacy(void)
33536 {
33537@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
33538 return rnd << PAGE_SHIFT;
33539 }
33540
33541-static unsigned long mmap_base(void)
33542+static unsigned long mmap_base(struct mm_struct *mm)
33543 {
33544 unsigned long gap = rlimit(RLIMIT_STACK);
33545+ unsigned long pax_task_size = TASK_SIZE;
33546+
33547+#ifdef CONFIG_PAX_SEGMEXEC
33548+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33549+ pax_task_size = SEGMEXEC_TASK_SIZE;
33550+#endif
33551
33552 if (gap < MIN_GAP)
33553 gap = MIN_GAP;
33554 else if (gap > MAX_GAP)
33555 gap = MAX_GAP;
33556
33557- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
33558+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
33559 }
33560
33561 /*
33562 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
33563 * does, but not when emulating X86_32
33564 */
33565-static unsigned long mmap_legacy_base(void)
33566+static unsigned long mmap_legacy_base(struct mm_struct *mm)
33567 {
33568- if (mmap_is_ia32())
33569+ if (mmap_is_ia32()) {
33570+
33571+#ifdef CONFIG_PAX_SEGMEXEC
33572+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33573+ return SEGMEXEC_TASK_UNMAPPED_BASE;
33574+ else
33575+#endif
33576+
33577 return TASK_UNMAPPED_BASE;
33578- else
33579+ } else
33580 return TASK_UNMAPPED_BASE + mmap_rnd();
33581 }
33582
33583@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
33584 */
33585 void arch_pick_mmap_layout(struct mm_struct *mm)
33586 {
33587- mm->mmap_legacy_base = mmap_legacy_base();
33588- mm->mmap_base = mmap_base();
33589+ mm->mmap_legacy_base = mmap_legacy_base(mm);
33590+ mm->mmap_base = mmap_base(mm);
33591+
33592+#ifdef CONFIG_PAX_RANDMMAP
33593+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
33594+ mm->mmap_legacy_base += mm->delta_mmap;
33595+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
33596+ }
33597+#endif
33598
33599 if (mmap_is_legacy()) {
33600 mm->mmap_base = mm->mmap_legacy_base;
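/*
 * A user-space model of the mmap_base() clamp above with illustrative
 * 32-bit values (randomization via mmap_rnd() omitted; MIN_GAP's
 * stack_maxrandom_size() term likewise folded out for clarity):
 */
#include <stdio.h>

int main(void)
{
	unsigned long task_size = 0xC0000000UL;	/* 3 GiB user split */
	unsigned long min_gap = 128UL << 20;	/* ~128 MiB */
	unsigned long max_gap = task_size / 6 * 5;
	unsigned long gap = 8UL << 20;		/* RLIMIT_STACK = 8 MiB */
	unsigned long base;

	if (gap < min_gap)
		gap = min_gap;
	else if (gap > max_gap)
		gap = max_gap;

	base = (task_size - gap) & ~0xfffUL;	/* page-align down */
	printf("mmap_base = %#lx\n", base);
	/* prints: mmap_base = 0xb8000000 */
	return 0;
}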
33601diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
33602index 0057a7a..95c7edd 100644
33603--- a/arch/x86/mm/mmio-mod.c
33604+++ b/arch/x86/mm/mmio-mod.c
33605@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
33606 break;
33607 default:
33608 {
33609- unsigned char *ip = (unsigned char *)instptr;
33610+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
33611 my_trace->opcode = MMIO_UNKNOWN_OP;
33612 my_trace->width = 0;
33613 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
33614@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
33615 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33616 void __iomem *addr)
33617 {
33618- static atomic_t next_id;
33619+ static atomic_unchecked_t next_id;
33620 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
33621 /* These are page-unaligned. */
33622 struct mmiotrace_map map = {
33623@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33624 .private = trace
33625 },
33626 .phys = offset,
33627- .id = atomic_inc_return(&next_id)
33628+ .id = atomic_inc_return_unchecked(&next_id)
33629 };
33630 map.map_id = trace->id;
33631
33632@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
33633 ioremap_trace_core(offset, size, addr);
33634 }
33635
33636-static void iounmap_trace_core(volatile void __iomem *addr)
33637+static void iounmap_trace_core(const volatile void __iomem *addr)
33638 {
33639 struct mmiotrace_map map = {
33640 .phys = 0,
33641@@ -328,7 +328,7 @@ not_enabled:
33642 }
33643 }
33644
33645-void mmiotrace_iounmap(volatile void __iomem *addr)
33646+void mmiotrace_iounmap(const volatile void __iomem *addr)
33647 {
33648 might_sleep();
33649 if (is_enabled()) /* recheck and proper locking in *_core() */
33650diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
33651index a32b706..efb308b 100644
33652--- a/arch/x86/mm/numa.c
33653+++ b/arch/x86/mm/numa.c
33654@@ -478,7 +478,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
33655 return true;
33656 }
33657
33658-static int __init numa_register_memblks(struct numa_meminfo *mi)
33659+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
33660 {
33661 unsigned long uninitialized_var(pfn_align);
33662 int i, nid;
33663diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
33664index ae242a7..1c7998f 100644
33665--- a/arch/x86/mm/pageattr.c
33666+++ b/arch/x86/mm/pageattr.c
33667@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33668 */
33669 #ifdef CONFIG_PCI_BIOS
33670 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
33671- pgprot_val(forbidden) |= _PAGE_NX;
33672+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33673 #endif
33674
33675 /*
33676@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33677 * Does not cover __inittext since that is gone later on. On
33678 * 64bit we do not enforce !NX on the low mapping
33679 */
33680- if (within(address, (unsigned long)_text, (unsigned long)_etext))
33681- pgprot_val(forbidden) |= _PAGE_NX;
33682+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
33683+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33684
33685+#ifdef CONFIG_DEBUG_RODATA
33686 /*
33687 * The .rodata section needs to be read-only. Using the pfn
33688 * catches all aliases.
33689@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33690 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
33691 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
33692 pgprot_val(forbidden) |= _PAGE_RW;
33693+#endif
33694
33695 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
33696 /*
33697@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33698 }
33699 #endif
33700
33701+#ifdef CONFIG_PAX_KERNEXEC
33702+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
33703+ pgprot_val(forbidden) |= _PAGE_RW;
33704+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33705+ }
33706+#endif
33707+
33708 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
33709
33710 return prot;
33711@@ -420,23 +429,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
33712 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
33713 {
33714 /* change init_mm */
33715+ pax_open_kernel();
33716 set_pte_atomic(kpte, pte);
33717+
33718 #ifdef CONFIG_X86_32
33719 if (!SHARED_KERNEL_PMD) {
33720+
33721+#ifdef CONFIG_PAX_PER_CPU_PGD
33722+ unsigned long cpu;
33723+#else
33724 struct page *page;
33725+#endif
33726
33727+#ifdef CONFIG_PAX_PER_CPU_PGD
33728+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33729+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
33730+#else
33731 list_for_each_entry(page, &pgd_list, lru) {
33732- pgd_t *pgd;
33733+ pgd_t *pgd = (pgd_t *)page_address(page);
33734+#endif
33735+
33736 pud_t *pud;
33737 pmd_t *pmd;
33738
33739- pgd = (pgd_t *)page_address(page) + pgd_index(address);
33740+ pgd += pgd_index(address);
33741 pud = pud_offset(pgd, address);
33742 pmd = pmd_offset(pud, address);
33743 set_pte_atomic((pte_t *)pmd, pte);
33744 }
33745 }
33746 #endif
33747+ pax_close_kernel();
33748 }
33749
33750 static int
33751diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
33752index 6574388..87e9bef 100644
33753--- a/arch/x86/mm/pat.c
33754+++ b/arch/x86/mm/pat.c
33755@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
33756
33757 if (!entry) {
33758 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
33759- current->comm, current->pid, start, end - 1);
33760+ current->comm, task_pid_nr(current), start, end - 1);
33761 return -EINVAL;
33762 }
33763
33764@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33765
33766 while (cursor < to) {
33767 if (!devmem_is_allowed(pfn)) {
33768- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
33769- current->comm, from, to - 1);
33770+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
33771+ current->comm, from, to - 1, cursor);
33772 return 0;
33773 }
33774 cursor += PAGE_SIZE;
33775@@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
33776 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
33777 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
33778 "for [mem %#010Lx-%#010Lx]\n",
33779- current->comm, current->pid,
33780+ current->comm, task_pid_nr(current),
33781 cattr_name(flags),
33782 base, (unsigned long long)(base + size-1));
33783 return -EINVAL;
33784@@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33785 flags = lookup_memtype(paddr);
33786 if (want_flags != flags) {
33787 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
33788- current->comm, current->pid,
33789+ current->comm, task_pid_nr(current),
33790 cattr_name(want_flags),
33791 (unsigned long long)paddr,
33792 (unsigned long long)(paddr + size - 1),
33793@@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33794 free_memtype(paddr, paddr + size);
33795 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
33796 " for [mem %#010Lx-%#010Lx], got %s\n",
33797- current->comm, current->pid,
33798+ current->comm, task_pid_nr(current),
33799 cattr_name(want_flags),
33800 (unsigned long long)paddr,
33801 (unsigned long long)(paddr + size - 1),
33802diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
33803index 415f6c4..d319983 100644
33804--- a/arch/x86/mm/pat_rbtree.c
33805+++ b/arch/x86/mm/pat_rbtree.c
33806@@ -160,7 +160,7 @@ success:
33807
33808 failure:
33809 printk(KERN_INFO "%s:%d conflicting memory types "
33810- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
33811+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
33812 end, cattr_name(found_type), cattr_name(match->type));
33813 return -EBUSY;
33814 }
33815diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
33816index 9f0614d..92ae64a 100644
33817--- a/arch/x86/mm/pf_in.c
33818+++ b/arch/x86/mm/pf_in.c
33819@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
33820 int i;
33821 enum reason_type rv = OTHERS;
33822
33823- p = (unsigned char *)ins_addr;
33824+ p = (unsigned char *)ktla_ktva(ins_addr);
33825 p += skip_prefix(p, &prf);
33826 p += get_opcode(p, &opcode);
33827
33828@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
33829 struct prefix_bits prf;
33830 int i;
33831
33832- p = (unsigned char *)ins_addr;
33833+ p = (unsigned char *)ktla_ktva(ins_addr);
33834 p += skip_prefix(p, &prf);
33835 p += get_opcode(p, &opcode);
33836
33837@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
33838 struct prefix_bits prf;
33839 int i;
33840
33841- p = (unsigned char *)ins_addr;
33842+ p = (unsigned char *)ktla_ktva(ins_addr);
33843 p += skip_prefix(p, &prf);
33844 p += get_opcode(p, &opcode);
33845
33846@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
33847 struct prefix_bits prf;
33848 int i;
33849
33850- p = (unsigned char *)ins_addr;
33851+ p = (unsigned char *)ktla_ktva(ins_addr);
33852 p += skip_prefix(p, &prf);
33853 p += get_opcode(p, &opcode);
33854 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
33855@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
33856 struct prefix_bits prf;
33857 int i;
33858
33859- p = (unsigned char *)ins_addr;
33860+ p = (unsigned char *)ktla_ktva(ins_addr);
33861 p += skip_prefix(p, &prf);
33862 p += get_opcode(p, &opcode);
33863 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
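pf_in.c decodes faulting instructions for mmiotrace. Under PaX KERNEXEC the kernel text is reachable at more than one address, so each raw ins_addr dereference is rebased through ktla_ktva() (kernel text linear address to kernel text virtual address) before the decoder reads opcode bytes. A toy model of that rebase, assuming a constant delta between the two mappings; DEMO_DELTA and the demo_ name are invented for illustration:

	#include <stdint.h>
	#include <stdio.h>

	#define DEMO_DELTA 0x10000000UL	/* hypothetical mapping offset */

	/* ktla_ktva()-style rebase: read instruction bytes through the
	 * alias mapping instead of the address the fault reported. */
	static const unsigned char *demo_ktla_ktva(uintptr_t ins_addr)
	{
		return (const unsigned char *)(ins_addr + DEMO_DELTA);
	}

	int main(void)
	{
		static const unsigned char text[3] = { 0x90, 0x90, 0xc3 };
		uintptr_t linear = (uintptr_t)text - DEMO_DELTA;

		printf("first opcode: %#x\n", demo_ktla_ktva(linear)[0]);
		return 0;
	}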
33864diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
33865index 6fb6927..4fc13c0 100644
33866--- a/arch/x86/mm/pgtable.c
33867+++ b/arch/x86/mm/pgtable.c
33868@@ -97,10 +97,71 @@ static inline void pgd_list_del(pgd_t *pgd)
33869 list_del(&page->lru);
33870 }
33871
33872-#define UNSHARED_PTRS_PER_PGD \
33873- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33874+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33875+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
33876
33877+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
33878+{
33879+ unsigned int count = USER_PGD_PTRS;
33880
33881+ if (!pax_user_shadow_base)
33882+ return;
33883+
33884+ while (count--)
33885+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
33886+}
33887+#endif
33888+
33889+#ifdef CONFIG_PAX_PER_CPU_PGD
33890+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
33891+{
33892+ unsigned int count = USER_PGD_PTRS;
33893+
33894+ while (count--) {
33895+ pgd_t pgd;
33896+
33897+#ifdef CONFIG_X86_64
33898+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
33899+#else
33900+ pgd = *src++;
33901+#endif
33902+
33903+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33904+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
33905+#endif
33906+
33907+ *dst++ = pgd;
33908+ }
33909+
33910+}
33911+#endif
33912+
33913+#ifdef CONFIG_X86_64
33914+#define pxd_t pud_t
33915+#define pyd_t pgd_t
33916+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
33917+#define pgtable_pxd_page_ctor(page) true
33918+#define pgtable_pxd_page_dtor(page)
33919+#define pxd_free(mm, pud) pud_free((mm), (pud))
33920+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
33921+#define pyd_offset(mm, address) pgd_offset((mm), (address))
33922+#define PYD_SIZE PGDIR_SIZE
33923+#else
33924+#define pxd_t pmd_t
33925+#define pyd_t pud_t
33926+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
33927+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
33928+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
33929+#define pxd_free(mm, pud) pmd_free((mm), (pud))
33930+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
33931+#define pyd_offset(mm, address) pud_offset((mm), (address))
33932+#define PYD_SIZE PUD_SIZE
33933+#endif
33934+
33935+#ifdef CONFIG_PAX_PER_CPU_PGD
33936+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
33937+static inline void pgd_dtor(pgd_t *pgd) {}
33938+#else
33939 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
33940 {
33941 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
33942@@ -141,6 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
33943 pgd_list_del(pgd);
33944 spin_unlock(&pgd_lock);
33945 }
33946+#endif
33947
33948 /*
33949 * List of all pgd's needed for non-PAE so it can invalidate entries
33950@@ -153,7 +215,7 @@ static void pgd_dtor(pgd_t *pgd)
33951 * -- nyc
33952 */
33953
33954-#ifdef CONFIG_X86_PAE
33955+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
33956 /*
33957 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
33958 * updating the top-level pagetable entries to guarantee the
33959@@ -165,7 +227,7 @@ static void pgd_dtor(pgd_t *pgd)
33960 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
33961 * and initialize the kernel pmds here.
33962 */
33963-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
33964+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33965
33966 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33967 {
33968@@ -183,43 +245,45 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33969 */
33970 flush_tlb_mm(mm);
33971 }
33972+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
33973+#define PREALLOCATED_PXDS USER_PGD_PTRS
33974 #else /* !CONFIG_X86_PAE */
33975
33976 /* No need to prepopulate any pagetable entries in non-PAE modes. */
33977-#define PREALLOCATED_PMDS 0
33978+#define PREALLOCATED_PXDS 0
33979
33980 #endif /* CONFIG_X86_PAE */
33981
33982-static void free_pmds(pmd_t *pmds[])
33983+static void free_pxds(pxd_t *pxds[])
33984 {
33985 int i;
33986
33987- for(i = 0; i < PREALLOCATED_PMDS; i++)
33988- if (pmds[i]) {
33989- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
33990- free_page((unsigned long)pmds[i]);
33991+ for(i = 0; i < PREALLOCATED_PXDS; i++)
33992+ if (pxds[i]) {
33993+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
33994+ free_page((unsigned long)pxds[i]);
33995 }
33996 }
33997
33998-static int preallocate_pmds(pmd_t *pmds[])
33999+static int preallocate_pxds(pxd_t *pxds[])
34000 {
34001 int i;
34002 bool failed = false;
34003
34004- for(i = 0; i < PREALLOCATED_PMDS; i++) {
34005- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
34006- if (!pmd)
34007+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
34008+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
34009+ if (!pxd)
34010 failed = true;
34011- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
34012- free_page((unsigned long)pmd);
34013- pmd = NULL;
34014+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
34015+ free_page((unsigned long)pxd);
34016+ pxd = NULL;
34017 failed = true;
34018 }
34019- pmds[i] = pmd;
34020+ pxds[i] = pxd;
34021 }
34022
34023 if (failed) {
34024- free_pmds(pmds);
34025+ free_pxds(pxds);
34026 return -ENOMEM;
34027 }
34028
34029@@ -232,49 +296,52 @@ static int preallocate_pmds(pmd_t *pmds[])
34030 * preallocate which never got a corresponding vma will need to be
34031 * freed manually.
34032 */
34033-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
34034+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
34035 {
34036 int i;
34037
34038- for(i = 0; i < PREALLOCATED_PMDS; i++) {
34039+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
34040 pgd_t pgd = pgdp[i];
34041
34042 if (pgd_val(pgd) != 0) {
34043- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
34044+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
34045
34046- pgdp[i] = native_make_pgd(0);
34047+ set_pgd(pgdp + i, native_make_pgd(0));
34048
34049- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
34050- pmd_free(mm, pmd);
34051+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
34052+ pxd_free(mm, pxd);
34053 }
34054 }
34055 }
34056
34057-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
34058+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
34059 {
34060- pud_t *pud;
34061+ pyd_t *pyd;
34062 int i;
34063
34064- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
34065+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
34066 return;
34067
34068- pud = pud_offset(pgd, 0);
34069-
34070- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
34071- pmd_t *pmd = pmds[i];
34072+#ifdef CONFIG_X86_64
34073+ pyd = pyd_offset(mm, 0L);
34074+#else
34075+ pyd = pyd_offset(pgd, 0L);
34076+#endif
34077
34078+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
34079+ pxd_t *pxd = pxds[i];
34080 if (i >= KERNEL_PGD_BOUNDARY)
34081- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
34082- sizeof(pmd_t) * PTRS_PER_PMD);
34083+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
34084+ sizeof(pxd_t) * PTRS_PER_PMD);
34085
34086- pud_populate(mm, pud, pmd);
34087+ pyd_populate(mm, pyd, pxd);
34088 }
34089 }
34090
34091 pgd_t *pgd_alloc(struct mm_struct *mm)
34092 {
34093 pgd_t *pgd;
34094- pmd_t *pmds[PREALLOCATED_PMDS];
34095+ pxd_t *pxds[PREALLOCATED_PXDS];
34096
34097 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
34098
34099@@ -283,11 +350,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34100
34101 mm->pgd = pgd;
34102
34103- if (preallocate_pmds(pmds) != 0)
34104+ if (preallocate_pxds(pxds) != 0)
34105 goto out_free_pgd;
34106
34107 if (paravirt_pgd_alloc(mm) != 0)
34108- goto out_free_pmds;
34109+ goto out_free_pxds;
34110
34111 /*
34112 * Make sure that pre-populating the pmds is atomic with
34113@@ -297,14 +364,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34114 spin_lock(&pgd_lock);
34115
34116 pgd_ctor(mm, pgd);
34117- pgd_prepopulate_pmd(mm, pgd, pmds);
34118+ pgd_prepopulate_pxd(mm, pgd, pxds);
34119
34120 spin_unlock(&pgd_lock);
34121
34122 return pgd;
34123
34124-out_free_pmds:
34125- free_pmds(pmds);
34126+out_free_pxds:
34127+ free_pxds(pxds);
34128 out_free_pgd:
34129 free_page((unsigned long)pgd);
34130 out:
34131@@ -313,7 +380,7 @@ out:
34132
34133 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
34134 {
34135- pgd_mop_up_pmds(mm, pgd);
34136+ pgd_mop_up_pxds(mm, pgd);
34137 pgd_dtor(pgd);
34138 paravirt_pgd_free(mm, pgd);
34139 free_page((unsigned long)pgd);
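The pgtable.c rewrite generalizes the old PMD-preallocation code so one body serves two configurations: on 32-bit PAE the preallocated level is the pmd (populated into puds), while on x86_64 with PAX_PER_CPU_PGD it is the pud (populated into pgds). The pxd_t/pyd_t macros alias whichever pair applies at compile time. A compact illustration of the same compile-time aliasing trick, with demo types rather than the kernel's:

	#include <stdio.h>

	#ifdef DEMO_64BIT
	typedef struct { unsigned long val; } demo_pud_t;
	#define demo_pxd_t	demo_pud_t
	#define DEMO_LEVEL	"pud (populated into the pgd)"
	#else
	typedef struct { unsigned long val; } demo_pmd_t;
	#define demo_pxd_t	demo_pmd_t
	#define DEMO_LEVEL	"pmd (populated into the pud)"
	#endif

	/* One free routine, compiled against whichever level pxd_t names. */
	static void demo_free_pxds(demo_pxd_t *pxds[], int n)
	{
		int i;

		for (i = 0; i < n; i++)
			pxds[i] = NULL;
	}

	int main(void)
	{
		demo_pxd_t *pxds[4] = { NULL };

		demo_free_pxds(pxds, 4);
		printf("preallocated level: %s\n", DEMO_LEVEL);
		return 0;
	}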
34140diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
34141index 4dd8cf6..f9d143e 100644
34142--- a/arch/x86/mm/pgtable_32.c
34143+++ b/arch/x86/mm/pgtable_32.c
34144@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
34145 return;
34146 }
34147 pte = pte_offset_kernel(pmd, vaddr);
34148+
34149+ pax_open_kernel();
34150 if (pte_val(pteval))
34151 set_pte_at(&init_mm, vaddr, pte, pteval);
34152 else
34153 pte_clear(&init_mm, vaddr, pte);
34154+ pax_close_kernel();
34155
34156 /*
34157 * It's enough to flush this one mapping.
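set_pte_vaddr() now brackets the pte update with pax_open_kernel()/pax_close_kernel(): under KERNEXEC the kernel page tables are normally read-only, and the pair opens a short write window around just the guarded store. A userspace analogue of the same bracket, using mprotect():

	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		long *page = mmap(NULL, 4096, PROT_READ,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (page == MAP_FAILED)
			return 1;

		mprotect(page, 4096, PROT_READ | PROT_WRITE);	/* "pax_open_kernel" */
		page[0] = 42;					/* the guarded store */
		mprotect(page, 4096, PROT_READ);		/* "pax_close_kernel" */

		printf("stored: %ld\n", page[0]);
		munmap(page, 4096);
		return 0;
	}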
34158diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
34159index e666cbb..61788c45 100644
34160--- a/arch/x86/mm/physaddr.c
34161+++ b/arch/x86/mm/physaddr.c
34162@@ -10,7 +10,7 @@
34163 #ifdef CONFIG_X86_64
34164
34165 #ifdef CONFIG_DEBUG_VIRTUAL
34166-unsigned long __phys_addr(unsigned long x)
34167+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34168 {
34169 unsigned long y = x - __START_KERNEL_map;
34170
34171@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
34172 #else
34173
34174 #ifdef CONFIG_DEBUG_VIRTUAL
34175-unsigned long __phys_addr(unsigned long x)
34176+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34177 {
34178 unsigned long phys_addr = x - PAGE_OFFSET;
34179 /* VMALLOC_* aren't constants */
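__intentional_overflow(-1) is an annotation consumed by PaX's size_overflow gcc plugin: the subtraction in __phys_addr() is expected to wrap whenever the argument lies below the kernel map base, and the marker keeps the plugin from flagging it. Plain C shows why the wraparound itself is well-defined; the base value is illustrative and the sketch assumes a 64-bit unsigned long:

	#include <stdio.h>

	int main(void)
	{
		unsigned long base = 0xffff880000000000UL; /* illustrative map base */
		unsigned long x = 0x1000;
		unsigned long y = x - base;	/* wraps modulo 2^64, by design */

		printf("y = %#lx\n", y);
		return 0;
	}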
34180diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
34181index 90555bf..f5f1828 100644
34182--- a/arch/x86/mm/setup_nx.c
34183+++ b/arch/x86/mm/setup_nx.c
34184@@ -5,8 +5,10 @@
34185 #include <asm/pgtable.h>
34186 #include <asm/proto.h>
34187
34188+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34189 static int disable_nx;
34190
34191+#ifndef CONFIG_PAX_PAGEEXEC
34192 /*
34193 * noexec = on|off
34194 *
34195@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
34196 return 0;
34197 }
34198 early_param("noexec", noexec_setup);
34199+#endif
34200+
34201+#endif
34202
34203 void x86_configure_nx(void)
34204 {
34205+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34206 if (cpu_has_nx && !disable_nx)
34207 __supported_pte_mask |= _PAGE_NX;
34208 else
34209+#endif
34210 __supported_pte_mask &= ~_PAGE_NX;
34211 }
34212
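The setup_nx.c change compiles the NX plumbing only where NX can exist (x86_64 or 32-bit PAE) and, under PAX_PAGEEXEC, removes the noexec= boot toggle so NX cannot be disabled from the command line. When the preprocessor arms drop out, x86_configure_nx() collapses to the unconditional mask-clear, as this reduced model shows; the DEMO_ names are invented:

	#include <stdio.h>

	#define DEMO_PAGE_NX	(1ULL << 63)

	/* Without DEMO_HAVE_NX the whole if/else collapses to the
	 * mask-clear, exactly like the patched x86_configure_nx(). */
	static unsigned long long demo_configure_nx(unsigned long long mask,
						    int cpu_has_nx)
	{
		(void)cpu_has_nx;	/* unused when NX support is compiled out */
	#ifdef DEMO_HAVE_NX
		if (cpu_has_nx)
			return mask | DEMO_PAGE_NX;
	#endif
		return mask & ~DEMO_PAGE_NX;
	}

	int main(void)
	{
		printf("%#llx\n", demo_configure_nx(DEMO_PAGE_NX | 0x67ULL, 1));
		return 0;
	}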
34213diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
34214index dd8dda1..9e9b0f6 100644
34215--- a/arch/x86/mm/tlb.c
34216+++ b/arch/x86/mm/tlb.c
34217@@ -48,7 +48,11 @@ void leave_mm(int cpu)
34218 BUG();
34219 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
34220 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
34221+
34222+#ifndef CONFIG_PAX_PER_CPU_PGD
34223 load_cr3(swapper_pg_dir);
34224+#endif
34225+
34226 }
34227 }
34228 EXPORT_SYMBOL_GPL(leave_mm);
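Under PAX_PER_CPU_PGD every CPU runs on its own page directory pair and never on the shared swapper_pg_dir, so leave_mm() only clears the mm's cpumask bit and the cr3 reload is compiled out. A rough sketch of the two behaviours, with toy state rather than kernel structures:

	#include <stdio.h>

	static unsigned long demo_mm_cpumask = 0xfUL;	/* mm active on cpus 0-3 */

	static void demo_leave_mm(int cpu, int per_cpu_pgd)
	{
		demo_mm_cpumask &= ~(1UL << cpu);
		if (!per_cpu_pgd)
			puts("reload cr3 with swapper_pg_dir");
		else
			puts("keep cr3 on this cpu's private pgd");
	}

	int main(void)
	{
		demo_leave_mm(2, 1);
		printf("cpumask now %#lx\n", demo_mm_cpumask);
		return 0;
	}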
34229diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
34230new file mode 100644
34231index 0000000..dace51c
34232--- /dev/null
34233+++ b/arch/x86/mm/uderef_64.c
34234@@ -0,0 +1,37 @@
34235+#include <linux/mm.h>
34236+#include <asm/pgtable.h>
34237+#include <asm/uaccess.h>
34238+
34239+#ifdef CONFIG_PAX_MEMORY_UDEREF
34240+/* PaX: due to the special call convention these functions must
34241+ * - remain leaf functions under all configurations,
34242+ * - never be called directly, only dereferenced from the wrappers.
34243+ */
34244+void __pax_open_userland(void)
34245+{
34246+ unsigned int cpu;
34247+
34248+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34249+ return;
34250+
34251+ cpu = raw_get_cpu();
34252+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
34253+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
34254+ raw_put_cpu_no_resched();
34255+}
34256+EXPORT_SYMBOL(__pax_open_userland);
34257+
34258+void __pax_close_userland(void)
34259+{
34260+ unsigned int cpu;
34261+
34262+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34263+ return;
34264+
34265+ cpu = raw_get_cpu();
34266+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
34267+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
34268+ raw_put_cpu_no_resched();
34269+}
34270+EXPORT_SYMBOL(__pax_close_userland);
34271+#endif
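The new uderef_64.c supplies UDEREF's userland window on x86_64: __pax_open_userland() switches cr3 to the per-CPU user pgd (tagged PCID_USER, with PCID_NOFLUSH so no TLB flush is paid) and __pax_close_userland() switches back, each BUG_ON-checking that the current PCID matches the expected state. The pairing-and-assert shape, modeled in plain C with illustrative demo_ names:

	#include <assert.h>
	#include <stdio.h>

	enum demo_pcid { DEMO_PCID_KERNEL, DEMO_PCID_USER };
	static enum demo_pcid demo_cr3 = DEMO_PCID_KERNEL;

	static void demo_open_userland(void)
	{
		assert(demo_cr3 == DEMO_PCID_KERNEL);	/* mirrors the BUG_ON */
		demo_cr3 = DEMO_PCID_USER;		/* write_cr3(user pgd)   */
	}

	static void demo_close_userland(void)
	{
		assert(demo_cr3 == DEMO_PCID_USER);
		demo_cr3 = DEMO_PCID_KERNEL;		/* write_cr3(kernel pgd) */
	}

	int main(void)
	{
		demo_open_userland();
		/* ... userland accesses would happen here ... */
		demo_close_userland();
		puts("open/close correctly paired");
		return 0;
	}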
34272diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
34273index 6440221..f746de8 100644
34274--- a/arch/x86/net/bpf_jit.S
34275+++ b/arch/x86/net/bpf_jit.S
34276@@ -9,19 +9,17 @@
34277 */
34278 #include <linux/linkage.h>
34279 #include <asm/dwarf2.h>
34280+#include <asm/alternative-asm.h>
34281
34282 /*
34283 * Calling convention :
34284- * rbx : skb pointer (callee saved)
34285+ * rdi : skb pointer
34286 * esi : offset of byte(s) to fetch in skb (can be scratched)
34287- * r10 : copy of skb->data
34288+ * r8 : copy of skb->data
34289 * r9d : hlen = skb->len - skb->data_len
34290 */
34291-#define SKBDATA %r10
34292+#define SKBDATA %r8
34293 #define SKF_MAX_NEG_OFF $(-0x200000) /* SKF_LL_OFF from filter.h */
34294-#define MAX_BPF_STACK (512 /* from filter.h */ + \
34295- 32 /* space for rbx,r13,r14,r15 */ + \
34296- 8 /* space for skb_copy_bits */)
34297
34298 sk_load_word:
34299 .globl sk_load_word
34300@@ -38,6 +36,7 @@ sk_load_word_positive_offset:
34301 jle bpf_slow_path_word
34302 mov (SKBDATA,%rsi),%eax
34303 bswap %eax /* ntohl() */
34304+ pax_force_retaddr
34305 ret
34306
34307 sk_load_half:
34308@@ -55,6 +54,7 @@ sk_load_half_positive_offset:
34309 jle bpf_slow_path_half
34310 movzwl (SKBDATA,%rsi),%eax
34311 rol $8,%ax # ntohs()
34312+ pax_force_retaddr
34313 ret
34314
34315 sk_load_byte:
34316@@ -69,45 +69,83 @@ sk_load_byte_positive_offset:
34317 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
34318 jle bpf_slow_path_byte
34319 movzbl (SKBDATA,%rsi),%eax
34320+ pax_force_retaddr
34321+ ret
34322+
34323+/**
34324+ * sk_load_byte_msh - BPF_S_LDX_B_MSH helper
34325+ *
34326+ * Implements BPF_S_LDX_B_MSH : ldxb 4*([offset]&0xf)
34327+ * Must preserve A accumulator (%eax)
34328+ * Inputs : %esi is the offset value
34329+ */
34330+sk_load_byte_msh:
34331+ .globl sk_load_byte_msh
34332+ test %esi,%esi
34333+ js bpf_slow_path_byte_msh_neg
34334+
34335+sk_load_byte_msh_positive_offset:
34336+ .globl sk_load_byte_msh_positive_offset
34337+ cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte_msh */
34338+ jle bpf_slow_path_byte_msh
34339+ movzbl (SKBDATA,%rsi),%ebx
34340+ and $15,%bl
34341+ shl $2,%bl
34342+ pax_force_retaddr
34343 ret
34344
34345 /* rsi contains offset and can be scratched */
34346 #define bpf_slow_path_common(LEN) \
34347- mov %rbx, %rdi; /* arg1 == skb */ \
34348+ push %rdi; /* save skb */ \
34349 push %r9; \
34350 push SKBDATA; \
34351 /* rsi already has offset */ \
34352 mov $LEN,%ecx; /* len */ \
34353- lea - MAX_BPF_STACK + 32(%rbp),%rdx; \
34354+ lea -12(%rbp),%rdx; \
34355 call skb_copy_bits; \
34356 test %eax,%eax; \
34357 pop SKBDATA; \
34358- pop %r9;
34359+ pop %r9; \
34360+ pop %rdi
34361
34362
34363 bpf_slow_path_word:
34364 bpf_slow_path_common(4)
34365 js bpf_error
34366- mov - MAX_BPF_STACK + 32(%rbp),%eax
34367+ mov -12(%rbp),%eax
34368 bswap %eax
34369+ pax_force_retaddr
34370 ret
34371
34372 bpf_slow_path_half:
34373 bpf_slow_path_common(2)
34374 js bpf_error
34375- mov - MAX_BPF_STACK + 32(%rbp),%ax
34376+ mov -12(%rbp),%ax
34377 rol $8,%ax
34378 movzwl %ax,%eax
34379+ pax_force_retaddr
34380 ret
34381
34382 bpf_slow_path_byte:
34383 bpf_slow_path_common(1)
34384 js bpf_error
34385- movzbl - MAX_BPF_STACK + 32(%rbp),%eax
34386+ movzbl -12(%rbp),%eax
34387+ pax_force_retaddr
34388+ ret
34389+
34390+bpf_slow_path_byte_msh:
34391+	xchg %eax,%ebx /* don't lose A, X is about to be scratched */
34392+ bpf_slow_path_common(1)
34393+ js bpf_error
34394+ movzbl -12(%rbp),%eax
34395+ and $15,%al
34396+ shl $2,%al
34397+ xchg %eax,%ebx
34398+ pax_force_retaddr
34399 ret
34400
34401 #define sk_negative_common(SIZE) \
34402- mov %rbx, %rdi; /* arg1 == skb */ \
34403+ push %rdi; /* save skb */ \
34404 push %r9; \
34405 push SKBDATA; \
34406 /* rsi already has offset */ \
34407@@ -116,8 +154,10 @@ bpf_slow_path_byte:
34408 test %rax,%rax; \
34409 pop SKBDATA; \
34410 pop %r9; \
34411+ pop %rdi; \
34412 jz bpf_error
34413
34414+
34415 bpf_slow_path_word_neg:
34416 cmp SKF_MAX_NEG_OFF, %esi /* test range */
34417 jl bpf_error /* offset lower -> error */
34418@@ -126,6 +166,7 @@ sk_load_word_negative_offset:
34419 sk_negative_common(4)
34420 mov (%rax), %eax
34421 bswap %eax
34422+ pax_force_retaddr
34423 ret
34424
34425 bpf_slow_path_half_neg:
34426@@ -137,6 +178,7 @@ sk_load_half_negative_offset:
34427 mov (%rax),%ax
34428 rol $8,%ax
34429 movzwl %ax,%eax
34430+ pax_force_retaddr
34431 ret
34432
34433 bpf_slow_path_byte_neg:
34434@@ -146,14 +188,27 @@ sk_load_byte_negative_offset:
34435 .globl sk_load_byte_negative_offset
34436 sk_negative_common(1)
34437 movzbl (%rax), %eax
34438+ pax_force_retaddr
34439+ ret
34440+
34441+bpf_slow_path_byte_msh_neg:
34442+ cmp SKF_MAX_NEG_OFF, %esi
34443+ jl bpf_error
34444+sk_load_byte_msh_negative_offset:
34445+ .globl sk_load_byte_msh_negative_offset
34446+	xchg %eax,%ebx /* don't lose A, X is about to be scratched */
34447+ sk_negative_common(1)
34448+ movzbl (%rax),%eax
34449+ and $15,%al
34450+ shl $2,%al
34451+ xchg %eax,%ebx
34452+ pax_force_retaddr
34453 ret
34454
34455 bpf_error:
34456 # force a return 0 from jit handler
34457- xor %eax,%eax
34458- mov - MAX_BPF_STACK(%rbp),%rbx
34459- mov - MAX_BPF_STACK + 8(%rbp),%r13
34460- mov - MAX_BPF_STACK + 16(%rbp),%r14
34461- mov - MAX_BPF_STACK + 24(%rbp),%r15
34462+ xor %eax,%eax
34463+ mov -8(%rbp),%rbx
34464 leaveq
34465+ pax_force_retaddr
34466 ret
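This bpf_jit.S hunk reverts the helpers to the classic-BPF calling convention (skb stays in %rdi, skb->data in %r8, a small fixed frame at -12(%rbp)), reintroduces sk_load_byte_msh, and plants a pax_force_retaddr before every ret for return-address hardening. What sk_load_byte_msh computes is the classic ldxb 4*([k]&0xf) idiom, in practice the IPv4 header length; the same calculation in C:

	#include <stdint.h>
	#include <stdio.h>

	/* X = 4 * (P[k] & 0xf): the low nibble scaled to bytes (IHL is
	 * counted in 32-bit words), which the msh helper leaves in %ebx. */
	static uint32_t demo_load_byte_msh(const uint8_t *pkt, uint32_t k)
	{
		return 4u * (pkt[k] & 0x0f);
	}

	int main(void)
	{
		const uint8_t ip_first_byte = 0x45;	/* version 4, IHL 5 */

		printf("ip header length: %u bytes\n",
		       demo_load_byte_msh(&ip_first_byte, 0));
		return 0;
	}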
34467diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
34468index 99bef86..bdfb5c6 100644
34469--- a/arch/x86/net/bpf_jit_comp.c
34470+++ b/arch/x86/net/bpf_jit_comp.c
34471@@ -1,7 +1,6 @@
34472 /* bpf_jit_comp.c : BPF JIT compiler
34473 *
34474 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
34475- * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
34476 *
34477 * This program is free software; you can redistribute it and/or
34478 * modify it under the terms of the GNU General Public License
34479@@ -15,16 +14,28 @@
34480 #include <linux/if_vlan.h>
34481 #include <linux/random.h>
34482
34483+/*
34484+ * Conventions :
34485+ * EAX : BPF A accumulator
34486+ * EBX : BPF X accumulator
34487+ * RDI : pointer to skb (first argument given to JIT function)
34488+ * RBP : frame pointer (even if CONFIG_FRAME_POINTER=n)
34489+ * ECX,EDX,ESI : scratch registers
34490+ * r9d : skb->len - skb->data_len (headlen)
34491+ * r8 : skb->data
34492+ * -8(RBP) : saved RBX value
34493+ * -16(RBP)..-80(RBP) : BPF_MEMWORDS values
34494+ */
34495 int bpf_jit_enable __read_mostly;
34496
34497 /*
34498 * assembly code in arch/x86/net/bpf_jit.S
34499 */
34500-extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
34501+extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
34502 extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
34503-extern u8 sk_load_byte_positive_offset[];
34504+extern u8 sk_load_byte_positive_offset[], sk_load_byte_msh_positive_offset[];
34505 extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
34506-extern u8 sk_load_byte_negative_offset[];
34507+extern u8 sk_load_byte_negative_offset[], sk_load_byte_msh_negative_offset[];
34508
34509 static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
34510 {
34511@@ -39,50 +50,113 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
34512 return ptr + len;
34513 }
34514
34515+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
34516+#define MAX_INSTR_CODE_SIZE 96
34517+#else
34518+#define MAX_INSTR_CODE_SIZE 64
34519+#endif
34520+
34521 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
34522
34523 #define EMIT1(b1) EMIT(b1, 1)
34524 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
34525 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
34526 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
34527-#define EMIT1_off32(b1, off) \
34528- do {EMIT1(b1); EMIT(off, 4); } while (0)
34529-#define EMIT2_off32(b1, b2, off) \
34530- do {EMIT2(b1, b2); EMIT(off, 4); } while (0)
34531-#define EMIT3_off32(b1, b2, b3, off) \
34532- do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
34533-#define EMIT4_off32(b1, b2, b3, b4, off) \
34534- do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
34535+
34536+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
34537+/* original constant will appear in ecx */
34538+#define DILUTE_CONST_SEQUENCE(_off, _key) \
34539+do { \
34540+ /* mov ecx, randkey */ \
34541+ EMIT1(0xb9); \
34542+ EMIT(_key, 4); \
34543+ /* xor ecx, randkey ^ off */ \
34544+ EMIT2(0x81, 0xf1); \
34545+ EMIT((_key) ^ (_off), 4); \
34546+} while (0)
34547+
34548+#define EMIT1_off32(b1, _off) \
34549+do { \
34550+ switch (b1) { \
34551+ case 0x05: /* add eax, imm32 */ \
34552+ case 0x2d: /* sub eax, imm32 */ \
34553+ case 0x25: /* and eax, imm32 */ \
34554+ case 0x0d: /* or eax, imm32 */ \
34555+ case 0xb8: /* mov eax, imm32 */ \
34556+ case 0x35: /* xor eax, imm32 */ \
34557+ case 0x3d: /* cmp eax, imm32 */ \
34558+ case 0xa9: /* test eax, imm32 */ \
34559+ DILUTE_CONST_SEQUENCE(_off, randkey); \
34560+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
34561+ break; \
34562+ case 0xbb: /* mov ebx, imm32 */ \
34563+ DILUTE_CONST_SEQUENCE(_off, randkey); \
34564+ /* mov ebx, ecx */ \
34565+ EMIT2(0x89, 0xcb); \
34566+ break; \
34567+ case 0xbe: /* mov esi, imm32 */ \
34568+ DILUTE_CONST_SEQUENCE(_off, randkey); \
34569+ /* mov esi, ecx */ \
34570+ EMIT2(0x89, 0xce); \
34571+ break; \
34572+ case 0xe8: /* call rel imm32, always to known funcs */ \
34573+ EMIT1(b1); \
34574+ EMIT(_off, 4); \
34575+ break; \
34576+ case 0xe9: /* jmp rel imm32 */ \
34577+ EMIT1(b1); \
34578+ EMIT(_off, 4); \
34579+ /* prevent fall-through, we're not called if off = 0 */ \
34580+ EMIT(0xcccccccc, 4); \
34581+ EMIT(0xcccccccc, 4); \
34582+ break; \
34583+ default: \
34584+ BUILD_BUG(); \
34585+ } \
34586+} while (0)
34587+
34588+#define EMIT2_off32(b1, b2, _off) \
34589+do { \
34590+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
34591+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
34592+ EMIT(randkey, 4); \
34593+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
34594+ EMIT((_off) - randkey, 4); \
34595+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
34596+ DILUTE_CONST_SEQUENCE(_off, randkey); \
34597+ /* imul eax, ecx */ \
34598+ EMIT3(0x0f, 0xaf, 0xc1); \
34599+ } else { \
34600+ BUILD_BUG(); \
34601+ } \
34602+} while (0)
34603+#else
34604+#define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
34605+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
34606+#endif
34607+
34608+#define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
34609+#define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
34610
34611 static inline bool is_imm8(int value)
34612 {
34613 return value <= 127 && value >= -128;
34614 }
34615
34616-static inline bool is_simm32(s64 value)
34617+static inline bool is_near(int offset)
34618 {
34619- return value == (s64) (s32) value;
34620+ return offset <= 127 && offset >= -128;
34621 }
34622
34623-/* mov dst, src */
34624-#define EMIT_mov(DST, SRC) \
34625- do {if (DST != SRC) \
34626- EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
34627- } while (0)
34628-
34629-static int bpf_size_to_x86_bytes(int bpf_size)
34630-{
34631- if (bpf_size == BPF_W)
34632- return 4;
34633- else if (bpf_size == BPF_H)
34634- return 2;
34635- else if (bpf_size == BPF_B)
34636- return 1;
34637- else if (bpf_size == BPF_DW)
34638- return 4; /* imm32 */
34639- else
34640- return 0;
34641-}
34642+#define EMIT_JMP(offset) \
34643+do { \
34644+ if (offset) { \
34645+ if (is_near(offset)) \
34646+ EMIT2(0xeb, offset); /* jmp .+off8 */ \
34647+ else \
34648+ EMIT1_off32(0xe9, offset); /* jmp .+off32 */ \
34649+ } \
34650+} while (0)
34651
34652 /* list of x86 cond jumps opcodes (. + s8)
34653 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
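The EMIT1_off32/EMIT2_off32 overrides above are GRKERNSEC_JIT_HARDEN's constant blinding: a filter-controlled 32-bit immediate never appears verbatim in the emitted code. DILUTE_CONST_SEQUENCE loads a random key and XORs it with key ^ value, so the original value exists only in %ecx at run time. The identity behind it, sketched in C:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <time.h>

	/* What the emitted "mov ecx, randkey; xor ecx, randkey ^ off"
	 * pair computes. */
	static uint32_t demo_dilute_const(uint32_t off, uint32_t randkey)
	{
		uint32_t ecx = randkey;		/* mov ecx, randkey       */

		ecx ^= randkey ^ off;		/* xor ecx, randkey ^ off */
		return ecx;			/* == off, never encoded as-is */
	}

	int main(void)
	{
		uint32_t key;

		srand((unsigned)time(NULL));
		key = (uint32_t)rand();
		printf("%#x\n", demo_dilute_const(0xdeadbeefu, key));
		return 0;
	}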
34654@@ -93,8 +167,46 @@ static int bpf_size_to_x86_bytes(int bpf_size)
34655 #define X86_JNE 0x75
34656 #define X86_JBE 0x76
34657 #define X86_JA 0x77
34658-#define X86_JGE 0x7D
34659-#define X86_JG 0x7F
34660+
34661+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
34662+#define APPEND_FLOW_VERIFY() \
34663+do { \
34664+ /* mov ecx, randkey */ \
34665+ EMIT1(0xb9); \
34666+ EMIT(randkey, 4); \
34667+ /* cmp ecx, randkey */ \
34668+ EMIT2(0x81, 0xf9); \
34669+ EMIT(randkey, 4); \
34670+ /* jz after 8 int 3s */ \
34671+ EMIT2(0x74, 0x08); \
34672+ EMIT(0xcccccccc, 4); \
34673+ EMIT(0xcccccccc, 4); \
34674+} while (0)
34675+#else
34676+#define APPEND_FLOW_VERIFY() do { } while (0)
34677+#endif
34678+
34679+#define EMIT_COND_JMP(op, offset) \
34680+do { \
34681+ if (is_near(offset)) \
34682+ EMIT2(op, offset); /* jxx .+off8 */ \
34683+ else { \
34684+ EMIT2(0x0f, op + 0x10); \
34685+ EMIT(offset, 4); /* jxx .+off32 */ \
34686+ APPEND_FLOW_VERIFY(); \
34687+ } \
34688+} while (0)
34689+
34690+#define COND_SEL(CODE, TOP, FOP) \
34691+ case CODE: \
34692+ t_op = TOP; \
34693+ f_op = FOP; \
34694+ goto cond_branch
34695+
34696+
34697+#define SEEN_DATAREF 1 /* might call external helpers */
34698+#define SEEN_XREG 2 /* ebx is used */
34699+#define SEEN_MEM 4 /* use mem[] for temporary storage */
34700
34701 static inline void bpf_flush_icache(void *start, void *end)
34702 {
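EMIT_COND_JMP appends APPEND_FLOW_VERIFY after every far conditional jump under the same hardening option: it plants a random 32-bit canary, immediately compares it, and jumps over eight int3 bytes only on a match, so a control transfer landing mid-instruction (for example into blinded-constant bytes) traps instead of executing attacker-shaped code. The plant-and-check shape in C:

	#include <stdio.h>
	#include <stdlib.h>

	/* Plant a value and immediately verify it; skipping the plant
	 * (a jump into the middle of the sequence) fails the check and
	 * hits the trap, standing in for the run of int3 bytes. */
	static void demo_flow_verify(unsigned int randkey)
	{
		volatile unsigned int ecx = randkey;	/* mov ecx, randkey */

		if (ecx != randkey)			/* cmp ecx, randkey */
			abort();			/* the int3 landing pad */
	}

	int main(void)
	{
		demo_flow_verify(0x5eedu);
		puts("straight-line flow verified");
		return 0;
	}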
34703@@ -109,804 +221,646 @@ static inline void bpf_flush_icache(void *start, void *end)
34704 #define CHOOSE_LOAD_FUNC(K, func) \
34705 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
34706
34707-struct bpf_binary_header {
34708- unsigned int pages;
34709- /* Note : for security reasons, bpf code will follow a randomly
34710- * sized amount of int3 instructions
34711- */
34712- u8 image[];
34713-};
34714+/* Helper to find the offset of pkt_type in sk_buff
34715+ * We want to make sure it's still a 3-bit field starting at a byte boundary.
34716+ */
34717+#define PKT_TYPE_MAX 7
34718+static int pkt_type_offset(void)
34719+{
34720+ struct sk_buff skb_probe = {
34721+ .pkt_type = ~0,
34722+ };
34723+ char *ct = (char *)&skb_probe;
34724+ unsigned int off;
34725
34726-static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
34727+ for (off = 0; off < sizeof(struct sk_buff); off++) {
34728+ if (ct[off] == PKT_TYPE_MAX)
34729+ return off;
34730+ }
34731+ pr_err_once("Please fix pkt_type_offset(), as pkt_type couldn't be found\n");
34732+ return -1;
34733+}
34734+
34735+/* Note : for security reasons, bpf code will follow a randomly
34736+ * sized amount of int3 instructions
34737+ */
34738+static u8 *bpf_alloc_binary(unsigned int proglen,
34739 u8 **image_ptr)
34740 {
34741 unsigned int sz, hole;
34742- struct bpf_binary_header *header;
34743+ u8 *header;
34744
34745 /* Most of BPF filters are really small,
34746 * but if some of them fill a page, allow at least
34747 * 128 extra bytes to insert a random section of int3
34748 */
34749- sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
34750- header = module_alloc(sz);
34751+ sz = round_up(proglen + 128, PAGE_SIZE);
34752+ header = module_alloc_exec(sz);
34753 if (!header)
34754 return NULL;
34755
34756+ pax_open_kernel();
34757 memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
34758+ pax_close_kernel();
34759
34760- header->pages = sz / PAGE_SIZE;
34761- hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
34762+ hole = PAGE_SIZE - (proglen & ~PAGE_MASK);
34763
34764 /* insert a random number of int3 instructions before BPF code */
34765- *image_ptr = &header->image[prandom_u32() % hole];
34766+ *image_ptr = &header[prandom_u32() % hole];
34767 return header;
34768 }
34769
34770-/* pick a register outside of BPF range for JIT internal work */
34771-#define AUX_REG (MAX_BPF_REG + 1)
34772-
34773-/* the following table maps BPF registers to x64 registers.
34774- * x64 register r12 is unused, since if used as base address register
34775- * in load/store instructions, it always needs an extra byte of encoding
34776- */
34777-static const int reg2hex[] = {
34778- [BPF_REG_0] = 0, /* rax */
34779- [BPF_REG_1] = 7, /* rdi */
34780- [BPF_REG_2] = 6, /* rsi */
34781- [BPF_REG_3] = 2, /* rdx */
34782- [BPF_REG_4] = 1, /* rcx */
34783- [BPF_REG_5] = 0, /* r8 */
34784- [BPF_REG_6] = 3, /* rbx callee saved */
34785- [BPF_REG_7] = 5, /* r13 callee saved */
34786- [BPF_REG_8] = 6, /* r14 callee saved */
34787- [BPF_REG_9] = 7, /* r15 callee saved */
34788- [BPF_REG_FP] = 5, /* rbp readonly */
34789- [AUX_REG] = 3, /* r11 temp register */
34790-};
34791-
34792-/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
34793- * which need extra byte of encoding.
34794- * rax,rcx,...,rbp have simpler encoding
34795- */
34796-static inline bool is_ereg(u32 reg)
34797-{
34798- if (reg == BPF_REG_5 || reg == AUX_REG ||
34799- (reg >= BPF_REG_7 && reg <= BPF_REG_9))
34800- return true;
34801- else
34802- return false;
34803-}
34804-
34805-/* add modifiers if 'reg' maps to x64 registers r8..r15 */
34806-static inline u8 add_1mod(u8 byte, u32 reg)
34807-{
34808- if (is_ereg(reg))
34809- byte |= 1;
34810- return byte;
34811-}
34812-
34813-static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
34814-{
34815- if (is_ereg(r1))
34816- byte |= 1;
34817- if (is_ereg(r2))
34818- byte |= 4;
34819- return byte;
34820-}
34821-
34822-/* encode 'dst_reg' register into x64 opcode 'byte' */
34823-static inline u8 add_1reg(u8 byte, u32 dst_reg)
34824-{
34825- return byte + reg2hex[dst_reg];
34826-}
34827-
34828-/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
34829-static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
34830-{
34831- return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
34832-}
34833-
34834-struct jit_context {
34835- unsigned int cleanup_addr; /* epilogue code offset */
34836- bool seen_ld_abs;
34837-};
34838-
34839-static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
34840- int oldproglen, struct jit_context *ctx)
34841-{
34842- struct sock_filter_int *insn = bpf_prog->insnsi;
34843- int insn_cnt = bpf_prog->len;
34844- u8 temp[64];
34845- int i;
34846- int proglen = 0;
34847- u8 *prog = temp;
34848- int stacksize = MAX_BPF_STACK +
34849- 32 /* space for rbx, r13, r14, r15 */ +
34850- 8 /* space for skb_copy_bits() buffer */;
34851-
34852- EMIT1(0x55); /* push rbp */
34853- EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */
34854-
34855- /* sub rsp, stacksize */
34856- EMIT3_off32(0x48, 0x81, 0xEC, stacksize);
34857-
34858- /* all classic BPF filters use R6(rbx) save it */
34859-
34860- /* mov qword ptr [rbp-X],rbx */
34861- EMIT3_off32(0x48, 0x89, 0x9D, -stacksize);
34862-
34863- /* sk_convert_filter() maps classic BPF register X to R7 and uses R8
34864- * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
34865- * R8(r14). R9(r15) spill could be made conditional, but there is only
34866- * one 'bpf_error' return path out of helper functions inside bpf_jit.S
34867- * The overhead of extra spill is negligible for any filter other
34868- * than synthetic ones. Therefore not worth adding complexity.
34869- */
34870-
34871- /* mov qword ptr [rbp-X],r13 */
34872- EMIT3_off32(0x4C, 0x89, 0xAD, -stacksize + 8);
34873- /* mov qword ptr [rbp-X],r14 */
34874- EMIT3_off32(0x4C, 0x89, 0xB5, -stacksize + 16);
34875- /* mov qword ptr [rbp-X],r15 */
34876- EMIT3_off32(0x4C, 0x89, 0xBD, -stacksize + 24);
34877-
34878- /* clear A and X registers */
34879- EMIT2(0x31, 0xc0); /* xor eax, eax */
34880- EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */
34881-
34882- if (ctx->seen_ld_abs) {
34883- /* r9d : skb->len - skb->data_len (headlen)
34884- * r10 : skb->data
34885- */
34886- if (is_imm8(offsetof(struct sk_buff, len)))
34887- /* mov %r9d, off8(%rdi) */
34888- EMIT4(0x44, 0x8b, 0x4f,
34889- offsetof(struct sk_buff, len));
34890- else
34891- /* mov %r9d, off32(%rdi) */
34892- EMIT3_off32(0x44, 0x8b, 0x8f,
34893- offsetof(struct sk_buff, len));
34894-
34895- if (is_imm8(offsetof(struct sk_buff, data_len)))
34896- /* sub %r9d, off8(%rdi) */
34897- EMIT4(0x44, 0x2b, 0x4f,
34898- offsetof(struct sk_buff, data_len));
34899- else
34900- EMIT3_off32(0x44, 0x2b, 0x8f,
34901- offsetof(struct sk_buff, data_len));
34902-
34903- if (is_imm8(offsetof(struct sk_buff, data)))
34904- /* mov %r10, off8(%rdi) */
34905- EMIT4(0x4c, 0x8b, 0x57,
34906- offsetof(struct sk_buff, data));
34907- else
34908- /* mov %r10, off32(%rdi) */
34909- EMIT3_off32(0x4c, 0x8b, 0x97,
34910- offsetof(struct sk_buff, data));
34911- }
34912-
34913- for (i = 0; i < insn_cnt; i++, insn++) {
34914- const s32 imm32 = insn->imm;
34915- u32 dst_reg = insn->dst_reg;
34916- u32 src_reg = insn->src_reg;
34917- u8 b1 = 0, b2 = 0, b3 = 0;
34918- s64 jmp_offset;
34919- u8 jmp_cond;
34920- int ilen;
34921- u8 *func;
34922-
34923- switch (insn->code) {
34924- /* ALU */
34925- case BPF_ALU | BPF_ADD | BPF_X:
34926- case BPF_ALU | BPF_SUB | BPF_X:
34927- case BPF_ALU | BPF_AND | BPF_X:
34928- case BPF_ALU | BPF_OR | BPF_X:
34929- case BPF_ALU | BPF_XOR | BPF_X:
34930- case BPF_ALU64 | BPF_ADD | BPF_X:
34931- case BPF_ALU64 | BPF_SUB | BPF_X:
34932- case BPF_ALU64 | BPF_AND | BPF_X:
34933- case BPF_ALU64 | BPF_OR | BPF_X:
34934- case BPF_ALU64 | BPF_XOR | BPF_X:
34935- switch (BPF_OP(insn->code)) {
34936- case BPF_ADD: b2 = 0x01; break;
34937- case BPF_SUB: b2 = 0x29; break;
34938- case BPF_AND: b2 = 0x21; break;
34939- case BPF_OR: b2 = 0x09; break;
34940- case BPF_XOR: b2 = 0x31; break;
34941- }
34942- if (BPF_CLASS(insn->code) == BPF_ALU64)
34943- EMIT1(add_2mod(0x48, dst_reg, src_reg));
34944- else if (is_ereg(dst_reg) || is_ereg(src_reg))
34945- EMIT1(add_2mod(0x40, dst_reg, src_reg));
34946- EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
34947- break;
34948-
34949- /* mov dst, src */
34950- case BPF_ALU64 | BPF_MOV | BPF_X:
34951- EMIT_mov(dst_reg, src_reg);
34952- break;
34953-
34954- /* mov32 dst, src */
34955- case BPF_ALU | BPF_MOV | BPF_X:
34956- if (is_ereg(dst_reg) || is_ereg(src_reg))
34957- EMIT1(add_2mod(0x40, dst_reg, src_reg));
34958- EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
34959- break;
34960-
34961- /* neg dst */
34962- case BPF_ALU | BPF_NEG:
34963- case BPF_ALU64 | BPF_NEG:
34964- if (BPF_CLASS(insn->code) == BPF_ALU64)
34965- EMIT1(add_1mod(0x48, dst_reg));
34966- else if (is_ereg(dst_reg))
34967- EMIT1(add_1mod(0x40, dst_reg));
34968- EMIT2(0xF7, add_1reg(0xD8, dst_reg));
34969- break;
34970-
34971- case BPF_ALU | BPF_ADD | BPF_K:
34972- case BPF_ALU | BPF_SUB | BPF_K:
34973- case BPF_ALU | BPF_AND | BPF_K:
34974- case BPF_ALU | BPF_OR | BPF_K:
34975- case BPF_ALU | BPF_XOR | BPF_K:
34976- case BPF_ALU64 | BPF_ADD | BPF_K:
34977- case BPF_ALU64 | BPF_SUB | BPF_K:
34978- case BPF_ALU64 | BPF_AND | BPF_K:
34979- case BPF_ALU64 | BPF_OR | BPF_K:
34980- case BPF_ALU64 | BPF_XOR | BPF_K:
34981- if (BPF_CLASS(insn->code) == BPF_ALU64)
34982- EMIT1(add_1mod(0x48, dst_reg));
34983- else if (is_ereg(dst_reg))
34984- EMIT1(add_1mod(0x40, dst_reg));
34985-
34986- switch (BPF_OP(insn->code)) {
34987- case BPF_ADD: b3 = 0xC0; break;
34988- case BPF_SUB: b3 = 0xE8; break;
34989- case BPF_AND: b3 = 0xE0; break;
34990- case BPF_OR: b3 = 0xC8; break;
34991- case BPF_XOR: b3 = 0xF0; break;
34992- }
34993-
34994- if (is_imm8(imm32))
34995- EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
34996- else
34997- EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
34998- break;
34999-
35000- case BPF_ALU64 | BPF_MOV | BPF_K:
35001- /* optimization: if imm32 is positive,
35002- * use 'mov eax, imm32' (which zero-extends imm32)
35003- * to save 2 bytes
35004- */
35005- if (imm32 < 0) {
35006- /* 'mov rax, imm32' sign extends imm32 */
35007- b1 = add_1mod(0x48, dst_reg);
35008- b2 = 0xC7;
35009- b3 = 0xC0;
35010- EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
35011- break;
35012- }
35013-
35014- case BPF_ALU | BPF_MOV | BPF_K:
35015- /* mov %eax, imm32 */
35016- if (is_ereg(dst_reg))
35017- EMIT1(add_1mod(0x40, dst_reg));
35018- EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
35019- break;
35020-
35021- /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
35022- case BPF_ALU | BPF_MOD | BPF_X:
35023- case BPF_ALU | BPF_DIV | BPF_X:
35024- case BPF_ALU | BPF_MOD | BPF_K:
35025- case BPF_ALU | BPF_DIV | BPF_K:
35026- case BPF_ALU64 | BPF_MOD | BPF_X:
35027- case BPF_ALU64 | BPF_DIV | BPF_X:
35028- case BPF_ALU64 | BPF_MOD | BPF_K:
35029- case BPF_ALU64 | BPF_DIV | BPF_K:
35030- EMIT1(0x50); /* push rax */
35031- EMIT1(0x52); /* push rdx */
35032-
35033- if (BPF_SRC(insn->code) == BPF_X)
35034- /* mov r11, src_reg */
35035- EMIT_mov(AUX_REG, src_reg);
35036- else
35037- /* mov r11, imm32 */
35038- EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
35039-
35040- /* mov rax, dst_reg */
35041- EMIT_mov(BPF_REG_0, dst_reg);
35042-
35043- /* xor edx, edx
35044- * equivalent to 'xor rdx, rdx', but one byte less
35045- */
35046- EMIT2(0x31, 0xd2);
35047-
35048- if (BPF_SRC(insn->code) == BPF_X) {
35049- /* if (src_reg == 0) return 0 */
35050-
35051- /* cmp r11, 0 */
35052- EMIT4(0x49, 0x83, 0xFB, 0x00);
35053-
35054- /* jne .+9 (skip over pop, pop, xor and jmp) */
35055- EMIT2(X86_JNE, 1 + 1 + 2 + 5);
35056- EMIT1(0x5A); /* pop rdx */
35057- EMIT1(0x58); /* pop rax */
35058- EMIT2(0x31, 0xc0); /* xor eax, eax */
35059-
35060- /* jmp cleanup_addr
35061- * addrs[i] - 11, because there are 11 bytes
35062- * after this insn: div, mov, pop, pop, mov
35063- */
35064- jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
35065- EMIT1_off32(0xE9, jmp_offset);
35066- }
35067-
35068- if (BPF_CLASS(insn->code) == BPF_ALU64)
35069- /* div r11 */
35070- EMIT3(0x49, 0xF7, 0xF3);
35071- else
35072- /* div r11d */
35073- EMIT3(0x41, 0xF7, 0xF3);
35074-
35075- if (BPF_OP(insn->code) == BPF_MOD)
35076- /* mov r11, rdx */
35077- EMIT3(0x49, 0x89, 0xD3);
35078- else
35079- /* mov r11, rax */
35080- EMIT3(0x49, 0x89, 0xC3);
35081-
35082- EMIT1(0x5A); /* pop rdx */
35083- EMIT1(0x58); /* pop rax */
35084-
35085- /* mov dst_reg, r11 */
35086- EMIT_mov(dst_reg, AUX_REG);
35087- break;
35088-
35089- case BPF_ALU | BPF_MUL | BPF_K:
35090- case BPF_ALU | BPF_MUL | BPF_X:
35091- case BPF_ALU64 | BPF_MUL | BPF_K:
35092- case BPF_ALU64 | BPF_MUL | BPF_X:
35093- EMIT1(0x50); /* push rax */
35094- EMIT1(0x52); /* push rdx */
35095-
35096- /* mov r11, dst_reg */
35097- EMIT_mov(AUX_REG, dst_reg);
35098-
35099- if (BPF_SRC(insn->code) == BPF_X)
35100- /* mov rax, src_reg */
35101- EMIT_mov(BPF_REG_0, src_reg);
35102- else
35103- /* mov rax, imm32 */
35104- EMIT3_off32(0x48, 0xC7, 0xC0, imm32);
35105-
35106- if (BPF_CLASS(insn->code) == BPF_ALU64)
35107- EMIT1(add_1mod(0x48, AUX_REG));
35108- else if (is_ereg(AUX_REG))
35109- EMIT1(add_1mod(0x40, AUX_REG));
35110- /* mul(q) r11 */
35111- EMIT2(0xF7, add_1reg(0xE0, AUX_REG));
35112-
35113- /* mov r11, rax */
35114- EMIT_mov(AUX_REG, BPF_REG_0);
35115-
35116- EMIT1(0x5A); /* pop rdx */
35117- EMIT1(0x58); /* pop rax */
35118-
35119- /* mov dst_reg, r11 */
35120- EMIT_mov(dst_reg, AUX_REG);
35121- break;
35122-
35123- /* shifts */
35124- case BPF_ALU | BPF_LSH | BPF_K:
35125- case BPF_ALU | BPF_RSH | BPF_K:
35126- case BPF_ALU | BPF_ARSH | BPF_K:
35127- case BPF_ALU64 | BPF_LSH | BPF_K:
35128- case BPF_ALU64 | BPF_RSH | BPF_K:
35129- case BPF_ALU64 | BPF_ARSH | BPF_K:
35130- if (BPF_CLASS(insn->code) == BPF_ALU64)
35131- EMIT1(add_1mod(0x48, dst_reg));
35132- else if (is_ereg(dst_reg))
35133- EMIT1(add_1mod(0x40, dst_reg));
35134-
35135- switch (BPF_OP(insn->code)) {
35136- case BPF_LSH: b3 = 0xE0; break;
35137- case BPF_RSH: b3 = 0xE8; break;
35138- case BPF_ARSH: b3 = 0xF8; break;
35139- }
35140- EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
35141- break;
35142-
35143- case BPF_ALU | BPF_END | BPF_FROM_BE:
35144- switch (imm32) {
35145- case 16:
35146- /* emit 'ror %ax, 8' to swap lower 2 bytes */
35147- EMIT1(0x66);
35148- if (is_ereg(dst_reg))
35149- EMIT1(0x41);
35150- EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
35151- break;
35152- case 32:
35153- /* emit 'bswap eax' to swap lower 4 bytes */
35154- if (is_ereg(dst_reg))
35155- EMIT2(0x41, 0x0F);
35156- else
35157- EMIT1(0x0F);
35158- EMIT1(add_1reg(0xC8, dst_reg));
35159- break;
35160- case 64:
35161- /* emit 'bswap rax' to swap 8 bytes */
35162- EMIT3(add_1mod(0x48, dst_reg), 0x0F,
35163- add_1reg(0xC8, dst_reg));
35164- break;
35165- }
35166- break;
35167-
35168- case BPF_ALU | BPF_END | BPF_FROM_LE:
35169- break;
35170-
35171- /* ST: *(u8*)(dst_reg + off) = imm */
35172- case BPF_ST | BPF_MEM | BPF_B:
35173- if (is_ereg(dst_reg))
35174- EMIT2(0x41, 0xC6);
35175- else
35176- EMIT1(0xC6);
35177- goto st;
35178- case BPF_ST | BPF_MEM | BPF_H:
35179- if (is_ereg(dst_reg))
35180- EMIT3(0x66, 0x41, 0xC7);
35181- else
35182- EMIT2(0x66, 0xC7);
35183- goto st;
35184- case BPF_ST | BPF_MEM | BPF_W:
35185- if (is_ereg(dst_reg))
35186- EMIT2(0x41, 0xC7);
35187- else
35188- EMIT1(0xC7);
35189- goto st;
35190- case BPF_ST | BPF_MEM | BPF_DW:
35191- EMIT2(add_1mod(0x48, dst_reg), 0xC7);
35192-
35193-st: if (is_imm8(insn->off))
35194- EMIT2(add_1reg(0x40, dst_reg), insn->off);
35195- else
35196- EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
35197-
35198- EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
35199- break;
35200-
35201- /* STX: *(u8*)(dst_reg + off) = src_reg */
35202- case BPF_STX | BPF_MEM | BPF_B:
35203- /* emit 'mov byte ptr [rax + off], al' */
35204- if (is_ereg(dst_reg) || is_ereg(src_reg) ||
35205- /* have to add extra byte for x86 SIL, DIL regs */
35206- src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
35207- EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
35208- else
35209- EMIT1(0x88);
35210- goto stx;
35211- case BPF_STX | BPF_MEM | BPF_H:
35212- if (is_ereg(dst_reg) || is_ereg(src_reg))
35213- EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
35214- else
35215- EMIT2(0x66, 0x89);
35216- goto stx;
35217- case BPF_STX | BPF_MEM | BPF_W:
35218- if (is_ereg(dst_reg) || is_ereg(src_reg))
35219- EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
35220- else
35221- EMIT1(0x89);
35222- goto stx;
35223- case BPF_STX | BPF_MEM | BPF_DW:
35224- EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
35225-stx: if (is_imm8(insn->off))
35226- EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
35227- else
35228- EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
35229- insn->off);
35230- break;
35231-
35232- /* LDX: dst_reg = *(u8*)(src_reg + off) */
35233- case BPF_LDX | BPF_MEM | BPF_B:
35234- /* emit 'movzx rax, byte ptr [rax + off]' */
35235- EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
35236- goto ldx;
35237- case BPF_LDX | BPF_MEM | BPF_H:
35238- /* emit 'movzx rax, word ptr [rax + off]' */
35239- EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
35240- goto ldx;
35241- case BPF_LDX | BPF_MEM | BPF_W:
35242- /* emit 'mov eax, dword ptr [rax+0x14]' */
35243- if (is_ereg(dst_reg) || is_ereg(src_reg))
35244- EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
35245- else
35246- EMIT1(0x8B);
35247- goto ldx;
35248- case BPF_LDX | BPF_MEM | BPF_DW:
35249- /* emit 'mov rax, qword ptr [rax+0x14]' */
35250- EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
35251-ldx: /* if insn->off == 0 we can save one extra byte, but
35252- * special case of x86 r13 which always needs an offset
35253- * is not worth the hassle
35254- */
35255- if (is_imm8(insn->off))
35256- EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
35257- else
35258- EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
35259- insn->off);
35260- break;
35261-
35262- /* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
35263- case BPF_STX | BPF_XADD | BPF_W:
35264- /* emit 'lock add dword ptr [rax + off], eax' */
35265- if (is_ereg(dst_reg) || is_ereg(src_reg))
35266- EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
35267- else
35268- EMIT2(0xF0, 0x01);
35269- goto xadd;
35270- case BPF_STX | BPF_XADD | BPF_DW:
35271- EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
35272-xadd: if (is_imm8(insn->off))
35273- EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
35274- else
35275- EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
35276- insn->off);
35277- break;
35278-
35279- /* call */
35280- case BPF_JMP | BPF_CALL:
35281- func = (u8 *) __bpf_call_base + imm32;
35282- jmp_offset = func - (image + addrs[i]);
35283- if (ctx->seen_ld_abs) {
35284- EMIT2(0x41, 0x52); /* push %r10 */
35285- EMIT2(0x41, 0x51); /* push %r9 */
35286- /* need to adjust jmp offset, since
35287- * pop %r9, pop %r10 take 4 bytes after call insn
35288- */
35289- jmp_offset += 4;
35290- }
35291- if (!imm32 || !is_simm32(jmp_offset)) {
35292- pr_err("unsupported bpf func %d addr %p image %p\n",
35293- imm32, func, image);
35294- return -EINVAL;
35295- }
35296- EMIT1_off32(0xE8, jmp_offset);
35297- if (ctx->seen_ld_abs) {
35298- EMIT2(0x41, 0x59); /* pop %r9 */
35299- EMIT2(0x41, 0x5A); /* pop %r10 */
35300- }
35301- break;
35302-
35303- /* cond jump */
35304- case BPF_JMP | BPF_JEQ | BPF_X:
35305- case BPF_JMP | BPF_JNE | BPF_X:
35306- case BPF_JMP | BPF_JGT | BPF_X:
35307- case BPF_JMP | BPF_JGE | BPF_X:
35308- case BPF_JMP | BPF_JSGT | BPF_X:
35309- case BPF_JMP | BPF_JSGE | BPF_X:
35310- /* cmp dst_reg, src_reg */
35311- EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
35312- add_2reg(0xC0, dst_reg, src_reg));
35313- goto emit_cond_jmp;
35314-
35315- case BPF_JMP | BPF_JSET | BPF_X:
35316- /* test dst_reg, src_reg */
35317- EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
35318- add_2reg(0xC0, dst_reg, src_reg));
35319- goto emit_cond_jmp;
35320-
35321- case BPF_JMP | BPF_JSET | BPF_K:
35322- /* test dst_reg, imm32 */
35323- EMIT1(add_1mod(0x48, dst_reg));
35324- EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
35325- goto emit_cond_jmp;
35326-
35327- case BPF_JMP | BPF_JEQ | BPF_K:
35328- case BPF_JMP | BPF_JNE | BPF_K:
35329- case BPF_JMP | BPF_JGT | BPF_K:
35330- case BPF_JMP | BPF_JGE | BPF_K:
35331- case BPF_JMP | BPF_JSGT | BPF_K:
35332- case BPF_JMP | BPF_JSGE | BPF_K:
35333- /* cmp dst_reg, imm8/32 */
35334- EMIT1(add_1mod(0x48, dst_reg));
35335-
35336- if (is_imm8(imm32))
35337- EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
35338- else
35339- EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
35340-
35341-emit_cond_jmp: /* convert BPF opcode to x86 */
35342- switch (BPF_OP(insn->code)) {
35343- case BPF_JEQ:
35344- jmp_cond = X86_JE;
35345- break;
35346- case BPF_JSET:
35347- case BPF_JNE:
35348- jmp_cond = X86_JNE;
35349- break;
35350- case BPF_JGT:
35351- /* GT is unsigned '>', JA in x86 */
35352- jmp_cond = X86_JA;
35353- break;
35354- case BPF_JGE:
35355- /* GE is unsigned '>=', JAE in x86 */
35356- jmp_cond = X86_JAE;
35357- break;
35358- case BPF_JSGT:
35359- /* signed '>', GT in x86 */
35360- jmp_cond = X86_JG;
35361- break;
35362- case BPF_JSGE:
35363- /* signed '>=', GE in x86 */
35364- jmp_cond = X86_JGE;
35365- break;
35366- default: /* to silence gcc warning */
35367- return -EFAULT;
35368- }
35369- jmp_offset = addrs[i + insn->off] - addrs[i];
35370- if (is_imm8(jmp_offset)) {
35371- EMIT2(jmp_cond, jmp_offset);
35372- } else if (is_simm32(jmp_offset)) {
35373- EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
35374- } else {
35375- pr_err("cond_jmp gen bug %llx\n", jmp_offset);
35376- return -EFAULT;
35377- }
35378-
35379- break;
35380-
35381- case BPF_JMP | BPF_JA:
35382- jmp_offset = addrs[i + insn->off] - addrs[i];
35383- if (!jmp_offset)
35384- /* optimize out nop jumps */
35385- break;
35386-emit_jmp:
35387- if (is_imm8(jmp_offset)) {
35388- EMIT2(0xEB, jmp_offset);
35389- } else if (is_simm32(jmp_offset)) {
35390- EMIT1_off32(0xE9, jmp_offset);
35391- } else {
35392- pr_err("jmp gen bug %llx\n", jmp_offset);
35393- return -EFAULT;
35394- }
35395- break;
35396-
35397- case BPF_LD | BPF_IND | BPF_W:
35398- func = sk_load_word;
35399- goto common_load;
35400- case BPF_LD | BPF_ABS | BPF_W:
35401- func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
35402-common_load: ctx->seen_ld_abs = true;
35403- jmp_offset = func - (image + addrs[i]);
35404- if (!func || !is_simm32(jmp_offset)) {
35405- pr_err("unsupported bpf func %d addr %p image %p\n",
35406- imm32, func, image);
35407- return -EINVAL;
35408- }
35409- if (BPF_MODE(insn->code) == BPF_ABS) {
35410- /* mov %esi, imm32 */
35411- EMIT1_off32(0xBE, imm32);
35412- } else {
35413- /* mov %rsi, src_reg */
35414- EMIT_mov(BPF_REG_2, src_reg);
35415- if (imm32) {
35416- if (is_imm8(imm32))
35417- /* add %esi, imm8 */
35418- EMIT3(0x83, 0xC6, imm32);
35419- else
35420- /* add %esi, imm32 */
35421- EMIT2_off32(0x81, 0xC6, imm32);
35422- }
35423- }
35424- /* skb pointer is in R6 (%rbx), it will be copied into
35425- * %rdi if skb_copy_bits() call is necessary.
35426- * sk_load_* helpers also use %r10 and %r9d.
35427- * See bpf_jit.S
35428- */
35429- EMIT1_off32(0xE8, jmp_offset); /* call */
35430- break;
35431-
35432- case BPF_LD | BPF_IND | BPF_H:
35433- func = sk_load_half;
35434- goto common_load;
35435- case BPF_LD | BPF_ABS | BPF_H:
35436- func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
35437- goto common_load;
35438- case BPF_LD | BPF_IND | BPF_B:
35439- func = sk_load_byte;
35440- goto common_load;
35441- case BPF_LD | BPF_ABS | BPF_B:
35442- func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
35443- goto common_load;
35444-
35445- case BPF_JMP | BPF_EXIT:
35446- if (i != insn_cnt - 1) {
35447- jmp_offset = ctx->cleanup_addr - addrs[i];
35448- goto emit_jmp;
35449- }
35450- /* update cleanup_addr */
35451- ctx->cleanup_addr = proglen;
35452- /* mov rbx, qword ptr [rbp-X] */
35453- EMIT3_off32(0x48, 0x8B, 0x9D, -stacksize);
35454- /* mov r13, qword ptr [rbp-X] */
35455- EMIT3_off32(0x4C, 0x8B, 0xAD, -stacksize + 8);
35456- /* mov r14, qword ptr [rbp-X] */
35457- EMIT3_off32(0x4C, 0x8B, 0xB5, -stacksize + 16);
35458- /* mov r15, qword ptr [rbp-X] */
35459- EMIT3_off32(0x4C, 0x8B, 0xBD, -stacksize + 24);
35460-
35461- EMIT1(0xC9); /* leave */
35462- EMIT1(0xC3); /* ret */
35463- break;
35464-
35465- default:
35466- /* By design x64 JIT should support all BPF instructions
35467- * This error will be seen if new instruction was added
35468- * to interpreter, but not to JIT
35469- * or if there is junk in sk_filter
35470- */
35471- pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
35472- return -EINVAL;
35473- }
35474-
35475- ilen = prog - temp;
35476- if (image) {
35477- if (unlikely(proglen + ilen > oldproglen)) {
35478- pr_err("bpf_jit_compile fatal error\n");
35479- return -EFAULT;
35480- }
35481- memcpy(image + proglen, temp, ilen);
35482- }
35483- proglen += ilen;
35484- addrs[i] = proglen;
35485- prog = temp;
35486- }
35487- return proglen;
35488-}
35489-
35490-void bpf_jit_compile(struct sk_filter *prog)
35491-{
35492-}
35493-
35494-void bpf_int_jit_compile(struct sk_filter *prog)
35495-{
35496- struct bpf_binary_header *header = NULL;
35497- int proglen, oldproglen = 0;
35498- struct jit_context ctx = {};
35499+void bpf_jit_compile(struct sk_filter *fp)
35500+{
35501+ u8 temp[MAX_INSTR_CODE_SIZE];
35502+ u8 *prog;
35503+ unsigned int proglen, oldproglen = 0;
35504+ int ilen, i;
35505+ int t_offset, f_offset;
35506+ u8 t_op, f_op, seen = 0, pass;
35507 u8 *image = NULL;
35508- int *addrs;
35509- int pass;
35510- int i;
35511+ u8 *header = NULL;
35512+ u8 *func;
35513+ int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
35514+ unsigned int cleanup_addr; /* epilogue code offset */
35515+ unsigned int *addrs;
35516+ const struct sock_filter *filter = fp->insns;
35517+ int flen = fp->len;
35518+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
35519+ unsigned int randkey;
35520+#endif
35521
35522 if (!bpf_jit_enable)
35523 return;
35524
35525- if (!prog || !prog->len)
35526- return;
35527-
35528- addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
35529- if (!addrs)
35530+ addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
35531+ if (addrs == NULL)
35532 return;
35533
35534 /* Before first pass, make a rough estimation of addrs[]
35535- * each bpf instruction is translated to less than 64 bytes
35536+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
35537 */
35538- for (proglen = 0, i = 0; i < prog->len; i++) {
35539- proglen += 64;
35540+ for (proglen = 0, i = 0; i < flen; i++) {
35541+ proglen += MAX_INSTR_CODE_SIZE;
35542 addrs[i] = proglen;
35543 }
35544- ctx.cleanup_addr = proglen;
35545+ cleanup_addr = proglen; /* epilogue address */
35546
35547 for (pass = 0; pass < 10; pass++) {
35548- proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
35549- if (proglen <= 0) {
35550- image = NULL;
35551- if (header)
35552- module_free(NULL, header);
35553- goto out;
35554+ u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
35555+ /* no prologue/epilogue for trivial filters (RET something) */
35556+ proglen = 0;
35557+ prog = temp;
35558+
35559+ if (seen_or_pass0) {
35560+ EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
35561+ EMIT4(0x48, 0x83, 0xec, 96); /* subq $96,%rsp */
35562+ /* note : must save %rbx in case bpf_error is hit */
35563+ if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF))
35564+ EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
35565+ if (seen_or_pass0 & SEEN_XREG)
35566+			CLEAR_X(); /* make sure we don't leak kernel memory */
35567+
35568+ /*
35569+ * If this filter needs to access skb data,
35570+ * loads r9 and r8 with :
35571+		 * it loads r9 and r8 with:
35572+ * r8 = skb->data
35573+ */
35574+ if (seen_or_pass0 & SEEN_DATAREF) {
35575+ if (offsetof(struct sk_buff, len) <= 127)
35576+ /* mov off8(%rdi),%r9d */
35577+ EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
35578+ else {
35579+ /* mov off32(%rdi),%r9d */
35580+ EMIT3(0x44, 0x8b, 0x8f);
35581+ EMIT(offsetof(struct sk_buff, len), 4);
35582+ }
35583+ if (is_imm8(offsetof(struct sk_buff, data_len)))
35584+ /* sub off8(%rdi),%r9d */
35585+ EMIT4(0x44, 0x2b, 0x4f, offsetof(struct sk_buff, data_len));
35586+ else {
35587+ EMIT3(0x44, 0x2b, 0x8f);
35588+ EMIT(offsetof(struct sk_buff, data_len), 4);
35589+ }
35590+
35591+ if (is_imm8(offsetof(struct sk_buff, data)))
35592+ /* mov off8(%rdi),%r8 */
35593+ EMIT4(0x4c, 0x8b, 0x47, offsetof(struct sk_buff, data));
35594+ else {
35595+ /* mov off32(%rdi),%r8 */
35596+ EMIT3(0x4c, 0x8b, 0x87);
35597+ EMIT(offsetof(struct sk_buff, data), 4);
35598+ }
35599+ }
35600 }
35601+
35602+ switch (filter[0].code) {
35603+ case BPF_S_RET_K:
35604+ case BPF_S_LD_W_LEN:
35605+ case BPF_S_ANC_PROTOCOL:
35606+ case BPF_S_ANC_IFINDEX:
35607+ case BPF_S_ANC_MARK:
35608+ case BPF_S_ANC_RXHASH:
35609+ case BPF_S_ANC_CPU:
35610+ case BPF_S_ANC_VLAN_TAG:
35611+ case BPF_S_ANC_VLAN_TAG_PRESENT:
35612+ case BPF_S_ANC_QUEUE:
35613+ case BPF_S_ANC_PKTTYPE:
35614+ case BPF_S_LD_W_ABS:
35615+ case BPF_S_LD_H_ABS:
35616+ case BPF_S_LD_B_ABS:
35617+ /* first instruction sets A register (or is RET 'constant') */
35618+ break;
35619+ default:
35620+			/* make sure we don't leak kernel information to user */
35621+ CLEAR_A(); /* A = 0 */
35622+ }
35623+
35624+ for (i = 0; i < flen; i++) {
35625+ unsigned int K = filter[i].k;
35626+
35627+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
35628+ randkey = prandom_u32();
35629+#endif
35630+
35631+ switch (filter[i].code) {
35632+ case BPF_S_ALU_ADD_X: /* A += X; */
35633+ seen |= SEEN_XREG;
35634+ EMIT2(0x01, 0xd8); /* add %ebx,%eax */
35635+ break;
35636+ case BPF_S_ALU_ADD_K: /* A += K; */
35637+ if (!K)
35638+ break;
35639+ if (is_imm8(K))
35640+ EMIT3(0x83, 0xc0, K); /* add imm8,%eax */
35641+ else
35642+ EMIT1_off32(0x05, K); /* add imm32,%eax */
35643+ break;
35644+ case BPF_S_ALU_SUB_X: /* A -= X; */
35645+ seen |= SEEN_XREG;
35646+ EMIT2(0x29, 0xd8); /* sub %ebx,%eax */
35647+ break;
35648+ case BPF_S_ALU_SUB_K: /* A -= K */
35649+ if (!K)
35650+ break;
35651+ if (is_imm8(K))
35652+ EMIT3(0x83, 0xe8, K); /* sub imm8,%eax */
35653+ else
35654+ EMIT1_off32(0x2d, K); /* sub imm32,%eax */
35655+ break;
35656+ case BPF_S_ALU_MUL_X: /* A *= X; */
35657+ seen |= SEEN_XREG;
35658+ EMIT3(0x0f, 0xaf, 0xc3); /* imul %ebx,%eax */
35659+ break;
35660+ case BPF_S_ALU_MUL_K: /* A *= K */
35661+ if (is_imm8(K))
35662+ EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
35663+ else
35664+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
35665+ break;
35666+ case BPF_S_ALU_DIV_X: /* A /= X; */
35667+ seen |= SEEN_XREG;
35668+ EMIT2(0x85, 0xdb); /* test %ebx,%ebx */
35669+ if (pc_ret0 > 0) {
35670+ /* addrs[pc_ret0 - 1] is start address of target
35671+ * (addrs[i] - 4) is the address following this jmp
35672+ * ("xor %edx,%edx; div %ebx" being 4 bytes long)
35673+ */
35674+ EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
35675+ (addrs[i] - 4));
35676+ } else {
35677+ EMIT_COND_JMP(X86_JNE, 2 + 5);
35678+ CLEAR_A();
35679+ EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
35680+ }
35681+ EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */
35682+ break;
35683+ case BPF_S_ALU_MOD_X: /* A %= X; */
35684+ seen |= SEEN_XREG;
35685+ EMIT2(0x85, 0xdb); /* test %ebx,%ebx */
35686+ if (pc_ret0 > 0) {
35687+ /* addrs[pc_ret0 - 1] is start address of target
35688+ * (addrs[i] - 6) is the address following this jmp
35689+ * ("xor %edx,%edx; div %ebx;mov %edx,%eax" being 6 bytes long)
35690+ */
35691+ EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
35692+ (addrs[i] - 6));
35693+ } else {
35694+ EMIT_COND_JMP(X86_JNE, 2 + 5);
35695+ CLEAR_A();
35696+ EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 6)); /* jmp .+off32 */
35697+ }
35698+ EMIT2(0x31, 0xd2); /* xor %edx,%edx */
35699+ EMIT2(0xf7, 0xf3); /* div %ebx */
35700+ EMIT2(0x89, 0xd0); /* mov %edx,%eax */
35701+ break;
35702+ case BPF_S_ALU_MOD_K: /* A %= K; */
35703+ if (K == 1) {
35704+ CLEAR_A();
35705+ break;
35706+ }
35707+ EMIT2(0x31, 0xd2); /* xor %edx,%edx */
35708+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
35709+ DILUTE_CONST_SEQUENCE(K, randkey);
35710+#else
35711+ EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
35712+#endif
35713+ EMIT2(0xf7, 0xf1); /* div %ecx */
35714+ EMIT2(0x89, 0xd0); /* mov %edx,%eax */
35715+ break;
35716+ case BPF_S_ALU_DIV_K: /* A /= K */
35717+ if (K == 1)
35718+ break;
35719+ EMIT2(0x31, 0xd2); /* xor %edx,%edx */
35720+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
35721+ DILUTE_CONST_SEQUENCE(K, randkey);
35722+#else
35723+ EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
35724+#endif
35725+ EMIT2(0xf7, 0xf1); /* div %ecx */
35726+ break;
35727+ case BPF_S_ALU_AND_X:
35728+ seen |= SEEN_XREG;
35729+ EMIT2(0x21, 0xd8); /* and %ebx,%eax */
35730+ break;
35731+ case BPF_S_ALU_AND_K:
35732+ if (K >= 0xFFFFFF00) {
35733+ EMIT2(0x24, K & 0xFF); /* and imm8,%al */
35734+ } else if (K >= 0xFFFF0000) {
35735+ EMIT2(0x66, 0x25); /* and imm16,%ax */
35736+ EMIT(K, 2);
35737+ } else {
35738+ EMIT1_off32(0x25, K); /* and imm32,%eax */
35739+ }
35740+ break;
35741+ case BPF_S_ALU_OR_X:
35742+ seen |= SEEN_XREG;
35743+ EMIT2(0x09, 0xd8); /* or %ebx,%eax */
35744+ break;
35745+ case BPF_S_ALU_OR_K:
35746+ if (is_imm8(K))
35747+ EMIT3(0x83, 0xc8, K); /* or imm8,%eax */
35748+ else
35749+ EMIT1_off32(0x0d, K); /* or imm32,%eax */
35750+ break;
35751+ case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
35752+ case BPF_S_ALU_XOR_X:
35753+ seen |= SEEN_XREG;
35754+ EMIT2(0x31, 0xd8); /* xor %ebx,%eax */
35755+ break;
35756+ case BPF_S_ALU_XOR_K: /* A ^= K; */
35757+ if (K == 0)
35758+ break;
35759+ if (is_imm8(K))
35760+ EMIT3(0x83, 0xf0, K); /* xor imm8,%eax */
35761+ else
35762+ EMIT1_off32(0x35, K); /* xor imm32,%eax */
35763+ break;
35764+ case BPF_S_ALU_LSH_X: /* A <<= X; */
35765+ seen |= SEEN_XREG;
35766+ EMIT4(0x89, 0xd9, 0xd3, 0xe0); /* mov %ebx,%ecx; shl %cl,%eax */
35767+ break;
35768+ case BPF_S_ALU_LSH_K:
35769+ if (K == 0)
35770+ break;
35771+ else if (K == 1)
35772+ EMIT2(0xd1, 0xe0); /* shl %eax */
35773+ else
35774+ EMIT3(0xc1, 0xe0, K);
35775+ break;
35776+ case BPF_S_ALU_RSH_X: /* A >>= X; */
35777+ seen |= SEEN_XREG;
35778+ EMIT4(0x89, 0xd9, 0xd3, 0xe8); /* mov %ebx,%ecx; shr %cl,%eax */
35779+ break;
35780+ case BPF_S_ALU_RSH_K: /* A >>= K; */
35781+ if (K == 0)
35782+ break;
35783+ else if (K == 1)
35784+ EMIT2(0xd1, 0xe8); /* shr %eax */
35785+ else
35786+ EMIT3(0xc1, 0xe8, K);
35787+ break;
35788+ case BPF_S_ALU_NEG:
35789+ EMIT2(0xf7, 0xd8); /* neg %eax */
35790+ break;
35791+ case BPF_S_RET_K:
35792+ if (!K) {
35793+ if (pc_ret0 == -1)
35794+ pc_ret0 = i;
35795+ CLEAR_A();
35796+ } else {
35797+ EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
35798+ }
35799+		/* fall through */
35800+ case BPF_S_RET_A:
35801+ if (seen_or_pass0) {
35802+ if (i != flen - 1) {
35803+ EMIT_JMP(cleanup_addr - addrs[i]);
35804+ break;
35805+ }
35806+ if (seen_or_pass0 & SEEN_XREG)
35807+ EMIT4(0x48, 0x8b, 0x5d, 0xf8); /* mov -8(%rbp),%rbx */
35808+ EMIT1(0xc9); /* leaveq */
35809+ }
35810+ EMIT1(0xc3); /* ret */
35811+ break;
35812+ case BPF_S_MISC_TAX: /* X = A */
35813+ seen |= SEEN_XREG;
35814+ EMIT2(0x89, 0xc3); /* mov %eax,%ebx */
35815+ break;
35816+ case BPF_S_MISC_TXA: /* A = X */
35817+ seen |= SEEN_XREG;
35818+ EMIT2(0x89, 0xd8); /* mov %ebx,%eax */
35819+ break;
35820+ case BPF_S_LD_IMM: /* A = K */
35821+ if (!K)
35822+ CLEAR_A();
35823+ else
35824+ EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
35825+ break;
35826+ case BPF_S_LDX_IMM: /* X = K */
35827+ seen |= SEEN_XREG;
35828+ if (!K)
35829+ CLEAR_X();
35830+ else
35831+ EMIT1_off32(0xbb, K); /* mov $imm32,%ebx */
35832+ break;
35833+ case BPF_S_LD_MEM: /* A = mem[K] : mov off8(%rbp),%eax */
35834+ seen |= SEEN_MEM;
35835+ EMIT3(0x8b, 0x45, 0xf0 - K*4);
35836+ break;
35837+ case BPF_S_LDX_MEM: /* X = mem[K] : mov off8(%rbp),%ebx */
35838+ seen |= SEEN_XREG | SEEN_MEM;
35839+ EMIT3(0x8b, 0x5d, 0xf0 - K*4);
35840+ break;
35841+ case BPF_S_ST: /* mem[K] = A : mov %eax,off8(%rbp) */
35842+ seen |= SEEN_MEM;
35843+ EMIT3(0x89, 0x45, 0xf0 - K*4);
35844+ break;
35845+ case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
35846+ seen |= SEEN_XREG | SEEN_MEM;
35847+ EMIT3(0x89, 0x5d, 0xf0 - K*4);
35848+ break;
35849+ case BPF_S_LD_W_LEN: /* A = skb->len; */
35850+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
35851+ if (is_imm8(offsetof(struct sk_buff, len)))
35852+ /* mov off8(%rdi),%eax */
35853+ EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len));
35854+ else {
35855+ EMIT2(0x8b, 0x87);
35856+ EMIT(offsetof(struct sk_buff, len), 4);
35857+ }
35858+ break;
35859+ case BPF_S_LDX_W_LEN: /* X = skb->len; */
35860+ seen |= SEEN_XREG;
35861+ if (is_imm8(offsetof(struct sk_buff, len)))
35862+ /* mov off8(%rdi),%ebx */
35863+ EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len));
35864+ else {
35865+ EMIT2(0x8b, 0x9f);
35866+ EMIT(offsetof(struct sk_buff, len), 4);
35867+ }
35868+ break;
35869+ case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
35870+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
35871+ if (is_imm8(offsetof(struct sk_buff, protocol))) {
35872+ /* movzwl off8(%rdi),%eax */
35873+ EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol));
35874+ } else {
35875+ EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
35876+ EMIT(offsetof(struct sk_buff, protocol), 4);
35877+ }
35878+ EMIT2(0x86, 0xc4); /* ntohs() : xchg %al,%ah */
35879+ break;
35880+ case BPF_S_ANC_IFINDEX:
35881+ if (is_imm8(offsetof(struct sk_buff, dev))) {
35882+ /* movq off8(%rdi),%rax */
35883+ EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev));
35884+ } else {
35885+ EMIT3(0x48, 0x8b, 0x87); /* movq off32(%rdi),%rax */
35886+ EMIT(offsetof(struct sk_buff, dev), 4);
35887+ }
35888+ EMIT3(0x48, 0x85, 0xc0); /* test %rax,%rax */
35889+ EMIT_COND_JMP(X86_JE, cleanup_addr - (addrs[i] - 6));
35890+ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
35891+ EMIT2(0x8b, 0x80); /* mov off32(%rax),%eax */
35892+ EMIT(offsetof(struct net_device, ifindex), 4);
35893+ break;
35894+ case BPF_S_ANC_MARK:
35895+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
35896+ if (is_imm8(offsetof(struct sk_buff, mark))) {
35897+ /* mov off8(%rdi),%eax */
35898+ EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark));
35899+ } else {
35900+ EMIT2(0x8b, 0x87);
35901+ EMIT(offsetof(struct sk_buff, mark), 4);
35902+ }
35903+ break;
35904+ case BPF_S_ANC_RXHASH:
35905+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
35906+ if (is_imm8(offsetof(struct sk_buff, hash))) {
35907+ /* mov off8(%rdi),%eax */
35908+ EMIT3(0x8b, 0x47, offsetof(struct sk_buff, hash));
35909+ } else {
35910+ EMIT2(0x8b, 0x87);
35911+ EMIT(offsetof(struct sk_buff, hash), 4);
35912+ }
35913+ break;
35914+ case BPF_S_ANC_QUEUE:
35915+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
35916+ if (is_imm8(offsetof(struct sk_buff, queue_mapping))) {
35917+ /* movzwl off8(%rdi),%eax */
35918+ EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping));
35919+ } else {
35920+ EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
35921+ EMIT(offsetof(struct sk_buff, queue_mapping), 4);
35922+ }
35923+ break;
35924+ case BPF_S_ANC_CPU:
35925+#ifdef CONFIG_SMP
35926+ EMIT4(0x65, 0x8b, 0x04, 0x25); /* mov %gs:off32,%eax */
35927+ EMIT((u32)(unsigned long)&cpu_number, 4); /* A = smp_processor_id(); */
35928+#else
35929+ CLEAR_A();
35930+#endif
35931+ break;
35932+ case BPF_S_ANC_VLAN_TAG:
35933+ case BPF_S_ANC_VLAN_TAG_PRESENT:
35934+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
35935+ if (is_imm8(offsetof(struct sk_buff, vlan_tci))) {
35936+ /* movzwl off8(%rdi),%eax */
35937+ EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, vlan_tci));
35938+ } else {
35939+ EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
35940+ EMIT(offsetof(struct sk_buff, vlan_tci), 4);
35941+ }
35942+ BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
35943+ if (filter[i].code == BPF_S_ANC_VLAN_TAG) {
35944+ EMIT3(0x80, 0xe4, 0xef); /* and $0xef,%ah */
35945+ } else {
35946+ EMIT3(0xc1, 0xe8, 0x0c); /* shr $0xc,%eax */
35947+ EMIT3(0x83, 0xe0, 0x01); /* and $0x1,%eax */
35948+ }
35949+ break;
35950+ case BPF_S_ANC_PKTTYPE:
35951+ {
35952+ int off = pkt_type_offset();
35953+
35954+ if (off < 0)
35955+ goto out;
35956+ if (is_imm8(off)) {
35957+ /* movzbl off8(%rdi),%eax */
35958+ EMIT4(0x0f, 0xb6, 0x47, off);
35959+ } else {
35960+				/* movzbl off32(%rdi),%eax */
35961+ EMIT3(0x0f, 0xb6, 0x87);
35962+ EMIT(off, 4);
35963+ }
35964+ EMIT3(0x83, 0xe0, PKT_TYPE_MAX); /* and $0x7,%eax */
35965+ break;
35966+ }
35967+ case BPF_S_LD_W_ABS:
35968+ func = CHOOSE_LOAD_FUNC(K, sk_load_word);
35969+common_load: seen |= SEEN_DATAREF;
35970+ t_offset = func - (image + addrs[i]);
35971+ EMIT1_off32(0xbe, K); /* mov imm32,%esi */
35972+ EMIT1_off32(0xe8, t_offset); /* call */
35973+ break;
35974+ case BPF_S_LD_H_ABS:
35975+ func = CHOOSE_LOAD_FUNC(K, sk_load_half);
35976+ goto common_load;
35977+ case BPF_S_LD_B_ABS:
35978+ func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
35979+ goto common_load;
35980+ case BPF_S_LDX_B_MSH:
35981+ func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
35982+ seen |= SEEN_DATAREF | SEEN_XREG;
35983+ t_offset = func - (image + addrs[i]);
35984+ EMIT1_off32(0xbe, K); /* mov imm32,%esi */
35985+ EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */
35986+ break;
35987+ case BPF_S_LD_W_IND:
35988+ func = sk_load_word;
35989+common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
35990+ t_offset = func - (image + addrs[i]);
35991+ if (K) {
35992+ if (is_imm8(K)) {
35993+ EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
35994+ } else {
35995+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
35996+ }
35997+ } else {
35998+ EMIT2(0x89,0xde); /* mov %ebx,%esi */
35999+ }
36000+ EMIT1_off32(0xe8, t_offset); /* call sk_load_xxx_ind */
36001+ break;
36002+ case BPF_S_LD_H_IND:
36003+ func = sk_load_half;
36004+ goto common_load_ind;
36005+ case BPF_S_LD_B_IND:
36006+ func = sk_load_byte;
36007+ goto common_load_ind;
36008+ case BPF_S_JMP_JA:
36009+ t_offset = addrs[i + K] - addrs[i];
36010+ EMIT_JMP(t_offset);
36011+ break;
36012+ COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE);
36013+ COND_SEL(BPF_S_JMP_JGE_K, X86_JAE, X86_JB);
36014+ COND_SEL(BPF_S_JMP_JEQ_K, X86_JE, X86_JNE);
36015+ COND_SEL(BPF_S_JMP_JSET_K,X86_JNE, X86_JE);
36016+ COND_SEL(BPF_S_JMP_JGT_X, X86_JA, X86_JBE);
36017+ COND_SEL(BPF_S_JMP_JGE_X, X86_JAE, X86_JB);
36018+ COND_SEL(BPF_S_JMP_JEQ_X, X86_JE, X86_JNE);
36019+ COND_SEL(BPF_S_JMP_JSET_X,X86_JNE, X86_JE);
36020+
36021+cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
36022+ t_offset = addrs[i + filter[i].jt] - addrs[i];
36023+
36024+ /* same targets, can avoid doing the test :) */
36025+ if (filter[i].jt == filter[i].jf) {
36026+ EMIT_JMP(t_offset);
36027+ break;
36028+ }
36029+
36030+ switch (filter[i].code) {
36031+ case BPF_S_JMP_JGT_X:
36032+ case BPF_S_JMP_JGE_X:
36033+ case BPF_S_JMP_JEQ_X:
36034+ seen |= SEEN_XREG;
36035+ EMIT2(0x39, 0xd8); /* cmp %ebx,%eax */
36036+ break;
36037+ case BPF_S_JMP_JSET_X:
36038+ seen |= SEEN_XREG;
36039+ EMIT2(0x85, 0xd8); /* test %ebx,%eax */
36040+ break;
36041+ case BPF_S_JMP_JEQ_K:
36042+ if (K == 0) {
36043+ EMIT2(0x85, 0xc0); /* test %eax,%eax */
36044+ break;
36045+ }
36046+ case BPF_S_JMP_JGT_K:
36047+ case BPF_S_JMP_JGE_K:
36048+ if (K <= 127)
36049+ EMIT3(0x83, 0xf8, K); /* cmp imm8,%eax */
36050+ else
36051+ EMIT1_off32(0x3d, K); /* cmp imm32,%eax */
36052+ break;
36053+ case BPF_S_JMP_JSET_K:
36054+ if (K <= 0xFF)
36055+ EMIT2(0xa8, K); /* test imm8,%al */
36056+ else if (!(K & 0xFFFF00FF))
36057+ EMIT3(0xf6, 0xc4, K >> 8); /* test imm8,%ah */
36058+ else if (K <= 0xFFFF) {
36059+ EMIT2(0x66, 0xa9); /* test imm16,%ax */
36060+ EMIT(K, 2);
36061+ } else {
36062+ EMIT1_off32(0xa9, K); /* test imm32,%eax */
36063+ }
36064+ break;
36065+ }
36066+ if (filter[i].jt != 0) {
36067+ if (filter[i].jf && f_offset)
36068+ t_offset += is_near(f_offset) ? 2 : 5;
36069+ EMIT_COND_JMP(t_op, t_offset);
36070+ if (filter[i].jf)
36071+ EMIT_JMP(f_offset);
36072+ break;
36073+ }
36074+ EMIT_COND_JMP(f_op, f_offset);
36075+ break;
36076+ default:
36077+			/* hmm, filter too complex, give up on the jit compiler */
36078+ goto out;
36079+ }
36080+ ilen = prog - temp;
36081+ if (image) {
36082+ if (unlikely(proglen + ilen > oldproglen)) {
36083+				pr_err("bpf_jit_compile fatal error\n");
36084+ kfree(addrs);
36085+ module_free_exec(NULL, image);
36086+ return;
36087+ }
36088+ pax_open_kernel();
36089+ memcpy(image + proglen, temp, ilen);
36090+ pax_close_kernel();
36091+ }
36092+ proglen += ilen;
36093+ addrs[i] = proglen;
36094+ prog = temp;
36095+ }
36096+ /* last bpf instruction is always a RET :
36097+ * use it to give the cleanup instruction(s) addr
36098+ */
36099+ cleanup_addr = proglen - 1; /* ret */
36100+ if (seen_or_pass0)
36101+ cleanup_addr -= 1; /* leaveq */
36102+ if (seen_or_pass0 & SEEN_XREG)
36103+ cleanup_addr -= 4; /* mov -8(%rbp),%rbx */
36104+
36105 if (image) {
36106 if (proglen != oldproglen)
36107- pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
36108- proglen, oldproglen);
36109+			pr_err("bpf_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
36110 break;
36111 }
36112 if (proglen == oldproglen) {
36113@@ -918,32 +872,30 @@ void bpf_int_jit_compile(struct sk_filter *prog)
36114 }
36115
36116 if (bpf_jit_enable > 1)
36117- bpf_jit_dump(prog->len, proglen, 0, image);
36118+ bpf_jit_dump(flen, proglen, pass, image);
36119
36120 if (image) {
36121 bpf_flush_icache(header, image + proglen);
36122- set_memory_ro((unsigned long)header, header->pages);
36123- prog->bpf_func = (void *)image;
36124- prog->jited = 1;
36125+ fp->bpf_func = (void *)image;
36126 }
36127 out:
36128 kfree(addrs);
36129+ return;
36130 }
36131
36132 static void bpf_jit_free_deferred(struct work_struct *work)
36133 {
36134 struct sk_filter *fp = container_of(work, struct sk_filter, work);
36135 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
36136- struct bpf_binary_header *header = (void *)addr;
36137
36138- set_memory_rw(addr, header->pages);
36139- module_free(NULL, header);
36140+ set_memory_rw(addr, 1);
36141+ module_free_exec(NULL, (void *)addr);
36142 kfree(fp);
36143 }
36144
36145 void bpf_jit_free(struct sk_filter *fp)
36146 {
36147- if (fp->jited) {
36148+ if (fp->bpf_func != sk_run_filter) {
36149 INIT_WORK(&fp->work, bpf_jit_free_deferred);
36150 schedule_work(&fp->work);
36151 } else {
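Note on the CONFIG_GRKERNSEC_JIT_HARDEN hunks above: DILUTE_CONST_SEQUENCE keeps the attacker-supplied BPF constant K from ever appearing verbatim in the executable JIT image, which is what JIT-spraying payloads rely on. A minimal userspace sketch of the xor-blinding idea (dilute_const() here is illustrative; the patch's actual macro emits a different but equivalent pair of immediates):

	#include <assert.h>
	#include <stdint.h>
	#include <stdlib.h>
	#include <time.h>

	/* Emit K ^ randkey, then undo the mask at run time, e.g.
	 * "mov $(K ^ randkey),%ecx; xor $randkey,%ecx". */
	static void dilute_const(uint32_t K, uint32_t randkey,
				 uint32_t *imm_mov, uint32_t *imm_xor)
	{
		*imm_mov = K ^ randkey;	/* first emitted immediate  */
		*imm_xor = randkey;	/* second emitted immediate */
	}

	int main(void)
	{
		uint32_t K = 0xdeadbeef, key, m, x;

		srand(time(NULL));
		key = (uint32_t)rand();
		dilute_const(K, key, &m, &x);
		assert((m ^ x) == K);	/* %ecx still ends up holding K */
		return 0;
	}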
36152diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
36153index 5d04be5..2beeaa2 100644
36154--- a/arch/x86/oprofile/backtrace.c
36155+++ b/arch/x86/oprofile/backtrace.c
36156@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
36157 struct stack_frame_ia32 *fp;
36158 unsigned long bytes;
36159
36160- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
36161+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
36162 if (bytes != 0)
36163 return NULL;
36164
36165- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
36166+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
36167
36168 oprofile_add_trace(bufhead[0].return_address);
36169
36170@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
36171 struct stack_frame bufhead[2];
36172 unsigned long bytes;
36173
36174- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
36175+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
36176 if (bytes != 0)
36177 return NULL;
36178
36179@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
36180 {
36181 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
36182
36183- if (!user_mode_vm(regs)) {
36184+ if (!user_mode(regs)) {
36185 unsigned long stack = kernel_stack_pointer(regs);
36186 if (depth)
36187 dump_trace(NULL, regs, (unsigned long *)stack, 0,
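The __force_user/__force_kernel casts added in backtrace.c above are sparse address-space annotations; they change what the static checker accepts, not the generated code. Sketch of the conventional definitions such casts assume (the patch's own headers may differ in detail):

	#ifdef __CHECKER__
	# define __user		__attribute__((noderef, address_space(1)))
	# define __force	__attribute__((force))
	#else
	# define __user
	# define __force
	#endif

	/* presumably along these lines: */
	#define __force_user	__force __user
	#define __force_kernel	__force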
36188diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
36189index 379e8bd..6386e09 100644
36190--- a/arch/x86/oprofile/nmi_int.c
36191+++ b/arch/x86/oprofile/nmi_int.c
36192@@ -23,6 +23,7 @@
36193 #include <asm/nmi.h>
36194 #include <asm/msr.h>
36195 #include <asm/apic.h>
36196+#include <asm/pgtable.h>
36197
36198 #include "op_counter.h"
36199 #include "op_x86_model.h"
36200@@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
36201 if (ret)
36202 return ret;
36203
36204- if (!model->num_virt_counters)
36205- model->num_virt_counters = model->num_counters;
36206+ if (!model->num_virt_counters) {
36207+ pax_open_kernel();
36208+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
36209+ pax_close_kernel();
36210+ }
36211
36212 mux_init(ops);
36213
36214diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
36215index 50d86c0..7985318 100644
36216--- a/arch/x86/oprofile/op_model_amd.c
36217+++ b/arch/x86/oprofile/op_model_amd.c
36218@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
36219 num_counters = AMD64_NUM_COUNTERS;
36220 }
36221
36222- op_amd_spec.num_counters = num_counters;
36223- op_amd_spec.num_controls = num_counters;
36224- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
36225+ pax_open_kernel();
36226+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
36227+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
36228+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
36229+ pax_close_kernel();
36230
36231 return 0;
36232 }
36233diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
36234index d90528e..0127e2b 100644
36235--- a/arch/x86/oprofile/op_model_ppro.c
36236+++ b/arch/x86/oprofile/op_model_ppro.c
36237@@ -19,6 +19,7 @@
36238 #include <asm/msr.h>
36239 #include <asm/apic.h>
36240 #include <asm/nmi.h>
36241+#include <asm/pgtable.h>
36242
36243 #include "op_x86_model.h"
36244 #include "op_counter.h"
36245@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
36246
36247 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
36248
36249- op_arch_perfmon_spec.num_counters = num_counters;
36250- op_arch_perfmon_spec.num_controls = num_counters;
36251+ pax_open_kernel();
36252+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
36253+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
36254+ pax_close_kernel();
36255 }
36256
36257 static int arch_perfmon_init(struct oprofile_operations *ignore)
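The oprofile hunks above all follow one PaX pattern: once an ops structure is constified (__do_const) or marked __read_only, its few legitimate writers must bracket the store with pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write protection (on x86, e.g., by toggling CR0.WP). A condensed sketch of the pattern, assuming those semantics:

	static void update_ro_spec(struct op_x86_model_spec *spec,
				   unsigned int counters)
	{
		pax_open_kernel();	/* read-only kernel data now writable */
		*(unsigned int *)&spec->num_counters = counters;
		*(unsigned int *)&spec->num_controls = counters;
		pax_close_kernel();	/* write protection restored */
	}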
36258diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
36259index 71e8a67..6a313bb 100644
36260--- a/arch/x86/oprofile/op_x86_model.h
36261+++ b/arch/x86/oprofile/op_x86_model.h
36262@@ -52,7 +52,7 @@ struct op_x86_model_spec {
36263 void (*switch_ctrl)(struct op_x86_model_spec const *model,
36264 struct op_msrs const * const msrs);
36265 #endif
36266-};
36267+} __do_const;
36268
36269 struct op_counter_config;
36270
36271diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
36272index 84b9d67..260e5ff 100644
36273--- a/arch/x86/pci/intel_mid_pci.c
36274+++ b/arch/x86/pci/intel_mid_pci.c
36275@@ -245,7 +245,7 @@ int __init intel_mid_pci_init(void)
36276 pr_info("Intel MID platform detected, using MID PCI ops\n");
36277 pci_mmcfg_late_init();
36278 pcibios_enable_irq = intel_mid_pci_irq_enable;
36279- pci_root_ops = intel_mid_pci_ops;
36280+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
36281 pci_soc_mode = 1;
36282 /* Continue with standard init */
36283 return 1;
36284diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
36285index 84112f5..6334d60 100644
36286--- a/arch/x86/pci/irq.c
36287+++ b/arch/x86/pci/irq.c
36288@@ -50,7 +50,7 @@ struct irq_router {
36289 struct irq_router_handler {
36290 u16 vendor;
36291 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
36292-};
36293+} __do_const;
36294
36295 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
36296 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
36297@@ -790,7 +790,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
36298 return 0;
36299 }
36300
36301-static __initdata struct irq_router_handler pirq_routers[] = {
36302+static __initconst const struct irq_router_handler pirq_routers[] = {
36303 { PCI_VENDOR_ID_INTEL, intel_router_probe },
36304 { PCI_VENDOR_ID_AL, ali_router_probe },
36305 { PCI_VENDOR_ID_ITE, ite_router_probe },
36306@@ -817,7 +817,7 @@ static struct pci_dev *pirq_router_dev;
36307 static void __init pirq_find_router(struct irq_router *r)
36308 {
36309 struct irq_routing_table *rt = pirq_table;
36310- struct irq_router_handler *h;
36311+ const struct irq_router_handler *h;
36312
36313 #ifdef CONFIG_PCI_BIOS
36314 if (!rt->signature) {
36315@@ -1090,7 +1090,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
36316 return 0;
36317 }
36318
36319-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
36320+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
36321 {
36322 .callback = fix_broken_hp_bios_irq9,
36323 .ident = "HP Pavilion N5400 Series Laptop",
36324diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
36325index c77b24a..c979855 100644
36326--- a/arch/x86/pci/pcbios.c
36327+++ b/arch/x86/pci/pcbios.c
36328@@ -79,7 +79,7 @@ union bios32 {
36329 static struct {
36330 unsigned long address;
36331 unsigned short segment;
36332-} bios32_indirect = { 0, __KERNEL_CS };
36333+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
36334
36335 /*
36336 * Returns the entry point for the given service, NULL on error
36337@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
36338 unsigned long length; /* %ecx */
36339 unsigned long entry; /* %edx */
36340 unsigned long flags;
36341+ struct desc_struct d, *gdt;
36342
36343 local_irq_save(flags);
36344- __asm__("lcall *(%%edi); cld"
36345+
36346+ gdt = get_cpu_gdt_table(smp_processor_id());
36347+
36348+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
36349+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
36350+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
36351+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
36352+
36353+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
36354 : "=a" (return_code),
36355 "=b" (address),
36356 "=c" (length),
36357 "=d" (entry)
36358 : "0" (service),
36359 "1" (0),
36360- "D" (&bios32_indirect));
36361+ "D" (&bios32_indirect),
36362+ "r"(__PCIBIOS_DS)
36363+ : "memory");
36364+
36365+ pax_open_kernel();
36366+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
36367+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
36368+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
36369+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
36370+ pax_close_kernel();
36371+
36372 local_irq_restore(flags);
36373
36374 switch (return_code) {
36375- case 0:
36376- return address + entry;
36377- case 0x80: /* Not present */
36378- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
36379- return 0;
36380- default: /* Shouldn't happen */
36381- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
36382- service, return_code);
36383+ case 0: {
36384+ int cpu;
36385+ unsigned char flags;
36386+
36387+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
36388+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
36389+ printk(KERN_WARNING "bios32_service: not valid\n");
36390 return 0;
36391+ }
36392+ address = address + PAGE_OFFSET;
36393+ length += 16UL; /* some BIOSs underreport this... */
36394+ flags = 4;
36395+ if (length >= 64*1024*1024) {
36396+ length >>= PAGE_SHIFT;
36397+ flags |= 8;
36398+ }
36399+
36400+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
36401+ gdt = get_cpu_gdt_table(cpu);
36402+ pack_descriptor(&d, address, length, 0x9b, flags);
36403+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
36404+ pack_descriptor(&d, address, length, 0x93, flags);
36405+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
36406+ }
36407+ return entry;
36408+ }
36409+ case 0x80: /* Not present */
36410+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
36411+ return 0;
36412+ default: /* Shouldn't happen */
36413+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
36414+ service, return_code);
36415+ return 0;
36416 }
36417 }
36418
36419 static struct {
36420 unsigned long address;
36421 unsigned short segment;
36422-} pci_indirect = { 0, __KERNEL_CS };
36423+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
36424
36425-static int pci_bios_present;
36426+static int pci_bios_present __read_only;
36427
36428 static int check_pcibios(void)
36429 {
36430@@ -131,11 +174,13 @@ static int check_pcibios(void)
36431 unsigned long flags, pcibios_entry;
36432
36433 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
36434- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
36435+ pci_indirect.address = pcibios_entry;
36436
36437 local_irq_save(flags);
36438- __asm__(
36439- "lcall *(%%edi); cld\n\t"
36440+ __asm__("movw %w6, %%ds\n\t"
36441+ "lcall *%%ss:(%%edi); cld\n\t"
36442+ "push %%ss\n\t"
36443+ "pop %%ds\n\t"
36444 "jc 1f\n\t"
36445 "xor %%ah, %%ah\n"
36446 "1:"
36447@@ -144,7 +189,8 @@ static int check_pcibios(void)
36448 "=b" (ebx),
36449 "=c" (ecx)
36450 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
36451- "D" (&pci_indirect)
36452+ "D" (&pci_indirect),
36453+ "r" (__PCIBIOS_DS)
36454 : "memory");
36455 local_irq_restore(flags);
36456
36457@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36458
36459 switch (len) {
36460 case 1:
36461- __asm__("lcall *(%%esi); cld\n\t"
36462+ __asm__("movw %w6, %%ds\n\t"
36463+ "lcall *%%ss:(%%esi); cld\n\t"
36464+ "push %%ss\n\t"
36465+ "pop %%ds\n\t"
36466 "jc 1f\n\t"
36467 "xor %%ah, %%ah\n"
36468 "1:"
36469@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36470 : "1" (PCIBIOS_READ_CONFIG_BYTE),
36471 "b" (bx),
36472 "D" ((long)reg),
36473- "S" (&pci_indirect));
36474+ "S" (&pci_indirect),
36475+ "r" (__PCIBIOS_DS));
36476 /*
36477 * Zero-extend the result beyond 8 bits, do not trust the
36478 * BIOS having done it:
36479@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36480 *value &= 0xff;
36481 break;
36482 case 2:
36483- __asm__("lcall *(%%esi); cld\n\t"
36484+ __asm__("movw %w6, %%ds\n\t"
36485+ "lcall *%%ss:(%%esi); cld\n\t"
36486+ "push %%ss\n\t"
36487+ "pop %%ds\n\t"
36488 "jc 1f\n\t"
36489 "xor %%ah, %%ah\n"
36490 "1:"
36491@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36492 : "1" (PCIBIOS_READ_CONFIG_WORD),
36493 "b" (bx),
36494 "D" ((long)reg),
36495- "S" (&pci_indirect));
36496+ "S" (&pci_indirect),
36497+ "r" (__PCIBIOS_DS));
36498 /*
36499 * Zero-extend the result beyond 16 bits, do not trust the
36500 * BIOS having done it:
36501@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36502 *value &= 0xffff;
36503 break;
36504 case 4:
36505- __asm__("lcall *(%%esi); cld\n\t"
36506+ __asm__("movw %w6, %%ds\n\t"
36507+ "lcall *%%ss:(%%esi); cld\n\t"
36508+ "push %%ss\n\t"
36509+ "pop %%ds\n\t"
36510 "jc 1f\n\t"
36511 "xor %%ah, %%ah\n"
36512 "1:"
36513@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36514 : "1" (PCIBIOS_READ_CONFIG_DWORD),
36515 "b" (bx),
36516 "D" ((long)reg),
36517- "S" (&pci_indirect));
36518+ "S" (&pci_indirect),
36519+ "r" (__PCIBIOS_DS));
36520 break;
36521 }
36522
36523@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
36524
36525 switch (len) {
36526 case 1:
36527- __asm__("lcall *(%%esi); cld\n\t"
36528+ __asm__("movw %w6, %%ds\n\t"
36529+ "lcall *%%ss:(%%esi); cld\n\t"
36530+ "push %%ss\n\t"
36531+ "pop %%ds\n\t"
36532 "jc 1f\n\t"
36533 "xor %%ah, %%ah\n"
36534 "1:"
36535@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
36536 "c" (value),
36537 "b" (bx),
36538 "D" ((long)reg),
36539- "S" (&pci_indirect));
36540+ "S" (&pci_indirect),
36541+ "r" (__PCIBIOS_DS));
36542 break;
36543 case 2:
36544- __asm__("lcall *(%%esi); cld\n\t"
36545+ __asm__("movw %w6, %%ds\n\t"
36546+ "lcall *%%ss:(%%esi); cld\n\t"
36547+ "push %%ss\n\t"
36548+ "pop %%ds\n\t"
36549 "jc 1f\n\t"
36550 "xor %%ah, %%ah\n"
36551 "1:"
36552@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
36553 "c" (value),
36554 "b" (bx),
36555 "D" ((long)reg),
36556- "S" (&pci_indirect));
36557+ "S" (&pci_indirect),
36558+ "r" (__PCIBIOS_DS));
36559 break;
36560 case 4:
36561- __asm__("lcall *(%%esi); cld\n\t"
36562+ __asm__("movw %w6, %%ds\n\t"
36563+ "lcall *%%ss:(%%esi); cld\n\t"
36564+ "push %%ss\n\t"
36565+ "pop %%ds\n\t"
36566 "jc 1f\n\t"
36567 "xor %%ah, %%ah\n"
36568 "1:"
36569@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
36570 "c" (value),
36571 "b" (bx),
36572 "D" ((long)reg),
36573- "S" (&pci_indirect));
36574+ "S" (&pci_indirect),
36575+ "r" (__PCIBIOS_DS));
36576 break;
36577 }
36578
36579@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
36580
36581 DBG("PCI: Fetching IRQ routing table... ");
36582 __asm__("push %%es\n\t"
36583+ "movw %w8, %%ds\n\t"
36584 "push %%ds\n\t"
36585 "pop %%es\n\t"
36586- "lcall *(%%esi); cld\n\t"
36587+ "lcall *%%ss:(%%esi); cld\n\t"
36588 "pop %%es\n\t"
36589+ "push %%ss\n\t"
36590+ "pop %%ds\n"
36591 "jc 1f\n\t"
36592 "xor %%ah, %%ah\n"
36593 "1:"
36594@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
36595 "1" (0),
36596 "D" ((long) &opt),
36597 "S" (&pci_indirect),
36598- "m" (opt)
36599+ "m" (opt),
36600+ "r" (__PCIBIOS_DS)
36601 : "memory");
36602 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
36603 if (ret & 0xff00)
36604@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
36605 {
36606 int ret;
36607
36608- __asm__("lcall *(%%esi); cld\n\t"
36609+ __asm__("movw %w5, %%ds\n\t"
36610+ "lcall *%%ss:(%%esi); cld\n\t"
36611+ "push %%ss\n\t"
36612+ "pop %%ds\n"
36613 "jc 1f\n\t"
36614 "xor %%ah, %%ah\n"
36615 "1:"
36616@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
36617 : "0" (PCIBIOS_SET_PCI_HW_INT),
36618 "b" ((dev->bus->number << 8) | dev->devfn),
36619 "c" ((irq << 8) | (pin + 10)),
36620- "S" (&pci_indirect));
36621+ "S" (&pci_indirect),
36622+ "r" (__PCIBIOS_DS));
36623 return !(ret & 0xff00);
36624 }
36625 EXPORT_SYMBOL(pcibios_set_irq_routing);
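In the pcbios.c changes above, pack_descriptor(&d, base, limit, 0x9B/0x93, 0xC) builds transient GDT entries: 0x9B is a present, DPL-0, accessed execute/read code segment, 0x93 the matching read/write data segment, and flags 0xC selects 32-bit default operand size with 4 KiB granularity. For reference, a standalone sketch of the standard descriptor layout the call assumes (mirrors the kernel's pack_descriptor(), written out flat):

	#include <stdint.h>

	static uint64_t pack_desc(uint32_t base, uint32_t limit,
				  uint8_t type, uint8_t flags)
	{
		uint64_t d = 0;

		d |= limit & 0xffffULL;				/* limit 15..0  */
		d |= (uint64_t)(base & 0xffffff) << 16;		/* base  23..0  */
		d |= (uint64_t)type << 40;			/* access byte  */
		d |= (uint64_t)(limit & 0xf0000) << 32;		/* limit 19..16 */
		d |= (uint64_t)(flags & 0xf) << 52;		/* flags nibble */
		d |= (uint64_t)(base >> 24) << 56;		/* base  31..24 */
		return d;
	}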
36626diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
36627index 9ee3491..872192f 100644
36628--- a/arch/x86/platform/efi/efi_32.c
36629+++ b/arch/x86/platform/efi/efi_32.c
36630@@ -59,11 +59,22 @@ void efi_call_phys_prelog(void)
36631 {
36632 struct desc_ptr gdt_descr;
36633
36634+#ifdef CONFIG_PAX_KERNEXEC
36635+ struct desc_struct d;
36636+#endif
36637+
36638 local_irq_save(efi_rt_eflags);
36639
36640 load_cr3(initial_page_table);
36641 __flush_tlb_all();
36642
36643+#ifdef CONFIG_PAX_KERNEXEC
36644+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
36645+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
36646+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
36647+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
36648+#endif
36649+
36650 gdt_descr.address = __pa(get_cpu_gdt_table(0));
36651 gdt_descr.size = GDT_SIZE - 1;
36652 load_gdt(&gdt_descr);
36653@@ -73,11 +84,24 @@ void efi_call_phys_epilog(void)
36654 {
36655 struct desc_ptr gdt_descr;
36656
36657+#ifdef CONFIG_PAX_KERNEXEC
36658+ struct desc_struct d;
36659+
36660+ memset(&d, 0, sizeof d);
36661+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
36662+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
36663+#endif
36664+
36665 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
36666 gdt_descr.size = GDT_SIZE - 1;
36667 load_gdt(&gdt_descr);
36668
36669+#ifdef CONFIG_PAX_PER_CPU_PGD
36670+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
36671+#else
36672 load_cr3(swapper_pg_dir);
36673+#endif
36674+
36675 __flush_tlb_all();
36676
36677 local_irq_restore(efi_rt_eflags);
36678diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
36679index 290d397..e09d270 100644
36680--- a/arch/x86/platform/efi/efi_64.c
36681+++ b/arch/x86/platform/efi/efi_64.c
36682@@ -99,6 +99,11 @@ void __init efi_call_phys_prelog(void)
36683 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
36684 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
36685 }
36686+
36687+#ifdef CONFIG_PAX_PER_CPU_PGD
36688+ load_cr3(swapper_pg_dir);
36689+#endif
36690+
36691 __flush_tlb_all();
36692 }
36693
36694@@ -116,6 +121,11 @@ void __init efi_call_phys_epilog(void)
36695 for (pgd = 0; pgd < n_pgds; pgd++)
36696 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
36697 kfree(save_pgd);
36698+
36699+#ifdef CONFIG_PAX_PER_CPU_PGD
36700+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
36701+#endif
36702+
36703 __flush_tlb_all();
36704 local_irq_restore(efi_flags);
36705 early_code_mapping_set_exec(0);
36706@@ -146,8 +156,23 @@ int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
36707 unsigned npages;
36708 pgd_t *pgd;
36709
36710- if (efi_enabled(EFI_OLD_MEMMAP))
36711+ if (efi_enabled(EFI_OLD_MEMMAP)) {
36712+ /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be
36713+ * able to execute the EFI services.
36714+ */
36715+ if (__supported_pte_mask & _PAGE_NX) {
36716+ unsigned long addr = (unsigned long) __va(0);
36717+ pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) & ~_PAGE_NX);
36718+
36719+ pr_alert("PAX: Disabling NX protection for low memory map. Try booting without \"efi=old_map\"\n");
36720+#ifdef CONFIG_PAX_PER_CPU_PGD
36721+ set_pgd(pgd_offset_cpu(0, kernel, addr), pe);
36722+#endif
36723+ set_pgd(pgd_offset_k(addr), pe);
36724+ }
36725+
36726 return 0;
36727+ }
36728
36729 efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
36730 pgd = __va(efi_scratch.efi_pgt);
36731diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
36732index fbe66e6..eae5e38 100644
36733--- a/arch/x86/platform/efi/efi_stub_32.S
36734+++ b/arch/x86/platform/efi/efi_stub_32.S
36735@@ -6,7 +6,9 @@
36736 */
36737
36738 #include <linux/linkage.h>
36739+#include <linux/init.h>
36740 #include <asm/page_types.h>
36741+#include <asm/segment.h>
36742
36743 /*
36744 * efi_call_phys(void *, ...) is a function with variable parameters.
36745@@ -20,7 +22,7 @@
36746 * service functions will comply with gcc calling convention, too.
36747 */
36748
36749-.text
36750+__INIT
36751 ENTRY(efi_call_phys)
36752 /*
36753 * 0. The function can only be called in Linux kernel. So CS has been
36754@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
36755 * The mapping of lower virtual memory has been created in prelog and
36756 * epilog.
36757 */
36758- movl $1f, %edx
36759- subl $__PAGE_OFFSET, %edx
36760- jmp *%edx
36761+#ifdef CONFIG_PAX_KERNEXEC
36762+ movl $(__KERNEXEC_EFI_DS), %edx
36763+ mov %edx, %ds
36764+ mov %edx, %es
36765+ mov %edx, %ss
36766+ addl $2f,(1f)
36767+ ljmp *(1f)
36768+
36769+__INITDATA
36770+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
36771+.previous
36772+
36773+2:
36774+ subl $2b,(1b)
36775+#else
36776+ jmp 1f-__PAGE_OFFSET
36777 1:
36778+#endif
36779
36780 /*
36781 * 2. Now on the top of stack is the return
36782@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
36783 * parameter 2, ..., param n. To make things easy, we save the return
36784 * address of efi_call_phys in a global variable.
36785 */
36786- popl %edx
36787- movl %edx, saved_return_addr
36788- /* get the function pointer into ECX*/
36789- popl %ecx
36790- movl %ecx, efi_rt_function_ptr
36791- movl $2f, %edx
36792- subl $__PAGE_OFFSET, %edx
36793- pushl %edx
36794+ popl (saved_return_addr)
36795+ popl (efi_rt_function_ptr)
36796
36797 /*
36798 * 3. Clear PG bit in %CR0.
36799@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
36800 /*
36801 * 5. Call the physical function.
36802 */
36803- jmp *%ecx
36804+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
36805
36806-2:
36807 /*
36808 * 6. After EFI runtime service returns, control will return to
36809 * following instruction. We'd better readjust stack pointer first.
36810@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
36811 movl %cr0, %edx
36812 orl $0x80000000, %edx
36813 movl %edx, %cr0
36814- jmp 1f
36815-1:
36816+
36817 /*
36818 * 8. Now restore the virtual mode from flat mode by
36819 * adding EIP with PAGE_OFFSET.
36820 */
36821- movl $1f, %edx
36822- jmp *%edx
36823+#ifdef CONFIG_PAX_KERNEXEC
36824+ movl $(__KERNEL_DS), %edx
36825+ mov %edx, %ds
36826+ mov %edx, %es
36827+ mov %edx, %ss
36828+ ljmp $(__KERNEL_CS),$1f
36829+#else
36830+ jmp 1f+__PAGE_OFFSET
36831+#endif
36832 1:
36833
36834 /*
36835 * 9. Balance the stack. And because EAX contain the return value,
36836 * we'd better not clobber it.
36837 */
36838- leal efi_rt_function_ptr, %edx
36839- movl (%edx), %ecx
36840- pushl %ecx
36841+ pushl (efi_rt_function_ptr)
36842
36843 /*
36844- * 10. Push the saved return address onto the stack and return.
36845+ * 10. Return to the saved return address.
36846 */
36847- leal saved_return_addr, %edx
36848- movl (%edx), %ecx
36849- pushl %ecx
36850- ret
36851+ jmpl *(saved_return_addr)
36852 ENDPROC(efi_call_phys)
36853 .previous
36854
36855-.data
36856+__INITDATA
36857 saved_return_addr:
36858 .long 0
36859 efi_rt_function_ptr:
36860diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
36861index 5fcda72..cd4dc41 100644
36862--- a/arch/x86/platform/efi/efi_stub_64.S
36863+++ b/arch/x86/platform/efi/efi_stub_64.S
36864@@ -11,6 +11,7 @@
36865 #include <asm/msr.h>
36866 #include <asm/processor-flags.h>
36867 #include <asm/page_types.h>
36868+#include <asm/alternative-asm.h>
36869
36870 #define SAVE_XMM \
36871 mov %rsp, %rax; \
36872@@ -88,6 +89,7 @@ ENTRY(efi_call)
36873 RESTORE_PGT
36874 addq $48, %rsp
36875 RESTORE_XMM
36876+ pax_force_retaddr 0, 1
36877 ret
36878 ENDPROC(efi_call)
36879
36880@@ -245,8 +247,8 @@ efi_gdt64:
36881 .long 0 /* Filled out by user */
36882 .word 0
36883 .quad 0x0000000000000000 /* NULL descriptor */
36884- .quad 0x00af9a000000ffff /* __KERNEL_CS */
36885- .quad 0x00cf92000000ffff /* __KERNEL_DS */
36886+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
36887+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
36888 .quad 0x0080890000000000 /* TS descriptor */
36889 .quad 0x0000000000000000 /* TS continued */
36890 efi_gdt64_end:
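On the efi_gdt64 change above: 0x00af9a.../0x00cf92... become 0x00af9b.../0x00cf93..., i.e. the descriptors' accessed bits are pre-set. Once the GDT may live in read-only memory (KERNEXEC), that matters: loading a segment whose accessed bit is clear makes the CPU write the bit back into the descriptor, which would fault. Decoded for reference (standard x86 encoding, nothing patch-specific):

	/* access bytes at bits 40..47 of the descriptors above:
	 *   0x9a = P=1 DPL=0 S=1 type=1010b  code, execute/read, not accessed
	 *   0x9b = 0x9a | 1                  same, accessed bit pre-set
	 *   0x92 = P=1 DPL=0 S=1 type=0010b  data, read/write,   not accessed
	 *   0x93 = 0x92 | 1                  same, accessed bit pre-set
	 */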
36891diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
36892index 1bbedc4..eb795b5 100644
36893--- a/arch/x86/platform/intel-mid/intel-mid.c
36894+++ b/arch/x86/platform/intel-mid/intel-mid.c
36895@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
36896 {
36897 };
36898
36899-static void intel_mid_reboot(void)
36900+static void __noreturn intel_mid_reboot(void)
36901 {
36902 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
36903+ BUG();
36904 }
36905
36906 static unsigned long __init intel_mid_calibrate_tsc(void)
36907diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
36908index d6ee929..3637cb5 100644
36909--- a/arch/x86/platform/olpc/olpc_dt.c
36910+++ b/arch/x86/platform/olpc/olpc_dt.c
36911@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
36912 return res;
36913 }
36914
36915-static struct of_pdt_ops prom_olpc_ops __initdata = {
36916+static struct of_pdt_ops prom_olpc_ops __initconst = {
36917 .nextprop = olpc_dt_nextprop,
36918 .getproplen = olpc_dt_getproplen,
36919 .getproperty = olpc_dt_getproperty,
36920diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
36921index 424f4c9..f2a2988 100644
36922--- a/arch/x86/power/cpu.c
36923+++ b/arch/x86/power/cpu.c
36924@@ -137,11 +137,8 @@ static void do_fpu_end(void)
36925 static void fix_processor_context(void)
36926 {
36927 int cpu = smp_processor_id();
36928- struct tss_struct *t = &per_cpu(init_tss, cpu);
36929-#ifdef CONFIG_X86_64
36930- struct desc_struct *desc = get_cpu_gdt_table(cpu);
36931- tss_desc tss;
36932-#endif
36933+ struct tss_struct *t = init_tss + cpu;
36934+
36935 set_tss_desc(cpu, t); /*
36936 * This just modifies memory; should not be
36937 * necessary. But... This is necessary, because
36938@@ -150,10 +147,6 @@ static void fix_processor_context(void)
36939 */
36940
36941 #ifdef CONFIG_X86_64
36942- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
36943- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
36944- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
36945-
36946 syscall_init(); /* This sets MSR_*STAR and related */
36947 #endif
36948 load_TR_desc(); /* This does ltr */
36949diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
36950index bad628a..a102610 100644
36951--- a/arch/x86/realmode/init.c
36952+++ b/arch/x86/realmode/init.c
36953@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
36954 __va(real_mode_header->trampoline_header);
36955
36956 #ifdef CONFIG_X86_32
36957- trampoline_header->start = __pa_symbol(startup_32_smp);
36958+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
36959+
36960+#ifdef CONFIG_PAX_KERNEXEC
36961+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
36962+#endif
36963+
36964+ trampoline_header->boot_cs = __BOOT_CS;
36965 trampoline_header->gdt_limit = __BOOT_DS + 7;
36966 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
36967 #else
36968@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
36969 *trampoline_cr4_features = read_cr4();
36970
36971 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
36972- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
36973+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
36974 trampoline_pgd[511] = init_level4_pgt[511].pgd;
36975 #endif
36976 }
36977diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
36978index 7c0d7be..d24dc88 100644
36979--- a/arch/x86/realmode/rm/Makefile
36980+++ b/arch/x86/realmode/rm/Makefile
36981@@ -67,5 +67,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
36982
36983 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
36984 -I$(srctree)/arch/x86/boot
36985+ifdef CONSTIFY_PLUGIN
36986+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
36987+endif
36988 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
36989 GCOV_PROFILE := n
36990diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
36991index a28221d..93c40f1 100644
36992--- a/arch/x86/realmode/rm/header.S
36993+++ b/arch/x86/realmode/rm/header.S
36994@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
36995 #endif
36996 /* APM/BIOS reboot */
36997 .long pa_machine_real_restart_asm
36998-#ifdef CONFIG_X86_64
36999+#ifdef CONFIG_X86_32
37000+ .long __KERNEL_CS
37001+#else
37002 .long __KERNEL32_CS
37003 #endif
37004 END(real_mode_header)
37005diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
37006index 48ddd76..c26749f 100644
37007--- a/arch/x86/realmode/rm/trampoline_32.S
37008+++ b/arch/x86/realmode/rm/trampoline_32.S
37009@@ -24,6 +24,12 @@
37010 #include <asm/page_types.h>
37011 #include "realmode.h"
37012
37013+#ifdef CONFIG_PAX_KERNEXEC
37014+#define ta(X) (X)
37015+#else
37016+#define ta(X) (pa_ ## X)
37017+#endif
37018+
37019 .text
37020 .code16
37021
37022@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
37023
37024 cli # We should be safe anyway
37025
37026- movl tr_start, %eax # where we need to go
37027-
37028 movl $0xA5A5A5A5, trampoline_status
37029 # write marker for master knows we're running
37030
37031@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
37032 movw $1, %dx # protected mode (PE) bit
37033 lmsw %dx # into protected mode
37034
37035- ljmpl $__BOOT_CS, $pa_startup_32
37036+ ljmpl *(trampoline_header)
37037
37038 .section ".text32","ax"
37039 .code32
37040@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
37041 .balign 8
37042 GLOBAL(trampoline_header)
37043 tr_start: .space 4
37044- tr_gdt_pad: .space 2
37045+ tr_boot_cs: .space 2
37046 tr_gdt: .space 6
37047 END(trampoline_header)
37048
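The trampoline_32.S rework above replaces the hard-coded "ljmpl $__BOOT_CS, $pa_startup_32" with an indirect far jump through trampoline_header, which is why the old tr_gdt_pad word is repurposed as tr_boot_cs: an m16:32 indirect ljmp loads %eip from the first four bytes of its operand and %cs from the next two. The header's effective layout (sketch matching the labels above):

	#include <stdint.h>

	struct trampoline_header32 {
		uint32_t start;		/* tr_start:   new %eip           */
		uint16_t boot_cs;	/* tr_boot_cs: new %cs selector   */
		uint16_t gdt_limit;	/* tr_gdt:     lgdt operand limit */
		uint32_t gdt_base;	/* tr_gdt:     lgdt operand base  */
	} __attribute__((packed));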
37049diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
37050index dac7b20..72dbaca 100644
37051--- a/arch/x86/realmode/rm/trampoline_64.S
37052+++ b/arch/x86/realmode/rm/trampoline_64.S
37053@@ -93,6 +93,7 @@ ENTRY(startup_32)
37054 movl %edx, %gs
37055
37056 movl pa_tr_cr4, %eax
37057+ andl $~X86_CR4_PCIDE, %eax
37058 movl %eax, %cr4 # Enable PAE mode
37059
37060 # Setup trampoline 4 level pagetables
37061@@ -106,7 +107,7 @@ ENTRY(startup_32)
37062 wrmsr
37063
37064 # Enable paging and in turn activate Long Mode
37065- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
37066+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
37067 movl %eax, %cr0
37068
37069 /*
37070diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
37071index 9e7e147..25a4158 100644
37072--- a/arch/x86/realmode/rm/wakeup_asm.S
37073+++ b/arch/x86/realmode/rm/wakeup_asm.S
37074@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
37075 lgdtl pmode_gdt
37076
37077 /* This really couldn't... */
37078- movl pmode_entry, %eax
37079 movl pmode_cr0, %ecx
37080 movl %ecx, %cr0
37081- ljmpl $__KERNEL_CS, $pa_startup_32
37082- /* -> jmp *%eax in trampoline_32.S */
37083+
37084+ ljmpl *pmode_entry
37085 #else
37086 jmp trampoline_start
37087 #endif
37088diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
37089index 604a37e..e49702a 100644
37090--- a/arch/x86/tools/Makefile
37091+++ b/arch/x86/tools/Makefile
37092@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
37093
37094 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
37095
37096-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
37097+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
37098 hostprogs-y += relocs
37099 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
37100 PHONY += relocs
37101diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
37102index bbb1d22..e505211 100644
37103--- a/arch/x86/tools/relocs.c
37104+++ b/arch/x86/tools/relocs.c
37105@@ -1,5 +1,7 @@
37106 /* This is included from relocs_32/64.c */
37107
37108+#include "../../../include/generated/autoconf.h"
37109+
37110 #define ElfW(type) _ElfW(ELF_BITS, type)
37111 #define _ElfW(bits, type) __ElfW(bits, type)
37112 #define __ElfW(bits, type) Elf##bits##_##type
37113@@ -11,6 +13,7 @@
37114 #define Elf_Sym ElfW(Sym)
37115
37116 static Elf_Ehdr ehdr;
37117+static Elf_Phdr *phdr;
37118
37119 struct relocs {
37120 uint32_t *offset;
37121@@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp)
37122 }
37123 }
37124
37125+static void read_phdrs(FILE *fp)
37126+{
37127+ unsigned int i;
37128+
37129+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
37130+ if (!phdr) {
37131+ die("Unable to allocate %d program headers\n",
37132+ ehdr.e_phnum);
37133+ }
37134+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
37135+ die("Seek to %d failed: %s\n",
37136+ ehdr.e_phoff, strerror(errno));
37137+ }
37138+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
37139+ die("Cannot read ELF program headers: %s\n",
37140+ strerror(errno));
37141+ }
37142+ for(i = 0; i < ehdr.e_phnum; i++) {
37143+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
37144+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
37145+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
37146+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
37147+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
37148+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
37149+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
37150+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
37151+ }
37152+
37153+}
37154+
37155 static void read_shdrs(FILE *fp)
37156 {
37157- int i;
37158+ unsigned int i;
37159 Elf_Shdr shdr;
37160
37161 secs = calloc(ehdr.e_shnum, sizeof(struct section));
37162@@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp)
37163
37164 static void read_strtabs(FILE *fp)
37165 {
37166- int i;
37167+ unsigned int i;
37168 for (i = 0; i < ehdr.e_shnum; i++) {
37169 struct section *sec = &secs[i];
37170 if (sec->shdr.sh_type != SHT_STRTAB) {
37171@@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp)
37172
37173 static void read_symtabs(FILE *fp)
37174 {
37175- int i,j;
37176+ unsigned int i,j;
37177 for (i = 0; i < ehdr.e_shnum; i++) {
37178 struct section *sec = &secs[i];
37179 if (sec->shdr.sh_type != SHT_SYMTAB) {
37180@@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp)
37181 }
37182
37183
37184-static void read_relocs(FILE *fp)
37185+static void read_relocs(FILE *fp, int use_real_mode)
37186 {
37187- int i,j;
37188+ unsigned int i,j;
37189+ uint32_t base;
37190+
37191 for (i = 0; i < ehdr.e_shnum; i++) {
37192 struct section *sec = &secs[i];
37193 if (sec->shdr.sh_type != SHT_REL_TYPE) {
37194@@ -498,9 +533,22 @@ static void read_relocs(FILE *fp)
37195 die("Cannot read symbol table: %s\n",
37196 strerror(errno));
37197 }
37198+ base = 0;
37199+
37200+#ifdef CONFIG_X86_32
37201+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
37202+ if (phdr[j].p_type != PT_LOAD )
37203+ continue;
37204+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
37205+ continue;
37206+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
37207+ break;
37208+ }
37209+#endif
37210+
37211 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
37212 Elf_Rel *rel = &sec->reltab[j];
37213- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
37214+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
37215 rel->r_info = elf_xword_to_cpu(rel->r_info);
37216 #if (SHT_REL_TYPE == SHT_RELA)
37217 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
37218@@ -512,7 +560,7 @@ static void read_relocs(FILE *fp)
37219
37220 static void print_absolute_symbols(void)
37221 {
37222- int i;
37223+ unsigned int i;
37224 const char *format;
37225
37226 if (ELF_BITS == 64)
37227@@ -525,7 +573,7 @@ static void print_absolute_symbols(void)
37228 for (i = 0; i < ehdr.e_shnum; i++) {
37229 struct section *sec = &secs[i];
37230 char *sym_strtab;
37231- int j;
37232+ unsigned int j;
37233
37234 if (sec->shdr.sh_type != SHT_SYMTAB) {
37235 continue;
37236@@ -552,7 +600,7 @@ static void print_absolute_symbols(void)
37237
37238 static void print_absolute_relocs(void)
37239 {
37240- int i, printed = 0;
37241+ unsigned int i, printed = 0;
37242 const char *format;
37243
37244 if (ELF_BITS == 64)
37245@@ -565,7 +613,7 @@ static void print_absolute_relocs(void)
37246 struct section *sec_applies, *sec_symtab;
37247 char *sym_strtab;
37248 Elf_Sym *sh_symtab;
37249- int j;
37250+ unsigned int j;
37251 if (sec->shdr.sh_type != SHT_REL_TYPE) {
37252 continue;
37253 }
37254@@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
37255 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
37256 Elf_Sym *sym, const char *symname))
37257 {
37258- int i;
37259+ unsigned int i;
37260 /* Walk through the relocations */
37261 for (i = 0; i < ehdr.e_shnum; i++) {
37262 char *sym_strtab;
37263 Elf_Sym *sh_symtab;
37264 struct section *sec_applies, *sec_symtab;
37265- int j;
37266+ unsigned int j;
37267 struct section *sec = &secs[i];
37268
37269 if (sec->shdr.sh_type != SHT_REL_TYPE) {
37270@@ -822,6 +870,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
37271 {
37272 unsigned r_type = ELF32_R_TYPE(rel->r_info);
37273 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
37274+ char *sym_strtab = sec->link->link->strtab;
37275+
37276+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
37277+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
37278+ return 0;
37279+
37280+#ifdef CONFIG_PAX_KERNEXEC
37281+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
37282+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
37283+ return 0;
37284+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
37285+ return 0;
37286+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
37287+ return 0;
37288+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
37289+ return 0;
37290+#endif
37291
37292 switch (r_type) {
37293 case R_386_NONE:
37294@@ -960,7 +1025,7 @@ static int write32_as_text(uint32_t v, FILE *f)
37295
37296 static void emit_relocs(int as_text, int use_real_mode)
37297 {
37298- int i;
37299+ unsigned int i;
37300 int (*write_reloc)(uint32_t, FILE *) = write32;
37301 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
37302 const char *symname);
37303@@ -1060,10 +1125,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
37304 {
37305 regex_init(use_real_mode);
37306 read_ehdr(fp);
37307+ read_phdrs(fp);
37308 read_shdrs(fp);
37309 read_strtabs(fp);
37310 read_symtabs(fp);
37311- read_relocs(fp);
37312+ read_relocs(fp, use_real_mode);
37313 if (ELF_BITS == 64)
37314 percpu_init();
37315 if (show_absolute_syms) {
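On the read_relocs() change above: for 32-bit kernels the tool now rebases each relocation's r_offset into the kernel's virtual address space by locating the PT_LOAD segment that contains the relocated section and adding CONFIG_PAGE_OFFSET plus the segment's paddr/vaddr delta. The lookup in isolation (userspace sketch, simplified types):

	#include <elf.h>
	#include <stdint.h>

	/* Adjustment to add to r_offset for a section at file offset
	 * sh_offset; 0 if no PT_LOAD segment covers it. */
	static uint32_t reloc_base(Elf32_Off sh_offset, const Elf32_Phdr *phdr,
				   unsigned int phnum, uint32_t page_offset)
	{
		unsigned int j;

		for (j = 0; j < phnum; j++) {
			if (phdr[j].p_type != PT_LOAD)
				continue;
			if (sh_offset < phdr[j].p_offset ||
			    sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
				continue;
			return page_offset + phdr[j].p_paddr - phdr[j].p_vaddr;
		}
		return 0;
	}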
37316diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
37317index f40281e..92728c9 100644
37318--- a/arch/x86/um/mem_32.c
37319+++ b/arch/x86/um/mem_32.c
37320@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
37321 gate_vma.vm_start = FIXADDR_USER_START;
37322 gate_vma.vm_end = FIXADDR_USER_END;
37323 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
37324- gate_vma.vm_page_prot = __P101;
37325+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
37326
37327 return 0;
37328 }
37329diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
37330index 80ffa5b..a33bd15 100644
37331--- a/arch/x86/um/tls_32.c
37332+++ b/arch/x86/um/tls_32.c
37333@@ -260,7 +260,7 @@ out:
37334 if (unlikely(task == current &&
37335 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
37336 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
37337- "without flushed TLS.", current->pid);
37338+ "without flushed TLS.", task_pid_nr(current));
37339 }
37340
37341 return 0;
37342diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
37343index 61b04fe..3134230 100644
37344--- a/arch/x86/vdso/Makefile
37345+++ b/arch/x86/vdso/Makefile
37346@@ -170,7 +170,7 @@ quiet_cmd_vdso = VDSO $@
37347 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
37348 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
37349
37350-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
37351+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
37352 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
37353 GCOV_PROFILE := n
37354
37355diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
37356index e4f7781..ab5ab26 100644
37357--- a/arch/x86/vdso/vdso32-setup.c
37358+++ b/arch/x86/vdso/vdso32-setup.c
37359@@ -14,6 +14,7 @@
37360 #include <asm/cpufeature.h>
37361 #include <asm/processor.h>
37362 #include <asm/vdso.h>
37363+#include <asm/mman.h>
37364
37365 #ifdef CONFIG_COMPAT_VDSO
37366 #define VDSO_DEFAULT 0
37367diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
37368index 5a5176d..e570acd 100644
37369--- a/arch/x86/vdso/vma.c
37370+++ b/arch/x86/vdso/vma.c
37371@@ -16,10 +16,9 @@
37372 #include <asm/vdso.h>
37373 #include <asm/page.h>
37374 #include <asm/hpet.h>
37375+#include <asm/mman.h>
37376
37377 #if defined(CONFIG_X86_64)
37378-unsigned int __read_mostly vdso64_enabled = 1;
37379-
37380 extern unsigned short vdso_sync_cpuid;
37381 #endif
37382
37383@@ -101,6 +100,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
37384 .pages = no_pages,
37385 };
37386
37387+#ifdef CONFIG_PAX_RANDMMAP
37388+ if (mm->pax_flags & MF_PAX_RANDMMAP)
37389+ calculate_addr = false;
37390+#endif
37391+
37392 if (calculate_addr) {
37393 addr = vdso_addr(current->mm->start_stack,
37394 image->sym_end_mapping);
37395@@ -110,13 +114,13 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
37396
37397 down_write(&mm->mmap_sem);
37398
37399- addr = get_unmapped_area(NULL, addr, image->sym_end_mapping, 0, 0);
37400+ addr = get_unmapped_area(NULL, addr, image->sym_end_mapping, 0, MAP_EXECUTABLE);
37401 if (IS_ERR_VALUE(addr)) {
37402 ret = addr;
37403 goto up_fail;
37404 }
37405
37406- current->mm->context.vdso = (void __user *)addr;
37407+ mm->context.vdso = addr;
37408
37409 /*
37410 * MAYWRITE to allow gdb to COW and set breakpoints
37411@@ -161,15 +165,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
37412 hpet_address >> PAGE_SHIFT,
37413 PAGE_SIZE,
37414 pgprot_noncached(PAGE_READONLY));
37415-
37416- if (ret)
37417- goto up_fail;
37418 }
37419 #endif
37420
37421 up_fail:
37422 if (ret)
37423- current->mm->context.vdso = NULL;
37424+ current->mm->context.vdso = 0;
37425
37426 up_write(&mm->mmap_sem);
37427 return ret;
37428@@ -189,8 +190,8 @@ static int load_vdso32(void)
37429
37430 if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
37431 current_thread_info()->sysenter_return =
37432- current->mm->context.vdso +
37433- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
37434+ (void __force_user *)(current->mm->context.vdso +
37435+ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
37436
37437 return 0;
37438 }
37439@@ -199,9 +200,6 @@ static int load_vdso32(void)
37440 #ifdef CONFIG_X86_64
37441 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
37442 {
37443- if (!vdso64_enabled)
37444- return 0;
37445-
37446 return map_vdso(&vdso_image_64, true);
37447 }
37448
37449@@ -210,12 +208,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
37450 int uses_interp)
37451 {
37452 #ifdef CONFIG_X86_X32_ABI
37453- if (test_thread_flag(TIF_X32)) {
37454- if (!vdso64_enabled)
37455- return 0;
37456-
37457+ if (test_thread_flag(TIF_X32))
37458 return map_vdso(&vdso_image_x32, true);
37459- }
37460 #endif
37461
37462 return load_vdso32();
37463@@ -227,12 +221,3 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
37464 return load_vdso32();
37465 }
37466 #endif
37467-
37468-#ifdef CONFIG_X86_64
37469-static __init int vdso_setup(char *s)
37470-{
37471- vdso64_enabled = simple_strtoul(s, NULL, 0);
37472- return 0;
37473-}
37474-__setup("vdso=", vdso_setup);
37475-#endif
37476diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
37477index e88fda8..76ce7ce 100644
37478--- a/arch/x86/xen/Kconfig
37479+++ b/arch/x86/xen/Kconfig
37480@@ -9,6 +9,7 @@ config XEN
37481 select XEN_HAVE_PVMMU
37482 depends on X86_64 || (X86_32 && X86_PAE)
37483 depends on X86_TSC
37484+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
37485 help
37486 This is the Linux Xen port. Enabling this will allow the
37487 kernel to boot in a paravirtualized environment under the
37488diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
37489index ffb101e..98c0ecf 100644
37490--- a/arch/x86/xen/enlighten.c
37491+++ b/arch/x86/xen/enlighten.c
37492@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
37493
37494 struct shared_info xen_dummy_shared_info;
37495
37496-void *xen_initial_gdt;
37497-
37498 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
37499 __read_mostly int xen_have_vector_callback;
37500 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
37501@@ -542,8 +540,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
37502 {
37503 unsigned long va = dtr->address;
37504 unsigned int size = dtr->size + 1;
37505- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
37506- unsigned long frames[pages];
37507+ unsigned long frames[65536 / PAGE_SIZE];
37508 int f;
37509
37510 /*
37511@@ -591,8 +588,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
37512 {
37513 unsigned long va = dtr->address;
37514 unsigned int size = dtr->size + 1;
37515- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
37516- unsigned long frames[pages];
37517+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
37518 int f;
37519
37520 /*
37521@@ -600,7 +596,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
37522 * 8-byte entries, or 16 4k pages..
37523 */
37524
37525- BUG_ON(size > 65536);
37526+ BUG_ON(size > GDT_SIZE);
37527 BUG_ON(va & ~PAGE_MASK);
37528
37529 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
37530@@ -989,7 +985,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
37531 return 0;
37532 }
37533
37534-static void set_xen_basic_apic_ops(void)
37535+static void __init set_xen_basic_apic_ops(void)
37536 {
37537 apic->read = xen_apic_read;
37538 apic->write = xen_apic_write;
37539@@ -1295,30 +1291,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
37540 #endif
37541 };
37542
37543-static void xen_reboot(int reason)
37544+static __noreturn void xen_reboot(int reason)
37545 {
37546 struct sched_shutdown r = { .reason = reason };
37547
37548- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
37549- BUG();
37550+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
37551+ BUG();
37552 }
37553
37554-static void xen_restart(char *msg)
37555+static __noreturn void xen_restart(char *msg)
37556 {
37557 xen_reboot(SHUTDOWN_reboot);
37558 }
37559
37560-static void xen_emergency_restart(void)
37561+static __noreturn void xen_emergency_restart(void)
37562 {
37563 xen_reboot(SHUTDOWN_reboot);
37564 }
37565
37566-static void xen_machine_halt(void)
37567+static __noreturn void xen_machine_halt(void)
37568 {
37569 xen_reboot(SHUTDOWN_poweroff);
37570 }
37571
37572-static void xen_machine_power_off(void)
37573+static __noreturn void xen_machine_power_off(void)
37574 {
37575 if (pm_power_off)
37576 pm_power_off();
37577@@ -1568,7 +1564,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
37578 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
37579
37580 /* Work out if we support NX */
37581- x86_configure_nx();
37582+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
37583+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
37584+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
37585+ unsigned l, h;
37586+
37587+ __supported_pte_mask |= _PAGE_NX;
37588+ rdmsr(MSR_EFER, l, h);
37589+ l |= EFER_NX;
37590+ wrmsr(MSR_EFER, l, h);
37591+ }
37592+#endif
37593
37594 /* Get mfn list */
37595 xen_build_dynamic_phys_to_machine();
37596@@ -1596,13 +1602,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
37597
37598 machine_ops = xen_machine_ops;
37599
37600- /*
37601- * The only reliable way to retain the initial address of the
37602- * percpu gdt_page is to remember it here, so we can go and
37603- * mark it RW later, when the initial percpu area is freed.
37604- */
37605- xen_initial_gdt = &per_cpu(gdt_page, 0);
37606-
37607 xen_smp_init();
37608
37609 #ifdef CONFIG_ACPI_NUMA
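
Two independent changes land in enlighten.c: the GDT loaders replace a variable-length array whose size came from the descriptor (frames[pages]) with a fixed worst-case array bounded by GDT_SIZE, and NX is enabled by hand through CPUID and MSR_EFER instead of x86_configure_nx(). A standalone sketch of the VLA-removal pattern, with assert() standing in for BUG_ON() and an assumed 16-page GDT bound:

#include <assert.h>

#define PAGE_SIZE 4096UL
#define GDT_SIZE  (16 * PAGE_SIZE)	/* assumed worst case, as in the hunk */

static void load_gdt_sketch(unsigned long size)
{
	/* before: unsigned long frames[(size + PAGE_SIZE - 1) / PAGE_SIZE];
	 * a runtime-sized stack array grows with attacker-influenced input */
	unsigned long frames[GDT_SIZE / PAGE_SIZE];

	assert(size <= GDT_SIZE);	/* stands in for BUG_ON(size > GDT_SIZE) */
	(void)frames;			/* ... fill one machine frame per page ... */
}
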
37610diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
37611index e8a1201..046c66c 100644
37612--- a/arch/x86/xen/mmu.c
37613+++ b/arch/x86/xen/mmu.c
37614@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
37615 return val;
37616 }
37617
37618-static pteval_t pte_pfn_to_mfn(pteval_t val)
37619+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
37620 {
37621 if (val & _PAGE_PRESENT) {
37622 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
37623@@ -1904,6 +1904,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
37624 /* L3_k[510] -> level2_kernel_pgt
37625 * L3_i[511] -> level2_fixmap_pgt */
37626 convert_pfn_mfn(level3_kernel_pgt);
37627+ convert_pfn_mfn(level3_vmalloc_start_pgt);
37628+ convert_pfn_mfn(level3_vmalloc_end_pgt);
37629+ convert_pfn_mfn(level3_vmemmap_pgt);
37630 }
37631 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
37632 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
37633@@ -1933,8 +1936,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
37634 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
37635 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
37636 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
37637+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
37638+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
37639+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
37640 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
37641 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
37642+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
37643 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
37644 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
37645
37646@@ -2120,6 +2127,7 @@ static void __init xen_post_allocator_init(void)
37647 pv_mmu_ops.set_pud = xen_set_pud;
37648 #if PAGETABLE_LEVELS == 4
37649 pv_mmu_ops.set_pgd = xen_set_pgd;
37650+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
37651 #endif
37652
37653 /* This will work as long as patching hasn't happened yet
37654@@ -2198,6 +2206,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
37655 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
37656 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
37657 .set_pgd = xen_set_pgd_hyper,
37658+ .set_pgd_batched = xen_set_pgd_hyper,
37659
37660 .alloc_pud = xen_alloc_pmd_init,
37661 .release_pud = xen_release_pmd_init,
37662diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
37663index 7005974..54fb05f 100644
37664--- a/arch/x86/xen/smp.c
37665+++ b/arch/x86/xen/smp.c
37666@@ -283,17 +283,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
37667
37668 if (xen_pv_domain()) {
37669 if (!xen_feature(XENFEAT_writable_page_tables))
37670- /* We've switched to the "real" per-cpu gdt, so make
37671- * sure the old memory can be recycled. */
37672- make_lowmem_page_readwrite(xen_initial_gdt);
37673-
37674 #ifdef CONFIG_X86_32
37675 /*
37676 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
37677 * expects __USER_DS
37678 */
37679- loadsegment(ds, __USER_DS);
37680- loadsegment(es, __USER_DS);
37681+ loadsegment(ds, __KERNEL_DS);
37682+ loadsegment(es, __KERNEL_DS);
37683 #endif
37684
37685 xen_filter_cpu_maps();
37686@@ -372,7 +368,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
37687 #ifdef CONFIG_X86_32
37688 /* Note: PVH is not yet supported on x86_32. */
37689 ctxt->user_regs.fs = __KERNEL_PERCPU;
37690- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
37691+ savesegment(gs, ctxt->user_regs.gs);
37692 #endif
37693 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
37694
37695@@ -381,8 +377,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
37696 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
37697 ctxt->flags = VGCF_IN_KERNEL;
37698 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
37699- ctxt->user_regs.ds = __USER_DS;
37700- ctxt->user_regs.es = __USER_DS;
37701+ ctxt->user_regs.ds = __KERNEL_DS;
37702+ ctxt->user_regs.es = __KERNEL_DS;
37703 ctxt->user_regs.ss = __KERNEL_DS;
37704
37705 xen_copy_trap_info(ctxt->trap_ctxt);
37706@@ -437,14 +433,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
37707 int rc;
37708
37709 per_cpu(current_task, cpu) = idle;
37710+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
37711 #ifdef CONFIG_X86_32
37712 irq_ctx_init(cpu);
37713 #else
37714 clear_tsk_thread_flag(idle, TIF_FORK);
37715 #endif
37716- per_cpu(kernel_stack, cpu) =
37717- (unsigned long)task_stack_page(idle) -
37718- KERNEL_STACK_OFFSET + THREAD_SIZE;
37719+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
37720
37721 xen_setup_runstate_info(cpu);
37722 xen_setup_timer(cpu);
37723@@ -720,7 +715,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
37724
37725 void __init xen_smp_init(void)
37726 {
37727- smp_ops = xen_smp_ops;
37728+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
37729 xen_fill_possible_map();
37730 }
37731
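
Because grsecurity's constify plugin makes smp_ops read-only after boot, the plain structure assignment is replaced by a memcpy through a non-const cast, performed while the memory is still writable. A sketch of the pattern under that assumption (all _sketch names are ours):

#include <string.h>

struct smp_ops_sketch {
	int (*cpu_up)(unsigned int cpu);
};

/* becomes read-only after init once the constify plugin is in effect */
static struct smp_ops_sketch smp_ops;

static int xen_cpu_up_sketch(unsigned int cpu) { return 0; }
static const struct smp_ops_sketch xen_smp_ops = { .cpu_up = xen_cpu_up_sketch };

static void xen_smp_init_sketch(void)
{
	/* the cast drops the (eventual) const qualifier; valid only while
	 * the backing pages are still mapped writable, i.e. early boot */
	memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
}
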
37732diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
37733index fd92a64..1f72641 100644
37734--- a/arch/x86/xen/xen-asm_32.S
37735+++ b/arch/x86/xen/xen-asm_32.S
37736@@ -99,7 +99,7 @@ ENTRY(xen_iret)
37737 pushw %fs
37738 movl $(__KERNEL_PERCPU), %eax
37739 movl %eax, %fs
37740- movl %fs:xen_vcpu, %eax
37741+ mov PER_CPU_VAR(xen_vcpu), %eax
37742 POP_FS
37743 #else
37744 movl %ss:xen_vcpu, %eax
37745diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
37746index 485b695..fda3e7c 100644
37747--- a/arch/x86/xen/xen-head.S
37748+++ b/arch/x86/xen/xen-head.S
37749@@ -39,6 +39,17 @@ ENTRY(startup_xen)
37750 #ifdef CONFIG_X86_32
37751 mov %esi,xen_start_info
37752 mov $init_thread_union+THREAD_SIZE,%esp
37753+#ifdef CONFIG_SMP
37754+ movl $cpu_gdt_table,%edi
37755+ movl $__per_cpu_load,%eax
37756+ movw %ax,__KERNEL_PERCPU + 2(%edi)
37757+ rorl $16,%eax
37758+ movb %al,__KERNEL_PERCPU + 4(%edi)
37759+ movb %ah,__KERNEL_PERCPU + 7(%edi)
37760+ movl $__per_cpu_end - 1,%eax
37761+ subl $__per_cpu_start,%eax
37762+ movw %ax,__KERNEL_PERCPU + 0(%edi)
37763+#endif
37764 #else
37765 mov %rsi,xen_start_info
37766 mov $init_thread_union+THREAD_SIZE,%rsp
37767diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
37768index 97d8765..c4526ec 100644
37769--- a/arch/x86/xen/xen-ops.h
37770+++ b/arch/x86/xen/xen-ops.h
37771@@ -10,8 +10,6 @@
37772 extern const char xen_hypervisor_callback[];
37773 extern const char xen_failsafe_callback[];
37774
37775-extern void *xen_initial_gdt;
37776-
37777 struct trap_info;
37778 void xen_copy_trap_info(struct trap_info *traps);
37779
37780diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
37781index 525bd3d..ef888b1 100644
37782--- a/arch/xtensa/variants/dc232b/include/variant/core.h
37783+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
37784@@ -119,9 +119,9 @@
37785 ----------------------------------------------------------------------*/
37786
37787 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
37788-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
37789 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
37790 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
37791+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
37792
37793 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
37794 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
37795diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
37796index 2f33760..835e50a 100644
37797--- a/arch/xtensa/variants/fsf/include/variant/core.h
37798+++ b/arch/xtensa/variants/fsf/include/variant/core.h
37799@@ -11,6 +11,7 @@
37800 #ifndef _XTENSA_CORE_H
37801 #define _XTENSA_CORE_H
37802
37803+#include <linux/const.h>
37804
37805 /****************************************************************************
37806 Parameters Useful for Any Code, USER or PRIVILEGED
37807@@ -112,9 +113,9 @@
37808 ----------------------------------------------------------------------*/
37809
37810 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
37811-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
37812 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
37813 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
37814+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
37815
37816 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
37817 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
37818diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
37819index af00795..2bb8105 100644
37820--- a/arch/xtensa/variants/s6000/include/variant/core.h
37821+++ b/arch/xtensa/variants/s6000/include/variant/core.h
37822@@ -11,6 +11,7 @@
37823 #ifndef _XTENSA_CORE_CONFIGURATION_H
37824 #define _XTENSA_CORE_CONFIGURATION_H
37825
37826+#include <linux/const.h>
37827
37828 /****************************************************************************
37829 Parameters Useful for Any Code, USER or PRIVILEGED
37830@@ -118,9 +119,9 @@
37831 ----------------------------------------------------------------------*/
37832
37833 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
37834-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
37835 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
37836 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
37837+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
37838
37839 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
37840 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
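
All three xtensa variant headers receive the same fix: XCHAL_DCACHE_LINESIZE is now derived from XCHAL_DCACHE_LINEWIDTH via _AC() from the newly included <linux/const.h>, so the two macros cannot drift apart, the constant carries a UL type in C, and the definition stays usable from assembly. The idea in isolation (1UL written directly for a standalone build):

/* _AC(x, suffix) expands to x##suffix in C and to plain x in assembly */
#define DCACHE_LINEWIDTH 5				/* log2(line size) */
#define DCACHE_LINESIZE  (1UL << DCACHE_LINEWIDTH)	/* 32, always in sync */
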
37841diff --git a/block/bio.c b/block/bio.c
37842index 0ec61c9..93b94060 100644
37843--- a/block/bio.c
37844+++ b/block/bio.c
37845@@ -1159,7 +1159,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
37846 /*
37847 * Overflow, abort
37848 */
37849- if (end < start)
37850+ if (end < start || end - start > INT_MAX - nr_pages)
37851 return ERR_PTR(-EINVAL);
37852
37853 nr_pages += end - start;
37854@@ -1293,7 +1293,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
37855 /*
37856 * Overflow, abort
37857 */
37858- if (end < start)
37859+ if (end < start || end - start > INT_MAX - nr_pages)
37860 return ERR_PTR(-EINVAL);
37861
37862 nr_pages += end - start;
37863@@ -1555,7 +1555,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
37864 const int read = bio_data_dir(bio) == READ;
37865 struct bio_map_data *bmd = bio->bi_private;
37866 int i;
37867- char *p = bmd->sgvecs[0].iov_base;
37868+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
37869
37870 bio_for_each_segment_all(bvec, bio, i) {
37871 char *addr = page_address(bvec->bv_page);
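
The original guard in bio.c only caught wraparound of the page-range arithmetic (end < start); the strengthened check also rejects ranges whose page count would push the signed nr_pages accumulator past INT_MAX on a later iteration. The check in isolation:

#include <limits.h>

/* Mirrors the guard in bio_copy_user_iov()/__bio_map_user_iov():
 * fail instead of letting 'nr_pages' overflow. */
static int accumulate_pages(unsigned long start, unsigned long end, int *nr_pages)
{
	if (end < start || end - start > (unsigned long)(INT_MAX - *nr_pages))
		return -1;		/* -EINVAL in the kernel */
	*nr_pages += end - start;
	return 0;
}
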
37872diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
37873index 28d227c..d4c0bad 100644
37874--- a/block/blk-cgroup.c
37875+++ b/block/blk-cgroup.c
37876@@ -822,7 +822,7 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
37877 static struct cgroup_subsys_state *
37878 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
37879 {
37880- static atomic64_t id_seq = ATOMIC64_INIT(0);
37881+ static atomic64_unchecked_t id_seq = ATOMIC64_INIT(0);
37882 struct blkcg *blkcg;
37883
37884 if (!parent_css) {
37885@@ -836,7 +836,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
37886
37887 blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
37888 blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
37889- blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
37890+ blkcg->id = atomic64_inc_return_unchecked(&id_seq); /* root is 0, start from 1 */
37891 done:
37892 spin_lock_init(&blkcg->lock);
37893 INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
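
With PAX_REFCOUNT enabled, the ordinary atomic increment traps when a counter overflows; that is right for reference counts but not for a pure ID generator like id_seq, where wrap-around is harmless, so such counters move to the *_unchecked type and ops. A sketch of how the opt-out plausibly degrades when the feature is compiled out (assumed; the real definitions live in the per-arch atomic headers):

#ifndef CONFIG_PAX_REFCOUNT
/* without the feature, the unchecked type and op are just the plain ones */
typedef struct { long long counter; } atomic64_unchecked_t;
#define ATOMIC64_INIT(i)	{ (i) }
#define atomic64_inc_return_unchecked(v) \
	__sync_add_and_fetch(&(v)->counter, 1)
#endif

static atomic64_unchecked_t id_seq = ATOMIC64_INIT(0);
/* next id: atomic64_inc_return_unchecked(&id_seq) */
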
37894diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
37895index 0736729..2ec3b48 100644
37896--- a/block/blk-iopoll.c
37897+++ b/block/blk-iopoll.c
37898@@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
37899 }
37900 EXPORT_SYMBOL(blk_iopoll_complete);
37901
37902-static void blk_iopoll_softirq(struct softirq_action *h)
37903+static __latent_entropy void blk_iopoll_softirq(void)
37904 {
37905 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
37906 int rearm = 0, budget = blk_iopoll_budget;
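
Two things happen to the iopoll softirq handler: its unused struct softirq_action pointer is dropped (consistent with the patch retyping softirq entry points to take no argument), and it gains __latent_entropy, a gcc-plugin attribute under which the function's execution mixes some state into the kernel's entropy pool; blk_done_softirq() in block/blk-softirq.c below gets the identical treatment. The shape of the annotation, with the attribute stubbed so the sketch stands alone:

/* stub: the real __latent_entropy is provided by a gcc plugin that
 * instruments the function to stir a per-cpu entropy accumulator */
#ifndef __latent_entropy
#define __latent_entropy
#endif

static __latent_entropy void blk_iopoll_softirq_sketch(void)
{
	/* drain this CPU's iopoll list, honouring the budget ... */
}
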
37907diff --git a/block/blk-map.c b/block/blk-map.c
37908index f890d43..97b0482 100644
37909--- a/block/blk-map.c
37910+++ b/block/blk-map.c
37911@@ -300,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
37912 if (!len || !kbuf)
37913 return -EINVAL;
37914
37915- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
37916+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
37917 if (do_copy)
37918 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
37919 else
37920diff --git a/block/blk-softirq.c b/block/blk-softirq.c
37921index 53b1737..08177d2e 100644
37922--- a/block/blk-softirq.c
37923+++ b/block/blk-softirq.c
37924@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
37925 * Softirq action handler - move entries to local list and loop over them
37926 * while passing them to the queue registered handler.
37927 */
37928-static void blk_done_softirq(struct softirq_action *h)
37929+static __latent_entropy void blk_done_softirq(void)
37930 {
37931 struct list_head *cpu_list, local_list;
37932
37933diff --git a/block/bsg.c b/block/bsg.c
37934index ff46add..c4ba8ee 100644
37935--- a/block/bsg.c
37936+++ b/block/bsg.c
37937@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
37938 struct sg_io_v4 *hdr, struct bsg_device *bd,
37939 fmode_t has_write_perm)
37940 {
37941+ unsigned char tmpcmd[sizeof(rq->__cmd)];
37942+ unsigned char *cmdptr;
37943+
37944 if (hdr->request_len > BLK_MAX_CDB) {
37945 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
37946 if (!rq->cmd)
37947 return -ENOMEM;
37948- }
37949+ cmdptr = rq->cmd;
37950+ } else
37951+ cmdptr = tmpcmd;
37952
37953- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
37954+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
37955 hdr->request_len))
37956 return -EFAULT;
37957
37958+ if (cmdptr != rq->cmd)
37959+ memcpy(rq->cmd, cmdptr, hdr->request_len);
37960+
37961 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
37962 if (blk_verify_command(rq->cmd, has_write_perm))
37963 return -EPERM;
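
Instead of letting copy_from_user() write straight into rq->cmd, which for short CDBs points at the request's embedded __cmd array and would trip PAX_USERCOPY's object-size checking, the command is staged in a fixed-size stack buffer and memcpy'd into place only after the copy succeeds; block/scsi_ioctl.c below applies the same pattern twice. A user-space sketch, where copy_in() is a hypothetical stand-in for copy_from_user():

#include <string.h>

#define CDB_MAX 32	/* stands in for sizeof(rq->__cmd) */

/* hypothetical stand-in for copy_from_user(); a real one can fault */
static int copy_in(void *dst, const void *src, unsigned long len)
{
	memcpy(dst, src, len);
	return 0;	/* 0 on success */
}

static int fill_cmd(unsigned char *cmd, const void *user_cdb, unsigned long len)
{
	unsigned char tmpcmd[CDB_MAX];
	unsigned char *cmdptr = len <= sizeof(tmpcmd) ? tmpcmd : cmd;

	if (copy_in(cmdptr, user_cdb, len))
		return -1;			/* -EFAULT */
	if (cmdptr != cmd)
		memcpy(cmd, cmdptr, len);	/* commit the staged bytes */
	return 0;
}
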
37964diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
37965index a0926a6..b2b14b2 100644
37966--- a/block/compat_ioctl.c
37967+++ b/block/compat_ioctl.c
37968@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
37969 cgc = compat_alloc_user_space(sizeof(*cgc));
37970 cgc32 = compat_ptr(arg);
37971
37972- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
37973+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
37974 get_user(data, &cgc32->buffer) ||
37975 put_user(compat_ptr(data), &cgc->buffer) ||
37976 copy_in_user(&cgc->buflen, &cgc32->buflen,
37977@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
37978 err |= __get_user(f->spec1, &uf->spec1);
37979 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
37980 err |= __get_user(name, &uf->name);
37981- f->name = compat_ptr(name);
37982+ f->name = (void __force_kernel *)compat_ptr(name);
37983 if (err) {
37984 err = -EFAULT;
37985 goto out;
37986diff --git a/block/genhd.c b/block/genhd.c
37987index 791f419..89f21c4 100644
37988--- a/block/genhd.c
37989+++ b/block/genhd.c
37990@@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char *buf)
37991
37992 /*
37993 * Register device numbers dev..(dev+range-1)
37994- * range must be nonzero
37995+ * Noop if @range is zero.
37996 * The hash chain is sorted on range, so that subranges can override.
37997 */
37998 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
37999 struct kobject *(*probe)(dev_t, int *, void *),
38000 int (*lock)(dev_t, void *), void *data)
38001 {
38002- kobj_map(bdev_map, devt, range, module, probe, lock, data);
38003+ if (range)
38004+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
38005 }
38006
38007 EXPORT_SYMBOL(blk_register_region);
38008
38009+/* undo blk_register_region(), noop if @range is zero */
38010 void blk_unregister_region(dev_t devt, unsigned long range)
38011 {
38012- kobj_unmap(bdev_map, devt, range);
38013+ if (range)
38014+ kobj_unmap(bdev_map, devt, range);
38015 }
38016
38017 EXPORT_SYMBOL(blk_unregister_region);
38018diff --git a/block/partitions/efi.c b/block/partitions/efi.c
38019index dc51f46..d5446a8 100644
38020--- a/block/partitions/efi.c
38021+++ b/block/partitions/efi.c
38022@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
38023 if (!gpt)
38024 return NULL;
38025
38026+ if (!le32_to_cpu(gpt->num_partition_entries))
38027+ return NULL;
38028+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
38029+ if (!pte)
38030+ return NULL;
38031+
38032 count = le32_to_cpu(gpt->num_partition_entries) *
38033 le32_to_cpu(gpt->sizeof_partition_entry);
38034- if (!count)
38035- return NULL;
38036- pte = kmalloc(count, GFP_KERNEL);
38037- if (!pte)
38038- return NULL;
38039-
38040 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
38041 (u8 *) pte, count) < count) {
38042 kfree(pte);
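
alloc_read_gpt_entries() used to compute count = entries * entry_size and hand it to kmalloc(); both factors come from the on-disk GPT header, so on 32-bit the multiplication can wrap and yield a short allocation. kcalloc() performs the multiplication with an overflow check, and the zero-entries case is now rejected before allocating. The same idea with libc:

#include <stdlib.h>

/* calloc(), like kcalloc(), fails cleanly when n * size would overflow,
 * where malloc(n * size) silently wraps to a short buffer. */
static void *alloc_gpt_entries(size_t n, size_t size)
{
	if (n == 0)
		return NULL;	/* nothing to read */
	return calloc(n, size);
}
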
38043diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
38044index 84ab119..a217f27 100644
38045--- a/block/scsi_ioctl.c
38046+++ b/block/scsi_ioctl.c
38047@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
38048 return put_user(0, p);
38049 }
38050
38051-static int sg_get_timeout(struct request_queue *q)
38052+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
38053 {
38054 return jiffies_to_clock_t(q->sg_timeout);
38055 }
38056@@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
38057 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
38058 struct sg_io_hdr *hdr, fmode_t mode)
38059 {
38060- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
38061+ unsigned char tmpcmd[sizeof(rq->__cmd)];
38062+ unsigned char *cmdptr;
38063+
38064+ if (rq->cmd != rq->__cmd)
38065+ cmdptr = rq->cmd;
38066+ else
38067+ cmdptr = tmpcmd;
38068+
38069+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
38070 return -EFAULT;
38071+
38072+ if (cmdptr != rq->cmd)
38073+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
38074+
38075 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
38076 return -EPERM;
38077
38078@@ -413,6 +425,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
38079 int err;
38080 unsigned int in_len, out_len, bytes, opcode, cmdlen;
38081 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
38082+ unsigned char tmpcmd[sizeof(rq->__cmd)];
38083+ unsigned char *cmdptr;
38084
38085 if (!sic)
38086 return -EINVAL;
38087@@ -451,9 +465,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
38088 */
38089 err = -EFAULT;
38090 rq->cmd_len = cmdlen;
38091- if (copy_from_user(rq->cmd, sic->data, cmdlen))
38092+
38093+ if (rq->cmd != rq->__cmd)
38094+ cmdptr = rq->cmd;
38095+ else
38096+ cmdptr = tmpcmd;
38097+
38098+ if (copy_from_user(cmdptr, sic->data, cmdlen))
38099 goto error;
38100
38101+ if (rq->cmd != cmdptr)
38102+ memcpy(rq->cmd, cmdptr, cmdlen);
38103+
38104 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
38105 goto error;
38106
38107diff --git a/crypto/cryptd.c b/crypto/cryptd.c
38108index 7bdd61b..afec999 100644
38109--- a/crypto/cryptd.c
38110+++ b/crypto/cryptd.c
38111@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
38112
38113 struct cryptd_blkcipher_request_ctx {
38114 crypto_completion_t complete;
38115-};
38116+} __no_const;
38117
38118 struct cryptd_hash_ctx {
38119 struct crypto_shash *child;
38120@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
38121
38122 struct cryptd_aead_request_ctx {
38123 crypto_completion_t complete;
38124-};
38125+} __no_const;
38126
38127 static void cryptd_queue_worker(struct work_struct *work);
38128
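
The constify plugin places structures composed only of function pointers into read-only memory by default; these request contexts hold a completion callback that is rewritten per request, so they are tagged __no_const to opt out. The shape of the annotation, with the attribute stubbed so it compiles standalone:

/* stub: the real __no_const is a constify-plugin attribute */
#ifndef __no_const
#define __no_const
#endif

typedef void (*crypto_completion_t)(void *req, int err);

struct request_ctx_sketch {
	crypto_completion_t complete;	/* reassigned at runtime */
} __no_const;				/* must stay writable */
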
38129diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
38130index 309d345..1632720 100644
38131--- a/crypto/pcrypt.c
38132+++ b/crypto/pcrypt.c
38133@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
38134 int ret;
38135
38136 pinst->kobj.kset = pcrypt_kset;
38137- ret = kobject_add(&pinst->kobj, NULL, name);
38138+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
38139 if (!ret)
38140 kobject_uevent(&pinst->kobj, KOBJ_ADD);
38141
38142diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
38143index 6921c7f..78e1af7 100644
38144--- a/drivers/acpi/acpica/hwxfsleep.c
38145+++ b/drivers/acpi/acpica/hwxfsleep.c
38146@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
38147 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
38148
38149 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
38150- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
38151- acpi_hw_extended_sleep},
38152- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
38153- acpi_hw_extended_wake_prep},
38154- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
38155+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
38156+ .extended_function = acpi_hw_extended_sleep},
38157+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
38158+ .extended_function = acpi_hw_extended_wake_prep},
38159+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
38160+ .extended_function = acpi_hw_extended_wake}
38161 };
38162
38163 /*
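
The sleep dispatch table is converted from positional to designated initializers; nothing changes at runtime, but each entry now names the field it fills, so the table survives future reordering of struct acpi_sleep_functions and reads unambiguously. The difference in miniature (all _sketch names are ours):

struct sleep_functions_sketch {
	int (*legacy_function)(unsigned char state);
	int (*extended_function)(unsigned char state);
};

static int legacy_sleep(unsigned char s)   { return 0; }
static int extended_sleep(unsigned char s) { return 0; }

/* positional form { legacy_sleep, extended_sleep } depends on field order;
 * the designated form is explicit and order-independent */
static const struct sleep_functions_sketch dispatch = {
	.legacy_function   = legacy_sleep,
	.extended_function = extended_sleep,
};
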
38164diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
38165index e5bcd91..74f050d 100644
38166--- a/drivers/acpi/apei/apei-internal.h
38167+++ b/drivers/acpi/apei/apei-internal.h
38168@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
38169 struct apei_exec_ins_type {
38170 u32 flags;
38171 apei_exec_ins_func_t run;
38172-};
38173+} __do_const;
38174
38175 struct apei_exec_context {
38176 u32 ip;
38177diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
38178index dab7cb7..f0d2994 100644
38179--- a/drivers/acpi/apei/ghes.c
38180+++ b/drivers/acpi/apei/ghes.c
38181@@ -500,7 +500,7 @@ static void __ghes_print_estatus(const char *pfx,
38182 const struct acpi_hest_generic *generic,
38183 const struct acpi_generic_status *estatus)
38184 {
38185- static atomic_t seqno;
38186+ static atomic_unchecked_t seqno;
38187 unsigned int curr_seqno;
38188 char pfx_seq[64];
38189
38190@@ -511,7 +511,7 @@ static void __ghes_print_estatus(const char *pfx,
38191 else
38192 pfx = KERN_ERR;
38193 }
38194- curr_seqno = atomic_inc_return(&seqno);
38195+ curr_seqno = atomic_inc_return_unchecked(&seqno);
38196 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
38197 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
38198 pfx_seq, generic->header.source_id);
38199diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
38200index a83e3c6..c3d617f 100644
38201--- a/drivers/acpi/bgrt.c
38202+++ b/drivers/acpi/bgrt.c
38203@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
38204 if (!bgrt_image)
38205 return -ENODEV;
38206
38207- bin_attr_image.private = bgrt_image;
38208- bin_attr_image.size = bgrt_image_size;
38209+ pax_open_kernel();
38210+ *(void **)&bin_attr_image.private = bgrt_image;
38211+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
38212+ pax_close_kernel();
38213
38214 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
38215 if (!bgrt_kobj)
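
bin_attr_image becomes read-only under KERNEXEC/constify, so its two fields are written through explicit casts inside a pax_open_kernel()/pax_close_kernel() pair, which briefly permits kernel writes to otherwise read-only data (on x86, roughly by toggling CR0.WP). A sketch with the window functions stubbed out:

#include <stddef.h>

/* stubs: the real pair briefly allows writes to read-only kernel data */
static void pax_open_kernel(void)  { }
static void pax_close_kernel(void) { }

struct bin_attribute_sketch {
	const void *private;
	size_t size;
};

/* read-only in-kernel once constified; writable here for the sketch */
static struct bin_attribute_sketch bin_attr_image;

static void publish_image(const void *img, size_t len)
{
	pax_open_kernel();
	*(const void **)&bin_attr_image.private = img;	/* write through cast */
	*(size_t *)&bin_attr_image.size = len;
	pax_close_kernel();
}
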
38216diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
38217index 3d8413d..95f638c 100644
38218--- a/drivers/acpi/blacklist.c
38219+++ b/drivers/acpi/blacklist.c
38220@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
38221 u32 is_critical_error;
38222 };
38223
38224-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
38225+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
38226
38227 /*
38228 * POLICY: If *anything* doesn't work, put it on the blacklist.
38229@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
38230 return 0;
38231 }
38232
38233-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
38234+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
38235 {
38236 .callback = dmi_disable_osi_vista,
38237 .ident = "Fujitsu Siemens",
38238diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
38239index c68e724..e863008 100644
38240--- a/drivers/acpi/custom_method.c
38241+++ b/drivers/acpi/custom_method.c
38242@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
38243 struct acpi_table_header table;
38244 acpi_status status;
38245
38246+#ifdef CONFIG_GRKERNSEC_KMEM
38247+ return -EPERM;
38248+#endif
38249+
38250 if (!(*ppos)) {
38251 /* parse the table header to get the table length */
38252 if (count <= sizeof(struct acpi_table_header))
38253diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
38254index 17f9ec5..d9a455e 100644
38255--- a/drivers/acpi/processor_idle.c
38256+++ b/drivers/acpi/processor_idle.c
38257@@ -952,7 +952,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
38258 {
38259 int i, count = CPUIDLE_DRIVER_STATE_START;
38260 struct acpi_processor_cx *cx;
38261- struct cpuidle_state *state;
38262+ cpuidle_state_no_const *state;
38263 struct cpuidle_driver *drv = &acpi_idle_driver;
38264
38265 if (!pr->flags.power_setup_done)
38266diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
38267index 38cb978..352c761 100644
38268--- a/drivers/acpi/sysfs.c
38269+++ b/drivers/acpi/sysfs.c
38270@@ -423,11 +423,11 @@ static u32 num_counters;
38271 static struct attribute **all_attrs;
38272 static u32 acpi_gpe_count;
38273
38274-static struct attribute_group interrupt_stats_attr_group = {
38275+static attribute_group_no_const interrupt_stats_attr_group = {
38276 .name = "interrupts",
38277 };
38278
38279-static struct kobj_attribute *counter_attrs;
38280+static kobj_attribute_no_const *counter_attrs;
38281
38282 static void delete_gpe_attr_array(void)
38283 {
38284diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
38285index d72ce04..d6ab3c2 100644
38286--- a/drivers/ata/libahci.c
38287+++ b/drivers/ata/libahci.c
38288@@ -1257,7 +1257,7 @@ int ahci_kick_engine(struct ata_port *ap)
38289 }
38290 EXPORT_SYMBOL_GPL(ahci_kick_engine);
38291
38292-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
38293+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
38294 struct ata_taskfile *tf, int is_cmd, u16 flags,
38295 unsigned long timeout_msec)
38296 {
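
__intentional_overflow() comes from the size_overflow gcc plugin, which instruments expressions feeding size computations and traps on overflow; the annotation (here, and on sg_get_timeout() in block/scsi_ioctl.c above) marks functions whose arithmetic may wrap by design so the plugin leaves them and their callers alone. The shape of the marker, stubbed for a standalone build; the conversion body is an illustrative assumption:

/* stub: the real marker is consumed by the size_overflow gcc plugin */
#ifndef __intentional_overflow
#define __intentional_overflow(...)
#endif

/* jiffies-style conversion whose intermediate multiply may wrap by
 * design; the annotation tells the plugin not to instrument it */
static int __intentional_overflow(-1) to_clock_t_sketch(unsigned long j)
{
	return (int)(j * 100UL / 250UL);
}
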
38297diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
38298index 677c0c1..354b89b 100644
38299--- a/drivers/ata/libata-core.c
38300+++ b/drivers/ata/libata-core.c
38301@@ -98,7 +98,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
38302 static void ata_dev_xfermask(struct ata_device *dev);
38303 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
38304
38305-atomic_t ata_print_id = ATOMIC_INIT(0);
38306+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
38307
38308 struct ata_force_param {
38309 const char *name;
38310@@ -4863,7 +4863,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
38311 struct ata_port *ap;
38312 unsigned int tag;
38313
38314- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
38315+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
38316 ap = qc->ap;
38317
38318 qc->flags = 0;
38319@@ -4879,7 +4879,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
38320 struct ata_port *ap;
38321 struct ata_link *link;
38322
38323- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
38324+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
38325 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
38326 ap = qc->ap;
38327 link = qc->dev->link;
38328@@ -5983,6 +5983,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
38329 return;
38330
38331 spin_lock(&lock);
38332+ pax_open_kernel();
38333
38334 for (cur = ops->inherits; cur; cur = cur->inherits) {
38335 void **inherit = (void **)cur;
38336@@ -5996,8 +5997,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
38337 if (IS_ERR(*pp))
38338 *pp = NULL;
38339
38340- ops->inherits = NULL;
38341+ *(struct ata_port_operations **)&ops->inherits = NULL;
38342
38343+ pax_close_kernel();
38344 spin_unlock(&lock);
38345 }
38346
38347@@ -6193,7 +6195,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
38348
38349 /* give ports names and add SCSI hosts */
38350 for (i = 0; i < host->n_ports; i++) {
38351- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
38352+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
38353 host->ports[i]->local_port_no = i + 1;
38354 }
38355
38356diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
38357index 72691fd..ad104c0 100644
38358--- a/drivers/ata/libata-scsi.c
38359+++ b/drivers/ata/libata-scsi.c
38360@@ -4151,7 +4151,7 @@ int ata_sas_port_init(struct ata_port *ap)
38361
38362 if (rc)
38363 return rc;
38364- ap->print_id = atomic_inc_return(&ata_print_id);
38365+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
38366 return 0;
38367 }
38368 EXPORT_SYMBOL_GPL(ata_sas_port_init);
38369diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
38370index 45b5ab3..98446b8 100644
38371--- a/drivers/ata/libata.h
38372+++ b/drivers/ata/libata.h
38373@@ -53,7 +53,7 @@ enum {
38374 ATA_DNXFER_QUIET = (1 << 31),
38375 };
38376
38377-extern atomic_t ata_print_id;
38378+extern atomic_unchecked_t ata_print_id;
38379 extern int atapi_passthru16;
38380 extern int libata_fua;
38381 extern int libata_noacpi;
38382diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
38383index 4edb1a8..84e1658 100644
38384--- a/drivers/ata/pata_arasan_cf.c
38385+++ b/drivers/ata/pata_arasan_cf.c
38386@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
38387 /* Handle platform specific quirks */
38388 if (quirk) {
38389 if (quirk & CF_BROKEN_PIO) {
38390- ap->ops->set_piomode = NULL;
38391+ pax_open_kernel();
38392+ *(void **)&ap->ops->set_piomode = NULL;
38393+ pax_close_kernel();
38394 ap->pio_mask = 0;
38395 }
38396 if (quirk & CF_BROKEN_MWDMA)
38397diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
38398index f9b983a..887b9d8 100644
38399--- a/drivers/atm/adummy.c
38400+++ b/drivers/atm/adummy.c
38401@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
38402 vcc->pop(vcc, skb);
38403 else
38404 dev_kfree_skb_any(skb);
38405- atomic_inc(&vcc->stats->tx);
38406+ atomic_inc_unchecked(&vcc->stats->tx);
38407
38408 return 0;
38409 }
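
From here on, the ATM drivers receive a long, purely mechanical sweep: every atomic_inc()/atomic_add()/atomic_read() on the per-VCC rx/tx/err statistics becomes its _unchecked counterpart, because these counters wrap by design and must not trigger PAX_REFCOUNT's overflow trap during normal uptime. The distinction, sketched once so the following hunks can be read at a glance (both bodies are illustrative; the real ops are per-arch):

typedef struct { int counter; } atomic_t_sketch;

static void atomic_inc_sketch(atomic_t_sketch *v)
{
	__sync_fetch_and_add(&v->counter, 1);	/* instrumented to trap on
						 * overflow under PAX_REFCOUNT */
}

static void atomic_inc_unchecked_sketch(atomic_t_sketch *v)
{
	__sync_fetch_and_add(&v->counter, 1);	/* never instrumented: wrap
						 * is the expected behaviour */
}
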
38410diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
38411index f1a9198..f466a4a 100644
38412--- a/drivers/atm/ambassador.c
38413+++ b/drivers/atm/ambassador.c
38414@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
38415 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
38416
38417 // VC layer stats
38418- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
38419+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
38420
38421 // free the descriptor
38422 kfree (tx_descr);
38423@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
38424 dump_skb ("<<<", vc, skb);
38425
38426 // VC layer stats
38427- atomic_inc(&atm_vcc->stats->rx);
38428+ atomic_inc_unchecked(&atm_vcc->stats->rx);
38429 __net_timestamp(skb);
38430 // end of our responsibility
38431 atm_vcc->push (atm_vcc, skb);
38432@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
38433 } else {
38434 PRINTK (KERN_INFO, "dropped over-size frame");
38435 // should we count this?
38436- atomic_inc(&atm_vcc->stats->rx_drop);
38437+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
38438 }
38439
38440 } else {
38441@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
38442 }
38443
38444 if (check_area (skb->data, skb->len)) {
38445- atomic_inc(&atm_vcc->stats->tx_err);
38446+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
38447 return -ENOMEM; // ?
38448 }
38449
38450diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
38451index 0e3f8f9..765a7a5 100644
38452--- a/drivers/atm/atmtcp.c
38453+++ b/drivers/atm/atmtcp.c
38454@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
38455 if (vcc->pop) vcc->pop(vcc,skb);
38456 else dev_kfree_skb(skb);
38457 if (dev_data) return 0;
38458- atomic_inc(&vcc->stats->tx_err);
38459+ atomic_inc_unchecked(&vcc->stats->tx_err);
38460 return -ENOLINK;
38461 }
38462 size = skb->len+sizeof(struct atmtcp_hdr);
38463@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
38464 if (!new_skb) {
38465 if (vcc->pop) vcc->pop(vcc,skb);
38466 else dev_kfree_skb(skb);
38467- atomic_inc(&vcc->stats->tx_err);
38468+ atomic_inc_unchecked(&vcc->stats->tx_err);
38469 return -ENOBUFS;
38470 }
38471 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
38472@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
38473 if (vcc->pop) vcc->pop(vcc,skb);
38474 else dev_kfree_skb(skb);
38475 out_vcc->push(out_vcc,new_skb);
38476- atomic_inc(&vcc->stats->tx);
38477- atomic_inc(&out_vcc->stats->rx);
38478+ atomic_inc_unchecked(&vcc->stats->tx);
38479+ atomic_inc_unchecked(&out_vcc->stats->rx);
38480 return 0;
38481 }
38482
38483@@ -299,7 +299,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
38484 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
38485 read_unlock(&vcc_sklist_lock);
38486 if (!out_vcc) {
38487- atomic_inc(&vcc->stats->tx_err);
38488+ atomic_inc_unchecked(&vcc->stats->tx_err);
38489 goto done;
38490 }
38491 skb_pull(skb,sizeof(struct atmtcp_hdr));
38492@@ -311,8 +311,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
38493 __net_timestamp(new_skb);
38494 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
38495 out_vcc->push(out_vcc,new_skb);
38496- atomic_inc(&vcc->stats->tx);
38497- atomic_inc(&out_vcc->stats->rx);
38498+ atomic_inc_unchecked(&vcc->stats->tx);
38499+ atomic_inc_unchecked(&out_vcc->stats->rx);
38500 done:
38501 if (vcc->pop) vcc->pop(vcc,skb);
38502 else dev_kfree_skb(skb);
38503diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
38504index b1955ba..b179940 100644
38505--- a/drivers/atm/eni.c
38506+++ b/drivers/atm/eni.c
38507@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
38508 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
38509 vcc->dev->number);
38510 length = 0;
38511- atomic_inc(&vcc->stats->rx_err);
38512+ atomic_inc_unchecked(&vcc->stats->rx_err);
38513 }
38514 else {
38515 length = ATM_CELL_SIZE-1; /* no HEC */
38516@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
38517 size);
38518 }
38519 eff = length = 0;
38520- atomic_inc(&vcc->stats->rx_err);
38521+ atomic_inc_unchecked(&vcc->stats->rx_err);
38522 }
38523 else {
38524 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
38525@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
38526 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
38527 vcc->dev->number,vcc->vci,length,size << 2,descr);
38528 length = eff = 0;
38529- atomic_inc(&vcc->stats->rx_err);
38530+ atomic_inc_unchecked(&vcc->stats->rx_err);
38531 }
38532 }
38533 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
38534@@ -767,7 +767,7 @@ rx_dequeued++;
38535 vcc->push(vcc,skb);
38536 pushed++;
38537 }
38538- atomic_inc(&vcc->stats->rx);
38539+ atomic_inc_unchecked(&vcc->stats->rx);
38540 }
38541 wake_up(&eni_dev->rx_wait);
38542 }
38543@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
38544 PCI_DMA_TODEVICE);
38545 if (vcc->pop) vcc->pop(vcc,skb);
38546 else dev_kfree_skb_irq(skb);
38547- atomic_inc(&vcc->stats->tx);
38548+ atomic_inc_unchecked(&vcc->stats->tx);
38549 wake_up(&eni_dev->tx_wait);
38550 dma_complete++;
38551 }
38552diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
38553index 82f2ae0..f205c02 100644
38554--- a/drivers/atm/firestream.c
38555+++ b/drivers/atm/firestream.c
38556@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
38557 }
38558 }
38559
38560- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
38561+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
38562
38563 fs_dprintk (FS_DEBUG_TXMEM, "i");
38564 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
38565@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
38566 #endif
38567 skb_put (skb, qe->p1 & 0xffff);
38568 ATM_SKB(skb)->vcc = atm_vcc;
38569- atomic_inc(&atm_vcc->stats->rx);
38570+ atomic_inc_unchecked(&atm_vcc->stats->rx);
38571 __net_timestamp(skb);
38572 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
38573 atm_vcc->push (atm_vcc, skb);
38574@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
38575 kfree (pe);
38576 }
38577 if (atm_vcc)
38578- atomic_inc(&atm_vcc->stats->rx_drop);
38579+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
38580 break;
38581 case 0x1f: /* Reassembly abort: no buffers. */
38582 /* Silently increment error counter. */
38583 if (atm_vcc)
38584- atomic_inc(&atm_vcc->stats->rx_drop);
38585+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
38586 break;
38587 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
38588 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
38589diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
38590index d4725fc..2d4ea65 100644
38591--- a/drivers/atm/fore200e.c
38592+++ b/drivers/atm/fore200e.c
38593@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
38594 #endif
38595 /* check error condition */
38596 if (*entry->status & STATUS_ERROR)
38597- atomic_inc(&vcc->stats->tx_err);
38598+ atomic_inc_unchecked(&vcc->stats->tx_err);
38599 else
38600- atomic_inc(&vcc->stats->tx);
38601+ atomic_inc_unchecked(&vcc->stats->tx);
38602 }
38603 }
38604
38605@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
38606 if (skb == NULL) {
38607 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
38608
38609- atomic_inc(&vcc->stats->rx_drop);
38610+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38611 return -ENOMEM;
38612 }
38613
38614@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
38615
38616 dev_kfree_skb_any(skb);
38617
38618- atomic_inc(&vcc->stats->rx_drop);
38619+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38620 return -ENOMEM;
38621 }
38622
38623 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
38624
38625 vcc->push(vcc, skb);
38626- atomic_inc(&vcc->stats->rx);
38627+ atomic_inc_unchecked(&vcc->stats->rx);
38628
38629 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
38630
38631@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
38632 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
38633 fore200e->atm_dev->number,
38634 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
38635- atomic_inc(&vcc->stats->rx_err);
38636+ atomic_inc_unchecked(&vcc->stats->rx_err);
38637 }
38638 }
38639
38640@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
38641 goto retry_here;
38642 }
38643
38644- atomic_inc(&vcc->stats->tx_err);
38645+ atomic_inc_unchecked(&vcc->stats->tx_err);
38646
38647 fore200e->tx_sat++;
38648 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
38649diff --git a/drivers/atm/he.c b/drivers/atm/he.c
38650index aa6be26..f70a785 100644
38651--- a/drivers/atm/he.c
38652+++ b/drivers/atm/he.c
38653@@ -1690,7 +1690,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
38654
38655 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
38656 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
38657- atomic_inc(&vcc->stats->rx_drop);
38658+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38659 goto return_host_buffers;
38660 }
38661
38662@@ -1717,7 +1717,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
38663 RBRQ_LEN_ERR(he_dev->rbrq_head)
38664 ? "LEN_ERR" : "",
38665 vcc->vpi, vcc->vci);
38666- atomic_inc(&vcc->stats->rx_err);
38667+ atomic_inc_unchecked(&vcc->stats->rx_err);
38668 goto return_host_buffers;
38669 }
38670
38671@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
38672 vcc->push(vcc, skb);
38673 spin_lock(&he_dev->global_lock);
38674
38675- atomic_inc(&vcc->stats->rx);
38676+ atomic_inc_unchecked(&vcc->stats->rx);
38677
38678 return_host_buffers:
38679 ++pdus_assembled;
38680@@ -2095,7 +2095,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
38681 tpd->vcc->pop(tpd->vcc, tpd->skb);
38682 else
38683 dev_kfree_skb_any(tpd->skb);
38684- atomic_inc(&tpd->vcc->stats->tx_err);
38685+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
38686 }
38687 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
38688 return;
38689@@ -2507,7 +2507,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38690 vcc->pop(vcc, skb);
38691 else
38692 dev_kfree_skb_any(skb);
38693- atomic_inc(&vcc->stats->tx_err);
38694+ atomic_inc_unchecked(&vcc->stats->tx_err);
38695 return -EINVAL;
38696 }
38697
38698@@ -2518,7 +2518,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38699 vcc->pop(vcc, skb);
38700 else
38701 dev_kfree_skb_any(skb);
38702- atomic_inc(&vcc->stats->tx_err);
38703+ atomic_inc_unchecked(&vcc->stats->tx_err);
38704 return -EINVAL;
38705 }
38706 #endif
38707@@ -2530,7 +2530,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38708 vcc->pop(vcc, skb);
38709 else
38710 dev_kfree_skb_any(skb);
38711- atomic_inc(&vcc->stats->tx_err);
38712+ atomic_inc_unchecked(&vcc->stats->tx_err);
38713 spin_unlock_irqrestore(&he_dev->global_lock, flags);
38714 return -ENOMEM;
38715 }
38716@@ -2572,7 +2572,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38717 vcc->pop(vcc, skb);
38718 else
38719 dev_kfree_skb_any(skb);
38720- atomic_inc(&vcc->stats->tx_err);
38721+ atomic_inc_unchecked(&vcc->stats->tx_err);
38722 spin_unlock_irqrestore(&he_dev->global_lock, flags);
38723 return -ENOMEM;
38724 }
38725@@ -2603,7 +2603,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38726 __enqueue_tpd(he_dev, tpd, cid);
38727 spin_unlock_irqrestore(&he_dev->global_lock, flags);
38728
38729- atomic_inc(&vcc->stats->tx);
38730+ atomic_inc_unchecked(&vcc->stats->tx);
38731
38732 return 0;
38733 }
38734diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
38735index 1dc0519..1aadaf7 100644
38736--- a/drivers/atm/horizon.c
38737+++ b/drivers/atm/horizon.c
38738@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
38739 {
38740 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
38741 // VC layer stats
38742- atomic_inc(&vcc->stats->rx);
38743+ atomic_inc_unchecked(&vcc->stats->rx);
38744 __net_timestamp(skb);
38745 // end of our responsibility
38746 vcc->push (vcc, skb);
38747@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
38748 dev->tx_iovec = NULL;
38749
38750 // VC layer stats
38751- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
38752+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
38753
38754 // free the skb
38755 hrz_kfree_skb (skb);
38756diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
38757index b621f56..1e3a799 100644
38758--- a/drivers/atm/idt77252.c
38759+++ b/drivers/atm/idt77252.c
38760@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
38761 else
38762 dev_kfree_skb(skb);
38763
38764- atomic_inc(&vcc->stats->tx);
38765+ atomic_inc_unchecked(&vcc->stats->tx);
38766 }
38767
38768 atomic_dec(&scq->used);
38769@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38770 if ((sb = dev_alloc_skb(64)) == NULL) {
38771 printk("%s: Can't allocate buffers for aal0.\n",
38772 card->name);
38773- atomic_add(i, &vcc->stats->rx_drop);
38774+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
38775 break;
38776 }
38777 if (!atm_charge(vcc, sb->truesize)) {
38778 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
38779 card->name);
38780- atomic_add(i - 1, &vcc->stats->rx_drop);
38781+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
38782 dev_kfree_skb(sb);
38783 break;
38784 }
38785@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38786 ATM_SKB(sb)->vcc = vcc;
38787 __net_timestamp(sb);
38788 vcc->push(vcc, sb);
38789- atomic_inc(&vcc->stats->rx);
38790+ atomic_inc_unchecked(&vcc->stats->rx);
38791
38792 cell += ATM_CELL_PAYLOAD;
38793 }
38794@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38795 "(CDC: %08x)\n",
38796 card->name, len, rpp->len, readl(SAR_REG_CDC));
38797 recycle_rx_pool_skb(card, rpp);
38798- atomic_inc(&vcc->stats->rx_err);
38799+ atomic_inc_unchecked(&vcc->stats->rx_err);
38800 return;
38801 }
38802 if (stat & SAR_RSQE_CRC) {
38803 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
38804 recycle_rx_pool_skb(card, rpp);
38805- atomic_inc(&vcc->stats->rx_err);
38806+ atomic_inc_unchecked(&vcc->stats->rx_err);
38807 return;
38808 }
38809 if (skb_queue_len(&rpp->queue) > 1) {
38810@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38811 RXPRINTK("%s: Can't alloc RX skb.\n",
38812 card->name);
38813 recycle_rx_pool_skb(card, rpp);
38814- atomic_inc(&vcc->stats->rx_err);
38815+ atomic_inc_unchecked(&vcc->stats->rx_err);
38816 return;
38817 }
38818 if (!atm_charge(vcc, skb->truesize)) {
38819@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38820 __net_timestamp(skb);
38821
38822 vcc->push(vcc, skb);
38823- atomic_inc(&vcc->stats->rx);
38824+ atomic_inc_unchecked(&vcc->stats->rx);
38825
38826 return;
38827 }
38828@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38829 __net_timestamp(skb);
38830
38831 vcc->push(vcc, skb);
38832- atomic_inc(&vcc->stats->rx);
38833+ atomic_inc_unchecked(&vcc->stats->rx);
38834
38835 if (skb->truesize > SAR_FB_SIZE_3)
38836 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
38837@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
38838 if (vcc->qos.aal != ATM_AAL0) {
38839 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
38840 card->name, vpi, vci);
38841- atomic_inc(&vcc->stats->rx_drop);
38842+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38843 goto drop;
38844 }
38845
38846 if ((sb = dev_alloc_skb(64)) == NULL) {
38847 printk("%s: Can't allocate buffers for AAL0.\n",
38848 card->name);
38849- atomic_inc(&vcc->stats->rx_err);
38850+ atomic_inc_unchecked(&vcc->stats->rx_err);
38851 goto drop;
38852 }
38853
38854@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
38855 ATM_SKB(sb)->vcc = vcc;
38856 __net_timestamp(sb);
38857 vcc->push(vcc, sb);
38858- atomic_inc(&vcc->stats->rx);
38859+ atomic_inc_unchecked(&vcc->stats->rx);
38860
38861 drop:
38862 skb_pull(queue, 64);
38863@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
38864
38865 if (vc == NULL) {
38866 printk("%s: NULL connection in send().\n", card->name);
38867- atomic_inc(&vcc->stats->tx_err);
38868+ atomic_inc_unchecked(&vcc->stats->tx_err);
38869 dev_kfree_skb(skb);
38870 return -EINVAL;
38871 }
38872 if (!test_bit(VCF_TX, &vc->flags)) {
38873 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
38874- atomic_inc(&vcc->stats->tx_err);
38875+ atomic_inc_unchecked(&vcc->stats->tx_err);
38876 dev_kfree_skb(skb);
38877 return -EINVAL;
38878 }
38879@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
38880 break;
38881 default:
38882 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
38883- atomic_inc(&vcc->stats->tx_err);
38884+ atomic_inc_unchecked(&vcc->stats->tx_err);
38885 dev_kfree_skb(skb);
38886 return -EINVAL;
38887 }
38888
38889 if (skb_shinfo(skb)->nr_frags != 0) {
38890 printk("%s: No scatter-gather yet.\n", card->name);
38891- atomic_inc(&vcc->stats->tx_err);
38892+ atomic_inc_unchecked(&vcc->stats->tx_err);
38893 dev_kfree_skb(skb);
38894 return -EINVAL;
38895 }
38896@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
38897
38898 err = queue_skb(card, vc, skb, oam);
38899 if (err) {
38900- atomic_inc(&vcc->stats->tx_err);
38901+ atomic_inc_unchecked(&vcc->stats->tx_err);
38902 dev_kfree_skb(skb);
38903 return err;
38904 }
38905@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
38906 skb = dev_alloc_skb(64);
38907 if (!skb) {
38908 printk("%s: Out of memory in send_oam().\n", card->name);
38909- atomic_inc(&vcc->stats->tx_err);
38910+ atomic_inc_unchecked(&vcc->stats->tx_err);
38911 return -ENOMEM;
38912 }
38913 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
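/*
 * Context for the ATM driver hunks above and below: the patch mechanically
 * rewrites atomic_t statistics (stats->rx, rx_err, rx_drop, tx, tx_err) to
 * atomic_unchecked_t. Under PaX's REFCOUNT feature, plain atomic_inc() is
 * instrumented to trap on signed overflow so reference-count overflows
 * cannot be exploited; the *_unchecked variants opt out for counters whose
 * wraparound is harmless, such as these per-VCC statistics. A minimal
 * user-space model of the distinction (the kernel versions are defined
 * elsewhere in this patch; this sketch only mirrors their intent):
 */
#include <stdio.h>

typedef struct { volatile int counter; } atomic_unchecked_t;

/* same increment as atomic_inc(), minus the REFCOUNT overflow trap */
static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        __sync_fetch_and_add(&v->counter, 1);
}

int main(void)
{
        atomic_unchecked_t rx_err = { 0 };

        atomic_inc_unchecked(&rx_err);  /* mirrors the vcc->stats->rx_err updates */
        printf("rx_err = %d\n", rx_err.counter);
        return 0;
}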
38914diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
38915index 4217f29..88f547a 100644
38916--- a/drivers/atm/iphase.c
38917+++ b/drivers/atm/iphase.c
38918@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
38919 status = (u_short) (buf_desc_ptr->desc_mode);
38920 if (status & (RX_CER | RX_PTE | RX_OFL))
38921 {
38922- atomic_inc(&vcc->stats->rx_err);
38923+ atomic_inc_unchecked(&vcc->stats->rx_err);
38924 IF_ERR(printk("IA: bad packet, dropping it");)
38925 if (status & RX_CER) {
38926 IF_ERR(printk(" cause: packet CRC error\n");)
38927@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
38928 len = dma_addr - buf_addr;
38929 if (len > iadev->rx_buf_sz) {
38930 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
38931- atomic_inc(&vcc->stats->rx_err);
38932+ atomic_inc_unchecked(&vcc->stats->rx_err);
38933 goto out_free_desc;
38934 }
38935
38936@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
38937 ia_vcc = INPH_IA_VCC(vcc);
38938 if (ia_vcc == NULL)
38939 {
38940- atomic_inc(&vcc->stats->rx_err);
38941+ atomic_inc_unchecked(&vcc->stats->rx_err);
38942 atm_return(vcc, skb->truesize);
38943 dev_kfree_skb_any(skb);
38944 goto INCR_DLE;
38945@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
38946 if ((length > iadev->rx_buf_sz) || (length >
38947 (skb->len - sizeof(struct cpcs_trailer))))
38948 {
38949- atomic_inc(&vcc->stats->rx_err);
38950+ atomic_inc_unchecked(&vcc->stats->rx_err);
38951 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
38952 length, skb->len);)
38953 atm_return(vcc, skb->truesize);
38954@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
38955
38956 IF_RX(printk("rx_dle_intr: skb push");)
38957 vcc->push(vcc,skb);
38958- atomic_inc(&vcc->stats->rx);
38959+ atomic_inc_unchecked(&vcc->stats->rx);
38960 iadev->rx_pkt_cnt++;
38961 }
38962 INCR_DLE:
38963@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
38964 {
38965 struct k_sonet_stats *stats;
38966 stats = &PRIV(_ia_dev[board])->sonet_stats;
38967- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
38968- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
38969- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
38970- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
38971- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
38972- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
38973- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
38974- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
38975- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
38976+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
38977+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
38978+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
38979+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
38980+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
38981+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
38982+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
38983+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
38984+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
38985 }
38986 ia_cmds.status = 0;
38987 break;
38988@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
38989 if ((desc == 0) || (desc > iadev->num_tx_desc))
38990 {
38991 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
38992- atomic_inc(&vcc->stats->tx);
38993+ atomic_inc_unchecked(&vcc->stats->tx);
38994 if (vcc->pop)
38995 vcc->pop(vcc, skb);
38996 else
38997@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
38998 ATM_DESC(skb) = vcc->vci;
38999 skb_queue_tail(&iadev->tx_dma_q, skb);
39000
39001- atomic_inc(&vcc->stats->tx);
39002+ atomic_inc_unchecked(&vcc->stats->tx);
39003 iadev->tx_pkt_cnt++;
39004 /* Increment transaction counter */
39005 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
39006
39007 #if 0
39008 /* add flow control logic */
39009- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
39010+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
39011 if (iavcc->vc_desc_cnt > 10) {
39012 vcc->tx_quota = vcc->tx_quota * 3 / 4;
39013 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
39014diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
39015index fa7d7019..1e404c7 100644
39016--- a/drivers/atm/lanai.c
39017+++ b/drivers/atm/lanai.c
39018@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
39019 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
39020 lanai_endtx(lanai, lvcc);
39021 lanai_free_skb(lvcc->tx.atmvcc, skb);
39022- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
39023+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
39024 }
39025
39026 /* Try to fill the buffer - don't call unless there is backlog */
39027@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
39028 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
39029 __net_timestamp(skb);
39030 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
39031- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
39032+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
39033 out:
39034 lvcc->rx.buf.ptr = end;
39035 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
39036@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
39037 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
39038 "vcc %d\n", lanai->number, (unsigned int) s, vci);
39039 lanai->stats.service_rxnotaal5++;
39040- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
39041+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
39042 return 0;
39043 }
39044 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
39045@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
39046 int bytes;
39047 read_unlock(&vcc_sklist_lock);
39048 DPRINTK("got trashed rx pdu on vci %d\n", vci);
39049- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
39050+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
39051 lvcc->stats.x.aal5.service_trash++;
39052 bytes = (SERVICE_GET_END(s) * 16) -
39053 (((unsigned long) lvcc->rx.buf.ptr) -
39054@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
39055 }
39056 if (s & SERVICE_STREAM) {
39057 read_unlock(&vcc_sklist_lock);
39058- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
39059+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
39060 lvcc->stats.x.aal5.service_stream++;
39061 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
39062 "PDU on VCI %d!\n", lanai->number, vci);
39063@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
39064 return 0;
39065 }
39066 DPRINTK("got rx crc error on vci %d\n", vci);
39067- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
39068+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
39069 lvcc->stats.x.aal5.service_rxcrc++;
39070 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
39071 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
39072diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
39073index 9988ac9..7c52585 100644
39074--- a/drivers/atm/nicstar.c
39075+++ b/drivers/atm/nicstar.c
39076@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
39077 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
39078 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
39079 card->index);
39080- atomic_inc(&vcc->stats->tx_err);
39081+ atomic_inc_unchecked(&vcc->stats->tx_err);
39082 dev_kfree_skb_any(skb);
39083 return -EINVAL;
39084 }
39085@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
39086 if (!vc->tx) {
39087 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
39088 card->index);
39089- atomic_inc(&vcc->stats->tx_err);
39090+ atomic_inc_unchecked(&vcc->stats->tx_err);
39091 dev_kfree_skb_any(skb);
39092 return -EINVAL;
39093 }
39094@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
39095 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
39096 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
39097 card->index);
39098- atomic_inc(&vcc->stats->tx_err);
39099+ atomic_inc_unchecked(&vcc->stats->tx_err);
39100 dev_kfree_skb_any(skb);
39101 return -EINVAL;
39102 }
39103
39104 if (skb_shinfo(skb)->nr_frags != 0) {
39105 printk("nicstar%d: No scatter-gather yet.\n", card->index);
39106- atomic_inc(&vcc->stats->tx_err);
39107+ atomic_inc_unchecked(&vcc->stats->tx_err);
39108 dev_kfree_skb_any(skb);
39109 return -EINVAL;
39110 }
39111@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
39112 }
39113
39114 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
39115- atomic_inc(&vcc->stats->tx_err);
39116+ atomic_inc_unchecked(&vcc->stats->tx_err);
39117 dev_kfree_skb_any(skb);
39118 return -EIO;
39119 }
39120- atomic_inc(&vcc->stats->tx);
39121+ atomic_inc_unchecked(&vcc->stats->tx);
39122
39123 return 0;
39124 }
39125@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39126 printk
39127 ("nicstar%d: Can't allocate buffers for aal0.\n",
39128 card->index);
39129- atomic_add(i, &vcc->stats->rx_drop);
39130+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
39131 break;
39132 }
39133 if (!atm_charge(vcc, sb->truesize)) {
39134 RXPRINTK
39135 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
39136 card->index);
39137- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
39138+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
39139 dev_kfree_skb_any(sb);
39140 break;
39141 }
39142@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39143 ATM_SKB(sb)->vcc = vcc;
39144 __net_timestamp(sb);
39145 vcc->push(vcc, sb);
39146- atomic_inc(&vcc->stats->rx);
39147+ atomic_inc_unchecked(&vcc->stats->rx);
39148 cell += ATM_CELL_PAYLOAD;
39149 }
39150
39151@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39152 if (iovb == NULL) {
39153 printk("nicstar%d: Out of iovec buffers.\n",
39154 card->index);
39155- atomic_inc(&vcc->stats->rx_drop);
39156+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39157 recycle_rx_buf(card, skb);
39158 return;
39159 }
39160@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39161 small or large buffer itself. */
39162 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
39163 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
39164- atomic_inc(&vcc->stats->rx_err);
39165+ atomic_inc_unchecked(&vcc->stats->rx_err);
39166 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
39167 NS_MAX_IOVECS);
39168 NS_PRV_IOVCNT(iovb) = 0;
39169@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39170 ("nicstar%d: Expected a small buffer, and this is not one.\n",
39171 card->index);
39172 which_list(card, skb);
39173- atomic_inc(&vcc->stats->rx_err);
39174+ atomic_inc_unchecked(&vcc->stats->rx_err);
39175 recycle_rx_buf(card, skb);
39176 vc->rx_iov = NULL;
39177 recycle_iov_buf(card, iovb);
39178@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39179 ("nicstar%d: Expected a large buffer, and this is not one.\n",
39180 card->index);
39181 which_list(card, skb);
39182- atomic_inc(&vcc->stats->rx_err);
39183+ atomic_inc_unchecked(&vcc->stats->rx_err);
39184 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
39185 NS_PRV_IOVCNT(iovb));
39186 vc->rx_iov = NULL;
39187@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39188 printk(" - PDU size mismatch.\n");
39189 else
39190 printk(".\n");
39191- atomic_inc(&vcc->stats->rx_err);
39192+ atomic_inc_unchecked(&vcc->stats->rx_err);
39193 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
39194 NS_PRV_IOVCNT(iovb));
39195 vc->rx_iov = NULL;
39196@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39197 /* skb points to a small buffer */
39198 if (!atm_charge(vcc, skb->truesize)) {
39199 push_rxbufs(card, skb);
39200- atomic_inc(&vcc->stats->rx_drop);
39201+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39202 } else {
39203 skb_put(skb, len);
39204 dequeue_sm_buf(card, skb);
39205@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39206 ATM_SKB(skb)->vcc = vcc;
39207 __net_timestamp(skb);
39208 vcc->push(vcc, skb);
39209- atomic_inc(&vcc->stats->rx);
39210+ atomic_inc_unchecked(&vcc->stats->rx);
39211 }
39212 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
39213 struct sk_buff *sb;
39214@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39215 if (len <= NS_SMBUFSIZE) {
39216 if (!atm_charge(vcc, sb->truesize)) {
39217 push_rxbufs(card, sb);
39218- atomic_inc(&vcc->stats->rx_drop);
39219+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39220 } else {
39221 skb_put(sb, len);
39222 dequeue_sm_buf(card, sb);
39223@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39224 ATM_SKB(sb)->vcc = vcc;
39225 __net_timestamp(sb);
39226 vcc->push(vcc, sb);
39227- atomic_inc(&vcc->stats->rx);
39228+ atomic_inc_unchecked(&vcc->stats->rx);
39229 }
39230
39231 push_rxbufs(card, skb);
39232@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39233
39234 if (!atm_charge(vcc, skb->truesize)) {
39235 push_rxbufs(card, skb);
39236- atomic_inc(&vcc->stats->rx_drop);
39237+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39238 } else {
39239 dequeue_lg_buf(card, skb);
39240 #ifdef NS_USE_DESTRUCTORS
39241@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39242 ATM_SKB(skb)->vcc = vcc;
39243 __net_timestamp(skb);
39244 vcc->push(vcc, skb);
39245- atomic_inc(&vcc->stats->rx);
39246+ atomic_inc_unchecked(&vcc->stats->rx);
39247 }
39248
39249 push_rxbufs(card, sb);
39250@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39251 printk
39252 ("nicstar%d: Out of huge buffers.\n",
39253 card->index);
39254- atomic_inc(&vcc->stats->rx_drop);
39255+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39256 recycle_iovec_rx_bufs(card,
39257 (struct iovec *)
39258 iovb->data,
39259@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39260 card->hbpool.count++;
39261 } else
39262 dev_kfree_skb_any(hb);
39263- atomic_inc(&vcc->stats->rx_drop);
39264+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39265 } else {
39266 /* Copy the small buffer to the huge buffer */
39267 sb = (struct sk_buff *)iov->iov_base;
39268@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39269 #endif /* NS_USE_DESTRUCTORS */
39270 __net_timestamp(hb);
39271 vcc->push(vcc, hb);
39272- atomic_inc(&vcc->stats->rx);
39273+ atomic_inc_unchecked(&vcc->stats->rx);
39274 }
39275 }
39276
39277diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
39278index 943cf0d..37d15d5 100644
39279--- a/drivers/atm/solos-pci.c
39280+++ b/drivers/atm/solos-pci.c
39281@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
39282 }
39283 atm_charge(vcc, skb->truesize);
39284 vcc->push(vcc, skb);
39285- atomic_inc(&vcc->stats->rx);
39286+ atomic_inc_unchecked(&vcc->stats->rx);
39287 break;
39288
39289 case PKT_STATUS:
39290@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
39291 vcc = SKB_CB(oldskb)->vcc;
39292
39293 if (vcc) {
39294- atomic_inc(&vcc->stats->tx);
39295+ atomic_inc_unchecked(&vcc->stats->tx);
39296 solos_pop(vcc, oldskb);
39297 } else {
39298 dev_kfree_skb_irq(oldskb);
39299diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
39300index 0215934..ce9f5b1 100644
39301--- a/drivers/atm/suni.c
39302+++ b/drivers/atm/suni.c
39303@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
39304
39305
39306 #define ADD_LIMITED(s,v) \
39307- atomic_add((v),&stats->s); \
39308- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
39309+ atomic_add_unchecked((v),&stats->s); \
39310+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
39311
39312
39313 static void suni_hz(unsigned long from_timer)
39314diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
39315index 5120a96..e2572bd 100644
39316--- a/drivers/atm/uPD98402.c
39317+++ b/drivers/atm/uPD98402.c
39318@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
39319 struct sonet_stats tmp;
39320 int error = 0;
39321
39322- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
39323+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
39324 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
39325 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
39326 if (zero && !error) {
39327@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
39328
39329
39330 #define ADD_LIMITED(s,v) \
39331- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
39332- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
39333- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
39334+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
39335+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
39336+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
39337
39338
39339 static void stat_event(struct atm_dev *dev)
39340@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
39341 if (reason & uPD98402_INT_PFM) stat_event(dev);
39342 if (reason & uPD98402_INT_PCO) {
39343 (void) GET(PCOCR); /* clear interrupt cause */
39344- atomic_add(GET(HECCT),
39345+ atomic_add_unchecked(GET(HECCT),
39346 &PRIV(dev)->sonet_stats.uncorr_hcs);
39347 }
39348 if ((reason & uPD98402_INT_RFO) &&
39349@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
39350 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
39351 uPD98402_INT_LOS),PIMR); /* enable them */
39352 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
39353- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
39354- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
39355- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
39356+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
39357+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
39358+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
39359 return 0;
39360 }
39361
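/*
 * The ADD_LIMITED macros in suni.c and uPD98402.c (above) implement a
 * saturating SONET counter: add, then clamp to INT_MAX if the value wrapped
 * negative. The patch only swaps in the _unchecked atomics so PaX's
 * overflow trap does not fire on this deliberate wraparound. The same logic
 * reduced to a plain function (sketch; add_limited is a hypothetical name,
 * and the kernel builds with -fno-strict-overflow, so the signed wrap this
 * relies on is well-defined there):
 */
#include <limits.h>

static void add_limited(int *stat, int v)
{
        *stat += v;
        if (*stat < 0)            /* overflow wrapped the counter negative */
                *stat = INT_MAX;  /* saturate, exactly as ADD_LIMITED does */
}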
39362diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
39363index 969c3c2..9b72956 100644
39364--- a/drivers/atm/zatm.c
39365+++ b/drivers/atm/zatm.c
39366@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
39367 }
39368 if (!size) {
39369 dev_kfree_skb_irq(skb);
39370- if (vcc) atomic_inc(&vcc->stats->rx_err);
39371+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
39372 continue;
39373 }
39374 if (!atm_charge(vcc,skb->truesize)) {
39375@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
39376 skb->len = size;
39377 ATM_SKB(skb)->vcc = vcc;
39378 vcc->push(vcc,skb);
39379- atomic_inc(&vcc->stats->rx);
39380+ atomic_inc_unchecked(&vcc->stats->rx);
39381 }
39382 zout(pos & 0xffff,MTA(mbx));
39383 #if 0 /* probably a stupid idea */
39384@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
39385 skb_queue_head(&zatm_vcc->backlog,skb);
39386 break;
39387 }
39388- atomic_inc(&vcc->stats->tx);
39389+ atomic_inc_unchecked(&vcc->stats->tx);
39390 wake_up(&zatm_vcc->tx_wait);
39391 }
39392
39393diff --git a/drivers/base/bus.c b/drivers/base/bus.c
39394index 83e910a..b224a73 100644
39395--- a/drivers/base/bus.c
39396+++ b/drivers/base/bus.c
39397@@ -1124,7 +1124,7 @@ int subsys_interface_register(struct subsys_interface *sif)
39398 return -EINVAL;
39399
39400 mutex_lock(&subsys->p->mutex);
39401- list_add_tail(&sif->node, &subsys->p->interfaces);
39402+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
39403 if (sif->add_dev) {
39404 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
39405 while ((dev = subsys_dev_iter_next(&iter)))
39406@@ -1149,7 +1149,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
39407 subsys = sif->subsys;
39408
39409 mutex_lock(&subsys->p->mutex);
39410- list_del_init(&sif->node);
39411+ pax_list_del_init((struct list_head *)&sif->node);
39412 if (sif->remove_dev) {
39413 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
39414 while ((dev = subsys_dev_iter_next(&iter)))
39415diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
39416index 25798db..15f130e 100644
39417--- a/drivers/base/devtmpfs.c
39418+++ b/drivers/base/devtmpfs.c
39419@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
39420 if (!thread)
39421 return 0;
39422
39423- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
39424+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
39425 if (err)
39426 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
39427 else
39428@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
39429 *err = sys_unshare(CLONE_NEWNS);
39430 if (*err)
39431 goto out;
39432- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
39433+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
39434 if (*err)
39435 goto out;
39436- sys_chdir("/.."); /* will traverse into overmounted root */
39437- sys_chroot(".");
39438+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
39439+ sys_chroot((char __force_user *)".");
39440 complete(&setup_done);
39441 while (1) {
39442 spin_lock(&req_lock);
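/*
 * The devtmpfs hunks above change only annotations: sys_mount(), sys_chdir()
 * and sys_chroot() take __user pointers, and with PaX/UDEREF separating the
 * user and kernel address spaces the patch requires kernel strings passed
 * under set_fs(KERNEL_DS) to be cast explicitly with __force_user rather
 * than relying on an implicit mixed-address-space conversion. A
 * checker-only sketch of the sparse machinery behind those annotations (no
 * runtime effect; the macro shapes below mirror the usual kernel ones, and
 * sys_mount is declared, not implemented, so this is illustration only):
 */
#include <stddef.h>

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user __force __user     /* deliberate cast across spaces */

extern long sys_mount(char __user *dev_name, char __user *dir_name,
                      char __user *type, unsigned long flags,
                      void __user *data);

static long mount_devtmpfs(const char *mntdir)
{
        /* kernel-space strings knowingly handed to a user-pointer API */
        return sys_mount((char __force_user *)"devtmpfs",
                         (char __force_user *)mntdir,
                         (char __force_user *)"devtmpfs",
                         0x8000 /* MS_SILENT */, NULL);
}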
39443diff --git a/drivers/base/node.c b/drivers/base/node.c
39444index 8f7ed99..700dd0c 100644
39445--- a/drivers/base/node.c
39446+++ b/drivers/base/node.c
39447@@ -624,7 +624,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
39448 struct node_attr {
39449 struct device_attribute attr;
39450 enum node_states state;
39451-};
39452+} __do_const;
39453
39454 static ssize_t show_node_state(struct device *dev,
39455 struct device_attribute *attr, char *buf)
39456diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
39457index eee55c1..b8c9393 100644
39458--- a/drivers/base/power/domain.c
39459+++ b/drivers/base/power/domain.c
39460@@ -1821,9 +1821,9 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
39461
39462 if (dev->power.subsys_data->domain_data) {
39463 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
39464- gpd_data->ops = (struct gpd_dev_ops){ NULL };
39465+ memset(&gpd_data->ops, 0, sizeof(gpd_data->ops));
39466 if (clear_td)
39467- gpd_data->td = (struct gpd_timing_data){ 0 };
39468+ memset(&gpd_data->td, 0, sizeof(gpd_data->td));
39469
39470 if (--gpd_data->refcount == 0) {
39471 dev->power.subsys_data->domain_data = NULL;
39472@@ -1862,7 +1862,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
39473 {
39474 struct cpuidle_driver *cpuidle_drv;
39475 struct gpd_cpu_data *cpu_data;
39476- struct cpuidle_state *idle_state;
39477+ cpuidle_state_no_const *idle_state;
39478 int ret = 0;
39479
39480 if (IS_ERR_OR_NULL(genpd) || state < 0)
39481@@ -1930,7 +1930,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
39482 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
39483 {
39484 struct gpd_cpu_data *cpu_data;
39485- struct cpuidle_state *idle_state;
39486+ cpuidle_state_no_const *idle_state;
39487 int ret = 0;
39488
39489 if (IS_ERR_OR_NULL(genpd))
39490diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
39491index 95b181d1..c4f0e19 100644
39492--- a/drivers/base/power/sysfs.c
39493+++ b/drivers/base/power/sysfs.c
39494@@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev,
39495 return -EIO;
39496 }
39497 }
39498- return sprintf(buf, p);
39499+ return sprintf(buf, "%s", p);
39500 }
39501
39502 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
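/*
 * The one-line sysfs.c fix above closes a format-string hazard: p is data
 * (a runtime status name), not a format. If p ever contained a '%'
 * conversion, sprintf(buf, p) would walk nonexistent varargs. The safe and
 * unsafe forms side by side (minimal sketch):
 */
#include <stdio.h>

static int show_status(char *buf, const char *p)
{
        /* unsafe: sprintf(buf, p) -- p would be parsed as a format string */
        return sprintf(buf, "%s", p);   /* safe: p is copied verbatim */
}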
39503diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
39504index eb1bd2e..2667d3a 100644
39505--- a/drivers/base/power/wakeup.c
39506+++ b/drivers/base/power/wakeup.c
39507@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
39508 * They need to be modified together atomically, so it's better to use one
39509 * atomic variable to hold them both.
39510 */
39511-static atomic_t combined_event_count = ATOMIC_INIT(0);
39512+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
39513
39514 #define IN_PROGRESS_BITS (sizeof(int) * 4)
39515 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
39516
39517 static void split_counters(unsigned int *cnt, unsigned int *inpr)
39518 {
39519- unsigned int comb = atomic_read(&combined_event_count);
39520+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
39521
39522 *cnt = (comb >> IN_PROGRESS_BITS);
39523 *inpr = comb & MAX_IN_PROGRESS;
39524@@ -401,7 +401,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
39525 ws->start_prevent_time = ws->last_time;
39526
39527 /* Increment the counter of events in progress. */
39528- cec = atomic_inc_return(&combined_event_count);
39529+ cec = atomic_inc_return_unchecked(&combined_event_count);
39530
39531 trace_wakeup_source_activate(ws->name, cec);
39532 }
39533@@ -527,7 +527,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
39534 * Increment the counter of registered wakeup events and decrement the
39535 * couter of wakeup events in progress simultaneously.
39536 */
39537- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
39538+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
39539 trace_wakeup_source_deactivate(ws->name, cec);
39540
39541 split_counters(&cnt, &inpr);
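/*
 * wakeup.c (above) packs two counters into one atomic word -- registered
 * wakeup events in the high half, events in progress in the low half -- so
 * a single atomic add can bump one and drop the other together; the patch
 * switches the word to atomic_unchecked_t because the carry out of the low
 * half is part of the design, not an overflow bug. The packing, sketched
 * stand-alone:
 */
#include <stdio.h>

#define IN_PROGRESS_BITS (sizeof(int) * 4)
#define MAX_IN_PROGRESS  ((1U << IN_PROGRESS_BITS) - 1)

static void split_counters(unsigned int comb, unsigned int *cnt,
                           unsigned int *inpr)
{
        *cnt  = comb >> IN_PROGRESS_BITS;       /* completed wakeup events */
        *inpr = comb &  MAX_IN_PROGRESS;        /* events still in progress */
}

int main(void)
{
        unsigned int comb = 1;  /* one event in progress */
        unsigned int cnt, inpr;

        comb += MAX_IN_PROGRESS;        /* "deactivate": +1 event, -1 in progress */
        split_counters(comb, &cnt, &inpr);
        printf("cnt=%u inpr=%u\n", cnt, inpr);  /* prints cnt=1 inpr=0 */
        return 0;
}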
39542diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
39543index dbb8350..4762f4c 100644
39544--- a/drivers/base/syscore.c
39545+++ b/drivers/base/syscore.c
39546@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
39547 void register_syscore_ops(struct syscore_ops *ops)
39548 {
39549 mutex_lock(&syscore_ops_lock);
39550- list_add_tail(&ops->node, &syscore_ops_list);
39551+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
39552 mutex_unlock(&syscore_ops_lock);
39553 }
39554 EXPORT_SYMBOL_GPL(register_syscore_ops);
39555@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
39556 void unregister_syscore_ops(struct syscore_ops *ops)
39557 {
39558 mutex_lock(&syscore_ops_lock);
39559- list_del(&ops->node);
39560+ pax_list_del((struct list_head *)&ops->node);
39561 mutex_unlock(&syscore_ops_lock);
39562 }
39563 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
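/*
 * bus.c and syscore.c (above) route list insertion and removal through the
 * patch's pax_list_* helpers because the list nodes sit inside structures
 * the constify plugin has placed in read-only memory. A compilable sketch
 * of the idea, with pax_open_kernel()/pax_close_kernel() stubbed as no-ops
 * (in the real patch they briefly lift kernel write protection, e.g. by
 * toggling CR0.WP on x86):
 */
struct list_head { struct list_head *next, *prev; };

static void pax_open_kernel(void)  { /* stub: really disables write protect */ }
static void pax_close_kernel(void) { /* stub: really restores write protect */ }

static void pax_list_add_tail(struct list_head *new, struct list_head *head)
{
        struct list_head *prev = head->prev;

        pax_open_kernel();
        new->next  = head;
        new->prev  = prev;
        prev->next = new;
        head->prev = new;
        pax_close_kernel();
}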
39564diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
39565index 4595c22..d4f6c54 100644
39566--- a/drivers/block/cciss.c
39567+++ b/drivers/block/cciss.c
39568@@ -3011,7 +3011,7 @@ static void start_io(ctlr_info_t *h)
39569 while (!list_empty(&h->reqQ)) {
39570 c = list_entry(h->reqQ.next, CommandList_struct, list);
39571 /* can't do anything if fifo is full */
39572- if ((h->access.fifo_full(h))) {
39573+ if ((h->access->fifo_full(h))) {
39574 dev_warn(&h->pdev->dev, "fifo full\n");
39575 break;
39576 }
39577@@ -3021,7 +3021,7 @@ static void start_io(ctlr_info_t *h)
39578 h->Qdepth--;
39579
39580 /* Tell the controller execute command */
39581- h->access.submit_command(h, c);
39582+ h->access->submit_command(h, c);
39583
39584 /* Put job onto the completed Q */
39585 addQ(&h->cmpQ, c);
39586@@ -3447,17 +3447,17 @@ startio:
39587
39588 static inline unsigned long get_next_completion(ctlr_info_t *h)
39589 {
39590- return h->access.command_completed(h);
39591+ return h->access->command_completed(h);
39592 }
39593
39594 static inline int interrupt_pending(ctlr_info_t *h)
39595 {
39596- return h->access.intr_pending(h);
39597+ return h->access->intr_pending(h);
39598 }
39599
39600 static inline long interrupt_not_for_us(ctlr_info_t *h)
39601 {
39602- return ((h->access.intr_pending(h) == 0) ||
39603+ return ((h->access->intr_pending(h) == 0) ||
39604 (h->interrupts_enabled == 0));
39605 }
39606
39607@@ -3490,7 +3490,7 @@ static inline u32 next_command(ctlr_info_t *h)
39608 u32 a;
39609
39610 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
39611- return h->access.command_completed(h);
39612+ return h->access->command_completed(h);
39613
39614 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
39615 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
39616@@ -4047,7 +4047,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
39617 trans_support & CFGTBL_Trans_use_short_tags);
39618
39619 /* Change the access methods to the performant access methods */
39620- h->access = SA5_performant_access;
39621+ h->access = &SA5_performant_access;
39622 h->transMethod = CFGTBL_Trans_Performant;
39623
39624 return;
39625@@ -4321,7 +4321,7 @@ static int cciss_pci_init(ctlr_info_t *h)
39626 if (prod_index < 0)
39627 return -ENODEV;
39628 h->product_name = products[prod_index].product_name;
39629- h->access = *(products[prod_index].access);
39630+ h->access = products[prod_index].access;
39631
39632 if (cciss_board_disabled(h)) {
39633 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
39634@@ -5053,7 +5053,7 @@ reinit_after_soft_reset:
39635 }
39636
39637 /* make sure the board interrupts are off */
39638- h->access.set_intr_mask(h, CCISS_INTR_OFF);
39639+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
39640 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
39641 if (rc)
39642 goto clean2;
39643@@ -5103,7 +5103,7 @@ reinit_after_soft_reset:
39644 * fake ones to scoop up any residual completions.
39645 */
39646 spin_lock_irqsave(&h->lock, flags);
39647- h->access.set_intr_mask(h, CCISS_INTR_OFF);
39648+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
39649 spin_unlock_irqrestore(&h->lock, flags);
39650 free_irq(h->intr[h->intr_mode], h);
39651 rc = cciss_request_irq(h, cciss_msix_discard_completions,
39652@@ -5123,9 +5123,9 @@ reinit_after_soft_reset:
39653 dev_info(&h->pdev->dev, "Board READY.\n");
39654 dev_info(&h->pdev->dev,
39655 "Waiting for stale completions to drain.\n");
39656- h->access.set_intr_mask(h, CCISS_INTR_ON);
39657+ h->access->set_intr_mask(h, CCISS_INTR_ON);
39658 msleep(10000);
39659- h->access.set_intr_mask(h, CCISS_INTR_OFF);
39660+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
39661
39662 rc = controller_reset_failed(h->cfgtable);
39663 if (rc)
39664@@ -5148,7 +5148,7 @@ reinit_after_soft_reset:
39665 cciss_scsi_setup(h);
39666
39667 /* Turn the interrupts on so we can service requests */
39668- h->access.set_intr_mask(h, CCISS_INTR_ON);
39669+ h->access->set_intr_mask(h, CCISS_INTR_ON);
39670
39671 /* Get the firmware version */
39672 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
39673@@ -5220,7 +5220,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
39674 kfree(flush_buf);
39675 if (return_code != IO_OK)
39676 dev_warn(&h->pdev->dev, "Error flushing cache\n");
39677- h->access.set_intr_mask(h, CCISS_INTR_OFF);
39678+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
39679 free_irq(h->intr[h->intr_mode], h);
39680 }
39681
39682diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
39683index 7fda30e..2f27946 100644
39684--- a/drivers/block/cciss.h
39685+++ b/drivers/block/cciss.h
39686@@ -101,7 +101,7 @@ struct ctlr_info
39687 /* information about each logical volume */
39688 drive_info_struct *drv[CISS_MAX_LUN];
39689
39690- struct access_method access;
39691+ struct access_method *access;
39692
39693 /* queue and queue Info */
39694 struct list_head reqQ;
39695@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
39696 }
39697
39698 static struct access_method SA5_access = {
39699- SA5_submit_command,
39700- SA5_intr_mask,
39701- SA5_fifo_full,
39702- SA5_intr_pending,
39703- SA5_completed,
39704+ .submit_command = SA5_submit_command,
39705+ .set_intr_mask = SA5_intr_mask,
39706+ .fifo_full = SA5_fifo_full,
39707+ .intr_pending = SA5_intr_pending,
39708+ .command_completed = SA5_completed,
39709 };
39710
39711 static struct access_method SA5B_access = {
39712- SA5_submit_command,
39713- SA5B_intr_mask,
39714- SA5_fifo_full,
39715- SA5B_intr_pending,
39716- SA5_completed,
39717+ .submit_command = SA5_submit_command,
39718+ .set_intr_mask = SA5B_intr_mask,
39719+ .fifo_full = SA5_fifo_full,
39720+ .intr_pending = SA5B_intr_pending,
39721+ .command_completed = SA5_completed,
39722 };
39723
39724 static struct access_method SA5_performant_access = {
39725- SA5_submit_command,
39726- SA5_performant_intr_mask,
39727- SA5_fifo_full,
39728- SA5_performant_intr_pending,
39729- SA5_performant_completed,
39730+ .submit_command = SA5_submit_command,
39731+ .set_intr_mask = SA5_performant_intr_mask,
39732+ .fifo_full = SA5_fifo_full,
39733+ .intr_pending = SA5_performant_intr_pending,
39734+ .command_completed = SA5_performant_completed,
39735 };
39736
39737 struct board_type {
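/*
 * Two related changes run through cciss, cpqarray and smart1,2.h above and
 * below: the shared method tables gain designated initializers, and each
 * controller now holds a pointer to a table instead of a copy of it
 * (h->access = &SA5_performant_access rather than h->access = *(...)).
 * Copying would require a writable destination; pointing at one shared
 * table lets the function pointers stay in read-only memory under the
 * constify plugin, and designated initializers keep the tables correct if
 * fields are ever reordered. Condensed sketch (field types simplified):
 */
struct access_method {
        void (*submit_command)(void *h, void *c);
        void (*set_intr_mask)(void *h, unsigned long mask);
};

static void sa5_submit(void *h, void *c)          { (void)h; (void)c; }
static void sa5_mask(void *h, unsigned long mask) { (void)h; (void)mask; }

static const struct access_method SA5_access = {
        .submit_command = sa5_submit,   /* order-independent initializers */
        .set_intr_mask  = sa5_mask,
};

struct ctlr_info {
        const struct access_method *access; /* was: struct access_method access */
};

static void board_init(struct ctlr_info *h)
{
        h->access = &SA5_access;        /* was a struct copy; now a pointer */
        h->access->set_intr_mask(h, 0); /* call sites become h->access->... */
}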
39738diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
39739index 2b94403..fd6ad1f 100644
39740--- a/drivers/block/cpqarray.c
39741+++ b/drivers/block/cpqarray.c
39742@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
39743 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
39744 goto Enomem4;
39745 }
39746- hba[i]->access.set_intr_mask(hba[i], 0);
39747+ hba[i]->access->set_intr_mask(hba[i], 0);
39748 if (request_irq(hba[i]->intr, do_ida_intr,
39749 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
39750 {
39751@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
39752 add_timer(&hba[i]->timer);
39753
39754 /* Enable IRQ now that spinlock and rate limit timer are set up */
39755- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
39756+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
39757
39758 for(j=0; j<NWD; j++) {
39759 struct gendisk *disk = ida_gendisk[i][j];
39760@@ -694,7 +694,7 @@ DBGINFO(
39761 for(i=0; i<NR_PRODUCTS; i++) {
39762 if (board_id == products[i].board_id) {
39763 c->product_name = products[i].product_name;
39764- c->access = *(products[i].access);
39765+ c->access = products[i].access;
39766 break;
39767 }
39768 }
39769@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
39770 hba[ctlr]->intr = intr;
39771 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
39772 hba[ctlr]->product_name = products[j].product_name;
39773- hba[ctlr]->access = *(products[j].access);
39774+ hba[ctlr]->access = products[j].access;
39775 hba[ctlr]->ctlr = ctlr;
39776 hba[ctlr]->board_id = board_id;
39777 hba[ctlr]->pci_dev = NULL; /* not PCI */
39778@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
39779
39780 while((c = h->reqQ) != NULL) {
39781 /* Can't do anything if we're busy */
39782- if (h->access.fifo_full(h) == 0)
39783+ if (h->access->fifo_full(h) == 0)
39784 return;
39785
39786 /* Get the first entry from the request Q */
39787@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
39788 h->Qdepth--;
39789
39790 /* Tell the controller to do our bidding */
39791- h->access.submit_command(h, c);
39792+ h->access->submit_command(h, c);
39793
39794 /* Get onto the completion Q */
39795 addQ(&h->cmpQ, c);
39796@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
39797 unsigned long flags;
39798 __u32 a,a1;
39799
39800- istat = h->access.intr_pending(h);
39801+ istat = h->access->intr_pending(h);
39802 /* Is this interrupt for us? */
39803 if (istat == 0)
39804 return IRQ_NONE;
39805@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
39806 */
39807 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
39808 if (istat & FIFO_NOT_EMPTY) {
39809- while((a = h->access.command_completed(h))) {
39810+ while((a = h->access->command_completed(h))) {
39811 a1 = a; a &= ~3;
39812 if ((c = h->cmpQ) == NULL)
39813 {
39814@@ -1448,11 +1448,11 @@ static int sendcmd(
39815 /*
39816 * Disable interrupt
39817 */
39818- info_p->access.set_intr_mask(info_p, 0);
39819+ info_p->access->set_intr_mask(info_p, 0);
39820 /* Make sure there is room in the command FIFO */
39821 /* Actually it should be completely empty at this time. */
39822 for (i = 200000; i > 0; i--) {
39823- temp = info_p->access.fifo_full(info_p);
39824+ temp = info_p->access->fifo_full(info_p);
39825 if (temp != 0) {
39826 break;
39827 }
39828@@ -1465,7 +1465,7 @@ DBG(
39829 /*
39830 * Send the cmd
39831 */
39832- info_p->access.submit_command(info_p, c);
39833+ info_p->access->submit_command(info_p, c);
39834 complete = pollcomplete(ctlr);
39835
39836 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
39837@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
39838 * we check the new geometry. Then turn interrupts back on when
39839 * we're done.
39840 */
39841- host->access.set_intr_mask(host, 0);
39842+ host->access->set_intr_mask(host, 0);
39843 getgeometry(ctlr);
39844- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
39845+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
39846
39847 for(i=0; i<NWD; i++) {
39848 struct gendisk *disk = ida_gendisk[ctlr][i];
39849@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
39850 /* Wait (up to 2 seconds) for a command to complete */
39851
39852 for (i = 200000; i > 0; i--) {
39853- done = hba[ctlr]->access.command_completed(hba[ctlr]);
39854+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
39855 if (done == 0) {
39856 udelay(10); /* a short fixed delay */
39857 } else
39858diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
39859index be73e9d..7fbf140 100644
39860--- a/drivers/block/cpqarray.h
39861+++ b/drivers/block/cpqarray.h
39862@@ -99,7 +99,7 @@ struct ctlr_info {
39863 drv_info_t drv[NWD];
39864 struct proc_dir_entry *proc;
39865
39866- struct access_method access;
39867+ struct access_method *access;
39868
39869 cmdlist_t *reqQ;
39870 cmdlist_t *cmpQ;
39871diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
39872index a76ceb3..3c1a9fd 100644
39873--- a/drivers/block/drbd/drbd_int.h
39874+++ b/drivers/block/drbd/drbd_int.h
39875@@ -331,7 +331,7 @@ struct drbd_epoch {
39876 struct drbd_connection *connection;
39877 struct list_head list;
39878 unsigned int barrier_nr;
39879- atomic_t epoch_size; /* increased on every request added. */
39880+ atomic_unchecked_t epoch_size; /* increased on every request added. */
39881 atomic_t active; /* increased on every req. added, and dec on every finished. */
39882 unsigned long flags;
39883 };
39884@@ -797,7 +797,7 @@ struct drbd_device {
39885 unsigned int al_tr_number;
39886 int al_tr_cycle;
39887 wait_queue_head_t seq_wait;
39888- atomic_t packet_seq;
39889+ atomic_unchecked_t packet_seq;
39890 unsigned int peer_seq;
39891 spinlock_t peer_seq_lock;
39892 unsigned int minor;
39893@@ -1407,7 +1407,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
39894 char __user *uoptval;
39895 int err;
39896
39897- uoptval = (char __user __force *)optval;
39898+ uoptval = (char __force_user *)optval;
39899
39900 set_fs(KERNEL_DS);
39901 if (level == SOL_SOCKET)
39902diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
39903index 89c497c..9c736ae 100644
39904--- a/drivers/block/drbd/drbd_interval.c
39905+++ b/drivers/block/drbd/drbd_interval.c
39906@@ -67,9 +67,9 @@ static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
39907 }
39908
39909 static const struct rb_augment_callbacks augment_callbacks = {
39910- augment_propagate,
39911- augment_copy,
39912- augment_rotate,
39913+ .propagate = augment_propagate,
39914+ .copy = augment_copy,
39915+ .rotate = augment_rotate,
39916 };
39917
39918 /**
39919diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
39920index 960645c..6c2724a 100644
39921--- a/drivers/block/drbd/drbd_main.c
39922+++ b/drivers/block/drbd/drbd_main.c
39923@@ -1322,7 +1322,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
39924 p->sector = sector;
39925 p->block_id = block_id;
39926 p->blksize = blksize;
39927- p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
39928+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
39929 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
39930 }
39931
39932@@ -1628,7 +1628,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
39933 return -EIO;
39934 p->sector = cpu_to_be64(req->i.sector);
39935 p->block_id = (unsigned long)req;
39936- p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
39937+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
39938 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
39939 if (device->state.conn >= C_SYNC_SOURCE &&
39940 device->state.conn <= C_PAUSED_SYNC_T)
39941@@ -2670,8 +2670,8 @@ void drbd_destroy_connection(struct kref *kref)
39942 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
39943 struct drbd_resource *resource = connection->resource;
39944
39945- if (atomic_read(&connection->current_epoch->epoch_size) != 0)
39946- drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
39947+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0)
39948+ drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
39949 kfree(connection->current_epoch);
39950
39951 idr_destroy(&connection->peer_devices);
39952diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
39953index 3f2e167..d3170e4 100644
39954--- a/drivers/block/drbd/drbd_nl.c
39955+++ b/drivers/block/drbd/drbd_nl.c
39956@@ -3616,7 +3616,7 @@ finish:
39957
39958 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
39959 {
39960- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
39961+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
39962 struct sk_buff *msg;
39963 struct drbd_genlmsghdr *d_out;
39964 unsigned seq;
39965@@ -3629,7 +3629,7 @@ void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
39966 return;
39967 }
39968
39969- seq = atomic_inc_return(&drbd_genl_seq);
39970+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
39971 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
39972 if (!msg)
39973 goto failed;
39974diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
39975index 5b17ec8..6c21e6b 100644
39976--- a/drivers/block/drbd/drbd_receiver.c
39977+++ b/drivers/block/drbd/drbd_receiver.c
39978@@ -834,7 +834,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
39979 struct drbd_device *device = peer_device->device;
39980 int err;
39981
39982- atomic_set(&device->packet_seq, 0);
39983+ atomic_set_unchecked(&device->packet_seq, 0);
39984 device->peer_seq = 0;
39985
39986 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
39987@@ -1199,7 +1199,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
39988 do {
39989 next_epoch = NULL;
39990
39991- epoch_size = atomic_read(&epoch->epoch_size);
39992+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
39993
39994 switch (ev & ~EV_CLEANUP) {
39995 case EV_PUT:
39996@@ -1239,7 +1239,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
39997 rv = FE_DESTROYED;
39998 } else {
39999 epoch->flags = 0;
40000- atomic_set(&epoch->epoch_size, 0);
40001+ atomic_set_unchecked(&epoch->epoch_size, 0);
40002 /* atomic_set(&epoch->active, 0); is already zero */
40003 if (rv == FE_STILL_LIVE)
40004 rv = FE_RECYCLED;
40005@@ -1490,7 +1490,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
40006 conn_wait_active_ee_empty(connection);
40007 drbd_flush(connection);
40008
40009- if (atomic_read(&connection->current_epoch->epoch_size)) {
40010+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
40011 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
40012 if (epoch)
40013 break;
40014@@ -1503,11 +1503,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
40015 }
40016
40017 epoch->flags = 0;
40018- atomic_set(&epoch->epoch_size, 0);
40019+ atomic_set_unchecked(&epoch->epoch_size, 0);
40020 atomic_set(&epoch->active, 0);
40021
40022 spin_lock(&connection->epoch_lock);
40023- if (atomic_read(&connection->current_epoch->epoch_size)) {
40024+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
40025 list_add(&epoch->list, &connection->current_epoch->list);
40026 connection->current_epoch = epoch;
40027 connection->epochs++;
40028@@ -2224,7 +2224,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
40029
40030 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
40031 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
40032- atomic_inc(&connection->current_epoch->epoch_size);
40033+ atomic_inc_unchecked(&connection->current_epoch->epoch_size);
40034 err2 = drbd_drain_block(peer_device, pi->size);
40035 if (!err)
40036 err = err2;
40037@@ -2266,7 +2266,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
40038
40039 spin_lock(&connection->epoch_lock);
40040 peer_req->epoch = connection->current_epoch;
40041- atomic_inc(&peer_req->epoch->epoch_size);
40042+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
40043 atomic_inc(&peer_req->epoch->active);
40044 spin_unlock(&connection->epoch_lock);
40045
40046@@ -4461,7 +4461,7 @@ struct data_cmd {
40047 int expect_payload;
40048 size_t pkt_size;
40049 int (*fn)(struct drbd_connection *, struct packet_info *);
40050-};
40051+} __do_const;
40052
40053 static struct data_cmd drbd_cmd_handler[] = {
40054 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
40055@@ -4572,7 +4572,7 @@ static void conn_disconnect(struct drbd_connection *connection)
40056 if (!list_empty(&connection->current_epoch->list))
40057 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
40058 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
40059- atomic_set(&connection->current_epoch->epoch_size, 0);
40060+ atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
40061 connection->send.seen_any_write_yet = false;
40062
40063 drbd_info(connection, "Connection closed\n");
40064@@ -5364,7 +5364,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
40065 struct asender_cmd {
40066 size_t pkt_size;
40067 int (*fn)(struct drbd_connection *connection, struct packet_info *);
40068-};
40069+} __do_const;
40070
40071 static struct asender_cmd asender_tbl[] = {
40072 [P_PING] = { 0, got_Ping },
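/*
 * The drbd hunks above also tag the dispatch tables (struct data_cmd,
 * struct asender_cmd) with __do_const. That attribute is consumed by
 * grsecurity's constify gcc plugin, which forces every instance of the
 * struct to be const and placed in read-only data, so the fn pointers in
 * drbd_cmd_handler[] and asender_tbl[] cannot be overwritten at run time.
 * Without the plugin the effect can be approximated by hand (reduced
 * sketch; the real __do_const is plugin-defined):
 */
#include <stddef.h>

struct packet_info;
struct drbd_connection;

struct data_cmd {
        int expect_payload;
        size_t pkt_size;
        int (*fn)(struct drbd_connection *, struct packet_info *);
};

/* what the plugin effectively does to every instance: */
static const struct data_cmd cmd_handler_sketch[] = {
        { .expect_payload = 1, .pkt_size = 0, .fn = NULL }, /* placeholder */
};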
40073diff --git a/drivers/block/loop.c b/drivers/block/loop.c
40074index 6cb1beb..bf490f7 100644
40075--- a/drivers/block/loop.c
40076+++ b/drivers/block/loop.c
40077@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
40078
40079 file_start_write(file);
40080 set_fs(get_ds());
40081- bw = file->f_op->write(file, buf, len, &pos);
40082+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
40083 set_fs(old_fs);
40084 file_end_write(file);
40085 if (likely(bw == len))
40086diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
40087index 02351e2..a9ea617 100644
40088--- a/drivers/block/nvme-core.c
40089+++ b/drivers/block/nvme-core.c
40090@@ -73,7 +73,6 @@ static LIST_HEAD(dev_list);
40091 static struct task_struct *nvme_thread;
40092 static struct workqueue_struct *nvme_workq;
40093 static wait_queue_head_t nvme_kthread_wait;
40094-static struct notifier_block nvme_nb;
40095
40096 static void nvme_reset_failed_dev(struct work_struct *ws);
40097
40098@@ -2925,6 +2924,10 @@ static struct pci_driver nvme_driver = {
40099 .err_handler = &nvme_err_handler,
40100 };
40101
40102+static struct notifier_block nvme_nb = {
40103+ .notifier_call = &nvme_cpu_notify,
40104+};
40105+
40106 static int __init nvme_init(void)
40107 {
40108 int result;
40109@@ -2941,7 +2944,6 @@ static int __init nvme_init(void)
40110 else if (result > 0)
40111 nvme_major = result;
40112
40113- nvme_nb.notifier_call = &nvme_cpu_notify;
40114 result = register_hotcpu_notifier(&nvme_nb);
40115 if (result)
40116 goto unregister_blkdev;
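/*
 * The nvme change above replaces a runtime field assignment in nvme_init()
 * with a static initializer, so nvme_nb is complete at compile time and is
 * eligible for read-only placement by the constify plugin. The
 * before/after shape, sketched with a simplified notifier type:
 */
struct notifier_block {
        int (*notifier_call)(struct notifier_block *nb, unsigned long action,
                             void *data);
};

static int nvme_cpu_notify(struct notifier_block *nb, unsigned long action,
                           void *data)
{
        (void)nb; (void)action; (void)data;
        return 0;
}

/* after: fully initialized at build time */
static struct notifier_block nvme_nb = {
        .notifier_call = &nvme_cpu_notify,
};

/* before (removed by the patch): assigned inside nvme_init() at run time:
 *      nvme_nb.notifier_call = &nvme_cpu_notify;
 */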
40117diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
40118index 758ac44..58087fd 100644
40119--- a/drivers/block/pktcdvd.c
40120+++ b/drivers/block/pktcdvd.c
40121@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
40122
40123 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
40124 {
40125- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
40126+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
40127 }
40128
40129 /*
40130@@ -1888,7 +1888,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
40131 return -EROFS;
40132 }
40133 pd->settings.fp = ti.fp;
40134- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
40135+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
40136
40137 if (ti.nwa_v) {
40138 pd->nwa = be32_to_cpu(ti.next_writable);
40139diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
40140index e5565fb..71be10b4 100644
40141--- a/drivers/block/smart1,2.h
40142+++ b/drivers/block/smart1,2.h
40143@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
40144 }
40145
40146 static struct access_method smart4_access = {
40147- smart4_submit_command,
40148- smart4_intr_mask,
40149- smart4_fifo_full,
40150- smart4_intr_pending,
40151- smart4_completed,
40152+ .submit_command = smart4_submit_command,
40153+ .set_intr_mask = smart4_intr_mask,
40154+ .fifo_full = smart4_fifo_full,
40155+ .intr_pending = smart4_intr_pending,
40156+ .command_completed = smart4_completed,
40157 };
40158
40159 /*
40160@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
40161 }
40162
40163 static struct access_method smart2_access = {
40164- smart2_submit_command,
40165- smart2_intr_mask,
40166- smart2_fifo_full,
40167- smart2_intr_pending,
40168- smart2_completed,
40169+ .submit_command = smart2_submit_command,
40170+ .set_intr_mask = smart2_intr_mask,
40171+ .fifo_full = smart2_fifo_full,
40172+ .intr_pending = smart2_intr_pending,
40173+ .command_completed = smart2_completed,
40174 };
40175
40176 /*
40177@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
40178 }
40179
40180 static struct access_method smart2e_access = {
40181- smart2e_submit_command,
40182- smart2e_intr_mask,
40183- smart2e_fifo_full,
40184- smart2e_intr_pending,
40185- smart2e_completed,
40186+ .submit_command = smart2e_submit_command,
40187+ .set_intr_mask = smart2e_intr_mask,
40188+ .fifo_full = smart2e_fifo_full,
40189+ .intr_pending = smart2e_intr_pending,
40190+ .command_completed = smart2e_completed,
40191 };
40192
40193 /*
40194@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
40195 }
40196
40197 static struct access_method smart1_access = {
40198- smart1_submit_command,
40199- smart1_intr_mask,
40200- smart1_fifo_full,
40201- smart1_intr_pending,
40202- smart1_completed,
40203+ .submit_command = smart1_submit_command,
40204+ .set_intr_mask = smart1_intr_mask,
40205+ .fifo_full = smart1_fifo_full,
40206+ .intr_pending = smart1_intr_pending,
40207+ .command_completed = smart1_completed,
40208 };
40209diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
40210index f038dba..bb74c08 100644
40211--- a/drivers/bluetooth/btwilink.c
40212+++ b/drivers/bluetooth/btwilink.c
40213@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
40214
40215 static int bt_ti_probe(struct platform_device *pdev)
40216 {
40217- static struct ti_st *hst;
40218+ struct ti_st *hst;
40219 struct hci_dev *hdev;
40220 int err;
40221
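/*
 * The btwilink change above is a plain cleanup rather than hardening: a
 * function-local `static` pointer in a probe routine is a single variable
 * shared by every probed device, which is at best wasted .bss and at worst
 * a latent aliasing hazard; the allocation it receives is per-device
 * anyway. Minimal illustration (probe_one is a hypothetical helper):
 */
#include <stdlib.h>

struct ti_st { int reg_status; };

static struct ti_st *probe_one(void)
{
        struct ti_st *hst = calloc(1, sizeof(*hst)); /* per-call, as the fix intends */

        /* `static struct ti_st *hst` here would be one slot shared across probes */
        return hst;
}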
40222diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
40223index 898b84b..86f74b9 100644
40224--- a/drivers/cdrom/cdrom.c
40225+++ b/drivers/cdrom/cdrom.c
40226@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
40227 ENSURE(reset, CDC_RESET);
40228 ENSURE(generic_packet, CDC_GENERIC_PACKET);
40229 cdi->mc_flags = 0;
40230- cdo->n_minors = 0;
40231 cdi->options = CDO_USE_FFLAGS;
40232
40233 if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
40234@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
40235 else
40236 cdi->cdda_method = CDDA_OLD;
40237
40238- if (!cdo->generic_packet)
40239- cdo->generic_packet = cdrom_dummy_generic_packet;
40240+ if (!cdo->generic_packet) {
40241+ pax_open_kernel();
40242+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
40243+ pax_close_kernel();
40244+ }
40245
40246 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
40247 mutex_lock(&cdrom_mutex);
40248@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
40249 if (cdi->exit)
40250 cdi->exit(cdi);
40251
40252- cdi->ops->n_minors--;
40253 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
40254 }
40255
40256@@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
40257 */
40258 nr = nframes;
40259 do {
40260- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
40261+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
40262 if (cgc.buffer)
40263 break;
40264
40265@@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
40266 struct cdrom_device_info *cdi;
40267 int ret;
40268
40269- ret = scnprintf(info + *pos, max_size - *pos, header);
40270+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
40271 if (!ret)
40272 return 1;
40273
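/*
 * In the cdrom.c hunk above, the fallback handler can no longer be
 * assigned with a plain store because cdrom_device_ops instances are
 * read-only under the constify plugin: the patch brackets the write with
 * pax_open_kernel()/pax_close_kernel() and goes through a void ** cast to
 * bypass the const qualifier. The idiom, sketched with stubbed pax helpers
 * (the real ones briefly lift kernel write protection):
 */
struct cdrom_generic_command;
struct cdrom_device_info;

struct cdrom_device_ops_sketch {
        int (*generic_packet)(struct cdrom_device_info *cdi,
                              struct cdrom_generic_command *cgc);
};

static void pax_open_kernel(void)  { /* stub */ }
static void pax_close_kernel(void) { /* stub */ }

static int dummy_generic_packet(struct cdrom_device_info *cdi,
                                struct cdrom_generic_command *cgc)
{
        (void)cdi; (void)cgc;
        return -5;      /* -EIO, as the kernel's dummy handler returns */
}

static void install_fallback(struct cdrom_device_ops_sketch *cdo)
{
        if (!cdo->generic_packet) {
                pax_open_kernel();
                /* write one pointer slot inside an otherwise const object */
                *(void **)&cdo->generic_packet = (void *)dummy_generic_packet;
                pax_close_kernel();
        }
}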
40274diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
40275index 584bc31..e64a12c 100644
40276--- a/drivers/cdrom/gdrom.c
40277+++ b/drivers/cdrom/gdrom.c
40278@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
40279 .audio_ioctl = gdrom_audio_ioctl,
40280 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
40281 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
40282- .n_minors = 1,
40283 };
40284
40285 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
40286diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
40287index 6e9f74a..50c7cea 100644
40288--- a/drivers/char/Kconfig
40289+++ b/drivers/char/Kconfig
40290@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
40291
40292 config DEVKMEM
40293 bool "/dev/kmem virtual device support"
40294- default y
40295+ default n
40296+ depends on !GRKERNSEC_KMEM
40297 help
40298 Say Y here if you want to support the /dev/kmem device. The
40299 /dev/kmem device is rarely used, but can be used for certain
40300@@ -577,6 +578,7 @@ config DEVPORT
40301 bool
40302 depends on !M68K
40303 depends on ISA || PCI
40304+ depends on !GRKERNSEC_KMEM
40305 default y
40306
40307 source "drivers/s390/char/Kconfig"
40308diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
40309index a48e05b..6bac831 100644
40310--- a/drivers/char/agp/compat_ioctl.c
40311+++ b/drivers/char/agp/compat_ioctl.c
40312@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
40313 return -ENOMEM;
40314 }
40315
40316- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
40317+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
40318 sizeof(*usegment) * ureserve.seg_count)) {
40319 kfree(usegment);
40320 kfree(ksegment);
40321diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
40322index b297033..fa217ca 100644
40323--- a/drivers/char/agp/frontend.c
40324+++ b/drivers/char/agp/frontend.c
40325@@ -819,7 +819,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
40326 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
40327 return -EFAULT;
40328
40329- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
40330+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
40331 return -EFAULT;
40332
40333 client = agp_find_client_by_pid(reserve.pid);
40334@@ -849,7 +849,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
40335 if (segment == NULL)
40336 return -ENOMEM;
40337
40338- if (copy_from_user(segment, (void __user *) reserve.seg_list,
40339+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
40340 sizeof(struct agp_segment) * reserve.seg_count)) {
40341 kfree(segment);
40342 return -EFAULT;
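
The agpioc_reserve_wrap() hunk above tightens the multiplication-overflow guard: the divisor in `seg_count >= ~0U/sizeof(...)` must be the element type whose allocation the count is later multiplied against, assumed here to be the larger `struct agp_segment_priv`. A standalone sketch of the guard; the struct layouts are stand-ins, not the real AGP definitions:

    #include <stdio.h>
    #include <limits.h>

    struct seg      { unsigned start, count, type; };          /* smaller, 12 bytes here */
    struct seg_priv { unsigned start, count, type, pad[4]; };  /* larger, 28 bytes here  */

    int main(void)
    {
        /* passes a check against sizeof(struct seg) ... */
        unsigned count = UINT_MAX / sizeof(struct seg) - 1;

        /* ... but the allocation uses the bigger type, so this is the real bound */
        if (count >= ~0U / sizeof(struct seg_priv))
            puts("rejected: count * sizeof(seg_priv) would overflow");
        else
            puts("accepted");
        return 0;
    }
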
40343diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
40344index 4f94375..413694e 100644
40345--- a/drivers/char/genrtc.c
40346+++ b/drivers/char/genrtc.c
40347@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
40348 switch (cmd) {
40349
40350 case RTC_PLL_GET:
40351+ memset(&pll, 0, sizeof(pll));
40352 if (get_rtc_pll(&pll))
40353 return -EINVAL;
40354 else
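
The genrtc hunk above zeroes `pll` before it is filled and copied to userspace: any field or padding byte get_rtc_pll() leaves untouched would otherwise leak stale kernel stack contents. A standalone sketch, with a stand-in layout for the real rtc_pll_info:

    #include <stdio.h>
    #include <string.h>

    struct pll_info { int ctrl; int value; long max; };  /* padding may also follow */

    static void fill_partially(struct pll_info *p) { p->ctrl = 1; }  /* forgets .value, .max */

    int main(void)
    {
        struct pll_info pll;            /* starts as garbage from the stack     */
        memset(&pll, 0, sizeof(pll));   /* the fix: deterministic zeroes        */
        fill_partially(&pll);
        printf("value=%d max=%ld\n", pll.value, pll.max);  /* 0, 0: nothing leaks */
        return 0;
    }
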
40355diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
40356index d5d4cd8..22d561d 100644
40357--- a/drivers/char/hpet.c
40358+++ b/drivers/char/hpet.c
40359@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
40360 }
40361
40362 static int
40363-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
40364+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
40365 struct hpet_info *info)
40366 {
40367 struct hpet_timer __iomem *timer;
40368diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
40369index 86fe45c..c0ea948 100644
40370--- a/drivers/char/hw_random/intel-rng.c
40371+++ b/drivers/char/hw_random/intel-rng.c
40372@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
40373
40374 if (no_fwh_detect)
40375 return -ENODEV;
40376- printk(warning);
40377+ printk("%s", warning);
40378 return -EBUSY;
40379 }
40380
40381diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
40382index e6db938..835e3a2 100644
40383--- a/drivers/char/ipmi/ipmi_msghandler.c
40384+++ b/drivers/char/ipmi/ipmi_msghandler.c
40385@@ -438,7 +438,7 @@ struct ipmi_smi {
40386 struct proc_dir_entry *proc_dir;
40387 char proc_dir_name[10];
40388
40389- atomic_t stats[IPMI_NUM_STATS];
40390+ atomic_unchecked_t stats[IPMI_NUM_STATS];
40391
40392 /*
40393 * run_to_completion duplicate of smb_info, smi_info
40394@@ -470,9 +470,9 @@ static LIST_HEAD(smi_watchers);
40395 static DEFINE_MUTEX(smi_watchers_mutex);
40396
40397 #define ipmi_inc_stat(intf, stat) \
40398- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
40399+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
40400 #define ipmi_get_stat(intf, stat) \
40401- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
40402+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
40403
40404 static int is_lan_addr(struct ipmi_addr *addr)
40405 {
40406@@ -2926,7 +2926,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
40407 INIT_LIST_HEAD(&intf->cmd_rcvrs);
40408 init_waitqueue_head(&intf->waitq);
40409 for (i = 0; i < IPMI_NUM_STATS; i++)
40410- atomic_set(&intf->stats[i], 0);
40411+ atomic_set_unchecked(&intf->stats[i], 0);
40412
40413 intf->proc_dir = NULL;
40414
40415diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
40416index 5d66568..c9d93c3 100644
40417--- a/drivers/char/ipmi/ipmi_si_intf.c
40418+++ b/drivers/char/ipmi/ipmi_si_intf.c
40419@@ -285,7 +285,7 @@ struct smi_info {
40420 unsigned char slave_addr;
40421
40422 /* Counters and things for the proc filesystem. */
40423- atomic_t stats[SI_NUM_STATS];
40424+ atomic_unchecked_t stats[SI_NUM_STATS];
40425
40426 struct task_struct *thread;
40427
40428@@ -294,9 +294,9 @@ struct smi_info {
40429 };
40430
40431 #define smi_inc_stat(smi, stat) \
40432- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
40433+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
40434 #define smi_get_stat(smi, stat) \
40435- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
40436+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
40437
40438 #define SI_MAX_PARMS 4
40439
40440@@ -3374,7 +3374,7 @@ static int try_smi_init(struct smi_info *new_smi)
40441 atomic_set(&new_smi->req_events, 0);
40442 new_smi->run_to_completion = false;
40443 for (i = 0; i < SI_NUM_STATS; i++)
40444- atomic_set(&new_smi->stats[i], 0);
40445+ atomic_set_unchecked(&new_smi->stats[i], 0);
40446
40447 new_smi->interrupt_disabled = true;
40448 atomic_set(&new_smi->stop_operation, 0);
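
The two IPMI hunks above move statistics counters from atomic_t to atomic_unchecked_t. The assumed context is PaX's REFCOUNT hardening, under which a plain atomic_t that overflows is treated as a reference-count bug and trapped; counters that may legitimately wrap, like these stats, use the `_unchecked` variants to stay exempt. A standalone model of the split using C11 atomics; the saturation policy is illustrative, not the PaX implementation:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <limits.h>

    static void inc_checked(atomic_int *v)    /* refcount: overflow is a bug */
    {
        int old = atomic_fetch_add(v, 1);
        if (old == INT_MAX) {                 /* would have wrapped: undo, warn */
            atomic_store(v, INT_MAX);
            fprintf(stderr, "refcount overflow detected\n");
        }
    }

    static void inc_unchecked(atomic_int *v)  /* statistic: wrapping is benign */
    {
        atomic_fetch_add(v, 1);
    }

    int main(void)
    {
        atomic_int ref  = ATOMIC_VAR_INIT(INT_MAX);
        atomic_int stat = ATOMIC_VAR_INIT(INT_MAX);
        inc_checked(&ref);     /* detected and saturated          */
        inc_unchecked(&stat);  /* wraps silently, by design       */
        printf("ref=%d stat=%d\n", atomic_load(&ref), atomic_load(&stat));
        return 0;
    }
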
40449diff --git a/drivers/char/mem.c b/drivers/char/mem.c
40450index 917403f..dddd899 100644
40451--- a/drivers/char/mem.c
40452+++ b/drivers/char/mem.c
40453@@ -18,6 +18,7 @@
40454 #include <linux/raw.h>
40455 #include <linux/tty.h>
40456 #include <linux/capability.h>
40457+#include <linux/security.h>
40458 #include <linux/ptrace.h>
40459 #include <linux/device.h>
40460 #include <linux/highmem.h>
40461@@ -36,6 +37,10 @@
40462
40463 #define DEVPORT_MINOR 4
40464
40465+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
40466+extern const struct file_operations grsec_fops;
40467+#endif
40468+
40469 static inline unsigned long size_inside_page(unsigned long start,
40470 unsigned long size)
40471 {
40472@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
40473
40474 while (cursor < to) {
40475 if (!devmem_is_allowed(pfn)) {
40476+#ifdef CONFIG_GRKERNSEC_KMEM
40477+ gr_handle_mem_readwrite(from, to);
40478+#else
40479 printk(KERN_INFO
40480 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
40481 current->comm, from, to);
40482+#endif
40483 return 0;
40484 }
40485 cursor += PAGE_SIZE;
40486@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
40487 }
40488 return 1;
40489 }
40490+#elif defined(CONFIG_GRKERNSEC_KMEM)
40491+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
40492+{
40493+ return 0;
40494+}
40495 #else
40496 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
40497 {
40498@@ -122,6 +136,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
40499
40500 while (count > 0) {
40501 unsigned long remaining;
40502+ char *temp;
40503
40504 sz = size_inside_page(p, count);
40505
40506@@ -137,7 +152,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
40507 if (!ptr)
40508 return -EFAULT;
40509
40510- remaining = copy_to_user(buf, ptr, sz);
40511+#ifdef CONFIG_PAX_USERCOPY
40512+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
40513+ if (!temp) {
40514+ unxlate_dev_mem_ptr(p, ptr);
40515+ return -ENOMEM;
40516+ }
40517+ memcpy(temp, ptr, sz);
40518+#else
40519+ temp = ptr;
40520+#endif
40521+
40522+ remaining = copy_to_user(buf, temp, sz);
40523+
40524+#ifdef CONFIG_PAX_USERCOPY
40525+ kfree(temp);
40526+#endif
40527+
40528 unxlate_dev_mem_ptr(p, ptr);
40529 if (remaining)
40530 return -EFAULT;
40531@@ -369,9 +400,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
40532 size_t count, loff_t *ppos)
40533 {
40534 unsigned long p = *ppos;
40535- ssize_t low_count, read, sz;
40536+ ssize_t low_count, read, sz, err = 0;
40537 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
40538- int err = 0;
40539
40540 read = 0;
40541 if (p < (unsigned long) high_memory) {
40542@@ -393,6 +423,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
40543 }
40544 #endif
40545 while (low_count > 0) {
40546+ char *temp;
40547+
40548 sz = size_inside_page(p, low_count);
40549
40550 /*
40551@@ -402,7 +434,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
40552 */
40553 kbuf = xlate_dev_kmem_ptr((char *)p);
40554
40555- if (copy_to_user(buf, kbuf, sz))
40556+#ifdef CONFIG_PAX_USERCOPY
40557+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
40558+ if (!temp)
40559+ return -ENOMEM;
40560+ memcpy(temp, kbuf, sz);
40561+#else
40562+ temp = kbuf;
40563+#endif
40564+
40565+ err = copy_to_user(buf, temp, sz);
40566+
40567+#ifdef CONFIG_PAX_USERCOPY
40568+ kfree(temp);
40569+#endif
40570+
40571+ if (err)
40572 return -EFAULT;
40573 buf += sz;
40574 p += sz;
40575@@ -827,6 +874,9 @@ static const struct memdev {
40576 #ifdef CONFIG_PRINTK
40577 [11] = { "kmsg", 0644, &kmsg_fops, NULL },
40578 #endif
40579+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
40580+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
40581+#endif
40582 };
40583
40584 static int memory_open(struct inode *inode, struct file *filp)
40585@@ -898,7 +948,7 @@ static int __init chr_dev_init(void)
40586 continue;
40587
40588 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
40589- NULL, devlist[minor].name);
40590+ NULL, "%s", devlist[minor].name);
40591 }
40592
40593 return tty_init();
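
The /dev/mem and /dev/kmem hunks above bounce reads through a kmalloc'd buffer when PAX_USERCOPY is enabled: the hardened copy_to_user() validates the copy length against the size of the source object, and arbitrary physical or vmalloc pages have no sizeable slab object behind them, so the data is staged in a heap buffer the checker can measure. A standalone model of the pattern; checked_copy() is a stand-in for the hardened copy_to_user(), not its real implementation:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* returns bytes left uncopied, 0 on success, like copy_to_user() */
    static size_t checked_copy(void *dst, const void *src, size_t n, size_t src_obj)
    {
        if (n > src_obj) {
            fprintf(stderr, "usercopy: %zu exceeds source object %zu\n", n, src_obj);
            return n;
        }
        memcpy(dst, src, n);
        return 0;
    }

    int main(void)
    {
        char device_mem[4096] = "payload";  /* stands in for a mapped /dev/mem page */
        char user_buf[64];
        size_t sz = sizeof(user_buf);

        char *temp = malloc(sz);            /* bounce buffer of known length */
        if (!temp)
            return 1;
        memcpy(temp, device_mem, sz);       /* unchecked kernel-internal copy */
        size_t left = checked_copy(user_buf, temp, sz, sz);  /* checkable copy */
        free(temp);
        return left ? 1 : 0;
    }
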
40594diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
40595index 9df78e2..01ba9ae 100644
40596--- a/drivers/char/nvram.c
40597+++ b/drivers/char/nvram.c
40598@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
40599
40600 spin_unlock_irq(&rtc_lock);
40601
40602- if (copy_to_user(buf, contents, tmp - contents))
40603+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
40604 return -EFAULT;
40605
40606 *ppos = i;
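
The nvram_read() hunk above clamps the derived length before copy_to_user(): `tmp` is advanced while filling the on-stack `contents` buffer, so `tmp - contents` is the byte count, and the added comparison refuses any count larger than the buffer itself. A standalone sketch of deriving and clamping such a length:

    #include <stdio.h>

    int main(void)
    {
        unsigned char contents[128], *tmp = contents;
        for (int i = 0; i < 100; i++)
            *tmp++ = (unsigned char)i;         /* fill loop advances tmp    */

        size_t len = (size_t)(tmp - contents); /* bytes actually written    */
        if (len > sizeof(contents)) {          /* the added guard           */
            fputs("refusing oversized copy\n", stderr);
            return 1;
        }
        printf("copying %zu bytes\n", len);
        return 0;
    }
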
40607diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
40608index 8320abd..ec48108 100644
40609--- a/drivers/char/pcmcia/synclink_cs.c
40610+++ b/drivers/char/pcmcia/synclink_cs.c
40611@@ -2345,9 +2345,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
40612
40613 if (debug_level >= DEBUG_LEVEL_INFO)
40614 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
40615- __FILE__, __LINE__, info->device_name, port->count);
40616+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
40617
40618- WARN_ON(!port->count);
40619+ WARN_ON(!atomic_read(&port->count));
40620
40621 if (tty_port_close_start(port, tty, filp) == 0)
40622 goto cleanup;
40623@@ -2365,7 +2365,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
40624 cleanup:
40625 if (debug_level >= DEBUG_LEVEL_INFO)
40626 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
40627- tty->driver->name, port->count);
40628+ tty->driver->name, atomic_read(&port->count));
40629 }
40630
40631 /* Wait until the transmitter is empty.
40632@@ -2507,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
40633
40634 if (debug_level >= DEBUG_LEVEL_INFO)
40635 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
40636- __FILE__, __LINE__, tty->driver->name, port->count);
40637+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
40638
40639 /* If port is closing, signal caller to try again */
40640 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
40641@@ -2527,11 +2527,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
40642 goto cleanup;
40643 }
40644 spin_lock(&port->lock);
40645- port->count++;
40646+ atomic_inc(&port->count);
40647 spin_unlock(&port->lock);
40648 spin_unlock_irqrestore(&info->netlock, flags);
40649
40650- if (port->count == 1) {
40651+ if (atomic_read(&port->count) == 1) {
40652 /* 1st open on this device, init hardware */
40653 retval = startup(info, tty);
40654 if (retval < 0)
40655@@ -3920,7 +3920,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
40656 unsigned short new_crctype;
40657
40658 /* return error if TTY interface open */
40659- if (info->port.count)
40660+ if (atomic_read(&info->port.count))
40661 return -EBUSY;
40662
40663 switch (encoding)
40664@@ -4024,7 +4024,7 @@ static int hdlcdev_open(struct net_device *dev)
40665
40666 /* arbitrate between network and tty opens */
40667 spin_lock_irqsave(&info->netlock, flags);
40668- if (info->port.count != 0 || info->netcount != 0) {
40669+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
40670 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
40671 spin_unlock_irqrestore(&info->netlock, flags);
40672 return -EBUSY;
40673@@ -4114,7 +4114,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40674 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
40675
40676 /* return error if TTY interface open */
40677- if (info->port.count)
40678+ if (atomic_read(&info->port.count))
40679 return -EBUSY;
40680
40681 if (cmd != SIOCWANDEV)
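
The synclink_cs hunks above read `port->count` in the hdlcdev paths without holding the port lock; the patch (assumed to change the tty_port field to atomic_t tree-wide) makes those lockless reads and the increment/decrement updates tear-free. A standalone model with C11 atomics, simplified from the tty_port usage:

    #include <stdatomic.h>
    #include <stdio.h>

    struct port { atomic_int count; };

    static void port_open(struct port *p)  { atomic_fetch_add(&p->count, 1); }
    static void port_close(struct port *p) { atomic_fetch_sub(&p->count, 1); }

    static int port_busy(struct port *p)   /* lockless reader, like hdlcdev_open() */
    {
        return atomic_load(&p->count) != 0;
    }

    int main(void)
    {
        struct port p = { ATOMIC_VAR_INIT(0) };
        port_open(&p);
        printf("busy=%d\n", port_busy(&p));  /* 1 */
        port_close(&p);
        printf("busy=%d\n", port_busy(&p));  /* 0 */
        return 0;
    }
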
40682diff --git a/drivers/char/random.c b/drivers/char/random.c
40683index 71529e1..822b036 100644
40684--- a/drivers/char/random.c
40685+++ b/drivers/char/random.c
40686@@ -284,9 +284,6 @@
40687 /*
40688 * To allow fractional bits to be tracked, the entropy_count field is
40689 * denominated in units of 1/8th bits.
40690- *
40691- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
40692- * credit_entropy_bits() needs to be 64 bits wide.
40693 */
40694 #define ENTROPY_SHIFT 3
40695 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
40696@@ -433,9 +430,9 @@ struct entropy_store {
40697 };
40698
40699 static void push_to_pool(struct work_struct *work);
40700-static __u32 input_pool_data[INPUT_POOL_WORDS];
40701-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
40702-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
40703+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
40704+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
40705+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
40706
40707 static struct entropy_store input_pool = {
40708 .poolinfo = &poolinfo_table[0],
40709@@ -524,8 +521,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
40710 input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
40711 }
40712
40713- ACCESS_ONCE(r->input_rotate) = input_rotate;
40714- ACCESS_ONCE(r->add_ptr) = i;
40715+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
40716+ ACCESS_ONCE_RW(r->add_ptr) = i;
40717 smp_wmb();
40718
40719 if (out)
40720@@ -632,7 +629,7 @@ retry:
40721 /* The +2 corresponds to the /4 in the denominator */
40722
40723 do {
40724- unsigned int anfrac = min(pnfrac, pool_size/2);
40725+ u64 anfrac = min(pnfrac, pool_size/2);
40726 unsigned int add =
40727 ((pool_size - entropy_count)*anfrac*3) >> s;
40728
40729@@ -1177,7 +1174,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
40730
40731 extract_buf(r, tmp);
40732 i = min_t(int, nbytes, EXTRACT_SIZE);
40733- if (copy_to_user(buf, tmp, i)) {
40734+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
40735 ret = -EFAULT;
40736 break;
40737 }
40738@@ -1567,7 +1564,7 @@ static char sysctl_bootid[16];
40739 static int proc_do_uuid(struct ctl_table *table, int write,
40740 void __user *buffer, size_t *lenp, loff_t *ppos)
40741 {
40742- struct ctl_table fake_table;
40743+ ctl_table_no_const fake_table;
40744 unsigned char buf[64], tmp_uuid[16], *uuid;
40745
40746 uuid = table->data;
40747@@ -1597,7 +1594,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
40748 static int proc_do_entropy(struct ctl_table *table, int write,
40749 void __user *buffer, size_t *lenp, loff_t *ppos)
40750 {
40751- struct ctl_table fake_table;
40752+ ctl_table_no_const fake_table;
40753 int entropy_count;
40754
40755 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
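
The random.c hunk above widens `anfrac` to u64 and deletes the comment that required `2*(ENTROPY_SHIFT + log2(poolbits)) <= 31`: with a 64-bit operand, the product `(pool_size - entropy_count)*anfrac*3` can no longer wrap a 32-bit multiply, so the size constraint disappears. A standalone check of the removed invariant, assuming the era's 128-word (4096-bit) input pool:

    #include <stdio.h>
    #include <stdint.h>

    static void check(unsigned poolbits)
    {
        const unsigned shift = 3;                         /* ENTROPY_SHIFT        */
        uint64_t pool_size = (uint64_t)poolbits << shift; /* fractional-bit units */
        uint64_t prod = pool_size * (pool_size / 2) * 3;  /* worst-case product   */
        printf("poolbits=%5u product=%llu fits in 31 bits: %s\n",
               poolbits, (unsigned long long)prod,
               prod < (1ULL << 31) ? "yes" : "no");
    }

    int main(void)
    {
        check(4096);   /* today's input pool: 2*(3+12)=30 <= 31, still fits       */
        check(16384);  /* a 4x larger pool: 2*(3+14)=34, a 32-bit multiply wraps  */
        return 0;
    }
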
40756diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
40757index 7cc1fe22..b602d6b 100644
40758--- a/drivers/char/sonypi.c
40759+++ b/drivers/char/sonypi.c
40760@@ -54,6 +54,7 @@
40761
40762 #include <asm/uaccess.h>
40763 #include <asm/io.h>
40764+#include <asm/local.h>
40765
40766 #include <linux/sonypi.h>
40767
40768@@ -490,7 +491,7 @@ static struct sonypi_device {
40769 spinlock_t fifo_lock;
40770 wait_queue_head_t fifo_proc_list;
40771 struct fasync_struct *fifo_async;
40772- int open_count;
40773+ local_t open_count;
40774 int model;
40775 struct input_dev *input_jog_dev;
40776 struct input_dev *input_key_dev;
40777@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
40778 static int sonypi_misc_release(struct inode *inode, struct file *file)
40779 {
40780 mutex_lock(&sonypi_device.lock);
40781- sonypi_device.open_count--;
40782+ local_dec(&sonypi_device.open_count);
40783 mutex_unlock(&sonypi_device.lock);
40784 return 0;
40785 }
40786@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
40787 {
40788 mutex_lock(&sonypi_device.lock);
40789 /* Flush input queue on first open */
40790- if (!sonypi_device.open_count)
40791+ if (!local_read(&sonypi_device.open_count))
40792 kfifo_reset(&sonypi_device.fifo);
40793- sonypi_device.open_count++;
40794+ local_inc(&sonypi_device.open_count);
40795 mutex_unlock(&sonypi_device.lock);
40796
40797 return 0;
40798diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
40799index 565a947..dcdc06e 100644
40800--- a/drivers/char/tpm/tpm_acpi.c
40801+++ b/drivers/char/tpm/tpm_acpi.c
40802@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
40803 virt = acpi_os_map_iomem(start, len);
40804 if (!virt) {
40805 kfree(log->bios_event_log);
40806+ log->bios_event_log = NULL;
40807 printk("%s: ERROR - Unable to map memory\n", __func__);
40808 return -EIO;
40809 }
40810
40811- memcpy_fromio(log->bios_event_log, virt, len);
40812+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
40813
40814 acpi_os_unmap_iomem(virt, len);
40815 return 0;
40816diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
40817index 59f7cb2..bac8b6d 100644
40818--- a/drivers/char/tpm/tpm_eventlog.c
40819+++ b/drivers/char/tpm/tpm_eventlog.c
40820@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
40821 event = addr;
40822
40823 if ((event->event_type == 0 && event->event_size == 0) ||
40824- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
40825+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
40826 return NULL;
40827
40828 return addr;
40829@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
40830 return NULL;
40831
40832 if ((event->event_type == 0 && event->event_size == 0) ||
40833- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
40834+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
40835 return NULL;
40836
40837 (*pos)++;
40838@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
40839 int i;
40840
40841 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
40842- seq_putc(m, data[i]);
40843+ if (!seq_putc(m, data[i]))
40844+ return -EFAULT;
40845
40846 return 0;
40847 }
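
The tpm_eventlog hunks above rewrite the bounds check so the attacker-influenced `event_size` is never added to a pointer: `addr + sizeof(...) + event_size` can wrap past the end of the address space and compare below `limit`, while `event_size >= limit - addr - sizeof(...)` keeps all arithmetic on the known-good side. A standalone sketch using unsigned integers to model the pointer math:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uintptr_t base  = 0xffffffffffff0000u;
        uintptr_t limit = 0xffffffffffff1000u;  /* 4 KiB window         */
        uintptr_t len   = UINTPTR_MAX - 0x100;  /* hostile length       */

        if (base + len >= limit)                /* addition wraps below limit */
            puts("old check: rejected");
        else
            puts("old check: ACCEPTED, overflowed past limit");

        if (len >= limit - base)                /* no wrap: limit > base */
            puts("new check: rejected");
        return 0;
    }
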
40848diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
40849index 60aafb8..10c08e0 100644
40850--- a/drivers/char/virtio_console.c
40851+++ b/drivers/char/virtio_console.c
40852@@ -684,7 +684,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
40853 if (to_user) {
40854 ssize_t ret;
40855
40856- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
40857+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
40858 if (ret)
40859 return -EFAULT;
40860 } else {
40861@@ -787,7 +787,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
40862 if (!port_has_data(port) && !port->host_connected)
40863 return 0;
40864
40865- return fill_readbuf(port, ubuf, count, true);
40866+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
40867 }
40868
40869 static int wait_port_writable(struct port *port, bool nonblock)
40870diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
40871index 57a078e..c17cde8 100644
40872--- a/drivers/clk/clk-composite.c
40873+++ b/drivers/clk/clk-composite.c
40874@@ -146,7 +146,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
40875 struct clk *clk;
40876 struct clk_init_data init;
40877 struct clk_composite *composite;
40878- struct clk_ops *clk_composite_ops;
40879+ clk_ops_no_const *clk_composite_ops;
40880
40881 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
40882 if (!composite) {
40883diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
40884index dd3a78c..386d49c 100644
40885--- a/drivers/clk/socfpga/clk-gate.c
40886+++ b/drivers/clk/socfpga/clk-gate.c
40887@@ -22,6 +22,7 @@
40888 #include <linux/mfd/syscon.h>
40889 #include <linux/of.h>
40890 #include <linux/regmap.h>
40891+#include <asm/pgtable.h>
40892
40893 #include "clk.h"
40894
40895@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
40896 return 0;
40897 }
40898
40899-static struct clk_ops gateclk_ops = {
40900+static clk_ops_no_const gateclk_ops __read_only = {
40901 .prepare = socfpga_clk_prepare,
40902 .recalc_rate = socfpga_clk_recalc_rate,
40903 .get_parent = socfpga_clk_get_parent,
40904@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
40905 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
40906 socfpga_clk->hw.bit_idx = clk_gate[1];
40907
40908- gateclk_ops.enable = clk_gate_ops.enable;
40909- gateclk_ops.disable = clk_gate_ops.disable;
40910+ pax_open_kernel();
40911+ *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
40912+ *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
40913+ pax_close_kernel();
40914 }
40915
40916 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
40917diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
40918index de6da95..c98278b 100644
40919--- a/drivers/clk/socfpga/clk-pll.c
40920+++ b/drivers/clk/socfpga/clk-pll.c
40921@@ -21,6 +21,7 @@
40922 #include <linux/io.h>
40923 #include <linux/of.h>
40924 #include <linux/of_address.h>
40925+#include <asm/pgtable.h>
40926
40927 #include "clk.h"
40928
40929@@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
40930 CLK_MGR_PLL_CLK_SRC_MASK;
40931 }
40932
40933-static struct clk_ops clk_pll_ops = {
40934+static clk_ops_no_const clk_pll_ops __read_only = {
40935 .recalc_rate = clk_pll_recalc_rate,
40936 .get_parent = clk_pll_get_parent,
40937 };
40938@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
40939 pll_clk->hw.hw.init = &init;
40940
40941 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
40942- clk_pll_ops.enable = clk_gate_ops.enable;
40943- clk_pll_ops.disable = clk_gate_ops.disable;
40944+ pax_open_kernel();
40945+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
40946+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
40947+ pax_close_kernel();
40948
40949 clk = clk_register(NULL, &pll_clk->hw.hw);
40950 if (WARN_ON(IS_ERR(clk))) {
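
The two socfpga clock hunks above show the recurring pax_open_kernel()/pax_close_kernel() pattern: under the constification assumed by the `__read_only` annotation, ops tables live in read-only memory, so the driver briefly lifts write protection and pokes the single slot through a `void **` cast rather than leaving the whole structure writable. A userspace model of the idea using mprotect; illustrative only, since pax_open_kernel() toggles CR0.WP/page attributes, not mprotect:

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct ops { int (*enable)(void); };

    static int real_enable(void) { return 42; }

    int main(void)
    {
        long pg = sysconf(_SC_PAGESIZE);
        struct ops *ops = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ops == MAP_FAILED)
            return 1;
        mprotect(ops, pg, PROT_READ);              /* table is normally read-only */

        mprotect(ops, pg, PROT_READ | PROT_WRITE); /* "pax_open_kernel()"  */
        *(void **)&ops->enable = (void *)real_enable;  /* same cast idiom as the patch */
        mprotect(ops, pg, PROT_READ);              /* "pax_close_kernel()" */

        printf("enable() = %d\n", ops->enable());
        return 0;
    }
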
40951diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
40952index b0c18ed..1713a80 100644
40953--- a/drivers/cpufreq/acpi-cpufreq.c
40954+++ b/drivers/cpufreq/acpi-cpufreq.c
40955@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
40956 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
40957 per_cpu(acfreq_data, cpu) = data;
40958
40959- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
40960- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
40961+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
40962+ pax_open_kernel();
40963+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
40964+ pax_close_kernel();
40965+ }
40966
40967 result = acpi_processor_register_performance(data->acpi_data, cpu);
40968 if (result)
40969@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
40970 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
40971 break;
40972 case ACPI_ADR_SPACE_FIXED_HARDWARE:
40973- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
40974+ pax_open_kernel();
40975+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
40976+ pax_close_kernel();
40977 break;
40978 default:
40979 break;
40980@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
40981 if (!msrs)
40982 return;
40983
40984- acpi_cpufreq_driver.boost_supported = true;
40985- acpi_cpufreq_driver.boost_enabled = boost_state(0);
40986+ pax_open_kernel();
40987+ *(bool *)&acpi_cpufreq_driver.boost_supported = true;
40988+ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
40989+ pax_close_kernel();
40990
40991 cpu_notifier_register_begin();
40992
40993diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
40994index 6f02485..13684ae 100644
40995--- a/drivers/cpufreq/cpufreq.c
40996+++ b/drivers/cpufreq/cpufreq.c
40997@@ -2100,7 +2100,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
40998 }
40999
41000 mutex_lock(&cpufreq_governor_mutex);
41001- list_del(&governor->governor_list);
41002+ pax_list_del(&governor->governor_list);
41003 mutex_unlock(&cpufreq_governor_mutex);
41004 return;
41005 }
41006@@ -2316,7 +2316,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
41007 return NOTIFY_OK;
41008 }
41009
41010-static struct notifier_block __refdata cpufreq_cpu_notifier = {
41011+static struct notifier_block cpufreq_cpu_notifier = {
41012 .notifier_call = cpufreq_cpu_callback,
41013 };
41014
41015@@ -2356,13 +2356,17 @@ int cpufreq_boost_trigger_state(int state)
41016 return 0;
41017
41018 write_lock_irqsave(&cpufreq_driver_lock, flags);
41019- cpufreq_driver->boost_enabled = state;
41020+ pax_open_kernel();
41021+ *(bool *)&cpufreq_driver->boost_enabled = state;
41022+ pax_close_kernel();
41023 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
41024
41025 ret = cpufreq_driver->set_boost(state);
41026 if (ret) {
41027 write_lock_irqsave(&cpufreq_driver_lock, flags);
41028- cpufreq_driver->boost_enabled = !state;
41029+ pax_open_kernel();
41030+ *(bool *)&cpufreq_driver->boost_enabled = !state;
41031+ pax_close_kernel();
41032 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
41033
41034 pr_err("%s: Cannot %s BOOST\n",
41035@@ -2419,8 +2423,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
41036
41037 pr_debug("trying to register driver %s\n", driver_data->name);
41038
41039- if (driver_data->setpolicy)
41040- driver_data->flags |= CPUFREQ_CONST_LOOPS;
41041+ if (driver_data->setpolicy) {
41042+ pax_open_kernel();
41043+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
41044+ pax_close_kernel();
41045+ }
41046
41047 write_lock_irqsave(&cpufreq_driver_lock, flags);
41048 if (cpufreq_driver) {
41049@@ -2435,8 +2442,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
41050 * Check if driver provides function to enable boost -
41051 * if not, use cpufreq_boost_set_sw as default
41052 */
41053- if (!cpufreq_driver->set_boost)
41054- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
41055+ if (!cpufreq_driver->set_boost) {
41056+ pax_open_kernel();
41057+ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
41058+ pax_close_kernel();
41059+ }
41060
41061 ret = cpufreq_sysfs_create_file(&boost.attr);
41062 if (ret) {
41063diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
41064index 1b44496..b80ff5e 100644
41065--- a/drivers/cpufreq/cpufreq_governor.c
41066+++ b/drivers/cpufreq/cpufreq_governor.c
41067@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
41068 struct dbs_data *dbs_data;
41069 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
41070 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
41071- struct od_ops *od_ops = NULL;
41072+ const struct od_ops *od_ops = NULL;
41073 struct od_dbs_tuners *od_tuners = NULL;
41074 struct cs_dbs_tuners *cs_tuners = NULL;
41075 struct cpu_dbs_common_info *cpu_cdbs;
41076@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
41077
41078 if ((cdata->governor == GOV_CONSERVATIVE) &&
41079 (!policy->governor->initialized)) {
41080- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
41081+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
41082
41083 cpufreq_register_notifier(cs_ops->notifier_block,
41084 CPUFREQ_TRANSITION_NOTIFIER);
41085@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
41086
41087 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
41088 (policy->governor->initialized == 1)) {
41089- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
41090+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
41091
41092 cpufreq_unregister_notifier(cs_ops->notifier_block,
41093 CPUFREQ_TRANSITION_NOTIFIER);
41094diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
41095index cc401d1..8197340 100644
41096--- a/drivers/cpufreq/cpufreq_governor.h
41097+++ b/drivers/cpufreq/cpufreq_governor.h
41098@@ -212,7 +212,7 @@ struct common_dbs_data {
41099 void (*exit)(struct dbs_data *dbs_data);
41100
41101 /* Governor specific ops, see below */
41102- void *gov_ops;
41103+ const void *gov_ops;
41104 };
41105
41106 /* Governor Per policy data */
41107@@ -232,7 +232,7 @@ struct od_ops {
41108 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
41109 unsigned int freq_next, unsigned int relation);
41110 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
41111-};
41112+} __no_const;
41113
41114 struct cs_ops {
41115 struct notifier_block *notifier_block;
41116diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
41117index 18d4091..434be15 100644
41118--- a/drivers/cpufreq/cpufreq_ondemand.c
41119+++ b/drivers/cpufreq/cpufreq_ondemand.c
41120@@ -521,7 +521,7 @@ static void od_exit(struct dbs_data *dbs_data)
41121
41122 define_get_cpu_dbs_routines(od_cpu_dbs_info);
41123
41124-static struct od_ops od_ops = {
41125+static struct od_ops od_ops __read_only = {
41126 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
41127 .powersave_bias_target = generic_powersave_bias_target,
41128 .freq_increase = dbs_freq_increase,
41129@@ -576,14 +576,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
41130 (struct cpufreq_policy *, unsigned int, unsigned int),
41131 unsigned int powersave_bias)
41132 {
41133- od_ops.powersave_bias_target = f;
41134+ pax_open_kernel();
41135+ *(void **)&od_ops.powersave_bias_target = f;
41136+ pax_close_kernel();
41137 od_set_powersave_bias(powersave_bias);
41138 }
41139 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
41140
41141 void od_unregister_powersave_bias_handler(void)
41142 {
41143- od_ops.powersave_bias_target = generic_powersave_bias_target;
41144+ pax_open_kernel();
41145+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
41146+ pax_close_kernel();
41147 od_set_powersave_bias(0);
41148 }
41149 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
41150diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
41151index 86631cb..c34ec78 100644
41152--- a/drivers/cpufreq/intel_pstate.c
41153+++ b/drivers/cpufreq/intel_pstate.c
41154@@ -121,10 +121,10 @@ struct pstate_funcs {
41155 struct cpu_defaults {
41156 struct pstate_adjust_policy pid_policy;
41157 struct pstate_funcs funcs;
41158-};
41159+} __do_const;
41160
41161 static struct pstate_adjust_policy pid_params;
41162-static struct pstate_funcs pstate_funcs;
41163+static struct pstate_funcs *pstate_funcs;
41164
41165 struct perf_limits {
41166 int no_turbo;
41167@@ -526,7 +526,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
41168
41169 cpu->pstate.current_pstate = pstate;
41170
41171- pstate_funcs.set(cpu, pstate);
41172+ pstate_funcs->set(cpu, pstate);
41173 }
41174
41175 static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
41176@@ -546,12 +546,12 @@ static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
41177
41178 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
41179 {
41180- cpu->pstate.min_pstate = pstate_funcs.get_min();
41181- cpu->pstate.max_pstate = pstate_funcs.get_max();
41182- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
41183+ cpu->pstate.min_pstate = pstate_funcs->get_min();
41184+ cpu->pstate.max_pstate = pstate_funcs->get_max();
41185+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
41186
41187- if (pstate_funcs.get_vid)
41188- pstate_funcs.get_vid(cpu);
41189+ if (pstate_funcs->get_vid)
41190+ pstate_funcs->get_vid(cpu);
41191 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
41192 }
41193
41194@@ -838,9 +838,9 @@ static int intel_pstate_msrs_not_valid(void)
41195 rdmsrl(MSR_IA32_APERF, aperf);
41196 rdmsrl(MSR_IA32_MPERF, mperf);
41197
41198- if (!pstate_funcs.get_max() ||
41199- !pstate_funcs.get_min() ||
41200- !pstate_funcs.get_turbo())
41201+ if (!pstate_funcs->get_max() ||
41202+ !pstate_funcs->get_min() ||
41203+ !pstate_funcs->get_turbo())
41204 return -ENODEV;
41205
41206 rdmsrl(MSR_IA32_APERF, tmp);
41207@@ -854,7 +854,7 @@ static int intel_pstate_msrs_not_valid(void)
41208 return 0;
41209 }
41210
41211-static void copy_pid_params(struct pstate_adjust_policy *policy)
41212+static void copy_pid_params(const struct pstate_adjust_policy *policy)
41213 {
41214 pid_params.sample_rate_ms = policy->sample_rate_ms;
41215 pid_params.p_gain_pct = policy->p_gain_pct;
41216@@ -866,11 +866,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
41217
41218 static void copy_cpu_funcs(struct pstate_funcs *funcs)
41219 {
41220- pstate_funcs.get_max = funcs->get_max;
41221- pstate_funcs.get_min = funcs->get_min;
41222- pstate_funcs.get_turbo = funcs->get_turbo;
41223- pstate_funcs.set = funcs->set;
41224- pstate_funcs.get_vid = funcs->get_vid;
41225+ pstate_funcs = funcs;
41226 }
41227
41228 #if IS_ENABLED(CONFIG_ACPI)
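
The intel_pstate hunks above replace a writable static `struct pstate_funcs` filled by memberwise copies with a single pointer to the per-CPU-model table; the table itself can then stay in read-only memory, and copy_cpu_funcs() collapses to one assignment. A standalone model of the design choice; the tables are stand-ins for cpu_defaults:

    #include <stdio.h>

    struct funcs { int (*get_max)(void); };

    static int core_get_max(void) { return 32; }

    static const struct funcs core_funcs = { .get_max = core_get_max };

    static const struct funcs *pstate_funcs;  /* was: writable struct funcs pstate_funcs */

    static void copy_cpu_funcs(const struct funcs *f)
    {
        pstate_funcs = f;                     /* one pointer, no writable copies */
    }

    int main(void)
    {
        copy_cpu_funcs(&core_funcs);
        printf("max pstate = %d\n", pstate_funcs->get_max());
        return 0;
    }
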
41229diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
41230index 529cfd9..0e28fff 100644
41231--- a/drivers/cpufreq/p4-clockmod.c
41232+++ b/drivers/cpufreq/p4-clockmod.c
41233@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
41234 case 0x0F: /* Core Duo */
41235 case 0x16: /* Celeron Core */
41236 case 0x1C: /* Atom */
41237- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41238+ pax_open_kernel();
41239+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41240+ pax_close_kernel();
41241 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
41242 case 0x0D: /* Pentium M (Dothan) */
41243- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41244+ pax_open_kernel();
41245+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41246+ pax_close_kernel();
41247 /* fall through */
41248 case 0x09: /* Pentium M (Banias) */
41249 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
41250@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
41251
41252 /* on P-4s, the TSC runs with constant frequency independent whether
41253 * throttling is active or not. */
41254- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41255+ pax_open_kernel();
41256+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41257+ pax_close_kernel();
41258
41259 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
41260 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
41261diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
41262index 9bb42ba..b01b4a2 100644
41263--- a/drivers/cpufreq/sparc-us3-cpufreq.c
41264+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
41265@@ -18,14 +18,12 @@
41266 #include <asm/head.h>
41267 #include <asm/timer.h>
41268
41269-static struct cpufreq_driver *cpufreq_us3_driver;
41270-
41271 struct us3_freq_percpu_info {
41272 struct cpufreq_frequency_table table[4];
41273 };
41274
41275 /* Indexed by cpu number. */
41276-static struct us3_freq_percpu_info *us3_freq_table;
41277+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
41278
41279 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
41280 * in the Safari config register.
41281@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
41282
41283 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
41284 {
41285- if (cpufreq_us3_driver)
41286- us3_freq_target(policy, 0);
41287+ us3_freq_target(policy, 0);
41288
41289 return 0;
41290 }
41291
41292+static int __init us3_freq_init(void);
41293+static void __exit us3_freq_exit(void);
41294+
41295+static struct cpufreq_driver cpufreq_us3_driver = {
41296+ .init = us3_freq_cpu_init,
41297+ .verify = cpufreq_generic_frequency_table_verify,
41298+ .target_index = us3_freq_target,
41299+ .get = us3_freq_get,
41300+ .exit = us3_freq_cpu_exit,
41301+ .name = "UltraSPARC-III",
41302+
41303+};
41304+
41305 static int __init us3_freq_init(void)
41306 {
41307 unsigned long manuf, impl, ver;
41308- int ret;
41309
41310 if (tlb_type != cheetah && tlb_type != cheetah_plus)
41311 return -ENODEV;
41312@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
41313 (impl == CHEETAH_IMPL ||
41314 impl == CHEETAH_PLUS_IMPL ||
41315 impl == JAGUAR_IMPL ||
41316- impl == PANTHER_IMPL)) {
41317- struct cpufreq_driver *driver;
41318-
41319- ret = -ENOMEM;
41320- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
41321- if (!driver)
41322- goto err_out;
41323-
41324- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
41325- GFP_KERNEL);
41326- if (!us3_freq_table)
41327- goto err_out;
41328-
41329- driver->init = us3_freq_cpu_init;
41330- driver->verify = cpufreq_generic_frequency_table_verify;
41331- driver->target_index = us3_freq_target;
41332- driver->get = us3_freq_get;
41333- driver->exit = us3_freq_cpu_exit;
41334- strcpy(driver->name, "UltraSPARC-III");
41335-
41336- cpufreq_us3_driver = driver;
41337- ret = cpufreq_register_driver(driver);
41338- if (ret)
41339- goto err_out;
41340-
41341- return 0;
41342-
41343-err_out:
41344- if (driver) {
41345- kfree(driver);
41346- cpufreq_us3_driver = NULL;
41347- }
41348- kfree(us3_freq_table);
41349- us3_freq_table = NULL;
41350- return ret;
41351- }
41352+ impl == PANTHER_IMPL))
41353+ return cpufreq_register_driver(&cpufreq_us3_driver);
41354
41355 return -ENODEV;
41356 }
41357
41358 static void __exit us3_freq_exit(void)
41359 {
41360- if (cpufreq_us3_driver) {
41361- cpufreq_unregister_driver(cpufreq_us3_driver);
41362- kfree(cpufreq_us3_driver);
41363- cpufreq_us3_driver = NULL;
41364- kfree(us3_freq_table);
41365- us3_freq_table = NULL;
41366- }
41367+ cpufreq_unregister_driver(&cpufreq_us3_driver);
41368 }
41369
41370 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
41371diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
41372index 7d4a315..21bb886 100644
41373--- a/drivers/cpufreq/speedstep-centrino.c
41374+++ b/drivers/cpufreq/speedstep-centrino.c
41375@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
41376 !cpu_has(cpu, X86_FEATURE_EST))
41377 return -ENODEV;
41378
41379- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
41380- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
41381+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
41382+ pax_open_kernel();
41383+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
41384+ pax_close_kernel();
41385+ }
41386
41387 if (policy->cpu != 0)
41388 return -ENODEV;
41389diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
41390index 9634f20..e1499c7 100644
41391--- a/drivers/cpuidle/driver.c
41392+++ b/drivers/cpuidle/driver.c
41393@@ -205,7 +205,7 @@ static int poll_idle(struct cpuidle_device *dev,
41394
41395 static void poll_idle_init(struct cpuidle_driver *drv)
41396 {
41397- struct cpuidle_state *state = &drv->states[0];
41398+ cpuidle_state_no_const *state = &drv->states[0];
41399
41400 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
41401 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
41402diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
41403index ca89412..a7b9c49 100644
41404--- a/drivers/cpuidle/governor.c
41405+++ b/drivers/cpuidle/governor.c
41406@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
41407 mutex_lock(&cpuidle_lock);
41408 if (__cpuidle_find_governor(gov->name) == NULL) {
41409 ret = 0;
41410- list_add_tail(&gov->governor_list, &cpuidle_governors);
41411+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
41412 if (!cpuidle_curr_governor ||
41413 cpuidle_curr_governor->rating < gov->rating)
41414 cpuidle_switch_governor(gov);
41415diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
41416index efe2f17..b8124f9 100644
41417--- a/drivers/cpuidle/sysfs.c
41418+++ b/drivers/cpuidle/sysfs.c
41419@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
41420 NULL
41421 };
41422
41423-static struct attribute_group cpuidle_attr_group = {
41424+static attribute_group_no_const cpuidle_attr_group = {
41425 .attrs = cpuidle_default_attrs,
41426 .name = "cpuidle",
41427 };
41428diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
41429index 12fea3e..1e28f47 100644
41430--- a/drivers/crypto/hifn_795x.c
41431+++ b/drivers/crypto/hifn_795x.c
41432@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
41433 MODULE_PARM_DESC(hifn_pll_ref,
41434 "PLL reference clock (pci[freq] or ext[freq], default ext)");
41435
41436-static atomic_t hifn_dev_number;
41437+static atomic_unchecked_t hifn_dev_number;
41438
41439 #define ACRYPTO_OP_DECRYPT 0
41440 #define ACRYPTO_OP_ENCRYPT 1
41441@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
41442 goto err_out_disable_pci_device;
41443
41444 snprintf(name, sizeof(name), "hifn%d",
41445- atomic_inc_return(&hifn_dev_number)-1);
41446+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
41447
41448 err = pci_request_regions(pdev, name);
41449 if (err)
41450diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
41451index 9f90369..bfcacdb 100644
41452--- a/drivers/devfreq/devfreq.c
41453+++ b/drivers/devfreq/devfreq.c
41454@@ -673,7 +673,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
41455 goto err_out;
41456 }
41457
41458- list_add(&governor->node, &devfreq_governor_list);
41459+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
41460
41461 list_for_each_entry(devfreq, &devfreq_list, node) {
41462 int ret = 0;
41463@@ -761,7 +761,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
41464 }
41465 }
41466
41467- list_del(&governor->node);
41468+ pax_list_del((struct list_head *)&governor->node);
41469 err_out:
41470 mutex_unlock(&devfreq_list_lock);
41471
41472diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
41473index b35007e..55ad549 100644
41474--- a/drivers/dma/sh/shdma-base.c
41475+++ b/drivers/dma/sh/shdma-base.c
41476@@ -267,8 +267,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
41477 schan->slave_id = -EINVAL;
41478 }
41479
41480- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
41481- sdev->desc_size, GFP_KERNEL);
41482+ schan->desc = kcalloc(sdev->desc_size,
41483+ NR_DESCS_PER_CHANNEL, GFP_KERNEL);
41484 if (!schan->desc) {
41485 ret = -ENOMEM;
41486 goto edescalloc;
41487diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
41488index 146d5df..3c14970 100644
41489--- a/drivers/dma/sh/shdmac.c
41490+++ b/drivers/dma/sh/shdmac.c
41491@@ -514,7 +514,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
41492 return ret;
41493 }
41494
41495-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
41496+static struct notifier_block sh_dmae_nmi_notifier = {
41497 .notifier_call = sh_dmae_nmi_handler,
41498
41499 /* Run before NMI debug handler and KGDB */
41500diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
41501index 592af5f..bb1d583 100644
41502--- a/drivers/edac/edac_device.c
41503+++ b/drivers/edac/edac_device.c
41504@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
41505 */
41506 int edac_device_alloc_index(void)
41507 {
41508- static atomic_t device_indexes = ATOMIC_INIT(0);
41509+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
41510
41511- return atomic_inc_return(&device_indexes) - 1;
41512+ return atomic_inc_return_unchecked(&device_indexes) - 1;
41513 }
41514 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
41515
41516diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
41517index 01fae82..1dd8289 100644
41518--- a/drivers/edac/edac_mc_sysfs.c
41519+++ b/drivers/edac/edac_mc_sysfs.c
41520@@ -152,7 +152,7 @@ static const char * const edac_caps[] = {
41521 struct dev_ch_attribute {
41522 struct device_attribute attr;
41523 int channel;
41524-};
41525+} __do_const;
41526
41527 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
41528 struct dev_ch_attribute dev_attr_legacy_##_name = \
41529@@ -1009,14 +1009,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
41530 }
41531
41532 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
41533+ pax_open_kernel();
41534 if (mci->get_sdram_scrub_rate) {
41535- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
41536- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
41537+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
41538+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
41539 }
41540 if (mci->set_sdram_scrub_rate) {
41541- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
41542- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
41543+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
41544+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
41545 }
41546+ pax_close_kernel();
41547 err = device_create_file(&mci->dev,
41548 &dev_attr_sdram_scrub_rate);
41549 if (err) {
41550diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
41551index 2cf44b4d..6dd2dc7 100644
41552--- a/drivers/edac/edac_pci.c
41553+++ b/drivers/edac/edac_pci.c
41554@@ -29,7 +29,7 @@
41555
41556 static DEFINE_MUTEX(edac_pci_ctls_mutex);
41557 static LIST_HEAD(edac_pci_list);
41558-static atomic_t pci_indexes = ATOMIC_INIT(0);
41559+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
41560
41561 /*
41562 * edac_pci_alloc_ctl_info
41563@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
41564 */
41565 int edac_pci_alloc_index(void)
41566 {
41567- return atomic_inc_return(&pci_indexes) - 1;
41568+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
41569 }
41570 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
41571
41572diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
41573index e8658e4..22746d6 100644
41574--- a/drivers/edac/edac_pci_sysfs.c
41575+++ b/drivers/edac/edac_pci_sysfs.c
41576@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
41577 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
41578 static int edac_pci_poll_msec = 1000; /* one second workq period */
41579
41580-static atomic_t pci_parity_count = ATOMIC_INIT(0);
41581-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
41582+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
41583+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
41584
41585 static struct kobject *edac_pci_top_main_kobj;
41586 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
41587@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
41588 void *value;
41589 ssize_t(*show) (void *, char *);
41590 ssize_t(*store) (void *, const char *, size_t);
41591-};
41592+} __do_const;
41593
41594 /* Set of show/store abstract level functions for PCI Parity object */
41595 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
41596@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41597 edac_printk(KERN_CRIT, EDAC_PCI,
41598 "Signaled System Error on %s\n",
41599 pci_name(dev));
41600- atomic_inc(&pci_nonparity_count);
41601+ atomic_inc_unchecked(&pci_nonparity_count);
41602 }
41603
41604 if (status & (PCI_STATUS_PARITY)) {
41605@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41606 "Master Data Parity Error on %s\n",
41607 pci_name(dev));
41608
41609- atomic_inc(&pci_parity_count);
41610+ atomic_inc_unchecked(&pci_parity_count);
41611 }
41612
41613 if (status & (PCI_STATUS_DETECTED_PARITY)) {
41614@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41615 "Detected Parity Error on %s\n",
41616 pci_name(dev));
41617
41618- atomic_inc(&pci_parity_count);
41619+ atomic_inc_unchecked(&pci_parity_count);
41620 }
41621 }
41622
41623@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41624 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
41625 "Signaled System Error on %s\n",
41626 pci_name(dev));
41627- atomic_inc(&pci_nonparity_count);
41628+ atomic_inc_unchecked(&pci_nonparity_count);
41629 }
41630
41631 if (status & (PCI_STATUS_PARITY)) {
41632@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41633 "Master Data Parity Error on "
41634 "%s\n", pci_name(dev));
41635
41636- atomic_inc(&pci_parity_count);
41637+ atomic_inc_unchecked(&pci_parity_count);
41638 }
41639
41640 if (status & (PCI_STATUS_DETECTED_PARITY)) {
41641@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41642 "Detected Parity Error on %s\n",
41643 pci_name(dev));
41644
41645- atomic_inc(&pci_parity_count);
41646+ atomic_inc_unchecked(&pci_parity_count);
41647 }
41648 }
41649 }
41650@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
41651 if (!check_pci_errors)
41652 return;
41653
41654- before_count = atomic_read(&pci_parity_count);
41655+ before_count = atomic_read_unchecked(&pci_parity_count);
41656
41657 /* scan all PCI devices looking for a Parity Error on devices and
41658 * bridges.
41659@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
41660 /* Only if operator has selected panic on PCI Error */
41661 if (edac_pci_get_panic_on_pe()) {
41662 /* If the count is different 'after' from 'before' */
41663- if (before_count != atomic_read(&pci_parity_count))
41664+ if (before_count != atomic_read_unchecked(&pci_parity_count))
41665 panic("EDAC: PCI Parity Error");
41666 }
41667 }
41668diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
41669index 51b7e3a..aa8a3e8 100644
41670--- a/drivers/edac/mce_amd.h
41671+++ b/drivers/edac/mce_amd.h
41672@@ -77,7 +77,7 @@ struct amd_decoder_ops {
41673 bool (*mc0_mce)(u16, u8);
41674 bool (*mc1_mce)(u16, u8);
41675 bool (*mc2_mce)(u16, u8);
41676-};
41677+} __no_const;
41678
41679 void amd_report_gart_errors(bool);
41680 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
41681diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
41682index 57ea7f4..af06b76 100644
41683--- a/drivers/firewire/core-card.c
41684+++ b/drivers/firewire/core-card.c
41685@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
41686 const struct fw_card_driver *driver,
41687 struct device *device)
41688 {
41689- static atomic_t index = ATOMIC_INIT(-1);
41690+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
41691
41692- card->index = atomic_inc_return(&index);
41693+ card->index = atomic_inc_return_unchecked(&index);
41694 card->driver = driver;
41695 card->device = device;
41696 card->current_tlabel = 0;
41697@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
41698
41699 void fw_core_remove_card(struct fw_card *card)
41700 {
41701- struct fw_card_driver dummy_driver = dummy_driver_template;
41702+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
41703
41704 card->driver->update_phy_reg(card, 4,
41705 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
41706diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
41707index 2c6d5e1..a2cca6b 100644
41708--- a/drivers/firewire/core-device.c
41709+++ b/drivers/firewire/core-device.c
41710@@ -253,7 +253,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
41711 struct config_rom_attribute {
41712 struct device_attribute attr;
41713 u32 key;
41714-};
41715+} __do_const;
41716
41717 static ssize_t show_immediate(struct device *dev,
41718 struct device_attribute *dattr, char *buf)
41719diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
41720index eb6935c..3cc2bfa 100644
41721--- a/drivers/firewire/core-transaction.c
41722+++ b/drivers/firewire/core-transaction.c
41723@@ -38,6 +38,7 @@
41724 #include <linux/timer.h>
41725 #include <linux/types.h>
41726 #include <linux/workqueue.h>
41727+#include <linux/sched.h>
41728
41729 #include <asm/byteorder.h>
41730
41731diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
41732index e1480ff6..1a429bd 100644
41733--- a/drivers/firewire/core.h
41734+++ b/drivers/firewire/core.h
41735@@ -111,6 +111,7 @@ struct fw_card_driver {
41736
41737 int (*stop_iso)(struct fw_iso_context *ctx);
41738 };
41739+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
41740
41741 void fw_card_initialize(struct fw_card *card,
41742 const struct fw_card_driver *driver, struct device *device);
41743diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
41744index a66a321..f6caf20 100644
41745--- a/drivers/firewire/ohci.c
41746+++ b/drivers/firewire/ohci.c
41747@@ -2056,10 +2056,12 @@ static void bus_reset_work(struct work_struct *work)
41748 be32_to_cpu(ohci->next_header));
41749 }
41750
41751+#ifndef CONFIG_GRKERNSEC
41752 if (param_remote_dma) {
41753 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
41754 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
41755 }
41756+#endif
41757
41758 spin_unlock_irq(&ohci->lock);
41759
41760@@ -2591,8 +2593,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
41761 unsigned long flags;
41762 int n, ret = 0;
41763
41764+#ifndef CONFIG_GRKERNSEC
41765 if (param_remote_dma)
41766 return 0;
41767+#endif
41768
41769 /*
41770 * FIXME: Make sure this bitmask is cleared when we clear the busReset
41771diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
41772index 94a58a0..f5eba42 100644
41773--- a/drivers/firmware/dmi-id.c
41774+++ b/drivers/firmware/dmi-id.c
41775@@ -16,7 +16,7 @@
41776 struct dmi_device_attribute{
41777 struct device_attribute dev_attr;
41778 int field;
41779-};
41780+} __do_const;
41781 #define to_dmi_dev_attr(_dev_attr) \
41782 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
41783
41784diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
41785index 17afc51..0ef90cd 100644
41786--- a/drivers/firmware/dmi_scan.c
41787+++ b/drivers/firmware/dmi_scan.c
41788@@ -835,7 +835,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
41789 if (buf == NULL)
41790 return -1;
41791
41792- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
41793+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
41794
41795 dmi_unmap(buf);
41796 return 0;
41797diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
41798index 1491dd4..aa910db 100644
41799--- a/drivers/firmware/efi/cper.c
41800+++ b/drivers/firmware/efi/cper.c
41801@@ -41,12 +41,12 @@
41802 */
41803 u64 cper_next_record_id(void)
41804 {
41805- static atomic64_t seq;
41806+ static atomic64_unchecked_t seq;
41807
41808- if (!atomic64_read(&seq))
41809- atomic64_set(&seq, ((u64)get_seconds()) << 32);
41810+ if (!atomic64_read_unchecked(&seq))
41811+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
41812
41813- return atomic64_inc_return(&seq);
41814+ return atomic64_inc_return_unchecked(&seq);
41815 }
41816 EXPORT_SYMBOL_GPL(cper_next_record_id);
41817
41818diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
41819index dc79346..b39bd69 100644
41820--- a/drivers/firmware/efi/efi.c
41821+++ b/drivers/firmware/efi/efi.c
41822@@ -122,14 +122,16 @@ static struct attribute_group efi_subsys_attr_group = {
41823 };
41824
41825 static struct efivars generic_efivars;
41826-static struct efivar_operations generic_ops;
41827+static efivar_operations_no_const generic_ops __read_only;
41828
41829 static int generic_ops_register(void)
41830 {
41831- generic_ops.get_variable = efi.get_variable;
41832- generic_ops.set_variable = efi.set_variable;
41833- generic_ops.get_next_variable = efi.get_next_variable;
41834- generic_ops.query_variable_store = efi_query_variable_store;
41835+ pax_open_kernel();
41836+ *(void **)&generic_ops.get_variable = efi.get_variable;
41837+ *(void **)&generic_ops.set_variable = efi.set_variable;
41838+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
41839+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
41840+ pax_close_kernel();
41841
41842 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
41843 }
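generic_ops becomes __read_only above, so its one-time initialization has to be bracketed by pax_open_kernel()/pax_close_kernel(), which on x86 temporarily clear and restore CR0.WP so the kernel may write through a read-only mapping; the *(void **)& casts discard the const that the constify plugin adds to the members. A sketch of the bracketing pattern with stubbed helpers (the stubs model only the shape, not the real implementation):

struct ops { void (*get)(void); };

static void pax_open_kernel(void)  { /* e.g. clear CR0.WP on x86 */ }
static void pax_close_kernel(void) { /* restore CR0.WP */ }

static struct ops generic_ops; /* __read_only in the patched kernel */

static void register_ops(void (*get)(void))
{
	pax_open_kernel();
	*(void **)&generic_ops.get = get; /* cast strips plugin-added const */
	pax_close_kernel();
}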
41844diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
41845index 463c565..02a5640 100644
41846--- a/drivers/firmware/efi/efivars.c
41847+++ b/drivers/firmware/efi/efivars.c
41848@@ -588,7 +588,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
41849 static int
41850 create_efivars_bin_attributes(void)
41851 {
41852- struct bin_attribute *attr;
41853+ bin_attribute_no_const *attr;
41854 int error;
41855
41856 /* new_var */
41857diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
41858index 2f569aa..c95f4fb 100644
41859--- a/drivers/firmware/google/memconsole.c
41860+++ b/drivers/firmware/google/memconsole.c
41861@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
41862 if (!found_memconsole())
41863 return -ENODEV;
41864
41865- memconsole_bin_attr.size = memconsole_length;
41866+ pax_open_kernel();
41867+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
41868+ pax_close_kernel();
41869+
41870 return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
41871 }
41872
41873diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
41874index cde3605..8b69df7 100644
41875--- a/drivers/gpio/gpio-em.c
41876+++ b/drivers/gpio/gpio-em.c
41877@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
41878 struct em_gio_priv *p;
41879 struct resource *io[2], *irq[2];
41880 struct gpio_chip *gpio_chip;
41881- struct irq_chip *irq_chip;
41882+ irq_chip_no_const *irq_chip;
41883 const char *name = dev_name(&pdev->dev);
41884 int ret;
41885
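irq_chip_no_const is the escape hatch that pairs with constification: the struct type as a whole is made read-only by the plugin, but this particular instance is still legitimately written during probe, so it is declared through a *_no_const typedef. A sketch of the convention, assuming the constify plugin's attributes; with no plugin both macros expand to nothing.

#ifndef __do_const
#define __do_const /* consumed by the constify plugin, if present */
#endif
#ifndef __no_const
#define __no_const /* opts a single declaration out of constification */
#endif

struct irq_chip_like {
	void (*mask)(void);
} __do_const;

typedef struct irq_chip_like __no_const irq_chip_like_no_const;

static irq_chip_like_no_const probe_chip; /* written at probe time */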
41886diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
41887index 7030422..42a3fe9 100644
41888--- a/drivers/gpio/gpio-ich.c
41889+++ b/drivers/gpio/gpio-ich.c
41890@@ -94,7 +94,7 @@ struct ichx_desc {
41891 * this option allows driver caching written output values
41892 */
41893 bool use_outlvl_cache;
41894-};
41895+} __do_const;
41896
41897 static struct {
41898 spinlock_t lock;
41899diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
41900index b6ae89e..ac7349c 100644
41901--- a/drivers/gpio/gpio-rcar.c
41902+++ b/drivers/gpio/gpio-rcar.c
41903@@ -357,7 +357,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
41904 struct gpio_rcar_priv *p;
41905 struct resource *io, *irq;
41906 struct gpio_chip *gpio_chip;
41907- struct irq_chip *irq_chip;
41908+ irq_chip_no_const *irq_chip;
41909 struct device *dev = &pdev->dev;
41910 const char *name = dev_name(dev);
41911 int ret;
41912diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
41913index 66cbcc1..0c5e622 100644
41914--- a/drivers/gpio/gpio-vr41xx.c
41915+++ b/drivers/gpio/gpio-vr41xx.c
41916@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
41917 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
41918 maskl, pendl, maskh, pendh);
41919
41920- atomic_inc(&irq_err_count);
41921+ atomic_inc_unchecked(&irq_err_count);
41922
41923 return -EINVAL;
41924 }
41925diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
41926index 2ebc907..01bdd6e 100644
41927--- a/drivers/gpio/gpiolib.c
41928+++ b/drivers/gpio/gpiolib.c
41929@@ -1482,8 +1482,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
41930 }
41931
41932 if (gpiochip->irqchip) {
41933- gpiochip->irqchip->irq_request_resources = NULL;
41934- gpiochip->irqchip->irq_release_resources = NULL;
41935+ pax_open_kernel();
41936+ *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
41937+ *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
41938+ pax_close_kernel();
41939 gpiochip->irqchip = NULL;
41940 }
41941 }
41942@@ -1549,8 +1551,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
41943 gpiochip->irqchip = NULL;
41944 return -EINVAL;
41945 }
41946- irqchip->irq_request_resources = gpiochip_irq_reqres;
41947- irqchip->irq_release_resources = gpiochip_irq_relres;
41948+
41949+ pax_open_kernel();
41950+ *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
41951+ *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
41952+ pax_close_kernel();
41953
41954 /*
41955 * Prepare the mapping since the irqchip shall be orthogonal to
41956diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
41957index fe94cc1..5e697b3 100644
41958--- a/drivers/gpu/drm/drm_crtc.c
41959+++ b/drivers/gpu/drm/drm_crtc.c
41960@@ -3584,7 +3584,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
41961 goto done;
41962 }
41963
41964- if (copy_to_user(&enum_ptr[copied].name,
41965+ if (copy_to_user(enum_ptr[copied].name,
41966 &prop_enum->name, DRM_PROP_NAME_LEN)) {
41967 ret = -EFAULT;
41968 goto done;
41969diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
41970index 8218078..9960928a 100644
41971--- a/drivers/gpu/drm/drm_drv.c
41972+++ b/drivers/gpu/drm/drm_drv.c
41973@@ -233,7 +233,7 @@ module_exit(drm_core_exit);
41974 /**
41975 * Copy and IOCTL return string to user space
41976 */
41977-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
41978+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
41979 {
41980 int len;
41981
41982@@ -342,7 +342,7 @@ long drm_ioctl(struct file *filp,
41983 struct drm_file *file_priv = filp->private_data;
41984 struct drm_device *dev;
41985 const struct drm_ioctl_desc *ioctl = NULL;
41986- drm_ioctl_t *func;
41987+ drm_ioctl_no_const_t func;
41988 unsigned int nr = DRM_IOCTL_NR(cmd);
41989 int retcode = -EINVAL;
41990 char stack_kdata[128];
41991diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
41992index 021fe5d..abc9ce6 100644
41993--- a/drivers/gpu/drm/drm_fops.c
41994+++ b/drivers/gpu/drm/drm_fops.c
41995@@ -88,7 +88,7 @@ int drm_open(struct inode *inode, struct file *filp)
41996 return PTR_ERR(minor);
41997
41998 dev = minor->dev;
41999- if (!dev->open_count++)
42000+ if (local_inc_return(&dev->open_count) == 1)
42001 need_setup = 1;
42002
42003 /* share address_space across all char-devs of a single device */
42004@@ -105,7 +105,7 @@ int drm_open(struct inode *inode, struct file *filp)
42005 return 0;
42006
42007 err_undo:
42008- dev->open_count--;
42009+ local_dec(&dev->open_count);
42010 drm_minor_release(minor);
42011 return retcode;
42012 }
42013@@ -427,7 +427,7 @@ int drm_release(struct inode *inode, struct file *filp)
42014
42015 mutex_lock(&drm_global_mutex);
42016
42017- DRM_DEBUG("open_count = %d\n", dev->open_count);
42018+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
42019
42020 if (dev->driver->preclose)
42021 dev->driver->preclose(dev, file_priv);
42022@@ -436,10 +436,10 @@ int drm_release(struct inode *inode, struct file *filp)
42023 * Begin inline drm_release
42024 */
42025
42026- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
42027+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
42028 task_pid_nr(current),
42029 (long)old_encode_dev(file_priv->minor->kdev->devt),
42030- dev->open_count);
42031+ local_read(&dev->open_count));
42032
42033 /* Release any auth tokens that might point to this file_priv,
42034 (do that under the drm_global_mutex) */
42035@@ -540,7 +540,7 @@ int drm_release(struct inode *inode, struct file *filp)
42036 * End inline drm_release
42037 */
42038
42039- if (!--dev->open_count) {
42040+ if (local_dec_and_test(&dev->open_count)) {
42041 retcode = drm_lastclose(dev);
42042 if (drm_device_is_unplugged(dev))
42043 drm_put_dev(dev);
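dev->open_count changes above from a plain int to a local_t, so the open/close accounting becomes a single atomic read-modify-write: local_inc_return(&c) == 1 is the race-free replacement for !c++, and local_dec_and_test() for !--c. A sketch of the same idea, with C11 atomics standing in for the kernel's local_t:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_long open_count;

static bool first_open(void)
{
	/* true only for the opener that takes the count from 0 to 1 */
	return atomic_fetch_add(&open_count, 1) + 1 == 1;
}

static bool last_close(void)
{
	/* true only for the closer that takes the count back to 0 */
	return atomic_fetch_sub(&open_count, 1) - 1 == 0;
}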
42044diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
42045index 3d2e91c..d31c4c9 100644
42046--- a/drivers/gpu/drm/drm_global.c
42047+++ b/drivers/gpu/drm/drm_global.c
42048@@ -36,7 +36,7 @@
42049 struct drm_global_item {
42050 struct mutex mutex;
42051 void *object;
42052- int refcount;
42053+ atomic_t refcount;
42054 };
42055
42056 static struct drm_global_item glob[DRM_GLOBAL_NUM];
42057@@ -49,7 +49,7 @@ void drm_global_init(void)
42058 struct drm_global_item *item = &glob[i];
42059 mutex_init(&item->mutex);
42060 item->object = NULL;
42061- item->refcount = 0;
42062+ atomic_set(&item->refcount, 0);
42063 }
42064 }
42065
42066@@ -59,7 +59,7 @@ void drm_global_release(void)
42067 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
42068 struct drm_global_item *item = &glob[i];
42069 BUG_ON(item->object != NULL);
42070- BUG_ON(item->refcount != 0);
42071+ BUG_ON(atomic_read(&item->refcount) != 0);
42072 }
42073 }
42074
42075@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
42076 struct drm_global_item *item = &glob[ref->global_type];
42077
42078 mutex_lock(&item->mutex);
42079- if (item->refcount == 0) {
42080+ if (atomic_read(&item->refcount) == 0) {
42081 item->object = kzalloc(ref->size, GFP_KERNEL);
42082 if (unlikely(item->object == NULL)) {
42083 ret = -ENOMEM;
42084@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
42085 goto out_err;
42086
42087 }
42088- ++item->refcount;
42089+ atomic_inc(&item->refcount);
42090 ref->object = item->object;
42091 mutex_unlock(&item->mutex);
42092 return 0;
42093@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
42094 struct drm_global_item *item = &glob[ref->global_type];
42095
42096 mutex_lock(&item->mutex);
42097- BUG_ON(item->refcount == 0);
42098+ BUG_ON(atomic_read(&item->refcount) == 0);
42099 BUG_ON(ref->object != item->object);
42100- if (--item->refcount == 0) {
42101+ if (atomic_dec_and_test(&item->refcount)) {
42102 ref->release(ref);
42103 item->object = NULL;
42104 }
42105diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
42106index 86feedd..cba70f5 100644
42107--- a/drivers/gpu/drm/drm_info.c
42108+++ b/drivers/gpu/drm/drm_info.c
42109@@ -73,10 +73,13 @@ int drm_vm_info(struct seq_file *m, void *data)
42110 struct drm_local_map *map;
42111 struct drm_map_list *r_list;
42112
42113- /* Hardcoded from _DRM_FRAME_BUFFER,
42114- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
42115- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
42116- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
42117+ static const char * const types[] = {
42118+ [_DRM_FRAME_BUFFER] = "FB",
42119+ [_DRM_REGISTERS] = "REG",
42120+ [_DRM_SHM] = "SHM",
42121+ [_DRM_AGP] = "AGP",
42122+ [_DRM_SCATTER_GATHER] = "SG",
42123+ [_DRM_CONSISTENT] = "PCI"};
42124 const char *type;
42125 int i;
42126
42127@@ -87,7 +90,7 @@ int drm_vm_info(struct seq_file *m, void *data)
42128 map = r_list->map;
42129 if (!map)
42130 continue;
42131- if (map->type < 0 || map->type > 5)
42132+ if (map->type >= ARRAY_SIZE(types))
42133 type = "??";
42134 else
42135 type = types[map->type];
42136@@ -259,7 +262,11 @@ int drm_vma_info(struct seq_file *m, void *data)
42137 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
42138 vma->vm_flags & VM_LOCKED ? 'l' : '-',
42139 vma->vm_flags & VM_IO ? 'i' : '-',
42140+#ifdef CONFIG_GRKERNSEC_HIDESYM
42141+ 0);
42142+#else
42143 vma->vm_pgoff);
42144+#endif
42145
42146 #if defined(__i386__)
42147 pgprot = pgprot_val(vma->vm_page_prot);
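The types[] rewrite above swaps a positional string table plus a magic "> 5" bound for designated initializers indexed by the map-type constants and an ARRAY_SIZE() check, so the table cannot drift out of sync with the enum and the bound follows the table automatically. A standalone sketch (the enum names here are illustrative):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum map_type { MT_FB, MT_REG, MT_SHM, MT_AGP, MT_SG, MT_PCI };

static const char * const types[] = {
	[MT_FB]  = "FB",  [MT_REG] = "REG", [MT_SHM] = "SHM",
	[MT_AGP] = "AGP", [MT_SG]  = "SG",  [MT_PCI] = "PCI",
};

static const char *type_name(unsigned int t)
{
	return t >= ARRAY_SIZE(types) ? "??" : types[t];
}

int main(void)
{
	printf("%s %s\n", type_name(MT_AGP), type_name(42)); /* AGP ?? */
	return 0;
}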
42148diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
42149index 2f4c4343..dd12cd2 100644
42150--- a/drivers/gpu/drm/drm_ioc32.c
42151+++ b/drivers/gpu/drm/drm_ioc32.c
42152@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
42153 request = compat_alloc_user_space(nbytes);
42154 if (!access_ok(VERIFY_WRITE, request, nbytes))
42155 return -EFAULT;
42156- list = (struct drm_buf_desc *) (request + 1);
42157+ list = (struct drm_buf_desc __user *) (request + 1);
42158
42159 if (__put_user(count, &request->count)
42160 || __put_user(list, &request->list))
42161@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
42162 request = compat_alloc_user_space(nbytes);
42163 if (!access_ok(VERIFY_WRITE, request, nbytes))
42164 return -EFAULT;
42165- list = (struct drm_buf_pub *) (request + 1);
42166+ list = (struct drm_buf_pub __user *) (request + 1);
42167
42168 if (__put_user(count, &request->count)
42169 || __put_user(list, &request->list))
42170@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
42171 return 0;
42172 }
42173
42174-drm_ioctl_compat_t *drm_compat_ioctls[] = {
42175+drm_ioctl_compat_t drm_compat_ioctls[] = {
42176 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
42177 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
42178 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
42179@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
42180 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42181 {
42182 unsigned int nr = DRM_IOCTL_NR(cmd);
42183- drm_ioctl_compat_t *fn;
42184 int ret;
42185
42186 /* Assume that ioctls without an explicit compat routine will just
42187@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42188 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
42189 return drm_ioctl(filp, cmd, arg);
42190
42191- fn = drm_compat_ioctls[nr];
42192-
42193- if (fn != NULL)
42194- ret = (*fn) (filp, cmd, arg);
42195+ if (drm_compat_ioctls[nr] != NULL)
42196+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
42197 else
42198 ret = drm_ioctl(filp, cmd, arg);
42199
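The table declaration above loses one level of indirection (drm_ioctl_compat_t *drm_compat_ioctls[] becomes drm_ioctl_compat_t drm_compat_ioctls[]); this is only type-correct because elsewhere in the patch, not shown in this excerpt, the drm_ioctl_compat_t typedef changes from a function type to a pointer-to-function type. A sketch of the two spellings, which yield the same array type:

typedef long drm_ioctl_fn_t(int cmd);            /* function type */
typedef long (*drm_ioctl_compat_ptr_t)(int cmd); /* pointer-to-function type */

static long ioc_version(int cmd) { return cmd; }

static drm_ioctl_fn_t *table_old[] = { ioc_version };       /* fn-type * */
static drm_ioctl_compat_ptr_t table_new[] = { ioc_version }; /* ptr typedef */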
42200diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
42201index 14d1646..99f9d49 100644
42202--- a/drivers/gpu/drm/drm_stub.c
42203+++ b/drivers/gpu/drm/drm_stub.c
42204@@ -455,7 +455,7 @@ void drm_unplug_dev(struct drm_device *dev)
42205
42206 drm_device_set_unplugged(dev);
42207
42208- if (dev->open_count == 0) {
42209+ if (local_read(&dev->open_count) == 0) {
42210 drm_put_dev(dev);
42211 }
42212 mutex_unlock(&drm_global_mutex);
42213diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
42214index 369b262..09ea3ab 100644
42215--- a/drivers/gpu/drm/drm_sysfs.c
42216+++ b/drivers/gpu/drm/drm_sysfs.c
42217@@ -505,7 +505,7 @@ static void drm_sysfs_release(struct device *dev)
42218 */
42219 int drm_sysfs_device_add(struct drm_minor *minor)
42220 {
42221- char *minor_str;
42222+ const char *minor_str;
42223 int r;
42224
42225 if (minor->type == DRM_MINOR_CONTROL)
42226diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
42227index d4d16ed..8fb0b51 100644
42228--- a/drivers/gpu/drm/i810/i810_drv.h
42229+++ b/drivers/gpu/drm/i810/i810_drv.h
42230@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
42231 int page_flipping;
42232
42233 wait_queue_head_t irq_queue;
42234- atomic_t irq_received;
42235- atomic_t irq_emitted;
42236+ atomic_unchecked_t irq_received;
42237+ atomic_unchecked_t irq_emitted;
42238
42239 int front_offset;
42240 } drm_i810_private_t;
42241diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
42242index d443441..ab091dd 100644
42243--- a/drivers/gpu/drm/i915/i915_dma.c
42244+++ b/drivers/gpu/drm/i915/i915_dma.c
42245@@ -1290,7 +1290,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
42246 * locking inversion with the driver load path. And the access here is
42247 * completely racy anyway. So don't bother with locking for now.
42248 */
42249- return dev->open_count == 0;
42250+ return local_read(&dev->open_count) == 0;
42251 }
42252
42253 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
42254diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
42255index 3a30133..ef4a743 100644
42256--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
42257+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
42258@@ -891,9 +891,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
42259
42260 static int
42261 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
42262- int count)
42263+ unsigned int count)
42264 {
42265- int i;
42266+ unsigned int i;
42267 unsigned relocs_total = 0;
42268 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
42269
42270diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
42271index 2e0613e..a8b94d9 100644
42272--- a/drivers/gpu/drm/i915/i915_ioc32.c
42273+++ b/drivers/gpu/drm/i915/i915_ioc32.c
42274@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
42275 (unsigned long)request);
42276 }
42277
42278-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
42279+static drm_ioctl_compat_t i915_compat_ioctls[] = {
42280 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
42281 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
42282 [DRM_I915_GETPARAM] = compat_i915_getparam,
42283@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
42284 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42285 {
42286 unsigned int nr = DRM_IOCTL_NR(cmd);
42287- drm_ioctl_compat_t *fn = NULL;
42288 int ret;
42289
42290 if (nr < DRM_COMMAND_BASE)
42291 return drm_compat_ioctl(filp, cmd, arg);
42292
42293- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
42294- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
42295-
42296- if (fn != NULL)
42297+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls)) {
42298+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
42299 ret = (*fn) (filp, cmd, arg);
42300- else
42301+ } else
42302 ret = drm_ioctl(filp, cmd, arg);
42303
42304 return ret;
42305diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
42306index f0be855..94e82d9 100644
42307--- a/drivers/gpu/drm/i915/intel_display.c
42308+++ b/drivers/gpu/drm/i915/intel_display.c
42309@@ -11604,13 +11604,13 @@ struct intel_quirk {
42310 int subsystem_vendor;
42311 int subsystem_device;
42312 void (*hook)(struct drm_device *dev);
42313-};
42314+} __do_const;
42315
42316 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
42317 struct intel_dmi_quirk {
42318 void (*hook)(struct drm_device *dev);
42319 const struct dmi_system_id (*dmi_id_list)[];
42320-};
42321+} __do_const;
42322
42323 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
42324 {
42325@@ -11618,18 +11618,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
42326 return 1;
42327 }
42328
42329-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
42330+static const struct dmi_system_id intel_dmi_quirks_table[] = {
42331 {
42332- .dmi_id_list = &(const struct dmi_system_id[]) {
42333- {
42334- .callback = intel_dmi_reverse_brightness,
42335- .ident = "NCR Corporation",
42336- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
42337- DMI_MATCH(DMI_PRODUCT_NAME, ""),
42338- },
42339- },
42340- { } /* terminating entry */
42341+ .callback = intel_dmi_reverse_brightness,
42342+ .ident = "NCR Corporation",
42343+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
42344+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
42345 },
42346+ },
42347+ { } /* terminating entry */
42348+};
42349+
42350+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
42351+ {
42352+ .dmi_id_list = &intel_dmi_quirks_table,
42353 .hook = quirk_invert_brightness,
42354 },
42355 };
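Hoisting the DMI match list into the named intel_dmi_quirks_table[] presumably lets both tables be plain const objects in .rodata: the outer quirk struct, now __do_const, references a named static array by address instead of an anonymous compound literal embedded in its initializer. A reduced sketch of the shape:

struct match { const char *vendor; };

static const struct match quirk_matches[] = {
	{ .vendor = "NCR Corporation" },
	{ } /* terminating entry */
};

static const struct quirk {
	const struct match (*list)[]; /* pointer to array, as in dmi_id_list */
} quirks[] = {
	{ .list = &quirk_matches },
};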
42356diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
42357index fe45321..836fdca 100644
42358--- a/drivers/gpu/drm/mga/mga_drv.h
42359+++ b/drivers/gpu/drm/mga/mga_drv.h
42360@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
42361 u32 clear_cmd;
42362 u32 maccess;
42363
42364- atomic_t vbl_received; /**< Number of vblanks received. */
42365+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
42366 wait_queue_head_t fence_queue;
42367- atomic_t last_fence_retired;
42368+ atomic_unchecked_t last_fence_retired;
42369 u32 next_fence_to_post;
42370
42371 unsigned int fb_cpp;
42372diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
42373index 729bfd5..ead8823 100644
42374--- a/drivers/gpu/drm/mga/mga_ioc32.c
42375+++ b/drivers/gpu/drm/mga/mga_ioc32.c
42376@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
42377 return 0;
42378 }
42379
42380-drm_ioctl_compat_t *mga_compat_ioctls[] = {
42381+drm_ioctl_compat_t mga_compat_ioctls[] = {
42382 [DRM_MGA_INIT] = compat_mga_init,
42383 [DRM_MGA_GETPARAM] = compat_mga_getparam,
42384 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
42385@@ -208,18 +208,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
42386 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42387 {
42388 unsigned int nr = DRM_IOCTL_NR(cmd);
42389- drm_ioctl_compat_t *fn = NULL;
42390 int ret;
42391
42392 if (nr < DRM_COMMAND_BASE)
42393 return drm_compat_ioctl(filp, cmd, arg);
42394
42395- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
42396- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
42397-
42398- if (fn != NULL)
42399+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls)) {
42400+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
42401 ret = (*fn) (filp, cmd, arg);
42402- else
42403+ } else
42404 ret = drm_ioctl(filp, cmd, arg);
42405
42406 return ret;
42407diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
42408index 1b071b8..de8601a 100644
42409--- a/drivers/gpu/drm/mga/mga_irq.c
42410+++ b/drivers/gpu/drm/mga/mga_irq.c
42411@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
42412 if (crtc != 0)
42413 return 0;
42414
42415- return atomic_read(&dev_priv->vbl_received);
42416+ return atomic_read_unchecked(&dev_priv->vbl_received);
42417 }
42418
42419
42420@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
42421 /* VBLANK interrupt */
42422 if (status & MGA_VLINEPEN) {
42423 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
42424- atomic_inc(&dev_priv->vbl_received);
42425+ atomic_inc_unchecked(&dev_priv->vbl_received);
42426 drm_handle_vblank(dev, 0);
42427 handled = 1;
42428 }
42429@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
42430 if ((prim_start & ~0x03) != (prim_end & ~0x03))
42431 MGA_WRITE(MGA_PRIMEND, prim_end);
42432
42433- atomic_inc(&dev_priv->last_fence_retired);
42434+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
42435 wake_up(&dev_priv->fence_queue);
42436 handled = 1;
42437 }
42438@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
42439 * using fences.
42440 */
42441 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
42442- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
42443+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
42444 - *sequence) <= (1 << 23)));
42445
42446 *sequence = cur_fence;
42447diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
42448index 8268a4c..5105708 100644
42449--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
42450+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
42451@@ -965,7 +965,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
42452 struct bit_table {
42453 const char id;
42454 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
42455-};
42456+} __no_const;
42457
42458 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
42459
42460diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
42461index b628add..57cd489 100644
42462--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
42463+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
42464@@ -99,7 +99,6 @@ struct nouveau_drm {
42465 struct drm_global_reference mem_global_ref;
42466 struct ttm_bo_global_ref bo_global_ref;
42467 struct ttm_bo_device bdev;
42468- atomic_t validate_sequence;
42469 int (*move)(struct nouveau_channel *,
42470 struct ttm_buffer_object *,
42471 struct ttm_mem_reg *, struct ttm_mem_reg *);
42472diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
42473index 462679a..88e32a7 100644
42474--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
42475+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
42476@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
42477 unsigned long arg)
42478 {
42479 unsigned int nr = DRM_IOCTL_NR(cmd);
42480- drm_ioctl_compat_t *fn = NULL;
42481+ drm_ioctl_compat_t fn = NULL;
42482 int ret;
42483
42484 if (nr < DRM_COMMAND_BASE)
42485diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
42486index ab0228f..20b756b 100644
42487--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
42488+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
42489@@ -130,11 +130,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
42490 }
42491
42492 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
42493- nouveau_vram_manager_init,
42494- nouveau_vram_manager_fini,
42495- nouveau_vram_manager_new,
42496- nouveau_vram_manager_del,
42497- nouveau_vram_manager_debug
42498+ .init = nouveau_vram_manager_init,
42499+ .takedown = nouveau_vram_manager_fini,
42500+ .get_node = nouveau_vram_manager_new,
42501+ .put_node = nouveau_vram_manager_del,
42502+ .debug = nouveau_vram_manager_debug
42503 };
42504
42505 static int
42506@@ -199,11 +199,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
42507 }
42508
42509 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
42510- nouveau_gart_manager_init,
42511- nouveau_gart_manager_fini,
42512- nouveau_gart_manager_new,
42513- nouveau_gart_manager_del,
42514- nouveau_gart_manager_debug
42515+ .init = nouveau_gart_manager_init,
42516+ .takedown = nouveau_gart_manager_fini,
42517+ .get_node = nouveau_gart_manager_new,
42518+ .put_node = nouveau_gart_manager_del,
42519+ .debug = nouveau_gart_manager_debug
42520 };
42521
42522 #include <core/subdev/vm/nv04.h>
42523@@ -271,11 +271,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
42524 }
42525
42526 const struct ttm_mem_type_manager_func nv04_gart_manager = {
42527- nv04_gart_manager_init,
42528- nv04_gart_manager_fini,
42529- nv04_gart_manager_new,
42530- nv04_gart_manager_del,
42531- nv04_gart_manager_debug
42532+ .init = nv04_gart_manager_init,
42533+ .takedown = nv04_gart_manager_fini,
42534+ .get_node = nv04_gart_manager_new,
42535+ .put_node = nv04_gart_manager_del,
42536+ .debug = nv04_gart_manager_debug
42537 };
42538
42539 int
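The ttm_mem_type_manager_func tables above switch from positional to designated initializers: each handler is bound to its named slot (.init, .takedown, .get_node, .put_node, .debug), so reordering or inserting fields in the ops struct can no longer silently wire a callback into the wrong slot. A reduced sketch:

struct mem_ops {
	int  (*init)(void);
	void (*takedown)(void);
	void (*debug)(const char *prefix);
};

static int  vram_init(void)           { return 0; }
static void vram_takedown(void)       { }
static void vram_debug(const char *p) { (void)p; }

static const struct mem_ops vram_manager = {
	.init     = vram_init,
	.takedown = vram_takedown,
	.debug    = vram_debug,
};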
42540diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
42541index 4f4c3fe..2cce716 100644
42542--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
42543+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
42544@@ -70,7 +70,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
42545 * locking inversion with the driver load path. And the access here is
42546 * completely racy anyway. So don't bother with locking for now.
42547 */
42548- return dev->open_count == 0;
42549+ return local_read(&dev->open_count) == 0;
42550 }
42551
42552 static const struct vga_switcheroo_client_ops
42553diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
42554index eb89653..613cf71 100644
42555--- a/drivers/gpu/drm/qxl/qxl_cmd.c
42556+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
42557@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
42558 int ret;
42559
42560 mutex_lock(&qdev->async_io_mutex);
42561- irq_num = atomic_read(&qdev->irq_received_io_cmd);
42562+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
42563 if (qdev->last_sent_io_cmd > irq_num) {
42564 if (intr)
42565 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
42566- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42567+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42568 else
42569 ret = wait_event_timeout(qdev->io_cmd_event,
42570- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42571+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42572 /* 0 is timeout, just bail the "hw" has gone away */
42573 if (ret <= 0)
42574 goto out;
42575- irq_num = atomic_read(&qdev->irq_received_io_cmd);
42576+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
42577 }
42578 outb(val, addr);
42579 qdev->last_sent_io_cmd = irq_num + 1;
42580 if (intr)
42581 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
42582- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42583+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42584 else
42585 ret = wait_event_timeout(qdev->io_cmd_event,
42586- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42587+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42588 out:
42589 if (ret > 0)
42590 ret = 0;
42591diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
42592index c3c2bbd..bc3c0fb 100644
42593--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
42594+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
42595@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
42596 struct drm_info_node *node = (struct drm_info_node *) m->private;
42597 struct qxl_device *qdev = node->minor->dev->dev_private;
42598
42599- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
42600- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
42601- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
42602- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
42603+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
42604+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
42605+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
42606+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
42607 seq_printf(m, "%d\n", qdev->irq_received_error);
42608 return 0;
42609 }
42610diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
42611index 36ed40b..0397633 100644
42612--- a/drivers/gpu/drm/qxl/qxl_drv.h
42613+++ b/drivers/gpu/drm/qxl/qxl_drv.h
42614@@ -290,10 +290,10 @@ struct qxl_device {
42615 unsigned int last_sent_io_cmd;
42616
42617 /* interrupt handling */
42618- atomic_t irq_received;
42619- atomic_t irq_received_display;
42620- atomic_t irq_received_cursor;
42621- atomic_t irq_received_io_cmd;
42622+ atomic_unchecked_t irq_received;
42623+ atomic_unchecked_t irq_received_display;
42624+ atomic_unchecked_t irq_received_cursor;
42625+ atomic_unchecked_t irq_received_io_cmd;
42626 unsigned irq_received_error;
42627 wait_queue_head_t display_event;
42628 wait_queue_head_t cursor_event;
42629diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
42630index b110883..dd06418 100644
42631--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
42632+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
42633@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
42634
42635 /* TODO copy slow path code from i915 */
42636 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
42637- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
42638+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
42639
42640 {
42641 struct qxl_drawable *draw = fb_cmd;
42642@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
42643 struct drm_qxl_reloc reloc;
42644
42645 if (copy_from_user(&reloc,
42646- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
42647+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
42648 sizeof(reloc))) {
42649 ret = -EFAULT;
42650 goto out_free_bos;
42651@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
42652
42653 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
42654
42655- struct drm_qxl_command *commands =
42656- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
42657+ struct drm_qxl_command __user *commands =
42658+ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
42659
42660- if (copy_from_user(&user_cmd, &commands[cmd_num],
42661+ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
42662 sizeof(user_cmd)))
42663 return -EFAULT;
42664
42665diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
42666index 0bf1e20..42a7310 100644
42667--- a/drivers/gpu/drm/qxl/qxl_irq.c
42668+++ b/drivers/gpu/drm/qxl/qxl_irq.c
42669@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
42670 if (!pending)
42671 return IRQ_NONE;
42672
42673- atomic_inc(&qdev->irq_received);
42674+ atomic_inc_unchecked(&qdev->irq_received);
42675
42676 if (pending & QXL_INTERRUPT_DISPLAY) {
42677- atomic_inc(&qdev->irq_received_display);
42678+ atomic_inc_unchecked(&qdev->irq_received_display);
42679 wake_up_all(&qdev->display_event);
42680 qxl_queue_garbage_collect(qdev, false);
42681 }
42682 if (pending & QXL_INTERRUPT_CURSOR) {
42683- atomic_inc(&qdev->irq_received_cursor);
42684+ atomic_inc_unchecked(&qdev->irq_received_cursor);
42685 wake_up_all(&qdev->cursor_event);
42686 }
42687 if (pending & QXL_INTERRUPT_IO_CMD) {
42688- atomic_inc(&qdev->irq_received_io_cmd);
42689+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
42690 wake_up_all(&qdev->io_cmd_event);
42691 }
42692 if (pending & QXL_INTERRUPT_ERROR) {
42693@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
42694 init_waitqueue_head(&qdev->io_cmd_event);
42695 INIT_WORK(&qdev->client_monitors_config_work,
42696 qxl_client_monitors_config_work_func);
42697- atomic_set(&qdev->irq_received, 0);
42698- atomic_set(&qdev->irq_received_display, 0);
42699- atomic_set(&qdev->irq_received_cursor, 0);
42700- atomic_set(&qdev->irq_received_io_cmd, 0);
42701+ atomic_set_unchecked(&qdev->irq_received, 0);
42702+ atomic_set_unchecked(&qdev->irq_received_display, 0);
42703+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
42704+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
42705 qdev->irq_received_error = 0;
42706 ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
42707 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
42708diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
42709index 71a1bae..cb1f103 100644
42710--- a/drivers/gpu/drm/qxl/qxl_ttm.c
42711+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
42712@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
42713 }
42714 }
42715
42716-static struct vm_operations_struct qxl_ttm_vm_ops;
42717+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
42718 static const struct vm_operations_struct *ttm_vm_ops;
42719
42720 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42721@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
42722 return r;
42723 if (unlikely(ttm_vm_ops == NULL)) {
42724 ttm_vm_ops = vma->vm_ops;
42725+ pax_open_kernel();
42726 qxl_ttm_vm_ops = *ttm_vm_ops;
42727 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
42728+ pax_close_kernel();
42729 }
42730 vma->vm_ops = &qxl_ttm_vm_ops;
42731 return 0;
42732@@ -555,25 +557,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
42733 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
42734 {
42735 #if defined(CONFIG_DEBUG_FS)
42736- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
42737- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
42738- unsigned i;
42739+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
42740+ {
42741+ .name = "qxl_mem_mm",
42742+ .show = &qxl_mm_dump_table,
42743+ },
42744+ {
42745+ .name = "qxl_surf_mm",
42746+ .show = &qxl_mm_dump_table,
42747+ }
42748+ };
42749
42750- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
42751- if (i == 0)
42752- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
42753- else
42754- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
42755- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
42756- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
42757- qxl_mem_types_list[i].driver_features = 0;
42758- if (i == 0)
42759- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
42760- else
42761- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
42762+ pax_open_kernel();
42763+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
42764+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
42765+ pax_close_kernel();
42766
42767- }
42768- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
42769+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
42770 #else
42771 return 0;
42772 #endif
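The qxl mmap hunk shows the clone-and-override pattern used for vm_operations: the driver copies TTM's ops table once, replaces only .fault, and installs the copy; since the copy is __read_only in the patched kernel, the one-time writes sit inside a pax_open_kernel()/pax_close_kernel() bracket. A sketch with stubbed helpers (the stubs model the bracketing only):

struct vm_ops { int (*fault)(void); };

static void pax_open_kernel(void)  { /* lift kernel write protection */ }
static void pax_close_kernel(void) { /* restore write protection */ }

static struct vm_ops cloned_ops;      /* __read_only in the patch */
static const struct vm_ops *base_ops;

static int my_fault(void) { return 0; }

static void install_ops(const struct vm_ops *orig)
{
	if (!base_ops) {
		base_ops = orig;
		pax_open_kernel();
		cloned_ops = *orig;          /* inherit every handler... */
		cloned_ops.fault = my_fault; /* ...then override just one */
		pax_close_kernel();
	}
}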
42773diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
42774index 59459fe..be26b31 100644
42775--- a/drivers/gpu/drm/r128/r128_cce.c
42776+++ b/drivers/gpu/drm/r128/r128_cce.c
42777@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
42778
42779 /* GH: Simple idle check.
42780 */
42781- atomic_set(&dev_priv->idle_count, 0);
42782+ atomic_set_unchecked(&dev_priv->idle_count, 0);
42783
42784 /* We don't support anything other than bus-mastering ring mode,
42785 * but the ring can be in either AGP or PCI space for the ring
42786diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
42787index 5bf3f5f..7000661 100644
42788--- a/drivers/gpu/drm/r128/r128_drv.h
42789+++ b/drivers/gpu/drm/r128/r128_drv.h
42790@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
42791 int is_pci;
42792 unsigned long cce_buffers_offset;
42793
42794- atomic_t idle_count;
42795+ atomic_unchecked_t idle_count;
42796
42797 int page_flipping;
42798 int current_page;
42799 u32 crtc_offset;
42800 u32 crtc_offset_cntl;
42801
42802- atomic_t vbl_received;
42803+ atomic_unchecked_t vbl_received;
42804
42805 u32 color_fmt;
42806 unsigned int front_offset;
42807diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
42808index 663f38c..c689495 100644
42809--- a/drivers/gpu/drm/r128/r128_ioc32.c
42810+++ b/drivers/gpu/drm/r128/r128_ioc32.c
42811@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
42812 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
42813 }
42814
42815-drm_ioctl_compat_t *r128_compat_ioctls[] = {
42816+drm_ioctl_compat_t r128_compat_ioctls[] = {
42817 [DRM_R128_INIT] = compat_r128_init,
42818 [DRM_R128_DEPTH] = compat_r128_depth,
42819 [DRM_R128_STIPPLE] = compat_r128_stipple,
42820@@ -197,18 +197,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
42821 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42822 {
42823 unsigned int nr = DRM_IOCTL_NR(cmd);
42824- drm_ioctl_compat_t *fn = NULL;
42825 int ret;
42826
42827 if (nr < DRM_COMMAND_BASE)
42828 return drm_compat_ioctl(filp, cmd, arg);
42829
42830- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
42831- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
42832-
42833- if (fn != NULL)
42834+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls)) {
42835+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
42836 ret = (*fn) (filp, cmd, arg);
42837- else
42838+ } else
42839 ret = drm_ioctl(filp, cmd, arg);
42840
42841 return ret;
42842diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
42843index c2ae496..30b5993 100644
42844--- a/drivers/gpu/drm/r128/r128_irq.c
42845+++ b/drivers/gpu/drm/r128/r128_irq.c
42846@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
42847 if (crtc != 0)
42848 return 0;
42849
42850- return atomic_read(&dev_priv->vbl_received);
42851+ return atomic_read_unchecked(&dev_priv->vbl_received);
42852 }
42853
42854 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
42855@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
42856 /* VBLANK interrupt */
42857 if (status & R128_CRTC_VBLANK_INT) {
42858 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
42859- atomic_inc(&dev_priv->vbl_received);
42860+ atomic_inc_unchecked(&dev_priv->vbl_received);
42861 drm_handle_vblank(dev, 0);
42862 return IRQ_HANDLED;
42863 }
42864diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
42865index 575e986..66e62ca 100644
42866--- a/drivers/gpu/drm/r128/r128_state.c
42867+++ b/drivers/gpu/drm/r128/r128_state.c
42868@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
42869
42870 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
42871 {
42872- if (atomic_read(&dev_priv->idle_count) == 0)
42873+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
42874 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
42875 else
42876- atomic_set(&dev_priv->idle_count, 0);
42877+ atomic_set_unchecked(&dev_priv->idle_count, 0);
42878 }
42879
42880 #endif
42881diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
42882index 4a85bb6..aaea819 100644
42883--- a/drivers/gpu/drm/radeon/mkregtable.c
42884+++ b/drivers/gpu/drm/radeon/mkregtable.c
42885@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
42886 regex_t mask_rex;
42887 regmatch_t match[4];
42888 char buf[1024];
42889- size_t end;
42890+ long end;
42891 int len;
42892 int done = 0;
42893 int r;
42894 unsigned o;
42895 struct offset *offset;
42896 char last_reg_s[10];
42897- int last_reg;
42898+ unsigned long last_reg;
42899
42900 if (regcomp
42901 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
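The mkregtable.c type changes (size_t end to long, int last_reg to unsigned long) are presumably about signedness and width: values that may hold error returns or full strtol() results must not be narrowed or compared against unsigned types, because a mixed comparison converts the signed side. An illustrative sketch of the hazard, not the tool's actual code:

#include <stdio.h>

int main(void)
{
	long end = -1;     /* e.g. a failed call's return, kept for later */
	size_t limit = 10;

	if (end < (long)limit)   /* signed comparison: behaves as expected */
		printf("signed check fires\n");

	if ((size_t)end < limit) /* -1 converts to SIZE_MAX: never true */
		printf("never printed\n");
	return 0;
}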
42902diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
42903index 697add2..9860f5b 100644
42904--- a/drivers/gpu/drm/radeon/radeon_device.c
42905+++ b/drivers/gpu/drm/radeon/radeon_device.c
42906@@ -1169,7 +1169,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
42907 * locking inversion with the driver load path. And the access here is
42908 * completely racy anyway. So don't bother with locking for now.
42909 */
42910- return dev->open_count == 0;
42911+ return local_read(&dev->open_count) == 0;
42912 }
42913
42914 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
42915diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
42916index dafd812..1bf20c7 100644
42917--- a/drivers/gpu/drm/radeon/radeon_drv.h
42918+++ b/drivers/gpu/drm/radeon/radeon_drv.h
42919@@ -262,7 +262,7 @@ typedef struct drm_radeon_private {
42920
42921 /* SW interrupt */
42922 wait_queue_head_t swi_queue;
42923- atomic_t swi_emitted;
42924+ atomic_unchecked_t swi_emitted;
42925 int vblank_crtc;
42926 uint32_t irq_enable_reg;
42927 uint32_t r500_disp_irq_reg;
42928diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
42929index 0b98ea1..0881827 100644
42930--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
42931+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
42932@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
42933 request = compat_alloc_user_space(sizeof(*request));
42934 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
42935 || __put_user(req32.param, &request->param)
42936- || __put_user((void __user *)(unsigned long)req32.value,
42937+ || __put_user((unsigned long)req32.value,
42938 &request->value))
42939 return -EFAULT;
42940
42941@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
42942 #define compat_radeon_cp_setparam NULL
42943 #endif /* X86_64 || IA64 */
42944
42945-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
42946+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
42947 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
42948 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
42949 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
42950@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
42951 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42952 {
42953 unsigned int nr = DRM_IOCTL_NR(cmd);
42954- drm_ioctl_compat_t *fn = NULL;
42955 int ret;
42956
42957 if (nr < DRM_COMMAND_BASE)
42958 return drm_compat_ioctl(filp, cmd, arg);
42959
42960- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
42961- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
42962-
42963- if (fn != NULL)
42964+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls)) {
42965+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
42966 ret = (*fn) (filp, cmd, arg);
42967- else
42968+ } else
42969 ret = drm_ioctl(filp, cmd, arg);
42970
42971 return ret;
42972diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
42973index 244b19b..c19226d 100644
42974--- a/drivers/gpu/drm/radeon/radeon_irq.c
42975+++ b/drivers/gpu/drm/radeon/radeon_irq.c
42976@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
42977 unsigned int ret;
42978 RING_LOCALS;
42979
42980- atomic_inc(&dev_priv->swi_emitted);
42981- ret = atomic_read(&dev_priv->swi_emitted);
42982+ atomic_inc_unchecked(&dev_priv->swi_emitted);
42983+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
42984
42985 BEGIN_RING(4);
42986 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
42987@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
42988 drm_radeon_private_t *dev_priv =
42989 (drm_radeon_private_t *) dev->dev_private;
42990
42991- atomic_set(&dev_priv->swi_emitted, 0);
42992+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
42993 init_waitqueue_head(&dev_priv->swi_queue);
42994
42995 dev->max_vblank_count = 0x001fffff;
42996diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
42997index 23bb64f..69d7234 100644
42998--- a/drivers/gpu/drm/radeon/radeon_state.c
42999+++ b/drivers/gpu/drm/radeon/radeon_state.c
43000@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
43001 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
43002 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
43003
43004- if (copy_from_user(&depth_boxes, clear->depth_boxes,
43005+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
43006 sarea_priv->nbox * sizeof(depth_boxes[0])))
43007 return -EFAULT;
43008
43009@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
43010 {
43011 drm_radeon_private_t *dev_priv = dev->dev_private;
43012 drm_radeon_getparam_t *param = data;
43013- int value;
43014+ int value = 0;
43015
43016 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
43017
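Two classic hardening fixes above: radeon_cp_clear re-checks nbox against RADEON_NR_SAREA_CLIPRECTS at the copy itself, so a concurrent writer to the shared SAREA cannot inflate the copy_from_user() length after the earlier clamp; and radeon_cp_getparam zero-initializes value, since a request that falls through the switch untouched would otherwise copy uninitialized kernel stack back to user space. A sketch of the infoleak half, with memcpy standing in for copy_to_user():

#include <string.h>

static int getparam(int param, void *user_out)
{
	int value = 0; /* left uninitialized, this would be a stack infoleak */

	switch (param) {
	case 1:
		value = 42;
		break;
	default:
		break; /* unknown param: value must not be stack garbage */
	}
	memcpy(user_out, &value, sizeof(value)); /* copy_to_user() stand-in */
	return 0;
}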
43018diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
43019index c8a8a51..219dacc 100644
43020--- a/drivers/gpu/drm/radeon/radeon_ttm.c
43021+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
43022@@ -797,7 +797,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
43023 man->size = size >> PAGE_SHIFT;
43024 }
43025
43026-static struct vm_operations_struct radeon_ttm_vm_ops;
43027+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
43028 static const struct vm_operations_struct *ttm_vm_ops = NULL;
43029
43030 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
43031@@ -838,8 +838,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
43032 }
43033 if (unlikely(ttm_vm_ops == NULL)) {
43034 ttm_vm_ops = vma->vm_ops;
43035+ pax_open_kernel();
43036 radeon_ttm_vm_ops = *ttm_vm_ops;
43037 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
43038+ pax_close_kernel();
43039 }
43040 vma->vm_ops = &radeon_ttm_vm_ops;
43041 return 0;
43042diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
43043index ef40381..347463e 100644
43044--- a/drivers/gpu/drm/tegra/dc.c
43045+++ b/drivers/gpu/drm/tegra/dc.c
43046@@ -1173,7 +1173,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
43047 }
43048
43049 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
43050- dc->debugfs_files[i].data = dc;
43051+ *(void **)&dc->debugfs_files[i].data = dc;
43052
43053 err = drm_debugfs_create_files(dc->debugfs_files,
43054 ARRAY_SIZE(debugfs_files),
43055diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
43056index bd56f2a..255af4b 100644
43057--- a/drivers/gpu/drm/tegra/dsi.c
43058+++ b/drivers/gpu/drm/tegra/dsi.c
43059@@ -41,7 +41,7 @@ struct tegra_dsi {
43060 struct clk *clk_lp;
43061 struct clk *clk;
43062
43063- struct drm_info_list *debugfs_files;
43064+ drm_info_list_no_const *debugfs_files;
43065 struct drm_minor *minor;
43066 struct dentry *debugfs;
43067
43068diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
43069index ba067bb..23afbbd 100644
43070--- a/drivers/gpu/drm/tegra/hdmi.c
43071+++ b/drivers/gpu/drm/tegra/hdmi.c
43072@@ -60,7 +60,7 @@ struct tegra_hdmi {
43073 bool stereo;
43074 bool dvi;
43075
43076- struct drm_info_list *debugfs_files;
43077+ drm_info_list_no_const *debugfs_files;
43078 struct drm_minor *minor;
43079 struct dentry *debugfs;
43080 };
43081diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
43082index bd850c9..d9f3573 100644
43083--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
43084+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
43085@@ -146,10 +146,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
43086 }
43087
43088 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
43089- ttm_bo_man_init,
43090- ttm_bo_man_takedown,
43091- ttm_bo_man_get_node,
43092- ttm_bo_man_put_node,
43093- ttm_bo_man_debug
43094+ .init = ttm_bo_man_init,
43095+ .takedown = ttm_bo_man_takedown,
43096+ .get_node = ttm_bo_man_get_node,
43097+ .put_node = ttm_bo_man_put_node,
43098+ .debug = ttm_bo_man_debug
43099 };
43100 EXPORT_SYMBOL(ttm_bo_manager_func);
43101diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
43102index dbc2def..0a9f710 100644
43103--- a/drivers/gpu/drm/ttm/ttm_memory.c
43104+++ b/drivers/gpu/drm/ttm/ttm_memory.c
43105@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
43106 zone->glob = glob;
43107 glob->zone_kernel = zone;
43108 ret = kobject_init_and_add(
43109- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
43110+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
43111 if (unlikely(ret != 0)) {
43112 kobject_put(&zone->kobj);
43113 return ret;
43114@@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
43115 zone->glob = glob;
43116 glob->zone_dma32 = zone;
43117 ret = kobject_init_and_add(
43118- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
43119+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
43120 if (unlikely(ret != 0)) {
43121 kobject_put(&zone->kobj);
43122 return ret;
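kobject_init_and_add() takes a printf-style format; passing zone->name directly would let any '%' sequence in the name drive conversions, so the patch pins it behind "%s". A standalone sketch of the rule (add_named is a hypothetical stand-in):

#include <stdio.h>

static void add_named(const char *fmt, ...)
	__attribute__((format(printf, 1, 2)));

static void add_named(const char *fmt, ...)
{
	(void)fmt; /* a real implementation would vsnprintf() the name */
}

int main(void)
{
	const char *name = "dma32 %n"; /* hostile data, not a format */

	/* add_named(name);     BAD:  name interpreted as a format string */
	add_named("%s", name); /* GOOD: name treated purely as data */
	return 0;
}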
43123diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
43124index 863bef9..cba15cf 100644
43125--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
43126+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
43127@@ -391,9 +391,9 @@ out:
43128 static unsigned long
43129 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
43130 {
43131- static atomic_t start_pool = ATOMIC_INIT(0);
43132+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
43133 unsigned i;
43134- unsigned pool_offset = atomic_add_return(1, &start_pool);
43135+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
43136 struct ttm_page_pool *pool;
43137 int shrink_pages = sc->nr_to_scan;
43138 unsigned long freed = 0;
43139diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
43140index 3771763..883f206 100644
43141--- a/drivers/gpu/drm/udl/udl_fb.c
43142+++ b/drivers/gpu/drm/udl/udl_fb.c
43143@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
43144 fb_deferred_io_cleanup(info);
43145 kfree(info->fbdefio);
43146 info->fbdefio = NULL;
43147- info->fbops->fb_mmap = udl_fb_mmap;
43148 }
43149
43150 pr_warn("released /dev/fb%d user=%d count=%d\n",
43151diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
43152index ad02732..144f5ed 100644
43153--- a/drivers/gpu/drm/via/via_drv.h
43154+++ b/drivers/gpu/drm/via/via_drv.h
43155@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
43156 typedef uint32_t maskarray_t[5];
43157
43158 typedef struct drm_via_irq {
43159- atomic_t irq_received;
43160+ atomic_unchecked_t irq_received;
43161 uint32_t pending_mask;
43162 uint32_t enable_mask;
43163 wait_queue_head_t irq_queue;
43164@@ -75,7 +75,7 @@ typedef struct drm_via_private {
43165 struct timeval last_vblank;
43166 int last_vblank_valid;
43167 unsigned usec_per_vblank;
43168- atomic_t vbl_received;
43169+ atomic_unchecked_t vbl_received;
43170 drm_via_state_t hc_state;
43171 char pci_buf[VIA_PCI_BUF_SIZE];
43172 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
43173diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
43174index 1319433..a993b0c 100644
43175--- a/drivers/gpu/drm/via/via_irq.c
43176+++ b/drivers/gpu/drm/via/via_irq.c
43177@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
43178 if (crtc != 0)
43179 return 0;
43180
43181- return atomic_read(&dev_priv->vbl_received);
43182+ return atomic_read_unchecked(&dev_priv->vbl_received);
43183 }
43184
43185 irqreturn_t via_driver_irq_handler(int irq, void *arg)
43186@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
43187
43188 status = VIA_READ(VIA_REG_INTERRUPT);
43189 if (status & VIA_IRQ_VBLANK_PENDING) {
43190- atomic_inc(&dev_priv->vbl_received);
43191- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
43192+ atomic_inc_unchecked(&dev_priv->vbl_received);
43193+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
43194 do_gettimeofday(&cur_vblank);
43195 if (dev_priv->last_vblank_valid) {
43196 dev_priv->usec_per_vblank =
43197@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
43198 dev_priv->last_vblank = cur_vblank;
43199 dev_priv->last_vblank_valid = 1;
43200 }
43201- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
43202+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
43203 DRM_DEBUG("US per vblank is: %u\n",
43204 dev_priv->usec_per_vblank);
43205 }
43206@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
43207
43208 for (i = 0; i < dev_priv->num_irqs; ++i) {
43209 if (status & cur_irq->pending_mask) {
43210- atomic_inc(&cur_irq->irq_received);
43211+ atomic_inc_unchecked(&cur_irq->irq_received);
43212 wake_up(&cur_irq->irq_queue);
43213 handled = 1;
43214 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
43215@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
43216 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
43217 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
43218 masks[irq][4]));
43219- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
43220+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
43221 } else {
43222 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
43223 (((cur_irq_sequence =
43224- atomic_read(&cur_irq->irq_received)) -
43225+ atomic_read_unchecked(&cur_irq->irq_received)) -
43226 *sequence) <= (1 << 23)));
43227 }
43228 *sequence = cur_irq_sequence;
43229@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
43230 }
43231
43232 for (i = 0; i < dev_priv->num_irqs; ++i) {
43233- atomic_set(&cur_irq->irq_received, 0);
43234+ atomic_set_unchecked(&cur_irq->irq_received, 0);
43235 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
43236 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
43237 init_waitqueue_head(&cur_irq->irq_queue);
43238@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
43239 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
43240 case VIA_IRQ_RELATIVE:
43241 irqwait->request.sequence +=
43242- atomic_read(&cur_irq->irq_received);
43243+ atomic_read_unchecked(&cur_irq->irq_received);
43244 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
43245 case VIA_IRQ_ABSOLUTE:
43246 break;
43247diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
43248index 6b252a8..5975dfe 100644
43249--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
43250+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
43251@@ -437,7 +437,7 @@ struct vmw_private {
43252 * Fencing and IRQs.
43253 */
43254
43255- atomic_t marker_seq;
43256+ atomic_unchecked_t marker_seq;
43257 wait_queue_head_t fence_queue;
43258 wait_queue_head_t fifo_queue;
43259 int fence_queue_waiters; /* Protected by hw_mutex */
43260diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
43261index 6ccd993..618d592 100644
43262--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
43263+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
43264@@ -154,7 +154,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
43265 (unsigned int) min,
43266 (unsigned int) fifo->capabilities);
43267
43268- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
43269+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
43270 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
43271 vmw_marker_queue_init(&fifo->marker_queue);
43272 return vmw_fifo_send_fence(dev_priv, &dummy);
43273@@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
43274 if (reserveable)
43275 iowrite32(bytes, fifo_mem +
43276 SVGA_FIFO_RESERVED);
43277- return fifo_mem + (next_cmd >> 2);
43278+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
43279 } else {
43280 need_bounce = true;
43281 }
43282@@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
43283
43284 fm = vmw_fifo_reserve(dev_priv, bytes);
43285 if (unlikely(fm == NULL)) {
43286- *seqno = atomic_read(&dev_priv->marker_seq);
43287+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
43288 ret = -ENOMEM;
43289 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
43290 false, 3*HZ);
43291@@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
43292 }
43293
43294 do {
43295- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
43296+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
43297 } while (*seqno == 0);
43298
43299 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
43300diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
43301index b1273e8..9c274fd 100644
43302--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
43303+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
43304@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
43305 }
43306
43307 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
43308- vmw_gmrid_man_init,
43309- vmw_gmrid_man_takedown,
43310- vmw_gmrid_man_get_node,
43311- vmw_gmrid_man_put_node,
43312- vmw_gmrid_man_debug
43313+ .init = vmw_gmrid_man_init,
43314+ .takedown = vmw_gmrid_man_takedown,
43315+ .get_node = vmw_gmrid_man_get_node,
43316+ .put_node = vmw_gmrid_man_put_node,
43317+ .debug = vmw_gmrid_man_debug
43318 };
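Converting vmw_gmrid_manager_func from positional to designated initializers is another recurring transformation in this patch (nes_cm_api further down gets the same treatment). Positional initializers bind by field order, which breaks silently if the ops struct gains or reorders members; naming the fields is order-independent and leaves unset members zeroed. A generic before/after sketch with a hypothetical two-member ops struct:

    struct ops {
            int  (*init)(void *priv);
            void (*debug)(void *priv);
    };

    static int  my_init(void *priv)  { return 0; }
    static void my_debug(void *priv) { }

    /* positional: correct only while the field order never changes */
    static const struct ops a = { my_init, my_debug };

    /* designated: binds by name, robust to reordering and new members */
    static const struct ops b = {
            .init  = my_init,
            .debug = my_debug,
    };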
43319diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
43320index 37881ec..319065d 100644
43321--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
43322+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
43323@@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
43324 int ret;
43325
43326 num_clips = arg->num_clips;
43327- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
43328+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
43329
43330 if (unlikely(num_clips == 0))
43331 return 0;
43332@@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
43333 int ret;
43334
43335 num_clips = arg->num_clips;
43336- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
43337+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
43338
43339 if (unlikely(num_clips == 0))
43340 return 0;
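The clips_ptr casts add the __user address-space annotation this pointer deserves: the u64 handle arriving through the ioctl struct is turned into a pointer that must only ever reach copy_from_user()-style accessors, and sparse then flags any direct dereference. The __force variants seen nearby — __force_kernel in vmwgfx_fifo.c above, __force_user in i2c-dev.c below — deliberately override the checker where a cast between address spaces is intended. A sketch of the convention with a hypothetical helper:

    #include <linux/uaccess.h>

    struct drm_vmw_rect;    /* opaque for this sketch */

    static int fetch_clips(u64 handle, void *dst,
                           unsigned int num, size_t rect_size)
    {
            struct drm_vmw_rect __user *uptr =
                    (struct drm_vmw_rect __user *)(unsigned long)handle;

            /* sparse rejects *uptr here; only uaccess helpers may touch it */
            return copy_from_user(dst, uptr, (size_t)num * rect_size) ?
                    -EFAULT : 0;
    }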
43341diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
43342index 0c42376..6febe77 100644
43343--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
43344+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
43345@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
43346 * emitted. Then the fence is stale and signaled.
43347 */
43348
43349- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
43350+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
43351 > VMW_FENCE_WRAP);
43352
43353 return ret;
43354@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
43355
43356 if (fifo_idle)
43357 down_read(&fifo_state->rwsem);
43358- signal_seq = atomic_read(&dev_priv->marker_seq);
43359+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
43360 ret = 0;
43361
43362 for (;;) {
43363diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
43364index 8a8725c2..afed796 100644
43365--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
43366+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
43367@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
43368 while (!vmw_lag_lt(queue, us)) {
43369 spin_lock(&queue->lock);
43370 if (list_empty(&queue->head))
43371- seqno = atomic_read(&dev_priv->marker_seq);
43372+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
43373 else {
43374 marker = list_first_entry(&queue->head,
43375 struct vmw_marker, head);
43376diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
43377index 6866448..2ad2b34 100644
43378--- a/drivers/gpu/vga/vga_switcheroo.c
43379+++ b/drivers/gpu/vga/vga_switcheroo.c
43380@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
43381
43382 /* this version is for the case where the power switch is separate
43383 to the device being powered down. */
43384-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
43385+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
43386 {
43387 /* copy over all the bus versions */
43388 if (dev->bus && dev->bus->pm) {
43389@@ -689,7 +689,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
43390 return ret;
43391 }
43392
43393-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
43394+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
43395 {
43396 /* copy over all the bus versions */
43397 if (dev->bus && dev->bus->pm) {
43398diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
43399index 8ed66fd..38ff772 100644
43400--- a/drivers/hid/hid-core.c
43401+++ b/drivers/hid/hid-core.c
43402@@ -2488,7 +2488,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
43403
43404 int hid_add_device(struct hid_device *hdev)
43405 {
43406- static atomic_t id = ATOMIC_INIT(0);
43407+ static atomic_unchecked_t id = ATOMIC_INIT(0);
43408 int ret;
43409
43410 if (WARN_ON(hdev->status & HID_STAT_ADDED))
43411@@ -2530,7 +2530,7 @@ int hid_add_device(struct hid_device *hdev)
43412 /* XXX hack, any other cleaner solution after the driver core
43413 * is converted to allow more than 20 bytes as the device name? */
43414 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
43415- hdev->vendor, hdev->product, atomic_inc_return(&id));
43416+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
43417
43418 hid_debug_register(hdev, dev_name(&hdev->dev));
43419 ret = device_add(&hdev->dev);
43420diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
43421index ecc2cbf..29a74c1 100644
43422--- a/drivers/hid/hid-magicmouse.c
43423+++ b/drivers/hid/hid-magicmouse.c
43424@@ -290,6 +290,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
43425 if (size < 4 || ((size - 4) % 9) != 0)
43426 return 0;
43427 npoints = (size - 4) / 9;
43428+ if (npoints > 15) {
43429+ hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n",
43430+ size);
43431+ return 0;
43432+ }
43433 msc->ntouches = 0;
43434 for (ii = 0; ii < npoints; ii++)
43435 magicmouse_emit_touch(msc, ii, data + ii * 9 + 4);
43436@@ -307,6 +312,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
43437 if (size < 6 || ((size - 6) % 8) != 0)
43438 return 0;
43439 npoints = (size - 6) / 8;
43440+ if (npoints > 15) {
43441+ hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n",
43442+ size);
43443+ return 0;
43444+ }
43445 msc->ntouches = 0;
43446 for (ii = 0; ii < npoints; ii++)
43447 magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);
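The two magicmouse hunks above (and the picolcd hunk just below) harden raw-event parsing against malicious HID report sizes: npoints is derived from an attacker-controllable size and then used to index a fixed array, so it is clamped before the loop runs. The shape of the check, with TOUCH_MAX standing in for the driver's real array bound:

    #include <linux/hid.h>

    #define TOUCH_MAX 15    /* stand-in for the driver's touch array size */

    static int parse_trackpad_report(struct hid_device *hdev,
                                     u8 *data, int size)
    {
            int npoints, ii;

            if (size < 4 || ((size - 4) % 9) != 0)
                    return 0;
            npoints = (size - 4) / 9;
            if (npoints > TOUCH_MAX) {
                    hid_warn(hdev, "invalid size value (%d)\n", size);
                    return 0;
            }
            for (ii = 0; ii < npoints; ii++) {
                    /* each 9-byte record at data + ii * 9 + 4 is now known
                     * to lie inside the report and inside the touch array */
            }
            return 0;
    }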
43448diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
43449index acbb0210..020df3c 100644
43450--- a/drivers/hid/hid-picolcd_core.c
43451+++ b/drivers/hid/hid-picolcd_core.c
43452@@ -350,6 +350,12 @@ static int picolcd_raw_event(struct hid_device *hdev,
43453 if (!data)
43454 return 1;
43455
43456+ if (size > 64) {
43457+ hid_warn(hdev, "invalid size value (%d) for picolcd raw event\n",
43458+ size);
43459+ return 0;
43460+ }
43461+
43462 if (report->id == REPORT_KEY_STATE) {
43463 if (data->input_keys)
43464 ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
43465diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
43466index c13fb5b..55a3802 100644
43467--- a/drivers/hid/hid-wiimote-debug.c
43468+++ b/drivers/hid/hid-wiimote-debug.c
43469@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
43470 else if (size == 0)
43471 return -EIO;
43472
43473- if (copy_to_user(u, buf, size))
43474+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
43475 return -EFAULT;
43476
43477 *off += size;
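Here size is computed from wiimote EEPROM state and then used to copy out of an on-stack buffer; the added size > sizeof(buf) term makes the stack bound explicit before copy_to_user() runs. The defensive shape, assuming a hypothetical fill step:

    #include <linux/kernel.h>
    #include <linux/uaccess.h>

    static ssize_t fill_from_device(u8 *buf, size_t len);  /* hypothetical */

    static ssize_t debugfs_read(char __user *u, size_t s)
    {
            u8 buf[16];                  /* fixed on-stack staging buffer */
            ssize_t size;

            size = fill_from_device(buf, min(s, sizeof(buf)));
            if (size <= 0)
                    return size;

            /* refuse rather than over-read the stack buffer */
            if (size > sizeof(buf) || copy_to_user(u, buf, size))
                    return -EFAULT;
            return size;
    }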
43478diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
43479index 0cb92e3..c7d453d 100644
43480--- a/drivers/hid/uhid.c
43481+++ b/drivers/hid/uhid.c
43482@@ -47,7 +47,7 @@ struct uhid_device {
43483 struct mutex report_lock;
43484 wait_queue_head_t report_wait;
43485 atomic_t report_done;
43486- atomic_t report_id;
43487+ atomic_unchecked_t report_id;
43488 struct uhid_event report_buf;
43489 };
43490
43491@@ -163,7 +163,7 @@ static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
43492
43493 spin_lock_irqsave(&uhid->qlock, flags);
43494 ev->type = UHID_FEATURE;
43495- ev->u.feature.id = atomic_inc_return(&uhid->report_id);
43496+ ev->u.feature.id = atomic_inc_return_unchecked(&uhid->report_id);
43497 ev->u.feature.rnum = rnum;
43498 ev->u.feature.rtype = report_type;
43499
43500@@ -538,7 +538,7 @@ static int uhid_dev_feature_answer(struct uhid_device *uhid,
43501 spin_lock_irqsave(&uhid->qlock, flags);
43502
43503 /* id for old report; drop it silently */
43504- if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
43505+ if (atomic_read_unchecked(&uhid->report_id) != ev->u.feature_answer.id)
43506 goto unlock;
43507 if (atomic_read(&uhid->report_done))
43508 goto unlock;
43509diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
43510index 284cf66..084c627 100644
43511--- a/drivers/hv/channel.c
43512+++ b/drivers/hv/channel.c
43513@@ -365,8 +365,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
43514 int ret = 0;
43515 int t;
43516
43517- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
43518- atomic_inc(&vmbus_connection.next_gpadl_handle);
43519+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
43520+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
43521
43522 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
43523 if (ret)
43524diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
43525index edfc848..d83e195 100644
43526--- a/drivers/hv/hv.c
43527+++ b/drivers/hv/hv.c
43528@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
43529 u64 output_address = (output) ? virt_to_phys(output) : 0;
43530 u32 output_address_hi = output_address >> 32;
43531 u32 output_address_lo = output_address & 0xFFFFFFFF;
43532- void *hypercall_page = hv_context.hypercall_page;
43533+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
43534
43535 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
43536 "=a"(hv_status_lo) : "d" (control_hi),
43537@@ -154,7 +154,7 @@ int hv_init(void)
43538 /* See if the hypercall page is already set */
43539 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
43540
43541- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
43542+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
43543
43544 if (!virtaddr)
43545 goto cleanup;
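These two hv.c hunks are a KERNEXEC-style W^X fix: the hypercall page is allocated with a read-execute protection instead of the writable-and-executable PAGE_KERNEL_EXEC, and do_hypercall() translates the stored address through ktva_ktla() to the alias it may actually execute (PAGE_KERNEL_RX and ktva_ktla() are both PaX-provided). The same split expressed with the mainline set_memory_* API — an assumption, purely to illustrate write-then-execute ordering:

    #include <linux/vmalloc.h>
    #include <asm/cacheflush.h>     /* set_memory_ro()/set_memory_x() */

    static void *alloc_exec_page(void)
    {
            void *page = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);

            if (!page)
                    return NULL;
            /* populate the page here, while it is still writable */
            set_memory_ro((unsigned long)page, 1);  /* drop write... */
            set_memory_x((unsigned long)page, 1);   /* ...then allow exec */
            return page;
    }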
43546diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
43547index 5e90c5d..d8fcefb 100644
43548--- a/drivers/hv/hv_balloon.c
43549+++ b/drivers/hv/hv_balloon.c
43550@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
43551
43552 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
43553 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
43554-static atomic_t trans_id = ATOMIC_INIT(0);
43555+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
43556
43557 static int dm_ring_size = (5 * PAGE_SIZE);
43558
43559@@ -893,7 +893,7 @@ static void hot_add_req(struct work_struct *dummy)
43560 pr_info("Memory hot add failed\n");
43561
43562 dm->state = DM_INITIALIZED;
43563- resp.hdr.trans_id = atomic_inc_return(&trans_id);
43564+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43565 vmbus_sendpacket(dm->dev->channel, &resp,
43566 sizeof(struct dm_hot_add_response),
43567 (unsigned long)NULL,
43568@@ -973,7 +973,7 @@ static void post_status(struct hv_dynmem_device *dm)
43569 memset(&status, 0, sizeof(struct dm_status));
43570 status.hdr.type = DM_STATUS_REPORT;
43571 status.hdr.size = sizeof(struct dm_status);
43572- status.hdr.trans_id = atomic_inc_return(&trans_id);
43573+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43574
43575 /*
43576 * The host expects the guest to report free memory.
43577@@ -993,7 +993,7 @@ static void post_status(struct hv_dynmem_device *dm)
43578 * send the status. This can happen if we were interrupted
43579 * after we picked our transaction ID.
43580 */
43581- if (status.hdr.trans_id != atomic_read(&trans_id))
43582+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
43583 return;
43584
43585 /*
43586@@ -1129,7 +1129,7 @@ static void balloon_up(struct work_struct *dummy)
43587 */
43588
43589 do {
43590- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
43591+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43592 ret = vmbus_sendpacket(dm_device.dev->channel,
43593 bl_resp,
43594 bl_resp->hdr.size,
43595@@ -1175,7 +1175,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
43596
43597 memset(&resp, 0, sizeof(struct dm_unballoon_response));
43598 resp.hdr.type = DM_UNBALLOON_RESPONSE;
43599- resp.hdr.trans_id = atomic_inc_return(&trans_id);
43600+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43601 resp.hdr.size = sizeof(struct dm_unballoon_response);
43602
43603 vmbus_sendpacket(dm_device.dev->channel, &resp,
43604@@ -1239,7 +1239,7 @@ static void version_resp(struct hv_dynmem_device *dm,
43605 memset(&version_req, 0, sizeof(struct dm_version_request));
43606 version_req.hdr.type = DM_VERSION_REQUEST;
43607 version_req.hdr.size = sizeof(struct dm_version_request);
43608- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
43609+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43610 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
43611 version_req.is_last_attempt = 1;
43612
43613@@ -1409,7 +1409,7 @@ static int balloon_probe(struct hv_device *dev,
43614 memset(&version_req, 0, sizeof(struct dm_version_request));
43615 version_req.hdr.type = DM_VERSION_REQUEST;
43616 version_req.hdr.size = sizeof(struct dm_version_request);
43617- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
43618+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43619 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
43620 version_req.is_last_attempt = 0;
43621
43622@@ -1440,7 +1440,7 @@ static int balloon_probe(struct hv_device *dev,
43623 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
43624 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
43625 cap_msg.hdr.size = sizeof(struct dm_capabilities);
43626- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
43627+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43628
43629 cap_msg.caps.cap_bits.balloon = 1;
43630 cap_msg.caps.cap_bits.hot_add = 1;
43631diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
43632index 22b7507..fc2fc47 100644
43633--- a/drivers/hv/hyperv_vmbus.h
43634+++ b/drivers/hv/hyperv_vmbus.h
43635@@ -607,7 +607,7 @@ enum vmbus_connect_state {
43636 struct vmbus_connection {
43637 enum vmbus_connect_state conn_state;
43638
43639- atomic_t next_gpadl_handle;
43640+ atomic_unchecked_t next_gpadl_handle;
43641
43642 /*
43643 * Represents channel interrupts. Each bit position represents a
43644diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
43645index 4d6b269..2e23b86 100644
43646--- a/drivers/hv/vmbus_drv.c
43647+++ b/drivers/hv/vmbus_drv.c
43648@@ -807,10 +807,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
43649 {
43650 int ret = 0;
43651
43652- static atomic_t device_num = ATOMIC_INIT(0);
43653+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
43654
43655 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
43656- atomic_inc_return(&device_num));
43657+ atomic_inc_return_unchecked(&device_num));
43658
43659 child_device_obj->device.bus = &hv_bus;
43660 child_device_obj->device.parent = &hv_acpi_dev->dev;
43661diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
43662index 579bdf9..75118b5 100644
43663--- a/drivers/hwmon/acpi_power_meter.c
43664+++ b/drivers/hwmon/acpi_power_meter.c
43665@@ -116,7 +116,7 @@ struct sensor_template {
43666 struct device_attribute *devattr,
43667 const char *buf, size_t count);
43668 int index;
43669-};
43670+} __do_const;
43671
43672 /* Averaging interval */
43673 static int update_avg_interval(struct acpi_power_meter_resource *resource)
43674@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
43675 struct sensor_template *attrs)
43676 {
43677 struct device *dev = &resource->acpi_dev->dev;
43678- struct sensor_device_attribute *sensors =
43679+ sensor_device_attribute_no_const *sensors =
43680 &resource->sensors[resource->num_sensors];
43681 int res = 0;
43682
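This hunk introduces the constify-plugin conventions that dominate the rest of the drivers/ churn: structures holding function pointers are made read-only at build time, __do_const forces that even for structs that also carry data (like sensor_template with its index field), and a parallel *_no_const typedef — sensor_device_attribute_no_const here, dev_pm_domain_no_const in vga_switcheroo above — marks the few instances that must stay writable because they are filled in at runtime. Sketch of the pattern, assuming grsecurity's __do_const/__no_const attribute macros:

    struct sensor_template {
            ssize_t (*show)(struct device *dev,
                            struct device_attribute *attr, char *buf);
            int index;
    } __do_const;               /* every static instance becomes const */

    typedef struct sensor_template __no_const sensor_template_no_const;

    /* writable exception: constructed field-by-field during probe */
    static sensor_template_no_const runtime_template;

The applesmc, asus_atk0110, iio, nct67xx, and pmbus hunks below are all further applications of the same typedefs.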
43683diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
43684index 3288f13..71cfb4e 100644
43685--- a/drivers/hwmon/applesmc.c
43686+++ b/drivers/hwmon/applesmc.c
43687@@ -1106,7 +1106,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
43688 {
43689 struct applesmc_node_group *grp;
43690 struct applesmc_dev_attr *node;
43691- struct attribute *attr;
43692+ attribute_no_const *attr;
43693 int ret, i;
43694
43695 for (grp = groups; grp->format; grp++) {
43696diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
43697index ae208f6..48b6c5b 100644
43698--- a/drivers/hwmon/asus_atk0110.c
43699+++ b/drivers/hwmon/asus_atk0110.c
43700@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
43701 struct atk_sensor_data {
43702 struct list_head list;
43703 struct atk_data *data;
43704- struct device_attribute label_attr;
43705- struct device_attribute input_attr;
43706- struct device_attribute limit1_attr;
43707- struct device_attribute limit2_attr;
43708+ device_attribute_no_const label_attr;
43709+ device_attribute_no_const input_attr;
43710+ device_attribute_no_const limit1_attr;
43711+ device_attribute_no_const limit2_attr;
43712 char label_attr_name[ATTR_NAME_SIZE];
43713 char input_attr_name[ATTR_NAME_SIZE];
43714 char limit1_attr_name[ATTR_NAME_SIZE];
43715@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
43716 static struct device_attribute atk_name_attr =
43717 __ATTR(name, 0444, atk_name_show, NULL);
43718
43719-static void atk_init_attribute(struct device_attribute *attr, char *name,
43720+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
43721 sysfs_show_func show)
43722 {
43723 sysfs_attr_init(&attr->attr);
43724diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
43725index d76f0b7..55ae976 100644
43726--- a/drivers/hwmon/coretemp.c
43727+++ b/drivers/hwmon/coretemp.c
43728@@ -784,7 +784,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
43729 return NOTIFY_OK;
43730 }
43731
43732-static struct notifier_block coretemp_cpu_notifier __refdata = {
43733+static struct notifier_block coretemp_cpu_notifier = {
43734 .notifier_call = coretemp_cpu_callback,
43735 };
43736
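coretemp (and via-cputemp below) drop the __refdata marker from their CPU notifier blocks. __refdata places an object in .ref.data and suppresses section-mismatch warnings for references into init sections; with the callback being ordinary kernel text, the annotation likely buys nothing and only moves the notifier out of the normally protected data sections. The plain 3.16-era shape for reference:

    static int my_cpu_callback(struct notifier_block *nfb,
                               unsigned long action, void *hcpu)
    {
            /* react to CPU_ONLINE / CPU_DOWN_PREPARE etc. */
            return NOTIFY_OK;
    }

    static struct notifier_block my_cpu_notifier = {
            .notifier_call = my_cpu_callback,
    };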
43737diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
43738index 632f1dc..57e6a58 100644
43739--- a/drivers/hwmon/ibmaem.c
43740+++ b/drivers/hwmon/ibmaem.c
43741@@ -926,7 +926,7 @@ static int aem_register_sensors(struct aem_data *data,
43742 struct aem_rw_sensor_template *rw)
43743 {
43744 struct device *dev = &data->pdev->dev;
43745- struct sensor_device_attribute *sensors = data->sensors;
43746+ sensor_device_attribute_no_const *sensors = data->sensors;
43747 int err;
43748
43749 /* Set up read-only sensors */
43750diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
43751index 14c82da..09b25d7 100644
43752--- a/drivers/hwmon/iio_hwmon.c
43753+++ b/drivers/hwmon/iio_hwmon.c
43754@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
43755 {
43756 struct device *dev = &pdev->dev;
43757 struct iio_hwmon_state *st;
43758- struct sensor_device_attribute *a;
43759+ sensor_device_attribute_no_const *a;
43760 int ret, i;
43761 int in_i = 1, temp_i = 1, curr_i = 1;
43762 enum iio_chan_type type;
43763diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
43764index 7710f46..427a28d 100644
43765--- a/drivers/hwmon/nct6683.c
43766+++ b/drivers/hwmon/nct6683.c
43767@@ -397,11 +397,11 @@ static struct attribute_group *
43768 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
43769 int repeat)
43770 {
43771- struct sensor_device_attribute_2 *a2;
43772- struct sensor_device_attribute *a;
43773+ sensor_device_attribute_2_no_const *a2;
43774+ sensor_device_attribute_no_const *a;
43775 struct sensor_device_template **t;
43776 struct sensor_device_attr_u *su;
43777- struct attribute_group *group;
43778+ attribute_group_no_const *group;
43779 struct attribute **attrs;
43780 int i, j, count;
43781
43782diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
43783index 59d9a3f..2298fa4 100644
43784--- a/drivers/hwmon/nct6775.c
43785+++ b/drivers/hwmon/nct6775.c
43786@@ -944,10 +944,10 @@ static struct attribute_group *
43787 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
43788 int repeat)
43789 {
43790- struct attribute_group *group;
43791+ attribute_group_no_const *group;
43792 struct sensor_device_attr_u *su;
43793- struct sensor_device_attribute *a;
43794- struct sensor_device_attribute_2 *a2;
43795+ sensor_device_attribute_no_const *a;
43796+ sensor_device_attribute_2_no_const *a2;
43797 struct attribute **attrs;
43798 struct sensor_device_template **t;
43799 int i, count;
43800diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
43801index 291d11f..3f0dbbd 100644
43802--- a/drivers/hwmon/pmbus/pmbus_core.c
43803+++ b/drivers/hwmon/pmbus/pmbus_core.c
43804@@ -783,7 +783,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
43805 return 0;
43806 }
43807
43808-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
43809+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
43810 const char *name,
43811 umode_t mode,
43812 ssize_t (*show)(struct device *dev,
43813@@ -800,7 +800,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
43814 dev_attr->store = store;
43815 }
43816
43817-static void pmbus_attr_init(struct sensor_device_attribute *a,
43818+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
43819 const char *name,
43820 umode_t mode,
43821 ssize_t (*show)(struct device *dev,
43822@@ -822,7 +822,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
43823 u16 reg, u8 mask)
43824 {
43825 struct pmbus_boolean *boolean;
43826- struct sensor_device_attribute *a;
43827+ sensor_device_attribute_no_const *a;
43828
43829 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
43830 if (!boolean)
43831@@ -847,7 +847,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
43832 bool update, bool readonly)
43833 {
43834 struct pmbus_sensor *sensor;
43835- struct device_attribute *a;
43836+ device_attribute_no_const *a;
43837
43838 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
43839 if (!sensor)
43840@@ -878,7 +878,7 @@ static int pmbus_add_label(struct pmbus_data *data,
43841 const char *lstring, int index)
43842 {
43843 struct pmbus_label *label;
43844- struct device_attribute *a;
43845+ device_attribute_no_const *a;
43846
43847 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
43848 if (!label)
43849diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
43850index 97cd45a..ac54d8b 100644
43851--- a/drivers/hwmon/sht15.c
43852+++ b/drivers/hwmon/sht15.c
43853@@ -169,7 +169,7 @@ struct sht15_data {
43854 int supply_uv;
43855 bool supply_uv_valid;
43856 struct work_struct update_supply_work;
43857- atomic_t interrupt_handled;
43858+ atomic_unchecked_t interrupt_handled;
43859 };
43860
43861 /**
43862@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
43863 ret = gpio_direction_input(data->pdata->gpio_data);
43864 if (ret)
43865 return ret;
43866- atomic_set(&data->interrupt_handled, 0);
43867+ atomic_set_unchecked(&data->interrupt_handled, 0);
43868
43869 enable_irq(gpio_to_irq(data->pdata->gpio_data));
43870 if (gpio_get_value(data->pdata->gpio_data) == 0) {
43871 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
43872 /* Only relevant if the interrupt hasn't occurred. */
43873- if (!atomic_read(&data->interrupt_handled))
43874+ if (!atomic_read_unchecked(&data->interrupt_handled))
43875 schedule_work(&data->read_work);
43876 }
43877 ret = wait_event_timeout(data->wait_queue,
43878@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
43879
43880 /* First disable the interrupt */
43881 disable_irq_nosync(irq);
43882- atomic_inc(&data->interrupt_handled);
43883+ atomic_inc_unchecked(&data->interrupt_handled);
43884 /* Then schedule a reading work struct */
43885 if (data->state != SHT15_READING_NOTHING)
43886 schedule_work(&data->read_work);
43887@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
43888 * If not, then start the interrupt again - care here as could
43889 * have gone low in meantime so verify it hasn't!
43890 */
43891- atomic_set(&data->interrupt_handled, 0);
43892+ atomic_set_unchecked(&data->interrupt_handled, 0);
43893 enable_irq(gpio_to_irq(data->pdata->gpio_data));
43894 /* If still not occurred or another handler was scheduled */
43895 if (gpio_get_value(data->pdata->gpio_data)
43896- || atomic_read(&data->interrupt_handled))
43897+ || atomic_read_unchecked(&data->interrupt_handled))
43898 return;
43899 }
43900
43901diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
43902index 8df43c5..b07b91d 100644
43903--- a/drivers/hwmon/via-cputemp.c
43904+++ b/drivers/hwmon/via-cputemp.c
43905@@ -296,7 +296,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
43906 return NOTIFY_OK;
43907 }
43908
43909-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
43910+static struct notifier_block via_cputemp_cpu_notifier = {
43911 .notifier_call = via_cputemp_cpu_callback,
43912 };
43913
43914diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
43915index 41fc683..a39cfea 100644
43916--- a/drivers/i2c/busses/i2c-amd756-s4882.c
43917+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
43918@@ -43,7 +43,7 @@
43919 extern struct i2c_adapter amd756_smbus;
43920
43921 static struct i2c_adapter *s4882_adapter;
43922-static struct i2c_algorithm *s4882_algo;
43923+static i2c_algorithm_no_const *s4882_algo;
43924
43925 /* Wrapper access functions for multiplexed SMBus */
43926 static DEFINE_MUTEX(amd756_lock);
43927diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
43928index b19a310..d6eece0 100644
43929--- a/drivers/i2c/busses/i2c-diolan-u2c.c
43930+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
43931@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
43932 /* usb layer */
43933
43934 /* Send command to device, and get response. */
43935-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
43936+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
43937 {
43938 int ret = 0;
43939 int actual;
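__intentional_overflow() is an annotation consumed by the size_overflow gcc plugin, which otherwise instruments arithmetic that can feed size computations and traps when it wraps; the argument list names which values are allowed to overflow, with -1 conventionally denoting the return value. It fits helpers like this USB transfer function (and the mthca command wrappers further down) whose arithmetic wraps or goes negative by design. Conceptual use on a classic wrapping computation:

    /* ring-buffer free space: the subtraction wraps by design */
    static int __intentional_overflow(-1) ring_space(u32 head, u32 tail,
                                                     u32 size)
    {
            return (tail - head - 1) & (size - 1);
    }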
43940diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
43941index b170bdf..3c76427 100644
43942--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
43943+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
43944@@ -41,7 +41,7 @@
43945 extern struct i2c_adapter *nforce2_smbus;
43946
43947 static struct i2c_adapter *s4985_adapter;
43948-static struct i2c_algorithm *s4985_algo;
43949+static i2c_algorithm_no_const *s4985_algo;
43950
43951 /* Wrapper access functions for multiplexed SMBus */
43952 static DEFINE_MUTEX(nforce2_lock);
43953diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
43954index 80b47e8..1a6040d9 100644
43955--- a/drivers/i2c/i2c-dev.c
43956+++ b/drivers/i2c/i2c-dev.c
43957@@ -277,7 +277,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
43958 break;
43959 }
43960
43961- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
43962+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
43963 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
43964 if (IS_ERR(rdwr_pa[i].buf)) {
43965 res = PTR_ERR(rdwr_pa[i].buf);
43966diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
43967index 0b510ba..4fbb5085 100644
43968--- a/drivers/ide/ide-cd.c
43969+++ b/drivers/ide/ide-cd.c
43970@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
43971 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
43972 if ((unsigned long)buf & alignment
43973 || blk_rq_bytes(rq) & q->dma_pad_mask
43974- || object_is_on_stack(buf))
43975+ || object_starts_on_stack(buf))
43976 drive->dma = 0;
43977 }
43978 }
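The ide-cd DMA-alignment check wants to know whether the request buffer begins on the current task's stack; grsecurity renames the helper to object_starts_on_stack to make explicit that only the start address is tested, not the whole object. The underlying test is a simple interval check, roughly:

    #include <linux/sched.h>
    #include <linux/thread_info.h>

    static inline int object_starts_on_stack(const void *obj)
    {
            const void *stack = task_stack_page(current);

            return (obj >= stack) && (obj < stack + THREAD_SIZE);
    }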
43979diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
43980index 4b1f375..770b95f 100644
43981--- a/drivers/iio/industrialio-core.c
43982+++ b/drivers/iio/industrialio-core.c
43983@@ -551,7 +551,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
43984 }
43985
43986 static
43987-int __iio_device_attr_init(struct device_attribute *dev_attr,
43988+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
43989 const char *postfix,
43990 struct iio_chan_spec const *chan,
43991 ssize_t (*readfunc)(struct device *dev,
43992diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
43993index c323917..6ddea8b 100644
43994--- a/drivers/infiniband/core/cm.c
43995+++ b/drivers/infiniband/core/cm.c
43996@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
43997
43998 struct cm_counter_group {
43999 struct kobject obj;
44000- atomic_long_t counter[CM_ATTR_COUNT];
44001+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
44002 };
44003
44004 struct cm_counter_attribute {
44005@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
44006 struct ib_mad_send_buf *msg = NULL;
44007 int ret;
44008
44009- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44010+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44011 counter[CM_REQ_COUNTER]);
44012
44013 /* Quick state check to discard duplicate REQs. */
44014@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
44015 if (!cm_id_priv)
44016 return;
44017
44018- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44019+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44020 counter[CM_REP_COUNTER]);
44021 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
44022 if (ret)
44023@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
44024 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
44025 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
44026 spin_unlock_irq(&cm_id_priv->lock);
44027- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44028+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44029 counter[CM_RTU_COUNTER]);
44030 goto out;
44031 }
44032@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
44033 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
44034 dreq_msg->local_comm_id);
44035 if (!cm_id_priv) {
44036- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44037+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44038 counter[CM_DREQ_COUNTER]);
44039 cm_issue_drep(work->port, work->mad_recv_wc);
44040 return -EINVAL;
44041@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
44042 case IB_CM_MRA_REP_RCVD:
44043 break;
44044 case IB_CM_TIMEWAIT:
44045- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44046+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44047 counter[CM_DREQ_COUNTER]);
44048 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
44049 goto unlock;
44050@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
44051 cm_free_msg(msg);
44052 goto deref;
44053 case IB_CM_DREQ_RCVD:
44054- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44055+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44056 counter[CM_DREQ_COUNTER]);
44057 goto unlock;
44058 default:
44059@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
44060 ib_modify_mad(cm_id_priv->av.port->mad_agent,
44061 cm_id_priv->msg, timeout)) {
44062 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
44063- atomic_long_inc(&work->port->
44064+ atomic_long_inc_unchecked(&work->port->
44065 counter_group[CM_RECV_DUPLICATES].
44066 counter[CM_MRA_COUNTER]);
44067 goto out;
44068@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
44069 break;
44070 case IB_CM_MRA_REQ_RCVD:
44071 case IB_CM_MRA_REP_RCVD:
44072- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44073+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44074 counter[CM_MRA_COUNTER]);
44075 /* fall through */
44076 default:
44077@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
44078 case IB_CM_LAP_IDLE:
44079 break;
44080 case IB_CM_MRA_LAP_SENT:
44081- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44082+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44083 counter[CM_LAP_COUNTER]);
44084 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
44085 goto unlock;
44086@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
44087 cm_free_msg(msg);
44088 goto deref;
44089 case IB_CM_LAP_RCVD:
44090- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44091+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44092 counter[CM_LAP_COUNTER]);
44093 goto unlock;
44094 default:
44095@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
44096 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
44097 if (cur_cm_id_priv) {
44098 spin_unlock_irq(&cm.lock);
44099- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44100+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44101 counter[CM_SIDR_REQ_COUNTER]);
44102 goto out; /* Duplicate message. */
44103 }
44104@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
44105 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
44106 msg->retries = 1;
44107
44108- atomic_long_add(1 + msg->retries,
44109+ atomic_long_add_unchecked(1 + msg->retries,
44110 &port->counter_group[CM_XMIT].counter[attr_index]);
44111 if (msg->retries)
44112- atomic_long_add(msg->retries,
44113+ atomic_long_add_unchecked(msg->retries,
44114 &port->counter_group[CM_XMIT_RETRIES].
44115 counter[attr_index]);
44116
44117@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
44118 }
44119
44120 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
44121- atomic_long_inc(&port->counter_group[CM_RECV].
44122+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
44123 counter[attr_id - CM_ATTR_ID_OFFSET]);
44124
44125 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
44126@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
44127 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
44128
44129 return sprintf(buf, "%ld\n",
44130- atomic_long_read(&group->counter[cm_attr->index]));
44131+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
44132 }
44133
44134 static const struct sysfs_ops cm_counter_ops = {
44135diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
44136index 9f5ad7c..588cd84 100644
44137--- a/drivers/infiniband/core/fmr_pool.c
44138+++ b/drivers/infiniband/core/fmr_pool.c
44139@@ -98,8 +98,8 @@ struct ib_fmr_pool {
44140
44141 struct task_struct *thread;
44142
44143- atomic_t req_ser;
44144- atomic_t flush_ser;
44145+ atomic_unchecked_t req_ser;
44146+ atomic_unchecked_t flush_ser;
44147
44148 wait_queue_head_t force_wait;
44149 };
44150@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
44151 struct ib_fmr_pool *pool = pool_ptr;
44152
44153 do {
44154- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
44155+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
44156 ib_fmr_batch_release(pool);
44157
44158- atomic_inc(&pool->flush_ser);
44159+ atomic_inc_unchecked(&pool->flush_ser);
44160 wake_up_interruptible(&pool->force_wait);
44161
44162 if (pool->flush_function)
44163@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
44164 }
44165
44166 set_current_state(TASK_INTERRUPTIBLE);
44167- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
44168+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
44169 !kthread_should_stop())
44170 schedule();
44171 __set_current_state(TASK_RUNNING);
44172@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
44173 pool->dirty_watermark = params->dirty_watermark;
44174 pool->dirty_len = 0;
44175 spin_lock_init(&pool->pool_lock);
44176- atomic_set(&pool->req_ser, 0);
44177- atomic_set(&pool->flush_ser, 0);
44178+ atomic_set_unchecked(&pool->req_ser, 0);
44179+ atomic_set_unchecked(&pool->flush_ser, 0);
44180 init_waitqueue_head(&pool->force_wait);
44181
44182 pool->thread = kthread_run(ib_fmr_cleanup_thread,
44183@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
44184 }
44185 spin_unlock_irq(&pool->pool_lock);
44186
44187- serial = atomic_inc_return(&pool->req_ser);
44188+ serial = atomic_inc_return_unchecked(&pool->req_ser);
44189 wake_up_process(pool->thread);
44190
44191 if (wait_event_interruptible(pool->force_wait,
44192- atomic_read(&pool->flush_ser) - serial >= 0))
44193+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
44194 return -EINTR;
44195
44196 return 0;
44197@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
44198 } else {
44199 list_add_tail(&fmr->list, &pool->dirty_list);
44200 if (++pool->dirty_len >= pool->dirty_watermark) {
44201- atomic_inc(&pool->req_ser);
44202+ atomic_inc_unchecked(&pool->req_ser);
44203 wake_up_process(pool->thread);
44204 }
44205 }
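The fmr_pool conversion shows the clearest motivation for the _unchecked atomics: req_ser and flush_ser are free-running serial numbers compared with wrapping subtraction (flush_ser - req_ser < 0 in the cleanup thread above), so an overflow-trapping atomic_inc() would eventually fire when a serial wraps past INT_MAX, even though the comparison is built to tolerate exactly that. The comparison idiom in isolation:

    /* "a is before b" for free-running serial numbers; correct as long
     * as the difference stays well inside the signed range, and only if
     * the wraparound itself is permitted */
    static inline int serial_before(u32 a, u32 b)
    {
            return (int)(a - b) < 0;
    }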
44206diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
44207index ec7a298..8742e59 100644
44208--- a/drivers/infiniband/hw/cxgb4/mem.c
44209+++ b/drivers/infiniband/hw/cxgb4/mem.c
44210@@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
44211 int err;
44212 struct fw_ri_tpte tpt;
44213 u32 stag_idx;
44214- static atomic_t key;
44215+ static atomic_unchecked_t key;
44216
44217 if (c4iw_fatal_error(rdev))
44218 return -EIO;
44219@@ -270,7 +270,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
44220 if (rdev->stats.stag.cur > rdev->stats.stag.max)
44221 rdev->stats.stag.max = rdev->stats.stag.cur;
44222 mutex_unlock(&rdev->stats.lock);
44223- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
44224+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
44225 }
44226 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
44227 __func__, stag_state, type, pdid, stag_idx);
44228diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
44229index 79b3dbc..96e5fcc 100644
44230--- a/drivers/infiniband/hw/ipath/ipath_rc.c
44231+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
44232@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
44233 struct ib_atomic_eth *ateth;
44234 struct ipath_ack_entry *e;
44235 u64 vaddr;
44236- atomic64_t *maddr;
44237+ atomic64_unchecked_t *maddr;
44238 u64 sdata;
44239 u32 rkey;
44240 u8 next;
44241@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
44242 IB_ACCESS_REMOTE_ATOMIC)))
44243 goto nack_acc_unlck;
44244 /* Perform atomic OP and save result. */
44245- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
44246+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
44247 sdata = be64_to_cpu(ateth->swap_data);
44248 e = &qp->s_ack_queue[qp->r_head_ack_queue];
44249 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
44250- (u64) atomic64_add_return(sdata, maddr) - sdata :
44251+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
44252 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
44253 be64_to_cpu(ateth->compare_data),
44254 sdata);
44255diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
44256index 1f95bba..9530f87 100644
44257--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
44258+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
44259@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
44260 unsigned long flags;
44261 struct ib_wc wc;
44262 u64 sdata;
44263- atomic64_t *maddr;
44264+ atomic64_unchecked_t *maddr;
44265 enum ib_wc_status send_status;
44266
44267 /*
44268@@ -382,11 +382,11 @@ again:
44269 IB_ACCESS_REMOTE_ATOMIC)))
44270 goto acc_err;
44271 /* Perform atomic OP and save result. */
44272- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
44273+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
44274 sdata = wqe->wr.wr.atomic.compare_add;
44275 *(u64 *) sqp->s_sge.sge.vaddr =
44276 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
44277- (u64) atomic64_add_return(sdata, maddr) - sdata :
44278+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
44279 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
44280 sdata, wqe->wr.wr.atomic.swap);
44281 goto send_comp;
44282diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
44283index 287ad05..5ae7b44d 100644
44284--- a/drivers/infiniband/hw/mlx4/mad.c
44285+++ b/drivers/infiniband/hw/mlx4/mad.c
44286@@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
44287
44288 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
44289 {
44290- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
44291+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
44292 cpu_to_be64(0xff00000000000000LL);
44293 }
44294
44295diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
44296index ed327e6..ca1739e0 100644
44297--- a/drivers/infiniband/hw/mlx4/mcg.c
44298+++ b/drivers/infiniband/hw/mlx4/mcg.c
44299@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
44300 {
44301 char name[20];
44302
44303- atomic_set(&ctx->tid, 0);
44304+ atomic_set_unchecked(&ctx->tid, 0);
44305 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
44306 ctx->mcg_wq = create_singlethread_workqueue(name);
44307 if (!ctx->mcg_wq)
44308diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
44309index 369da3c..223e6e9 100644
44310--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
44311+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
44312@@ -426,7 +426,7 @@ struct mlx4_ib_demux_ctx {
44313 struct list_head mcg_mgid0_list;
44314 struct workqueue_struct *mcg_wq;
44315 struct mlx4_ib_demux_pv_ctx **tun;
44316- atomic_t tid;
44317+ atomic_unchecked_t tid;
44318 int flushing; /* flushing the work queue */
44319 };
44320
44321diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
44322index 9d3e5c1..6f166df 100644
44323--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
44324+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
44325@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
44326 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
44327 }
44328
44329-int mthca_QUERY_FW(struct mthca_dev *dev)
44330+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
44331 {
44332 struct mthca_mailbox *mailbox;
44333 u32 *outbox;
44334@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44335 CMD_TIME_CLASS_B);
44336 }
44337
44338-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44339+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44340 int num_mtt)
44341 {
44342 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
44343@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
44344 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
44345 }
44346
44347-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44348+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44349 int eq_num)
44350 {
44351 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
44352@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
44353 CMD_TIME_CLASS_B);
44354 }
44355
44356-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
44357+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
44358 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
44359 void *in_mad, void *response_mad)
44360 {
44361diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
44362index ded76c1..0cf0a08 100644
44363--- a/drivers/infiniband/hw/mthca/mthca_main.c
44364+++ b/drivers/infiniband/hw/mthca/mthca_main.c
44365@@ -692,7 +692,7 @@ err_close:
44366 return err;
44367 }
44368
44369-static int mthca_setup_hca(struct mthca_dev *dev)
44370+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
44371 {
44372 int err;
44373
44374diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
44375index ed9a989..6aa5dc2 100644
44376--- a/drivers/infiniband/hw/mthca/mthca_mr.c
44377+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
44378@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
44379 * through the bitmaps)
44380 */
44381
44382-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
44383+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
44384 {
44385 int o;
44386 int m;
44387@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
44388 return key;
44389 }
44390
44391-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
44392+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
44393 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
44394 {
44395 struct mthca_mailbox *mailbox;
44396@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
44397 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
44398 }
44399
44400-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
44401+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
44402 u64 *buffer_list, int buffer_size_shift,
44403 int list_len, u64 iova, u64 total_size,
44404 u32 access, struct mthca_mr *mr)
44405diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
44406index 415f8e1..e34214e 100644
44407--- a/drivers/infiniband/hw/mthca/mthca_provider.c
44408+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
44409@@ -764,7 +764,7 @@ unlock:
44410 return 0;
44411 }
44412
44413-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
44414+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
44415 {
44416 struct mthca_dev *dev = to_mdev(ibcq->device);
44417 struct mthca_cq *cq = to_mcq(ibcq);
44418diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
44419index 3b2a6dc..bce26ff 100644
44420--- a/drivers/infiniband/hw/nes/nes.c
44421+++ b/drivers/infiniband/hw/nes/nes.c
44422@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
44423 LIST_HEAD(nes_adapter_list);
44424 static LIST_HEAD(nes_dev_list);
44425
44426-atomic_t qps_destroyed;
44427+atomic_unchecked_t qps_destroyed;
44428
44429 static unsigned int ee_flsh_adapter;
44430 static unsigned int sysfs_nonidx_addr;
44431@@ -278,7 +278,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
44432 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
44433 struct nes_adapter *nesadapter = nesdev->nesadapter;
44434
44435- atomic_inc(&qps_destroyed);
44436+ atomic_inc_unchecked(&qps_destroyed);
44437
44438 /* Free the control structures */
44439
44440diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
44441index bd9d132..70d84f4 100644
44442--- a/drivers/infiniband/hw/nes/nes.h
44443+++ b/drivers/infiniband/hw/nes/nes.h
44444@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
44445 extern unsigned int wqm_quanta;
44446 extern struct list_head nes_adapter_list;
44447
44448-extern atomic_t cm_connects;
44449-extern atomic_t cm_accepts;
44450-extern atomic_t cm_disconnects;
44451-extern atomic_t cm_closes;
44452-extern atomic_t cm_connecteds;
44453-extern atomic_t cm_connect_reqs;
44454-extern atomic_t cm_rejects;
44455-extern atomic_t mod_qp_timouts;
44456-extern atomic_t qps_created;
44457-extern atomic_t qps_destroyed;
44458-extern atomic_t sw_qps_destroyed;
44459+extern atomic_unchecked_t cm_connects;
44460+extern atomic_unchecked_t cm_accepts;
44461+extern atomic_unchecked_t cm_disconnects;
44462+extern atomic_unchecked_t cm_closes;
44463+extern atomic_unchecked_t cm_connecteds;
44464+extern atomic_unchecked_t cm_connect_reqs;
44465+extern atomic_unchecked_t cm_rejects;
44466+extern atomic_unchecked_t mod_qp_timouts;
44467+extern atomic_unchecked_t qps_created;
44468+extern atomic_unchecked_t qps_destroyed;
44469+extern atomic_unchecked_t sw_qps_destroyed;
44470 extern u32 mh_detected;
44471 extern u32 mh_pauses_sent;
44472 extern u32 cm_packets_sent;
44473@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
44474 extern u32 cm_packets_received;
44475 extern u32 cm_packets_dropped;
44476 extern u32 cm_packets_retrans;
44477-extern atomic_t cm_listens_created;
44478-extern atomic_t cm_listens_destroyed;
44479+extern atomic_unchecked_t cm_listens_created;
44480+extern atomic_unchecked_t cm_listens_destroyed;
44481 extern u32 cm_backlog_drops;
44482-extern atomic_t cm_loopbacks;
44483-extern atomic_t cm_nodes_created;
44484-extern atomic_t cm_nodes_destroyed;
44485-extern atomic_t cm_accel_dropped_pkts;
44486-extern atomic_t cm_resets_recvd;
44487-extern atomic_t pau_qps_created;
44488-extern atomic_t pau_qps_destroyed;
44489+extern atomic_unchecked_t cm_loopbacks;
44490+extern atomic_unchecked_t cm_nodes_created;
44491+extern atomic_unchecked_t cm_nodes_destroyed;
44492+extern atomic_unchecked_t cm_accel_dropped_pkts;
44493+extern atomic_unchecked_t cm_resets_recvd;
44494+extern atomic_unchecked_t pau_qps_created;
44495+extern atomic_unchecked_t pau_qps_destroyed;
44496
44497 extern u32 int_mod_timer_init;
44498 extern u32 int_mod_cq_depth_256;
44499diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
44500index 6f09a72..cf4399d 100644
44501--- a/drivers/infiniband/hw/nes/nes_cm.c
44502+++ b/drivers/infiniband/hw/nes/nes_cm.c
44503@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
44504 u32 cm_packets_retrans;
44505 u32 cm_packets_created;
44506 u32 cm_packets_received;
44507-atomic_t cm_listens_created;
44508-atomic_t cm_listens_destroyed;
44509+atomic_unchecked_t cm_listens_created;
44510+atomic_unchecked_t cm_listens_destroyed;
44511 u32 cm_backlog_drops;
44512-atomic_t cm_loopbacks;
44513-atomic_t cm_nodes_created;
44514-atomic_t cm_nodes_destroyed;
44515-atomic_t cm_accel_dropped_pkts;
44516-atomic_t cm_resets_recvd;
44517+atomic_unchecked_t cm_loopbacks;
44518+atomic_unchecked_t cm_nodes_created;
44519+atomic_unchecked_t cm_nodes_destroyed;
44520+atomic_unchecked_t cm_accel_dropped_pkts;
44521+atomic_unchecked_t cm_resets_recvd;
44522
44523 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
44524 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
44525@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
44526 /* instance of function pointers for client API */
44527 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
44528 static struct nes_cm_ops nes_cm_api = {
44529- mini_cm_accelerated,
44530- mini_cm_listen,
44531- mini_cm_del_listen,
44532- mini_cm_connect,
44533- mini_cm_close,
44534- mini_cm_accept,
44535- mini_cm_reject,
44536- mini_cm_recv_pkt,
44537- mini_cm_dealloc_core,
44538- mini_cm_get,
44539- mini_cm_set
44540+ .accelerated = mini_cm_accelerated,
44541+ .listen = mini_cm_listen,
44542+ .stop_listener = mini_cm_del_listen,
44543+ .connect = mini_cm_connect,
44544+ .close = mini_cm_close,
44545+ .accept = mini_cm_accept,
44546+ .reject = mini_cm_reject,
44547+ .recv_pkt = mini_cm_recv_pkt,
44548+ .destroy_cm_core = mini_cm_dealloc_core,
44549+ .get = mini_cm_get,
44550+ .set = mini_cm_set
44551 };
44552
44553 static struct nes_cm_core *g_cm_core;
44554
44555-atomic_t cm_connects;
44556-atomic_t cm_accepts;
44557-atomic_t cm_disconnects;
44558-atomic_t cm_closes;
44559-atomic_t cm_connecteds;
44560-atomic_t cm_connect_reqs;
44561-atomic_t cm_rejects;
44562+atomic_unchecked_t cm_connects;
44563+atomic_unchecked_t cm_accepts;
44564+atomic_unchecked_t cm_disconnects;
44565+atomic_unchecked_t cm_closes;
44566+atomic_unchecked_t cm_connecteds;
44567+atomic_unchecked_t cm_connect_reqs;
44568+atomic_unchecked_t cm_rejects;
44569
44570 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
44571 {
44572@@ -1436,7 +1436,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
44573 kfree(listener);
44574 listener = NULL;
44575 ret = 0;
44576- atomic_inc(&cm_listens_destroyed);
44577+ atomic_inc_unchecked(&cm_listens_destroyed);
44578 } else {
44579 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
44580 }
44581@@ -1637,7 +1637,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
44582 cm_node->rem_mac);
44583
44584 add_hte_node(cm_core, cm_node);
44585- atomic_inc(&cm_nodes_created);
44586+ atomic_inc_unchecked(&cm_nodes_created);
44587
44588 return cm_node;
44589 }
44590@@ -1698,7 +1698,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
44591 }
44592
44593 atomic_dec(&cm_core->node_cnt);
44594- atomic_inc(&cm_nodes_destroyed);
44595+ atomic_inc_unchecked(&cm_nodes_destroyed);
44596 nesqp = cm_node->nesqp;
44597 if (nesqp) {
44598 nesqp->cm_node = NULL;
44599@@ -1762,7 +1762,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
44600
44601 static void drop_packet(struct sk_buff *skb)
44602 {
44603- atomic_inc(&cm_accel_dropped_pkts);
44604+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
44605 dev_kfree_skb_any(skb);
44606 }
44607
44608@@ -1825,7 +1825,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
44609 {
44610
44611 int reset = 0; /* whether to send reset in case of err.. */
44612- atomic_inc(&cm_resets_recvd);
44613+ atomic_inc_unchecked(&cm_resets_recvd);
44614 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
44615 " refcnt=%d\n", cm_node, cm_node->state,
44616 atomic_read(&cm_node->ref_count));
44617@@ -2492,7 +2492,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
44618 rem_ref_cm_node(cm_node->cm_core, cm_node);
44619 return NULL;
44620 }
44621- atomic_inc(&cm_loopbacks);
44622+ atomic_inc_unchecked(&cm_loopbacks);
44623 loopbackremotenode->loopbackpartner = cm_node;
44624 loopbackremotenode->tcp_cntxt.rcv_wscale =
44625 NES_CM_DEFAULT_RCV_WND_SCALE;
44626@@ -2773,7 +2773,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
44627 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
44628 else {
44629 rem_ref_cm_node(cm_core, cm_node);
44630- atomic_inc(&cm_accel_dropped_pkts);
44631+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
44632 dev_kfree_skb_any(skb);
44633 }
44634 break;
44635@@ -3081,7 +3081,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
44636
44637 if ((cm_id) && (cm_id->event_handler)) {
44638 if (issue_disconn) {
44639- atomic_inc(&cm_disconnects);
44640+ atomic_inc_unchecked(&cm_disconnects);
44641 cm_event.event = IW_CM_EVENT_DISCONNECT;
44642 cm_event.status = disconn_status;
44643 cm_event.local_addr = cm_id->local_addr;
44644@@ -3103,7 +3103,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
44645 }
44646
44647 if (issue_close) {
44648- atomic_inc(&cm_closes);
44649+ atomic_inc_unchecked(&cm_closes);
44650 nes_disconnect(nesqp, 1);
44651
44652 cm_id->provider_data = nesqp;
44653@@ -3241,7 +3241,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
44654
44655 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
44656 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
44657- atomic_inc(&cm_accepts);
44658+ atomic_inc_unchecked(&cm_accepts);
44659
44660 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
44661 netdev_refcnt_read(nesvnic->netdev));
44662@@ -3439,7 +3439,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
44663 struct nes_cm_core *cm_core;
44664 u8 *start_buff;
44665
44666- atomic_inc(&cm_rejects);
44667+ atomic_inc_unchecked(&cm_rejects);
44668 cm_node = (struct nes_cm_node *)cm_id->provider_data;
44669 loopback = cm_node->loopbackpartner;
44670 cm_core = cm_node->cm_core;
44671@@ -3504,7 +3504,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
44672 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
44673 ntohs(laddr->sin_port));
44674
44675- atomic_inc(&cm_connects);
44676+ atomic_inc_unchecked(&cm_connects);
44677 nesqp->active_conn = 1;
44678
44679 /* cache the cm_id in the qp */
44680@@ -3649,7 +3649,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
44681 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
44682 return err;
44683 }
44684- atomic_inc(&cm_listens_created);
44685+ atomic_inc_unchecked(&cm_listens_created);
44686 }
44687
44688 cm_id->add_ref(cm_id);
44689@@ -3756,7 +3756,7 @@ static void cm_event_connected(struct nes_cm_event *event)
44690
44691 if (nesqp->destroyed)
44692 return;
44693- atomic_inc(&cm_connecteds);
44694+ atomic_inc_unchecked(&cm_connecteds);
44695 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
44696 " local port 0x%04X. jiffies = %lu.\n",
44697 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
44698@@ -3941,7 +3941,7 @@ static void cm_event_reset(struct nes_cm_event *event)
44699
44700 cm_id->add_ref(cm_id);
44701 ret = cm_id->event_handler(cm_id, &cm_event);
44702- atomic_inc(&cm_closes);
44703+ atomic_inc_unchecked(&cm_closes);
44704 cm_event.event = IW_CM_EVENT_CLOSE;
44705 cm_event.status = 0;
44706 cm_event.provider_data = cm_id->provider_data;
44707@@ -3981,7 +3981,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
44708 return;
44709 cm_id = cm_node->cm_id;
44710
44711- atomic_inc(&cm_connect_reqs);
44712+ atomic_inc_unchecked(&cm_connect_reqs);
44713 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
44714 cm_node, cm_id, jiffies);
44715
44716@@ -4030,7 +4030,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
44717 return;
44718 cm_id = cm_node->cm_id;
44719
44720- atomic_inc(&cm_connect_reqs);
44721+ atomic_inc_unchecked(&cm_connect_reqs);
44722 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
44723 cm_node, cm_id, jiffies);
44724
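The nes_cm_api hunk above converts a positional initializer to C99 designated initializers. That conversion is what makes grsecurity's structure-layout hardening (such as randomized struct layout) safe to apply: positional initializers silently rebind to the wrong members once field order changes, while designated initializers name their targets. A minimal, runnable illustration; the `ops` struct and functions here are hypothetical, not from the driver:

#include <stdio.h>

struct ops {
    int (*open)(void);
    int (*close)(void);
};

static int my_open(void)  { return puts("open"); }
static int my_close(void) { return puts("close"); }

/* The positional form breaks silently if the struct's field order is
 * randomized or edited; the designated form keeps each function
 * bound to the member it was written for. */
static const struct ops positional = { my_open, my_close };
static const struct ops designated = {
    .open  = my_open,
    .close = my_close,
};

int main(void)
{
    positional.open();
    designated.open();
    return 0;
}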
44725diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
44726index 4166452..fc952c3 100644
44727--- a/drivers/infiniband/hw/nes/nes_mgt.c
44728+++ b/drivers/infiniband/hw/nes/nes_mgt.c
44729@@ -40,8 +40,8 @@
44730 #include "nes.h"
44731 #include "nes_mgt.h"
44732
44733-atomic_t pau_qps_created;
44734-atomic_t pau_qps_destroyed;
44735+atomic_unchecked_t pau_qps_created;
44736+atomic_unchecked_t pau_qps_destroyed;
44737
44738 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
44739 {
44740@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
44741 {
44742 struct sk_buff *skb;
44743 unsigned long flags;
44744- atomic_inc(&pau_qps_destroyed);
44745+ atomic_inc_unchecked(&pau_qps_destroyed);
44746
44747 /* Free packets that have not yet been forwarded */
44748 /* Lock is acquired by skb_dequeue when removing the skb */
44749@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
44750 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
44751 skb_queue_head_init(&nesqp->pau_list);
44752 spin_lock_init(&nesqp->pau_lock);
44753- atomic_inc(&pau_qps_created);
44754+ atomic_inc_unchecked(&pau_qps_created);
44755 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
44756 }
44757
44758diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
44759index 49eb511..a774366 100644
44760--- a/drivers/infiniband/hw/nes/nes_nic.c
44761+++ b/drivers/infiniband/hw/nes/nes_nic.c
44762@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
44763 target_stat_values[++index] = mh_detected;
44764 target_stat_values[++index] = mh_pauses_sent;
44765 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
44766- target_stat_values[++index] = atomic_read(&cm_connects);
44767- target_stat_values[++index] = atomic_read(&cm_accepts);
44768- target_stat_values[++index] = atomic_read(&cm_disconnects);
44769- target_stat_values[++index] = atomic_read(&cm_connecteds);
44770- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
44771- target_stat_values[++index] = atomic_read(&cm_rejects);
44772- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
44773- target_stat_values[++index] = atomic_read(&qps_created);
44774- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
44775- target_stat_values[++index] = atomic_read(&qps_destroyed);
44776- target_stat_values[++index] = atomic_read(&cm_closes);
44777+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
44778+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
44779+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
44780+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
44781+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
44782+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
44783+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
44784+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
44785+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
44786+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
44787+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
44788 target_stat_values[++index] = cm_packets_sent;
44789 target_stat_values[++index] = cm_packets_bounced;
44790 target_stat_values[++index] = cm_packets_created;
44791 target_stat_values[++index] = cm_packets_received;
44792 target_stat_values[++index] = cm_packets_dropped;
44793 target_stat_values[++index] = cm_packets_retrans;
44794- target_stat_values[++index] = atomic_read(&cm_listens_created);
44795- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
44796+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
44797+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
44798 target_stat_values[++index] = cm_backlog_drops;
44799- target_stat_values[++index] = atomic_read(&cm_loopbacks);
44800- target_stat_values[++index] = atomic_read(&cm_nodes_created);
44801- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
44802- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
44803- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
44804+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
44805+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
44806+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
44807+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
44808+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
44809 target_stat_values[++index] = nesadapter->free_4kpbl;
44810 target_stat_values[++index] = nesadapter->free_256pbl;
44811 target_stat_values[++index] = int_mod_timer_init;
44812 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
44813 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
44814 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
44815- target_stat_values[++index] = atomic_read(&pau_qps_created);
44816- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
44817+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
44818+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
44819 }
44820
44821 /**
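The atomic_t to atomic_unchecked_t conversions running through nes_cm.c, nes_mgt.c and the ethtool read-out above are the flip side of PaX's REFCOUNT feature: ordinary atomic_t operations are instrumented to detect signed overflow so a leaked reference count cannot wrap into a use-after-free, while counters that are pure statistics (cm_connects, pau_qps_created and friends) move to the _unchecked variants, which keep plain wrapping semantics and stay out of the detector's way. A simplified userspace model, not the real PaX implementation (which does the check in hand-written assembly and raises a trap):

#include <assert.h>
#include <limits.h>

typedef struct { int counter; } atomic_t;
typedef struct { int counter; } atomic_unchecked_t;

/* Checked increment: flags the INT_MAX -> INT_MIN wrap that a
 * reference count must never take (the real code also undoes the
 * increment before trapping). */
static void atomic_inc(atomic_t *v)
{
    int old = __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
    assert(old != INT_MAX && "refcount overflow");
}

/* Unchecked increment: ordinary wrapping add, appropriate for pure
 * statistics such as cm_connects above. */
static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
    __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static int atomic_read_unchecked(const atomic_unchecked_t *v)
{
    return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

int main(void)
{
    atomic_t ref = { 0 };
    atomic_unchecked_t stats = { 0 };

    atomic_inc(&ref);
    atomic_inc_unchecked(&stats);
    return atomic_read_unchecked(&stats) == 1 ? 0 : 1;
}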
44822diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
44823index 218dd35..97ce31d 100644
44824--- a/drivers/infiniband/hw/nes/nes_verbs.c
44825+++ b/drivers/infiniband/hw/nes/nes_verbs.c
44826@@ -46,9 +46,9 @@
44827
44828 #include <rdma/ib_umem.h>
44829
44830-atomic_t mod_qp_timouts;
44831-atomic_t qps_created;
44832-atomic_t sw_qps_destroyed;
44833+atomic_unchecked_t mod_qp_timouts;
44834+atomic_unchecked_t qps_created;
44835+atomic_unchecked_t sw_qps_destroyed;
44836
44837 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
44838
44839@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
44840 if (init_attr->create_flags)
44841 return ERR_PTR(-EINVAL);
44842
44843- atomic_inc(&qps_created);
44844+ atomic_inc_unchecked(&qps_created);
44845 switch (init_attr->qp_type) {
44846 case IB_QPT_RC:
44847 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
44848@@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
44849 struct iw_cm_event cm_event;
44850 int ret = 0;
44851
44852- atomic_inc(&sw_qps_destroyed);
44853+ atomic_inc_unchecked(&sw_qps_destroyed);
44854 nesqp->destroyed = 1;
44855
44856 /* Blow away the connection if it exists. */
44857diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
44858index c00ae09..04e91be 100644
44859--- a/drivers/infiniband/hw/qib/qib.h
44860+++ b/drivers/infiniband/hw/qib/qib.h
44861@@ -52,6 +52,7 @@
44862 #include <linux/kref.h>
44863 #include <linux/sched.h>
44864 #include <linux/kthread.h>
44865+#include <linux/slab.h>
44866
44867 #include "qib_common.h"
44868 #include "qib_verbs.h"
44869diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
44870index 24c41ba..102d71f 100644
44871--- a/drivers/input/gameport/gameport.c
44872+++ b/drivers/input/gameport/gameport.c
44873@@ -490,14 +490,14 @@ EXPORT_SYMBOL(gameport_set_phys);
44874 */
44875 static void gameport_init_port(struct gameport *gameport)
44876 {
44877- static atomic_t gameport_no = ATOMIC_INIT(0);
44878+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
44879
44880 __module_get(THIS_MODULE);
44881
44882 mutex_init(&gameport->drv_mutex);
44883 device_initialize(&gameport->dev);
44884 dev_set_name(&gameport->dev, "gameport%lu",
44885- (unsigned long)atomic_inc_return(&gameport_no) - 1);
44886+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
44887 gameport->dev.bus = &gameport_bus;
44888 gameport->dev.release = gameport_release_port;
44889 if (gameport->parent)
44890diff --git a/drivers/input/input.c b/drivers/input/input.c
44891index 29ca0bb..f4bc2e3 100644
44892--- a/drivers/input/input.c
44893+++ b/drivers/input/input.c
44894@@ -1774,7 +1774,7 @@ EXPORT_SYMBOL_GPL(input_class);
44895 */
44896 struct input_dev *input_allocate_device(void)
44897 {
44898- static atomic_t input_no = ATOMIC_INIT(0);
44899+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
44900 struct input_dev *dev;
44901
44902 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
44903@@ -1789,7 +1789,7 @@ struct input_dev *input_allocate_device(void)
44904 INIT_LIST_HEAD(&dev->node);
44905
44906 dev_set_name(&dev->dev, "input%ld",
44907- (unsigned long) atomic_inc_return(&input_no) - 1);
44908+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
44909
44910 __module_get(THIS_MODULE);
44911 }
44912diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
44913index 4a95b22..874c182 100644
44914--- a/drivers/input/joystick/sidewinder.c
44915+++ b/drivers/input/joystick/sidewinder.c
44916@@ -30,6 +30,7 @@
44917 #include <linux/kernel.h>
44918 #include <linux/module.h>
44919 #include <linux/slab.h>
44920+#include <linux/sched.h>
44921 #include <linux/input.h>
44922 #include <linux/gameport.h>
44923 #include <linux/jiffies.h>
44924diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
44925index 603fe0d..f63decc 100644
44926--- a/drivers/input/joystick/xpad.c
44927+++ b/drivers/input/joystick/xpad.c
44928@@ -737,7 +737,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
44929
44930 static int xpad_led_probe(struct usb_xpad *xpad)
44931 {
44932- static atomic_t led_seq = ATOMIC_INIT(0);
44933+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
44934 long led_no;
44935 struct xpad_led *led;
44936 struct led_classdev *led_cdev;
44937@@ -750,7 +750,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
44938 if (!led)
44939 return -ENOMEM;
44940
44941- led_no = (long)atomic_inc_return(&led_seq) - 1;
44942+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
44943
44944 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
44945 led->xpad = xpad;
44946diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
44947index 719410f..1896169 100644
44948--- a/drivers/input/misc/ims-pcu.c
44949+++ b/drivers/input/misc/ims-pcu.c
44950@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
44951
44952 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
44953 {
44954- static atomic_t device_no = ATOMIC_INIT(0);
44955+ static atomic_unchecked_t device_no = ATOMIC_INIT(0);
44956
44957 const struct ims_pcu_device_info *info;
44958 int error;
44959@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
44960 }
44961
44962 /* Device appears to be operable, complete initialization */
44963- pcu->device_no = atomic_inc_return(&device_no) - 1;
44964+ pcu->device_no = atomic_inc_return_unchecked(&device_no) - 1;
44965
44966 /*
44967 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
44968diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
44969index 2f0b39d..7370f13 100644
44970--- a/drivers/input/mouse/psmouse.h
44971+++ b/drivers/input/mouse/psmouse.h
44972@@ -116,7 +116,7 @@ struct psmouse_attribute {
44973 ssize_t (*set)(struct psmouse *psmouse, void *data,
44974 const char *buf, size_t count);
44975 bool protect;
44976-};
44977+} __do_const;
44978 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
44979
44980 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
44981diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
44982index b604564..3f14ae4 100644
44983--- a/drivers/input/mousedev.c
44984+++ b/drivers/input/mousedev.c
44985@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
44986
44987 spin_unlock_irq(&client->packet_lock);
44988
44989- if (copy_to_user(buffer, data, count))
44990+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
44991 return -EFAULT;
44992
44993 return count;
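The mousedev_read() change bounds a user-controlled length before copy_to_user(): without the `count > sizeof(data)` test, a request larger than the on-stack packet buffer would leak adjacent kernel stack memory. The same pattern guards copy_from_user() in the avm b1 and icn ISDN drivers further down. A userspace model of the idiom, with hypothetical names:

#include <errno.h>
#include <string.h>

/* Copy at most the bytes that exist in the kernel-side buffer;
 * reject anything larger instead of over-reading the stack.
 * bounded_copy and its parameters are hypothetical names. */
static int bounded_copy(void *ubuf, const void *kbuf,
                        size_t count, size_t kbuf_size)
{
    if (count > kbuf_size)
        return -EFAULT;        /* mirrors the patched check */
    memcpy(ubuf, kbuf, count); /* stands in for copy_to_user() */
    return 0;
}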
44994diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
44995index b29134d..394deb0 100644
44996--- a/drivers/input/serio/serio.c
44997+++ b/drivers/input/serio/serio.c
44998@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
44999 */
45000 static void serio_init_port(struct serio *serio)
45001 {
45002- static atomic_t serio_no = ATOMIC_INIT(0);
45003+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
45004
45005 __module_get(THIS_MODULE);
45006
45007@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
45008 mutex_init(&serio->drv_mutex);
45009 device_initialize(&serio->dev);
45010 dev_set_name(&serio->dev, "serio%ld",
45011- (long)atomic_inc_return(&serio_no) - 1);
45012+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
45013 serio->dev.bus = &serio_bus;
45014 serio->dev.release = serio_release_port;
45015 serio->dev.groups = serio_device_attr_groups;
45016diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
45017index c9a02fe..0debc75 100644
45018--- a/drivers/input/serio/serio_raw.c
45019+++ b/drivers/input/serio/serio_raw.c
45020@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
45021
45022 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
45023 {
45024- static atomic_t serio_raw_no = ATOMIC_INIT(0);
45025+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(0);
45026 struct serio_raw *serio_raw;
45027 int err;
45028
45029@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
45030 }
45031
45032 snprintf(serio_raw->name, sizeof(serio_raw->name),
45033- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no) - 1);
45034+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no) - 1);
45035 kref_init(&serio_raw->kref);
45036 INIT_LIST_HEAD(&serio_raw->client_list);
45037 init_waitqueue_head(&serio_raw->wait);
45038diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
45039index e5555fc..937986d 100644
45040--- a/drivers/iommu/iommu.c
45041+++ b/drivers/iommu/iommu.c
45042@@ -588,7 +588,7 @@ static struct notifier_block iommu_bus_nb = {
45043 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
45044 {
45045 bus_register_notifier(bus, &iommu_bus_nb);
45046- bus_for_each_dev(bus, NULL, ops, add_iommu_group);
45047+ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
45048 }
45049
45050 /**
45051diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
45052index 33c4395..e06447e 100644
45053--- a/drivers/iommu/irq_remapping.c
45054+++ b/drivers/iommu/irq_remapping.c
45055@@ -354,7 +354,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
45056 void panic_if_irq_remap(const char *msg)
45057 {
45058 if (irq_remapping_enabled)
45059- panic(msg);
45060+ panic("%s", msg);
45061 }
45062
45063 static void ir_ack_apic_edge(struct irq_data *data)
45064@@ -375,10 +375,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
45065
45066 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
45067 {
45068- chip->irq_print_chip = ir_print_prefix;
45069- chip->irq_ack = ir_ack_apic_edge;
45070- chip->irq_eoi = ir_ack_apic_level;
45071- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
45072+ pax_open_kernel();
45073+ *(void **)&chip->irq_print_chip = ir_print_prefix;
45074+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
45075+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
45076+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
45077+ pax_close_kernel();
45078 }
45079
45080 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
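irq_remap_modify_chip_defaults() now installs its method pointers between pax_open_kernel() and pax_close_kernel() because, under KERNEXEC and constification, irq_chip instances live in read-only memory. On x86 the open/close pair boils down to toggling the CR0.WP bit so ring 0 may write through read-only mappings; the same trick appears in assembly form in the lguest switcher hunk further below. A deliberately simplified sketch (kernel-only: CR0 access requires ring 0, and the real code also handles preemption and per-CPU state):

/* Sketch only, not the real pax_open_kernel()/pax_close_kernel().
 * Clearing CR0.WP (bit 16) lets ring 0 write through pages that are
 * mapped read-only. */
static inline unsigned long pax_open_kernel_sketch(void)
{
    unsigned long cr0;

    asm volatile("mov %%cr0, %0" : "=r"(cr0));
    asm volatile("mov %0, %%cr0" : : "r"(cr0 & ~(1UL << 16))); /* WP off */
    return cr0;
}

static inline void pax_close_kernel_sketch(unsigned long cr0)
{
    asm volatile("mov %0, %%cr0" : : "r"(cr0)); /* restore WP */
}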
45081diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
45082index 7c131cf..035129b 100644
45083--- a/drivers/irqchip/irq-gic.c
45084+++ b/drivers/irqchip/irq-gic.c
45085@@ -85,7 +85,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
45086 * Supported arch specific GIC irq extension.
45087 * Default make them NULL.
45088 */
45089-struct irq_chip gic_arch_extn = {
45090+irq_chip_no_const gic_arch_extn = {
45091 .irq_eoi = NULL,
45092 .irq_mask = NULL,
45093 .irq_unmask = NULL,
45094@@ -337,7 +337,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
45095 chained_irq_exit(chip, desc);
45096 }
45097
45098-static struct irq_chip gic_chip = {
45099+static irq_chip_no_const gic_chip __read_only = {
45100 .name = "GIC",
45101 .irq_mask = gic_mask_irq,
45102 .irq_unmask = gic_unmask_irq,
45103diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
45104index 8777065..a4a9967 100644
45105--- a/drivers/irqchip/irq-renesas-irqc.c
45106+++ b/drivers/irqchip/irq-renesas-irqc.c
45107@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
45108 struct irqc_priv *p;
45109 struct resource *io;
45110 struct resource *irq;
45111- struct irq_chip *irq_chip;
45112+ irq_chip_no_const *irq_chip;
45113 const char *name = dev_name(&pdev->dev);
45114 int ret;
45115 int k;
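irq_chip_no_const here (and __do_const on psmouse_attribute earlier) belong to grsecurity's constify GCC plugin: structures consisting of function pointers are made implicitly const at compile time, __do_const forces that for structs carrying extra data members, and the *_no_const typedefs opt out for instances that must be filled in at runtime, such as the probe-time irq_chip in irqc_probe(). A sketch of the opt-out shape; the attribute spelling is my assumption about the plugin's interface:

struct ops { void (*handler)(void); };

#ifdef CONSTIFY_PLUGIN
/* assumption: attribute spelling as used with the plugin */
typedef struct ops __attribute__((no_const)) ops_no_const;
#else
typedef struct ops ops_no_const;
#endif

/* Constified instances end up in read-only memory; the opt-out type
 * is for the rare object a driver must write at probe time. */
static ops_no_const runtime_ops;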
45116diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
45117index f9a87ed..3fdd854 100644
45118--- a/drivers/isdn/capi/capi.c
45119+++ b/drivers/isdn/capi/capi.c
45120@@ -81,8 +81,8 @@ struct capiminor {
45121
45122 struct capi20_appl *ap;
45123 u32 ncci;
45124- atomic_t datahandle;
45125- atomic_t msgid;
45126+ atomic_unchecked_t datahandle;
45127+ atomic_unchecked_t msgid;
45128
45129 struct tty_port port;
45130 int ttyinstop;
45131@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
45132 capimsg_setu16(s, 2, mp->ap->applid);
45133 capimsg_setu8 (s, 4, CAPI_DATA_B3);
45134 capimsg_setu8 (s, 5, CAPI_RESP);
45135- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
45136+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
45137 capimsg_setu32(s, 8, mp->ncci);
45138 capimsg_setu16(s, 12, datahandle);
45139 }
45140@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
45141 mp->outbytes -= len;
45142 spin_unlock_bh(&mp->outlock);
45143
45144- datahandle = atomic_inc_return(&mp->datahandle);
45145+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
45146 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
45147 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
45148 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
45149 capimsg_setu16(skb->data, 2, mp->ap->applid);
45150 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
45151 capimsg_setu8 (skb->data, 5, CAPI_REQ);
45152- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
45153+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
45154 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
45155 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
45156 capimsg_setu16(skb->data, 16, len); /* Data length */
45157diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
45158index b7ae0a0..04590fa 100644
45159--- a/drivers/isdn/gigaset/bas-gigaset.c
45160+++ b/drivers/isdn/gigaset/bas-gigaset.c
45161@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
45162
45163
45164 static const struct gigaset_ops gigops = {
45165- gigaset_write_cmd,
45166- gigaset_write_room,
45167- gigaset_chars_in_buffer,
45168- gigaset_brkchars,
45169- gigaset_init_bchannel,
45170- gigaset_close_bchannel,
45171- gigaset_initbcshw,
45172- gigaset_freebcshw,
45173- gigaset_reinitbcshw,
45174- gigaset_initcshw,
45175- gigaset_freecshw,
45176- gigaset_set_modem_ctrl,
45177- gigaset_baud_rate,
45178- gigaset_set_line_ctrl,
45179- gigaset_isoc_send_skb,
45180- gigaset_isoc_input,
45181+ .write_cmd = gigaset_write_cmd,
45182+ .write_room = gigaset_write_room,
45183+ .chars_in_buffer = gigaset_chars_in_buffer,
45184+ .brkchars = gigaset_brkchars,
45185+ .init_bchannel = gigaset_init_bchannel,
45186+ .close_bchannel = gigaset_close_bchannel,
45187+ .initbcshw = gigaset_initbcshw,
45188+ .freebcshw = gigaset_freebcshw,
45189+ .reinitbcshw = gigaset_reinitbcshw,
45190+ .initcshw = gigaset_initcshw,
45191+ .freecshw = gigaset_freecshw,
45192+ .set_modem_ctrl = gigaset_set_modem_ctrl,
45193+ .baud_rate = gigaset_baud_rate,
45194+ .set_line_ctrl = gigaset_set_line_ctrl,
45195+ .send_skb = gigaset_isoc_send_skb,
45196+ .handle_input = gigaset_isoc_input,
45197 };
45198
45199 /* bas_gigaset_init
45200diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
45201index 600c79b..3752bab 100644
45202--- a/drivers/isdn/gigaset/interface.c
45203+++ b/drivers/isdn/gigaset/interface.c
45204@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
45205 }
45206 tty->driver_data = cs;
45207
45208- ++cs->port.count;
45209+ atomic_inc(&cs->port.count);
45210
45211- if (cs->port.count == 1) {
45212+ if (atomic_read(&cs->port.count) == 1) {
45213 tty_port_tty_set(&cs->port, tty);
45214 cs->port.low_latency = 1;
45215 }
45216@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
45217
45218 if (!cs->connected)
45219 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
45220- else if (!cs->port.count)
45221+ else if (!atomic_read(&cs->port.count))
45222 dev_warn(cs->dev, "%s: device not opened\n", __func__);
45223- else if (!--cs->port.count)
45224+ else if (!atomic_dec_return(&cs->port.count))
45225 tty_port_tty_set(&cs->port, NULL);
45226
45227 mutex_unlock(&cs->mutex);
45228diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
45229index 8c91fd5..14f13ce 100644
45230--- a/drivers/isdn/gigaset/ser-gigaset.c
45231+++ b/drivers/isdn/gigaset/ser-gigaset.c
45232@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
45233 }
45234
45235 static const struct gigaset_ops ops = {
45236- gigaset_write_cmd,
45237- gigaset_write_room,
45238- gigaset_chars_in_buffer,
45239- gigaset_brkchars,
45240- gigaset_init_bchannel,
45241- gigaset_close_bchannel,
45242- gigaset_initbcshw,
45243- gigaset_freebcshw,
45244- gigaset_reinitbcshw,
45245- gigaset_initcshw,
45246- gigaset_freecshw,
45247- gigaset_set_modem_ctrl,
45248- gigaset_baud_rate,
45249- gigaset_set_line_ctrl,
45250- gigaset_m10x_send_skb, /* asyncdata.c */
45251- gigaset_m10x_input, /* asyncdata.c */
45252+ .write_cmd = gigaset_write_cmd,
45253+ .write_room = gigaset_write_room,
45254+ .chars_in_buffer = gigaset_chars_in_buffer,
45255+ .brkchars = gigaset_brkchars,
45256+ .init_bchannel = gigaset_init_bchannel,
45257+ .close_bchannel = gigaset_close_bchannel,
45258+ .initbcshw = gigaset_initbcshw,
45259+ .freebcshw = gigaset_freebcshw,
45260+ .reinitbcshw = gigaset_reinitbcshw,
45261+ .initcshw = gigaset_initcshw,
45262+ .freecshw = gigaset_freecshw,
45263+ .set_modem_ctrl = gigaset_set_modem_ctrl,
45264+ .baud_rate = gigaset_baud_rate,
45265+ .set_line_ctrl = gigaset_set_line_ctrl,
45266+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
45267+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
45268 };
45269
45270
45271diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
45272index d0a41cb..b953e50 100644
45273--- a/drivers/isdn/gigaset/usb-gigaset.c
45274+++ b/drivers/isdn/gigaset/usb-gigaset.c
45275@@ -547,7 +547,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
45276 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
45277 memcpy(cs->hw.usb->bchars, buf, 6);
45278 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
45279- 0, 0, &buf, 6, 2000);
45280+ 0, 0, buf, 6, 2000);
45281 }
45282
45283 static void gigaset_freebcshw(struct bc_state *bcs)
45284@@ -869,22 +869,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
45285 }
45286
45287 static const struct gigaset_ops ops = {
45288- gigaset_write_cmd,
45289- gigaset_write_room,
45290- gigaset_chars_in_buffer,
45291- gigaset_brkchars,
45292- gigaset_init_bchannel,
45293- gigaset_close_bchannel,
45294- gigaset_initbcshw,
45295- gigaset_freebcshw,
45296- gigaset_reinitbcshw,
45297- gigaset_initcshw,
45298- gigaset_freecshw,
45299- gigaset_set_modem_ctrl,
45300- gigaset_baud_rate,
45301- gigaset_set_line_ctrl,
45302- gigaset_m10x_send_skb,
45303- gigaset_m10x_input,
45304+ .write_cmd = gigaset_write_cmd,
45305+ .write_room = gigaset_write_room,
45306+ .chars_in_buffer = gigaset_chars_in_buffer,
45307+ .brkchars = gigaset_brkchars,
45308+ .init_bchannel = gigaset_init_bchannel,
45309+ .close_bchannel = gigaset_close_bchannel,
45310+ .initbcshw = gigaset_initbcshw,
45311+ .freebcshw = gigaset_freebcshw,
45312+ .reinitbcshw = gigaset_reinitbcshw,
45313+ .initcshw = gigaset_initcshw,
45314+ .freecshw = gigaset_freecshw,
45315+ .set_modem_ctrl = gigaset_set_modem_ctrl,
45316+ .baud_rate = gigaset_baud_rate,
45317+ .set_line_ctrl = gigaset_set_line_ctrl,
45318+ .send_skb = gigaset_m10x_send_skb,
45319+ .handle_input = gigaset_m10x_input,
45320 };
45321
45322 /*
45323diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
45324index 4d9b195..455075c 100644
45325--- a/drivers/isdn/hardware/avm/b1.c
45326+++ b/drivers/isdn/hardware/avm/b1.c
45327@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
45328 }
45329 if (left) {
45330 if (t4file->user) {
45331- if (copy_from_user(buf, dp, left))
45332+ if (left > sizeof buf || copy_from_user(buf, dp, left))
45333 return -EFAULT;
45334 } else {
45335 memcpy(buf, dp, left);
45336@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
45337 }
45338 if (left) {
45339 if (config->user) {
45340- if (copy_from_user(buf, dp, left))
45341+ if (left > sizeof buf || copy_from_user(buf, dp, left))
45342 return -EFAULT;
45343 } else {
45344 memcpy(buf, dp, left);
45345diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
45346index 9b856e1..fa03c92 100644
45347--- a/drivers/isdn/i4l/isdn_common.c
45348+++ b/drivers/isdn/i4l/isdn_common.c
45349@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
45350 } else
45351 return -EINVAL;
45352 case IIOCDBGVAR:
45353+ if (!capable(CAP_SYS_RAWIO))
45354+ return -EPERM;
45355 if (arg) {
45356 if (copy_to_user(argp, &dev, sizeof(ulong)))
45357 return -EFAULT;
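The IIOCDBGVAR guard matters because this ioctl hands the address of the global `dev` object to user space, a ready-made kernel-pointer leak that defeats address-space randomization; gating it on CAP_SYS_RAWIO keeps the pointer away from unprivileged callers. The shape of the check, as a kernel-style sketch (dbgvar_ioctl is a hypothetical wrapper, not a function in the driver):

/* The privilege check precedes any copy-out, so an unprivileged
 * caller learns nothing about the kernel's address layout. */
static long dbgvar_ioctl(void __user *argp)
{
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;
        if (copy_to_user(argp, &dev, sizeof(ulong)))
                return -EFAULT;
        return 0;
}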
45358diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
45359index 91d5730..336523e 100644
45360--- a/drivers/isdn/i4l/isdn_concap.c
45361+++ b/drivers/isdn/i4l/isdn_concap.c
45362@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
45363 }
45364
45365 struct concap_device_ops isdn_concap_reliable_dl_dops = {
45366- &isdn_concap_dl_data_req,
45367- &isdn_concap_dl_connect_req,
45368- &isdn_concap_dl_disconn_req
45369+ .data_req = &isdn_concap_dl_data_req,
45370+ .connect_req = &isdn_concap_dl_connect_req,
45371+ .disconn_req = &isdn_concap_dl_disconn_req
45372 };
45373
45374 /* The following should better go into a dedicated source file such that
45375diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
45376index 62f0688..38ceac5 100644
45377--- a/drivers/isdn/i4l/isdn_ppp.c
45378+++ b/drivers/isdn/i4l/isdn_ppp.c
45379@@ -378,15 +378,10 @@ isdn_ppp_release(int min, struct file *file)
45380 is->slcomp = NULL;
45381 #endif
45382 #ifdef CONFIG_IPPP_FILTER
45383- if (is->pass_filter) {
45384- sk_unattached_filter_destroy(is->pass_filter);
45385- is->pass_filter = NULL;
45386- }
45387-
45388- if (is->active_filter) {
45389- sk_unattached_filter_destroy(is->active_filter);
45390- is->active_filter = NULL;
45391- }
45392+ kfree(is->pass_filter);
45393+ is->pass_filter = NULL;
45394+ kfree(is->active_filter);
45395+ is->active_filter = NULL;
45396 #endif
45397
45398 /* TODO: if this was the previous master: link the stuff to the new master */
45399@@ -442,7 +437,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
45400 {
45401 struct sock_fprog uprog;
45402 struct sock_filter *code = NULL;
45403- int len;
45404+ int len, err;
45405
45406 if (copy_from_user(&uprog, arg, sizeof(uprog)))
45407 return -EFAULT;
45408@@ -458,6 +453,12 @@ static int get_filter(void __user *arg, struct sock_filter **p)
45409 if (IS_ERR(code))
45410 return PTR_ERR(code);
45411
45412+ err = sk_chk_filter(code, uprog.len);
45413+ if (err) {
45414+ kfree(code);
45415+ return err;
45416+ }
45417+
45418 *p = code;
45419 return uprog.len;
45420 }
45421@@ -628,53 +629,25 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
45422 #ifdef CONFIG_IPPP_FILTER
45423 case PPPIOCSPASS:
45424 {
45425- struct sock_fprog_kern fprog;
45426 struct sock_filter *code;
45427- int err, len = get_filter(argp, &code);
45428-
45429+ int len = get_filter(argp, &code);
45430 if (len < 0)
45431 return len;
45432-
45433- fprog.len = len;
45434- fprog.filter = code;
45435-
45436- if (is->pass_filter) {
45437- sk_unattached_filter_destroy(is->pass_filter);
45438- is->pass_filter = NULL;
45439- }
45440- if (fprog.filter != NULL)
45441- err = sk_unattached_filter_create(&is->pass_filter,
45442- &fprog);
45443- else
45444- err = 0;
45445- kfree(code);
45446-
45447- return err;
45448+ kfree(is->pass_filter);
45449+ is->pass_filter = code;
45450+ is->pass_len = len;
45451+ break;
45452 }
45453 case PPPIOCSACTIVE:
45454 {
45455- struct sock_fprog_kern fprog;
45456 struct sock_filter *code;
45457- int err, len = get_filter(argp, &code);
45458-
45459+ int len = get_filter(argp, &code);
45460 if (len < 0)
45461 return len;
45462-
45463- fprog.len = len;
45464- fprog.filter = code;
45465-
45466- if (is->active_filter) {
45467- sk_unattached_filter_destroy(is->active_filter);
45468- is->active_filter = NULL;
45469- }
45470- if (fprog.filter != NULL)
45471- err = sk_unattached_filter_create(&is->active_filter,
45472- &fprog);
45473- else
45474- err = 0;
45475- kfree(code);
45476-
45477- return err;
45478+ kfree(is->active_filter);
45479+ is->active_filter = code;
45480+ is->active_len = len;
45481+ break;
45482 }
45483 #endif /* CONFIG_IPPP_FILTER */
45484 default:
45485@@ -1174,14 +1147,14 @@ isdn_ppp_push_higher(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *
45486 }
45487
45488 if (is->pass_filter
45489- && SK_RUN_FILTER(is->pass_filter, skb) == 0) {
45490+ && sk_run_filter(skb, is->pass_filter) == 0) {
45491 if (is->debug & 0x2)
45492 printk(KERN_DEBUG "IPPP: inbound frame filtered.\n");
45493 kfree_skb(skb);
45494 return;
45495 }
45496 if (!(is->active_filter
45497- && SK_RUN_FILTER(is->active_filter, skb) == 0)) {
45498+ && sk_run_filter(skb, is->active_filter) == 0)) {
45499 if (is->debug & 0x2)
45500 printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
45501 lp->huptimer = 0;
45502@@ -1320,14 +1293,14 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
45503 }
45504
45505 if (ipt->pass_filter
45506- && SK_RUN_FILTER(ipt->pass_filter, skb) == 0) {
45507+ && sk_run_filter(skb, ipt->pass_filter) == 0) {
45508 if (ipt->debug & 0x4)
45509 printk(KERN_DEBUG "IPPP: outbound frame filtered.\n");
45510 kfree_skb(skb);
45511 goto unlock;
45512 }
45513 if (!(ipt->active_filter
45514- && SK_RUN_FILTER(ipt->active_filter, skb) == 0)) {
45515+ && sk_run_filter(skb, ipt->active_filter) == 0)) {
45516 if (ipt->debug & 0x4)
45517 printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
45518 lp->huptimer = 0;
45519@@ -1517,9 +1490,9 @@ int isdn_ppp_autodial_filter(struct sk_buff *skb, isdn_net_local *lp)
45520 }
45521
45522 drop |= is->pass_filter
45523- && SK_RUN_FILTER(is->pass_filter, skb) == 0;
45524+ && sk_run_filter(skb, is->pass_filter) == 0;
45525 drop |= is->active_filter
45526- && SK_RUN_FILTER(is->active_filter, skb) == 0;
45527+ && sk_run_filter(skb, is->active_filter) == 0;
45528
45529 skb_push(skb, IPPP_MAX_HEADER - 4);
45530 return drop;
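This isdn_ppp.c hunk pins the driver to the classic BPF engine rather than the sk_unattached_filter_*/SK_RUN_FILTER API introduced with 3.16's reworked BPF core: get_filter() now validates the program once with sk_chk_filter(), the ioctl paths store the raw sock_filter array, and the data path runs it with sk_run_filter(). The load-bearing idea is validate-then-publish; a toy userspace model (the one-field `insn` and its jump rule merely stand in for real BPF validation):

#include <errno.h>
#include <stdlib.h>

struct insn { int jmp; /* relative forward jump; 0 means "next" */ };

/* Toy stand-in for sk_chk_filter(): every jump target must stay
 * inside the program. */
static int check_program(const struct insn *p, size_t len)
{
    for (size_t i = 0; i < len; i++)
        if (p[i].jmp < 0 || i + 1 + (size_t)p[i].jmp > len)
            return -EINVAL;   /* target escapes the program */
    return 0;
}

static struct insn *installed;
static size_t installed_len;

/* Mirrors the patched PPPIOCSPASS path: validate once, free the old
 * program, publish the new one for the fast path to run unchecked. */
static int install_program(struct insn *p, size_t len)
{
    int err = check_program(p, len);
    if (err) {
        free(p);
        return err;
    }
    free(installed);
    installed = p;
    installed_len = len;
    return 0;
}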
45531diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
45532index 3c5f249..5fac4d0 100644
45533--- a/drivers/isdn/i4l/isdn_tty.c
45534+++ b/drivers/isdn/i4l/isdn_tty.c
45535@@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
45536
45537 #ifdef ISDN_DEBUG_MODEM_OPEN
45538 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
45539- port->count);
45540+ atomic_read(&port->count));
45541 #endif
45542- port->count++;
45543+ atomic_inc(&port->count);
45544 port->tty = tty;
45545 /*
45546 * Start up serial port
45547@@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
45548 #endif
45549 return;
45550 }
45551- if ((tty->count == 1) && (port->count != 1)) {
45552+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
45553 /*
45554 * Uh, oh. tty->count is 1, which means that the tty
45555 * structure will be freed. Info->count should always
45556@@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
45557 * serial port won't be shutdown.
45558 */
45559 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
45560- "info->count is %d\n", port->count);
45561- port->count = 1;
45562+ "info->count is %d\n", atomic_read(&port->count));
45563+ atomic_set(&port->count, 1);
45564 }
45565- if (--port->count < 0) {
45566+ if (atomic_dec_return(&port->count) < 0) {
45567 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
45568- info->line, port->count);
45569- port->count = 0;
45570+ info->line, atomic_read(&port->count));
45571+ atomic_set(&port->count, 0);
45572 }
45573- if (port->count) {
45574+ if (atomic_read(&port->count)) {
45575 #ifdef ISDN_DEBUG_MODEM_OPEN
45576 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
45577 #endif
45578@@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty)
45579 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
45580 return;
45581 isdn_tty_shutdown(info);
45582- port->count = 0;
45583+ atomic_set(&port->count, 0);
45584 port->flags &= ~ASYNC_NORMAL_ACTIVE;
45585 port->tty = NULL;
45586 wake_up_interruptible(&port->open_wait);
45587@@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
45588 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
45589 modem_info *info = &dev->mdm.info[i];
45590
45591- if (info->port.count == 0)
45592+ if (atomic_read(&info->port.count) == 0)
45593 continue;
45594 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
45595 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
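Both here and in the gigaset interface.c hunk above, the tty_port open count moves from a plain int to atomic_t: `port->count++` is a non-atomic read-modify-write, so racing open() and close() paths can lose an update and leave the port bound or shut down at the wrong time, and the atomic forms additionally fall under REFCOUNT overflow checking. A C11 model of the pattern:

#include <stdatomic.h>

static atomic_int port_count;

/* Incrementing a plain int is a read-modify-write that two CPUs can
 * interleave, losing an open or a close; the atomic forms cannot. */
static int port_open(void)
{
    return atomic_fetch_add(&port_count, 1) + 1 == 1; /* first opener? */
}

static int port_close(void)
{
    return atomic_fetch_sub(&port_count, 1) - 1 == 0; /* last closer? */
}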
45596diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
45597index e2d4e58..40cd045 100644
45598--- a/drivers/isdn/i4l/isdn_x25iface.c
45599+++ b/drivers/isdn/i4l/isdn_x25iface.c
45600@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
45601
45602
45603 static struct concap_proto_ops ix25_pops = {
45604- &isdn_x25iface_proto_new,
45605- &isdn_x25iface_proto_del,
45606- &isdn_x25iface_proto_restart,
45607- &isdn_x25iface_proto_close,
45608- &isdn_x25iface_xmit,
45609- &isdn_x25iface_receive,
45610- &isdn_x25iface_connect_ind,
45611- &isdn_x25iface_disconn_ind
45612+ .proto_new = &isdn_x25iface_proto_new,
45613+ .proto_del = &isdn_x25iface_proto_del,
45614+ .restart = &isdn_x25iface_proto_restart,
45615+ .close = &isdn_x25iface_proto_close,
45616+ .encap_and_xmit = &isdn_x25iface_xmit,
45617+ .data_ind = &isdn_x25iface_receive,
45618+ .connect_ind = &isdn_x25iface_connect_ind,
45619+ .disconn_ind = &isdn_x25iface_disconn_ind
45620 };
45621
45622 /* error message helper function */
45623diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
45624index 6a7447c..cae33fe 100644
45625--- a/drivers/isdn/icn/icn.c
45626+++ b/drivers/isdn/icn/icn.c
45627@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
45628 if (count > len)
45629 count = len;
45630 if (user) {
45631- if (copy_from_user(msg, buf, count))
45632+ if (count > sizeof msg || copy_from_user(msg, buf, count))
45633 return -EFAULT;
45634 } else
45635 memcpy(msg, buf, count);
45636diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
45637index a4f05c5..1433bc5 100644
45638--- a/drivers/isdn/mISDN/dsp_cmx.c
45639+++ b/drivers/isdn/mISDN/dsp_cmx.c
45640@@ -1628,7 +1628,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
45641 static u16 dsp_count; /* last sample count */
45642 static int dsp_count_valid; /* if we have last sample count */
45643
45644-void
45645+void __intentional_overflow(-1)
45646 dsp_cmx_send(void *arg)
45647 {
45648 struct dsp_conf *conf;
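__intentional_overflow(-1) on dsp_cmx_send() is an annotation for grsecurity's size_overflow GCC plugin, telling it that overflowing arithmetic in this function is deliberate so the instrumentation does not flag it; as I read the convention, -1 marks the function as a whole rather than a specific parameter. A sketch of how such annotations stay compilable without the plugin (the macro body is my assumption, not copied from the patch):

#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) \
    __attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif

void __intentional_overflow(-1) dsp_cmx_send_like(void *arg);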
45649diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
45650index f58a354..fbae176 100644
45651--- a/drivers/leds/leds-clevo-mail.c
45652+++ b/drivers/leds/leds-clevo-mail.c
45653@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
45654 * detected as working, but in reality it is not) as low as
45655 * possible.
45656 */
45657-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
45658+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
45659 {
45660 .callback = clevo_mail_led_dmi_callback,
45661 .ident = "Clevo D410J",
45662diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
45663index 2eb3ef6..295891f 100644
45664--- a/drivers/leds/leds-ss4200.c
45665+++ b/drivers/leds/leds-ss4200.c
45666@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
45667 * detected as working, but in reality it is not) as low as
45668 * possible.
45669 */
45670-static struct dmi_system_id nas_led_whitelist[] __initdata = {
45671+static struct dmi_system_id nas_led_whitelist[] __initconst = {
45672 {
45673 .callback = ss4200_led_dmi_callback,
45674 .ident = "Intel SS4200-E",
45675diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
45676index 0bf1e4e..b4bf44e 100644
45677--- a/drivers/lguest/core.c
45678+++ b/drivers/lguest/core.c
45679@@ -97,9 +97,17 @@ static __init int map_switcher(void)
45680 * The end address needs +1 because __get_vm_area allocates an
45681 * extra guard page, so we need space for that.
45682 */
45683+
45684+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
45685+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
45686+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
45687+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
45688+#else
45689 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
45690 VM_ALLOC, switcher_addr, switcher_addr
45691 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
45692+#endif
45693+
45694 if (!switcher_vma) {
45695 err = -ENOMEM;
45696 printk("lguest: could not map switcher pages high\n");
45697@@ -124,7 +132,7 @@ static __init int map_switcher(void)
45698 * Now the Switcher is mapped at the right address, we can't fail!
45699 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
45700 */
45701- memcpy(switcher_vma->addr, start_switcher_text,
45702+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
45703 end_switcher_text - start_switcher_text);
45704
45705 printk(KERN_INFO "lguest: mapped switcher at %p\n",
45706diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
45707index e8b55c3..3514c37 100644
45708--- a/drivers/lguest/page_tables.c
45709+++ b/drivers/lguest/page_tables.c
45710@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
45711 /*:*/
45712
45713 #ifdef CONFIG_X86_PAE
45714-static void release_pmd(pmd_t *spmd)
45715+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
45716 {
45717 /* If the entry's not present, there's nothing to release. */
45718 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
45719diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
45720index 922a1ac..9dd0c2a 100644
45721--- a/drivers/lguest/x86/core.c
45722+++ b/drivers/lguest/x86/core.c
45723@@ -59,7 +59,7 @@ static struct {
45724 /* Offset from where switcher.S was compiled to where we've copied it */
45725 static unsigned long switcher_offset(void)
45726 {
45727- return switcher_addr - (unsigned long)start_switcher_text;
45728+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
45729 }
45730
45731 /* This cpu's struct lguest_pages (after the Switcher text page) */
45732@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
45733 * These copies are pretty cheap, so we do them unconditionally: */
45734 /* Save the current Host top-level page directory.
45735 */
45736+
45737+#ifdef CONFIG_PAX_PER_CPU_PGD
45738+ pages->state.host_cr3 = read_cr3();
45739+#else
45740 pages->state.host_cr3 = __pa(current->mm->pgd);
45741+#endif
45742+
45743 /*
45744 * Set up the Guest's page tables to see this CPU's pages (and no
45745 * other CPU's pages).
45746@@ -477,7 +483,7 @@ void __init lguest_arch_host_init(void)
45747 * compiled-in switcher code and the high-mapped copy we just made.
45748 */
45749 for (i = 0; i < IDT_ENTRIES; i++)
45750- default_idt_entries[i] += switcher_offset();
45751+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
45752
45753 /*
45754 * Set up the Switcher's per-cpu areas.
45755@@ -560,7 +566,7 @@ void __init lguest_arch_host_init(void)
45756 * it will be undisturbed when we switch. To change %cs and jump we
45757 * need this structure to feed to Intel's "lcall" instruction.
45758 */
45759- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
45760+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
45761 lguest_entry.segment = LGUEST_CS;
45762
45763 /*
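The ktla_ktva() calls here and in lguest/core.c exist because KERNEXEC on i386 maps kernel text at its own linear address: code that treats a text symbol as data, such as copying the switcher or rebasing IDT entries, must first translate the address ("kernel text linear address to kernel virtual address"). A sketch of the shape only; KERNEL_TEXT_BIAS is a hypothetical name for the linker-provided constant:

/* Shape only: the real bias is the distance the linker script puts
 * between the text mapping and the rest of the kernel image. */
#ifdef CONFIG_PAX_KERNEXEC
#define ktla_ktva(addr) ((unsigned long)(addr) + KERNEL_TEXT_BIAS)
#else
#define ktla_ktva(addr) ((unsigned long)(addr))
#endif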
45764diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
45765index 40634b0..4f5855e 100644
45766--- a/drivers/lguest/x86/switcher_32.S
45767+++ b/drivers/lguest/x86/switcher_32.S
45768@@ -87,6 +87,7 @@
45769 #include <asm/page.h>
45770 #include <asm/segment.h>
45771 #include <asm/lguest.h>
45772+#include <asm/processor-flags.h>
45773
45774 // We mark the start of the code to copy
45775 // It's placed in .text tho it's never run here
45776@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
45777 // Changes type when we load it: damn Intel!
45778 // For after we switch over our page tables
45779 // That entry will be read-only: we'd crash.
45780+
45781+#ifdef CONFIG_PAX_KERNEXEC
45782+ mov %cr0, %edx
45783+ xor $X86_CR0_WP, %edx
45784+ mov %edx, %cr0
45785+#endif
45786+
45787 movl $(GDT_ENTRY_TSS*8), %edx
45788 ltr %dx
45789
45790@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
45791 // Let's clear it again for our return.
45792 // The GDT descriptor of the Host
45793 // Points to the table after two "size" bytes
45794- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
45795+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
45796 // Clear "used" from type field (byte 5, bit 2)
45797- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
45798+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
45799+
45800+#ifdef CONFIG_PAX_KERNEXEC
45801+ mov %cr0, %eax
45802+ xor $X86_CR0_WP, %eax
45803+ mov %eax, %cr0
45804+#endif
45805
45806 // Once our page table's switched, the Guest is live!
45807 // The Host fades as we run this final step.
45808@@ -295,13 +309,12 @@ deliver_to_host:
45809 // I consulted gcc, and it gave
45810 // These instructions, which I gladly credit:
45811 leal (%edx,%ebx,8), %eax
45812- movzwl (%eax),%edx
45813- movl 4(%eax), %eax
45814- xorw %ax, %ax
45815- orl %eax, %edx
45816+ movl 4(%eax), %edx
45817+ movw (%eax), %dx
45818 // Now the address of the handler's in %edx
45819 // We call it now: its "iret" drops us home.
45820- jmp *%edx
45821+ ljmp $__KERNEL_CS, $1f
45822+1: jmp *%edx
45823
45824 // Every interrupt can come to us here
45825 // But we must truly tell each apart.
45826diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
45827index a08e3ee..df8ade2 100644
45828--- a/drivers/md/bcache/closure.h
45829+++ b/drivers/md/bcache/closure.h
45830@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
45831 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
45832 struct workqueue_struct *wq)
45833 {
45834- BUG_ON(object_is_on_stack(cl));
45835+ BUG_ON(object_starts_on_stack(cl));
45836 closure_set_ip(cl);
45837 cl->fn = fn;
45838 cl->wq = wq;
45839diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
45840index 67f8b31..9418f2b 100644
45841--- a/drivers/md/bitmap.c
45842+++ b/drivers/md/bitmap.c
45843@@ -1775,7 +1775,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
45844 chunk_kb ? "KB" : "B");
45845 if (bitmap->storage.file) {
45846 seq_printf(seq, ", file: ");
45847- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
45848+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
45849 }
45850
45851 seq_printf(seq, "\n");
45852diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
45853index 5152142..623d141 100644
45854--- a/drivers/md/dm-ioctl.c
45855+++ b/drivers/md/dm-ioctl.c
45856@@ -1769,7 +1769,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
45857 cmd == DM_LIST_VERSIONS_CMD)
45858 return 0;
45859
45860- if ((cmd == DM_DEV_CREATE_CMD)) {
45861+ if (cmd == DM_DEV_CREATE_CMD) {
45862 if (!*param->name) {
45863 DMWARN("name not supplied when creating device");
45864 return -EINVAL;
45865diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
45866index 7dfdb5c..4caada6 100644
45867--- a/drivers/md/dm-raid1.c
45868+++ b/drivers/md/dm-raid1.c
45869@@ -40,7 +40,7 @@ enum dm_raid1_error {
45870
45871 struct mirror {
45872 struct mirror_set *ms;
45873- atomic_t error_count;
45874+ atomic_unchecked_t error_count;
45875 unsigned long error_type;
45876 struct dm_dev *dev;
45877 sector_t offset;
45878@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
45879 struct mirror *m;
45880
45881 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
45882- if (!atomic_read(&m->error_count))
45883+ if (!atomic_read_unchecked(&m->error_count))
45884 return m;
45885
45886 return NULL;
45887@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
45888 * simple way to tell if a device has encountered
45889 * errors.
45890 */
45891- atomic_inc(&m->error_count);
45892+ atomic_inc_unchecked(&m->error_count);
45893
45894 if (test_and_set_bit(error_type, &m->error_type))
45895 return;
45896@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
45897 struct mirror *m = get_default_mirror(ms);
45898
45899 do {
45900- if (likely(!atomic_read(&m->error_count)))
45901+ if (likely(!atomic_read_unchecked(&m->error_count)))
45902 return m;
45903
45904 if (m-- == ms->mirror)
45905@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
45906 {
45907 struct mirror *default_mirror = get_default_mirror(m->ms);
45908
45909- return !atomic_read(&default_mirror->error_count);
45910+ return !atomic_read_unchecked(&default_mirror->error_count);
45911 }
45912
45913 static int mirror_available(struct mirror_set *ms, struct bio *bio)
45914@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
45915 */
45916 if (likely(region_in_sync(ms, region, 1)))
45917 m = choose_mirror(ms, bio->bi_iter.bi_sector);
45918- else if (m && atomic_read(&m->error_count))
45919+ else if (m && atomic_read_unchecked(&m->error_count))
45920 m = NULL;
45921
45922 if (likely(m))
45923@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
45924 }
45925
45926 ms->mirror[mirror].ms = ms;
45927- atomic_set(&(ms->mirror[mirror].error_count), 0);
45928+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
45929 ms->mirror[mirror].error_type = 0;
45930 ms->mirror[mirror].offset = offset;
45931
45932@@ -1342,7 +1342,7 @@ static void mirror_resume(struct dm_target *ti)
45933 */
45934 static char device_status_char(struct mirror *m)
45935 {
45936- if (!atomic_read(&(m->error_count)))
45937+ if (!atomic_read_unchecked(&(m->error_count)))
45938 return 'A';
45939
45940 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
45941diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
45942index 28a9012..9c0f6a5 100644
45943--- a/drivers/md/dm-stats.c
45944+++ b/drivers/md/dm-stats.c
45945@@ -382,7 +382,7 @@ do_sync_free:
45946 synchronize_rcu_expedited();
45947 dm_stat_free(&s->rcu_head);
45948 } else {
45949- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
45950+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
45951 call_rcu(&s->rcu_head, dm_stat_free);
45952 }
45953 return 0;
45954@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
45955 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
45956 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
45957 ));
45958- ACCESS_ONCE(last->last_sector) = end_sector;
45959- ACCESS_ONCE(last->last_rw) = bi_rw;
45960+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
45961+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
45962 }
45963
45964 rcu_read_lock();
45965diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
45966index d1600d2..4c3af3a 100644
45967--- a/drivers/md/dm-stripe.c
45968+++ b/drivers/md/dm-stripe.c
45969@@ -21,7 +21,7 @@ struct stripe {
45970 struct dm_dev *dev;
45971 sector_t physical_start;
45972
45973- atomic_t error_count;
45974+ atomic_unchecked_t error_count;
45975 };
45976
45977 struct stripe_c {
45978@@ -186,7 +186,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
45979 kfree(sc);
45980 return r;
45981 }
45982- atomic_set(&(sc->stripe[i].error_count), 0);
45983+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
45984 }
45985
45986 ti->private = sc;
45987@@ -330,7 +330,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
45988 DMEMIT("%d ", sc->stripes);
45989 for (i = 0; i < sc->stripes; i++) {
45990 DMEMIT("%s ", sc->stripe[i].dev->name);
45991- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
45992+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
45993 'D' : 'A';
45994 }
45995 buffer[i] = '\0';
45996@@ -375,8 +375,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
45997 */
45998 for (i = 0; i < sc->stripes; i++)
45999 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
46000- atomic_inc(&(sc->stripe[i].error_count));
46001- if (atomic_read(&(sc->stripe[i].error_count)) <
46002+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
46003+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
46004 DM_IO_ERROR_THRESHOLD)
46005 schedule_work(&sc->trigger_event);
46006 }
46007diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
46008index 9227910..f51ca38 100644
46009--- a/drivers/md/dm-table.c
46010+++ b/drivers/md/dm-table.c
46011@@ -274,7 +274,7 @@ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
46012 static int open_dev(struct dm_dev_internal *d, dev_t dev,
46013 struct mapped_device *md)
46014 {
46015- static char *_claim_ptr = "I belong to device-mapper";
46016+ static char _claim_ptr[] = "I belong to device-mapper";
46017 struct block_device *bdev;
46018
46019 int r;
46020@@ -342,7 +342,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
46021 if (!dev_size)
46022 return 0;
46023
46024- if ((start >= dev_size) || (start + len > dev_size)) {
46025+ if ((start >= dev_size) || (len > dev_size - start)) {
46026 DMWARN("%s: %s too small for target: "
46027 "start=%llu, len=%llu, dev_size=%llu",
46028 dm_device_name(ti->table->md), bdevname(bdev, b),
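The device_area_is_invalid() rewrite is the canonical overflow-safe range check: with attacker-influenced 64-bit values, `start + len` can wrap and slip under dev_size, whereas after the `start >= dev_size` test, `dev_size - start` cannot underflow, so `len > dev_size - start` is exact. A runnable demonstration:

#include <stdint.h>
#include <stdio.h>

/* dev_size = 1000, start = 10, len = UINT64_MAX - 5:
 * start + len wraps to 4, so "start + len > dev_size" is false and
 * the bogus range is accepted; "len > dev_size - start" compares
 * against 990 and rejects it. */
static int range_ok_broken(uint64_t start, uint64_t len, uint64_t dev_size)
{
    return !(start >= dev_size || start + len > dev_size);
}

static int range_ok_fixed(uint64_t start, uint64_t len, uint64_t dev_size)
{
    return !(start >= dev_size || len > dev_size - start);
}

int main(void)
{
    uint64_t start = 10, len = UINT64_MAX - 5, dev_size = 1000;

    printf("broken accepts: %d, fixed accepts: %d\n",
           range_ok_broken(start, len, dev_size),
           range_ok_fixed(start, len, dev_size));
    return 0;
}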
46029diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
46030index e9d33ad..dae9880d 100644
46031--- a/drivers/md/dm-thin-metadata.c
46032+++ b/drivers/md/dm-thin-metadata.c
46033@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
46034 {
46035 pmd->info.tm = pmd->tm;
46036 pmd->info.levels = 2;
46037- pmd->info.value_type.context = pmd->data_sm;
46038+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
46039 pmd->info.value_type.size = sizeof(__le64);
46040 pmd->info.value_type.inc = data_block_inc;
46041 pmd->info.value_type.dec = data_block_dec;
46042@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
46043
46044 pmd->bl_info.tm = pmd->tm;
46045 pmd->bl_info.levels = 1;
46046- pmd->bl_info.value_type.context = pmd->data_sm;
46047+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
46048 pmd->bl_info.value_type.size = sizeof(__le64);
46049 pmd->bl_info.value_type.inc = data_block_inc;
46050 pmd->bl_info.value_type.dec = data_block_dec;
46051diff --git a/drivers/md/dm.c b/drivers/md/dm.c
46052index 32b958d..34011e8 100644
46053--- a/drivers/md/dm.c
46054+++ b/drivers/md/dm.c
46055@@ -180,9 +180,9 @@ struct mapped_device {
46056 /*
46057 * Event handling.
46058 */
46059- atomic_t event_nr;
46060+ atomic_unchecked_t event_nr;
46061 wait_queue_head_t eventq;
46062- atomic_t uevent_seq;
46063+ atomic_unchecked_t uevent_seq;
46064 struct list_head uevent_list;
46065 spinlock_t uevent_lock; /* Protect access to uevent_list */
46066
46067@@ -1952,8 +1952,8 @@ static struct mapped_device *alloc_dev(int minor)
46068 spin_lock_init(&md->deferred_lock);
46069 atomic_set(&md->holders, 1);
46070 atomic_set(&md->open_count, 0);
46071- atomic_set(&md->event_nr, 0);
46072- atomic_set(&md->uevent_seq, 0);
46073+ atomic_set_unchecked(&md->event_nr, 0);
46074+ atomic_set_unchecked(&md->uevent_seq, 0);
46075 INIT_LIST_HEAD(&md->uevent_list);
46076 spin_lock_init(&md->uevent_lock);
46077
46078@@ -2107,7 +2107,7 @@ static void event_callback(void *context)
46079
46080 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
46081
46082- atomic_inc(&md->event_nr);
46083+ atomic_inc_unchecked(&md->event_nr);
46084 wake_up(&md->eventq);
46085 }
46086
46087@@ -2800,18 +2800,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
46088
46089 uint32_t dm_next_uevent_seq(struct mapped_device *md)
46090 {
46091- return atomic_add_return(1, &md->uevent_seq);
46092+ return atomic_add_return_unchecked(1, &md->uevent_seq);
46093 }
46094
46095 uint32_t dm_get_event_nr(struct mapped_device *md)
46096 {
46097- return atomic_read(&md->event_nr);
46098+ return atomic_read_unchecked(&md->event_nr);
46099 }
46100
46101 int dm_wait_event(struct mapped_device *md, int event_nr)
46102 {
46103 return wait_event_interruptible(md->eventq,
46104- (event_nr != atomic_read(&md->event_nr)));
46105+ (event_nr != atomic_read_unchecked(&md->event_nr)));
46106 }
46107
46108 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
46109diff --git a/drivers/md/md.c b/drivers/md/md.c
46110index 32fc19c..cb6eba3 100644
46111--- a/drivers/md/md.c
46112+++ b/drivers/md/md.c
46113@@ -194,10 +194,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
46114 * start build, activate spare
46115 */
46116 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
46117-static atomic_t md_event_count;
46118+static atomic_unchecked_t md_event_count;
46119 void md_new_event(struct mddev *mddev)
46120 {
46121- atomic_inc(&md_event_count);
46122+ atomic_inc_unchecked(&md_event_count);
46123 wake_up(&md_event_waiters);
46124 }
46125 EXPORT_SYMBOL_GPL(md_new_event);
46126@@ -207,7 +207,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
46127 */
46128 static void md_new_event_inintr(struct mddev *mddev)
46129 {
46130- atomic_inc(&md_event_count);
46131+ atomic_inc_unchecked(&md_event_count);
46132 wake_up(&md_event_waiters);
46133 }
46134
46135@@ -1462,7 +1462,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
46136 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
46137 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
46138 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
46139- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
46140+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
46141
46142 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
46143 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
46144@@ -1713,7 +1713,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
46145 else
46146 sb->resync_offset = cpu_to_le64(0);
46147
46148- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
46149+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
46150
46151 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
46152 sb->size = cpu_to_le64(mddev->dev_sectors);
46153@@ -2725,7 +2725,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
46154 static ssize_t
46155 errors_show(struct md_rdev *rdev, char *page)
46156 {
46157- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
46158+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
46159 }
46160
46161 static ssize_t
46162@@ -2734,7 +2734,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
46163 char *e;
46164 unsigned long n = simple_strtoul(buf, &e, 10);
46165 if (*buf && (*e == 0 || *e == '\n')) {
46166- atomic_set(&rdev->corrected_errors, n);
46167+ atomic_set_unchecked(&rdev->corrected_errors, n);
46168 return len;
46169 }
46170 return -EINVAL;
46171@@ -3183,8 +3183,8 @@ int md_rdev_init(struct md_rdev *rdev)
46172 rdev->sb_loaded = 0;
46173 rdev->bb_page = NULL;
46174 atomic_set(&rdev->nr_pending, 0);
46175- atomic_set(&rdev->read_errors, 0);
46176- atomic_set(&rdev->corrected_errors, 0);
46177+ atomic_set_unchecked(&rdev->read_errors, 0);
46178+ atomic_set_unchecked(&rdev->corrected_errors, 0);
46179
46180 INIT_LIST_HEAD(&rdev->same_set);
46181 init_waitqueue_head(&rdev->blocked_wait);
46182@@ -7068,7 +7068,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
46183
46184 spin_unlock(&pers_lock);
46185 seq_printf(seq, "\n");
46186- seq->poll_event = atomic_read(&md_event_count);
46187+ seq->poll_event = atomic_read_unchecked(&md_event_count);
46188 return 0;
46189 }
46190 if (v == (void*)2) {
46191@@ -7171,7 +7171,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
46192 return error;
46193
46194 seq = file->private_data;
46195- seq->poll_event = atomic_read(&md_event_count);
46196+ seq->poll_event = atomic_read_unchecked(&md_event_count);
46197 return error;
46198 }
46199
46200@@ -7188,7 +7188,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
46201 /* always allow read */
46202 mask = POLLIN | POLLRDNORM;
46203
46204- if (seq->poll_event != atomic_read(&md_event_count))
46205+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
46206 mask |= POLLERR | POLLPRI;
46207 return mask;
46208 }
46209@@ -7232,7 +7232,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
46210 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
46211 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
46212 (int)part_stat_read(&disk->part0, sectors[1]) -
46213- atomic_read(&disk->sync_io);
46214+ atomic_read_unchecked(&disk->sync_io);
46215 /* sync IO will cause sync_io to increase before the disk_stats
46216 * as sync_io is counted when a request starts, and
46217 * disk_stats is counted when it completes.
46218diff --git a/drivers/md/md.h b/drivers/md/md.h
46219index a49d991..3582bb7 100644
46220--- a/drivers/md/md.h
46221+++ b/drivers/md/md.h
46222@@ -94,13 +94,13 @@ struct md_rdev {
46223 * only maintained for arrays that
46224 * support hot removal
46225 */
46226- atomic_t read_errors; /* number of consecutive read errors that
46227+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
46228 * we have tried to ignore.
46229 */
46230 struct timespec last_read_error; /* monotonic time since our
46231 * last read error
46232 */
46233- atomic_t corrected_errors; /* number of corrected read errors,
46234+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
46235 * for reporting to userspace and storing
46236 * in superblock.
46237 */
46238@@ -449,7 +449,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
46239
46240 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
46241 {
46242- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
46243+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
46244 }
46245
46246 struct md_personality
46247diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
46248index 786b689..ea8c956 100644
46249--- a/drivers/md/persistent-data/dm-space-map-metadata.c
46250+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
46251@@ -679,7 +679,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
46252 * Flick into a mode where all blocks get allocated in the new area.
46253 */
46254 smm->begin = old_len;
46255- memcpy(sm, &bootstrap_ops, sizeof(*sm));
46256+ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
46257
46258 /*
46259 * Extend.
46260@@ -710,7 +710,7 @@ out:
46261 /*
46262 * Switch back to normal behaviour.
46263 */
46264- memcpy(sm, &ops, sizeof(*sm));
46265+ memcpy((void *)sm, &ops, sizeof(*sm));
46266 return r;
46267 }
46268
46269diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
46270index 3e6d115..ffecdeb 100644
46271--- a/drivers/md/persistent-data/dm-space-map.h
46272+++ b/drivers/md/persistent-data/dm-space-map.h
46273@@ -71,6 +71,7 @@ struct dm_space_map {
46274 dm_sm_threshold_fn fn,
46275 void *context);
46276 };
46277+typedef struct dm_space_map __no_const dm_space_map_no_const;
46278
46279 /*----------------------------------------------------------------*/
46280
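
The dm_space_map_no_const typedef exists because grsecurity's constify gcc plugin moves structures made up of function pointers into read-only memory, and sm_metadata_extend() above must memcpy() over a live dm_space_map at runtime. A conceptual sketch of the interplay, with the attribute names assumed from the plugin:

#include <linux/string.h>

#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))
#else
#define __no_const
#endif

struct example_ops {				/* ops-only: the plugin would  */
	int  (*probe)(void *ctx);		/* place this in .rodata       */
	void (*destroy)(void *ctx);
};

typedef struct example_ops __no_const example_ops_no_const;

static void swap_ops(example_ops_no_const *dst, const struct example_ops *src)
{
	/* mirrors the (void *) casts added in sm_metadata_extend() */
	memcpy((void *)dst, src, sizeof(*dst));
}
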
46281diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
46282index d7690f8..3db9ef1 100644
46283--- a/drivers/md/raid1.c
46284+++ b/drivers/md/raid1.c
46285@@ -1931,7 +1931,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
46286 if (r1_sync_page_io(rdev, sect, s,
46287 bio->bi_io_vec[idx].bv_page,
46288 READ) != 0)
46289- atomic_add(s, &rdev->corrected_errors);
46290+ atomic_add_unchecked(s, &rdev->corrected_errors);
46291 }
46292 sectors -= s;
46293 sect += s;
46294@@ -2165,7 +2165,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
46295 test_bit(In_sync, &rdev->flags)) {
46296 if (r1_sync_page_io(rdev, sect, s,
46297 conf->tmppage, READ)) {
46298- atomic_add(s, &rdev->corrected_errors);
46299+ atomic_add_unchecked(s, &rdev->corrected_errors);
46300 printk(KERN_INFO
46301 "md/raid1:%s: read error corrected "
46302 "(%d sectors at %llu on %s)\n",
46303diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
46304index a46124e..caf0bd55 100644
46305--- a/drivers/md/raid10.c
46306+++ b/drivers/md/raid10.c
46307@@ -1948,7 +1948,7 @@ static void end_sync_read(struct bio *bio, int error)
46308 /* The write handler will notice the lack of
46309 * R10BIO_Uptodate and record any errors etc
46310 */
46311- atomic_add(r10_bio->sectors,
46312+ atomic_add_unchecked(r10_bio->sectors,
46313 &conf->mirrors[d].rdev->corrected_errors);
46314
46315 /* for reconstruct, we always reschedule after a read.
46316@@ -2306,7 +2306,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
46317 {
46318 struct timespec cur_time_mon;
46319 unsigned long hours_since_last;
46320- unsigned int read_errors = atomic_read(&rdev->read_errors);
46321+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
46322
46323 ktime_get_ts(&cur_time_mon);
46324
46325@@ -2328,9 +2328,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
46326 * overflowing the shift of read_errors by hours_since_last.
46327 */
46328 if (hours_since_last >= 8 * sizeof(read_errors))
46329- atomic_set(&rdev->read_errors, 0);
46330+ atomic_set_unchecked(&rdev->read_errors, 0);
46331 else
46332- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
46333+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
46334 }
46335
46336 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
46337@@ -2384,8 +2384,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
46338 return;
46339
46340 check_decay_read_errors(mddev, rdev);
46341- atomic_inc(&rdev->read_errors);
46342- if (atomic_read(&rdev->read_errors) > max_read_errors) {
46343+ atomic_inc_unchecked(&rdev->read_errors);
46344+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
46345 char b[BDEVNAME_SIZE];
46346 bdevname(rdev->bdev, b);
46347
46348@@ -2393,7 +2393,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
46349 "md/raid10:%s: %s: Raid device exceeded "
46350 "read_error threshold [cur %d:max %d]\n",
46351 mdname(mddev), b,
46352- atomic_read(&rdev->read_errors), max_read_errors);
46353+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
46354 printk(KERN_NOTICE
46355 "md/raid10:%s: %s: Failing raid device\n",
46356 mdname(mddev), b);
46357@@ -2548,7 +2548,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
46358 sect +
46359 choose_data_offset(r10_bio, rdev)),
46360 bdevname(rdev->bdev, b));
46361- atomic_add(s, &rdev->corrected_errors);
46362+ atomic_add_unchecked(s, &rdev->corrected_errors);
46363 }
46364
46365 rdev_dec_pending(rdev, mddev);
46366diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
46367index 183588b..0eedcfa 100644
46368--- a/drivers/md/raid5.c
46369+++ b/drivers/md/raid5.c
46370@@ -1731,6 +1731,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
46371 return 1;
46372 }
46373
46374+#ifdef CONFIG_GRKERNSEC_HIDESYM
46375+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
46376+#endif
46377+
46378 static int grow_stripes(struct r5conf *conf, int num)
46379 {
46380 struct kmem_cache *sc;
46381@@ -1742,7 +1746,11 @@ static int grow_stripes(struct r5conf *conf, int num)
46382 "raid%d-%s", conf->level, mdname(conf->mddev));
46383 else
46384 sprintf(conf->cache_name[0],
46385+#ifdef CONFIG_GRKERNSEC_HIDESYM
46386+ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
46387+#else
46388 "raid%d-%p", conf->level, conf->mddev);
46389+#endif
46390 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
46391
46392 conf->active_name = 0;
46393@@ -2018,21 +2026,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
46394 mdname(conf->mddev), STRIPE_SECTORS,
46395 (unsigned long long)s,
46396 bdevname(rdev->bdev, b));
46397- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
46398+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
46399 clear_bit(R5_ReadError, &sh->dev[i].flags);
46400 clear_bit(R5_ReWrite, &sh->dev[i].flags);
46401 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
46402 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
46403
46404- if (atomic_read(&rdev->read_errors))
46405- atomic_set(&rdev->read_errors, 0);
46406+ if (atomic_read_unchecked(&rdev->read_errors))
46407+ atomic_set_unchecked(&rdev->read_errors, 0);
46408 } else {
46409 const char *bdn = bdevname(rdev->bdev, b);
46410 int retry = 0;
46411 int set_bad = 0;
46412
46413 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
46414- atomic_inc(&rdev->read_errors);
46415+ atomic_inc_unchecked(&rdev->read_errors);
46416 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
46417 printk_ratelimited(
46418 KERN_WARNING
46419@@ -2060,7 +2068,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
46420 mdname(conf->mddev),
46421 (unsigned long long)s,
46422 bdn);
46423- } else if (atomic_read(&rdev->read_errors)
46424+ } else if (atomic_read_unchecked(&rdev->read_errors)
46425 > conf->max_nr_stripes)
46426 printk(KERN_WARNING
46427 "md/raid:%s: Too many read errors, failing device %s.\n",
46428diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
46429index 983db75..ef9248c 100644
46430--- a/drivers/media/dvb-core/dvbdev.c
46431+++ b/drivers/media/dvb-core/dvbdev.c
46432@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
46433 const struct dvb_device *template, void *priv, int type)
46434 {
46435 struct dvb_device *dvbdev;
46436- struct file_operations *dvbdevfops;
46437+ file_operations_no_const *dvbdevfops;
46438 struct device *clsdev;
46439 int minor;
46440 int id;
46441diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
46442index 539f4db..cdd403b 100644
46443--- a/drivers/media/dvb-frontends/af9033.h
46444+++ b/drivers/media/dvb-frontends/af9033.h
46445@@ -82,7 +82,7 @@ struct af9033_ops {
46446 int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
46447 int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
46448 int onoff);
46449-};
46450+} __no_const;
46451
46452
46453 #if IS_ENABLED(CONFIG_DVB_AF9033)
46454diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
46455index 9b6c3bb..baeb5c7 100644
46456--- a/drivers/media/dvb-frontends/dib3000.h
46457+++ b/drivers/media/dvb-frontends/dib3000.h
46458@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
46459 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
46460 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
46461 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
46462-};
46463+} __no_const;
46464
46465 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
46466 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
46467diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
46468index ed8cb90..5ef7f79 100644
46469--- a/drivers/media/pci/cx88/cx88-video.c
46470+++ b/drivers/media/pci/cx88/cx88-video.c
46471@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
46472
46473 /* ------------------------------------------------------------------ */
46474
46475-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46476-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46477-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46478+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46479+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46480+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46481
46482 module_param_array(video_nr, int, NULL, 0444);
46483 module_param_array(vbi_nr, int, NULL, 0444);
46484diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
46485index 802642d..5534900 100644
46486--- a/drivers/media/pci/ivtv/ivtv-driver.c
46487+++ b/drivers/media/pci/ivtv/ivtv-driver.c
46488@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
46489 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
46490
46491 /* ivtv instance counter */
46492-static atomic_t ivtv_instance = ATOMIC_INIT(0);
46493+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
46494
46495 /* Parameter declarations */
46496 static int cardtype[IVTV_MAX_CARDS];
46497diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
46498index 9a726ea..f5e9b52 100644
46499--- a/drivers/media/platform/omap/omap_vout.c
46500+++ b/drivers/media/platform/omap/omap_vout.c
46501@@ -63,7 +63,6 @@ enum omap_vout_channels {
46502 OMAP_VIDEO2,
46503 };
46504
46505-static struct videobuf_queue_ops video_vbq_ops;
46506 /* Variables configurable through module params*/
46507 static u32 video1_numbuffers = 3;
46508 static u32 video2_numbuffers = 3;
46509@@ -1015,6 +1014,12 @@ static int omap_vout_open(struct file *file)
46510 {
46511 struct videobuf_queue *q;
46512 struct omap_vout_device *vout = NULL;
46513+ static struct videobuf_queue_ops video_vbq_ops = {
46514+ .buf_setup = omap_vout_buffer_setup,
46515+ .buf_prepare = omap_vout_buffer_prepare,
46516+ .buf_release = omap_vout_buffer_release,
46517+ .buf_queue = omap_vout_buffer_queue,
46518+ };
46519
46520 vout = video_drvdata(file);
46521 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
46522@@ -1032,10 +1037,6 @@ static int omap_vout_open(struct file *file)
46523 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
46524
46525 q = &vout->vbq;
46526- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
46527- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
46528- video_vbq_ops.buf_release = omap_vout_buffer_release;
46529- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
46530 spin_lock_init(&vout->vbq_lock);
46531
46532 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
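
Moving video_vbq_ops from a writable file-scope object into omap_vout_open() as a static designated initializer means the ops are fixed at compile time instead of being re-assigned on every open(), which removes a needlessly writable function-pointer table and lets the constify plugin protect it. The general shape of the transformation, on a hypothetical driver:

struct widget;
struct widget_ops {
	int  (*setup)(struct widget *w);
	void (*teardown)(struct widget *w);
};

static int  widget_setup(struct widget *w);
static void widget_teardown(struct widget *w);
static int  widget_attach(struct widget *w, const struct widget_ops *ops);

/* Before: global patched in at every open() -- writable and racy. */
static struct widget_ops widget_ops;

static int widget_open_before(struct widget *w)
{
	widget_ops.setup    = widget_setup;
	widget_ops.teardown = widget_teardown;
	return widget_attach(w, &widget_ops);
}

/* After: initialised once at compile time, never written at runtime. */
static int widget_open_after(struct widget *w)
{
	static struct widget_ops ops = {
		.setup    = widget_setup,
		.teardown = widget_teardown,
	};
	return widget_attach(w, &ops);
}
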
46533diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
46534index fb2acc5..a2fcbdc4 100644
46535--- a/drivers/media/platform/s5p-tv/mixer.h
46536+++ b/drivers/media/platform/s5p-tv/mixer.h
46537@@ -156,7 +156,7 @@ struct mxr_layer {
46538 /** layer index (unique identifier) */
46539 int idx;
46540 /** callbacks for layer methods */
46541- struct mxr_layer_ops ops;
46542+ struct mxr_layer_ops *ops;
46543 /** format array */
46544 const struct mxr_format **fmt_array;
46545 /** size of format array */
46546diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46547index 74344c7..a39e70e 100644
46548--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46549+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46550@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
46551 {
46552 struct mxr_layer *layer;
46553 int ret;
46554- struct mxr_layer_ops ops = {
46555+ static struct mxr_layer_ops ops = {
46556 .release = mxr_graph_layer_release,
46557 .buffer_set = mxr_graph_buffer_set,
46558 .stream_set = mxr_graph_stream_set,
46559diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
46560index b713403..53cb5ad 100644
46561--- a/drivers/media/platform/s5p-tv/mixer_reg.c
46562+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
46563@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
46564 layer->update_buf = next;
46565 }
46566
46567- layer->ops.buffer_set(layer, layer->update_buf);
46568+ layer->ops->buffer_set(layer, layer->update_buf);
46569
46570 if (done && done != layer->shadow_buf)
46571 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
46572diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
46573index 8a8dbc8..b74c62d 100644
46574--- a/drivers/media/platform/s5p-tv/mixer_video.c
46575+++ b/drivers/media/platform/s5p-tv/mixer_video.c
46576@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
46577 layer->geo.src.height = layer->geo.src.full_height;
46578
46579 mxr_geometry_dump(mdev, &layer->geo);
46580- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46581+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46582 mxr_geometry_dump(mdev, &layer->geo);
46583 }
46584
46585@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
46586 layer->geo.dst.full_width = mbus_fmt.width;
46587 layer->geo.dst.full_height = mbus_fmt.height;
46588 layer->geo.dst.field = mbus_fmt.field;
46589- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46590+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46591
46592 mxr_geometry_dump(mdev, &layer->geo);
46593 }
46594@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
46595 /* set source size to highest accepted value */
46596 geo->src.full_width = max(geo->dst.full_width, pix->width);
46597 geo->src.full_height = max(geo->dst.full_height, pix->height);
46598- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46599+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46600 mxr_geometry_dump(mdev, &layer->geo);
46601 /* set cropping to total visible screen */
46602 geo->src.width = pix->width;
46603@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
46604 geo->src.x_offset = 0;
46605 geo->src.y_offset = 0;
46606 /* assure consistency of geometry */
46607- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
46608+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
46609 mxr_geometry_dump(mdev, &layer->geo);
46610 /* set full size to lowest possible value */
46611 geo->src.full_width = 0;
46612 geo->src.full_height = 0;
46613- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46614+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46615 mxr_geometry_dump(mdev, &layer->geo);
46616
46617 /* returning results */
46618@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
46619 target->width = s->r.width;
46620 target->height = s->r.height;
46621
46622- layer->ops.fix_geometry(layer, stage, s->flags);
46623+ layer->ops->fix_geometry(layer, stage, s->flags);
46624
46625 /* retrieve update selection rectangle */
46626 res.left = target->x_offset;
46627@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
46628 mxr_output_get(mdev);
46629
46630 mxr_layer_update_output(layer);
46631- layer->ops.format_set(layer);
46632+ layer->ops->format_set(layer);
46633 /* enabling layer in hardware */
46634 spin_lock_irqsave(&layer->enq_slock, flags);
46635 layer->state = MXR_LAYER_STREAMING;
46636 spin_unlock_irqrestore(&layer->enq_slock, flags);
46637
46638- layer->ops.stream_set(layer, MXR_ENABLE);
46639+ layer->ops->stream_set(layer, MXR_ENABLE);
46640 mxr_streamer_get(mdev);
46641
46642 return 0;
46643@@ -1030,7 +1030,7 @@ static void stop_streaming(struct vb2_queue *vq)
46644 spin_unlock_irqrestore(&layer->enq_slock, flags);
46645
46646 /* disabling layer in hardware */
46647- layer->ops.stream_set(layer, MXR_DISABLE);
46648+ layer->ops->stream_set(layer, MXR_DISABLE);
46649 /* remove one streamer */
46650 mxr_streamer_put(mdev);
46651 /* allow changes in output configuration */
46652@@ -1068,8 +1068,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
46653
46654 void mxr_layer_release(struct mxr_layer *layer)
46655 {
46656- if (layer->ops.release)
46657- layer->ops.release(layer);
46658+ if (layer->ops->release)
46659+ layer->ops->release(layer);
46660 }
46661
46662 void mxr_base_layer_release(struct mxr_layer *layer)
46663@@ -1095,7 +1095,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
46664
46665 layer->mdev = mdev;
46666 layer->idx = idx;
46667- layer->ops = *ops;
46668+ layer->ops = ops;
46669
46670 spin_lock_init(&layer->enq_slock);
46671 INIT_LIST_HEAD(&layer->enq_list);
46672diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46673index c9388c4..ce71ece 100644
46674--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46675+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46676@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
46677 {
46678 struct mxr_layer *layer;
46679 int ret;
46680- struct mxr_layer_ops ops = {
46681+ static struct mxr_layer_ops ops = {
46682 .release = mxr_vp_layer_release,
46683 .buffer_set = mxr_vp_buffer_set,
46684 .stream_set = mxr_vp_stream_set,
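
All the s5p-tv churn above is one refactor: mxr_layer used to embed its own writable copy of mxr_layer_ops, so every layer carried mutable function pointers; storing a pointer to a single static ops table instead shares one effectively read-only vtable per layer type, at the mechanical cost of the '.' to '->' rewrite at each call site. Condensed, with types assumed:

struct layer;

struct layer_ops {
	void (*stream_set)(struct layer *l, int on);
};

struct layer {
	const struct layer_ops *ops;	/* was: struct layer_ops ops;        */
};

static void vp_stream_set(struct layer *l, int on) { /* hw enable */ }

static struct layer_ops vp_ops = {	/* one shared table per layer type   */
	.stream_set = vp_stream_set,
};

static void layer_init(struct layer *l)
{
	l->ops = &vp_ops;		/* was: l->ops = *ops; (struct copy) */
}

static void layer_start(struct layer *l)
{
	l->ops->stream_set(l, 1);	/* was: l->ops.stream_set(l, 1);     */
}
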
46685diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
46686index d00bf3d..1301a0c 100644
46687--- a/drivers/media/platform/vivi.c
46688+++ b/drivers/media/platform/vivi.c
46689@@ -58,8 +58,8 @@ MODULE_AUTHOR("Mauro Carvalho Chehab, Ted Walther and John Sokol");
46690 MODULE_LICENSE("Dual BSD/GPL");
46691 MODULE_VERSION(VIVI_VERSION);
46692
46693-static unsigned video_nr = -1;
46694-module_param(video_nr, uint, 0644);
46695+static int video_nr = -1;
46696+module_param(video_nr, int, 0644);
46697 MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
46698
46699 static unsigned n_devs = 1;
46700diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
46701index d719e59..63f3470 100644
46702--- a/drivers/media/radio/radio-cadet.c
46703+++ b/drivers/media/radio/radio-cadet.c
46704@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
46705 unsigned char readbuf[RDS_BUFFER];
46706 int i = 0;
46707
46708+ if (count > RDS_BUFFER)
46709+ return -EINVAL;
46710 mutex_lock(&dev->lock);
46711 if (dev->rdsstat == 0)
46712 cadet_start_rds(dev);
46713@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
46714 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
46715 mutex_unlock(&dev->lock);
46716
46717- if (i && copy_to_user(data, readbuf, i))
46718- return -EFAULT;
46719+ if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
46720+ i = -EFAULT;
46721+
46722 return i;
46723 }
46724
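
cadet_read() copies into a fixed RDS_BUFFER-byte stack array under a user-supplied count, so the size must be validated before the fill loop, and i re-checked before copy_to_user(). Rejecting oversized requests is one option; a read handler can equally clamp, as in this sketch (names hypothetical):

#define RDS_BUFFER 256

static ssize_t rds_read(struct file *file, char __user *data,
			size_t count, loff_t *ppos)
{
	unsigned char readbuf[RDS_BUFFER];
	size_t i = 0;

	count = min(count, sizeof(readbuf));	/* never overrun readbuf */

	/* ... fill readbuf[0..i-1] from the device fifo, i <= count ... */

	if (i && copy_to_user(data, readbuf, i))
		return -EFAULT;
	return i;
}
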
46725diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
46726index 5236035..c622c74 100644
46727--- a/drivers/media/radio/radio-maxiradio.c
46728+++ b/drivers/media/radio/radio-maxiradio.c
46729@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
46730 /* TEA5757 pin mappings */
46731 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
46732
46733-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
46734+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
46735
46736 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
46737 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
46738diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
46739index 050b3bb..79f62b9 100644
46740--- a/drivers/media/radio/radio-shark.c
46741+++ b/drivers/media/radio/radio-shark.c
46742@@ -79,7 +79,7 @@ struct shark_device {
46743 u32 last_val;
46744 };
46745
46746-static atomic_t shark_instance = ATOMIC_INIT(0);
46747+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
46748
46749 static void shark_write_val(struct snd_tea575x *tea, u32 val)
46750 {
46751diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
46752index 8654e0d..0608a64 100644
46753--- a/drivers/media/radio/radio-shark2.c
46754+++ b/drivers/media/radio/radio-shark2.c
46755@@ -74,7 +74,7 @@ struct shark_device {
46756 u8 *transfer_buffer;
46757 };
46758
46759-static atomic_t shark_instance = ATOMIC_INIT(0);
46760+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
46761
46762 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
46763 {
46764diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
46765index 2fd9009..278cc1e 100644
46766--- a/drivers/media/radio/radio-si476x.c
46767+++ b/drivers/media/radio/radio-si476x.c
46768@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
46769 struct si476x_radio *radio;
46770 struct v4l2_ctrl *ctrl;
46771
46772- static atomic_t instance = ATOMIC_INIT(0);
46773+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
46774
46775 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
46776 if (!radio)
46777diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
46778index 9fd1527..8927230 100644
46779--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
46780+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
46781@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
46782
46783 static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
46784 {
46785- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
46786- char result[64];
46787- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
46788- sizeof(result), 0);
46789+ char *buf;
46790+ char *result;
46791+ int retval;
46792+
46793+ buf = kmalloc(2, GFP_KERNEL);
46794+ if (buf == NULL)
46795+ return -ENOMEM;
46796+ result = kmalloc(64, GFP_KERNEL);
46797+ if (result == NULL) {
46798+ kfree(buf);
46799+ return -ENOMEM;
46800+ }
46801+
46802+ buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
46803+ buf[1] = enable ? 1 : 0;
46804+
46805+ retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
46806+
46807+ kfree(buf);
46808+ kfree(result);
46809+ return retval;
46810 }
46811
46812 static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
46813 {
46814- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
46815- char state[3];
46816- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
46817+ char *buf;
46818+ char *state;
46819+ int retval;
46820+
46821+ buf = kmalloc(2, GFP_KERNEL);
46822+ if (buf == NULL)
46823+ return -ENOMEM;
46824+ state = kmalloc(3, GFP_KERNEL);
46825+ if (state == NULL) {
46826+ kfree(buf);
46827+ return -ENOMEM;
46828+ }
46829+
46830+ buf[0] = CINERGYT2_EP1_SLEEP_MODE;
46831+ buf[1] = enable ? 0 : 1;
46832+
46833+ retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
46834+
46835+ kfree(buf);
46836+ kfree(state);
46837+ return retval;
46838 }
46839
46840 static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
46841 {
46842- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
46843- char state[3];
46844+ char *query;
46845+ char *state;
46846 int ret;
46847+ query = kmalloc(1, GFP_KERNEL);
46848+ if (query == NULL)
46849+ return -ENOMEM;
46850+ state = kmalloc(3, GFP_KERNEL);
46851+ if (state == NULL) {
46852+ kfree(query);
46853+ return -ENOMEM;
46854+ }
46855+
46856+ query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
46857
46858 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
46859
46860- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
46861- sizeof(state), 0);
46862+ ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
46863 if (ret < 0) {
46864 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
46865 "state info\n");
46866@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
46867
46868 /* Copy this pointer as we are gonna need it in the release phase */
46869 cinergyt2_usb_device = adap->dev;
46870-
46871+ kfree(query);
46872+ kfree(state);
46873 return 0;
46874 }
46875
46876@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
46877 static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46878 {
46879 struct cinergyt2_state *st = d->priv;
46880- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
46881+ u8 *key, *cmd;
46882 int i;
46883
46884+ cmd = kmalloc(1, GFP_KERNEL);
46885+ if (cmd == NULL)
46886+ return -ENOMEM;
46887+ key = kzalloc(5, GFP_KERNEL);
46888+ if (key == NULL) {
46889+ kfree(cmd);
46890+ return -ENOMEM;
46891+ }
46892+
46893+ cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
46894+
46895 *state = REMOTE_NO_KEY_PRESSED;
46896
46897- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
46898+ dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
46899 if (key[4] == 0xff) {
46900 /* key repeat */
46901 st->rc_counter++;
46902@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46903 *event = d->last_event;
46904 deb_rc("repeat key, event %x\n",
46905 *event);
46906- return 0;
46907+ goto out;
46908 }
46909 }
46910 deb_rc("repeated key (non repeatable)\n");
46911 }
46912- return 0;
46913+ goto out;
46914 }
46915
46916 /* hack to pass checksum on the custom field */
46917@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46918
46919 deb_rc("key: %*ph\n", 5, key);
46920 }
46921+out:
46922+ kfree(cmd);
46923+ kfree(key);
46924 return 0;
46925 }
46926
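
The cinergyT2 rewrites all follow from one rule: buffers handed to dvb_usb_generic_rw() end up in usb_bulk_msg(), and USB transfer buffers must be kmalloc'd, DMA-able memory, never on-stack arrays. The resulting alloc/free boilerplate could plausibly be folded into a helper, sketched here under the same assumptions (helper name hypothetical):

static int cinergyt2_rw_heap(struct dvb_usb_device *d, u8 cmd,
			     u8 *reply, int reply_len)
{
	u8 *wbuf, *rbuf;
	int ret;

	wbuf = kmalloc(1, GFP_KERNEL);
	rbuf = kmalloc(reply_len, GFP_KERNEL);
	if (!wbuf || !rbuf) {
		ret = -ENOMEM;
		goto out;
	}

	wbuf[0] = cmd;
	ret = dvb_usb_generic_rw(d, wbuf, 1, rbuf, reply_len, 0);
	if (ret >= 0 && reply)
		memcpy(reply, rbuf, reply_len);	/* caller's copy may be on-stack */
out:
	kfree(wbuf);				/* kfree(NULL) is a no-op */
	kfree(rbuf);
	return ret;
}
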
46927diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46928index c890fe4..f9b2ae6 100644
46929--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46930+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46931@@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
46932 fe_status_t *status)
46933 {
46934 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46935- struct dvbt_get_status_msg result;
46936- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46937+ struct dvbt_get_status_msg *result;
46938+ u8 *cmd;
46939 int ret;
46940
46941- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
46942- sizeof(result), 0);
46943+ cmd = kmalloc(1, GFP_KERNEL);
46944+ if (cmd == NULL)
46945+ return -ENOMEM;
46946+ result = kmalloc(sizeof(*result), GFP_KERNEL);
46947+ if (result == NULL) {
46948+ kfree(cmd);
46949+ return -ENOMEM;
46950+ }
46951+
46952+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46953+
46954+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
46955+ sizeof(*result), 0);
46956 if (ret < 0)
46957- return ret;
46958+ goto out;
46959
46960 *status = 0;
46961
46962- if (0xffff - le16_to_cpu(result.gain) > 30)
46963+ if (0xffff - le16_to_cpu(result->gain) > 30)
46964 *status |= FE_HAS_SIGNAL;
46965- if (result.lock_bits & (1 << 6))
46966+ if (result->lock_bits & (1 << 6))
46967 *status |= FE_HAS_LOCK;
46968- if (result.lock_bits & (1 << 5))
46969+ if (result->lock_bits & (1 << 5))
46970 *status |= FE_HAS_SYNC;
46971- if (result.lock_bits & (1 << 4))
46972+ if (result->lock_bits & (1 << 4))
46973 *status |= FE_HAS_CARRIER;
46974- if (result.lock_bits & (1 << 1))
46975+ if (result->lock_bits & (1 << 1))
46976 *status |= FE_HAS_VITERBI;
46977
46978 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
46979 (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
46980 *status &= ~FE_HAS_LOCK;
46981
46982- return 0;
46983+out:
46984+ kfree(cmd);
46985+ kfree(result);
46986+ return ret;
46987 }
46988
46989 static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
46990 {
46991 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46992- struct dvbt_get_status_msg status;
46993- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46994+ struct dvbt_get_status_msg *status;
46995+ char *cmd;
46996 int ret;
46997
46998- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46999- sizeof(status), 0);
47000+ cmd = kmalloc(1, GFP_KERNEL);
47001+ if (cmd == NULL)
47002+ return -ENOMEM;
47003+ status = kmalloc(sizeof(*status), GFP_KERNEL);
47004+ if (status == NULL) {
47005+ kfree(cmd);
47006+ return -ENOMEM;
47007+ }
47008+
47009+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
47010+
47011+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
47012+ sizeof(*status), 0);
47013 if (ret < 0)
47014- return ret;
47015+ goto out;
47016
47017- *ber = le32_to_cpu(status.viterbi_error_rate);
47018+ *ber = le32_to_cpu(status->viterbi_error_rate);
47019+out:
47020+ kfree(cmd);
47021+ kfree(status);
47022- return 0;
47022+ return ret;
47023 }
47024
47025 static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
47026 {
47027 struct cinergyt2_fe_state *state = fe->demodulator_priv;
47028- struct dvbt_get_status_msg status;
47029- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
47030+ struct dvbt_get_status_msg *status;
47031+ u8 *cmd;
47032 int ret;
47033
47034- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
47035- sizeof(status), 0);
47036+ cmd = kmalloc(1, GFP_KERNEL);
47037+ if (cmd == NULL)
47038+ return -ENOMEM;
47039+ status = kmalloc(sizeof(*status), GFP_KERNEL);
47040+ if (status == NULL) {
47041+ kfree(cmd);
47042+ return -ENOMEM;
47043+ }
47044+
47045+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
47046+
47047+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
47048+ sizeof(*status), 0);
47049 if (ret < 0) {
47050 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
47051 ret);
47052- return ret;
47053+ goto out;
47054 }
47055- *unc = le32_to_cpu(status.uncorrected_block_count);
47056- return 0;
47057+ *unc = le32_to_cpu(status->uncorrected_block_count);
47058+
47059+out:
47060+ kfree(cmd);
47061+ kfree(status);
47062+ return ret;
47063 }
47064
47065 static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
47066 u16 *strength)
47067 {
47068 struct cinergyt2_fe_state *state = fe->demodulator_priv;
47069- struct dvbt_get_status_msg status;
47070- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
47071+ struct dvbt_get_status_msg *status;
47072+ char *cmd;
47073 int ret;
47074
47075- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
47076- sizeof(status), 0);
47077+ cmd = kmalloc(1, GFP_KERNEL);
47078+ if (cmd == NULL)
47079+ return -ENOMEM;
47080+ status = kmalloc(sizeof(*status), GFP_KERNEL);
47081+ if (status == NULL) {
47082+ kfree(cmd);
47083+ return -ENOMEM;
47084+ }
47085+
47086+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
47087+
47088+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
47089+ sizeof(*status), 0);
47090 if (ret < 0) {
47091 err("cinergyt2_fe_read_signal_strength() Failed!"
47092 " (Error=%d)\n", ret);
47093- return ret;
47094+ goto out;
47095 }
47096- *strength = (0xffff - le16_to_cpu(status.gain));
47097+ *strength = (0xffff - le16_to_cpu(status->gain));
47098+
47099+out:
47100+ kfree(cmd);
47101+ kfree(status);
47102- return 0;
47102+ return ret;
47103 }
47104
47105 static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
47106 {
47107 struct cinergyt2_fe_state *state = fe->demodulator_priv;
47108- struct dvbt_get_status_msg status;
47109- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
47110+ struct dvbt_get_status_msg *status;
47111+ char *cmd;
47112 int ret;
47113
47114- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
47115- sizeof(status), 0);
47116+ cmd = kmalloc(1, GFP_KERNEL);
47117+ if (cmd == NULL)
47118+ return -ENOMEM;
47119+ status = kmalloc(sizeof(*status), GFP_KERNEL);
47120+ if (status == NULL) {
47121+ kfree(cmd);
47122+ return -ENOMEM;
47123+ }
47124+
47125+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
47126+
47127+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
47128+ sizeof(*status), 0);
47129 if (ret < 0) {
47130 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
47131- return ret;
47132+ goto out;
47133 }
47134- *snr = (status.snr << 8) | status.snr;
47135- return 0;
47136+ *snr = (status->snr << 8) | status->snr;
47137+
47138+out:
47139+ kfree(cmd);
47140+ kfree(status);
47141+ return ret;
47142 }
47143
47144 static int cinergyt2_fe_init(struct dvb_frontend *fe)
47145@@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
47146 {
47147 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
47148 struct cinergyt2_fe_state *state = fe->demodulator_priv;
47149- struct dvbt_set_parameters_msg param;
47150- char result[2];
47151+ struct dvbt_set_parameters_msg *param;
47152+ char *result;
47153 int err;
47154
47155- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
47156- param.tps = cpu_to_le16(compute_tps(fep));
47157- param.freq = cpu_to_le32(fep->frequency / 1000);
47158- param.flags = 0;
47159+ result = kmalloc(2, GFP_KERNEL);
47160+ if (result == NULL)
47161+ return -ENOMEM;
47162+ param = kmalloc(sizeof(*param), GFP_KERNEL);
47163+ if (param == NULL) {
47164+ kfree(result);
47165+ return -ENOMEM;
47166+ }
47167+
47168+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
47169+ param->tps = cpu_to_le16(compute_tps(fep));
47170+ param->freq = cpu_to_le32(fep->frequency / 1000);
47171+ param->flags = 0;
47172
47173 switch (fep->bandwidth_hz) {
47174 default:
47175 case 8000000:
47176- param.bandwidth = 8;
47177+ param->bandwidth = 8;
47178 break;
47179 case 7000000:
47180- param.bandwidth = 7;
47181+ param->bandwidth = 7;
47182 break;
47183 case 6000000:
47184- param.bandwidth = 6;
47185+ param->bandwidth = 6;
47186 break;
47187 }
47188
47189 err = dvb_usb_generic_rw(state->d,
47190- (char *)&param, sizeof(param),
47191- result, sizeof(result), 0);
47192+ (char *)param, sizeof(*param),
47193+ result, 2, 0);
47194 if (err < 0)
47195 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
47196
47197- return (err < 0) ? err : 0;
47198+ kfree(result);
47199+ kfree(param);
47200+ return err;
47201 }
47202
47203 static void cinergyt2_fe_release(struct dvb_frontend *fe)
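
All five read_* callbacks above now repeat the same dozen lines of allocate-command-transfer-free around CINERGYT2_EP1_GET_TUNER_STATUS. A single fetch helper would keep the heap-buffer property while shrinking each callback to its field extraction; a sketch (helper name hypothetical):

static int cinergyt2_fe_get_status_msg(struct cinergyt2_fe_state *state,
				       struct dvbt_get_status_msg *out)
{
	struct dvbt_get_status_msg *msg;
	u8 *cmd;
	int ret;

	cmd = kmalloc(1, GFP_KERNEL);
	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
	if (!cmd || !msg) {
		ret = -ENOMEM;
		goto out;
	}

	cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
	ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)msg,
				 sizeof(*msg), 0);
	if (ret >= 0)
		*out = *msg;	/* DMA hit the heap copy; out may be on-stack */
out:
	kfree(cmd);
	kfree(msg);
	return ret;
}

/* e.g. read_ber then collapses to: */
static int cinergyt2_fe_read_ber_sketch(struct dvb_frontend *fe, u32 *ber)
{
	struct cinergyt2_fe_state *state = fe->demodulator_priv;
	struct dvbt_get_status_msg msg;
	int ret = cinergyt2_fe_get_status_msg(state, &msg);

	if (ret < 0)
		return ret;
	*ber = le32_to_cpu(msg.viterbi_error_rate);
	return 0;
}
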
47204diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
47205index a1c641e..3007da9 100644
47206--- a/drivers/media/usb/dvb-usb/cxusb.c
47207+++ b/drivers/media/usb/dvb-usb/cxusb.c
47208@@ -1112,7 +1112,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
47209
47210 struct dib0700_adapter_state {
47211 int (*set_param_save) (struct dvb_frontend *);
47212-};
47213+} __no_const;
47214
47215 static int dib7070_set_param_override(struct dvb_frontend *fe)
47216 {
47217diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
47218index 733a7ff..f8b52e3 100644
47219--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
47220+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
47221@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
47222
47223 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
47224 {
47225- struct hexline hx;
47226- u8 reset;
47227+ struct hexline *hx;
47228+ u8 *reset;
47229 int ret,pos=0;
47230
47231+ reset = kmalloc(1, GFP_KERNEL);
47232+ if (reset == NULL)
47233+ return -ENOMEM;
47234+
47235+ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
47236+ if (hx == NULL) {
47237+ kfree(reset);
47238+ return -ENOMEM;
47239+ }
47240+
47241 /* stop the CPU */
47242- reset = 1;
47243- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
47244+ reset[0] = 1;
47245+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
47246 err("could not stop the USB controller CPU.");
47247
47248- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
47249- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
47250- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
47251+ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
47252+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
47253+ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
47254
47255- if (ret != hx.len) {
47256+ if (ret != hx->len) {
47257 err("error while transferring firmware "
47258 "(transferred size: %d, block size: %d)",
47259- ret,hx.len);
47260+ ret,hx->len);
47261 ret = -EINVAL;
47262 break;
47263 }
47264 }
47265 if (ret < 0) {
47266 err("firmware download failed at %d with %d",pos,ret);
47267+ kfree(reset);
47268+ kfree(hx);
47269 return ret;
47270 }
47271
47272 if (ret == 0) {
47273 /* restart the CPU */
47274- reset = 0;
47275- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
47276+ reset[0] = 0;
47277+ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
47278 err("could not restart the USB controller CPU.");
47279 ret = -EINVAL;
47280 }
47281 } else
47282 ret = -EIO;
47283
47284+ kfree(reset);
47285+ kfree(hx);
47286+
47287 return ret;
47288 }
47289 EXPORT_SYMBOL(usb_cypress_load_firmware);
47290diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
47291index ae0f56a..ec71784 100644
47292--- a/drivers/media/usb/dvb-usb/dw2102.c
47293+++ b/drivers/media/usb/dvb-usb/dw2102.c
47294@@ -118,7 +118,7 @@ struct su3000_state {
47295
47296 struct s6x0_state {
47297 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
47298-};
47299+} __no_const;
47300
47301 /* debug */
47302 static int dvb_usb_dw2102_debug;
47303diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
47304index d947e03..87fef42 100644
47305--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
47306+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
47307@@ -87,8 +87,11 @@ struct technisat_usb2_state {
47308 static int technisat_usb2_i2c_access(struct usb_device *udev,
47309 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
47310 {
47311- u8 b[64];
47312- int ret, actual_length;
47313+ u8 *b = kmalloc(64, GFP_KERNEL);
47314+ int ret, actual_length, error = 0;
47315+
47316+ if (b == NULL)
47317+ return -ENOMEM;
47318
47319 deb_i2c("i2c-access: %02x, tx: ", device_addr);
47320 debug_dump(tx, txlen, deb_i2c);
47321@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47322
47323 if (ret < 0) {
47324 err("i2c-error: out failed %02x = %d", device_addr, ret);
47325- return -ENODEV;
47326+ error = -ENODEV;
47327+ goto out;
47328 }
47329
47330 ret = usb_bulk_msg(udev,
47331@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47332 b, 64, &actual_length, 1000);
47333 if (ret < 0) {
47334 err("i2c-error: in failed %02x = %d", device_addr, ret);
47335- return -ENODEV;
47336+ error = -ENODEV;
47337+ goto out;
47338 }
47339
47340 if (b[0] != I2C_STATUS_OK) {
47341@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47342 /* handle tuner-i2c-nak */
47343 if (!(b[0] == I2C_STATUS_NAK &&
47344 device_addr == 0x60
47345- /* && device_is_technisat_usb2 */))
47346- return -ENODEV;
47347+ /* && device_is_technisat_usb2 */)) {
47348+ error = -ENODEV;
47349+ goto out;
47350+ }
47351 }
47352
47353 deb_i2c("status: %d, ", b[0]);
47354@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47355
47356 deb_i2c("\n");
47357
47358- return 0;
47359+out:
47360+ kfree(b);
47361+ return error;
47362 }
47363
47364 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
47365@@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
47366 {
47367 int ret;
47368
47369- u8 led[8] = {
47370- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
47371- 0
47372- };
47373+ u8 *led = kzalloc(8, GFP_KERNEL);
47374+
47375+ if (led == NULL)
47376+ return -ENOMEM;
47377
47378- if (disable_led_control && state != TECH_LED_OFF)
47378- return 0;
47378+ if (disable_led_control && state != TECH_LED_OFF) {
47378+ kfree(led);
47378+ return 0;
47378+ }
47380
47381+ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
47382+
47383 switch (state) {
47384 case TECH_LED_ON:
47385 led[1] = 0x82;
47386@@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
47387 red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
47388 USB_TYPE_VENDOR | USB_DIR_OUT,
47389 0, 0,
47390- led, sizeof(led), 500);
47391+ led, 8, 500);
47392
47393 mutex_unlock(&d->i2c_mutex);
47394+
47395+ kfree(led);
47396+
47397 return ret;
47398 }
47399
47400 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
47401 {
47402 int ret;
47403- u8 b = 0;
47404+ u8 *b = kzalloc(1, GFP_KERNEL);
47405+
47406+ if (b == NULL)
47407+ return -ENOMEM;
47408
47409- if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
47409- return -EAGAIN;
47409+ if (mutex_lock_interruptible(&d->i2c_mutex) < 0) {
47409+ kfree(b);
47409+ return -EAGAIN;
47409+ }
47411@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
47412 SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
47413 USB_TYPE_VENDOR | USB_DIR_OUT,
47414 (red << 8) | green, 0,
47415- &b, 1, 500);
47416+ b, 1, 500);
47417
47418 mutex_unlock(&d->i2c_mutex);
47419
47420+ kfree(b);
47421+
47422 return ret;
47423 }
47424
47425@@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47426 struct dvb_usb_device_description **desc, int *cold)
47427 {
47428 int ret;
47429- u8 version[3];
47430+ u8 *version = kmalloc(3, GFP_KERNEL);
47431
47432 /* first select the interface */
47433 if (usb_set_interface(udev, 0, 1) != 0)
47434@@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47435
47436 *cold = 0; /* by default do not download a firmware - just in case something is wrong */
47437
47438+ if (version == NULL)
47439+ return 0;
47440+
47441 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
47442 GET_VERSION_INFO_VENDOR_REQUEST,
47443 USB_TYPE_VENDOR | USB_DIR_IN,
47444 0, 0,
47445- version, sizeof(version), 500);
47446+ version, 3, 500);
47447
47448 if (ret < 0)
47449 *cold = 1;
47450@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47451 *cold = 0;
47452 }
47453
47454+ kfree(version);
47455+
47456 return 0;
47457 }
47458
47459@@ -591,10 +615,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
47460
47461 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
47462 {
47463- u8 buf[62], *b;
47464+ u8 *buf, *b;
47465 int ret;
47466 struct ir_raw_event ev;
47467
47468+ buf = kmalloc(62, GFP_KERNEL);
47469+
47470+ if (buf == NULL)
47471+ return -ENOMEM;
47472+
47473 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
47474 buf[1] = 0x08;
47475 buf[2] = 0x8f;
47476@@ -617,16 +646,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
47477 GET_IR_DATA_VENDOR_REQUEST,
47478 USB_TYPE_VENDOR | USB_DIR_IN,
47479 0x8080, 0,
47480- buf, sizeof(buf), 500);
47481+ buf, 62, 500);
47482
47483 unlock:
47484 mutex_unlock(&d->i2c_mutex);
47485
47486- if (ret < 0)
47487+ if (ret < 0) {
47488+ kfree(buf);
47489 return ret;
47490+ }
47491
47492- if (ret == 1)
47493+ if (ret == 1) {
47494+ kfree(buf);
47495 return 0; /* no key pressed */
47496+ }
47497
47498 /* decoding */
47499 b = buf+1;
47500@@ -653,6 +686,8 @@ unlock:
47501
47502 ir_raw_event_handle(d->rc_dev);
47503
47504+ kfree(buf);
47505+
47506 return 1;
47507 }
47508
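
The technisat conversion also shows the discipline heap buffers impose: once a kmalloc() succeeds, every early return is a leak, so each function is reshaped toward the kernel's single-exit goto idiom. Distilled (the two *_stub calls are placeholders, not real APIs):

static int usb_bulk_msg_stub(struct usb_device *udev, u8 *buf, int len);
static int parse_reply_stub(const u8 *buf);

static int do_transfer(struct usb_device *udev)
{
	u8 *b = kmalloc(64, GFP_KERNEL);
	int ret;

	if (!b)
		return -ENOMEM;		/* only safe early return: nothing held */

	ret = usb_bulk_msg_stub(udev, b, 64);
	if (ret < 0)
		goto out;		/* never a bare 'return ret;' once b is live */

	ret = parse_reply_stub(b);
out:
	kfree(b);			/* single exit frees on every path */
	return ret;
}
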
47509diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47510index 7e2411c..cef73ca 100644
47511--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47512+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47513@@ -328,7 +328,7 @@ struct v4l2_buffer32 {
47514 __u32 reserved;
47515 };
47516
47517-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
47518+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
47519 enum v4l2_memory memory)
47520 {
47521 void __user *up_pln;
47522@@ -357,7 +357,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
47523 return 0;
47524 }
47525
47526-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
47527+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
47528 enum v4l2_memory memory)
47529 {
47530 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
47531@@ -427,7 +427,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
47532 * by passing a very big num_planes value */
47533 uplane = compat_alloc_user_space(num_planes *
47534 sizeof(struct v4l2_plane));
47535- kp->m.planes = uplane;
47536+ kp->m.planes = (struct v4l2_plane __force_kernel *)uplane;
47537
47538 while (--num_planes >= 0) {
47539 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
47540@@ -498,7 +498,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
47541 if (num_planes == 0)
47542 return 0;
47543
47544- uplane = kp->m.planes;
47545+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
47546 if (get_user(p, &up->m.planes))
47547 return -EFAULT;
47548 uplane32 = compat_ptr(p);
47549@@ -552,7 +552,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
47550 get_user(kp->capability, &up->capability) ||
47551 get_user(kp->flags, &up->flags))
47552 return -EFAULT;
47553- kp->base = compat_ptr(tmp);
47554+ kp->base = (void __force_kernel *)compat_ptr(tmp);
47555 get_v4l2_pix_format(&kp->fmt, &up->fmt);
47556 return 0;
47557 }
47558@@ -658,7 +658,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
47559 n * sizeof(struct v4l2_ext_control32)))
47560 return -EFAULT;
47561 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
47562- kp->controls = kcontrols;
47563+ kp->controls = (struct v4l2_ext_control __force_kernel *)kcontrols;
47564 while (--n >= 0) {
47565 if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
47566 return -EFAULT;
47567@@ -680,7 +680,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
47568 static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
47569 {
47570 struct v4l2_ext_control32 __user *ucontrols;
47571- struct v4l2_ext_control __user *kcontrols = kp->controls;
47572+ struct v4l2_ext_control __user *kcontrols = (struct v4l2_ext_control __force_user *)kp->controls;
47573 int n = kp->count;
47574 compat_caddr_t p;
47575
47576@@ -774,7 +774,7 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
47577 put_user(kp->start_block, &up->start_block) ||
47578 put_user(kp->blocks, &up->blocks) ||
47579 put_user(tmp, &up->edid) ||
47580- copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
47581+ copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
47582 return -EFAULT;
47583 return 0;
47584 }
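
The compat-ioctl hunks are sparse address-space hygiene: compat_alloc_user_space() hands back userspace memory, so storing it in a kernel-side struct field, or vice versa, needs an explicit force-cast rather than silently mixing pointer spaces. Roughly how the annotations decompose under sparse (the __force_user/__force_kernel spellings are grsecurity's; stock kernels have only __force, so this is a reconstruction):

struct plane;

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __kernel	__attribute__((address_space(0)))
# define __force	__attribute__((force))
#else
# define __user
# define __kernel
# define __force
#endif
#define __force_user	__force __user
#define __force_kernel	__force __kernel

struct kside {
	struct plane __user *planes;	/* kernel struct holding a user ptr */
};

/* crossing spaces is then always a visible, auditable cast:
 *	kp->m.planes = (struct v4l2_plane __force_kernel *)uplane;
 *	uplane       = (struct v4l2_plane __force_user *)kp->m.planes;
 */
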
47585diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
47586index 55c6832..a91c7a6 100644
47587--- a/drivers/media/v4l2-core/v4l2-ctrls.c
47588+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
47589@@ -1431,8 +1431,8 @@ static int validate_new(const struct v4l2_ctrl *ctrl,
47590 return 0;
47591
47592 case V4L2_CTRL_TYPE_STRING:
47593- len = strlen(c->string);
47594- if (len < ctrl->minimum)
47595+ len = strlen_user(c->string);
47596+ if (!len || len < ctrl->minimum)
47597 return -ERANGE;
47598 if ((len - ctrl->minimum) % ctrl->step)
47599 return -ERANGE;
47600diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
47601index 015f92a..59e311e 100644
47602--- a/drivers/media/v4l2-core/v4l2-device.c
47603+++ b/drivers/media/v4l2-core/v4l2-device.c
47604@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
47605 EXPORT_SYMBOL_GPL(v4l2_device_put);
47606
47607 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
47608- atomic_t *instance)
47609+ atomic_unchecked_t *instance)
47610 {
47611- int num = atomic_inc_return(instance) - 1;
47612+ int num = atomic_inc_return_unchecked(instance) - 1;
47613 int len = strlen(basename);
47614
47615 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
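
[Editor's note] The atomic_t to atomic_unchecked_t conversion above recurs throughout this patch. Assuming a PAX_REFCOUNT kernel, plain atomic_t operations trap on overflow to stop refcount exploits; counters that may wrap harmlessly (statistics, instance numbers) opt out via the _unchecked variants. A sketch of the idiom with made-up names:

	#include <linux/atomic.h>

	/* instance counter: wrapping is harmless, so skip the
	 * PAX_REFCOUNT overflow check */
	static atomic_unchecked_t instance = ATOMIC_INIT(0);

	static int next_instance(void)
	{
		return atomic_inc_return_unchecked(&instance) - 1;
	}
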
47616diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
47617index 16bffd8..3ab516a 100644
47618--- a/drivers/media/v4l2-core/v4l2-ioctl.c
47619+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
47620@@ -2003,7 +2003,8 @@ struct v4l2_ioctl_info {
47621 struct file *file, void *fh, void *p);
47622 } u;
47623 void (*debug)(const void *arg, bool write_only);
47624-};
47625+} __do_const;
47626+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
47627
47628 /* This control needs a priority check */
47629 #define INFO_FL_PRIO (1 << 0)
47630@@ -2186,7 +2187,7 @@ static long __video_do_ioctl(struct file *file,
47631 struct video_device *vfd = video_devdata(file);
47632 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
47633 bool write_only = false;
47634- struct v4l2_ioctl_info default_info;
47635+ v4l2_ioctl_info_no_const default_info;
47636 const struct v4l2_ioctl_info *info;
47637 void *fh = file->private_data;
47638 struct v4l2_fh *vfh = NULL;
47639@@ -2276,7 +2277,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47640 ret = -EINVAL;
47641 break;
47642 }
47643- *user_ptr = (void __user *)buf->m.planes;
47644+ *user_ptr = (void __force_user *)buf->m.planes;
47645 *kernel_ptr = (void **)&buf->m.planes;
47646 *array_size = sizeof(struct v4l2_plane) * buf->length;
47647 ret = 1;
47648@@ -2293,7 +2294,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47649 ret = -EINVAL;
47650 break;
47651 }
47652- *user_ptr = (void __user *)edid->edid;
47653+ *user_ptr = (void __force_user *)edid->edid;
47654 *kernel_ptr = (void **)&edid->edid;
47655 *array_size = edid->blocks * 128;
47656 ret = 1;
47657@@ -2311,7 +2312,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47658 ret = -EINVAL;
47659 break;
47660 }
47661- *user_ptr = (void __user *)ctrls->controls;
47662+ *user_ptr = (void __force_user *)ctrls->controls;
47663 *kernel_ptr = (void **)&ctrls->controls;
47664 *array_size = sizeof(struct v4l2_ext_control)
47665 * ctrls->count;
47666@@ -2412,7 +2413,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
47667 }
47668
47669 if (has_array_args) {
47670- *kernel_ptr = (void __force *)user_ptr;
47671+ *kernel_ptr = (void __force_kernel *)user_ptr;
47672 if (copy_to_user(user_ptr, mbuf, array_size))
47673 err = -EFAULT;
47674 goto out_array_args;
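
[Editor's note] The __do_const / __no_const pair above comes from the grsecurity constify GCC plugin: __do_const forces a struct of function pointers to be treated as read-only everywhere, while a __no_const typedef re-opens it for the one stack-local instance that must be filled at runtime (default_info here). A reduced sketch, names invented:

	#include <linux/fs.h>

	struct my_ioctl_info {
		unsigned int ioctl;
		long (*func)(struct file *, void *);
	} __do_const;
	typedef struct my_ioctl_info __no_const my_ioctl_info_no_const;

	static long dispatch(struct file *filp, void *arg,
			     const struct my_ioctl_info *table)
	{
		my_ioctl_info_no_const info = *table;	/* writable stack copy */

		return info.func(filp, arg);
	}
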
47675diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
47676index ebc0af7..baed058 100644
47677--- a/drivers/message/fusion/mptbase.c
47678+++ b/drivers/message/fusion/mptbase.c
47679@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
47680 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
47681 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
47682
47683+#ifdef CONFIG_GRKERNSEC_HIDESYM
47684+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
47685+#else
47686 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
47687 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
47688+#endif
47689+
47690 /*
47691 * Rounding UP to nearest 4-kB boundary here...
47692 */
47693@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
47694 ioc->facts.GlobalCredits);
47695
47696 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
47697+#ifdef CONFIG_GRKERNSEC_HIDESYM
47698+ NULL, NULL);
47699+#else
47700 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
47701+#endif
47702 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
47703 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
47704 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
47705diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
47706index 711fcb5..5da1fb0 100644
47707--- a/drivers/message/fusion/mptsas.c
47708+++ b/drivers/message/fusion/mptsas.c
47709@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
47710 return 0;
47711 }
47712
47713+static inline void
47714+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
47715+{
47716+ if (phy_info->port_details) {
47717+ phy_info->port_details->rphy = rphy;
47718+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
47719+ ioc->name, rphy));
47720+ }
47721+
47722+ if (rphy) {
47723+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
47724+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
47725+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
47726+ ioc->name, rphy, rphy->dev.release));
47727+ }
47728+}
47729+
47730 /* no mutex */
47731 static void
47732 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
47733@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
47734 return NULL;
47735 }
47736
47737-static inline void
47738-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
47739-{
47740- if (phy_info->port_details) {
47741- phy_info->port_details->rphy = rphy;
47742- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
47743- ioc->name, rphy));
47744- }
47745-
47746- if (rphy) {
47747- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
47748- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
47749- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
47750- ioc->name, rphy, rphy->dev.release));
47751- }
47752-}
47753-
47754 static inline struct sas_port *
47755 mptsas_get_port(struct mptsas_phyinfo *phy_info)
47756 {
47757diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
47758index 2a1c6f2..a04c6a2 100644
47759--- a/drivers/message/fusion/mptscsih.c
47760+++ b/drivers/message/fusion/mptscsih.c
47761@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
47762
47763 h = shost_priv(SChost);
47764
47765- if (h) {
47766- if (h->info_kbuf == NULL)
47767- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
47768- return h->info_kbuf;
47769- h->info_kbuf[0] = '\0';
47770+ if (!h)
47771+ return NULL;
47772
47773- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
47774- h->info_kbuf[size-1] = '\0';
47775- }
47776+ if (h->info_kbuf == NULL)
47777+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
47778+ return h->info_kbuf;
47779+ h->info_kbuf[0] = '\0';
47780+
47781+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
47782+ h->info_kbuf[size-1] = '\0';
47783
47784 return h->info_kbuf;
47785 }
47786diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
47787index b7d87cd..3fb36da 100644
47788--- a/drivers/message/i2o/i2o_proc.c
47789+++ b/drivers/message/i2o/i2o_proc.c
47790@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
47791 "Array Controller Device"
47792 };
47793
47794-static char *chtostr(char *tmp, u8 *chars, int n)
47795-{
47796- tmp[0] = 0;
47797- return strncat(tmp, (char *)chars, n);
47798-}
47799-
47800 static int i2o_report_query_status(struct seq_file *seq, int block_status,
47801 char *group)
47802 {
47803@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
47804 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
47805 {
47806 struct i2o_controller *c = (struct i2o_controller *)seq->private;
47807- static u32 work32[5];
47808- static u8 *work8 = (u8 *) work32;
47809- static u16 *work16 = (u16 *) work32;
47810+ u32 work32[5];
47811+ u8 *work8 = (u8 *) work32;
47812+ u16 *work16 = (u16 *) work32;
47813 int token;
47814 u32 hwcap;
47815
47816@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
47817 } *result;
47818
47819 i2o_exec_execute_ddm_table ddm_table;
47820- char tmp[28 + 1];
47821
47822 result = kmalloc(sizeof(*result), GFP_KERNEL);
47823 if (!result)
47824@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
47825
47826 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
47827 seq_printf(seq, "%-#8x", ddm_table.module_id);
47828- seq_printf(seq, "%-29s",
47829- chtostr(tmp, ddm_table.module_name_version, 28));
47830+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
47831 seq_printf(seq, "%9d ", ddm_table.data_size);
47832 seq_printf(seq, "%8d", ddm_table.code_size);
47833
47834@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
47835
47836 i2o_driver_result_table *result;
47837 i2o_driver_store_table *dst;
47838- char tmp[28 + 1];
47839
47840 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
47841 if (result == NULL)
47842@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
47843
47844 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
47845 seq_printf(seq, "%-#8x", dst->module_id);
47846- seq_printf(seq, "%-29s",
47847- chtostr(tmp, dst->module_name_version, 28));
47848- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
47849+ seq_printf(seq, "%-.28s", dst->module_name_version);
47850+ seq_printf(seq, "%-.8s", dst->date);
47851 seq_printf(seq, "%8d ", dst->module_size);
47852 seq_printf(seq, "%8d ", dst->mpb_size);
47853 seq_printf(seq, "0x%04x", dst->module_flags);
47854@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
47855 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
47856 {
47857 struct i2o_device *d = (struct i2o_device *)seq->private;
47858- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
47859+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
47860 // == (allow) 512d bytes (max)
47861- static u16 *work16 = (u16 *) work32;
47862+ u16 *work16 = (u16 *) work32;
47863 int token;
47864- char tmp[16 + 1];
47865
47866 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
47867
47868@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
47869 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
47870 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
47871 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
47872- seq_printf(seq, "Vendor info : %s\n",
47873- chtostr(tmp, (u8 *) (work32 + 2), 16));
47874- seq_printf(seq, "Product info : %s\n",
47875- chtostr(tmp, (u8 *) (work32 + 6), 16));
47876- seq_printf(seq, "Description : %s\n",
47877- chtostr(tmp, (u8 *) (work32 + 10), 16));
47878- seq_printf(seq, "Product rev. : %s\n",
47879- chtostr(tmp, (u8 *) (work32 + 14), 8));
47880+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
47881+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
47882+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
47883+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
47884
47885 seq_printf(seq, "Serial number : ");
47886 print_serial_number(seq, (u8 *) (work32 + 16),
47887@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
47888 u8 pad[256]; // allow up to 256 byte (max) serial number
47889 } result;
47890
47891- char tmp[24 + 1];
47892-
47893 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
47894
47895 if (token < 0) {
47896@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
47897 }
47898
47899 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
47900- seq_printf(seq, "Module name : %s\n",
47901- chtostr(tmp, result.module_name, 24));
47902- seq_printf(seq, "Module revision : %s\n",
47903- chtostr(tmp, result.module_rev, 8));
47904+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
47905+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
47906
47907 seq_printf(seq, "Serial number : ");
47908 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
47909@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
47910 u8 instance_number[4];
47911 } result;
47912
47913- char tmp[64 + 1];
47914-
47915 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
47916
47917 if (token < 0) {
47918@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
47919 return 0;
47920 }
47921
47922- seq_printf(seq, "Device name : %s\n",
47923- chtostr(tmp, result.device_name, 64));
47924- seq_printf(seq, "Service name : %s\n",
47925- chtostr(tmp, result.service_name, 64));
47926- seq_printf(seq, "Physical name : %s\n",
47927- chtostr(tmp, result.physical_location, 64));
47928- seq_printf(seq, "Instance number : %s\n",
47929- chtostr(tmp, result.instance_number, 4));
47930+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
47931+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
47932+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
47933+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
47934
47935 return 0;
47936 }
47937@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
47938 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
47939 {
47940 struct i2o_device *d = (struct i2o_device *)seq->private;
47941- static u32 work32[12];
47942- static u16 *work16 = (u16 *) work32;
47943- static u8 *work8 = (u8 *) work32;
47944+ u32 work32[12];
47945+ u16 *work16 = (u16 *) work32;
47946+ u8 *work8 = (u8 *) work32;
47947 int token;
47948
47949 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
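
[Editor's note] Two independent fixes run through the i2o_proc.c hunks above: dropping "static" from the scratch arrays removes a race between concurrent /proc readers sharing one buffer, and printf precision ("%.Ns") prints at most N bytes without requiring NUL termination, which makes the chtostr() bounce buffer unnecessary. A sketch with an invented helper:

	#include <linux/seq_file.h>
	#include <linux/types.h>

	static void show_module_name(struct seq_file *seq, const u8 name[24])
	{
		/* reads at most 24 bytes even if name lacks a NUL */
		seq_printf(seq, "Module name : %.24s\n", name);
	}
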
47950diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
47951index 92752fb..a7494f6 100644
47952--- a/drivers/message/i2o/iop.c
47953+++ b/drivers/message/i2o/iop.c
47954@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
47955
47956 spin_lock_irqsave(&c->context_list_lock, flags);
47957
47958- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
47959- atomic_inc(&c->context_list_counter);
47960+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
47961+ atomic_inc_unchecked(&c->context_list_counter);
47962
47963- entry->context = atomic_read(&c->context_list_counter);
47964+ entry->context = atomic_read_unchecked(&c->context_list_counter);
47965
47966 list_add(&entry->list, &c->context_list);
47967
47968@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
47969
47970 #if BITS_PER_LONG == 64
47971 spin_lock_init(&c->context_list_lock);
47972- atomic_set(&c->context_list_counter, 0);
47973+ atomic_set_unchecked(&c->context_list_counter, 0);
47974 INIT_LIST_HEAD(&c->context_list);
47975 #endif
47976
47977diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
47978index d1a22aa..d0f7bf7 100644
47979--- a/drivers/mfd/ab8500-debugfs.c
47980+++ b/drivers/mfd/ab8500-debugfs.c
47981@@ -100,7 +100,7 @@ static int irq_last;
47982 static u32 *irq_count;
47983 static int num_irqs;
47984
47985-static struct device_attribute **dev_attr;
47986+static device_attribute_no_const **dev_attr;
47987 static char **event_name;
47988
47989 static u8 avg_sample = SAMPLE_16;
47990diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
47991index a83eed5..62a58a9 100644
47992--- a/drivers/mfd/max8925-i2c.c
47993+++ b/drivers/mfd/max8925-i2c.c
47994@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
47995 const struct i2c_device_id *id)
47996 {
47997 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
47998- static struct max8925_chip *chip;
47999+ struct max8925_chip *chip;
48000 struct device_node *node = client->dev.of_node;
48001
48002 if (node && !pdata) {
48003diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
48004index f9e42ea..614d240 100644
48005--- a/drivers/mfd/tps65910.c
48006+++ b/drivers/mfd/tps65910.c
48007@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
48008 struct tps65910_platform_data *pdata)
48009 {
48010 int ret = 0;
48011- static struct regmap_irq_chip *tps6591x_irqs_chip;
48012+ struct regmap_irq_chip *tps6591x_irqs_chip;
48013
48014 if (!irq) {
48015 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
48016diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
48017index 596b1f6..5b6ab74 100644
48018--- a/drivers/mfd/twl4030-irq.c
48019+++ b/drivers/mfd/twl4030-irq.c
48020@@ -34,6 +34,7 @@
48021 #include <linux/of.h>
48022 #include <linux/irqdomain.h>
48023 #include <linux/i2c/twl.h>
48024+#include <asm/pgtable.h>
48025
48026 #include "twl-core.h"
48027
48028@@ -725,10 +726,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
48029 * Install an irq handler for each of the SIH modules;
48030 * clone dummy irq_chip since PIH can't *do* anything
48031 */
48032- twl4030_irq_chip = dummy_irq_chip;
48033- twl4030_irq_chip.name = "twl4030";
48034+ pax_open_kernel();
48035+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
48036+ *(const char **)&twl4030_irq_chip.name = "twl4030";
48037
48038- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
48039+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
48040+ pax_close_kernel();
48041
48042 for (i = irq_base; i < irq_end; i++) {
48043 irq_set_chip_and_handler(i, &twl4030_irq_chip,
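
[Editor's note] The pax_open_kernel()/pax_close_kernel() bracket above (and in the c2port, sunxi_sid, mmci and sdhci hunks further down) is the PaX way to patch data that KERNEXEC/constify has made read-only: lift write protection, write through a non-const lvalue, restore protection. Reduced sketch; the struct and names are invented:

	struct hw_ops {
		void (*probe)(void);
	} __do_const;	/* constified into read-only memory */

	static struct hw_ops ops;

	static void set_probe(void (*fn)(void))
	{
		pax_open_kernel();
		*(void **)&ops.probe = fn;
		pax_close_kernel();
	}
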
48044diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
48045index 464419b..64bae8d 100644
48046--- a/drivers/misc/c2port/core.c
48047+++ b/drivers/misc/c2port/core.c
48048@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
48049 goto error_idr_alloc;
48050 c2dev->id = ret;
48051
48052- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
48053+ pax_open_kernel();
48054+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
48055+ pax_close_kernel();
48056
48057 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
48058 "c2port%d", c2dev->id);
48059diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
48060index 3f2b625..945e179 100644
48061--- a/drivers/misc/eeprom/sunxi_sid.c
48062+++ b/drivers/misc/eeprom/sunxi_sid.c
48063@@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
48064
48065 platform_set_drvdata(pdev, sid_data);
48066
48067- sid_bin_attr.size = sid_data->keysize;
48068+ pax_open_kernel();
48069+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
48070+ pax_close_kernel();
48071 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
48072 return -ENODEV;
48073
48074diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
48075index 36f5d52..32311c3 100644
48076--- a/drivers/misc/kgdbts.c
48077+++ b/drivers/misc/kgdbts.c
48078@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
48079 char before[BREAK_INSTR_SIZE];
48080 char after[BREAK_INSTR_SIZE];
48081
48082- probe_kernel_read(before, (char *)kgdbts_break_test,
48083+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
48084 BREAK_INSTR_SIZE);
48085 init_simple_test();
48086 ts.tst = plant_and_detach_test;
48087@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
48088 /* Activate test with initial breakpoint */
48089 if (!is_early)
48090 kgdb_breakpoint();
48091- probe_kernel_read(after, (char *)kgdbts_break_test,
48092+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
48093 BREAK_INSTR_SIZE);
48094 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
48095 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
48096diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
48097index 3ef4627..8d00486 100644
48098--- a/drivers/misc/lis3lv02d/lis3lv02d.c
48099+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
48100@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
48101 * the lid is closed. This leads to interrupts as soon as a little move
48102 * is done.
48103 */
48104- atomic_inc(&lis3->count);
48105+ atomic_inc_unchecked(&lis3->count);
48106
48107 wake_up_interruptible(&lis3->misc_wait);
48108 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
48109@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
48110 if (lis3->pm_dev)
48111 pm_runtime_get_sync(lis3->pm_dev);
48112
48113- atomic_set(&lis3->count, 0);
48114+ atomic_set_unchecked(&lis3->count, 0);
48115 return 0;
48116 }
48117
48118@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
48119 add_wait_queue(&lis3->misc_wait, &wait);
48120 while (true) {
48121 set_current_state(TASK_INTERRUPTIBLE);
48122- data = atomic_xchg(&lis3->count, 0);
48123+ data = atomic_xchg_unchecked(&lis3->count, 0);
48124 if (data)
48125 break;
48126
48127@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
48128 struct lis3lv02d, miscdev);
48129
48130 poll_wait(file, &lis3->misc_wait, wait);
48131- if (atomic_read(&lis3->count))
48132+ if (atomic_read_unchecked(&lis3->count))
48133 return POLLIN | POLLRDNORM;
48134 return 0;
48135 }
48136diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
48137index c439c82..1f20f57 100644
48138--- a/drivers/misc/lis3lv02d/lis3lv02d.h
48139+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
48140@@ -297,7 +297,7 @@ struct lis3lv02d {
48141 struct input_polled_dev *idev; /* input device */
48142 struct platform_device *pdev; /* platform device */
48143 struct regulator_bulk_data regulators[2];
48144- atomic_t count; /* interrupt count after last read */
48145+ atomic_unchecked_t count; /* interrupt count after last read */
48146 union axis_conversion ac; /* hw -> logical axis */
48147 int mapped_btns[3];
48148
48149diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
48150index 2f30bad..c4c13d0 100644
48151--- a/drivers/misc/sgi-gru/gruhandles.c
48152+++ b/drivers/misc/sgi-gru/gruhandles.c
48153@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
48154 unsigned long nsec;
48155
48156 nsec = CLKS2NSEC(clks);
48157- atomic_long_inc(&mcs_op_statistics[op].count);
48158- atomic_long_add(nsec, &mcs_op_statistics[op].total);
48159+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
48160+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
48161 if (mcs_op_statistics[op].max < nsec)
48162 mcs_op_statistics[op].max = nsec;
48163 }
48164diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
48165index 4f76359..cdfcb2e 100644
48166--- a/drivers/misc/sgi-gru/gruprocfs.c
48167+++ b/drivers/misc/sgi-gru/gruprocfs.c
48168@@ -32,9 +32,9 @@
48169
48170 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
48171
48172-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
48173+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
48174 {
48175- unsigned long val = atomic_long_read(v);
48176+ unsigned long val = atomic_long_read_unchecked(v);
48177
48178 seq_printf(s, "%16lu %s\n", val, id);
48179 }
48180@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
48181
48182 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
48183 for (op = 0; op < mcsop_last; op++) {
48184- count = atomic_long_read(&mcs_op_statistics[op].count);
48185- total = atomic_long_read(&mcs_op_statistics[op].total);
48186+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
48187+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
48188 max = mcs_op_statistics[op].max;
48189 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
48190 count ? total / count : 0, max);
48191diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
48192index 5c3ce24..4915ccb 100644
48193--- a/drivers/misc/sgi-gru/grutables.h
48194+++ b/drivers/misc/sgi-gru/grutables.h
48195@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
48196 * GRU statistics.
48197 */
48198 struct gru_stats_s {
48199- atomic_long_t vdata_alloc;
48200- atomic_long_t vdata_free;
48201- atomic_long_t gts_alloc;
48202- atomic_long_t gts_free;
48203- atomic_long_t gms_alloc;
48204- atomic_long_t gms_free;
48205- atomic_long_t gts_double_allocate;
48206- atomic_long_t assign_context;
48207- atomic_long_t assign_context_failed;
48208- atomic_long_t free_context;
48209- atomic_long_t load_user_context;
48210- atomic_long_t load_kernel_context;
48211- atomic_long_t lock_kernel_context;
48212- atomic_long_t unlock_kernel_context;
48213- atomic_long_t steal_user_context;
48214- atomic_long_t steal_kernel_context;
48215- atomic_long_t steal_context_failed;
48216- atomic_long_t nopfn;
48217- atomic_long_t asid_new;
48218- atomic_long_t asid_next;
48219- atomic_long_t asid_wrap;
48220- atomic_long_t asid_reuse;
48221- atomic_long_t intr;
48222- atomic_long_t intr_cbr;
48223- atomic_long_t intr_tfh;
48224- atomic_long_t intr_spurious;
48225- atomic_long_t intr_mm_lock_failed;
48226- atomic_long_t call_os;
48227- atomic_long_t call_os_wait_queue;
48228- atomic_long_t user_flush_tlb;
48229- atomic_long_t user_unload_context;
48230- atomic_long_t user_exception;
48231- atomic_long_t set_context_option;
48232- atomic_long_t check_context_retarget_intr;
48233- atomic_long_t check_context_unload;
48234- atomic_long_t tlb_dropin;
48235- atomic_long_t tlb_preload_page;
48236- atomic_long_t tlb_dropin_fail_no_asid;
48237- atomic_long_t tlb_dropin_fail_upm;
48238- atomic_long_t tlb_dropin_fail_invalid;
48239- atomic_long_t tlb_dropin_fail_range_active;
48240- atomic_long_t tlb_dropin_fail_idle;
48241- atomic_long_t tlb_dropin_fail_fmm;
48242- atomic_long_t tlb_dropin_fail_no_exception;
48243- atomic_long_t tfh_stale_on_fault;
48244- atomic_long_t mmu_invalidate_range;
48245- atomic_long_t mmu_invalidate_page;
48246- atomic_long_t flush_tlb;
48247- atomic_long_t flush_tlb_gru;
48248- atomic_long_t flush_tlb_gru_tgh;
48249- atomic_long_t flush_tlb_gru_zero_asid;
48250+ atomic_long_unchecked_t vdata_alloc;
48251+ atomic_long_unchecked_t vdata_free;
48252+ atomic_long_unchecked_t gts_alloc;
48253+ atomic_long_unchecked_t gts_free;
48254+ atomic_long_unchecked_t gms_alloc;
48255+ atomic_long_unchecked_t gms_free;
48256+ atomic_long_unchecked_t gts_double_allocate;
48257+ atomic_long_unchecked_t assign_context;
48258+ atomic_long_unchecked_t assign_context_failed;
48259+ atomic_long_unchecked_t free_context;
48260+ atomic_long_unchecked_t load_user_context;
48261+ atomic_long_unchecked_t load_kernel_context;
48262+ atomic_long_unchecked_t lock_kernel_context;
48263+ atomic_long_unchecked_t unlock_kernel_context;
48264+ atomic_long_unchecked_t steal_user_context;
48265+ atomic_long_unchecked_t steal_kernel_context;
48266+ atomic_long_unchecked_t steal_context_failed;
48267+ atomic_long_unchecked_t nopfn;
48268+ atomic_long_unchecked_t asid_new;
48269+ atomic_long_unchecked_t asid_next;
48270+ atomic_long_unchecked_t asid_wrap;
48271+ atomic_long_unchecked_t asid_reuse;
48272+ atomic_long_unchecked_t intr;
48273+ atomic_long_unchecked_t intr_cbr;
48274+ atomic_long_unchecked_t intr_tfh;
48275+ atomic_long_unchecked_t intr_spurious;
48276+ atomic_long_unchecked_t intr_mm_lock_failed;
48277+ atomic_long_unchecked_t call_os;
48278+ atomic_long_unchecked_t call_os_wait_queue;
48279+ atomic_long_unchecked_t user_flush_tlb;
48280+ atomic_long_unchecked_t user_unload_context;
48281+ atomic_long_unchecked_t user_exception;
48282+ atomic_long_unchecked_t set_context_option;
48283+ atomic_long_unchecked_t check_context_retarget_intr;
48284+ atomic_long_unchecked_t check_context_unload;
48285+ atomic_long_unchecked_t tlb_dropin;
48286+ atomic_long_unchecked_t tlb_preload_page;
48287+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
48288+ atomic_long_unchecked_t tlb_dropin_fail_upm;
48289+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
48290+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
48291+ atomic_long_unchecked_t tlb_dropin_fail_idle;
48292+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
48293+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
48294+ atomic_long_unchecked_t tfh_stale_on_fault;
48295+ atomic_long_unchecked_t mmu_invalidate_range;
48296+ atomic_long_unchecked_t mmu_invalidate_page;
48297+ atomic_long_unchecked_t flush_tlb;
48298+ atomic_long_unchecked_t flush_tlb_gru;
48299+ atomic_long_unchecked_t flush_tlb_gru_tgh;
48300+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
48301
48302- atomic_long_t copy_gpa;
48303- atomic_long_t read_gpa;
48304+ atomic_long_unchecked_t copy_gpa;
48305+ atomic_long_unchecked_t read_gpa;
48306
48307- atomic_long_t mesq_receive;
48308- atomic_long_t mesq_receive_none;
48309- atomic_long_t mesq_send;
48310- atomic_long_t mesq_send_failed;
48311- atomic_long_t mesq_noop;
48312- atomic_long_t mesq_send_unexpected_error;
48313- atomic_long_t mesq_send_lb_overflow;
48314- atomic_long_t mesq_send_qlimit_reached;
48315- atomic_long_t mesq_send_amo_nacked;
48316- atomic_long_t mesq_send_put_nacked;
48317- atomic_long_t mesq_page_overflow;
48318- atomic_long_t mesq_qf_locked;
48319- atomic_long_t mesq_qf_noop_not_full;
48320- atomic_long_t mesq_qf_switch_head_failed;
48321- atomic_long_t mesq_qf_unexpected_error;
48322- atomic_long_t mesq_noop_unexpected_error;
48323- atomic_long_t mesq_noop_lb_overflow;
48324- atomic_long_t mesq_noop_qlimit_reached;
48325- atomic_long_t mesq_noop_amo_nacked;
48326- atomic_long_t mesq_noop_put_nacked;
48327- atomic_long_t mesq_noop_page_overflow;
48328+ atomic_long_unchecked_t mesq_receive;
48329+ atomic_long_unchecked_t mesq_receive_none;
48330+ atomic_long_unchecked_t mesq_send;
48331+ atomic_long_unchecked_t mesq_send_failed;
48332+ atomic_long_unchecked_t mesq_noop;
48333+ atomic_long_unchecked_t mesq_send_unexpected_error;
48334+ atomic_long_unchecked_t mesq_send_lb_overflow;
48335+ atomic_long_unchecked_t mesq_send_qlimit_reached;
48336+ atomic_long_unchecked_t mesq_send_amo_nacked;
48337+ atomic_long_unchecked_t mesq_send_put_nacked;
48338+ atomic_long_unchecked_t mesq_page_overflow;
48339+ atomic_long_unchecked_t mesq_qf_locked;
48340+ atomic_long_unchecked_t mesq_qf_noop_not_full;
48341+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
48342+ atomic_long_unchecked_t mesq_qf_unexpected_error;
48343+ atomic_long_unchecked_t mesq_noop_unexpected_error;
48344+ atomic_long_unchecked_t mesq_noop_lb_overflow;
48345+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
48346+ atomic_long_unchecked_t mesq_noop_amo_nacked;
48347+ atomic_long_unchecked_t mesq_noop_put_nacked;
48348+ atomic_long_unchecked_t mesq_noop_page_overflow;
48349
48350 };
48351
48352@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
48353 tghop_invalidate, mcsop_last};
48354
48355 struct mcs_op_statistic {
48356- atomic_long_t count;
48357- atomic_long_t total;
48358+ atomic_long_unchecked_t count;
48359+ atomic_long_unchecked_t total;
48360 unsigned long max;
48361 };
48362
48363@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
48364
48365 #define STAT(id) do { \
48366 if (gru_options & OPT_STATS) \
48367- atomic_long_inc(&gru_stats.id); \
48368+ atomic_long_inc_unchecked(&gru_stats.id); \
48369 } while (0)
48370
48371 #ifdef CONFIG_SGI_GRU_DEBUG
48372diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
48373index c862cd4..0d176fe 100644
48374--- a/drivers/misc/sgi-xp/xp.h
48375+++ b/drivers/misc/sgi-xp/xp.h
48376@@ -288,7 +288,7 @@ struct xpc_interface {
48377 xpc_notify_func, void *);
48378 void (*received) (short, int, void *);
48379 enum xp_retval (*partid_to_nasids) (short, void *);
48380-};
48381+} __no_const;
48382
48383 extern struct xpc_interface xpc_interface;
48384
48385diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
48386index 01be66d..e3a0c7e 100644
48387--- a/drivers/misc/sgi-xp/xp_main.c
48388+++ b/drivers/misc/sgi-xp/xp_main.c
48389@@ -78,13 +78,13 @@ xpc_notloaded(void)
48390 }
48391
48392 struct xpc_interface xpc_interface = {
48393- (void (*)(int))xpc_notloaded,
48394- (void (*)(int))xpc_notloaded,
48395- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
48396- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
48397+ .connect = (void (*)(int))xpc_notloaded,
48398+ .disconnect = (void (*)(int))xpc_notloaded,
48399+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
48400+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
48401 void *))xpc_notloaded,
48402- (void (*)(short, int, void *))xpc_notloaded,
48403- (enum xp_retval(*)(short, void *))xpc_notloaded
48404+ .received = (void (*)(short, int, void *))xpc_notloaded,
48405+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
48406 };
48407 EXPORT_SYMBOL_GPL(xpc_interface);
48408
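
[Editor's note] The xpc_interface rewrite above swaps positional initializers for designated ones; positional initializers bind by field order and mis-bind silently if the struct is ever rearranged. A sketch with invented names:

	struct iface {
		void (*connect)(int);
		void (*disconnect)(int);
	};

	static void stub(int ch) { (void)ch; }

	static struct iface iface_tbl = {
		.connect    = stub,	/* binds by name, not position */
		.disconnect = stub,
	};
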
48409diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
48410index b94d5f7..7f494c5 100644
48411--- a/drivers/misc/sgi-xp/xpc.h
48412+++ b/drivers/misc/sgi-xp/xpc.h
48413@@ -835,6 +835,7 @@ struct xpc_arch_operations {
48414 void (*received_payload) (struct xpc_channel *, void *);
48415 void (*notify_senders_of_disconnect) (struct xpc_channel *);
48416 };
48417+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
48418
48419 /* struct xpc_partition act_state values (for XPC HB) */
48420
48421@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
48422 /* found in xpc_main.c */
48423 extern struct device *xpc_part;
48424 extern struct device *xpc_chan;
48425-extern struct xpc_arch_operations xpc_arch_ops;
48426+extern xpc_arch_operations_no_const xpc_arch_ops;
48427 extern int xpc_disengage_timelimit;
48428 extern int xpc_disengage_timedout;
48429 extern int xpc_activate_IRQ_rcvd;
48430diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
48431index 82dc574..8539ab2 100644
48432--- a/drivers/misc/sgi-xp/xpc_main.c
48433+++ b/drivers/misc/sgi-xp/xpc_main.c
48434@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
48435 .notifier_call = xpc_system_die,
48436 };
48437
48438-struct xpc_arch_operations xpc_arch_ops;
48439+xpc_arch_operations_no_const xpc_arch_ops;
48440
48441 /*
48442 * Timer function to enforce the timelimit on the partition disengage.
48443@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
48444
48445 if (((die_args->trapnr == X86_TRAP_MF) ||
48446 (die_args->trapnr == X86_TRAP_XF)) &&
48447- !user_mode_vm(die_args->regs))
48448+ !user_mode(die_args->regs))
48449 xpc_die_deactivate();
48450
48451 break;
48452diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
48453index 452782b..0c10e40 100644
48454--- a/drivers/mmc/card/block.c
48455+++ b/drivers/mmc/card/block.c
48456@@ -574,7 +574,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
48457 if (idata->ic.postsleep_min_us)
48458 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
48459
48460- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
48461+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
48462 err = -EFAULT;
48463 goto cmd_rel_host;
48464 }
48465diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
48466index f51b5ba..86614a7 100644
48467--- a/drivers/mmc/core/mmc_ops.c
48468+++ b/drivers/mmc/core/mmc_ops.c
48469@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
48470 void *data_buf;
48471 int is_on_stack;
48472
48473- is_on_stack = object_is_on_stack(buf);
48474+ is_on_stack = object_starts_on_stack(buf);
48475 if (is_on_stack) {
48476 /*
48477 * dma onto stack is unsafe/nonportable, but callers to this
48478diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
48479index 738fa24..1568451 100644
48480--- a/drivers/mmc/host/dw_mmc.h
48481+++ b/drivers/mmc/host/dw_mmc.h
48482@@ -257,5 +257,5 @@ struct dw_mci_drv_data {
48483 int (*parse_dt)(struct dw_mci *host);
48484 int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
48485 struct dw_mci_tuning_data *tuning_data);
48486-};
48487+} __do_const;
48488 #endif /* _DW_MMC_H_ */
48489diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
48490index 249ab80..9314ce1 100644
48491--- a/drivers/mmc/host/mmci.c
48492+++ b/drivers/mmc/host/mmci.c
48493@@ -1507,7 +1507,9 @@ static int mmci_probe(struct amba_device *dev,
48494 mmc->caps |= MMC_CAP_CMD23;
48495
48496 if (variant->busy_detect) {
48497- mmci_ops.card_busy = mmci_card_busy;
48498+ pax_open_kernel();
48499+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
48500+ pax_close_kernel();
48501 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
48502 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
48503 mmc->max_busy_timeout = 0;
48504diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
48505index ccec0e3..199f9ce 100644
48506--- a/drivers/mmc/host/sdhci-esdhc-imx.c
48507+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
48508@@ -1034,9 +1034,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
48509 host->mmc->caps |= MMC_CAP_1_8V_DDR;
48510 }
48511
48512- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
48513- sdhci_esdhc_ops.platform_execute_tuning =
48514+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
48515+ pax_open_kernel();
48516+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
48517 esdhc_executing_tuning;
48518+ pax_close_kernel();
48519+ }
48520
48521 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
48522 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
48523diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
48524index fa5954a..56840e5 100644
48525--- a/drivers/mmc/host/sdhci-s3c.c
48526+++ b/drivers/mmc/host/sdhci-s3c.c
48527@@ -584,9 +584,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
48528 * we can use overriding functions instead of default.
48529 */
48530 if (sc->no_divider) {
48531- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
48532- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
48533- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
48534+ pax_open_kernel();
48535+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
48536+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
48537+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
48538+ pax_close_kernel();
48539 }
48540
48541 /* It supports additional host capabilities if needed */
48542diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
48543index 423666b..81ff5eb 100644
48544--- a/drivers/mtd/chips/cfi_cmdset_0020.c
48545+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
48546@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
48547 size_t totlen = 0, thislen;
48548 int ret = 0;
48549 size_t buflen = 0;
48550- static char *buffer;
48551+ char *buffer;
48552
48553 if (!ECCBUF_SIZE) {
48554 /* We should fall back to a general writev implementation.
48555diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
48556index 9f2012a..a81c720 100644
48557--- a/drivers/mtd/nand/denali.c
48558+++ b/drivers/mtd/nand/denali.c
48559@@ -24,6 +24,7 @@
48560 #include <linux/slab.h>
48561 #include <linux/mtd/mtd.h>
48562 #include <linux/module.h>
48563+#include <linux/slab.h>
48564
48565 #include "denali.h"
48566
48567diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48568index f638cd8..2cbf586 100644
48569--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48570+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48571@@ -387,7 +387,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
48572
48573 /* first try to map the upper buffer directly */
48574 if (virt_addr_valid(this->upper_buf) &&
48575- !object_is_on_stack(this->upper_buf)) {
48576+ !object_starts_on_stack(this->upper_buf)) {
48577 sg_init_one(sgl, this->upper_buf, this->upper_len);
48578 ret = dma_map_sg(this->dev, sgl, 1, dr);
48579 if (ret == 0)
48580diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
48581index 51b9d6a..52af9a7 100644
48582--- a/drivers/mtd/nftlmount.c
48583+++ b/drivers/mtd/nftlmount.c
48584@@ -24,6 +24,7 @@
48585 #include <asm/errno.h>
48586 #include <linux/delay.h>
48587 #include <linux/slab.h>
48588+#include <linux/sched.h>
48589 #include <linux/mtd/mtd.h>
48590 #include <linux/mtd/nand.h>
48591 #include <linux/mtd/nftl.h>
48592diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
48593index cf49c22..971b133 100644
48594--- a/drivers/mtd/sm_ftl.c
48595+++ b/drivers/mtd/sm_ftl.c
48596@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
48597 #define SM_CIS_VENDOR_OFFSET 0x59
48598 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
48599 {
48600- struct attribute_group *attr_group;
48601+ attribute_group_no_const *attr_group;
48602 struct attribute **attributes;
48603 struct sm_sysfs_attribute *vendor_attribute;
48604 char *vendor;
48605diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
48606index 5ab3c18..5c3a836 100644
48607--- a/drivers/net/bonding/bond_netlink.c
48608+++ b/drivers/net/bonding/bond_netlink.c
48609@@ -542,7 +542,7 @@ nla_put_failure:
48610 return -EMSGSIZE;
48611 }
48612
48613-struct rtnl_link_ops bond_link_ops __read_mostly = {
48614+struct rtnl_link_ops bond_link_ops = {
48615 .kind = "bond",
48616 .priv_size = sizeof(struct bonding),
48617 .setup = bond_setup,
48618diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
48619index 4168822..f38eeddf 100644
48620--- a/drivers/net/can/Kconfig
48621+++ b/drivers/net/can/Kconfig
48622@@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3
48623
48624 config CAN_FLEXCAN
48625 tristate "Support for Freescale FLEXCAN based chips"
48626- depends on ARM || PPC
48627+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
48628 ---help---
48629 Say Y here if you want to support for Freescale FlexCAN.
48630
48631diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
48632index 1d162cc..b546a75 100644
48633--- a/drivers/net/ethernet/8390/ax88796.c
48634+++ b/drivers/net/ethernet/8390/ax88796.c
48635@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
48636 if (ax->plat->reg_offsets)
48637 ei_local->reg_offset = ax->plat->reg_offsets;
48638 else {
48639+ resource_size_t _mem_size = mem_size;
48640+ do_div(_mem_size, 0x18);
48641 ei_local->reg_offset = ax->reg_offsets;
48642 for (ret = 0; ret < 0x18; ret++)
48643- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
48644+ ax->reg_offsets[ret] = _mem_size * ret;
48645 }
48646
48647 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
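
[Editor's note] The ax88796 hunk above replaces a per-iteration division with one do_div() call. resource_size_t can be 64 bits wide on a 32-bit kernel, where a plain "/" would pull in the libgcc helper __udivdi3 that the kernel does not link; do_div(n, base) divides n in place and returns the remainder. Sketch, helper name invented:

	#include <asm/div64.h>
	#include <linux/types.h>

	static void fill_offsets(u32 *off, u64 mem_size)
	{
		u64 step = mem_size;
		int i;

		do_div(step, 0x18);	/* step = mem_size / 0x18 */
		for (i = 0; i < 0x18; i++)
			off[i] = step * i;
	}
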
48648diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
48649index 7330681..7e9e463 100644
48650--- a/drivers/net/ethernet/altera/altera_tse_main.c
48651+++ b/drivers/net/ethernet/altera/altera_tse_main.c
48652@@ -1182,7 +1182,7 @@ static int tse_shutdown(struct net_device *dev)
48653 return 0;
48654 }
48655
48656-static struct net_device_ops altera_tse_netdev_ops = {
48657+static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
48658 .ndo_open = tse_open,
48659 .ndo_stop = tse_shutdown,
48660 .ndo_start_xmit = tse_start_xmit,
48661@@ -1439,11 +1439,13 @@ static int altera_tse_probe(struct platform_device *pdev)
48662 ndev->netdev_ops = &altera_tse_netdev_ops;
48663 altera_tse_set_ethtool_ops(ndev);
48664
48665+ pax_open_kernel();
48666 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
48667
48668 if (priv->hash_filter)
48669 altera_tse_netdev_ops.ndo_set_rx_mode =
48670 tse_set_rx_mode_hashfilter;
48671+ pax_close_kernel();
48672
48673 /* Scatter/gather IO is not supported,
48674 * so it is turned off
48675diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48676index bf462ee8..18b8375 100644
48677--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48678+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48679@@ -986,14 +986,14 @@ do { \
48680 * operations, everything works on mask values.
48681 */
48682 #define XMDIO_READ(_pdata, _mmd, _reg) \
48683- ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
48684+ ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \
48685 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
48686
48687 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
48688 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
48689
48690 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
48691- ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
48692+ ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \
48693 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
48694
48695 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
48696diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
48697index 6bb76d5..ded47a8 100644
48698--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
48699+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
48700@@ -273,7 +273,7 @@ static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer,
48701 struct xgbe_prv_data *pdata = filp->private_data;
48702 unsigned int value;
48703
48704- value = pdata->hw_if.read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
48705+ value = pdata->hw_if->read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
48706 pdata->debugfs_xpcs_reg);
48707
48708 return xgbe_common_read(buffer, count, ppos, value);
48709@@ -291,7 +291,7 @@ static ssize_t xpcs_reg_value_write(struct file *filp,
48710 if (len < 0)
48711 return len;
48712
48713- pdata->hw_if.write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
48714+ pdata->hw_if->write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
48715 pdata->debugfs_xpcs_reg, value);
48716
48717 return len;
48718diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48719index 6f1c859..e96ac1a 100644
48720--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48721+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48722@@ -236,7 +236,7 @@ err_ring:
48723
48724 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
48725 {
48726- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48727+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48728 struct xgbe_channel *channel;
48729 struct xgbe_ring *ring;
48730 struct xgbe_ring_data *rdata;
48731@@ -277,7 +277,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
48732
48733 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
48734 {
48735- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48736+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48737 struct xgbe_channel *channel;
48738 struct xgbe_ring *ring;
48739 struct xgbe_ring_desc *rdesc;
48740@@ -496,7 +496,7 @@ err_out:
48741 static void xgbe_realloc_skb(struct xgbe_channel *channel)
48742 {
48743 struct xgbe_prv_data *pdata = channel->pdata;
48744- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48745+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48746 struct xgbe_ring *ring = channel->rx_ring;
48747 struct xgbe_ring_data *rdata;
48748 struct sk_buff *skb = NULL;
48749@@ -540,17 +540,12 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel)
48750 DBGPR("<--xgbe_realloc_skb\n");
48751 }
48752
48753-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
48754-{
48755- DBGPR("-->xgbe_init_function_ptrs_desc\n");
48756-
48757- desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
48758- desc_if->free_ring_resources = xgbe_free_ring_resources;
48759- desc_if->map_tx_skb = xgbe_map_tx_skb;
48760- desc_if->realloc_skb = xgbe_realloc_skb;
48761- desc_if->unmap_skb = xgbe_unmap_skb;
48762- desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
48763- desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
48764-
48765- DBGPR("<--xgbe_init_function_ptrs_desc\n");
48766-}
48767+const struct xgbe_desc_if default_xgbe_desc_if = {
48768+ .alloc_ring_resources = xgbe_alloc_ring_resources,
48769+ .free_ring_resources = xgbe_free_ring_resources,
48770+ .map_tx_skb = xgbe_map_tx_skb,
48771+ .realloc_skb = xgbe_realloc_skb,
48772+ .unmap_skb = xgbe_unmap_skb,
48773+ .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
48774+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
48775+};
48776diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48777index 002293b..5ced1dd 100644
48778--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48779+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48780@@ -2030,7 +2030,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
48781
48782 static int xgbe_init(struct xgbe_prv_data *pdata)
48783 {
48784- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48785+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48786 int ret;
48787
48788 DBGPR("-->xgbe_init\n");
48789@@ -2096,87 +2096,82 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
48790 return 0;
48791 }
48792
48793-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
48794-{
48795- DBGPR("-->xgbe_init_function_ptrs\n");
48796-
48797- hw_if->tx_complete = xgbe_tx_complete;
48798-
48799- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
48800- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
48801- hw_if->set_addn_mac_addrs = xgbe_set_addn_mac_addrs;
48802- hw_if->set_mac_address = xgbe_set_mac_address;
48803-
48804- hw_if->enable_rx_csum = xgbe_enable_rx_csum;
48805- hw_if->disable_rx_csum = xgbe_disable_rx_csum;
48806-
48807- hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
48808- hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
48809-
48810- hw_if->read_mmd_regs = xgbe_read_mmd_regs;
48811- hw_if->write_mmd_regs = xgbe_write_mmd_regs;
48812-
48813- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
48814- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
48815- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
48816-
48817- hw_if->enable_tx = xgbe_enable_tx;
48818- hw_if->disable_tx = xgbe_disable_tx;
48819- hw_if->enable_rx = xgbe_enable_rx;
48820- hw_if->disable_rx = xgbe_disable_rx;
48821-
48822- hw_if->powerup_tx = xgbe_powerup_tx;
48823- hw_if->powerdown_tx = xgbe_powerdown_tx;
48824- hw_if->powerup_rx = xgbe_powerup_rx;
48825- hw_if->powerdown_rx = xgbe_powerdown_rx;
48826-
48827- hw_if->pre_xmit = xgbe_pre_xmit;
48828- hw_if->dev_read = xgbe_dev_read;
48829- hw_if->enable_int = xgbe_enable_int;
48830- hw_if->disable_int = xgbe_disable_int;
48831- hw_if->init = xgbe_init;
48832- hw_if->exit = xgbe_exit;
48833+const struct xgbe_hw_if default_xgbe_hw_if = {
48834+ .tx_complete = xgbe_tx_complete,
48835+
48836+ .set_promiscuous_mode = xgbe_set_promiscuous_mode,
48837+ .set_all_multicast_mode = xgbe_set_all_multicast_mode,
48838+ .set_addn_mac_addrs = xgbe_set_addn_mac_addrs,
48839+ .set_mac_address = xgbe_set_mac_address,
48840+
48841+ .enable_rx_csum = xgbe_enable_rx_csum,
48842+ .disable_rx_csum = xgbe_disable_rx_csum,
48843+
48844+ .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
48845+ .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
48846+
48847+ .read_mmd_regs = xgbe_read_mmd_regs,
48848+ .write_mmd_regs = xgbe_write_mmd_regs,
48849+
48850+ .set_gmii_speed = xgbe_set_gmii_speed,
48851+ .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
48852+ .set_xgmii_speed = xgbe_set_xgmii_speed,
48853+
48854+ .enable_tx = xgbe_enable_tx,
48855+ .disable_tx = xgbe_disable_tx,
48856+ .enable_rx = xgbe_enable_rx,
48857+ .disable_rx = xgbe_disable_rx,
48858+
48859+ .powerup_tx = xgbe_powerup_tx,
48860+ .powerdown_tx = xgbe_powerdown_tx,
48861+ .powerup_rx = xgbe_powerup_rx,
48862+ .powerdown_rx = xgbe_powerdown_rx,
48863+
48864+ .pre_xmit = xgbe_pre_xmit,
48865+ .dev_read = xgbe_dev_read,
48866+ .enable_int = xgbe_enable_int,
48867+ .disable_int = xgbe_disable_int,
48868+ .init = xgbe_init,
48869+ .exit = xgbe_exit,
48870
48871 /* Descriptor related Sequences have to be initialized here */
48872- hw_if->tx_desc_init = xgbe_tx_desc_init;
48873- hw_if->rx_desc_init = xgbe_rx_desc_init;
48874- hw_if->tx_desc_reset = xgbe_tx_desc_reset;
48875- hw_if->rx_desc_reset = xgbe_rx_desc_reset;
48876- hw_if->is_last_desc = xgbe_is_last_desc;
48877- hw_if->is_context_desc = xgbe_is_context_desc;
48878+ .tx_desc_init = xgbe_tx_desc_init,
48879+ .rx_desc_init = xgbe_rx_desc_init,
48880+ .tx_desc_reset = xgbe_tx_desc_reset,
48881+ .rx_desc_reset = xgbe_rx_desc_reset,
48882+ .is_last_desc = xgbe_is_last_desc,
48883+ .is_context_desc = xgbe_is_context_desc,
48884
48885 /* For FLOW ctrl */
48886- hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
48887- hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
48888+ .config_tx_flow_control = xgbe_config_tx_flow_control,
48889+ .config_rx_flow_control = xgbe_config_rx_flow_control,
48890
48891 /* For RX coalescing */
48892- hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
48893- hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
48894- hw_if->usec_to_riwt = xgbe_usec_to_riwt;
48895- hw_if->riwt_to_usec = xgbe_riwt_to_usec;
48896+ .config_rx_coalesce = xgbe_config_rx_coalesce,
48897+ .config_tx_coalesce = xgbe_config_tx_coalesce,
48898+ .usec_to_riwt = xgbe_usec_to_riwt,
48899+ .riwt_to_usec = xgbe_riwt_to_usec,
48900
48901 /* For RX and TX threshold config */
48902- hw_if->config_rx_threshold = xgbe_config_rx_threshold;
48903- hw_if->config_tx_threshold = xgbe_config_tx_threshold;
48904+ .config_rx_threshold = xgbe_config_rx_threshold,
48905+ .config_tx_threshold = xgbe_config_tx_threshold,
48906
48907 /* For RX and TX Store and Forward Mode config */
48908- hw_if->config_rsf_mode = xgbe_config_rsf_mode;
48909- hw_if->config_tsf_mode = xgbe_config_tsf_mode;
48910+ .config_rsf_mode = xgbe_config_rsf_mode,
48911+ .config_tsf_mode = xgbe_config_tsf_mode,
48912
48913 /* For TX DMA Operating on Second Frame config */
48914- hw_if->config_osp_mode = xgbe_config_osp_mode;
48915+ .config_osp_mode = xgbe_config_osp_mode,
48916
48917 /* For RX and TX PBL config */
48918- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
48919- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
48920- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
48921- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
48922- hw_if->config_pblx8 = xgbe_config_pblx8;
48923+ .config_rx_pbl_val = xgbe_config_rx_pbl_val,
48924+ .get_rx_pbl_val = xgbe_get_rx_pbl_val,
48925+ .config_tx_pbl_val = xgbe_config_tx_pbl_val,
48926+ .get_tx_pbl_val = xgbe_get_tx_pbl_val,
48927+ .config_pblx8 = xgbe_config_pblx8,
48928
48929 /* For MMC statistics support */
48930- hw_if->tx_mmc_int = xgbe_tx_mmc_int;
48931- hw_if->rx_mmc_int = xgbe_rx_mmc_int;
48932- hw_if->read_mmc_stats = xgbe_read_mmc_stats;
48933-
48934- DBGPR("<--xgbe_init_function_ptrs\n");
48935-}
48936+ .tx_mmc_int = xgbe_tx_mmc_int,
48937+ .rx_mmc_int = xgbe_rx_mmc_int,
48938+ .read_mmc_stats = xgbe_read_mmc_stats,
48939+};
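
[Editor's note] The xgbe hunks in this stretch all follow one transformation: instead of each device filling a private hw_if/desc_if copy through an init_function_ptrs helper, every device points at a single shared const table that can live in read-only memory. Reduced sketch; the types are invented:

	struct dev_if {
		int (*start)(void);
	};

	static int hw_start(void) { return 0; }

	static const struct dev_if default_if = {
		.start = hw_start,
	};

	struct priv {
		const struct dev_if *hw_if;	/* was: struct dev_if hw_if; */
	};

	static void attach(struct priv *p)
	{
		p->hw_if = &default_if;
	}
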
48940diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48941index cfe3d93..07a78ae 100644
48942--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48943+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48944@@ -153,7 +153,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
48945
48946 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
48947 {
48948- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48949+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48950 struct xgbe_channel *channel;
48951 unsigned int i;
48952
48953@@ -170,7 +170,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
48954
48955 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
48956 {
48957- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48958+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48959 struct xgbe_channel *channel;
48960 unsigned int i;
48961
48962@@ -188,7 +188,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
48963 static irqreturn_t xgbe_isr(int irq, void *data)
48964 {
48965 struct xgbe_prv_data *pdata = data;
48966- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48967+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48968 struct xgbe_channel *channel;
48969 unsigned int dma_isr, dma_ch_isr;
48970 unsigned int mac_isr;
48971@@ -403,7 +403,7 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata)
48972
48973 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
48974 {
48975- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48976+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48977
48978 DBGPR("-->xgbe_init_tx_coalesce\n");
48979
48980@@ -417,7 +417,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
48981
48982 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
48983 {
48984- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48985+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48986
48987 DBGPR("-->xgbe_init_rx_coalesce\n");
48988
48989@@ -431,7 +431,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
48990
48991 static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
48992 {
48993- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48994+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48995 struct xgbe_channel *channel;
48996 struct xgbe_ring *ring;
48997 struct xgbe_ring_data *rdata;
48998@@ -456,7 +456,7 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
48999
49000 static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
49001 {
49002- struct xgbe_desc_if *desc_if = &pdata->desc_if;
49003+ struct xgbe_desc_if *desc_if = pdata->desc_if;
49004 struct xgbe_channel *channel;
49005 struct xgbe_ring *ring;
49006 struct xgbe_ring_data *rdata;
49007@@ -482,7 +482,7 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
49008 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
49009 {
49010 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49011- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49012+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49013 unsigned long flags;
49014
49015 DBGPR("-->xgbe_powerdown\n");
49016@@ -520,7 +520,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
49017 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
49018 {
49019 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49020- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49021+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49022 unsigned long flags;
49023
49024 DBGPR("-->xgbe_powerup\n");
49025@@ -557,7 +557,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
49026
49027 static int xgbe_start(struct xgbe_prv_data *pdata)
49028 {
49029- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49030+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49031 struct net_device *netdev = pdata->netdev;
49032
49033 DBGPR("-->xgbe_start\n");
49034@@ -583,7 +583,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
49035
49036 static void xgbe_stop(struct xgbe_prv_data *pdata)
49037 {
49038- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49039+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49040 struct net_device *netdev = pdata->netdev;
49041
49042 DBGPR("-->xgbe_stop\n");
49043@@ -603,7 +603,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
49044
49045 static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
49046 {
49047- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49048+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49049
49050 DBGPR("-->xgbe_restart_dev\n");
49051
49052@@ -741,8 +741,8 @@ static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb,
49053 static int xgbe_open(struct net_device *netdev)
49054 {
49055 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49056- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49057- struct xgbe_desc_if *desc_if = &pdata->desc_if;
49058+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49059+ struct xgbe_desc_if *desc_if = pdata->desc_if;
49060 int ret;
49061
49062 DBGPR("-->xgbe_open\n");
49063@@ -804,8 +804,8 @@ err_clk:
49064 static int xgbe_close(struct net_device *netdev)
49065 {
49066 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49067- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49068- struct xgbe_desc_if *desc_if = &pdata->desc_if;
49069+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49070+ struct xgbe_desc_if *desc_if = pdata->desc_if;
49071
49072 DBGPR("-->xgbe_close\n");
49073
49074@@ -835,8 +835,8 @@ static int xgbe_close(struct net_device *netdev)
49075 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
49076 {
49077 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49078- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49079- struct xgbe_desc_if *desc_if = &pdata->desc_if;
49080+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49081+ struct xgbe_desc_if *desc_if = pdata->desc_if;
49082 struct xgbe_channel *channel;
49083 struct xgbe_ring *ring;
49084 struct xgbe_packet_data *packet;
49085@@ -903,7 +903,7 @@ tx_netdev_return:
49086 static void xgbe_set_rx_mode(struct net_device *netdev)
49087 {
49088 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49089- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49090+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49091 unsigned int pr_mode, am_mode;
49092
49093 DBGPR("-->xgbe_set_rx_mode\n");
49094@@ -930,7 +930,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
49095 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
49096 {
49097 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49098- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49099+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49100 struct sockaddr *saddr = addr;
49101
49102 DBGPR("-->xgbe_set_mac_address\n");
49103@@ -976,7 +976,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
49104
49105 DBGPR("-->%s\n", __func__);
49106
49107- pdata->hw_if.read_mmc_stats(pdata);
49108+ pdata->hw_if->read_mmc_stats(pdata);
49109
49110 s->rx_packets = pstats->rxframecount_gb;
49111 s->rx_bytes = pstats->rxoctetcount_gb;
49112@@ -1020,7 +1020,7 @@ static int xgbe_set_features(struct net_device *netdev,
49113 netdev_features_t features)
49114 {
49115 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49116- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49117+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49118 unsigned int rxcsum_enabled, rxvlan_enabled;
49119
49120 rxcsum_enabled = !!(pdata->netdev_features & NETIF_F_RXCSUM);
49121@@ -1072,8 +1072,8 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
49122 static int xgbe_tx_poll(struct xgbe_channel *channel)
49123 {
49124 struct xgbe_prv_data *pdata = channel->pdata;
49125- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49126- struct xgbe_desc_if *desc_if = &pdata->desc_if;
49127+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49128+ struct xgbe_desc_if *desc_if = pdata->desc_if;
49129 struct xgbe_ring *ring = channel->tx_ring;
49130 struct xgbe_ring_data *rdata;
49131 struct xgbe_ring_desc *rdesc;
49132@@ -1124,8 +1124,8 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
49133 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
49134 {
49135 struct xgbe_prv_data *pdata = channel->pdata;
49136- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49137- struct xgbe_desc_if *desc_if = &pdata->desc_if;
49138+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49139+ struct xgbe_desc_if *desc_if = pdata->desc_if;
49140 struct xgbe_ring *ring = channel->rx_ring;
49141 struct xgbe_ring_data *rdata;
49142 struct xgbe_packet_data *packet;
49143diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
49144index 8909f2b..719e767 100644
49145--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
49146+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
49147@@ -202,7 +202,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
49148
49149 DBGPR("-->%s\n", __func__);
49150
49151- pdata->hw_if.read_mmc_stats(pdata);
49152+ pdata->hw_if->read_mmc_stats(pdata);
49153 for (i = 0; i < XGBE_STATS_COUNT; i++) {
49154 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
49155 *data++ = *(u64 *)stat;
49156@@ -387,7 +387,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
49157 struct ethtool_coalesce *ec)
49158 {
49159 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49160- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49161+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49162 unsigned int riwt;
49163
49164 DBGPR("-->xgbe_get_coalesce\n");
49165@@ -410,7 +410,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
49166 struct ethtool_coalesce *ec)
49167 {
49168 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49169- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49170+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49171 unsigned int rx_frames, rx_riwt, rx_usecs;
49172 unsigned int tx_frames, tx_usecs;
49173
49174diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
49175index 5a1891f..1b7888e 100644
49176--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
49177+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
49178@@ -210,12 +210,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
49179 DBGPR("<--xgbe_default_config\n");
49180 }
49181
49182-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
49183-{
49184- xgbe_init_function_ptrs_dev(&pdata->hw_if);
49185- xgbe_init_function_ptrs_desc(&pdata->desc_if);
49186-}
49187-
49188 static int xgbe_probe(struct platform_device *pdev)
49189 {
49190 struct xgbe_prv_data *pdata;
49191@@ -306,9 +300,8 @@ static int xgbe_probe(struct platform_device *pdev)
49192 netdev->base_addr = (unsigned long)pdata->xgmac_regs;
49193
49194 /* Set all the function pointers */
49195- xgbe_init_all_fptrs(pdata);
49196- hw_if = &pdata->hw_if;
49197- desc_if = &pdata->desc_if;
49198+ hw_if = pdata->hw_if = &default_xgbe_hw_if;
49199+ desc_if = pdata->desc_if = &default_xgbe_desc_if;
49200
49201 /* Issue software reset to device */
49202 hw_if->exit(pdata);
49203diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
49204index ea7a5d6..d10a742 100644
49205--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
49206+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
49207@@ -128,7 +128,7 @@
49208 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
49209 {
49210 struct xgbe_prv_data *pdata = mii->priv;
49211- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49212+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49213 int mmd_data;
49214
49215 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
49216@@ -145,7 +145,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
49217 u16 mmd_val)
49218 {
49219 struct xgbe_prv_data *pdata = mii->priv;
49220- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49221+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49222 int mmd_data = mmd_val;
49223
49224 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
49225@@ -161,7 +161,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
49226 static void xgbe_adjust_link(struct net_device *netdev)
49227 {
49228 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49229- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49230+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49231 struct phy_device *phydev = pdata->phydev;
49232 unsigned long flags;
49233 int new_state = 0;
49234diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
49235index ab06271..a560fa7 100644
49236--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
49237+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
49238@@ -527,8 +527,8 @@ struct xgbe_prv_data {
49239
49240 int irq_number;
49241
49242- struct xgbe_hw_if hw_if;
49243- struct xgbe_desc_if desc_if;
49244+ const struct xgbe_hw_if *hw_if;
49245+ const struct xgbe_desc_if *desc_if;
49246
49247 /* Rings for Tx/Rx on a DMA channel */
49248 struct xgbe_channel *channel;
49249@@ -611,6 +611,9 @@ struct xgbe_prv_data {
49250 #endif
49251 };
49252
49253+extern const struct xgbe_hw_if default_xgbe_hw_if;
49254+extern const struct xgbe_desc_if default_xgbe_desc_if;
49255+
49256 /* Function prototypes*/
49257
49258 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
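The xgbe hunks above replace the per-device hw_if/desc_if function-pointer tables, previously filled in field by field at probe time, with pointers to a single shared table (default_xgbe_hw_if / default_xgbe_desc_if) that is const-qualified and built with designated initializers. A const table can live in read-only memory, so a kernel write primitive can no longer redirect the driver's function pointers; the bnx2x, vxge, bna, lmc and z8530 hunks in this section apply the same constify-friendly restructuring. A minimal standalone sketch of the pattern (all names hypothetical):

#include <stdio.h>

struct foo_dev;

struct foo_ops {                         /* hypothetical ops table */
    int  (*start)(struct foo_dev *d);
    void (*stop)(struct foo_dev *d);
};

struct foo_dev {
    const struct foo_ops *ops;           /* points at a shared read-only table */
};

static int  foo_start(struct foo_dev *d) { (void)d; puts("start"); return 0; }
static void foo_stop(struct foo_dev *d)  { (void)d; puts("stop");  }

/* One const table shared by every device, instead of a writable
 * per-device copy assigned member by member at probe time. */
static const struct foo_ops default_foo_ops = {
    .start = foo_start,
    .stop  = foo_stop,
};

int main(void)
{
    struct foo_dev dev = { .ops = &default_foo_ops };  /* the "probe" step */
    dev.ops->start(&dev);
    dev.ops->stop(&dev);
    return 0;
}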
49259diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
49260index 571427c..e9fe9e7 100644
49261--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
49262+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
49263@@ -1058,7 +1058,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
49264 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
49265 {
49266 /* RX_MODE controlling object */
49267- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
49268+ bnx2x_init_rx_mode_obj(bp);
49269
49270 /* multicast configuration controlling object */
49271 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
49272diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
49273index b193604..8873bfd 100644
49274--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
49275+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
49276@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
49277 return rc;
49278 }
49279
49280-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
49281- struct bnx2x_rx_mode_obj *o)
49282+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
49283 {
49284 if (CHIP_IS_E1x(bp)) {
49285- o->wait_comp = bnx2x_empty_rx_mode_wait;
49286- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
49287+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
49288+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
49289 } else {
49290- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
49291- o->config_rx_mode = bnx2x_set_rx_mode_e2;
49292+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
49293+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
49294 }
49295 }
49296
49297diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
49298index 718ecd2..2183b2f 100644
49299--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
49300+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
49301@@ -1340,8 +1340,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
49302
49303 /********************* RX MODE ****************/
49304
49305-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
49306- struct bnx2x_rx_mode_obj *o);
49307+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
49308
49309 /**
49310 * bnx2x_config_rx_mode - Send an RX_MODE ramrod according to the provided parameters.
49311diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
49312index 461acca..2b546ba 100644
49313--- a/drivers/net/ethernet/broadcom/tg3.h
49314+++ b/drivers/net/ethernet/broadcom/tg3.h
49315@@ -150,6 +150,7 @@
49316 #define CHIPREV_ID_5750_A0 0x4000
49317 #define CHIPREV_ID_5750_A1 0x4001
49318 #define CHIPREV_ID_5750_A3 0x4003
49319+#define CHIPREV_ID_5750_C1 0x4201
49320 #define CHIPREV_ID_5750_C2 0x4202
49321 #define CHIPREV_ID_5752_A0_HW 0x5000
49322 #define CHIPREV_ID_5752_A0 0x6000
49323diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
49324index 13f9636..228040f 100644
49325--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
49326+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
49327@@ -1690,10 +1690,10 @@ bna_cb_ioceth_reset(void *arg)
49328 }
49329
49330 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
49331- bna_cb_ioceth_enable,
49332- bna_cb_ioceth_disable,
49333- bna_cb_ioceth_hbfail,
49334- bna_cb_ioceth_reset
49335+ .enable_cbfn = bna_cb_ioceth_enable,
49336+ .disable_cbfn = bna_cb_ioceth_disable,
49337+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
49338+ .reset_cbfn = bna_cb_ioceth_reset
49339 };
49340
49341 static void bna_attr_init(struct bna_ioceth *ioceth)
49342diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
49343index 8cffcdf..aadf043 100644
49344--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
49345+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
49346@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
49347 */
49348 struct l2t_skb_cb {
49349 arp_failure_handler_func arp_failure_handler;
49350-};
49351+} __no_const;
49352
49353 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
49354
49355diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
49356index a83271c..cf00874 100644
49357--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
49358+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
49359@@ -2174,7 +2174,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
49360
49361 int i;
49362 struct adapter *ap = netdev2adap(dev);
49363- static const unsigned int *reg_ranges;
49364+ const unsigned int *reg_ranges;
49365 int arr_size = 0, buf_size = 0;
49366
49367 if (is_t4(ap->params.chip)) {
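This cxgb4 hunk (and the matching qlcnic_minidump hunk later) drops the static qualifier from a function-local pointer that is reassigned on every call. A static local is one shared cell, so concurrent callers could observe each other's assignment, and a static object is also a constification target that must not be written at runtime. A standalone sketch of the distinction (names hypothetical):

#include <stdio.h>

static void print_first_reg(int is_t4)
{
    static const unsigned int t4_ranges[] = { 0x1000, 0x1004 };
    static const unsigned int t5_ranges[] = { 0x2000, 0x2004 };
    const unsigned int *reg_ranges;   /* per-call slot; 'static' here would
                                       * be shared across concurrent callers */

    reg_ranges = is_t4 ? t4_ranges : t5_ranges;
    printf("0x%x\n", reg_ranges[0]);
}

int main(void)
{
    print_first_reg(1);
    print_first_reg(0);
    return 0;
}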
49368diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
49369index c05b66d..ed69872 100644
49370--- a/drivers/net/ethernet/dec/tulip/de4x5.c
49371+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
49372@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
49373 for (i=0; i<ETH_ALEN; i++) {
49374 tmp.addr[i] = dev->dev_addr[i];
49375 }
49376- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
49377+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
49378 break;
49379
49380 case DE4X5_SET_HWADDR: /* Set the hardware address */
49381@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
49382 spin_lock_irqsave(&lp->lock, flags);
49383 memcpy(&statbuf, &lp->pktStats, ioc->len);
49384 spin_unlock_irqrestore(&lp->lock, flags);
49385- if (copy_to_user(ioc->data, &statbuf, ioc->len))
49386+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
49387 return -EFAULT;
49388 break;
49389 }
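The two de4x5 hunks add a bounds check before copy_to_user(): ioc->len is user-controlled, so without the guard the ioctl could copy past the end of the kernel buffer (tmp.addr, statbuf) and leak adjacent stack memory. The tun hunk later in this section clamps ifreq_len the same way. A standalone sketch of the guard, with copy_to_user() replaced by a stub and all names hypothetical:

#include <errno.h>
#include <string.h>

/* Stand-in for copy_to_user(): returns nonzero on failure. */
static int copy_to_user_stub(void *dst, const void *src, size_t len)
{
    memcpy(dst, src, len);
    return 0;
}

/* Reject a user-supplied length larger than the kernel buffer instead
 * of letting the copy run off the end of 'addr'. */
static int get_hwaddr(void *user_buf, size_t user_len)
{
    unsigned char addr[6] = { 0 };

    if (user_len > sizeof(addr))
        return -EFAULT;
    if (copy_to_user_stub(user_buf, addr, user_len))
        return -EFAULT;
    return 0;
}

int main(void)
{
    unsigned char buf[8];
    /* 8 > sizeof(addr), so the oversized request is refused. */
    return get_hwaddr(buf, sizeof(buf)) == -EFAULT ? 0 : 1;
}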
49390diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
49391index 1e187fb..d024547 100644
49392--- a/drivers/net/ethernet/emulex/benet/be_main.c
49393+++ b/drivers/net/ethernet/emulex/benet/be_main.c
49394@@ -533,7 +533,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
49395
49396 if (wrapped)
49397 newacc += 65536;
49398- ACCESS_ONCE(*acc) = newacc;
49399+ ACCESS_ONCE_RW(*acc) = newacc;
49400 }
49401
49402 static void populate_erx_stats(struct be_adapter *adapter,
49403diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
49404index c77fa4a..7fd42fc 100644
49405--- a/drivers/net/ethernet/faraday/ftgmac100.c
49406+++ b/drivers/net/ethernet/faraday/ftgmac100.c
49407@@ -30,6 +30,8 @@
49408 #include <linux/netdevice.h>
49409 #include <linux/phy.h>
49410 #include <linux/platform_device.h>
49411+#include <linux/interrupt.h>
49412+#include <linux/irqreturn.h>
49413 #include <net/ip.h>
49414
49415 #include "ftgmac100.h"
49416diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
49417index 4ff1adc..0ea6bf4 100644
49418--- a/drivers/net/ethernet/faraday/ftmac100.c
49419+++ b/drivers/net/ethernet/faraday/ftmac100.c
49420@@ -31,6 +31,8 @@
49421 #include <linux/module.h>
49422 #include <linux/netdevice.h>
49423 #include <linux/platform_device.h>
49424+#include <linux/interrupt.h>
49425+#include <linux/irqreturn.h>
49426
49427 #include "ftmac100.h"
49428
49429diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
49430index 101f439..59e7ec6 100644
49431--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
49432+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
49433@@ -401,7 +401,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
49434 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
49435
49436 /* Update the base adjustment value. */
49437- ACCESS_ONCE(pf->ptp_base_adj) = incval;
49438+ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
49439 smp_mb(); /* Force the above update. */
49440 }
49441
49442diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49443index 68f87ec..241dbe3 100644
49444--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49445+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49446@@ -792,7 +792,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
49447 }
49448
49449 /* update the base incval used to calculate frequency adjustment */
49450- ACCESS_ONCE(adapter->base_incval) = incval;
49451+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
49452 smp_mb();
49453
49454 /* need lock to prevent incorrect read while modifying cyclecounter */
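Several drivers in this section (be2net and i40e above, ixgbe here, and sfc's ptp further down) switch stores from ACCESS_ONCE() to ACCESS_ONCE_RW(). The grsecurity series redefines ACCESS_ONCE() with a const-qualified volatile cast so that it compiles only on the read side; the few legitimate stores must be spelled ACCESS_ONCE_RW(). A standalone sketch assuming those definitions (GCC-style typeof):

#include <stdio.h>

/* Read side: the const qualifier makes 'ACCESS_ONCE(x) = v' a compile error. */
#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
/* Writable variant for the few places that legitimately store. */
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

static unsigned long base_incval;

int main(void)
{
    ACCESS_ONCE_RW(base_incval) = 42;            /* store: must use _RW */
    printf("%lu\n", ACCESS_ONCE(base_incval));   /* read: unchanged */
    return 0;
}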
49455diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
49456index 2bbd01f..e8baa64 100644
49457--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
49458+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
49459@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
49460 struct __vxge_hw_fifo *fifo;
49461 struct vxge_hw_fifo_config *config;
49462 u32 txdl_size, txdl_per_memblock;
49463- struct vxge_hw_mempool_cbs fifo_mp_callback;
49464+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
49465+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
49466+ };
49467+
49468 struct __vxge_hw_virtualpath *vpath;
49469
49470 if ((vp == NULL) || (attr == NULL)) {
49471@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
49472 goto exit;
49473 }
49474
49475- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
49476-
49477 fifo->mempool =
49478 __vxge_hw_mempool_create(vpath->hldev,
49479 fifo->config->memblock_size,
49480diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
49481index 73e6683..464e910 100644
49482--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
49483+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
49484@@ -120,6 +120,10 @@ static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
49485 int data);
49486 static void pch_gbe_set_multi(struct net_device *netdev);
49487
49488+static struct sock_filter ptp_filter[] = {
49489+ PTP_FILTER
49490+};
49491+
49492 static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
49493 {
49494 u8 *data = skb->data;
49495@@ -127,7 +131,7 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
49496 u16 *hi, *id;
49497 u32 lo;
49498
49499- if (ptp_classify_raw(skb) == PTP_CLASS_NONE)
49500+ if (sk_run_filter(skb, ptp_filter) == PTP_CLASS_NONE)
49501 return 0;
49502
49503 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
49504@@ -2631,6 +2635,11 @@ static int pch_gbe_probe(struct pci_dev *pdev,
49505
49506 adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
49507 PCI_DEVFN(12, 4));
49508+ if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
49509+ dev_err(&pdev->dev, "Bad ptp filter\n");
49510+ ret = -EINVAL;
49511+ goto err_free_netdev;
49512+ }
49513
49514 netdev->netdev_ops = &pch_gbe_netdev_ops;
49515 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
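This hunk, together with the cpts and ixp4xx hunks below, backs out the shared ptp_classify_raw() classifier in favor of the older per-driver scheme (hence the dropped 'select NET_PTP_CLASSIFY' in the xscale Kconfig): each driver keeps a static classic-BPF program expanded from the PTP_FILTER macro, validates it once with ptp_filter_init() at probe time, and classifies each frame with sk_run_filter(). A non-standalone fragment restating the restored calls from the hunks, annotated:

static struct sock_filter ptp_filter[] = {
	PTP_FILTER			/* classic-BPF program from <linux/ptp_classify.h> */
};

/* Probe time: reject a malformed program once, up front. */
if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter)))
	return -EINVAL;

/* Per packet: run the validated program to classify the frame. */
if (sk_run_filter(skb, ptp_filter) == PTP_CLASS_NONE)
	return 0;			/* not a PTP event message */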
49516diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49517index f33559b..c7f50ac 100644
49518--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49519+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49520@@ -2176,7 +2176,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
49521 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
49522 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
49523 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
49524- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
49525+ pax_open_kernel();
49526+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
49527+ pax_close_kernel();
49528 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49529 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
49530 max_tx_rings = QLCNIC_MAX_TX_RINGS;
49531diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49532index be7d7a6..a8983f8 100644
49533--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49534+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49535@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
49536 case QLCNIC_NON_PRIV_FUNC:
49537 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
49538 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49539- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
49540+ pax_open_kernel();
49541+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
49542+ pax_close_kernel();
49543 break;
49544 case QLCNIC_PRIV_FUNC:
49545 ahw->op_mode = QLCNIC_PRIV_FUNC;
49546 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
49547- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
49548+ pax_open_kernel();
49549+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
49550+ pax_close_kernel();
49551 break;
49552 case QLCNIC_MGMT_FUNC:
49553 ahw->op_mode = QLCNIC_MGMT_FUNC;
49554 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49555- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
49556+ pax_open_kernel();
49557+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
49558+ pax_close_kernel();
49559 break;
49560 default:
49561 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
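nic_ops here is constified by the PaX constify plugin, so the one-off runtime assignment of ->init_driver has to be bracketed by pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write protection (on x86, for example, by toggling CR0.WP), and the store goes through a cast because the member is formally read-only. The macvlan hunk below uses the same idiom to fill in a constified rtnl_link_ops. A fragment restating the idiom from the hunk above:

pax_open_kernel();		/* lift write protection (grsecurity/PaX helper) */
*(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
pax_close_kernel();		/* restore write protection immediately */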
49562diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49563index e46fc39..abe135b 100644
49564--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49565+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49566@@ -1228,7 +1228,7 @@ flash_temp:
49567 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
49568 {
49569 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
49570- static const struct qlcnic_dump_operations *fw_dump_ops;
49571+ const struct qlcnic_dump_operations *fw_dump_ops;
49572 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
49573 u32 entry_offset, dump, no_entries, buf_offset = 0;
49574 int i, k, ops_cnt, ops_index, dump_size = 0;
49575diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
49576index 61623e9..ac97c27 100644
49577--- a/drivers/net/ethernet/realtek/r8169.c
49578+++ b/drivers/net/ethernet/realtek/r8169.c
49579@@ -759,22 +759,22 @@ struct rtl8169_private {
49580 struct mdio_ops {
49581 void (*write)(struct rtl8169_private *, int, int);
49582 int (*read)(struct rtl8169_private *, int);
49583- } mdio_ops;
49584+ } __no_const mdio_ops;
49585
49586 struct pll_power_ops {
49587 void (*down)(struct rtl8169_private *);
49588 void (*up)(struct rtl8169_private *);
49589- } pll_power_ops;
49590+ } __no_const pll_power_ops;
49591
49592 struct jumbo_ops {
49593 void (*enable)(struct rtl8169_private *);
49594 void (*disable)(struct rtl8169_private *);
49595- } jumbo_ops;
49596+ } __no_const jumbo_ops;
49597
49598 struct csi_ops {
49599 void (*write)(struct rtl8169_private *, int, int);
49600 u32 (*read)(struct rtl8169_private *, int);
49601- } csi_ops;
49602+ } __no_const csi_ops;
49603
49604 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
49605 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
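__no_const is the opposite annotation: it tells the constify plugin to leave a function-pointer structure writable because the driver genuinely repopulates it at runtime; r8169 selects mdio_ops, pll_power_ops, jumbo_ops and csi_ops per chip revision, and the cxgb3 l2t_skb_cb and r8152 rtl_ops hunks in this section are the same case. A fragment sketching the effect (the per-chip helper names are illustrative, not the driver's real ones):

/* Without __no_const the plugin would move this all-function-pointer
 * struct into read-only memory and the probe-time assignment below
 * would fault. */
struct mdio_ops {
	void (*write)(struct rtl8169_private *, int, int);
	int (*read)(struct rtl8169_private *, int);
} __no_const mdio_ops;

/* Chosen at probe time for the detected chip (illustrative names): */
tp->mdio_ops.write = r8168_mdio_write;
tp->mdio_ops.read = r8168_mdio_read;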
49606diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
49607index 6b861e3..204ac86 100644
49608--- a/drivers/net/ethernet/sfc/ptp.c
49609+++ b/drivers/net/ethernet/sfc/ptp.c
49610@@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
49611 ptp->start.dma_addr);
49612
49613 /* Clear flag that signals MC ready */
49614- ACCESS_ONCE(*start) = 0;
49615+ ACCESS_ONCE_RW(*start) = 0;
49616 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
49617 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
49618 EFX_BUG_ON_PARANOID(rc);
49619diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49620index 50617c5..b13724c 100644
49621--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49622+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49623@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
49624
49625 writel(value, ioaddr + MMC_CNTRL);
49626
49627- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
49628- MMC_CNTRL, value);
49629+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
49630+// MMC_CNTRL, value);
49631 }
49632
49633 /* To mask all interrupts. */
49634diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
49635index 6b56f85..50e285f 100644
49636--- a/drivers/net/ethernet/ti/cpts.c
49637+++ b/drivers/net/ethernet/ti/cpts.c
49638@@ -33,6 +33,10 @@
49639
49640 #ifdef CONFIG_TI_CPTS
49641
49642+static struct sock_filter ptp_filter[] = {
49643+ PTP_FILTER
49644+};
49645+
49646 #define cpts_read32(c, r) __raw_readl(&c->reg->r)
49647 #define cpts_write32(c, v, r) __raw_writel(v, &c->reg->r)
49648
49649@@ -296,7 +300,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
49650 u64 ns = 0;
49651 struct cpts_event *event;
49652 struct list_head *this, *next;
49653- unsigned int class = ptp_classify_raw(skb);
49654+ unsigned int class = sk_run_filter(skb, ptp_filter);
49655 unsigned long flags;
49656 u16 seqid;
49657 u8 mtype;
49658@@ -367,6 +371,10 @@ int cpts_register(struct device *dev, struct cpts *cpts,
49659 int err, i;
49660 unsigned long flags;
49661
49662+ if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
49663+ pr_err("cpts: bad ptp filter\n");
49664+ return -EINVAL;
49665+ }
49666 cpts->info = cpts_info;
49667 cpts->clock = ptp_clock_register(&cpts->info, dev);
49668 if (IS_ERR(cpts->clock)) {
49669diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
49670index b81bc9f..3f43101 100644
49671--- a/drivers/net/ethernet/xscale/Kconfig
49672+++ b/drivers/net/ethernet/xscale/Kconfig
49673@@ -23,7 +23,6 @@ config IXP4XX_ETH
49674 tristate "Intel IXP4xx Ethernet support"
49675 depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
49676 select PHYLIB
49677- select NET_PTP_CLASSIFY
49678 ---help---
49679 Say Y here if you want to use built-in Ethernet ports
49680 on the IXP4xx processor.
49681diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
49682index f7e0f0f..25283f1 100644
49683--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
49684+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
49685@@ -256,6 +256,10 @@ static int ports_open;
49686 static struct port *npe_port_tab[MAX_NPES];
49687 static struct dma_pool *dma_pool;
49688
49689+static struct sock_filter ptp_filter[] = {
49690+ PTP_FILTER
49691+};
49692+
49693 static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
49694 {
49695 u8 *data = skb->data;
49696@@ -263,7 +267,7 @@ static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
49697 u16 *hi, *id;
49698 u32 lo;
49699
49700- if (ptp_classify_raw(skb) != PTP_CLASS_V1_IPV4)
49701+ if (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)
49702 return 0;
49703
49704 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
49705@@ -1409,6 +1413,11 @@ static int eth_init_one(struct platform_device *pdev)
49706 char phy_id[MII_BUS_ID_SIZE + 3];
49707 int err;
49708
49709+ if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
49710+ pr_err("ixp4xx_eth: bad ptp filter\n");
49711+ return -EINVAL;
49712+ }
49713+
49714 if (!(dev = alloc_etherdev(sizeof(struct port))))
49715 return -ENOMEM;
49716
49717diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
49718index 6cc37c1..fdd9d77 100644
49719--- a/drivers/net/hyperv/hyperv_net.h
49720+++ b/drivers/net/hyperv/hyperv_net.h
49721@@ -170,7 +170,7 @@ struct rndis_device {
49722
49723 enum rndis_device_state state;
49724 bool link_state;
49725- atomic_t new_req_id;
49726+ atomic_unchecked_t new_req_id;
49727
49728 spinlock_t request_lock;
49729 struct list_head req_list;
49730diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
49731index 99c527a..6a2ce38 100644
49732--- a/drivers/net/hyperv/rndis_filter.c
49733+++ b/drivers/net/hyperv/rndis_filter.c
49734@@ -102,7 +102,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
49735 * template
49736 */
49737 set = &rndis_msg->msg.set_req;
49738- set->req_id = atomic_inc_return(&dev->new_req_id);
49739+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
49740
49741 /* Add to the request list */
49742 spin_lock_irqsave(&dev->request_lock, flags);
49743@@ -930,7 +930,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
49744
49745 /* Setup the rndis set */
49746 halt = &request->request_msg.msg.halt_req;
49747- halt->req_id = atomic_inc_return(&dev->new_req_id);
49748+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
49749
49750 /* Ignore return since this msg is optional. */
49751 rndis_filter_send_request(dev, request);
49752diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
49753index 78f18be..1d19c62 100644
49754--- a/drivers/net/ieee802154/fakehard.c
49755+++ b/drivers/net/ieee802154/fakehard.c
49756@@ -364,7 +364,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
49757 phy->transmit_power = 0xbf;
49758
49759 dev->netdev_ops = &fake_ops;
49760- dev->ml_priv = &fake_mlme;
49761+ dev->ml_priv = (void *)&fake_mlme;
49762
49763 priv = netdev_priv(dev);
49764 priv->phy = phy;
49765diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
49766index ef8a5c2..76877d6 100644
49767--- a/drivers/net/macvlan.c
49768+++ b/drivers/net/macvlan.c
49769@@ -264,7 +264,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
49770 free_nskb:
49771 kfree_skb(nskb);
49772 err:
49773- atomic_long_inc(&skb->dev->rx_dropped);
49774+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
49775 }
49776
49777 /* called under rcu_read_lock() from netif_receive_skb */
49778@@ -1134,13 +1134,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
49779 int macvlan_link_register(struct rtnl_link_ops *ops)
49780 {
49781 /* common fields */
49782- ops->priv_size = sizeof(struct macvlan_dev);
49783- ops->validate = macvlan_validate;
49784- ops->maxtype = IFLA_MACVLAN_MAX;
49785- ops->policy = macvlan_policy;
49786- ops->changelink = macvlan_changelink;
49787- ops->get_size = macvlan_get_size;
49788- ops->fill_info = macvlan_fill_info;
49789+ pax_open_kernel();
49790+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
49791+ *(void **)&ops->validate = macvlan_validate;
49792+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
49793+ *(const void **)&ops->policy = macvlan_policy;
49794+ *(void **)&ops->changelink = macvlan_changelink;
49795+ *(void **)&ops->get_size = macvlan_get_size;
49796+ *(void **)&ops->fill_info = macvlan_fill_info;
49797+ pax_close_kernel();
49798
49799 return rtnl_link_register(ops);
49800 };
49801@@ -1220,7 +1222,7 @@ static int macvlan_device_event(struct notifier_block *unused,
49802 return NOTIFY_DONE;
49803 }
49804
49805-static struct notifier_block macvlan_notifier_block __read_mostly = {
49806+static struct notifier_block macvlan_notifier_block = {
49807 .notifier_call = macvlan_device_event,
49808 };
49809
49810diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
49811index 3381c4f..dea5fd5 100644
49812--- a/drivers/net/macvtap.c
49813+++ b/drivers/net/macvtap.c
49814@@ -1020,7 +1020,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
49815 }
49816
49817 ret = 0;
49818- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
49819+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
49820 put_user(q->flags, &ifr->ifr_flags))
49821 ret = -EFAULT;
49822 macvtap_put_vlan(vlan);
49823@@ -1190,7 +1190,7 @@ static int macvtap_device_event(struct notifier_block *unused,
49824 return NOTIFY_DONE;
49825 }
49826
49827-static struct notifier_block macvtap_notifier_block __read_mostly = {
49828+static struct notifier_block macvtap_notifier_block = {
49829 .notifier_call = macvtap_device_event,
49830 };
49831
49832diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
49833index 9408157..d53b924 100644
49834--- a/drivers/net/phy/dp83640.c
49835+++ b/drivers/net/phy/dp83640.c
49836@@ -27,7 +27,6 @@
49837 #include <linux/module.h>
49838 #include <linux/net_tstamp.h>
49839 #include <linux/netdevice.h>
49840-#include <linux/if_vlan.h>
49841 #include <linux/phy.h>
49842 #include <linux/ptp_classify.h>
49843 #include <linux/ptp_clock_kernel.h>
49844diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
49845index d5b77ef..72ff14b 100644
49846--- a/drivers/net/ppp/ppp_generic.c
49847+++ b/drivers/net/ppp/ppp_generic.c
49848@@ -143,8 +143,9 @@ struct ppp {
49849 struct sk_buff_head mrq; /* MP: receive reconstruction queue */
49850 #endif /* CONFIG_PPP_MULTILINK */
49851 #ifdef CONFIG_PPP_FILTER
49852- struct sk_filter *pass_filter; /* filter for packets to pass */
49853- struct sk_filter *active_filter;/* filter for pkts to reset idle */
49854+ struct sock_filter *pass_filter; /* filter for packets to pass */
49855+ struct sock_filter *active_filter;/* filter for pkts to reset idle */
49856+ unsigned pass_len, active_len;
49857 #endif /* CONFIG_PPP_FILTER */
49858 struct net *ppp_net; /* the net we belong to */
49859 struct ppp_link_stats stats64; /* 64 bit network stats */
49860@@ -539,7 +540,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
49861 {
49862 struct sock_fprog uprog;
49863 struct sock_filter *code = NULL;
49864- int len;
49865+ int len, err;
49866
49867 if (copy_from_user(&uprog, arg, sizeof(uprog)))
49868 return -EFAULT;
49869@@ -554,6 +555,12 @@ static int get_filter(void __user *arg, struct sock_filter **p)
49870 if (IS_ERR(code))
49871 return PTR_ERR(code);
49872
49873+ err = sk_chk_filter(code, uprog.len);
49874+ if (err) {
49875+ kfree(code);
49876+ return err;
49877+ }
49878+
49879 *p = code;
49880 return uprog.len;
49881 }
49882@@ -748,52 +755,28 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
49883 case PPPIOCSPASS:
49884 {
49885 struct sock_filter *code;
49886-
49887 err = get_filter(argp, &code);
49888 if (err >= 0) {
49889- struct sock_fprog_kern fprog = {
49890- .len = err,
49891- .filter = code,
49892- };
49893-
49894 ppp_lock(ppp);
49895- if (ppp->pass_filter) {
49896- sk_unattached_filter_destroy(ppp->pass_filter);
49897- ppp->pass_filter = NULL;
49898- }
49899- if (fprog.filter != NULL)
49900- err = sk_unattached_filter_create(&ppp->pass_filter,
49901- &fprog);
49902- else
49903- err = 0;
49904- kfree(code);
49905+ kfree(ppp->pass_filter);
49906+ ppp->pass_filter = code;
49907+ ppp->pass_len = err;
49908 ppp_unlock(ppp);
49909+ err = 0;
49910 }
49911 break;
49912 }
49913 case PPPIOCSACTIVE:
49914 {
49915 struct sock_filter *code;
49916-
49917 err = get_filter(argp, &code);
49918 if (err >= 0) {
49919- struct sock_fprog_kern fprog = {
49920- .len = err,
49921- .filter = code,
49922- };
49923-
49924 ppp_lock(ppp);
49925- if (ppp->active_filter) {
49926- sk_unattached_filter_destroy(ppp->active_filter);
49927- ppp->active_filter = NULL;
49928- }
49929- if (fprog.filter != NULL)
49930- err = sk_unattached_filter_create(&ppp->active_filter,
49931- &fprog);
49932- else
49933- err = 0;
49934- kfree(code);
49935+ kfree(ppp->active_filter);
49936+ ppp->active_filter = code;
49937+ ppp->active_len = err;
49938 ppp_unlock(ppp);
49939+ err = 0;
49940 }
49941 break;
49942 }
49943@@ -1201,7 +1184,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
49944 a four-byte PPP header on each packet */
49945 *skb_push(skb, 2) = 1;
49946 if (ppp->pass_filter &&
49947- SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
49948+ sk_run_filter(skb, ppp->pass_filter) == 0) {
49949 if (ppp->debug & 1)
49950 netdev_printk(KERN_DEBUG, ppp->dev,
49951 "PPP: outbound frame "
49952@@ -1211,7 +1194,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
49953 }
49954 /* if this packet passes the active filter, record the time */
49955 if (!(ppp->active_filter &&
49956- SK_RUN_FILTER(ppp->active_filter, skb) == 0))
49957+ sk_run_filter(skb, ppp->active_filter) == 0))
49958 ppp->last_xmit = jiffies;
49959 skb_pull(skb, 2);
49960 #else
49961@@ -1835,7 +1818,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
49962
49963 *skb_push(skb, 2) = 0;
49964 if (ppp->pass_filter &&
49965- SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
49966+ sk_run_filter(skb, ppp->pass_filter) == 0) {
49967 if (ppp->debug & 1)
49968 netdev_printk(KERN_DEBUG, ppp->dev,
49969 "PPP: inbound frame "
49970@@ -1844,7 +1827,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
49971 return;
49972 }
49973 if (!(ppp->active_filter &&
49974- SK_RUN_FILTER(ppp->active_filter, skb) == 0))
49975+ sk_run_filter(skb, ppp->active_filter) == 0))
49976 ppp->last_recv = jiffies;
49977 __skb_pull(skb, 2);
49978 } else
49979@@ -2689,10 +2672,6 @@ ppp_create_interface(struct net *net, int unit, int *retp)
49980 ppp->minseq = -1;
49981 skb_queue_head_init(&ppp->mrq);
49982 #endif /* CONFIG_PPP_MULTILINK */
49983-#ifdef CONFIG_PPP_FILTER
49984- ppp->pass_filter = NULL;
49985- ppp->active_filter = NULL;
49986-#endif /* CONFIG_PPP_FILTER */
49987
49988 /*
49989 * drum roll: don't forget to set
49990@@ -2823,15 +2802,10 @@ static void ppp_destroy_interface(struct ppp *ppp)
49991 skb_queue_purge(&ppp->mrq);
49992 #endif /* CONFIG_PPP_MULTILINK */
49993 #ifdef CONFIG_PPP_FILTER
49994- if (ppp->pass_filter) {
49995- sk_unattached_filter_destroy(ppp->pass_filter);
49996- ppp->pass_filter = NULL;
49997- }
49998-
49999- if (ppp->active_filter) {
50000- sk_unattached_filter_destroy(ppp->active_filter);
50001- ppp->active_filter = NULL;
50002- }
50003+ kfree(ppp->pass_filter);
50004+ ppp->pass_filter = NULL;
50005+ kfree(ppp->active_filter);
50006+ ppp->active_filter = NULL;
50007 #endif /* CONFIG_PPP_FILTER */
50008
50009 kfree_skb(ppp->xmit_pending);
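These ppp_generic hunks roll the pass/active filters back from the 3.15+ sk_unattached_filter objects to bare sock_filter arrays: get_filter() now runs sk_chk_filter() on the user-supplied program before it is stored anywhere, and the hot paths call the classic interpreter directly via sk_run_filter(skb, filter) instead of SK_RUN_FILTER(fp, skb). The team_mode_loadbalance hunk below makes the matching sock_fprog_kern to sock_fprog change. A fragment restating the validation step as restored above:

/* Validate a user-supplied classic-BPF program before keeping it;
 * sk_chk_filter() returns 0 only if every instruction and jump
 * target is legal. */
err = sk_chk_filter(code, uprog.len);
if (err) {
	kfree(code);		/* never store an unvalidated program */
	return err;
}
*p = code;
return uprog.len;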
50010diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
50011index 1252d9c..80e660b 100644
50012--- a/drivers/net/slip/slhc.c
50013+++ b/drivers/net/slip/slhc.c
50014@@ -488,7 +488,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
50015 register struct tcphdr *thp;
50016 register struct iphdr *ip;
50017 register struct cstate *cs;
50018- int len, hdrlen;
50019+ long len, hdrlen;
50020 unsigned char *cp = icp;
50021
50022 /* We've got a compressed packet; read the change byte */
50023diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
50024index b4958c7..277cb96 100644
50025--- a/drivers/net/team/team.c
50026+++ b/drivers/net/team/team.c
50027@@ -2868,7 +2868,7 @@ static int team_device_event(struct notifier_block *unused,
50028 return NOTIFY_DONE;
50029 }
50030
50031-static struct notifier_block team_notifier_block __read_mostly = {
50032+static struct notifier_block team_notifier_block = {
50033 .notifier_call = team_device_event,
50034 };
50035
50036diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
50037index a58dfeb..dbde341 100644
50038--- a/drivers/net/team/team_mode_loadbalance.c
50039+++ b/drivers/net/team/team_mode_loadbalance.c
50040@@ -49,7 +49,7 @@ struct lb_port_mapping {
50041 struct lb_priv_ex {
50042 struct team *team;
50043 struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE];
50044- struct sock_fprog_kern *orig_fprog;
50045+ struct sock_fprog *orig_fprog;
50046 struct {
50047 unsigned int refresh_interval; /* in tenths of second */
50048 struct delayed_work refresh_dw;
50049@@ -241,15 +241,15 @@ static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
50050 return 0;
50051 }
50052
50053-static int __fprog_create(struct sock_fprog_kern **pfprog, u32 data_len,
50054+static int __fprog_create(struct sock_fprog **pfprog, u32 data_len,
50055 const void *data)
50056 {
50057- struct sock_fprog_kern *fprog;
50058+ struct sock_fprog *fprog;
50059 struct sock_filter *filter = (struct sock_filter *) data;
50060
50061 if (data_len % sizeof(struct sock_filter))
50062 return -EINVAL;
50063- fprog = kmalloc(sizeof(*fprog), GFP_KERNEL);
50064+ fprog = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL);
50065 if (!fprog)
50066 return -ENOMEM;
50067 fprog->filter = kmemdup(filter, data_len, GFP_KERNEL);
50068@@ -262,7 +262,7 @@ static int __fprog_create(struct sock_fprog_kern **pfprog, u32 data_len,
50069 return 0;
50070 }
50071
50072-static void __fprog_destroy(struct sock_fprog_kern *fprog)
50073+static void __fprog_destroy(struct sock_fprog *fprog)
50074 {
50075 kfree(fprog->filter);
50076 kfree(fprog);
50077@@ -273,7 +273,7 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
50078 struct lb_priv *lb_priv = get_lb_priv(team);
50079 struct sk_filter *fp = NULL;
50080 struct sk_filter *orig_fp;
50081- struct sock_fprog_kern *fprog = NULL;
50082+ struct sock_fprog *fprog = NULL;
50083 int err;
50084
50085 if (ctx->data.bin_val.len) {
50086diff --git a/drivers/net/tun.c b/drivers/net/tun.c
50087index 98bad1f..f197d7a 100644
50088--- a/drivers/net/tun.c
50089+++ b/drivers/net/tun.c
50090@@ -1854,7 +1854,7 @@ unlock:
50091 }
50092
50093 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
50094- unsigned long arg, int ifreq_len)
50095+ unsigned long arg, size_t ifreq_len)
50096 {
50097 struct tun_file *tfile = file->private_data;
50098 struct tun_struct *tun;
50099@@ -1867,6 +1867,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
50100 unsigned int ifindex;
50101 int ret;
50102
50103+ if (ifreq_len > sizeof ifr)
50104+ return -EFAULT;
50105+
50106 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
50107 if (copy_from_user(&ifr, argp, ifreq_len))
50108 return -EFAULT;
50109diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
50110index a4272ed..cdd69ff 100644
50111--- a/drivers/net/usb/hso.c
50112+++ b/drivers/net/usb/hso.c
50113@@ -71,7 +71,7 @@
50114 #include <asm/byteorder.h>
50115 #include <linux/serial_core.h>
50116 #include <linux/serial.h>
50117-
50118+#include <asm/local.h>
50119
50120 #define MOD_AUTHOR "Option Wireless"
50121 #define MOD_DESCRIPTION "USB High Speed Option driver"
50122@@ -1177,7 +1177,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
50123 struct urb *urb;
50124
50125 urb = serial->rx_urb[0];
50126- if (serial->port.count > 0) {
50127+ if (atomic_read(&serial->port.count) > 0) {
50128 count = put_rxbuf_data(urb, serial);
50129 if (count == -1)
50130 return;
50131@@ -1215,7 +1215,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
50132 DUMP1(urb->transfer_buffer, urb->actual_length);
50133
50134 /* Anyone listening? */
50135- if (serial->port.count == 0)
50136+ if (atomic_read(&serial->port.count) == 0)
50137 return;
50138
50139 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
50140@@ -1277,8 +1277,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
50141 tty_port_tty_set(&serial->port, tty);
50142
50143 /* check for port already opened, if not set the termios */
50144- serial->port.count++;
50145- if (serial->port.count == 1) {
50146+ if (atomic_inc_return(&serial->port.count) == 1) {
50147 serial->rx_state = RX_IDLE;
50148 /* Force default termio settings */
50149 _hso_serial_set_termios(tty, NULL);
50150@@ -1288,7 +1287,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
50151 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
50152 if (result) {
50153 hso_stop_serial_device(serial->parent);
50154- serial->port.count--;
50155+ atomic_dec(&serial->port.count);
50156 kref_put(&serial->parent->ref, hso_serial_ref_free);
50157 }
50158 } else {
50159@@ -1325,10 +1324,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
50160
50161 /* reset the rts and dtr */
50162 /* do the actual close */
50163- serial->port.count--;
50164+ atomic_dec(&serial->port.count);
50165
50166- if (serial->port.count <= 0) {
50167- serial->port.count = 0;
50168+ if (atomic_read(&serial->port.count) <= 0) {
50169+ atomic_set(&serial->port.count, 0);
50170 tty_port_tty_set(&serial->port, NULL);
50171 if (!usb_gone)
50172 hso_stop_serial_device(serial->parent);
50173@@ -1403,7 +1402,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
50174
50175 /* the actual setup */
50176 spin_lock_irqsave(&serial->serial_lock, flags);
50177- if (serial->port.count)
50178+ if (atomic_read(&serial->port.count))
50179 _hso_serial_set_termios(tty, old);
50180 else
50181 tty->termios = *old;
50182@@ -1872,7 +1871,7 @@ static void intr_callback(struct urb *urb)
50183 D1("Pending read interrupt on port %d\n", i);
50184 spin_lock(&serial->serial_lock);
50185 if (serial->rx_state == RX_IDLE &&
50186- serial->port.count > 0) {
50187+ atomic_read(&serial->port.count) > 0) {
50188 /* Setup and send a ctrl req read on
50189 * port i */
50190 if (!serial->rx_urb_filled[0]) {
50191@@ -3045,7 +3044,7 @@ static int hso_resume(struct usb_interface *iface)
50192 /* Start all serial ports */
50193 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
50194 if (serial_table[i] && (serial_table[i]->interface == iface)) {
50195- if (dev2ser(serial_table[i])->port.count) {
50196+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
50197 result =
50198 hso_start_serial_device(serial_table[i], GFP_NOIO);
50199 hso_kick_transmit(dev2ser(serial_table[i]));
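The hso hunks convert the tty port open count from a plain integer to an atomic_t. The count is touched from the open/close paths as well as from URB completion and resume contexts, so the bare ++/-- pairs were unsynchronized; atomic_inc_return() also folds the "first opener" test into a single atomic step. A fragment restating the converted open and close paths from the hunks above:

/* Open: detect the first opener in one atomic operation. */
if (atomic_inc_return(&serial->port.count) == 1) {
	serial->rx_state = RX_IDLE;
	_hso_serial_set_termios(tty, NULL);	/* force defaults on first open */
}

/* Close: drop the count, then tear down once no opener remains. */
atomic_dec(&serial->port.count);
if (atomic_read(&serial->port.count) <= 0) {
	atomic_set(&serial->port.count, 0);
	tty_port_tty_set(&serial->port, NULL);
}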
50200diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
50201index 3eab74c..fb6097c 100644
50202--- a/drivers/net/usb/r8152.c
50203+++ b/drivers/net/usb/r8152.c
50204@@ -567,7 +567,7 @@ struct r8152 {
50205 void (*up)(struct r8152 *);
50206 void (*down)(struct r8152 *);
50207 void (*unload)(struct r8152 *);
50208- } rtl_ops;
50209+ } __no_const rtl_ops;
50210
50211 int intr_interval;
50212 u32 saved_wolopts;
50213diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
50214index a2515887..6d13233 100644
50215--- a/drivers/net/usb/sierra_net.c
50216+++ b/drivers/net/usb/sierra_net.c
50217@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
50218 /* atomic counter partially included in MAC address to make sure 2 devices
50219 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
50220 */
50221-static atomic_t iface_counter = ATOMIC_INIT(0);
50222+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
50223
50224 /*
50225 * SYNC Timer Delay definition used to set the expiry time
50226@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
50227 dev->net->netdev_ops = &sierra_net_device_ops;
50228
50229 /* change MAC addr to include, ifacenum, and to be unique */
50230- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
50231+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
50232 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
50233
50234 /* we will have to manufacture ethernet headers, prepare template */
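atomic_unchecked_t, used above for hyperv's new_req_id and here for iface_counter, is the grsecurity escape hatch from the PaX REFCOUNT defense: ordinary atomic_t increments are instrumented to trap on overflow, so counters that are allowed to wrap, such as request IDs or a single byte of a MAC address, are moved to the unchecked type and its *_unchecked() helpers. A fragment restating the usage from the hunk above:

/* A wrap-tolerant counter opts out of overflow trapping. */
static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);

/* Only the low byte lands in the MAC address, so wraparound past
 * 255 is harmless by design here. */
dev->net->dev_addr[ETH_ALEN - 2] = atomic_inc_return_unchecked(&iface_counter);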
50235diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
50236index 7d9f84a..7f690da 100644
50237--- a/drivers/net/virtio_net.c
50238+++ b/drivers/net/virtio_net.c
50239@@ -47,7 +47,7 @@ module_param(gso, bool, 0444);
50240 #define RECEIVE_AVG_WEIGHT 64
50241
50242 /* Minimum alignment for mergeable packet buffers. */
50243-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
50244+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
50245
50246 #define VIRTNET_DRIVER_VERSION "1.0.0"
50247
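The 256UL change is a type fix: the kernel's max() is type-checked and refuses mixed operands, so the literal must match L1_CACHE_BYTES on configurations where that macro expands to an unsigned long expression, and an unsigned long result also keeps the downstream buffer-size arithmetic unsigned. A standalone sketch of the type check, using a simplified form of the kernel macro (GCC statement expressions):

#include <stdio.h>

/* Simplified kernel-style max(): the pointer comparison
 * (void)(&_x == &_y) draws a -Wcompare-distinct-pointer-types
 * diagnostic when x and y have different types. */
#define max_checked(x, y) ({		\
    __typeof__(x) _x = (x);		\
    __typeof__(y) _y = (y);		\
    (void)(&_x == &_y);			\
    _x > _y ? _x : _y; })

int main(void)
{
    unsigned long cache_bytes = 64;
    /* 256UL matches cache_bytes; a bare int 256 would trip the check. */
    printf("%lu\n", max_checked(cache_bytes, 256UL));
    return 0;
}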
50248diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
50249index 9f79192..838cf95 100644
50250--- a/drivers/net/vxlan.c
50251+++ b/drivers/net/vxlan.c
50252@@ -2838,7 +2838,7 @@ nla_put_failure:
50253 return -EMSGSIZE;
50254 }
50255
50256-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
50257+static struct rtnl_link_ops vxlan_link_ops = {
50258 .kind = "vxlan",
50259 .maxtype = IFLA_VXLAN_MAX,
50260 .policy = vxlan_policy,
50261@@ -2885,7 +2885,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
50262 return NOTIFY_DONE;
50263 }
50264
50265-static struct notifier_block vxlan_notifier_block __read_mostly = {
50266+static struct notifier_block vxlan_notifier_block = {
50267 .notifier_call = vxlan_lowerdev_event,
50268 };
50269
50270diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
50271index 5920c99..ff2e4a5 100644
50272--- a/drivers/net/wan/lmc/lmc_media.c
50273+++ b/drivers/net/wan/lmc/lmc_media.c
50274@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
50275 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
50276
50277 lmc_media_t lmc_ds3_media = {
50278- lmc_ds3_init, /* special media init stuff */
50279- lmc_ds3_default, /* reset to default state */
50280- lmc_ds3_set_status, /* reset status to state provided */
50281- lmc_dummy_set_1, /* set clock source */
50282- lmc_dummy_set2_1, /* set line speed */
50283- lmc_ds3_set_100ft, /* set cable length */
50284- lmc_ds3_set_scram, /* set scrambler */
50285- lmc_ds3_get_link_status, /* get link status */
50286- lmc_dummy_set_1, /* set link status */
50287- lmc_ds3_set_crc_length, /* set CRC length */
50288- lmc_dummy_set_1, /* set T1 or E1 circuit type */
50289- lmc_ds3_watchdog
50290+ .init = lmc_ds3_init, /* special media init stuff */
50291+ .defaults = lmc_ds3_default, /* reset to default state */
50292+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
50293+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
50294+ .set_speed = lmc_dummy_set2_1, /* set line speed */
50295+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
50296+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
50297+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
50298+ .set_link_status = lmc_dummy_set_1, /* set link status */
50299+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
50300+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
50301+ .watchdog = lmc_ds3_watchdog
50302 };
50303
50304 lmc_media_t lmc_hssi_media = {
50305- lmc_hssi_init, /* special media init stuff */
50306- lmc_hssi_default, /* reset to default state */
50307- lmc_hssi_set_status, /* reset status to state provided */
50308- lmc_hssi_set_clock, /* set clock source */
50309- lmc_dummy_set2_1, /* set line speed */
50310- lmc_dummy_set_1, /* set cable length */
50311- lmc_dummy_set_1, /* set scrambler */
50312- lmc_hssi_get_link_status, /* get link status */
50313- lmc_hssi_set_link_status, /* set link status */
50314- lmc_hssi_set_crc_length, /* set CRC length */
50315- lmc_dummy_set_1, /* set T1 or E1 circuit type */
50316- lmc_hssi_watchdog
50317+ .init = lmc_hssi_init, /* special media init stuff */
50318+ .defaults = lmc_hssi_default, /* reset to default state */
50319+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
50320+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
50321+ .set_speed = lmc_dummy_set2_1, /* set line speed */
50322+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
50323+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
50324+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
50325+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
50326+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
50327+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
50328+ .watchdog = lmc_hssi_watchdog
50329 };
50330
50331-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
50332- lmc_ssi_default, /* reset to default state */
50333- lmc_ssi_set_status, /* reset status to state provided */
50334- lmc_ssi_set_clock, /* set clock source */
50335- lmc_ssi_set_speed, /* set line speed */
50336- lmc_dummy_set_1, /* set cable length */
50337- lmc_dummy_set_1, /* set scrambler */
50338- lmc_ssi_get_link_status, /* get link status */
50339- lmc_ssi_set_link_status, /* set link status */
50340- lmc_ssi_set_crc_length, /* set CRC length */
50341- lmc_dummy_set_1, /* set T1 or E1 circuit type */
50342- lmc_ssi_watchdog
50343+lmc_media_t lmc_ssi_media = {
50344+ .init = lmc_ssi_init, /* special media init stuff */
50345+ .defaults = lmc_ssi_default, /* reset to default state */
50346+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
50347+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
50348+ .set_speed = lmc_ssi_set_speed, /* set line speed */
50349+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
50350+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
50351+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
50352+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
50353+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
50354+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
50355+ .watchdog = lmc_ssi_watchdog
50356 };
50357
50358 lmc_media_t lmc_t1_media = {
50359- lmc_t1_init, /* special media init stuff */
50360- lmc_t1_default, /* reset to default state */
50361- lmc_t1_set_status, /* reset status to state provided */
50362- lmc_t1_set_clock, /* set clock source */
50363- lmc_dummy_set2_1, /* set line speed */
50364- lmc_dummy_set_1, /* set cable length */
50365- lmc_dummy_set_1, /* set scrambler */
50366- lmc_t1_get_link_status, /* get link status */
50367- lmc_dummy_set_1, /* set link status */
50368- lmc_t1_set_crc_length, /* set CRC length */
50369- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
50370- lmc_t1_watchdog
50371+ .init = lmc_t1_init, /* special media init stuff */
50372+ .defaults = lmc_t1_default, /* reset to default state */
50373+ .set_status = lmc_t1_set_status, /* reset status to state provided */
50374+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
50375+ .set_speed = lmc_dummy_set2_1, /* set line speed */
50376+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
50377+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
50378+ .get_link_status = lmc_t1_get_link_status, /* get link status */
50379+ .set_link_status = lmc_dummy_set_1, /* set link status */
50380+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
50381+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
50382+ .watchdog = lmc_t1_watchdog
50383 };
50384
50385 static void
50386diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
50387index feacc3b..5bac0de 100644
50388--- a/drivers/net/wan/z85230.c
50389+++ b/drivers/net/wan/z85230.c
50390@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
50391
50392 struct z8530_irqhandler z8530_sync =
50393 {
50394- z8530_rx,
50395- z8530_tx,
50396- z8530_status
50397+ .rx = z8530_rx,
50398+ .tx = z8530_tx,
50399+ .status = z8530_status
50400 };
50401
50402 EXPORT_SYMBOL(z8530_sync);
50403@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
50404 }
50405
50406 static struct z8530_irqhandler z8530_dma_sync = {
50407- z8530_dma_rx,
50408- z8530_dma_tx,
50409- z8530_dma_status
50410+ .rx = z8530_dma_rx,
50411+ .tx = z8530_dma_tx,
50412+ .status = z8530_dma_status
50413 };
50414
50415 static struct z8530_irqhandler z8530_txdma_sync = {
50416- z8530_rx,
50417- z8530_dma_tx,
50418- z8530_dma_status
50419+ .rx = z8530_rx,
50420+ .tx = z8530_dma_tx,
50421+ .status = z8530_dma_status
50422 };
50423
50424 /**
50425@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
50426
50427 struct z8530_irqhandler z8530_nop=
50428 {
50429- z8530_rx_clear,
50430- z8530_tx_clear,
50431- z8530_status_clear
50432+ .rx = z8530_rx_clear,
50433+ .tx = z8530_tx_clear,
50434+ .status = z8530_status_clear
50435 };
50436
50437
50438diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
50439index 0b60295..b8bfa5b 100644
50440--- a/drivers/net/wimax/i2400m/rx.c
50441+++ b/drivers/net/wimax/i2400m/rx.c
50442@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
50443 if (i2400m->rx_roq == NULL)
50444 goto error_roq_alloc;
50445
50446- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
50447+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
50448 GFP_KERNEL);
50449 if (rd == NULL) {
50450 result = -ENOMEM;
50451diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
50452index 64747d4..17c4cf3 100644
50453--- a/drivers/net/wireless/airo.c
50454+++ b/drivers/net/wireless/airo.c
50455@@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
50456 struct airo_info *ai = dev->ml_priv;
50457 int ridcode;
50458 int enabled;
50459- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
50460+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
50461 unsigned char *iobuf;
50462
50463 /* Only super-user can write RIDs */
50464diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
50465index d48776e..373d049 100644
50466--- a/drivers/net/wireless/at76c50x-usb.c
50467+++ b/drivers/net/wireless/at76c50x-usb.c
50468@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
50469 }
50470
50471 /* Convert timeout from the DFU status to jiffies */
50472-static inline unsigned long at76_get_timeout(struct dfu_status *s)
50473+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
50474 {
50475 return msecs_to_jiffies((s->poll_timeout[2] << 16)
50476 | (s->poll_timeout[1] << 8)
50477diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
50478index e493db4..2c1853a 100644
50479--- a/drivers/net/wireless/ath/ath10k/htc.c
50480+++ b/drivers/net/wireless/ath/ath10k/htc.c
50481@@ -840,7 +840,10 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
50482 /* registered target arrival callback from the HIF layer */
50483 int ath10k_htc_init(struct ath10k *ar)
50484 {
50485- struct ath10k_hif_cb htc_callbacks;
50486+ static struct ath10k_hif_cb htc_callbacks = {
50487+ .rx_completion = ath10k_htc_rx_completion_handler,
50488+ .tx_completion = ath10k_htc_tx_completion_handler,
50489+ };
50490 struct ath10k_htc_ep *ep = NULL;
50491 struct ath10k_htc *htc = &ar->htc;
50492
50493@@ -850,8 +853,6 @@ int ath10k_htc_init(struct ath10k *ar)
50494 ath10k_htc_reset_endpoint_states(htc);
50495
50496 /* setup HIF layer callbacks */
50497- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
50498- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
50499 htc->ar = ar;
50500
50501 /* Get HIF default pipe for HTC message exchange */
50502diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
50503index 4716d33..a688310 100644
50504--- a/drivers/net/wireless/ath/ath10k/htc.h
50505+++ b/drivers/net/wireless/ath/ath10k/htc.h
50506@@ -271,13 +271,13 @@ enum ath10k_htc_ep_id {
50507
50508 struct ath10k_htc_ops {
50509 void (*target_send_suspend_complete)(struct ath10k *ar);
50510-};
50511+} __no_const;
50512
50513 struct ath10k_htc_ep_ops {
50514 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
50515 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
50516 void (*ep_tx_credits)(struct ath10k *);
50517-};
50518+} __no_const;
50519
50520 /* service connection information */
50521 struct ath10k_htc_svc_conn_req {
50522diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
50523index 741b38d..b7ae41b 100644
50524--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
50525+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
50526@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50527 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
50528 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
50529
50530- ACCESS_ONCE(ads->ds_link) = i->link;
50531- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
50532+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
50533+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
50534
50535 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
50536 ctl6 = SM(i->keytype, AR_EncrType);
50537@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50538
50539 if ((i->is_first || i->is_last) &&
50540 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
50541- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
50542+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
50543 | set11nTries(i->rates, 1)
50544 | set11nTries(i->rates, 2)
50545 | set11nTries(i->rates, 3)
50546 | (i->dur_update ? AR_DurUpdateEna : 0)
50547 | SM(0, AR_BurstDur);
50548
50549- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
50550+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
50551 | set11nRate(i->rates, 1)
50552 | set11nRate(i->rates, 2)
50553 | set11nRate(i->rates, 3);
50554 } else {
50555- ACCESS_ONCE(ads->ds_ctl2) = 0;
50556- ACCESS_ONCE(ads->ds_ctl3) = 0;
50557+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
50558+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
50559 }
50560
50561 if (!i->is_first) {
50562- ACCESS_ONCE(ads->ds_ctl0) = 0;
50563- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
50564- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
50565+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
50566+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
50567+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
50568 return;
50569 }
50570
50571@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50572 break;
50573 }
50574
50575- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
50576+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
50577 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
50578 | SM(i->txpower, AR_XmitPower)
50579 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
50580@@ -289,19 +289,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50581 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
50582 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
50583
50584- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
50585- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
50586+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
50587+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
50588
50589 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
50590 return;
50591
50592- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
50593+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
50594 | set11nPktDurRTSCTS(i->rates, 1);
50595
50596- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
50597+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
50598 | set11nPktDurRTSCTS(i->rates, 3);
50599
50600- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
50601+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
50602 | set11nRateFlags(i->rates, 1)
50603 | set11nRateFlags(i->rates, 2)
50604 | set11nRateFlags(i->rates, 3)
50605diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50606index 729ffbf..49f50e3 100644
50607--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50608+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50609@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50610 (i->qcu << AR_TxQcuNum_S) | desc_len;
50611
50612 checksum += val;
50613- ACCESS_ONCE(ads->info) = val;
50614+ ACCESS_ONCE_RW(ads->info) = val;
50615
50616 checksum += i->link;
50617- ACCESS_ONCE(ads->link) = i->link;
50618+ ACCESS_ONCE_RW(ads->link) = i->link;
50619
50620 checksum += i->buf_addr[0];
50621- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
50622+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
50623 checksum += i->buf_addr[1];
50624- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
50625+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
50626 checksum += i->buf_addr[2];
50627- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
50628+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
50629 checksum += i->buf_addr[3];
50630- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
50631+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
50632
50633 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
50634- ACCESS_ONCE(ads->ctl3) = val;
50635+ ACCESS_ONCE_RW(ads->ctl3) = val;
50636 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
50637- ACCESS_ONCE(ads->ctl5) = val;
50638+ ACCESS_ONCE_RW(ads->ctl5) = val;
50639 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
50640- ACCESS_ONCE(ads->ctl7) = val;
50641+ ACCESS_ONCE_RW(ads->ctl7) = val;
50642 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
50643- ACCESS_ONCE(ads->ctl9) = val;
50644+ ACCESS_ONCE_RW(ads->ctl9) = val;
50645
50646 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
50647- ACCESS_ONCE(ads->ctl10) = checksum;
50648+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
50649
50650 if (i->is_first || i->is_last) {
50651- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
50652+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
50653 | set11nTries(i->rates, 1)
50654 | set11nTries(i->rates, 2)
50655 | set11nTries(i->rates, 3)
50656 | (i->dur_update ? AR_DurUpdateEna : 0)
50657 | SM(0, AR_BurstDur);
50658
50659- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
50660+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
50661 | set11nRate(i->rates, 1)
50662 | set11nRate(i->rates, 2)
50663 | set11nRate(i->rates, 3);
50664 } else {
50665- ACCESS_ONCE(ads->ctl13) = 0;
50666- ACCESS_ONCE(ads->ctl14) = 0;
50667+ ACCESS_ONCE_RW(ads->ctl13) = 0;
50668+ ACCESS_ONCE_RW(ads->ctl14) = 0;
50669 }
50670
50671 ads->ctl20 = 0;
50672@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50673
50674 ctl17 = SM(i->keytype, AR_EncrType);
50675 if (!i->is_first) {
50676- ACCESS_ONCE(ads->ctl11) = 0;
50677- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
50678- ACCESS_ONCE(ads->ctl15) = 0;
50679- ACCESS_ONCE(ads->ctl16) = 0;
50680- ACCESS_ONCE(ads->ctl17) = ctl17;
50681- ACCESS_ONCE(ads->ctl18) = 0;
50682- ACCESS_ONCE(ads->ctl19) = 0;
50683+ ACCESS_ONCE_RW(ads->ctl11) = 0;
50684+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
50685+ ACCESS_ONCE_RW(ads->ctl15) = 0;
50686+ ACCESS_ONCE_RW(ads->ctl16) = 0;
50687+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
50688+ ACCESS_ONCE_RW(ads->ctl18) = 0;
50689+ ACCESS_ONCE_RW(ads->ctl19) = 0;
50690 return;
50691 }
50692
50693- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
50694+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
50695 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
50696 | SM(i->txpower, AR_XmitPower)
50697 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
50698@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50699 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
50700 ctl12 |= SM(val, AR_PAPRDChainMask);
50701
50702- ACCESS_ONCE(ads->ctl12) = ctl12;
50703- ACCESS_ONCE(ads->ctl17) = ctl17;
50704+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
50705+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
50706
50707- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
50708+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
50709 | set11nPktDurRTSCTS(i->rates, 1);
50710
50711- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
50712+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
50713 | set11nPktDurRTSCTS(i->rates, 3);
50714
50715- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
50716+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
50717 | set11nRateFlags(i->rates, 1)
50718 | set11nRateFlags(i->rates, 2)
50719 | set11nRateFlags(i->rates, 3)
50720 | SM(i->rtscts_rate, AR_RTSCTSRate);
50721
50722- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
50723+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
50724 }
50725
50726 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
50727diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
50728index 0acd4b5..0591c91 100644
50729--- a/drivers/net/wireless/ath/ath9k/hw.h
50730+++ b/drivers/net/wireless/ath/ath9k/hw.h
50731@@ -629,7 +629,7 @@ struct ath_hw_private_ops {
50732
50733 /* ANI */
50734 void (*ani_cache_ini_regs)(struct ath_hw *ah);
50735-};
50736+} __no_const;
50737
50738 /**
50739 * struct ath_spec_scan - parameters for Atheros spectral scan
50740@@ -706,7 +706,7 @@ struct ath_hw_ops {
50741 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
50742 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
50743 #endif
50744-};
50745+} __no_const;
50746
50747 struct ath_nf_limits {
50748 s16 max;
50749diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
50750index 92190da..f3a4c4c 100644
50751--- a/drivers/net/wireless/b43/phy_lp.c
50752+++ b/drivers/net/wireless/b43/phy_lp.c
50753@@ -2514,7 +2514,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
50754 {
50755 struct ssb_bus *bus = dev->dev->sdev->bus;
50756
50757- static const struct b206x_channel *chandata = NULL;
50758+ const struct b206x_channel *chandata = NULL;
50759 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
50760 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
50761 u16 old_comm15, scale;
50762diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
50763index dc1d20c..f7a4f06 100644
50764--- a/drivers/net/wireless/iwlegacy/3945-mac.c
50765+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
50766@@ -3633,7 +3633,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
50767 */
50768 if (il3945_mod_params.disable_hw_scan) {
50769 D_INFO("Disabling hw_scan\n");
50770- il3945_mac_ops.hw_scan = NULL;
50771+ pax_open_kernel();
50772+ *(void **)&il3945_mac_ops.hw_scan = NULL;
50773+ pax_close_kernel();
50774 }
50775
50776 D_INFO("*** LOAD DRIVER ***\n");
50777diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50778index 0ffb6ff..c0b7f0e 100644
50779--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50780+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50781@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
50782 {
50783 struct iwl_priv *priv = file->private_data;
50784 char buf[64];
50785- int buf_size;
50786+ size_t buf_size;
50787 u32 offset, len;
50788
50789 memset(buf, 0, sizeof(buf));
50790@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
50791 struct iwl_priv *priv = file->private_data;
50792
50793 char buf[8];
50794- int buf_size;
50795+ size_t buf_size;
50796 u32 reset_flag;
50797
50798 memset(buf, 0, sizeof(buf));
50799@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
50800 {
50801 struct iwl_priv *priv = file->private_data;
50802 char buf[8];
50803- int buf_size;
50804+ size_t buf_size;
50805 int ht40;
50806
50807 memset(buf, 0, sizeof(buf));
50808@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
50809 {
50810 struct iwl_priv *priv = file->private_data;
50811 char buf[8];
50812- int buf_size;
50813+ size_t buf_size;
50814 int value;
50815
50816 memset(buf, 0, sizeof(buf));
50817@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
50818 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
50819 DEBUGFS_READ_FILE_OPS(current_sleep_command);
50820
50821-static const char *fmt_value = " %-30s %10u\n";
50822-static const char *fmt_hex = " %-30s 0x%02X\n";
50823-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
50824-static const char *fmt_header =
50825+static const char fmt_value[] = " %-30s %10u\n";
50826+static const char fmt_hex[] = " %-30s 0x%02X\n";
50827+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
50828+static const char fmt_header[] =
50829 "%-32s current cumulative delta max\n";
50830
50831 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
50832@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
50833 {
50834 struct iwl_priv *priv = file->private_data;
50835 char buf[8];
50836- int buf_size;
50837+ size_t buf_size;
50838 int clear;
50839
50840 memset(buf, 0, sizeof(buf));
50841@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
50842 {
50843 struct iwl_priv *priv = file->private_data;
50844 char buf[8];
50845- int buf_size;
50846+ size_t buf_size;
50847 int trace;
50848
50849 memset(buf, 0, sizeof(buf));
50850@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
50851 {
50852 struct iwl_priv *priv = file->private_data;
50853 char buf[8];
50854- int buf_size;
50855+ size_t buf_size;
50856 int missed;
50857
50858 memset(buf, 0, sizeof(buf));
50859@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
50860
50861 struct iwl_priv *priv = file->private_data;
50862 char buf[8];
50863- int buf_size;
50864+ size_t buf_size;
50865 int plcp;
50866
50867 memset(buf, 0, sizeof(buf));
50868@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
50869
50870 struct iwl_priv *priv = file->private_data;
50871 char buf[8];
50872- int buf_size;
50873+ size_t buf_size;
50874 int flush;
50875
50876 memset(buf, 0, sizeof(buf));
50877@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
50878
50879 struct iwl_priv *priv = file->private_data;
50880 char buf[8];
50881- int buf_size;
50882+ size_t buf_size;
50883 int rts;
50884
50885 if (!priv->cfg->ht_params)
50886@@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
50887 {
50888 struct iwl_priv *priv = file->private_data;
50889 char buf[8];
50890- int buf_size;
50891+ size_t buf_size;
50892
50893 memset(buf, 0, sizeof(buf));
50894 buf_size = min(count, sizeof(buf) - 1);
50895@@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
50896 struct iwl_priv *priv = file->private_data;
50897 u32 event_log_flag;
50898 char buf[8];
50899- int buf_size;
50900+ size_t buf_size;
50901
50902 /* check that the interface is up */
50903 if (!iwl_is_ready(priv))
50904@@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
50905 struct iwl_priv *priv = file->private_data;
50906 char buf[8];
50907 u32 calib_disabled;
50908- int buf_size;
50909+ size_t buf_size;
50910
50911 memset(buf, 0, sizeof(buf));
50912 buf_size = min(count, sizeof(buf) - 1);
50913diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
50914index 788085b..0bc852a 100644
50915--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
50916+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
50917@@ -1598,7 +1598,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
50918 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
50919
50920 char buf[8];
50921- int buf_size;
50922+ size_t buf_size;
50923 u32 reset_flag;
50924
50925 memset(buf, 0, sizeof(buf));
50926@@ -1619,7 +1619,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
50927 {
50928 struct iwl_trans *trans = file->private_data;
50929 char buf[8];
50930- int buf_size;
50931+ size_t buf_size;
50932 int csr;
50933
50934 memset(buf, 0, sizeof(buf));
50935diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
50936index a312c65..162b13a 100644
50937--- a/drivers/net/wireless/mac80211_hwsim.c
50938+++ b/drivers/net/wireless/mac80211_hwsim.c
50939@@ -2573,20 +2573,20 @@ static int __init init_mac80211_hwsim(void)
50940 if (channels < 1)
50941 return -EINVAL;
50942
50943- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
50944- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
50945- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
50946- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
50947- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
50948- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
50949- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
50950- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
50951- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
50952- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
50953- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
50954- mac80211_hwsim_assign_vif_chanctx;
50955- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
50956- mac80211_hwsim_unassign_vif_chanctx;
50957+ pax_open_kernel();
50958+ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
50959+ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
50960+ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
50961+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
50962+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
50963+ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
50964+ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
50965+ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
50966+ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
50967+ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
50968+ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
50969+ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
50970+ pax_close_kernel();
50971
50972 spin_lock_init(&hwsim_radio_lock);
50973 INIT_LIST_HEAD(&hwsim_radios);
50974diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
50975index d2a9a08..0cb175d 100644
50976--- a/drivers/net/wireless/rndis_wlan.c
50977+++ b/drivers/net/wireless/rndis_wlan.c
50978@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
50979
50980 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
50981
50982- if (rts_threshold < 0 || rts_threshold > 2347)
50983+ if (rts_threshold > 2347)
50984 rts_threshold = 2347;
50985
50986 tmp = cpu_to_le32(rts_threshold);
50987diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
50988index d13f25c..2573994 100644
50989--- a/drivers/net/wireless/rt2x00/rt2x00.h
50990+++ b/drivers/net/wireless/rt2x00/rt2x00.h
50991@@ -375,7 +375,7 @@ struct rt2x00_intf {
50992 * for hardware which doesn't support hardware
50993 * sequence counting.
50994 */
50995- atomic_t seqno;
50996+ atomic_unchecked_t seqno;
50997 };
50998
50999 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
51000diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
51001index 5642ccc..01f03eb 100644
51002--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
51003+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
51004@@ -250,9 +250,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
51005 * sequence counter given by mac80211.
51006 */
51007 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
51008- seqno = atomic_add_return(0x10, &intf->seqno);
51009+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
51010 else
51011- seqno = atomic_read(&intf->seqno);
51012+ seqno = atomic_read_unchecked(&intf->seqno);
51013
51014 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
51015 hdr->seq_ctrl |= cpu_to_le16(seqno);
51016diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
51017index b661f896..ddf7d2b 100644
51018--- a/drivers/net/wireless/ti/wl1251/sdio.c
51019+++ b/drivers/net/wireless/ti/wl1251/sdio.c
51020@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
51021
51022 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
51023
51024- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
51025- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
51026+ pax_open_kernel();
51027+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
51028+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
51029+ pax_close_kernel();
51030
51031 wl1251_info("using dedicated interrupt line");
51032 } else {
51033- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
51034- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
51035+ pax_open_kernel();
51036+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
51037+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
51038+ pax_close_kernel();
51039
51040 wl1251_info("using SDIO interrupt");
51041 }
51042diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
51043index d50dfac..0a6f5be3 100644
51044--- a/drivers/net/wireless/ti/wl12xx/main.c
51045+++ b/drivers/net/wireless/ti/wl12xx/main.c
51046@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
51047 sizeof(wl->conf.mem));
51048
51049 /* read data preparation is only needed by wl127x */
51050- wl->ops->prepare_read = wl127x_prepare_read;
51051+ pax_open_kernel();
51052+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
51053+ pax_close_kernel();
51054
51055 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
51056 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
51057@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
51058 sizeof(wl->conf.mem));
51059
51060 /* read data preparation is only needed by wl127x */
51061- wl->ops->prepare_read = wl127x_prepare_read;
51062+ pax_open_kernel();
51063+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
51064+ pax_close_kernel();
51065
51066 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
51067 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
51068diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
51069index de5b4fa..7996ec6 100644
51070--- a/drivers/net/wireless/ti/wl18xx/main.c
51071+++ b/drivers/net/wireless/ti/wl18xx/main.c
51072@@ -1900,8 +1900,10 @@ static int wl18xx_setup(struct wl1271 *wl)
51073 }
51074
51075 if (!checksum_param) {
51076- wl18xx_ops.set_rx_csum = NULL;
51077- wl18xx_ops.init_vif = NULL;
51078+ pax_open_kernel();
51079+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
51080+ *(void **)&wl18xx_ops.init_vif = NULL;
51081+ pax_close_kernel();
51082 }
51083
51084 /* Enable 11a Band only if we have 5G antennas */
51085diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
51086index a912dc0..a8225ba 100644
51087--- a/drivers/net/wireless/zd1211rw/zd_usb.c
51088+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
51089@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
51090 {
51091 struct zd_usb *usb = urb->context;
51092 struct zd_usb_interrupt *intr = &usb->intr;
51093- int len;
51094+ unsigned int len;
51095 u16 int_num;
51096
51097 ZD_ASSERT(in_interrupt());
51098diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
51099index 683671a..4519fc2 100644
51100--- a/drivers/nfc/nfcwilink.c
51101+++ b/drivers/nfc/nfcwilink.c
51102@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
51103
51104 static int nfcwilink_probe(struct platform_device *pdev)
51105 {
51106- static struct nfcwilink *drv;
51107+ struct nfcwilink *drv;
51108 int rc;
51109 __u32 protocols;
51110
51111diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
51112index d93b2b6..ae50401 100644
51113--- a/drivers/oprofile/buffer_sync.c
51114+++ b/drivers/oprofile/buffer_sync.c
51115@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
51116 if (cookie == NO_COOKIE)
51117 offset = pc;
51118 if (cookie == INVALID_COOKIE) {
51119- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
51120+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
51121 offset = pc;
51122 }
51123 if (cookie != last_cookie) {
51124@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
51125 /* add userspace sample */
51126
51127 if (!mm) {
51128- atomic_inc(&oprofile_stats.sample_lost_no_mm);
51129+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
51130 return 0;
51131 }
51132
51133 cookie = lookup_dcookie(mm, s->eip, &offset);
51134
51135 if (cookie == INVALID_COOKIE) {
51136- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
51137+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
51138 return 0;
51139 }
51140
51141@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
51142 /* ignore backtraces if failed to add a sample */
51143 if (state == sb_bt_start) {
51144 state = sb_bt_ignore;
51145- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
51146+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
51147 }
51148 }
51149 release_mm(mm);
51150diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
51151index c0cc4e7..44d4e54 100644
51152--- a/drivers/oprofile/event_buffer.c
51153+++ b/drivers/oprofile/event_buffer.c
51154@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
51155 }
51156
51157 if (buffer_pos == buffer_size) {
51158- atomic_inc(&oprofile_stats.event_lost_overflow);
51159+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
51160 return;
51161 }
51162
51163diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
51164index ed2c3ec..deda85a 100644
51165--- a/drivers/oprofile/oprof.c
51166+++ b/drivers/oprofile/oprof.c
51167@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
51168 if (oprofile_ops.switch_events())
51169 return;
51170
51171- atomic_inc(&oprofile_stats.multiplex_counter);
51172+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
51173 start_switch_worker();
51174 }
51175
51176diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
51177index ee2cfce..7f8f699 100644
51178--- a/drivers/oprofile/oprofile_files.c
51179+++ b/drivers/oprofile/oprofile_files.c
51180@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
51181
51182 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
51183
51184-static ssize_t timeout_read(struct file *file, char __user *buf,
51185+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
51186 size_t count, loff_t *offset)
51187 {
51188 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
51189diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
51190index 59659ce..6c860a0 100644
51191--- a/drivers/oprofile/oprofile_stats.c
51192+++ b/drivers/oprofile/oprofile_stats.c
51193@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
51194 cpu_buf->sample_invalid_eip = 0;
51195 }
51196
51197- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
51198- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
51199- atomic_set(&oprofile_stats.event_lost_overflow, 0);
51200- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
51201- atomic_set(&oprofile_stats.multiplex_counter, 0);
51202+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
51203+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
51204+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
51205+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
51206+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
51207 }
51208
51209
51210diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
51211index 1fc622b..8c48fc3 100644
51212--- a/drivers/oprofile/oprofile_stats.h
51213+++ b/drivers/oprofile/oprofile_stats.h
51214@@ -13,11 +13,11 @@
51215 #include <linux/atomic.h>
51216
51217 struct oprofile_stat_struct {
51218- atomic_t sample_lost_no_mm;
51219- atomic_t sample_lost_no_mapping;
51220- atomic_t bt_lost_no_mapping;
51221- atomic_t event_lost_overflow;
51222- atomic_t multiplex_counter;
51223+ atomic_unchecked_t sample_lost_no_mm;
51224+ atomic_unchecked_t sample_lost_no_mapping;
51225+ atomic_unchecked_t bt_lost_no_mapping;
51226+ atomic_unchecked_t event_lost_overflow;
51227+ atomic_unchecked_t multiplex_counter;
51228 };
51229
51230 extern struct oprofile_stat_struct oprofile_stats;
51231diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
51232index 3f49345..c750d0b 100644
51233--- a/drivers/oprofile/oprofilefs.c
51234+++ b/drivers/oprofile/oprofilefs.c
51235@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
51236
51237 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
51238 {
51239- atomic_t *val = file->private_data;
51240- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
51241+ atomic_unchecked_t *val = file->private_data;
51242+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
51243 }
51244
51245
51246@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
51247
51248
51249 int oprofilefs_create_ro_atomic(struct dentry *root,
51250- char const *name, atomic_t *val)
51251+ char const *name, atomic_unchecked_t *val)
51252 {
51253 return __oprofilefs_create_file(root, name,
51254 &atomic_ro_fops, 0444, val);
51255diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
51256index 61be1d9..dec05d7 100644
51257--- a/drivers/oprofile/timer_int.c
51258+++ b/drivers/oprofile/timer_int.c
51259@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
51260 return NOTIFY_OK;
51261 }
51262
51263-static struct notifier_block __refdata oprofile_cpu_notifier = {
51264+static struct notifier_block oprofile_cpu_notifier = {
51265 .notifier_call = oprofile_cpu_notify,
51266 };
51267
51268diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
51269index 3b47080..6cd05dd 100644
51270--- a/drivers/parport/procfs.c
51271+++ b/drivers/parport/procfs.c
51272@@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
51273
51274 *ppos += len;
51275
51276- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
51277+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
51278 }
51279
51280 #ifdef CONFIG_PARPORT_1284
51281@@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
51282
51283 *ppos += len;
51284
51285- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
51286+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
51287 }
51288 #endif /* IEEE1284.3 support. */
51289
51290diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
51291index 8dcccff..35d701d 100644
51292--- a/drivers/pci/hotplug/acpiphp_ibm.c
51293+++ b/drivers/pci/hotplug/acpiphp_ibm.c
51294@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
51295 goto init_cleanup;
51296 }
51297
51298- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
51299+ pax_open_kernel();
51300+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
51301+ pax_close_kernel();
51302 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
51303
51304 return retval;
51305diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
51306index 04fcd78..39e83f1 100644
51307--- a/drivers/pci/hotplug/cpcihp_generic.c
51308+++ b/drivers/pci/hotplug/cpcihp_generic.c
51309@@ -73,7 +73,6 @@ static u16 port;
51310 static unsigned int enum_bit;
51311 static u8 enum_mask;
51312
51313-static struct cpci_hp_controller_ops generic_hpc_ops;
51314 static struct cpci_hp_controller generic_hpc;
51315
51316 static int __init validate_parameters(void)
51317@@ -139,6 +138,10 @@ static int query_enum(void)
51318 return ((value & enum_mask) == enum_mask);
51319 }
51320
51321+static struct cpci_hp_controller_ops generic_hpc_ops = {
51322+ .query_enum = query_enum,
51323+};
51324+
51325 static int __init cpcihp_generic_init(void)
51326 {
51327 int status;
51328@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
51329 pci_dev_put(dev);
51330
51331 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
51332- generic_hpc_ops.query_enum = query_enum;
51333 generic_hpc.ops = &generic_hpc_ops;
51334
51335 status = cpci_hp_register_controller(&generic_hpc);
51336diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
51337index 6757b3e..d3bad62 100644
51338--- a/drivers/pci/hotplug/cpcihp_zt5550.c
51339+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
51340@@ -59,7 +59,6 @@
51341 /* local variables */
51342 static bool debug;
51343 static bool poll;
51344-static struct cpci_hp_controller_ops zt5550_hpc_ops;
51345 static struct cpci_hp_controller zt5550_hpc;
51346
51347 /* Primary cPCI bus bridge device */
51348@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
51349 return 0;
51350 }
51351
51352+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
51353+ .query_enum = zt5550_hc_query_enum,
51354+};
51355+
51356 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
51357 {
51358 int status;
51359@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
51360 dbg("returned from zt5550_hc_config");
51361
51362 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
51363- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
51364 zt5550_hpc.ops = &zt5550_hpc_ops;
51365 if(!poll) {
51366 zt5550_hpc.irq = hc_dev->irq;
51367 zt5550_hpc.irq_flags = IRQF_SHARED;
51368 zt5550_hpc.dev_id = hc_dev;
51369
51370- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
51371- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
51372- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
51373+ pax_open_kernel();
51374+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
51375+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
51376+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
51377+ pax_close_kernel();
51378 } else {
51379 info("using ENUM# polling mode");
51380 }
51381diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
51382index 0968a9b..5a00edf 100644
51383--- a/drivers/pci/hotplug/cpqphp_nvram.c
51384+++ b/drivers/pci/hotplug/cpqphp_nvram.c
51385@@ -427,9 +427,13 @@ static u32 store_HRT (void __iomem *rom_start)
51386
51387 void compaq_nvram_init (void __iomem *rom_start)
51388 {
51389+
51390+#ifndef CONFIG_PAX_KERNEXEC
51391 if (rom_start) {
51392 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
51393 }
51394+#endif
51395+
51396 dbg("int15 entry = %p\n", compaq_int15_entry_point);
51397
51398 /* initialize our int15 lock */
51399diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
51400index 56d8486..f26113f 100644
51401--- a/drivers/pci/hotplug/pci_hotplug_core.c
51402+++ b/drivers/pci/hotplug/pci_hotplug_core.c
51403@@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
51404 return -EINVAL;
51405 }
51406
51407- slot->ops->owner = owner;
51408- slot->ops->mod_name = mod_name;
51409+ pax_open_kernel();
51410+ *(struct module **)&slot->ops->owner = owner;
51411+ *(const char **)&slot->ops->mod_name = mod_name;
51412+ pax_close_kernel();
51413
51414 mutex_lock(&pci_hp_mutex);
51415 /*
51416diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
51417index a2297db..7c7d161 100644
51418--- a/drivers/pci/hotplug/pciehp_core.c
51419+++ b/drivers/pci/hotplug/pciehp_core.c
51420@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
51421 struct slot *slot = ctrl->slot;
51422 struct hotplug_slot *hotplug = NULL;
51423 struct hotplug_slot_info *info = NULL;
51424- struct hotplug_slot_ops *ops = NULL;
51425+ hotplug_slot_ops_no_const *ops = NULL;
51426 char name[SLOT_NAME_SIZE];
51427 int retval = -ENOMEM;
51428
51429diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
51430index 13f3d30..363cb44 100644
51431--- a/drivers/pci/msi.c
51432+++ b/drivers/pci/msi.c
51433@@ -523,8 +523,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
51434 {
51435 struct attribute **msi_attrs;
51436 struct attribute *msi_attr;
51437- struct device_attribute *msi_dev_attr;
51438- struct attribute_group *msi_irq_group;
51439+ device_attribute_no_const *msi_dev_attr;
51440+ attribute_group_no_const *msi_irq_group;
51441 const struct attribute_group **msi_irq_groups;
51442 struct msi_desc *entry;
51443 int ret = -ENOMEM;
51444@@ -584,7 +584,7 @@ error_attrs:
51445 count = 0;
51446 msi_attr = msi_attrs[count];
51447 while (msi_attr) {
51448- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
51449+ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
51450 kfree(msi_attr->name);
51451 kfree(msi_dev_attr);
51452 ++count;
51453diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
51454index 9ff0a90..e819dda 100644
51455--- a/drivers/pci/pci-sysfs.c
51456+++ b/drivers/pci/pci-sysfs.c
51457@@ -1134,7 +1134,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
51458 {
51459 /* allocate attribute structure, piggyback attribute name */
51460 int name_len = write_combine ? 13 : 10;
51461- struct bin_attribute *res_attr;
51462+ bin_attribute_no_const *res_attr;
51463 int retval;
51464
51465 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
51466@@ -1311,7 +1311,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
51467 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
51468 {
51469 int retval;
51470- struct bin_attribute *attr;
51471+ bin_attribute_no_const *attr;
51472
51473 /* If the device has VPD, try to expose it in sysfs. */
51474 if (dev->vpd) {
51475@@ -1358,7 +1358,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
51476 {
51477 int retval;
51478 int rom_size = 0;
51479- struct bin_attribute *attr;
51480+ bin_attribute_no_const *attr;
51481
51482 if (!sysfs_initialized)
51483 return -EACCES;
51484diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
51485index 0601890..dc15007 100644
51486--- a/drivers/pci/pci.h
51487+++ b/drivers/pci/pci.h
51488@@ -91,7 +91,7 @@ struct pci_vpd_ops {
51489 struct pci_vpd {
51490 unsigned int len;
51491 const struct pci_vpd_ops *ops;
51492- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
51493+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
51494 };
51495
51496 int pci_vpd_pci22_init(struct pci_dev *dev);
51497diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
51498index e1e7026..d28dd33 100644
51499--- a/drivers/pci/pcie/aspm.c
51500+++ b/drivers/pci/pcie/aspm.c
51501@@ -27,9 +27,9 @@
51502 #define MODULE_PARAM_PREFIX "pcie_aspm."
51503
51504 /* Note: those are not register definitions */
51505-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
51506-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
51507-#define ASPM_STATE_L1 (4) /* L1 state */
51508+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
51509+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
51510+#define ASPM_STATE_L1 (4U) /* L1 state */
51511 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
51512 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
51513
51514diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
51515index e3cf8a2..be1baf0 100644
51516--- a/drivers/pci/probe.c
51517+++ b/drivers/pci/probe.c
51518@@ -176,7 +176,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
51519 struct pci_bus_region region, inverted_region;
51520 bool bar_too_big = false, bar_too_high = false, bar_invalid = false;
51521
51522- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
51523+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
51524
51525 /* No printks while decoding is disabled! */
51526 if (!dev->mmio_always_on) {
51527diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
51528index 3f155e7..0f4b1f0 100644
51529--- a/drivers/pci/proc.c
51530+++ b/drivers/pci/proc.c
51531@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
51532 static int __init pci_proc_init(void)
51533 {
51534 struct pci_dev *dev = NULL;
51535+
51536+#ifdef CONFIG_GRKERNSEC_PROC_ADD
51537+#ifdef CONFIG_GRKERNSEC_PROC_USER
51538+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
51539+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51540+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
51541+#endif
51542+#else
51543 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
51544+#endif
51545 proc_create("devices", 0, proc_bus_pci_dir,
51546 &proc_bus_pci_dev_operations);
51547 proc_initialized = 1;
51548diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
51549index 7f1a2e2..bc4b405 100644
51550--- a/drivers/platform/chrome/chromeos_laptop.c
51551+++ b/drivers/platform/chrome/chromeos_laptop.c
51552@@ -395,7 +395,7 @@ static struct chromeos_laptop cr48 = {
51553 .callback = chromeos_laptop_dmi_matched, \
51554 .driver_data = (void *)&board_
51555
51556-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
51557+static struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
51558 {
51559 .ident = "Samsung Series 5 550",
51560 .matches = {
51561diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
51562index 297b664..ab91e39 100644
51563--- a/drivers/platform/x86/alienware-wmi.c
51564+++ b/drivers/platform/x86/alienware-wmi.c
51565@@ -133,7 +133,7 @@ struct wmax_led_args {
51566 } __packed;
51567
51568 static struct platform_device *platform_device;
51569-static struct device_attribute *zone_dev_attrs;
51570+static device_attribute_no_const *zone_dev_attrs;
51571 static struct attribute **zone_attrs;
51572 static struct platform_zone *zone_data;
51573
51574@@ -144,7 +144,7 @@ static struct platform_driver platform_driver = {
51575 }
51576 };
51577
51578-static struct attribute_group zone_attribute_group = {
51579+static attribute_group_no_const zone_attribute_group = {
51580 .name = "rgb_zones",
51581 };
51582
51583diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
51584index 3c6cced..12e0771 100644
51585--- a/drivers/platform/x86/asus-wmi.c
51586+++ b/drivers/platform/x86/asus-wmi.c
51587@@ -1592,6 +1592,10 @@ static int show_dsts(struct seq_file *m, void *data)
51588 int err;
51589 u32 retval = -1;
51590
51591+#ifdef CONFIG_GRKERNSEC_KMEM
51592+ return -EPERM;
51593+#endif
51594+
51595 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
51596
51597 if (err < 0)
51598@@ -1608,6 +1612,10 @@ static int show_devs(struct seq_file *m, void *data)
51599 int err;
51600 u32 retval = -1;
51601
51602+#ifdef CONFIG_GRKERNSEC_KMEM
51603+ return -EPERM;
51604+#endif
51605+
51606 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
51607 &retval);
51608
51609@@ -1632,6 +1640,10 @@ static int show_call(struct seq_file *m, void *data)
51610 union acpi_object *obj;
51611 acpi_status status;
51612
51613+#ifdef CONFIG_GRKERNSEC_KMEM
51614+ return -EPERM;
51615+#endif
51616+
51617 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
51618 1, asus->debug.method_id,
51619 &input, &output);
51620diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
51621index 62f8030..c7f2a45 100644
51622--- a/drivers/platform/x86/msi-laptop.c
51623+++ b/drivers/platform/x86/msi-laptop.c
51624@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
51625
51626 if (!quirks->ec_read_only) {
51627 /* allow userland write sysfs file */
51628- dev_attr_bluetooth.store = store_bluetooth;
51629- dev_attr_wlan.store = store_wlan;
51630- dev_attr_threeg.store = store_threeg;
51631- dev_attr_bluetooth.attr.mode |= S_IWUSR;
51632- dev_attr_wlan.attr.mode |= S_IWUSR;
51633- dev_attr_threeg.attr.mode |= S_IWUSR;
51634+ pax_open_kernel();
51635+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
51636+ *(void **)&dev_attr_wlan.store = store_wlan;
51637+ *(void **)&dev_attr_threeg.store = store_threeg;
51638+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
51639+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
51640+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
51641+ pax_close_kernel();
51642 }
51643
51644 /* disable hardware control by fn key */
51645diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
51646index 70222f2..8c8ce66 100644
51647--- a/drivers/platform/x86/msi-wmi.c
51648+++ b/drivers/platform/x86/msi-wmi.c
51649@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
51650 static void msi_wmi_notify(u32 value, void *context)
51651 {
51652 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
51653- static struct key_entry *key;
51654+ struct key_entry *key;
51655 union acpi_object *obj;
51656 acpi_status status;
51657
51658diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
51659index 9c5a074..06c976a 100644
51660--- a/drivers/platform/x86/sony-laptop.c
51661+++ b/drivers/platform/x86/sony-laptop.c
51662@@ -2527,7 +2527,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
51663 }
51664
51665 /* High speed charging function */
51666-static struct device_attribute *hsc_handle;
51667+static device_attribute_no_const *hsc_handle;
51668
51669 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
51670 struct device_attribute *attr,
51671@@ -2601,7 +2601,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
51672 }
51673
51674 /* low battery function */
51675-static struct device_attribute *lowbatt_handle;
51676+static device_attribute_no_const *lowbatt_handle;
51677
51678 static ssize_t sony_nc_lowbatt_store(struct device *dev,
51679 struct device_attribute *attr,
51680@@ -2667,7 +2667,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
51681 }
51682
51683 /* fan speed function */
51684-static struct device_attribute *fan_handle, *hsf_handle;
51685+static device_attribute_no_const *fan_handle, *hsf_handle;
51686
51687 static ssize_t sony_nc_hsfan_store(struct device *dev,
51688 struct device_attribute *attr,
51689@@ -2774,7 +2774,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
51690 }
51691
51692 /* USB charge function */
51693-static struct device_attribute *uc_handle;
51694+static device_attribute_no_const *uc_handle;
51695
51696 static ssize_t sony_nc_usb_charge_store(struct device *dev,
51697 struct device_attribute *attr,
51698@@ -2848,7 +2848,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
51699 }
51700
51701 /* Panel ID function */
51702-static struct device_attribute *panel_handle;
51703+static device_attribute_no_const *panel_handle;
51704
51705 static ssize_t sony_nc_panelid_show(struct device *dev,
51706 struct device_attribute *attr, char *buffer)
51707@@ -2895,7 +2895,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
51708 }
51709
51710 /* smart connect function */
51711-static struct device_attribute *sc_handle;
51712+static device_attribute_no_const *sc_handle;
51713
51714 static ssize_t sony_nc_smart_conn_store(struct device *dev,
51715 struct device_attribute *attr,
51716diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
51717index d82f196..5458f34 100644
51718--- a/drivers/platform/x86/thinkpad_acpi.c
51719+++ b/drivers/platform/x86/thinkpad_acpi.c
51720@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
51721 return 0;
51722 }
51723
51724-void static hotkey_mask_warn_incomplete_mask(void)
51725+static void hotkey_mask_warn_incomplete_mask(void)
51726 {
51727 /* log only what the user can fix... */
51728 const u32 wantedmask = hotkey_driver_mask &
51729@@ -2438,10 +2438,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
51730 && !tp_features.bright_unkfw)
51731 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
51732 }
51733+}
51734
51735 #undef TPACPI_COMPARE_KEY
51736 #undef TPACPI_MAY_SEND_KEY
51737-}
51738
51739 /*
51740 * Polling driver
51741diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
51742index 438d4c7..ca8a2fb 100644
51743--- a/drivers/pnp/pnpbios/bioscalls.c
51744+++ b/drivers/pnp/pnpbios/bioscalls.c
51745@@ -59,7 +59,7 @@ do { \
51746 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
51747 } while(0)
51748
51749-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
51750+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
51751 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
51752
51753 /*
51754@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
51755
51756 cpu = get_cpu();
51757 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
51758+
51759+ pax_open_kernel();
51760 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
51761+ pax_close_kernel();
51762
51763 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
51764 spin_lock_irqsave(&pnp_bios_lock, flags);
51765@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
51766 :"memory");
51767 spin_unlock_irqrestore(&pnp_bios_lock, flags);
51768
51769+ pax_open_kernel();
51770 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
51771+ pax_close_kernel();
51772+
51773 put_cpu();
51774
51775 /* If we get here and this is set then the PnP BIOS faulted on us. */
51776@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
51777 return status;
51778 }
51779
51780-void pnpbios_calls_init(union pnp_bios_install_struct *header)
51781+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
51782 {
51783 int i;
51784
51785@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
51786 pnp_bios_callpoint.offset = header->fields.pm16offset;
51787 pnp_bios_callpoint.segment = PNP_CS16;
51788
51789+ pax_open_kernel();
51790+
51791 for_each_possible_cpu(i) {
51792 struct desc_struct *gdt = get_cpu_gdt_table(i);
51793 if (!gdt)
51794@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
51795 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
51796 (unsigned long)__va(header->fields.pm16dseg));
51797 }
51798+
51799+ pax_close_kernel();
51800 }
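The bioscalls hunks make bad_bios_desc const (the type byte going from 0x4092 to 0x4093 pre-sets the descriptor's accessed bit — plausibly so the CPU never has to write it back into a descriptor that now lives in read-only memory) and bracket every GDT store with pax_open_kernel()/pax_close_kernel(), a short window in which otherwise read-only kernel data may be written. A user-space sketch of the window idea, modeled with mprotect() and local names only:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#define PAGE 4096       /* assume 4 KiB pages for the sketch */

static int *counter;

static void open_window(void)   /* stands in for pax_open_kernel() */
{
        mprotect(counter, PAGE, PROT_READ | PROT_WRITE);
}

static void close_window(void)  /* stands in for pax_close_kernel() */
{
        mprotect(counter, PAGE, PROT_READ);
}

int main(void)
{
        counter = mmap(NULL, PAGE, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (counter == MAP_FAILED)
                return 1;
        mprotect(counter, PAGE, PROT_READ);   /* read-only by default */

        open_window();
        *counter = 42;                        /* brief writable window */
        close_window();

        printf("%d\n", *counter);
        return 0;
}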
51801diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
51802index 0c52e2a..3421ab7 100644
51803--- a/drivers/power/pda_power.c
51804+++ b/drivers/power/pda_power.c
51805@@ -37,7 +37,11 @@ static int polling;
51806
51807 #if IS_ENABLED(CONFIG_USB_PHY)
51808 static struct usb_phy *transceiver;
51809-static struct notifier_block otg_nb;
51810+static int otg_handle_notification(struct notifier_block *nb,
51811+ unsigned long event, void *unused);
51812+static struct notifier_block otg_nb = {
51813+ .notifier_call = otg_handle_notification
51814+};
51815 #endif
51816
51817 static struct regulator *ac_draw;
51818@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
51819
51820 #if IS_ENABLED(CONFIG_USB_PHY)
51821 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
51822- otg_nb.notifier_call = otg_handle_notification;
51823 ret = usb_register_notifier(transceiver, &otg_nb);
51824 if (ret) {
51825 dev_err(dev, "failure to register otg notifier\n");
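pda_power previously filled otg_nb.notifier_call at probe time; the hunk forward-declares the callback and initializes the notifier_block statically, so the object needs no runtime store (and could later be made const). The same pattern in plain C, with local stand-ins:

#include <stdio.h>

struct notifier_block {
        int (*notifier_call)(struct notifier_block *nb, unsigned long event);
};

/* forward declaration lets the static initializer reference the
 * callback before its definition */
static int otg_handle_notification(struct notifier_block *nb,
                                   unsigned long event);

static struct notifier_block otg_nb = {
        .notifier_call = otg_handle_notification,
};

static int otg_handle_notification(struct notifier_block *nb,
                                   unsigned long event)
{
        printf("otg event %lu\n", event);
        return 0;
}

int main(void)
{
        return otg_nb.notifier_call(&otg_nb, 1);
}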
51826diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
51827index cc439fd..8fa30df 100644
51828--- a/drivers/power/power_supply.h
51829+++ b/drivers/power/power_supply.h
51830@@ -16,12 +16,12 @@ struct power_supply;
51831
51832 #ifdef CONFIG_SYSFS
51833
51834-extern void power_supply_init_attrs(struct device_type *dev_type);
51835+extern void power_supply_init_attrs(void);
51836 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
51837
51838 #else
51839
51840-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
51841+static inline void power_supply_init_attrs(void) {}
51842 #define power_supply_uevent NULL
51843
51844 #endif /* CONFIG_SYSFS */
51845diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
51846index 5a5a24e..f7a3754 100644
51847--- a/drivers/power/power_supply_core.c
51848+++ b/drivers/power/power_supply_core.c
51849@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
51850 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
51851 EXPORT_SYMBOL_GPL(power_supply_notifier);
51852
51853-static struct device_type power_supply_dev_type;
51854+extern const struct attribute_group *power_supply_attr_groups[];
51855+static struct device_type power_supply_dev_type = {
51856+ .groups = power_supply_attr_groups,
51857+};
51858
51859 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
51860 struct power_supply *supply)
51861@@ -639,7 +642,7 @@ static int __init power_supply_class_init(void)
51862 return PTR_ERR(power_supply_class);
51863
51864 power_supply_class->dev_uevent = power_supply_uevent;
51865- power_supply_init_attrs(&power_supply_dev_type);
51866+ power_supply_init_attrs();
51867
51868 return 0;
51869 }
51870diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
51871index 44420d1..967126e 100644
51872--- a/drivers/power/power_supply_sysfs.c
51873+++ b/drivers/power/power_supply_sysfs.c
51874@@ -230,17 +230,15 @@ static struct attribute_group power_supply_attr_group = {
51875 .is_visible = power_supply_attr_is_visible,
51876 };
51877
51878-static const struct attribute_group *power_supply_attr_groups[] = {
51879+const struct attribute_group *power_supply_attr_groups[] = {
51880 &power_supply_attr_group,
51881 NULL,
51882 };
51883
51884-void power_supply_init_attrs(struct device_type *dev_type)
51885+void power_supply_init_attrs(void)
51886 {
51887 int i;
51888
51889- dev_type->groups = power_supply_attr_groups;
51890-
51891 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
51892 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
51893 }
51894diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
51895index 84419af..268ede8 100644
51896--- a/drivers/powercap/powercap_sys.c
51897+++ b/drivers/powercap/powercap_sys.c
51898@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
51899 struct device_attribute name_attr;
51900 };
51901
51902+static ssize_t show_constraint_name(struct device *dev,
51903+ struct device_attribute *dev_attr,
51904+ char *buf);
51905+
51906 static struct powercap_constraint_attr
51907- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
51908+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
51909+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
51910+ .power_limit_attr = {
51911+ .attr = {
51912+ .name = NULL,
51913+ .mode = S_IWUSR | S_IRUGO
51914+ },
51915+ .show = show_constraint_power_limit_uw,
51916+ .store = store_constraint_power_limit_uw
51917+ },
51918+
51919+ .time_window_attr = {
51920+ .attr = {
51921+ .name = NULL,
51922+ .mode = S_IWUSR | S_IRUGO
51923+ },
51924+ .show = show_constraint_time_window_us,
51925+ .store = store_constraint_time_window_us
51926+ },
51927+
51928+ .max_power_attr = {
51929+ .attr = {
51930+ .name = NULL,
51931+ .mode = S_IRUGO
51932+ },
51933+ .show = show_constraint_max_power_uw,
51934+ .store = NULL
51935+ },
51936+
51937+ .min_power_attr = {
51938+ .attr = {
51939+ .name = NULL,
51940+ .mode = S_IRUGO
51941+ },
51942+ .show = show_constraint_min_power_uw,
51943+ .store = NULL
51944+ },
51945+
51946+ .max_time_window_attr = {
51947+ .attr = {
51948+ .name = NULL,
51949+ .mode = S_IRUGO
51950+ },
51951+ .show = show_constraint_max_time_window_us,
51952+ .store = NULL
51953+ },
51954+
51955+ .min_time_window_attr = {
51956+ .attr = {
51957+ .name = NULL,
51958+ .mode = S_IRUGO
51959+ },
51960+ .show = show_constraint_min_time_window_us,
51961+ .store = NULL
51962+ },
51963+
51964+ .name_attr = {
51965+ .attr = {
51966+ .name = NULL,
51967+ .mode = S_IRUGO
51968+ },
51969+ .show = show_constraint_name,
51970+ .store = NULL
51971+ }
51972+ }
51973+};
51974
51975 /* A list of powercap control_types */
51976 static LIST_HEAD(powercap_cntrl_list);
51977@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
51978 }
51979
51980 static int create_constraint_attribute(int id, const char *name,
51981- int mode,
51982- struct device_attribute *dev_attr,
51983- ssize_t (*show)(struct device *,
51984- struct device_attribute *, char *),
51985- ssize_t (*store)(struct device *,
51986- struct device_attribute *,
51987- const char *, size_t)
51988- )
51989+ struct device_attribute *dev_attr)
51990 {
51991+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
51992
51993- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
51994- id, name);
51995- if (!dev_attr->attr.name)
51996+ if (!name)
51997 return -ENOMEM;
51998- dev_attr->attr.mode = mode;
51999- dev_attr->show = show;
52000- dev_attr->store = store;
52001+
52002+ pax_open_kernel();
52003+ *(const char **)&dev_attr->attr.name = name;
52004+ pax_close_kernel();
52005
52006 return 0;
52007 }
52008@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
52009
52010 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
52011 ret = create_constraint_attribute(i, "power_limit_uw",
52012- S_IWUSR | S_IRUGO,
52013- &constraint_attrs[i].power_limit_attr,
52014- show_constraint_power_limit_uw,
52015- store_constraint_power_limit_uw);
52016+ &constraint_attrs[i].power_limit_attr);
52017 if (ret)
52018 goto err_alloc;
52019 ret = create_constraint_attribute(i, "time_window_us",
52020- S_IWUSR | S_IRUGO,
52021- &constraint_attrs[i].time_window_attr,
52022- show_constraint_time_window_us,
52023- store_constraint_time_window_us);
52024+ &constraint_attrs[i].time_window_attr);
52025 if (ret)
52026 goto err_alloc;
52027- ret = create_constraint_attribute(i, "name", S_IRUGO,
52028- &constraint_attrs[i].name_attr,
52029- show_constraint_name,
52030- NULL);
52031+ ret = create_constraint_attribute(i, "name",
52032+ &constraint_attrs[i].name_attr);
52033 if (ret)
52034 goto err_alloc;
52035- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
52036- &constraint_attrs[i].max_power_attr,
52037- show_constraint_max_power_uw,
52038- NULL);
52039+ ret = create_constraint_attribute(i, "max_power_uw",
52040+ &constraint_attrs[i].max_power_attr);
52041 if (ret)
52042 goto err_alloc;
52043- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
52044- &constraint_attrs[i].min_power_attr,
52045- show_constraint_min_power_uw,
52046- NULL);
52047+ ret = create_constraint_attribute(i, "min_power_uw",
52048+ &constraint_attrs[i].min_power_attr);
52049 if (ret)
52050 goto err_alloc;
52051 ret = create_constraint_attribute(i, "max_time_window_us",
52052- S_IRUGO,
52053- &constraint_attrs[i].max_time_window_attr,
52054- show_constraint_max_time_window_us,
52055- NULL);
52056+ &constraint_attrs[i].max_time_window_attr);
52057 if (ret)
52058 goto err_alloc;
52059 ret = create_constraint_attribute(i, "min_time_window_us",
52060- S_IRUGO,
52061- &constraint_attrs[i].min_time_window_attr,
52062- show_constraint_min_time_window_us,
52063- NULL);
52064+ &constraint_attrs[i].min_time_window_attr);
52065 if (ret)
52066 goto err_alloc;
52067
52068@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
52069 power_zone->zone_dev_attrs[count++] =
52070 &dev_attr_max_energy_range_uj.attr;
52071 if (power_zone->ops->get_energy_uj) {
52072+ pax_open_kernel();
52073 if (power_zone->ops->reset_energy_uj)
52074- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
52075+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
52076 else
52077- dev_attr_energy_uj.attr.mode = S_IRUGO;
52078+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
52079+ pax_close_kernel();
52080 power_zone->zone_dev_attrs[count++] =
52081 &dev_attr_energy_uj.attr;
52082 }
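The powercap hunks pre-build the whole constraint_attrs table at compile time using GCC's [first ... last] range designators, leaving only the kasprintf()'d name to be stored at runtime (through a pax window, since the objects are otherwise read-only). The range-designator half of that, sketched in user space with local names:

#include <stdio.h>
#include <string.h>

#define MAX_CONSTRAINTS 4

struct attr { const char *name; unsigned int mode; };

static struct attr constraint_attrs[MAX_CONSTRAINTS] = {
        /* GCC range designator: every slot gets the same template */
        [0 ... MAX_CONSTRAINTS - 1] = { .name = NULL, .mode = 0444 },
};

int main(void)
{
        char buf[32];

        for (int i = 0; i < MAX_CONSTRAINTS; i++) {
                snprintf(buf, sizeof(buf), "constraint_%d_name", i);
                constraint_attrs[i].name = strdup(buf); /* the only runtime store */
                printf("%s mode %o\n", constraint_attrs[i].name,
                       constraint_attrs[i].mode);
        }
        return 0;
}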
52083diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
52084index ee3de34..bec7285 100644
52085--- a/drivers/ptp/Kconfig
52086+++ b/drivers/ptp/Kconfig
52087@@ -8,7 +8,6 @@ config PTP_1588_CLOCK
52088 tristate "PTP clock support"
52089 depends on NET
52090 select PPS
52091- select NET_PTP_CLASSIFY
52092 help
52093 The IEEE 1588 standard defines a method to precisely
52094 synchronize distributed clocks over Ethernet networks. The
52095diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
52096index 9c5d414..c7900ce 100644
52097--- a/drivers/ptp/ptp_private.h
52098+++ b/drivers/ptp/ptp_private.h
52099@@ -51,7 +51,7 @@ struct ptp_clock {
52100 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
52101 wait_queue_head_t tsev_wq;
52102 int defunct; /* tells readers to go away when clock is being removed */
52103- struct device_attribute *pin_dev_attr;
52104+ device_attribute_no_const *pin_dev_attr;
52105 struct attribute **pin_attr;
52106 struct attribute_group pin_attr_group;
52107 };
52108diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
52109index 302e626..12579af 100644
52110--- a/drivers/ptp/ptp_sysfs.c
52111+++ b/drivers/ptp/ptp_sysfs.c
52112@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
52113 goto no_pin_attr;
52114
52115 for (i = 0; i < n_pins; i++) {
52116- struct device_attribute *da = &ptp->pin_dev_attr[i];
52117+ device_attribute_no_const *da = &ptp->pin_dev_attr[i];
52118 sysfs_attr_init(&da->attr);
52119 da->attr.name = info->pin_config[i].name;
52120 da->attr.mode = 0644;
52121diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
52122index 4c1f999..11078c9 100644
52123--- a/drivers/regulator/core.c
52124+++ b/drivers/regulator/core.c
52125@@ -3391,7 +3391,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
52126 {
52127 const struct regulation_constraints *constraints = NULL;
52128 const struct regulator_init_data *init_data;
52129- static atomic_t regulator_no = ATOMIC_INIT(0);
52130+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
52131 struct regulator_dev *rdev;
52132 struct device *dev;
52133 int ret, i;
52134@@ -3461,7 +3461,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
52135 rdev->dev.of_node = of_node_get(config->of_node);
52136 rdev->dev.parent = dev;
52137 dev_set_name(&rdev->dev, "regulator.%d",
52138- atomic_inc_return(&regulator_no) - 1);
52139+ atomic_inc_return_unchecked(&regulator_no) - 1);
52140 ret = device_register(&rdev->dev);
52141 if (ret != 0) {
52142 put_device(&rdev->dev);
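regulator_no is a pure ID counter, so the hunk moves it to atomic_unchecked_t: under PAX_REFCOUNT the ordinary atomic ops trap on signed overflow to catch reference-count bugs, while the *_unchecked variants keep plain wraparound semantics for counters where wrap is harmless. A sketch of the distinction — the checked path is modeled with an explicit (deliberately simplified, not race-free) test:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_int regulator_no = INT_MAX;       /* about to wrap */

static int inc_return_checked(atomic_int *v)
{
        int old = atomic_load(v);

        if (old == INT_MAX) {   /* refuse to overflow, like PAX_REFCOUNT */
                fprintf(stderr, "refcount overflow caught\n");
                abort();
        }
        return atomic_fetch_add(v, 1) + 1;
}

static int inc_return_unchecked(atomic_int *v)
{
        /* wraparound is fine for a pure ID/statistics counter;
         * the unsigned detour assumes two's complement */
        return (int)((unsigned int)atomic_fetch_add(v, 1) + 1u);
}

int main(void)
{
        printf("id %d\n", inc_return_unchecked(&regulator_no)); /* wraps */
        printf("id %d\n", inc_return_checked(&regulator_no));   /* aborts */
        return 0;
}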
52143diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
52144index 2fc4111..6aa88ca 100644
52145--- a/drivers/regulator/max8660.c
52146+++ b/drivers/regulator/max8660.c
52147@@ -424,8 +424,10 @@ static int max8660_probe(struct i2c_client *client,
52148 max8660->shadow_regs[MAX8660_OVER1] = 5;
52149 } else {
52150 /* Otherwise devices can be toggled via software */
52151- max8660_dcdc_ops.enable = max8660_dcdc_enable;
52152- max8660_dcdc_ops.disable = max8660_dcdc_disable;
52153+ pax_open_kernel();
52154+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
52155+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
52156+ pax_close_kernel();
52157 }
52158
52159 /*
52160diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
52161index dbedf17..18ff6b7 100644
52162--- a/drivers/regulator/max8973-regulator.c
52163+++ b/drivers/regulator/max8973-regulator.c
52164@@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
52165 if (!pdata || !pdata->enable_ext_control) {
52166 max->desc.enable_reg = MAX8973_VOUT;
52167 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
52168- max->ops.enable = regulator_enable_regmap;
52169- max->ops.disable = regulator_disable_regmap;
52170- max->ops.is_enabled = regulator_is_enabled_regmap;
52171+ pax_open_kernel();
52172+ *(void **)&max->ops.enable = regulator_enable_regmap;
52173+ *(void **)&max->ops.disable = regulator_disable_regmap;
52174+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
52175+ pax_close_kernel();
52176 }
52177
52178 if (pdata) {
52179diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
52180index f374fa5..26f0683 100644
52181--- a/drivers/regulator/mc13892-regulator.c
52182+++ b/drivers/regulator/mc13892-regulator.c
52183@@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
52184 }
52185 mc13xxx_unlock(mc13892);
52186
52187- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
52188+ pax_open_kernel();
52189+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
52190 = mc13892_vcam_set_mode;
52191- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
52192+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
52193 = mc13892_vcam_get_mode;
52194+ pax_close_kernel();
52195
52196 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
52197 ARRAY_SIZE(mc13892_regulators));
52198diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
52199index b0e4a3e..e5dc11e 100644
52200--- a/drivers/rtc/rtc-cmos.c
52201+++ b/drivers/rtc/rtc-cmos.c
52202@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
52203 hpet_rtc_timer_init();
52204
52205 /* export at least the first block of NVRAM */
52206- nvram.size = address_space - NVRAM_OFFSET;
52207+ pax_open_kernel();
52208+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
52209+ pax_close_kernel();
52210 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
52211 if (retval < 0) {
52212 dev_dbg(dev, "can't create nvram file? %d\n", retval);
52213diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
52214index d049393..bb20be0 100644
52215--- a/drivers/rtc/rtc-dev.c
52216+++ b/drivers/rtc/rtc-dev.c
52217@@ -16,6 +16,7 @@
52218 #include <linux/module.h>
52219 #include <linux/rtc.h>
52220 #include <linux/sched.h>
52221+#include <linux/grsecurity.h>
52222 #include "rtc-core.h"
52223
52224 static dev_t rtc_devt;
52225@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
52226 if (copy_from_user(&tm, uarg, sizeof(tm)))
52227 return -EFAULT;
52228
52229+ gr_log_timechange();
52230+
52231 return rtc_set_time(rtc, &tm);
52232
52233 case RTC_PIE_ON:
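The rtc-dev hunk inserts gr_log_timechange() ahead of rtc_set_time() in the RTC_SET_TIME ioctl path — grsecurity's audit hook for time changes. The log-before-acting shape, with local stand-ins for the hook and the setter:

#include <stdio.h>
#include <time.h>

static void log_timechange(void)        /* stand-in for gr_log_timechange() */
{
        fprintf(stderr, "audit: process changed the RTC time\n");
}

static int set_rtc_time(const struct tm *tm)
{
        log_timechange();               /* record first, then act */
        printf("setting time to %02d:%02d\n", tm->tm_hour, tm->tm_min);
        return 0;
}

int main(void)
{
        struct tm tm = { .tm_hour = 12, .tm_min = 30 };
        return set_rtc_time(&tm);
}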
52234diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
52235index f03d5ba..8325bf6 100644
52236--- a/drivers/rtc/rtc-ds1307.c
52237+++ b/drivers/rtc/rtc-ds1307.c
52238@@ -107,7 +107,7 @@ struct ds1307 {
52239 u8 offset; /* register's offset */
52240 u8 regs[11];
52241 u16 nvram_offset;
52242- struct bin_attribute *nvram;
52243+ bin_attribute_no_const *nvram;
52244 enum ds_type type;
52245 unsigned long flags;
52246 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
52247diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
52248index 11880c1..b823aa4 100644
52249--- a/drivers/rtc/rtc-m48t59.c
52250+++ b/drivers/rtc/rtc-m48t59.c
52251@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
52252 if (IS_ERR(m48t59->rtc))
52253 return PTR_ERR(m48t59->rtc);
52254
52255- m48t59_nvram_attr.size = pdata->offset;
52256+ pax_open_kernel();
52257+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
52258+ pax_close_kernel();
52259
52260 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
52261 if (ret)
52262diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
52263index e693af6..2e525b6 100644
52264--- a/drivers/scsi/bfa/bfa_fcpim.h
52265+++ b/drivers/scsi/bfa/bfa_fcpim.h
52266@@ -36,7 +36,7 @@ struct bfa_iotag_s {
52267
52268 struct bfa_itn_s {
52269 bfa_isr_func_t isr;
52270-};
52271+} __no_const;
52272
52273 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
52274 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
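bfa_itn_s holds a single function pointer that bfa_itn_create() stores at runtime, so the hunk tags the struct __no_const to keep it out of the constify plugin's reach — a struct containing nothing but function pointers is exactly what the plugin would otherwise lock down. The shape of the pattern, with local names:

#include <stdio.h>

struct itn {
        void (*isr)(int msg);
} /* __no_const in the patch; plain writable struct here */;

static void my_isr(int msg)
{
        printf("isr got msg %d\n", msg);
}

static void itn_create(struct itn *itn, void (*isr)(int))
{
        itn->isr = isr;         /* runtime store: needs a writable struct */
}

int main(void)
{
        struct itn itn;

        itn_create(&itn, my_isr);
        itn.isr(7);
        return 0;
}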
52275diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
52276index a3ab5cc..8143622 100644
52277--- a/drivers/scsi/bfa/bfa_fcs.c
52278+++ b/drivers/scsi/bfa/bfa_fcs.c
52279@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
52280 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
52281
52282 static struct bfa_fcs_mod_s fcs_modules[] = {
52283- { bfa_fcs_port_attach, NULL, NULL },
52284- { bfa_fcs_uf_attach, NULL, NULL },
52285- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
52286- bfa_fcs_fabric_modexit },
52287+ {
52288+ .attach = bfa_fcs_port_attach,
52289+ .modinit = NULL,
52290+ .modexit = NULL
52291+ },
52292+ {
52293+ .attach = bfa_fcs_uf_attach,
52294+ .modinit = NULL,
52295+ .modexit = NULL
52296+ },
52297+ {
52298+ .attach = bfa_fcs_fabric_attach,
52299+ .modinit = bfa_fcs_fabric_modinit,
52300+ .modexit = bfa_fcs_fabric_modexit
52301+ },
52302 };
52303
52304 /*
52305diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
52306index ff75ef8..2dfe00a 100644
52307--- a/drivers/scsi/bfa/bfa_fcs_lport.c
52308+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
52309@@ -89,15 +89,26 @@ static struct {
52310 void (*offline) (struct bfa_fcs_lport_s *port);
52311 } __port_action[] = {
52312 {
52313- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
52314- bfa_fcs_lport_unknown_offline}, {
52315- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
52316- bfa_fcs_lport_fab_offline}, {
52317- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
52318- bfa_fcs_lport_n2n_offline}, {
52319- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
52320- bfa_fcs_lport_loop_offline},
52321- };
52322+ .init = bfa_fcs_lport_unknown_init,
52323+ .online = bfa_fcs_lport_unknown_online,
52324+ .offline = bfa_fcs_lport_unknown_offline
52325+ },
52326+ {
52327+ .init = bfa_fcs_lport_fab_init,
52328+ .online = bfa_fcs_lport_fab_online,
52329+ .offline = bfa_fcs_lport_fab_offline
52330+ },
52331+ {
52332+ .init = bfa_fcs_lport_n2n_init,
52333+ .online = bfa_fcs_lport_n2n_online,
52334+ .offline = bfa_fcs_lport_n2n_offline
52335+ },
52336+ {
52337+ .init = bfa_fcs_lport_loop_init,
52338+ .online = bfa_fcs_lport_loop_online,
52339+ .offline = bfa_fcs_lport_loop_offline
52340+ },
52341+};
52342
52343 /*
52344 * fcs_port_sm FCS logical port state machine
52345diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
52346index a38aafa0..fe8f03b 100644
52347--- a/drivers/scsi/bfa/bfa_ioc.h
52348+++ b/drivers/scsi/bfa/bfa_ioc.h
52349@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
52350 bfa_ioc_disable_cbfn_t disable_cbfn;
52351 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
52352 bfa_ioc_reset_cbfn_t reset_cbfn;
52353-};
52354+} __no_const;
52355
52356 /*
52357 * IOC event notification mechanism.
52358@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
52359 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
52360 enum bfi_ioc_state fwstate);
52361 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
52362-};
52363+} __no_const;
52364
52365 /*
52366 * Queue element to wait for room in request queue. FIFO order is
52367diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
52368index a14c784..6de6790 100644
52369--- a/drivers/scsi/bfa/bfa_modules.h
52370+++ b/drivers/scsi/bfa/bfa_modules.h
52371@@ -78,12 +78,12 @@ enum {
52372 \
52373 extern struct bfa_module_s hal_mod_ ## __mod; \
52374 struct bfa_module_s hal_mod_ ## __mod = { \
52375- bfa_ ## __mod ## _meminfo, \
52376- bfa_ ## __mod ## _attach, \
52377- bfa_ ## __mod ## _detach, \
52378- bfa_ ## __mod ## _start, \
52379- bfa_ ## __mod ## _stop, \
52380- bfa_ ## __mod ## _iocdisable, \
52381+ .meminfo = bfa_ ## __mod ## _meminfo, \
52382+ .attach = bfa_ ## __mod ## _attach, \
52383+ .detach = bfa_ ## __mod ## _detach, \
52384+ .start = bfa_ ## __mod ## _start, \
52385+ .stop = bfa_ ## __mod ## _stop, \
52386+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
52387 }
52388
52389 #define BFA_CACHELINE_SZ (256)
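The BFA module macro stamps out one hal_mod_* table per subsystem via token pasting; the hunk converts its body to designated initializers, which stay correct even if bfa_module_s fields are ever reordered. A compact reconstruction of the pattern with local names:

#include <stdio.h>

struct module_ops {
        void (*start)(void);
        void (*stop)(void);
};

#define DEFINE_MODULE(mod)                                      \
        static void mod##_start(void) { puts(#mod " start"); }  \
        static void mod##_stop(void)  { puts(#mod " stop"); }   \
        static struct module_ops mod##_ops = {                  \
                .start = mod##_start,                           \
                .stop  = mod##_stop,                            \
        }

DEFINE_MODULE(fcp);

int main(void)
{
        fcp_ops.start();
        fcp_ops.stop();
        return 0;
}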
52390diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
52391index 045c4e1..13de803 100644
52392--- a/drivers/scsi/fcoe/fcoe_sysfs.c
52393+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
52394@@ -33,8 +33,8 @@
52395 */
52396 #include "libfcoe.h"
52397
52398-static atomic_t ctlr_num;
52399-static atomic_t fcf_num;
52400+static atomic_unchecked_t ctlr_num;
52401+static atomic_unchecked_t fcf_num;
52402
52403 /*
52404 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
52405@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
52406 if (!ctlr)
52407 goto out;
52408
52409- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
52410+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
52411 ctlr->f = f;
52412 ctlr->mode = FIP_CONN_TYPE_FABRIC;
52413 INIT_LIST_HEAD(&ctlr->fcfs);
52414@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
52415 fcf->dev.parent = &ctlr->dev;
52416 fcf->dev.bus = &fcoe_bus_type;
52417 fcf->dev.type = &fcoe_fcf_device_type;
52418- fcf->id = atomic_inc_return(&fcf_num) - 1;
52419+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
52420 fcf->state = FCOE_FCF_STATE_UNKNOWN;
52421
52422 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
52423@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
52424 {
52425 int error;
52426
52427- atomic_set(&ctlr_num, 0);
52428- atomic_set(&fcf_num, 0);
52429+ atomic_set_unchecked(&ctlr_num, 0);
52430+ atomic_set_unchecked(&fcf_num, 0);
52431
52432 error = bus_register(&fcoe_bus_type);
52433 if (error)
52434diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
52435index 3cbb57a..95e47a3 100644
52436--- a/drivers/scsi/hosts.c
52437+++ b/drivers/scsi/hosts.c
52438@@ -42,7 +42,7 @@
52439 #include "scsi_logging.h"
52440
52441
52442-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
52443+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
52444
52445
52446 static void scsi_host_cls_release(struct device *dev)
52447@@ -369,7 +369,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
52448 * subtract one because we increment first then return, but we need to
52449 * know what the next host number was before increment
52450 */
52451- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
52452+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
52453 shost->dma_channel = 0xff;
52454
52455 /* These three are default values which can be overridden */
52456diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
52457index 489e83b..193815b 100644
52458--- a/drivers/scsi/hpsa.c
52459+++ b/drivers/scsi/hpsa.c
52460@@ -701,10 +701,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
52461 unsigned long flags;
52462
52463 if (h->transMethod & CFGTBL_Trans_io_accel1)
52464- return h->access.command_completed(h, q);
52465+ return h->access->command_completed(h, q);
52466
52467 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
52468- return h->access.command_completed(h, q);
52469+ return h->access->command_completed(h, q);
52470
52471 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
52472 a = rq->head[rq->current_entry];
52473@@ -5455,7 +5455,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
52474 while (!list_empty(&h->reqQ)) {
52475 c = list_entry(h->reqQ.next, struct CommandList, list);
52476 /* can't do anything if fifo is full */
52477- if ((h->access.fifo_full(h))) {
52478+ if ((h->access->fifo_full(h))) {
52479 h->fifo_recently_full = 1;
52480 dev_warn(&h->pdev->dev, "fifo full\n");
52481 break;
52482@@ -5477,7 +5477,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
52483
52484 /* Tell the controller execute command */
52485 spin_unlock_irqrestore(&h->lock, *flags);
52486- h->access.submit_command(h, c);
52487+ h->access->submit_command(h, c);
52488 spin_lock_irqsave(&h->lock, *flags);
52489 }
52490 }
52491@@ -5493,17 +5493,17 @@ static void lock_and_start_io(struct ctlr_info *h)
52492
52493 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
52494 {
52495- return h->access.command_completed(h, q);
52496+ return h->access->command_completed(h, q);
52497 }
52498
52499 static inline bool interrupt_pending(struct ctlr_info *h)
52500 {
52501- return h->access.intr_pending(h);
52502+ return h->access->intr_pending(h);
52503 }
52504
52505 static inline long interrupt_not_for_us(struct ctlr_info *h)
52506 {
52507- return (h->access.intr_pending(h) == 0) ||
52508+ return (h->access->intr_pending(h) == 0) ||
52509 (h->interrupts_enabled == 0);
52510 }
52511
52512@@ -6459,7 +6459,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
52513 if (prod_index < 0)
52514 return -ENODEV;
52515 h->product_name = products[prod_index].product_name;
52516- h->access = *(products[prod_index].access);
52517+ h->access = products[prod_index].access;
52518
52519 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
52520 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
52521@@ -6781,7 +6781,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
52522 unsigned long flags;
52523 u32 lockup_detected;
52524
52525- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52526+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52527 spin_lock_irqsave(&h->lock, flags);
52528 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
52529 if (!lockup_detected) {
52530@@ -7022,7 +7022,7 @@ reinit_after_soft_reset:
52531 }
52532
52533 /* make sure the board interrupts are off */
52534- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52535+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52536
52537 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
52538 goto clean2;
52539@@ -7057,7 +7057,7 @@ reinit_after_soft_reset:
52540 * fake ones to scoop up any residual completions.
52541 */
52542 spin_lock_irqsave(&h->lock, flags);
52543- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52544+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52545 spin_unlock_irqrestore(&h->lock, flags);
52546 free_irqs(h);
52547 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
52548@@ -7076,9 +7076,9 @@ reinit_after_soft_reset:
52549 dev_info(&h->pdev->dev, "Board READY.\n");
52550 dev_info(&h->pdev->dev,
52551 "Waiting for stale completions to drain.\n");
52552- h->access.set_intr_mask(h, HPSA_INTR_ON);
52553+ h->access->set_intr_mask(h, HPSA_INTR_ON);
52554 msleep(10000);
52555- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52556+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52557
52558 rc = controller_reset_failed(h->cfgtable);
52559 if (rc)
52560@@ -7104,7 +7104,7 @@ reinit_after_soft_reset:
52561 h->drv_req_rescan = 0;
52562
52563 /* Turn the interrupts on so we can service requests */
52564- h->access.set_intr_mask(h, HPSA_INTR_ON);
52565+ h->access->set_intr_mask(h, HPSA_INTR_ON);
52566
52567 hpsa_hba_inquiry(h);
52568 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
52569@@ -7169,7 +7169,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
52570 * To write all data in the battery backed cache to disks
52571 */
52572 hpsa_flush_cache(h);
52573- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52574+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52575 hpsa_free_irqs_and_disable_msix(h);
52576 }
52577
52578@@ -7287,7 +7287,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52579 CFGTBL_Trans_enable_directed_msix |
52580 (trans_support & (CFGTBL_Trans_io_accel1 |
52581 CFGTBL_Trans_io_accel2));
52582- struct access_method access = SA5_performant_access;
52583+ struct access_method *access = &SA5_performant_access;
52584
52585 /* This is a bit complicated. There are 8 registers on
52586 * the controller which we write to to tell it 8 different
52587@@ -7329,7 +7329,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52588 * perform the superfluous readl() after each command submission.
52589 */
52590 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
52591- access = SA5_performant_access_no_read;
52592+ access = &SA5_performant_access_no_read;
52593
52594 /* Controller spec: zero out this buffer. */
52595 for (i = 0; i < h->nreply_queues; i++)
52596@@ -7359,12 +7359,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52597 * enable outbound interrupt coalescing in accelerator mode;
52598 */
52599 if (trans_support & CFGTBL_Trans_io_accel1) {
52600- access = SA5_ioaccel_mode1_access;
52601+ access = &SA5_ioaccel_mode1_access;
52602 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
52603 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
52604 } else {
52605 if (trans_support & CFGTBL_Trans_io_accel2) {
52606- access = SA5_ioaccel_mode2_access;
52607+ access = &SA5_ioaccel_mode2_access;
52608 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
52609 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
52610 }
52611diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
52612index 24472ce..8782caf 100644
52613--- a/drivers/scsi/hpsa.h
52614+++ b/drivers/scsi/hpsa.h
52615@@ -127,7 +127,7 @@ struct ctlr_info {
52616 unsigned int msix_vector;
52617 unsigned int msi_vector;
52618 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
52619- struct access_method access;
52620+ struct access_method *access;
52621 char hba_mode_enabled;
52622
52623 /* queue and queue Info */
52624@@ -536,43 +536,43 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
52625 }
52626
52627 static struct access_method SA5_access = {
52628- SA5_submit_command,
52629- SA5_intr_mask,
52630- SA5_fifo_full,
52631- SA5_intr_pending,
52632- SA5_completed,
52633+ .submit_command = SA5_submit_command,
52634+ .set_intr_mask = SA5_intr_mask,
52635+ .fifo_full = SA5_fifo_full,
52636+ .intr_pending = SA5_intr_pending,
52637+ .command_completed = SA5_completed,
52638 };
52639
52640 static struct access_method SA5_ioaccel_mode1_access = {
52641- SA5_submit_command,
52642- SA5_performant_intr_mask,
52643- SA5_fifo_full,
52644- SA5_ioaccel_mode1_intr_pending,
52645- SA5_ioaccel_mode1_completed,
52646+ .submit_command = SA5_submit_command,
52647+ .set_intr_mask = SA5_performant_intr_mask,
52648+ .fifo_full = SA5_fifo_full,
52649+ .intr_pending = SA5_ioaccel_mode1_intr_pending,
52650+ .command_completed = SA5_ioaccel_mode1_completed,
52651 };
52652
52653 static struct access_method SA5_ioaccel_mode2_access = {
52654- SA5_submit_command_ioaccel2,
52655- SA5_performant_intr_mask,
52656- SA5_fifo_full,
52657- SA5_performant_intr_pending,
52658- SA5_performant_completed,
52659+ .submit_command = SA5_submit_command_ioaccel2,
52660+ .set_intr_mask = SA5_performant_intr_mask,
52661+ .fifo_full = SA5_fifo_full,
52662+ .intr_pending = SA5_performant_intr_pending,
52663+ .command_completed = SA5_performant_completed,
52664 };
52665
52666 static struct access_method SA5_performant_access = {
52667- SA5_submit_command,
52668- SA5_performant_intr_mask,
52669- SA5_fifo_full,
52670- SA5_performant_intr_pending,
52671- SA5_performant_completed,
52672+ .submit_command = SA5_submit_command,
52673+ .set_intr_mask = SA5_performant_intr_mask,
52674+ .fifo_full = SA5_fifo_full,
52675+ .intr_pending = SA5_performant_intr_pending,
52676+ .command_completed = SA5_performant_completed,
52677 };
52678
52679 static struct access_method SA5_performant_access_no_read = {
52680- SA5_submit_command_no_read,
52681- SA5_performant_intr_mask,
52682- SA5_fifo_full,
52683- SA5_performant_intr_pending,
52684- SA5_performant_completed,
52685+ .submit_command = SA5_submit_command_no_read,
52686+ .set_intr_mask = SA5_performant_intr_mask,
52687+ .fifo_full = SA5_fifo_full,
52688+ .intr_pending = SA5_performant_intr_pending,
52689+ .command_completed = SA5_performant_completed,
52690 };
52691
52692 struct board_type {
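The hpsa hunks replace an embedded struct access_method copy with a pointer to one of several tables (hence every h->access. becoming h->access->), so the tables themselves can be shared and kept in read-only memory; selecting an access method becomes a pointer store instead of a struct copy. A sketch with local stand-ins:

#include <stdio.h>

struct access_method {
        void (*submit)(const char *cmd);
};

static void submit_fast(const char *cmd) { printf("fast: %s\n", cmd); }
static void submit_safe(const char *cmd) { printf("safe: %s\n", cmd); }

static const struct access_method perf_access  = { .submit = submit_fast };
static const struct access_method plain_access = { .submit = submit_safe };

struct ctlr_info {
        const struct access_method *access; /* was: struct access_method access */
};

int main(void)
{
        struct ctlr_info h = { .access = &perf_access };

        h.access->submit("inquiry");    /* was: h.access.submit(...) */
        h.access = &plain_access;       /* swap = pointer store, not a copy */
        h.access->submit("inquiry");
        return 0;
}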
52693diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
52694index 1b3a094..068e683 100644
52695--- a/drivers/scsi/libfc/fc_exch.c
52696+++ b/drivers/scsi/libfc/fc_exch.c
52697@@ -101,12 +101,12 @@ struct fc_exch_mgr {
52698 u16 pool_max_index;
52699
52700 struct {
52701- atomic_t no_free_exch;
52702- atomic_t no_free_exch_xid;
52703- atomic_t xid_not_found;
52704- atomic_t xid_busy;
52705- atomic_t seq_not_found;
52706- atomic_t non_bls_resp;
52707+ atomic_unchecked_t no_free_exch;
52708+ atomic_unchecked_t no_free_exch_xid;
52709+ atomic_unchecked_t xid_not_found;
52710+ atomic_unchecked_t xid_busy;
52711+ atomic_unchecked_t seq_not_found;
52712+ atomic_unchecked_t non_bls_resp;
52713 } stats;
52714 };
52715
52716@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
52717 /* allocate memory for exchange */
52718 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
52719 if (!ep) {
52720- atomic_inc(&mp->stats.no_free_exch);
52721+ atomic_inc_unchecked(&mp->stats.no_free_exch);
52722 goto out;
52723 }
52724 memset(ep, 0, sizeof(*ep));
52725@@ -874,7 +874,7 @@ out:
52726 return ep;
52727 err:
52728 spin_unlock_bh(&pool->lock);
52729- atomic_inc(&mp->stats.no_free_exch_xid);
52730+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
52731 mempool_free(ep, mp->ep_pool);
52732 return NULL;
52733 }
52734@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52735 xid = ntohs(fh->fh_ox_id); /* we originated exch */
52736 ep = fc_exch_find(mp, xid);
52737 if (!ep) {
52738- atomic_inc(&mp->stats.xid_not_found);
52739+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52740 reject = FC_RJT_OX_ID;
52741 goto out;
52742 }
52743@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52744 ep = fc_exch_find(mp, xid);
52745 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
52746 if (ep) {
52747- atomic_inc(&mp->stats.xid_busy);
52748+ atomic_inc_unchecked(&mp->stats.xid_busy);
52749 reject = FC_RJT_RX_ID;
52750 goto rel;
52751 }
52752@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52753 }
52754 xid = ep->xid; /* get our XID */
52755 } else if (!ep) {
52756- atomic_inc(&mp->stats.xid_not_found);
52757+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52758 reject = FC_RJT_RX_ID; /* XID not found */
52759 goto out;
52760 }
52761@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52762 } else {
52763 sp = &ep->seq;
52764 if (sp->id != fh->fh_seq_id) {
52765- atomic_inc(&mp->stats.seq_not_found);
52766+ atomic_inc_unchecked(&mp->stats.seq_not_found);
52767 if (f_ctl & FC_FC_END_SEQ) {
52768 /*
52769 * Update sequence_id based on incoming last
52770@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52771
52772 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
52773 if (!ep) {
52774- atomic_inc(&mp->stats.xid_not_found);
52775+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52776 goto out;
52777 }
52778 if (ep->esb_stat & ESB_ST_COMPLETE) {
52779- atomic_inc(&mp->stats.xid_not_found);
52780+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52781 goto rel;
52782 }
52783 if (ep->rxid == FC_XID_UNKNOWN)
52784 ep->rxid = ntohs(fh->fh_rx_id);
52785 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
52786- atomic_inc(&mp->stats.xid_not_found);
52787+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52788 goto rel;
52789 }
52790 if (ep->did != ntoh24(fh->fh_s_id) &&
52791 ep->did != FC_FID_FLOGI) {
52792- atomic_inc(&mp->stats.xid_not_found);
52793+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52794 goto rel;
52795 }
52796 sof = fr_sof(fp);
52797@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52798 sp->ssb_stat |= SSB_ST_RESP;
52799 sp->id = fh->fh_seq_id;
52800 } else if (sp->id != fh->fh_seq_id) {
52801- atomic_inc(&mp->stats.seq_not_found);
52802+ atomic_inc_unchecked(&mp->stats.seq_not_found);
52803 goto rel;
52804 }
52805
52806@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52807 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
52808
52809 if (!sp)
52810- atomic_inc(&mp->stats.xid_not_found);
52811+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52812 else
52813- atomic_inc(&mp->stats.non_bls_resp);
52814+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
52815
52816 fc_frame_free(fp);
52817 }
52818@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
52819
52820 list_for_each_entry(ema, &lport->ema_list, ema_list) {
52821 mp = ema->mp;
52822- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
52823+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
52824 st->fc_no_free_exch_xid +=
52825- atomic_read(&mp->stats.no_free_exch_xid);
52826- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
52827- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
52828- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
52829- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
52830+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
52831+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
52832+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
52833+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
52834+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
52835 }
52836 }
52837 EXPORT_SYMBOL(fc_exch_update_stats);
52838diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
52839index 766098a..1c6c971 100644
52840--- a/drivers/scsi/libsas/sas_ata.c
52841+++ b/drivers/scsi/libsas/sas_ata.c
52842@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
52843 .postreset = ata_std_postreset,
52844 .error_handler = ata_std_error_handler,
52845 .post_internal_cmd = sas_ata_post_internal,
52846- .qc_defer = ata_std_qc_defer,
52847+ .qc_defer = ata_std_qc_defer,
52848 .qc_prep = ata_noop_qc_prep,
52849 .qc_issue = sas_ata_qc_issue,
52850 .qc_fill_rtf = sas_ata_qc_fill_rtf,
52851diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
52852index 434e903..5a4a79b 100644
52853--- a/drivers/scsi/lpfc/lpfc.h
52854+++ b/drivers/scsi/lpfc/lpfc.h
52855@@ -430,7 +430,7 @@ struct lpfc_vport {
52856 struct dentry *debug_nodelist;
52857 struct dentry *vport_debugfs_root;
52858 struct lpfc_debugfs_trc *disc_trc;
52859- atomic_t disc_trc_cnt;
52860+ atomic_unchecked_t disc_trc_cnt;
52861 #endif
52862 uint8_t stat_data_enabled;
52863 uint8_t stat_data_blocked;
52864@@ -880,8 +880,8 @@ struct lpfc_hba {
52865 struct timer_list fabric_block_timer;
52866 unsigned long bit_flags;
52867 #define FABRIC_COMANDS_BLOCKED 0
52868- atomic_t num_rsrc_err;
52869- atomic_t num_cmd_success;
52870+ atomic_unchecked_t num_rsrc_err;
52871+ atomic_unchecked_t num_cmd_success;
52872 unsigned long last_rsrc_error_time;
52873 unsigned long last_ramp_down_time;
52874 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
52875@@ -916,7 +916,7 @@ struct lpfc_hba {
52876
52877 struct dentry *debug_slow_ring_trc;
52878 struct lpfc_debugfs_trc *slow_ring_trc;
52879- atomic_t slow_ring_trc_cnt;
52880+ atomic_unchecked_t slow_ring_trc_cnt;
52881 /* iDiag debugfs sub-directory */
52882 struct dentry *idiag_root;
52883 struct dentry *idiag_pci_cfg;
52884diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
52885index b0aedce..89c6ca6 100644
52886--- a/drivers/scsi/lpfc/lpfc_debugfs.c
52887+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
52888@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
52889
52890 #include <linux/debugfs.h>
52891
52892-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
52893+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
52894 static unsigned long lpfc_debugfs_start_time = 0L;
52895
52896 /* iDiag */
52897@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
52898 lpfc_debugfs_enable = 0;
52899
52900 len = 0;
52901- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
52902+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
52903 (lpfc_debugfs_max_disc_trc - 1);
52904 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
52905 dtp = vport->disc_trc + i;
52906@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
52907 lpfc_debugfs_enable = 0;
52908
52909 len = 0;
52910- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
52911+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
52912 (lpfc_debugfs_max_slow_ring_trc - 1);
52913 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
52914 dtp = phba->slow_ring_trc + i;
52915@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
52916 !vport || !vport->disc_trc)
52917 return;
52918
52919- index = atomic_inc_return(&vport->disc_trc_cnt) &
52920+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
52921 (lpfc_debugfs_max_disc_trc - 1);
52922 dtp = vport->disc_trc + index;
52923 dtp->fmt = fmt;
52924 dtp->data1 = data1;
52925 dtp->data2 = data2;
52926 dtp->data3 = data3;
52927- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
52928+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
52929 dtp->jif = jiffies;
52930 #endif
52931 return;
52932@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
52933 !phba || !phba->slow_ring_trc)
52934 return;
52935
52936- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
52937+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
52938 (lpfc_debugfs_max_slow_ring_trc - 1);
52939 dtp = phba->slow_ring_trc + index;
52940 dtp->fmt = fmt;
52941 dtp->data1 = data1;
52942 dtp->data2 = data2;
52943 dtp->data3 = data3;
52944- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
52945+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
52946 dtp->jif = jiffies;
52947 #endif
52948 return;
52949@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
52950 "slow_ring buffer\n");
52951 goto debug_failed;
52952 }
52953- atomic_set(&phba->slow_ring_trc_cnt, 0);
52954+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
52955 memset(phba->slow_ring_trc, 0,
52956 (sizeof(struct lpfc_debugfs_trc) *
52957 lpfc_debugfs_max_slow_ring_trc));
52958@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
52959 "buffer\n");
52960 goto debug_failed;
52961 }
52962- atomic_set(&vport->disc_trc_cnt, 0);
52963+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
52964
52965 snprintf(name, sizeof(name), "discovery_trace");
52966 vport->debug_disc_trc =
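The lpfc debugfs traces index their ring buffers with a free-running counter masked by (size - 1); that only works because the trace sizes are powers of two, and counter wraparound is harmless — which is exactly why the _unchecked atomics fit here. A local model of the indexing:

#include <stdio.h>

#define TRC_SIZE 8                      /* must be a power of two */

static unsigned int trc_cnt;
static int trc[TRC_SIZE];

static void trace(int value)
{
        unsigned int index = trc_cnt++ & (TRC_SIZE - 1);

        trc[index] = value;             /* wraps transparently */
}

int main(void)
{
        for (int i = 0; i < 11; i++)
                trace(i);
        for (int i = 0; i < TRC_SIZE; i++)
                printf("%d ", trc[i]);  /* slots 0..2 overwritten by 8..10 */
        putchar('\n');
        return 0;
}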
52967diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
52968index 06f9a5b..82812092 100644
52969--- a/drivers/scsi/lpfc/lpfc_init.c
52970+++ b/drivers/scsi/lpfc/lpfc_init.c
52971@@ -11296,8 +11296,10 @@ lpfc_init(void)
52972 "misc_register returned with status %d", error);
52973
52974 if (lpfc_enable_npiv) {
52975- lpfc_transport_functions.vport_create = lpfc_vport_create;
52976- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
52977+ pax_open_kernel();
52978+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
52979+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
52980+ pax_close_kernel();
52981 }
52982 lpfc_transport_template =
52983 fc_attach_transport(&lpfc_transport_functions);
52984diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
52985index 2df11da..e660a2c 100644
52986--- a/drivers/scsi/lpfc/lpfc_scsi.c
52987+++ b/drivers/scsi/lpfc/lpfc_scsi.c
52988@@ -382,7 +382,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
52989 uint32_t evt_posted;
52990
52991 spin_lock_irqsave(&phba->hbalock, flags);
52992- atomic_inc(&phba->num_rsrc_err);
52993+ atomic_inc_unchecked(&phba->num_rsrc_err);
52994 phba->last_rsrc_error_time = jiffies;
52995
52996 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
52997@@ -423,8 +423,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
52998 unsigned long num_rsrc_err, num_cmd_success;
52999 int i;
53000
53001- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
53002- num_cmd_success = atomic_read(&phba->num_cmd_success);
53003+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
53004+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
53005
53006 /*
53007 * The error and success command counters are global per
53008@@ -452,8 +452,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
53009 }
53010 }
53011 lpfc_destroy_vport_work_array(phba, vports);
53012- atomic_set(&phba->num_rsrc_err, 0);
53013- atomic_set(&phba->num_cmd_success, 0);
53014+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
53015+ atomic_set_unchecked(&phba->num_cmd_success, 0);
53016 }
53017
53018 /**
53019diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
53020index 5055f92..376cd98 100644
53021--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
53022+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
53023@@ -1557,7 +1557,7 @@ _scsih_get_resync(struct device *dev)
53024 {
53025 struct scsi_device *sdev = to_scsi_device(dev);
53026 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
53027- static struct _raid_device *raid_device;
53028+ struct _raid_device *raid_device;
53029 unsigned long flags;
53030 Mpi2RaidVolPage0_t vol_pg0;
53031 Mpi2ConfigReply_t mpi_reply;
53032@@ -1609,7 +1609,7 @@ _scsih_get_state(struct device *dev)
53033 {
53034 struct scsi_device *sdev = to_scsi_device(dev);
53035 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
53036- static struct _raid_device *raid_device;
53037+ struct _raid_device *raid_device;
53038 unsigned long flags;
53039 Mpi2RaidVolPage0_t vol_pg0;
53040 Mpi2ConfigReply_t mpi_reply;
53041@@ -6631,7 +6631,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
53042 struct fw_event_work *fw_event)
53043 {
53044 Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
53045- static struct _raid_device *raid_device;
53046+ struct _raid_device *raid_device;
53047 unsigned long flags;
53048 u16 handle;
53049
53050@@ -7102,7 +7102,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
53051 u64 sas_address;
53052 struct _sas_device *sas_device;
53053 struct _sas_node *expander_device;
53054- static struct _raid_device *raid_device;
53055+ struct _raid_device *raid_device;
53056 u8 retry_count;
53057 unsigned long flags;
53058
53059diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
53060index be8ce54..94ed33a 100644
53061--- a/drivers/scsi/pmcraid.c
53062+++ b/drivers/scsi/pmcraid.c
53063@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
53064 res->scsi_dev = scsi_dev;
53065 scsi_dev->hostdata = res;
53066 res->change_detected = 0;
53067- atomic_set(&res->read_failures, 0);
53068- atomic_set(&res->write_failures, 0);
53069+ atomic_set_unchecked(&res->read_failures, 0);
53070+ atomic_set_unchecked(&res->write_failures, 0);
53071 rc = 0;
53072 }
53073 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
53074@@ -2687,9 +2687,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
53075
53076 /* If this was a SCSI read/write command keep count of errors */
53077 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
53078- atomic_inc(&res->read_failures);
53079+ atomic_inc_unchecked(&res->read_failures);
53080 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
53081- atomic_inc(&res->write_failures);
53082+ atomic_inc_unchecked(&res->write_failures);
53083
53084 if (!RES_IS_GSCSI(res->cfg_entry) &&
53085 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
53086@@ -3545,7 +3545,7 @@ static int pmcraid_queuecommand_lck(
53087 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
53088 * hrrq_id assigned here in queuecommand
53089 */
53090- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
53091+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
53092 pinstance->num_hrrq;
53093 cmd->cmd_done = pmcraid_io_done;
53094
53095@@ -3857,7 +3857,7 @@ static long pmcraid_ioctl_passthrough(
53096 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
53097 * hrrq_id assigned here in queuecommand
53098 */
53099- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
53100+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
53101 pinstance->num_hrrq;
53102
53103 if (request_size) {
53104@@ -4495,7 +4495,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
53105
53106 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
53107 /* add resources only after host is added into system */
53108- if (!atomic_read(&pinstance->expose_resources))
53109+ if (!atomic_read_unchecked(&pinstance->expose_resources))
53110 return;
53111
53112 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
53113@@ -5322,8 +5322,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
53114 init_waitqueue_head(&pinstance->reset_wait_q);
53115
53116 atomic_set(&pinstance->outstanding_cmds, 0);
53117- atomic_set(&pinstance->last_message_id, 0);
53118- atomic_set(&pinstance->expose_resources, 0);
53119+ atomic_set_unchecked(&pinstance->last_message_id, 0);
53120+ atomic_set_unchecked(&pinstance->expose_resources, 0);
53121
53122 INIT_LIST_HEAD(&pinstance->free_res_q);
53123 INIT_LIST_HEAD(&pinstance->used_res_q);
53124@@ -6036,7 +6036,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
53125 /* Schedule worker thread to handle CCN and take care of adding and
53126 * removing devices to OS
53127 */
53128- atomic_set(&pinstance->expose_resources, 1);
53129+ atomic_set_unchecked(&pinstance->expose_resources, 1);
53130 schedule_work(&pinstance->worker_q);
53131 return rc;
53132
53133diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
53134index e1d150f..6c6df44 100644
53135--- a/drivers/scsi/pmcraid.h
53136+++ b/drivers/scsi/pmcraid.h
53137@@ -748,7 +748,7 @@ struct pmcraid_instance {
53138 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
53139
53140 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
53141- atomic_t last_message_id;
53142+ atomic_unchecked_t last_message_id;
53143
53144 /* configuration table */
53145 struct pmcraid_config_table *cfg_table;
53146@@ -777,7 +777,7 @@ struct pmcraid_instance {
53147 atomic_t outstanding_cmds;
53148
53149 /* should add/delete resources to mid-layer now ?*/
53150- atomic_t expose_resources;
53151+ atomic_unchecked_t expose_resources;
53152
53153
53154
53155@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
53156 struct pmcraid_config_table_entry_ext cfg_entry_ext;
53157 };
53158 struct scsi_device *scsi_dev; /* Link scsi_device structure */
53159- atomic_t read_failures; /* count of failed READ commands */
53160- atomic_t write_failures; /* count of failed WRITE commands */
53161+ atomic_unchecked_t read_failures; /* count of failed READ commands */
53162+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
53163
53164 /* To indicate add/delete/modify during CCN */
53165 u8 change_detected;
53166diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
53167index 16fe519..3b1ec82 100644
53168--- a/drivers/scsi/qla2xxx/qla_attr.c
53169+++ b/drivers/scsi/qla2xxx/qla_attr.c
53170@@ -2188,7 +2188,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
53171 return 0;
53172 }
53173
53174-struct fc_function_template qla2xxx_transport_functions = {
53175+fc_function_template_no_const qla2xxx_transport_functions = {
53176
53177 .show_host_node_name = 1,
53178 .show_host_port_name = 1,
53179@@ -2236,7 +2236,7 @@ struct fc_function_template qla2xxx_transport_functions = {
53180 .bsg_timeout = qla24xx_bsg_timeout,
53181 };
53182
53183-struct fc_function_template qla2xxx_transport_vport_functions = {
53184+fc_function_template_no_const qla2xxx_transport_vport_functions = {
53185
53186 .show_host_node_name = 1,
53187 .show_host_port_name = 1,
53188diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
53189index d48dea8..0845f78 100644
53190--- a/drivers/scsi/qla2xxx/qla_gbl.h
53191+++ b/drivers/scsi/qla2xxx/qla_gbl.h
53192@@ -569,8 +569,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
53193 struct device_attribute;
53194 extern struct device_attribute *qla2x00_host_attrs[];
53195 struct fc_function_template;
53196-extern struct fc_function_template qla2xxx_transport_functions;
53197-extern struct fc_function_template qla2xxx_transport_vport_functions;
53198+extern fc_function_template_no_const qla2xxx_transport_functions;
53199+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
53200 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
53201 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
53202 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
53203diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
53204index d96bfb5..d7afe90 100644
53205--- a/drivers/scsi/qla2xxx/qla_os.c
53206+++ b/drivers/scsi/qla2xxx/qla_os.c
53207@@ -1490,8 +1490,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
53208 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
53209 /* Ok, a 64bit DMA mask is applicable. */
53210 ha->flags.enable_64bit_addressing = 1;
53211- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
53212- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
53213+ pax_open_kernel();
53214+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
53215+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
53216+ pax_close_kernel();
53217 return;
53218 }
53219 }
53220diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
53221index 8f6d0fb..1b21097 100644
53222--- a/drivers/scsi/qla4xxx/ql4_def.h
53223+++ b/drivers/scsi/qla4xxx/ql4_def.h
53224@@ -305,7 +305,7 @@ struct ddb_entry {
53225 * (4000 only) */
53226 atomic_t relogin_timer; /* Max Time to wait for
53227 * relogin to complete */
53228- atomic_t relogin_retry_count; /* Num of times relogin has been
53229+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
53230 * retried */
53231 uint32_t default_time2wait; /* Default Min time between
53232 * relogins (+aens) */
53233diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
53234index 3202063..f9f0ff6 100644
53235--- a/drivers/scsi/qla4xxx/ql4_os.c
53236+++ b/drivers/scsi/qla4xxx/ql4_os.c
53237@@ -4494,12 +4494,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
53238 */
53239 if (!iscsi_is_session_online(cls_sess)) {
53240 /* Reset retry relogin timer */
53241- atomic_inc(&ddb_entry->relogin_retry_count);
53242+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
53243 DEBUG2(ql4_printk(KERN_INFO, ha,
53244 "%s: index[%d] relogin timed out-retrying"
53245 " relogin (%d), retry (%d)\n", __func__,
53246 ddb_entry->fw_ddb_index,
53247- atomic_read(&ddb_entry->relogin_retry_count),
53248+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
53249 ddb_entry->default_time2wait + 4));
53250 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
53251 atomic_set(&ddb_entry->retry_relogin_timer,
53252@@ -6607,7 +6607,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
53253
53254 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
53255 atomic_set(&ddb_entry->relogin_timer, 0);
53256- atomic_set(&ddb_entry->relogin_retry_count, 0);
53257+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
53258 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
53259 ddb_entry->default_relogin_timeout =
53260 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
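The relogin_retry_count conversion shows the other half of PaX's REFCOUNT feature: plain atomic_t gains overflow detection (a wrapping increment is treated as a reference-count bug), so counters that are mere statistics, where wrapping is harmless, move to atomic_unchecked_t and the *_unchecked accessors to avoid false positives. A sketch of such a counter, with hypothetical names; atomic_unchecked_t is supplied by the PaX patch:

#include <linux/atomic.h>

static atomic_unchecked_t retry_count = ATOMIC_INIT(0);

static void note_retry(void)
{
        /* statistics only: silent wrap-around is acceptable here,
         * so the overflow-checked atomic_inc() would be wrong */
        atomic_inc_unchecked(&retry_count);
}

static int retries_so_far(void)
{
        return atomic_read_unchecked(&retry_count);
}

The same reasoning drives the iorequest_cnt/iodone_cnt/ioerr_cnt, fc_event_seq and iscsi_session_nr conversions in the hunks that follow.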
53261diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
53262index 769be4d..371fc61 100644
53263--- a/drivers/scsi/scsi.c
53264+++ b/drivers/scsi/scsi.c
53265@@ -648,7 +648,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
53266 struct Scsi_Host *host = cmd->device->host;
53267 int rtn = 0;
53268
53269- atomic_inc(&cmd->device->iorequest_cnt);
53270+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
53271
53272 /* check if the device is still usable */
53273 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
53274diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
53275index 3f50dfc..86af487 100644
53276--- a/drivers/scsi/scsi_lib.c
53277+++ b/drivers/scsi/scsi_lib.c
53278@@ -1423,7 +1423,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
53279 shost = sdev->host;
53280 scsi_init_cmd_errh(cmd);
53281 cmd->result = DID_NO_CONNECT << 16;
53282- atomic_inc(&cmd->device->iorequest_cnt);
53283+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
53284
53285 /*
53286 * SCSI request completion path will do scsi_device_unbusy(),
53287@@ -1449,9 +1449,9 @@ static void scsi_softirq_done(struct request *rq)
53288
53289 INIT_LIST_HEAD(&cmd->eh_entry);
53290
53291- atomic_inc(&cmd->device->iodone_cnt);
53292+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
53293 if (cmd->result)
53294- atomic_inc(&cmd->device->ioerr_cnt);
53295+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
53296
53297 disposition = scsi_decide_disposition(cmd);
53298 if (disposition != SUCCESS &&
53299diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
53300index 074e8cc..f612e5c 100644
53301--- a/drivers/scsi/scsi_sysfs.c
53302+++ b/drivers/scsi/scsi_sysfs.c
53303@@ -780,7 +780,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
53304 char *buf) \
53305 { \
53306 struct scsi_device *sdev = to_scsi_device(dev); \
53307- unsigned long long count = atomic_read(&sdev->field); \
53308+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
53309 return snprintf(buf, 20, "0x%llx\n", count); \
53310 } \
53311 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
53312diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
53313index e51add0..1e06a96 100644
53314--- a/drivers/scsi/scsi_tgt_lib.c
53315+++ b/drivers/scsi/scsi_tgt_lib.c
53316@@ -363,7 +363,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
53317 int err;
53318
53319 dprintk("%lx %u\n", uaddr, len);
53320- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
53321+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
53322 if (err) {
53323 /*
53324 * TODO: need to fixup sg_tablesize, max_segment_size,
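The (void __user *) cast above is a sparse address-space annotation rather than a behavioural change: uaddr arrives as an unsigned long describing a userland address, and tagging the pointer __user lets static checkers reject any direct kernel dereference of it. A sketch of the convention, with a hypothetical fetch_word() helper:

#include <linux/uaccess.h>

static int fetch_word(unsigned long uaddr, u32 *out)
{
        /* the __user tag records which address space the pointer
         * belongs to; get_user() does the real fault handling */
        return get_user(*out, (u32 __user *)uaddr);
}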
53325diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
53326index 521f583..6b15966 100644
53327--- a/drivers/scsi/scsi_transport_fc.c
53328+++ b/drivers/scsi/scsi_transport_fc.c
53329@@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
53330 * Netlink Infrastructure
53331 */
53332
53333-static atomic_t fc_event_seq;
53334+static atomic_unchecked_t fc_event_seq;
53335
53336 /**
53337 * fc_get_event_number - Obtain the next sequential FC event number
53338@@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
53339 u32
53340 fc_get_event_number(void)
53341 {
53342- return atomic_add_return(1, &fc_event_seq);
53343+ return atomic_add_return_unchecked(1, &fc_event_seq);
53344 }
53345 EXPORT_SYMBOL(fc_get_event_number);
53346
53347@@ -655,7 +655,7 @@ static __init int fc_transport_init(void)
53348 {
53349 int error;
53350
53351- atomic_set(&fc_event_seq, 0);
53352+ atomic_set_unchecked(&fc_event_seq, 0);
53353
53354 error = transport_class_register(&fc_host_class);
53355 if (error)
53356@@ -845,7 +845,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
53357 char *cp;
53358
53359 *val = simple_strtoul(buf, &cp, 0);
53360- if ((*cp && (*cp != '\n')) || (*val < 0))
53361+ if (*cp && (*cp != '\n'))
53362 return -EINVAL;
53363 /*
53364 * Check for overflow; dev_loss_tmo is u32
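The fc_str_to_dev_loss change is a dead-code fix rather than hardening: *val is an unsigned long, so the removed *val < 0 comparison could never be true and only produced -Wtype-limits noise. The patched helper, restated outside the diff for clarity:

static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
{
        char *cp;

        *val = simple_strtoul(buf, &cp, 0);
        if (*cp && (*cp != '\n'))
                return -EINVAL;
        /* overflow against the u32 dev_loss_tmo is checked by the
         * caller, as the original comment notes */
        return 0;
}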
53365diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
53366index 0102a2d..cc3f8e9 100644
53367--- a/drivers/scsi/scsi_transport_iscsi.c
53368+++ b/drivers/scsi/scsi_transport_iscsi.c
53369@@ -79,7 +79,7 @@ struct iscsi_internal {
53370 struct transport_container session_cont;
53371 };
53372
53373-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
53374+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
53375 static struct workqueue_struct *iscsi_eh_timer_workq;
53376
53377 static DEFINE_IDA(iscsi_sess_ida);
53378@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
53379 int err;
53380
53381 ihost = shost->shost_data;
53382- session->sid = atomic_add_return(1, &iscsi_session_nr);
53383+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
53384
53385 if (target_id == ISCSI_MAX_TARGET) {
53386 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
53387@@ -4511,7 +4511,7 @@ static __init int iscsi_transport_init(void)
53388 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
53389 ISCSI_TRANSPORT_VERSION);
53390
53391- atomic_set(&iscsi_session_nr, 0);
53392+ atomic_set_unchecked(&iscsi_session_nr, 0);
53393
53394 err = class_register(&iscsi_transport_class);
53395 if (err)
53396diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
53397index a0c5bfd..b94db1e 100644
53398--- a/drivers/scsi/scsi_transport_srp.c
53399+++ b/drivers/scsi/scsi_transport_srp.c
53400@@ -36,7 +36,7 @@
53401 #include "scsi_transport_srp_internal.h"
53402
53403 struct srp_host_attrs {
53404- atomic_t next_port_id;
53405+ atomic_unchecked_t next_port_id;
53406 };
53407 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
53408
53409@@ -101,7 +101,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
53410 struct Scsi_Host *shost = dev_to_shost(dev);
53411 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
53412
53413- atomic_set(&srp_host->next_port_id, 0);
53414+ atomic_set_unchecked(&srp_host->next_port_id, 0);
53415 return 0;
53416 }
53417
53418@@ -735,7 +735,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
53419 rport_fast_io_fail_timedout);
53420 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
53421
53422- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
53423+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
53424 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
53425
53426 transport_setup_device(&rport->dev);
53427diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
53428index ed2e99e..28cf52b 100644
53429--- a/drivers/scsi/sd.c
53430+++ b/drivers/scsi/sd.c
53431@@ -2959,7 +2959,7 @@ static int sd_probe(struct device *dev)
53432 sdkp->disk = gd;
53433 sdkp->index = index;
53434 atomic_set(&sdkp->openers, 0);
53435- atomic_set(&sdkp->device->ioerr_cnt, 0);
53436+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
53437
53438 if (!sdp->request_queue->rq_timeout) {
53439 if (sdp->type != TYPE_MOD)
53440diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
53441index 53268aab..17c2764 100644
53442--- a/drivers/scsi/sg.c
53443+++ b/drivers/scsi/sg.c
53444@@ -1102,7 +1102,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
53445 sdp->disk->disk_name,
53446 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
53447 NULL,
53448- (char *)arg);
53449+ (char __user *)arg);
53450 case BLKTRACESTART:
53451 return blk_trace_startstop(sdp->device->request_queue, 1);
53452 case BLKTRACESTOP:
53453diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
53454index d4f9670..d37b662 100644
53455--- a/drivers/spi/spi.c
53456+++ b/drivers/spi/spi.c
53457@@ -2204,7 +2204,7 @@ int spi_bus_unlock(struct spi_master *master)
53458 EXPORT_SYMBOL_GPL(spi_bus_unlock);
53459
53460 /* portable code must never pass more than 32 bytes */
53461-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
53462+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
53463
53464 static u8 *buf;
53465
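The 32 → 32UL change exists because the kernel's min()/max() macros type-check their operands and refuse mixed types; with SMP_CACHE_BYTES expanding to an unsigned long expression in this configuration, the plain int literal has to be promoted explicitly. The macros enforce this with a pointer-comparison trick, sketched here as a simplified stand-in (max_demo) for the real linux/kernel.h definition:

#define max_demo(x, y) ({                               \
        typeof(x) _x = (x);                             \
        typeof(y) _y = (y);                             \
        (void)(&_x == &_y);     /* warns if types differ */ \
        _x > _y ? _x : _y; })

max_demo(32, some_ulong) trips the comparison-of-distinct-pointer-types warning, while max_demo(32UL, some_ulong) compiles cleanly.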
53466diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
53467index c341ac1..bf9799f 100644
53468--- a/drivers/staging/android/timed_output.c
53469+++ b/drivers/staging/android/timed_output.c
53470@@ -25,7 +25,7 @@
53471 #include "timed_output.h"
53472
53473 static struct class *timed_output_class;
53474-static atomic_t device_count;
53475+static atomic_unchecked_t device_count;
53476
53477 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
53478 char *buf)
53479@@ -63,7 +63,7 @@ static int create_timed_output_class(void)
53480 timed_output_class = class_create(THIS_MODULE, "timed_output");
53481 if (IS_ERR(timed_output_class))
53482 return PTR_ERR(timed_output_class);
53483- atomic_set(&device_count, 0);
53484+ atomic_set_unchecked(&device_count, 0);
53485 timed_output_class->dev_groups = timed_output_groups;
53486 }
53487
53488@@ -81,7 +81,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
53489 if (ret < 0)
53490 return ret;
53491
53492- tdev->index = atomic_inc_return(&device_count);
53493+ tdev->index = atomic_inc_return_unchecked(&device_count);
53494 tdev->dev = device_create(timed_output_class, NULL,
53495 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
53496 if (IS_ERR(tdev->dev))
53497diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
53498index fe47cd3..19a1bd1 100644
53499--- a/drivers/staging/gdm724x/gdm_tty.c
53500+++ b/drivers/staging/gdm724x/gdm_tty.c
53501@@ -44,7 +44,7 @@
53502 #define gdm_tty_send_control(n, r, v, d, l) (\
53503 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
53504
53505-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
53506+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
53507
53508 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
53509 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
53510diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
53511index def8280..e3fd96a 100644
53512--- a/drivers/staging/imx-drm/imx-drm-core.c
53513+++ b/drivers/staging/imx-drm/imx-drm-core.c
53514@@ -355,7 +355,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
53515 if (imxdrm->pipes >= MAX_CRTC)
53516 return -EINVAL;
53517
53518- if (imxdrm->drm->open_count)
53519+ if (local_read(&imxdrm->drm->open_count))
53520 return -EBUSY;
53521
53522 imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
53523diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
53524index 3f8020c..649fded 100644
53525--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
53526+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
53527@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
53528 return 0;
53529 }
53530
53531-sfw_test_client_ops_t brw_test_client;
53532-void brw_init_test_client(void)
53533-{
53534- brw_test_client.tso_init = brw_client_init;
53535- brw_test_client.tso_fini = brw_client_fini;
53536- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
53537- brw_test_client.tso_done_rpc = brw_client_done_rpc;
53538+sfw_test_client_ops_t brw_test_client = {
53539+ .tso_init = brw_client_init,
53540+ .tso_fini = brw_client_fini,
53541+ .tso_prep_rpc = brw_client_prep_rpc,
53542+ .tso_done_rpc = brw_client_done_rpc,
53543 };
53544
53545 srpc_service_t brw_test_service;
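Converting brw_test_client from an init function that fills the ops structure at runtime into a static designated initializer serves the same constification goal: once every field is set at compile time, the object can live in read-only memory and the brw_init_test_client() boot step disappears (the framework.c hunk below drops its declaration and call). The shape of the conversion, with hypothetical names:

struct test_ops {
        int  (*init)(void);
        void (*fini)(void);
};

static int  my_init(void) { return 0; }
static void my_fini(void) { }

/* before: a writable object plus a setup function assigning
 * client.init = my_init; client.fini = my_fini; at boot.
 * after: everything resolved at compile time. */
static struct test_ops client = {
        .init = my_init,
        .fini = my_fini,
};

The identical transformation is applied to ping_test_client further down.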
53546diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
53547index 050723a..fa6fdf1 100644
53548--- a/drivers/staging/lustre/lnet/selftest/framework.c
53549+++ b/drivers/staging/lustre/lnet/selftest/framework.c
53550@@ -1635,12 +1635,10 @@ static srpc_service_t sfw_services[] =
53551
53552 extern sfw_test_client_ops_t ping_test_client;
53553 extern srpc_service_t ping_test_service;
53554-extern void ping_init_test_client(void);
53555 extern void ping_init_test_service(void);
53556
53557 extern sfw_test_client_ops_t brw_test_client;
53558 extern srpc_service_t brw_test_service;
53559-extern void brw_init_test_client(void);
53560 extern void brw_init_test_service(void);
53561
53562
53563@@ -1684,12 +1682,10 @@ sfw_startup (void)
53564 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
53565 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
53566
53567- brw_init_test_client();
53568 brw_init_test_service();
53569 rc = sfw_register_test(&brw_test_service, &brw_test_client);
53570 LASSERT (rc == 0);
53571
53572- ping_init_test_client();
53573 ping_init_test_service();
53574 rc = sfw_register_test(&ping_test_service, &ping_test_client);
53575 LASSERT (rc == 0);
53576diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
53577index 750cac4..e4d751f 100644
53578--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
53579+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
53580@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
53581 return 0;
53582 }
53583
53584-sfw_test_client_ops_t ping_test_client;
53585-void ping_init_test_client(void)
53586-{
53587- ping_test_client.tso_init = ping_client_init;
53588- ping_test_client.tso_fini = ping_client_fini;
53589- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
53590- ping_test_client.tso_done_rpc = ping_client_done_rpc;
53591-}
53592+sfw_test_client_ops_t ping_test_client = {
53593+ .tso_init = ping_client_init,
53594+ .tso_fini = ping_client_fini,
53595+ .tso_prep_rpc = ping_client_prep_rpc,
53596+ .tso_done_rpc = ping_client_done_rpc,
53597+};
53598
53599 srpc_service_t ping_test_service;
53600 void ping_init_test_service(void)
53601diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
53602index 0c6b784..c64235c 100644
53603--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
53604+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
53605@@ -1141,7 +1141,7 @@ struct ldlm_callback_suite {
53606 ldlm_completion_callback lcs_completion;
53607 ldlm_blocking_callback lcs_blocking;
53608 ldlm_glimpse_callback lcs_glimpse;
53609-};
53610+} __no_const;
53611
53612 /* ldlm_lockd.c */
53613 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
53614diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
53615index d5c4613..a341678 100644
53616--- a/drivers/staging/lustre/lustre/include/obd.h
53617+++ b/drivers/staging/lustre/lustre/include/obd.h
53618@@ -1439,7 +1439,7 @@ struct md_ops {
53619 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
53620 * wrapper function in include/linux/obd_class.h.
53621 */
53622-};
53623+} __no_const;
53624
53625 struct lsm_operations {
53626 void (*lsm_free)(struct lov_stripe_md *);
53627diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53628index 986bf38..eab2558f 100644
53629--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53630+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53631@@ -259,7 +259,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
53632 int added = (mode == LCK_NL);
53633 int overlaps = 0;
53634 int splitted = 0;
53635- const struct ldlm_callback_suite null_cbs = { NULL };
53636+ const struct ldlm_callback_suite null_cbs = { };
53637
53638 CDEBUG(D_DLMTRACE, "flags %#llx owner "LPU64" pid %u mode %u start "
53639 LPU64" end "LPU64"\n", *flags,
53640diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53641index e947b91..f408990 100644
53642--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53643+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53644@@ -217,7 +217,7 @@ DECLARE_PROC_HANDLER(proc_debug_mb)
53645 int LL_PROC_PROTO(proc_console_max_delay_cs)
53646 {
53647 int rc, max_delay_cs;
53648- ctl_table_t dummy = *table;
53649+ ctl_table_no_const dummy = *table;
53650 cfs_duration_t d;
53651
53652 dummy.data = &max_delay_cs;
53653@@ -248,7 +248,7 @@ int LL_PROC_PROTO(proc_console_max_delay_cs)
53654 int LL_PROC_PROTO(proc_console_min_delay_cs)
53655 {
53656 int rc, min_delay_cs;
53657- ctl_table_t dummy = *table;
53658+ ctl_table_no_const dummy = *table;
53659 cfs_duration_t d;
53660
53661 dummy.data = &min_delay_cs;
53662@@ -279,7 +279,7 @@ int LL_PROC_PROTO(proc_console_min_delay_cs)
53663 int LL_PROC_PROTO(proc_console_backoff)
53664 {
53665 int rc, backoff;
53666- ctl_table_t dummy = *table;
53667+ ctl_table_no_const dummy = *table;
53668
53669 dummy.data = &backoff;
53670 dummy.proc_handler = &proc_dointvec;
53671diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
53672index b16ee08..a3db5c6 100644
53673--- a/drivers/staging/lustre/lustre/libcfs/module.c
53674+++ b/drivers/staging/lustre/lustre/libcfs/module.c
53675@@ -314,11 +314,11 @@ out:
53676
53677
53678 struct cfs_psdev_ops libcfs_psdev_ops = {
53679- libcfs_psdev_open,
53680- libcfs_psdev_release,
53681- NULL,
53682- NULL,
53683- libcfs_ioctl
53684+ .p_open = libcfs_psdev_open,
53685+ .p_close = libcfs_psdev_release,
53686+ .p_read = NULL,
53687+ .p_write = NULL,
53688+ .p_ioctl = libcfs_ioctl
53689 };
53690
53691 extern int insert_proc(void);
53692diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
53693index ae6f61a..03c3d5d 100644
53694--- a/drivers/staging/lustre/lustre/llite/dir.c
53695+++ b/drivers/staging/lustre/lustre/llite/dir.c
53696@@ -660,7 +660,7 @@ int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
53697 int mode;
53698 int err;
53699
53700- mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask) | S_IFDIR;
53701+ mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current_umask()) | S_IFDIR;
53702 op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
53703 strlen(filename), mode, LUSTRE_OPC_MKDIR,
53704 lump);
53705diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
53706index f670469..03b7438 100644
53707--- a/drivers/staging/media/solo6x10/solo6x10-core.c
53708+++ b/drivers/staging/media/solo6x10/solo6x10-core.c
53709@@ -434,7 +434,7 @@ static void solo_device_release(struct device *dev)
53710
53711 static int solo_sysfs_init(struct solo_dev *solo_dev)
53712 {
53713- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
53714+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
53715 struct device *dev = &solo_dev->dev;
53716 const char *driver;
53717 int i;
53718diff --git a/drivers/staging/media/solo6x10/solo6x10-g723.c b/drivers/staging/media/solo6x10/solo6x10-g723.c
53719index 74f037b..5b5bb76 100644
53720--- a/drivers/staging/media/solo6x10/solo6x10-g723.c
53721+++ b/drivers/staging/media/solo6x10/solo6x10-g723.c
53722@@ -355,7 +355,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
53723
53724 int solo_g723_init(struct solo_dev *solo_dev)
53725 {
53726- static struct snd_device_ops ops = { NULL };
53727+ static struct snd_device_ops ops = { };
53728 struct snd_card *card;
53729 struct snd_kcontrol_new kctl;
53730 char name[32];
53731diff --git a/drivers/staging/media/solo6x10/solo6x10-p2m.c b/drivers/staging/media/solo6x10/solo6x10-p2m.c
53732index 7f2f247..d999137 100644
53733--- a/drivers/staging/media/solo6x10/solo6x10-p2m.c
53734+++ b/drivers/staging/media/solo6x10/solo6x10-p2m.c
53735@@ -77,7 +77,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
53736
53737 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
53738 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
53739- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
53740+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
53741 if (p2m_id < 0)
53742 p2m_id = -p2m_id;
53743 }
53744diff --git a/drivers/staging/media/solo6x10/solo6x10.h b/drivers/staging/media/solo6x10/solo6x10.h
53745index 8964f8b..36eb087 100644
53746--- a/drivers/staging/media/solo6x10/solo6x10.h
53747+++ b/drivers/staging/media/solo6x10/solo6x10.h
53748@@ -237,7 +237,7 @@ struct solo_dev {
53749
53750 /* P2M DMA Engine */
53751 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
53752- atomic_t p2m_count;
53753+ atomic_unchecked_t p2m_count;
53754 int p2m_jiffies;
53755 unsigned int p2m_timeouts;
53756
53757diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
53758index a0f4868..139f1fb 100644
53759--- a/drivers/staging/octeon/ethernet-rx.c
53760+++ b/drivers/staging/octeon/ethernet-rx.c
53761@@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
53762 /* Increment RX stats for virtual ports */
53763 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
53764 #ifdef CONFIG_64BIT
53765- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
53766- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
53767+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
53768+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
53769 #else
53770- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
53771- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
53772+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
53773+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
53774 #endif
53775 }
53776 netif_receive_skb(skb);
53777@@ -432,9 +432,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
53778 dev->name);
53779 */
53780 #ifdef CONFIG_64BIT
53781- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
53782+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
53783 #else
53784- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
53785+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
53786 #endif
53787 dev_kfree_skb_irq(skb);
53788 }
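The octeon driver treats the plain integer fields of struct net_device_stats as atomics by casting their addresses, so the REFCOUNT conversion has to cast to the unchecked types instead. The underlying idiom, sketched with a hypothetical stats structure (this mirrors the 32-bit #else branch above):

#include <linux/atomic.h>

struct demo_stats {
        unsigned long rx_packets;       /* not declared atomic */
};

static void count_rx(struct demo_stats *stats)
{
        /* lockless update by reinterpreting the field; a wrapping
         * statistic must use the unchecked operation under PaX */
        atomic_add_unchecked(1, (atomic_unchecked_t *)&stats->rx_packets);
}

The cast relies on the integer field and the atomic type sharing size and alignment, which is why the driver switches to the atomic64 variants under CONFIG_64BIT.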
53789diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
53790index da9dd6b..8e3e0f5 100644
53791--- a/drivers/staging/octeon/ethernet.c
53792+++ b/drivers/staging/octeon/ethernet.c
53793@@ -247,11 +247,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
53794 * since the RX tasklet also increments it.
53795 */
53796 #ifdef CONFIG_64BIT
53797- atomic64_add(rx_status.dropped_packets,
53798- (atomic64_t *)&priv->stats.rx_dropped);
53799+ atomic64_add_unchecked(rx_status.dropped_packets,
53800+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
53801 #else
53802- atomic_add(rx_status.dropped_packets,
53803- (atomic_t *)&priv->stats.rx_dropped);
53804+ atomic_add_unchecked(rx_status.dropped_packets,
53805+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
53806 #endif
53807 }
53808
53809diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
53810index c59fccd..79f8fc2 100644
53811--- a/drivers/staging/rtl8188eu/include/hal_intf.h
53812+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
53813@@ -267,7 +267,7 @@ struct hal_ops {
53814 s32 (*c2h_handler)(struct adapter *padapter,
53815 struct c2h_evt_hdr *c2h_evt);
53816 c2h_id_filter c2h_id_filter_ccx;
53817-};
53818+} __no_const;
53819
53820 enum rt_eeprom_type {
53821 EEPROM_93C46,
53822diff --git a/drivers/staging/rtl8188eu/include/rtw_io.h b/drivers/staging/rtl8188eu/include/rtw_io.h
53823index e8790f8..b4a5980 100644
53824--- a/drivers/staging/rtl8188eu/include/rtw_io.h
53825+++ b/drivers/staging/rtl8188eu/include/rtw_io.h
53826@@ -124,7 +124,7 @@ struct _io_ops {
53827 u32 (*_write_scsi)(struct intf_hdl *pintfhdl, u32 cnt, u8 *pmem);
53828 void (*_read_port_cancel)(struct intf_hdl *pintfhdl);
53829 void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
53830-};
53831+} __no_const;
53832
53833 struct io_req {
53834 struct list_head list;
53835diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
53836index dc23395..cf7e9b1 100644
53837--- a/drivers/staging/rtl8712/rtl871x_io.h
53838+++ b/drivers/staging/rtl8712/rtl871x_io.h
53839@@ -108,7 +108,7 @@ struct _io_ops {
53840 u8 *pmem);
53841 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
53842 u8 *pmem);
53843-};
53844+} __no_const;
53845
53846 struct io_req {
53847 struct list_head list;
53848diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
53849index a863a98..d272795 100644
53850--- a/drivers/staging/usbip/vhci.h
53851+++ b/drivers/staging/usbip/vhci.h
53852@@ -83,7 +83,7 @@ struct vhci_hcd {
53853 unsigned resuming:1;
53854 unsigned long re_timeout;
53855
53856- atomic_t seqnum;
53857+ atomic_unchecked_t seqnum;
53858
53859 /*
53860 * NOTE:
53861diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
53862index 0007d30..c06a693 100644
53863--- a/drivers/staging/usbip/vhci_hcd.c
53864+++ b/drivers/staging/usbip/vhci_hcd.c
53865@@ -439,7 +439,7 @@ static void vhci_tx_urb(struct urb *urb)
53866
53867 spin_lock(&vdev->priv_lock);
53868
53869- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
53870+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
53871 if (priv->seqnum == 0xffff)
53872 dev_info(&urb->dev->dev, "seqnum max\n");
53873
53874@@ -686,7 +686,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
53875 return -ENOMEM;
53876 }
53877
53878- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
53879+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
53880 if (unlink->seqnum == 0xffff)
53881 pr_info("seqnum max\n");
53882
53883@@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hcd)
53884 vdev->rhport = rhport;
53885 }
53886
53887- atomic_set(&vhci->seqnum, 0);
53888+ atomic_set_unchecked(&vhci->seqnum, 0);
53889 spin_lock_init(&vhci->lock);
53890
53891 hcd->power_budget = 0; /* no limit */
53892diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
53893index d07fcb5..358e1e1 100644
53894--- a/drivers/staging/usbip/vhci_rx.c
53895+++ b/drivers/staging/usbip/vhci_rx.c
53896@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
53897 if (!urb) {
53898 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
53899 pr_info("max seqnum %d\n",
53900- atomic_read(&the_controller->seqnum));
53901+ atomic_read_unchecked(&the_controller->seqnum));
53902 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
53903 return;
53904 }
53905diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
53906index 317c2a8..ffeb4ef 100644
53907--- a/drivers/staging/vt6655/hostap.c
53908+++ b/drivers/staging/vt6655/hostap.c
53909@@ -68,14 +68,13 @@ static int msglevel = MSG_LEVEL_INFO;
53910 *
53911 */
53912
53913+static net_device_ops_no_const apdev_netdev_ops;
53914+
53915 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
53916 {
53917 PSDevice apdev_priv;
53918 struct net_device *dev = pDevice->dev;
53919 int ret;
53920- const struct net_device_ops apdev_netdev_ops = {
53921- .ndo_start_xmit = pDevice->tx_80211,
53922- };
53923
53924 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
53925
53926@@ -87,6 +86,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
53927 *apdev_priv = *pDevice;
53928 eth_hw_addr_inherit(pDevice->apdev, dev);
53929
53930+ /* only half broken now */
53931+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
53932 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
53933
53934 pDevice->apdev->type = ARPHRD_IEEE80211;
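The vt6655 change illustrates the awkward middle ground the patch's own /* only half broken now */ comment admits to: the original code built a net_device_ops on the stack with one device-specific callback, which the constify plugin cannot allow, so the table becomes a single writable file-scope net_device_ops_no_const whose ndo_start_xmit is filled in at enable time. A sketch of the trade-off, with hypothetical names:

#include <linux/netdevice.h>

static net_device_ops_no_const apdev_ops;       /* shared, writable */

static int enable_ap(struct net_device *apdev,
                     netdev_tx_t (*xmit)(struct sk_buff *, struct net_device *))
{
        /* one writable slot instead of a fully const table; note
         * the shared table is racy if two devices enable at once */
        apdev_ops.ndo_start_xmit = xmit;
        apdev->netdev_ops = &apdev_ops;
        return 0;
}

That race on the shared table is the remaining half of the brokenness.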
53935diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
53936index e7e9372..161f530 100644
53937--- a/drivers/target/sbp/sbp_target.c
53938+++ b/drivers/target/sbp/sbp_target.c
53939@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
53940
53941 #define SESSION_MAINTENANCE_INTERVAL HZ
53942
53943-static atomic_t login_id = ATOMIC_INIT(0);
53944+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
53945
53946 static void session_maintenance_work(struct work_struct *);
53947 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
53948@@ -444,7 +444,7 @@ static void sbp_management_request_login(
53949 login->lun = se_lun;
53950 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
53951 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
53952- login->login_id = atomic_inc_return(&login_id);
53953+ login->login_id = atomic_inc_return_unchecked(&login_id);
53954
53955 login->tgt_agt = sbp_target_agent_register(login);
53956 if (IS_ERR(login->tgt_agt)) {
53957diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
53958index 98da901..bb443e8 100644
53959--- a/drivers/target/target_core_device.c
53960+++ b/drivers/target/target_core_device.c
53961@@ -1525,7 +1525,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
53962 spin_lock_init(&dev->se_tmr_lock);
53963 spin_lock_init(&dev->qf_cmd_lock);
53964 sema_init(&dev->caw_sem, 1);
53965- atomic_set(&dev->dev_ordered_id, 0);
53966+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
53967 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
53968 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
53969 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
53970diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
53971index 7fa62fc..abdd041 100644
53972--- a/drivers/target/target_core_transport.c
53973+++ b/drivers/target/target_core_transport.c
53974@@ -1165,7 +1165,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
53975 * Used to determine when ORDERED commands should go from
53976 * Dormant to Active status.
53977 */
53978- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
53979+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
53980 smp_mb__after_atomic();
53981 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
53982 cmd->se_ordered_id, cmd->sam_task_attr,
53983diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
53984index 4b2b999..cad9fa5 100644
53985--- a/drivers/thermal/of-thermal.c
53986+++ b/drivers/thermal/of-thermal.c
53987@@ -30,6 +30,7 @@
53988 #include <linux/err.h>
53989 #include <linux/export.h>
53990 #include <linux/string.h>
53991+#include <linux/mm.h>
53992
53993 #include "thermal_core.h"
53994
53995@@ -341,8 +342,10 @@ thermal_zone_of_add_sensor(struct device_node *zone,
53996 tz->get_trend = get_trend;
53997 tz->sensor_data = data;
53998
53999- tzd->ops->get_temp = of_thermal_get_temp;
54000- tzd->ops->get_trend = of_thermal_get_trend;
54001+ pax_open_kernel();
54002+ *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
54003+ *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
54004+ pax_close_kernel();
54005 mutex_unlock(&tzd->lock);
54006
54007 return tzd;
54008@@ -461,8 +464,10 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
54009 return;
54010
54011 mutex_lock(&tzd->lock);
54012- tzd->ops->get_temp = NULL;
54013- tzd->ops->get_trend = NULL;
54014+ pax_open_kernel();
54015+ *(void **)&tzd->ops->get_temp = NULL;
54016+ *(void **)&tzd->ops->get_trend = NULL;
54017+ pax_close_kernel();
54018
54019 tz->get_temp = NULL;
54020 tz->get_trend = NULL;
54021diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
54022index a57bb5a..1f727d33 100644
54023--- a/drivers/tty/cyclades.c
54024+++ b/drivers/tty/cyclades.c
54025@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
54026 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
54027 info->port.count);
54028 #endif
54029- info->port.count++;
54030+ atomic_inc(&info->port.count);
54031 #ifdef CY_DEBUG_COUNT
54032 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
54033- current->pid, info->port.count);
54034+ current->pid, atomic_read(&info->port.count));
54035 #endif
54036
54037 /*
54038@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
54039 for (j = 0; j < cy_card[i].nports; j++) {
54040 info = &cy_card[i].ports[j];
54041
54042- if (info->port.count) {
54043+ if (atomic_read(&info->port.count)) {
54044 /* XXX is the ldisc num worth this? */
54045 struct tty_struct *tty;
54046 struct tty_ldisc *ld;
54047diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
54048index 4fcec1d..5a036f7 100644
54049--- a/drivers/tty/hvc/hvc_console.c
54050+++ b/drivers/tty/hvc/hvc_console.c
54051@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
54052
54053 spin_lock_irqsave(&hp->port.lock, flags);
54054 /* Check and then increment for fast path open. */
54055- if (hp->port.count++ > 0) {
54056+ if (atomic_inc_return(&hp->port.count) > 1) {
54057 spin_unlock_irqrestore(&hp->port.lock, flags);
54058 hvc_kick();
54059 return 0;
54060@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
54061
54062 spin_lock_irqsave(&hp->port.lock, flags);
54063
54064- if (--hp->port.count == 0) {
54065+ if (atomic_dec_return(&hp->port.count) == 0) {
54066 spin_unlock_irqrestore(&hp->port.lock, flags);
54067 /* We are done with the tty pointer now. */
54068 tty_port_tty_set(&hp->port, NULL);
54069@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
54070 */
54071 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
54072 } else {
54073- if (hp->port.count < 0)
54074+ if (atomic_read(&hp->port.count) < 0)
54075 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
54076- hp->vtermno, hp->port.count);
54077+ hp->vtermno, atomic_read(&hp->port.count));
54078 spin_unlock_irqrestore(&hp->port.lock, flags);
54079 }
54080 }
54081@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
54082 * open->hangup case this can be called after the final close so prevent
54083 * that from happening for now.
54084 */
54085- if (hp->port.count <= 0) {
54086+ if (atomic_read(&hp->port.count) <= 0) {
54087 spin_unlock_irqrestore(&hp->port.lock, flags);
54088 return;
54089 }
54090
54091- hp->port.count = 0;
54092+ atomic_set(&hp->port.count, 0);
54093 spin_unlock_irqrestore(&hp->port.lock, flags);
54094 tty_port_tty_set(&hp->port, NULL);
54095
54096@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
54097 return -EPIPE;
54098
54099 /* FIXME what's this (unprotected) check for? */
54100- if (hp->port.count <= 0)
54101+ if (atomic_read(&hp->port.count) <= 0)
54102 return -EIO;
54103
54104 spin_lock_irqsave(&hp->lock, flags);
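Turning tty_port.count into an atomic_t forces every open/close site to restate its arithmetic in return-value form, and the thresholds shift accordingly: atomic_inc_return() yields the value after the increment, so the fast-path test count++ > 0 becomes inc_return > 1, while --count == 0 maps directly to atomic_dec_return(...) == 0. A compressed sketch of the mapping:

#include <linux/atomic.h>

static atomic_t count = ATOMIC_INIT(0);

static int fastpath_open(void)
{
        /* was: if (count++ > 0) -- post-increment compared the
         * old value, inc_return compares the new one */
        return atomic_inc_return(&count) > 1;
}

static int last_close(void)
{
        /* was: if (--count == 0) -- pre-decrement already
         * compared the new value, so the threshold is unchanged */
        return atomic_dec_return(&count) == 0;
}

The same mechanical rewrite recurs through hvcs, hvsi, ipwireless, moxa, n_gsm, rocket and serial_core below.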
54105diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
54106index 81e939e..95ead10 100644
54107--- a/drivers/tty/hvc/hvcs.c
54108+++ b/drivers/tty/hvc/hvcs.c
54109@@ -83,6 +83,7 @@
54110 #include <asm/hvcserver.h>
54111 #include <asm/uaccess.h>
54112 #include <asm/vio.h>
54113+#include <asm/local.h>
54114
54115 /*
54116 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
54117@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
54118
54119 spin_lock_irqsave(&hvcsd->lock, flags);
54120
54121- if (hvcsd->port.count > 0) {
54122+ if (atomic_read(&hvcsd->port.count) > 0) {
54123 spin_unlock_irqrestore(&hvcsd->lock, flags);
54124 printk(KERN_INFO "HVCS: vterm state unchanged. "
54125 "The hvcs device node is still in use.\n");
54126@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
54127 }
54128 }
54129
54130- hvcsd->port.count = 0;
54131+ atomic_set(&hvcsd->port.count, 0);
54132 hvcsd->port.tty = tty;
54133 tty->driver_data = hvcsd;
54134
54135@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
54136 unsigned long flags;
54137
54138 spin_lock_irqsave(&hvcsd->lock, flags);
54139- hvcsd->port.count++;
54140+ atomic_inc(&hvcsd->port.count);
54141 hvcsd->todo_mask |= HVCS_SCHED_READ;
54142 spin_unlock_irqrestore(&hvcsd->lock, flags);
54143
54144@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
54145 hvcsd = tty->driver_data;
54146
54147 spin_lock_irqsave(&hvcsd->lock, flags);
54148- if (--hvcsd->port.count == 0) {
54149+ if (atomic_dec_and_test(&hvcsd->port.count)) {
54150
54151 vio_disable_interrupts(hvcsd->vdev);
54152
54153@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
54154
54155 free_irq(irq, hvcsd);
54156 return;
54157- } else if (hvcsd->port.count < 0) {
54158+ } else if (atomic_read(&hvcsd->port.count) < 0) {
54159 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
54160 " is missmanaged.\n",
54161- hvcsd->vdev->unit_address, hvcsd->port.count);
54162+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
54163 }
54164
54165 spin_unlock_irqrestore(&hvcsd->lock, flags);
54166@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
54167
54168 spin_lock_irqsave(&hvcsd->lock, flags);
54169 /* Preserve this so that we know how many kref refs to put */
54170- temp_open_count = hvcsd->port.count;
54171+ temp_open_count = atomic_read(&hvcsd->port.count);
54172
54173 /*
54174 * Don't kref put inside the spinlock because the destruction
54175@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
54176 tty->driver_data = NULL;
54177 hvcsd->port.tty = NULL;
54178
54179- hvcsd->port.count = 0;
54180+ atomic_set(&hvcsd->port.count, 0);
54181
54182 /* This will drop any buffered data on the floor which is OK in a hangup
54183 * scenario. */
54184@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
54185 * the middle of a write operation? This is a crummy place to do this
54186 * but we want to keep it all in the spinlock.
54187 */
54188- if (hvcsd->port.count <= 0) {
54189+ if (atomic_read(&hvcsd->port.count) <= 0) {
54190 spin_unlock_irqrestore(&hvcsd->lock, flags);
54191 return -ENODEV;
54192 }
54193@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
54194 {
54195 struct hvcs_struct *hvcsd = tty->driver_data;
54196
54197- if (!hvcsd || hvcsd->port.count <= 0)
54198+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
54199 return 0;
54200
54201 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
54202diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
54203index 4190199..06d5bfa 100644
54204--- a/drivers/tty/hvc/hvsi.c
54205+++ b/drivers/tty/hvc/hvsi.c
54206@@ -85,7 +85,7 @@ struct hvsi_struct {
54207 int n_outbuf;
54208 uint32_t vtermno;
54209 uint32_t virq;
54210- atomic_t seqno; /* HVSI packet sequence number */
54211+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
54212 uint16_t mctrl;
54213 uint8_t state; /* HVSI protocol state */
54214 uint8_t flags;
54215@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
54216
54217 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
54218 packet.hdr.len = sizeof(struct hvsi_query_response);
54219- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54220+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54221 packet.verb = VSV_SEND_VERSION_NUMBER;
54222 packet.u.version = HVSI_VERSION;
54223 packet.query_seqno = query_seqno+1;
54224@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
54225
54226 packet.hdr.type = VS_QUERY_PACKET_HEADER;
54227 packet.hdr.len = sizeof(struct hvsi_query);
54228- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54229+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54230 packet.verb = verb;
54231
54232 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
54233@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
54234 int wrote;
54235
54236 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
54237- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54238+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54239 packet.hdr.len = sizeof(struct hvsi_control);
54240 packet.verb = VSV_SET_MODEM_CTL;
54241 packet.mask = HVSI_TSDTR;
54242@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
54243 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
54244
54245 packet.hdr.type = VS_DATA_PACKET_HEADER;
54246- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54247+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54248 packet.hdr.len = count + sizeof(struct hvsi_header);
54249 memcpy(&packet.data, buf, count);
54250
54251@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
54252 struct hvsi_control packet __ALIGNED__;
54253
54254 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
54255- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54256+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54257 packet.hdr.len = 6;
54258 packet.verb = VSV_CLOSE_PROTOCOL;
54259
54260@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
54261
54262 tty_port_tty_set(&hp->port, tty);
54263 spin_lock_irqsave(&hp->lock, flags);
54264- hp->port.count++;
54265+ atomic_inc(&hp->port.count);
54266 atomic_set(&hp->seqno, 0);
54267 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
54268 spin_unlock_irqrestore(&hp->lock, flags);
54269@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
54270
54271 spin_lock_irqsave(&hp->lock, flags);
54272
54273- if (--hp->port.count == 0) {
54274+ if (atomic_dec_return(&hp->port.count) == 0) {
54275 tty_port_tty_set(&hp->port, NULL);
54276 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
54277
54278@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
54279
54280 spin_lock_irqsave(&hp->lock, flags);
54281 }
54282- } else if (hp->port.count < 0)
54283+ } else if (atomic_read(&hp->port.count) < 0)
54284 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
54285- hp - hvsi_ports, hp->port.count);
54286+ hp - hvsi_ports, atomic_read(&hp->port.count));
54287
54288 spin_unlock_irqrestore(&hp->lock, flags);
54289 }
54290@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
54291 tty_port_tty_set(&hp->port, NULL);
54292
54293 spin_lock_irqsave(&hp->lock, flags);
54294- hp->port.count = 0;
54295+ atomic_set(&hp->port.count, 0);
54296 hp->n_outbuf = 0;
54297 spin_unlock_irqrestore(&hp->lock, flags);
54298 }
54299diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
54300index 7ae6c29..05c6dba 100644
54301--- a/drivers/tty/hvc/hvsi_lib.c
54302+++ b/drivers/tty/hvc/hvsi_lib.c
54303@@ -8,7 +8,7 @@
54304
54305 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
54306 {
54307- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
54308+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
54309
54310 /* Assumes that always succeeds, works in practice */
54311 return pv->put_chars(pv->termno, (char *)packet, packet->len);
54312@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
54313
54314 /* Reset state */
54315 pv->established = 0;
54316- atomic_set(&pv->seqno, 0);
54317+ atomic_set_unchecked(&pv->seqno, 0);
54318
54319 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
54320
54321diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
54322index 17ee3bf..8d2520d 100644
54323--- a/drivers/tty/ipwireless/tty.c
54324+++ b/drivers/tty/ipwireless/tty.c
54325@@ -28,6 +28,7 @@
54326 #include <linux/tty_driver.h>
54327 #include <linux/tty_flip.h>
54328 #include <linux/uaccess.h>
54329+#include <asm/local.h>
54330
54331 #include "tty.h"
54332 #include "network.h"
54333@@ -98,10 +99,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
54334 mutex_unlock(&tty->ipw_tty_mutex);
54335 return -ENODEV;
54336 }
54337- if (tty->port.count == 0)
54338+ if (atomic_read(&tty->port.count) == 0)
54339 tty->tx_bytes_queued = 0;
54340
54341- tty->port.count++;
54342+ atomic_inc(&tty->port.count);
54343
54344 tty->port.tty = linux_tty;
54345 linux_tty->driver_data = tty;
54346@@ -117,9 +118,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
54347
54348 static void do_ipw_close(struct ipw_tty *tty)
54349 {
54350- tty->port.count--;
54351-
54352- if (tty->port.count == 0) {
54353+ if (atomic_dec_return(&tty->port.count) == 0) {
54354 struct tty_struct *linux_tty = tty->port.tty;
54355
54356 if (linux_tty != NULL) {
54357@@ -140,7 +139,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
54358 return;
54359
54360 mutex_lock(&tty->ipw_tty_mutex);
54361- if (tty->port.count == 0) {
54362+ if (atomic_read(&tty->port.count) == 0) {
54363 mutex_unlock(&tty->ipw_tty_mutex);
54364 return;
54365 }
54366@@ -163,7 +162,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
54367
54368 mutex_lock(&tty->ipw_tty_mutex);
54369
54370- if (!tty->port.count) {
54371+ if (!atomic_read(&tty->port.count)) {
54372 mutex_unlock(&tty->ipw_tty_mutex);
54373 return;
54374 }
54375@@ -202,7 +201,7 @@ static int ipw_write(struct tty_struct *linux_tty,
54376 return -ENODEV;
54377
54378 mutex_lock(&tty->ipw_tty_mutex);
54379- if (!tty->port.count) {
54380+ if (!atomic_read(&tty->port.count)) {
54381 mutex_unlock(&tty->ipw_tty_mutex);
54382 return -EINVAL;
54383 }
54384@@ -242,7 +241,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
54385 if (!tty)
54386 return -ENODEV;
54387
54388- if (!tty->port.count)
54389+ if (!atomic_read(&tty->port.count))
54390 return -EINVAL;
54391
54392 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
54393@@ -284,7 +283,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
54394 if (!tty)
54395 return 0;
54396
54397- if (!tty->port.count)
54398+ if (!atomic_read(&tty->port.count))
54399 return 0;
54400
54401 return tty->tx_bytes_queued;
54402@@ -365,7 +364,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
54403 if (!tty)
54404 return -ENODEV;
54405
54406- if (!tty->port.count)
54407+ if (!atomic_read(&tty->port.count))
54408 return -EINVAL;
54409
54410 return get_control_lines(tty);
54411@@ -381,7 +380,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
54412 if (!tty)
54413 return -ENODEV;
54414
54415- if (!tty->port.count)
54416+ if (!atomic_read(&tty->port.count))
54417 return -EINVAL;
54418
54419 return set_control_lines(tty, set, clear);
54420@@ -395,7 +394,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
54421 if (!tty)
54422 return -ENODEV;
54423
54424- if (!tty->port.count)
54425+ if (!atomic_read(&tty->port.count))
54426 return -EINVAL;
54427
54428 /* FIXME: Exactly how is the tty object locked here .. */
54429@@ -551,7 +550,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
54430 * are gone */
54431 mutex_lock(&ttyj->ipw_tty_mutex);
54432 }
54433- while (ttyj->port.count)
54434+ while (atomic_read(&ttyj->port.count))
54435 do_ipw_close(ttyj);
54436 ipwireless_disassociate_network_ttys(network,
54437 ttyj->channel_idx);
54438diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
54439index 1deaca4..c8582d4 100644
54440--- a/drivers/tty/moxa.c
54441+++ b/drivers/tty/moxa.c
54442@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
54443 }
54444
54445 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
54446- ch->port.count++;
54447+ atomic_inc(&ch->port.count);
54448 tty->driver_data = ch;
54449 tty_port_tty_set(&ch->port, tty);
54450 mutex_lock(&ch->port.mutex);
54451diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
54452index 2ebe47b..3205833 100644
54453--- a/drivers/tty/n_gsm.c
54454+++ b/drivers/tty/n_gsm.c
54455@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
54456 spin_lock_init(&dlci->lock);
54457 mutex_init(&dlci->mutex);
54458 dlci->fifo = &dlci->_fifo;
54459- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
54460+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
54461 kfree(dlci);
54462 return NULL;
54463 }
54464@@ -2954,7 +2954,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
54465 struct gsm_dlci *dlci = tty->driver_data;
54466 struct tty_port *port = &dlci->port;
54467
54468- port->count++;
54469+ atomic_inc(&port->count);
54470 tty_port_tty_set(port, tty);
54471
54472 dlci->modem_rx = 0;
54473diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
54474index f44f1ba..a8d5915 100644
54475--- a/drivers/tty/n_tty.c
54476+++ b/drivers/tty/n_tty.c
54477@@ -115,7 +115,7 @@ struct n_tty_data {
54478 int minimum_to_wake;
54479
54480 /* consumer-published */
54481- size_t read_tail;
54482+ size_t read_tail __intentional_overflow(-1);
54483 size_t line_start;
54484
54485 /* protected by output lock */
54486@@ -2517,6 +2517,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
54487 {
54488 *ops = tty_ldisc_N_TTY;
54489 ops->owner = NULL;
54490- ops->refcount = ops->flags = 0;
54491+ atomic_set(&ops->refcount, 0);
54492+ ops->flags = 0;
54493 }
54494 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
54495diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
54496index 25c9bc7..24077b7 100644
54497--- a/drivers/tty/pty.c
54498+++ b/drivers/tty/pty.c
54499@@ -790,8 +790,10 @@ static void __init unix98_pty_init(void)
54500 panic("Couldn't register Unix98 pts driver");
54501
54502 /* Now create the /dev/ptmx special device */
54503+ pax_open_kernel();
54504 tty_default_fops(&ptmx_fops);
54505- ptmx_fops.open = ptmx_open;
54506+ *(void **)&ptmx_fops.open = ptmx_open;
54507+ pax_close_kernel();
54508
54509 cdev_init(&ptmx_cdev, &ptmx_fops);
54510 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
54511diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
54512index 383c4c7..d408e21 100644
54513--- a/drivers/tty/rocket.c
54514+++ b/drivers/tty/rocket.c
54515@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
54516 tty->driver_data = info;
54517 tty_port_tty_set(port, tty);
54518
54519- if (port->count++ == 0) {
54520+ if (atomic_inc_return(&port->count) == 1) {
54521 atomic_inc(&rp_num_ports_open);
54522
54523 #ifdef ROCKET_DEBUG_OPEN
54524@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
54525 #endif
54526 }
54527 #ifdef ROCKET_DEBUG_OPEN
54528- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
54529+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic-read(&info->port.count));
54530 #endif
54531
54532 /*
54533@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
54534 spin_unlock_irqrestore(&info->port.lock, flags);
54535 return;
54536 }
54537- if (info->port.count)
54538+ if (atomic_read(&info->port.count))
54539 atomic_dec(&rp_num_ports_open);
54540 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
54541 spin_unlock_irqrestore(&info->port.lock, flags);
54542diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
54543index 1274499..f541382 100644
54544--- a/drivers/tty/serial/ioc4_serial.c
54545+++ b/drivers/tty/serial/ioc4_serial.c
54546@@ -437,7 +437,7 @@ struct ioc4_soft {
54547 } is_intr_info[MAX_IOC4_INTR_ENTS];
54548
54549 /* Number of entries active in the above array */
54550- atomic_t is_num_intrs;
54551+ atomic_unchecked_t is_num_intrs;
54552 } is_intr_type[IOC4_NUM_INTR_TYPES];
54553
54554 /* is_ir_lock must be held while
54555@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
54556 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
54557 || (type == IOC4_OTHER_INTR_TYPE)));
54558
54559- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
54560+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
54561 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
54562
54563 /* Save off the lower level interrupt handler */
54564@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
54565
54566 soft = arg;
54567 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
54568- num_intrs = (int)atomic_read(
54569+ num_intrs = (int)atomic_read_unchecked(
54570 &soft->is_intr_type[intr_type].is_num_intrs);
54571
54572 this_mir = this_ir = pending_intrs(soft, intr_type);
54573diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
54574index cfadf29..8cf4595 100644
54575--- a/drivers/tty/serial/kgdb_nmi.c
54576+++ b/drivers/tty/serial/kgdb_nmi.c
54577@@ -51,7 +51,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
54578 * I/O utilities that messages sent to the console will automatically
54579 * be displayed on the dbg_io.
54580 */
54581- dbg_io_ops->is_console = true;
54582+ pax_open_kernel();
54583+ *(int *)&dbg_io_ops->is_console = true;
54584+ pax_close_kernel();
54585
54586 return 0;
54587 }
54588diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
54589index a260cde..6b2b5ce 100644
54590--- a/drivers/tty/serial/kgdboc.c
54591+++ b/drivers/tty/serial/kgdboc.c
54592@@ -24,8 +24,9 @@
54593 #define MAX_CONFIG_LEN 40
54594
54595 static struct kgdb_io kgdboc_io_ops;
54596+static struct kgdb_io kgdboc_io_ops_console;
54597
54598-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
54599+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
54600 static int configured = -1;
54601
54602 static char config[MAX_CONFIG_LEN];
54603@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
54604 kgdboc_unregister_kbd();
54605 if (configured == 1)
54606 kgdb_unregister_io_module(&kgdboc_io_ops);
54607+ else if (configured == 2)
54608+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
54609 }
54610
54611 static int configure_kgdboc(void)
54612@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
54613 int err;
54614 char *cptr = config;
54615 struct console *cons;
54616+ int is_console = 0;
54617
54618 err = kgdboc_option_setup(config);
54619 if (err || !strlen(config) || isspace(config[0]))
54620 goto noconfig;
54621
54622 err = -ENODEV;
54623- kgdboc_io_ops.is_console = 0;
54624 kgdb_tty_driver = NULL;
54625
54626 kgdboc_use_kms = 0;
54627@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
54628 int idx;
54629 if (cons->device && cons->device(cons, &idx) == p &&
54630 idx == tty_line) {
54631- kgdboc_io_ops.is_console = 1;
54632+ is_console = 1;
54633 break;
54634 }
54635 cons = cons->next;
54636@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
54637 kgdb_tty_line = tty_line;
54638
54639 do_register:
54640- err = kgdb_register_io_module(&kgdboc_io_ops);
54641+ if (is_console) {
54642+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
54643+ configured = 2;
54644+ } else {
54645+ err = kgdb_register_io_module(&kgdboc_io_ops);
54646+ configured = 1;
54647+ }
54648 if (err)
54649 goto noconfig;
54650
54651@@ -205,8 +214,6 @@ do_register:
54652 if (err)
54653 goto nmi_con_failed;
54654
54655- configured = 1;
54656-
54657 return 0;
54658
54659 nmi_con_failed:
54660@@ -223,7 +230,7 @@ noconfig:
54661 static int __init init_kgdboc(void)
54662 {
54663 /* Already configured? */
54664- if (configured == 1)
54665+ if (configured >= 1)
54666 return 0;
54667
54668 return configure_kgdboc();
54669@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
54670 if (config[len - 1] == '\n')
54671 config[len - 1] = '\0';
54672
54673- if (configured == 1)
54674+ if (configured >= 1)
54675 cleanup_kgdboc();
54676
54677 /* Go and configure with the new params. */
54678@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
54679 .post_exception = kgdboc_post_exp_handler,
54680 };
54681
54682+static struct kgdb_io kgdboc_io_ops_console = {
54683+ .name = "kgdboc",
54684+ .read_char = kgdboc_get_char,
54685+ .write_char = kgdboc_put_char,
54686+ .pre_exception = kgdboc_pre_exp_handler,
54687+ .post_exception = kgdboc_post_exp_handler,
54688+ .is_console = 1
54689+};
54690+
54691 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
54692 /* This is only available if kgdboc is a built in for early debugging */
54693 static int __init kgdboc_early_init(char *opt)
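The kgdboc change above avoids runtime writes to the constified ops altogether: rather than flipping is_console, it keeps two static instances and registers whichever applies. A hedged sketch of the same idea with made-up names (real handlers omitted):

static struct kgdb_io demo_io = {
	.name = "demo",			/* .is_console stays 0 */
};
static struct kgdb_io demo_io_console = {
	.name = "demo",
	.is_console = 1,
};

static int demo_register(int on_console)
{
	/* pick the variant at registration time; no field is ever modified */
	return kgdb_register_io_module(on_console ? &demo_io_console : &demo_io);
}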
54694diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
54695index 72000a6..a190bc4 100644
54696--- a/drivers/tty/serial/msm_serial.c
54697+++ b/drivers/tty/serial/msm_serial.c
54698@@ -981,7 +981,7 @@ static struct uart_driver msm_uart_driver = {
54699 .cons = MSM_CONSOLE,
54700 };
54701
54702-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
54703+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
54704
54705 static const struct of_device_id msm_uartdm_table[] = {
54706 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
54707@@ -1000,7 +1000,7 @@ static int msm_serial_probe(struct platform_device *pdev)
54708 int irq;
54709
54710 if (pdev->id == -1)
54711- pdev->id = atomic_inc_return(&msm_uart_next_id) - 1;
54712+ pdev->id = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
54713
54714 if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
54715 return -ENXIO;
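For context on the _unchecked conversions here and below: with PAX_REFCOUNT, atomic_t increments trap on overflow to stop reference-count exploits, and counters where wraparound is benign (device IDs, statistics) are switched to atomic_unchecked_t to opt out of that check. A small sketch with a hypothetical counter:

static atomic_unchecked_t demo_next_id = ATOMIC_INIT(0);

static int demo_alloc_id(void)
{
	/* wrap here is harmless, so the overflow detector is bypassed */
	return atomic_inc_return_unchecked(&demo_next_id) - 1;
}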
54716diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
54717index c1d3ebd..f618a93 100644
54718--- a/drivers/tty/serial/samsung.c
54719+++ b/drivers/tty/serial/samsung.c
54720@@ -486,11 +486,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
54721 }
54722 }
54723
54724+static int s3c64xx_serial_startup(struct uart_port *port);
54725 static int s3c24xx_serial_startup(struct uart_port *port)
54726 {
54727 struct s3c24xx_uart_port *ourport = to_ourport(port);
54728 int ret;
54729
54730+ /* Startup sequence is different for s3c64xx and higher SoC's */
54731+ if (s3c24xx_serial_has_interrupt_mask(port))
54732+ return s3c64xx_serial_startup(port);
54733+
54734 dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
54735 port, (unsigned long long)port->mapbase, port->membase);
54736
54737@@ -1164,10 +1169,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
54738 /* setup info for port */
54739 port->dev = &platdev->dev;
54740
54741- /* Startup sequence is different for s3c64xx and higher SoC's */
54742- if (s3c24xx_serial_has_interrupt_mask(port))
54743- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
54744-
54745 port->uartclk = 1;
54746
54747 if (cfg->uart_flags & UPF_CONS_FLOW) {
54748diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
54749index ef2fb36..238d80c 100644
54750--- a/drivers/tty/serial/serial_core.c
54751+++ b/drivers/tty/serial/serial_core.c
54752@@ -1336,7 +1336,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
54753
54754 pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
54755
54756- if (!port->count || tty_port_close_start(port, tty, filp) == 0)
54757+ if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
54758 return;
54759
54760 /*
54761@@ -1463,7 +1463,7 @@ static void uart_hangup(struct tty_struct *tty)
54762 uart_flush_buffer(tty);
54763 uart_shutdown(tty, state);
54764 spin_lock_irqsave(&port->lock, flags);
54765- port->count = 0;
54766+ atomic_set(&port->count, 0);
54767 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
54768 spin_unlock_irqrestore(&port->lock, flags);
54769 tty_port_tty_set(port, NULL);
54770@@ -1561,7 +1561,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
54771 goto end;
54772 }
54773
54774- port->count++;
54775+ atomic_inc(&port->count);
54776 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
54777 retval = -ENXIO;
54778 goto err_dec_count;
54779@@ -1601,7 +1601,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
54780 end:
54781 return retval;
54782 err_dec_count:
54783- port->count--;
54784+ atomic_dec(&port->count);
54785 mutex_unlock(&port->mutex);
54786 goto end;
54787 }
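Conversely, port->count above becomes a checked atomic_t, presumably because its value gates hardware init and teardown, so an attacker-driven wrap would be a real refcount bug. A sketch of the balance the conversion enforces (names hypothetical, mirroring the open/close paths):

/* open path */
atomic_inc(&port->count);

/* close path: underflow is detected rather than silently wrapping */
if (atomic_dec_return(&port->count) < 0) {
	WARN_ONCE(1, "demo: port count underflow\n");
	atomic_set(&port->count, 0);
}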
54788diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
54789index d48e040..0f52764 100644
54790--- a/drivers/tty/synclink.c
54791+++ b/drivers/tty/synclink.c
54792@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
54793
54794 if (debug_level >= DEBUG_LEVEL_INFO)
54795 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
54796- __FILE__,__LINE__, info->device_name, info->port.count);
54797+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
54798
54799 if (tty_port_close_start(&info->port, tty, filp) == 0)
54800 goto cleanup;
54801@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
54802 cleanup:
54803 if (debug_level >= DEBUG_LEVEL_INFO)
54804 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
54805- tty->driver->name, info->port.count);
54806+ tty->driver->name, atomic_read(&info->port.count));
54807
54808 } /* end of mgsl_close() */
54809
54810@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
54811
54812 mgsl_flush_buffer(tty);
54813 shutdown(info);
54814-
54815- info->port.count = 0;
54816+
54817+ atomic_set(&info->port.count, 0);
54818 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54819 info->port.tty = NULL;
54820
54821@@ -3297,12 +3297,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54822
54823 if (debug_level >= DEBUG_LEVEL_INFO)
54824 printk("%s(%d):block_til_ready before block on %s count=%d\n",
54825- __FILE__,__LINE__, tty->driver->name, port->count );
54826+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54827
54828 spin_lock_irqsave(&info->irq_spinlock, flags);
54829 if (!tty_hung_up_p(filp)) {
54830 extra_count = true;
54831- port->count--;
54832+ atomic_dec(&port->count);
54833 }
54834 spin_unlock_irqrestore(&info->irq_spinlock, flags);
54835 port->blocked_open++;
54836@@ -3331,7 +3331,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54837
54838 if (debug_level >= DEBUG_LEVEL_INFO)
54839 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
54840- __FILE__,__LINE__, tty->driver->name, port->count );
54841+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54842
54843 tty_unlock(tty);
54844 schedule();
54845@@ -3343,12 +3343,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54846
54847 /* FIXME: Racy on hangup during close wait */
54848 if (extra_count)
54849- port->count++;
54850+ atomic_inc(&port->count);
54851 port->blocked_open--;
54852
54853 if (debug_level >= DEBUG_LEVEL_INFO)
54854 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
54855- __FILE__,__LINE__, tty->driver->name, port->count );
54856+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54857
54858 if (!retval)
54859 port->flags |= ASYNC_NORMAL_ACTIVE;
54860@@ -3400,7 +3400,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
54861
54862 if (debug_level >= DEBUG_LEVEL_INFO)
54863 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
54864- __FILE__,__LINE__,tty->driver->name, info->port.count);
54865+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
54866
54867 /* If port is closing, signal caller to try again */
54868 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
54869@@ -3419,10 +3419,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
54870 spin_unlock_irqrestore(&info->netlock, flags);
54871 goto cleanup;
54872 }
54873- info->port.count++;
54874+ atomic_inc(&info->port.count);
54875 spin_unlock_irqrestore(&info->netlock, flags);
54876
54877- if (info->port.count == 1) {
54878+ if (atomic_read(&info->port.count) == 1) {
54879 /* 1st open on this device, init hardware */
54880 retval = startup(info);
54881 if (retval < 0)
54882@@ -3446,8 +3446,8 @@ cleanup:
54883 if (retval) {
54884 if (tty->count == 1)
54885 info->port.tty = NULL; /* tty layer will release tty struct */
54886- if(info->port.count)
54887- info->port.count--;
54888+ if (atomic_read(&info->port.count))
54889+ atomic_dec(&info->port.count);
54890 }
54891
54892 return retval;
54893@@ -7665,7 +7665,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54894 unsigned short new_crctype;
54895
54896 /* return error if TTY interface open */
54897- if (info->port.count)
54898+ if (atomic_read(&info->port.count))
54899 return -EBUSY;
54900
54901 switch (encoding)
54902@@ -7760,7 +7760,7 @@ static int hdlcdev_open(struct net_device *dev)
54903
54904 /* arbitrate between network and tty opens */
54905 spin_lock_irqsave(&info->netlock, flags);
54906- if (info->port.count != 0 || info->netcount != 0) {
54907+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54908 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
54909 spin_unlock_irqrestore(&info->netlock, flags);
54910 return -EBUSY;
54911@@ -7846,7 +7846,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54912 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
54913
54914 /* return error if TTY interface open */
54915- if (info->port.count)
54916+ if (atomic_read(&info->port.count))
54917 return -EBUSY;
54918
54919 if (cmd != SIOCWANDEV)
54920diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
54921index c359a91..959fc26 100644
54922--- a/drivers/tty/synclink_gt.c
54923+++ b/drivers/tty/synclink_gt.c
54924@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
54925 tty->driver_data = info;
54926 info->port.tty = tty;
54927
54928- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
54929+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
54930
54931 /* If port is closing, signal caller to try again */
54932 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
54933@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
54934 mutex_unlock(&info->port.mutex);
54935 goto cleanup;
54936 }
54937- info->port.count++;
54938+ atomic_inc(&info->port.count);
54939 spin_unlock_irqrestore(&info->netlock, flags);
54940
54941- if (info->port.count == 1) {
54942+ if (atomic_read(&info->port.count) == 1) {
54943 /* 1st open on this device, init hardware */
54944 retval = startup(info);
54945 if (retval < 0) {
54946@@ -715,8 +715,8 @@ cleanup:
54947 if (retval) {
54948 if (tty->count == 1)
54949 info->port.tty = NULL; /* tty layer will release tty struct */
54950- if(info->port.count)
54951- info->port.count--;
54952+ if(atomic_read(&info->port.count))
54953+ atomic_dec(&info->port.count);
54954 }
54955
54956 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
54957@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54958
54959 if (sanity_check(info, tty->name, "close"))
54960 return;
54961- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
54962+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
54963
54964 if (tty_port_close_start(&info->port, tty, filp) == 0)
54965 goto cleanup;
54966@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54967 tty_port_close_end(&info->port, tty);
54968 info->port.tty = NULL;
54969 cleanup:
54970- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
54971+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
54972 }
54973
54974 static void hangup(struct tty_struct *tty)
54975@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
54976 shutdown(info);
54977
54978 spin_lock_irqsave(&info->port.lock, flags);
54979- info->port.count = 0;
54980+ atomic_set(&info->port.count, 0);
54981 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54982 info->port.tty = NULL;
54983 spin_unlock_irqrestore(&info->port.lock, flags);
54984@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54985 unsigned short new_crctype;
54986
54987 /* return error if TTY interface open */
54988- if (info->port.count)
54989+ if (atomic_read(&info->port.count))
54990 return -EBUSY;
54991
54992 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
54993@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
54994
54995 /* arbitrate between network and tty opens */
54996 spin_lock_irqsave(&info->netlock, flags);
54997- if (info->port.count != 0 || info->netcount != 0) {
54998+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54999 DBGINFO(("%s hdlc_open busy\n", dev->name));
55000 spin_unlock_irqrestore(&info->netlock, flags);
55001 return -EBUSY;
55002@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
55003 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
55004
55005 /* return error if TTY interface open */
55006- if (info->port.count)
55007+ if (atomic_read(&info->port.count))
55008 return -EBUSY;
55009
55010 if (cmd != SIOCWANDEV)
55011@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
55012 if (port == NULL)
55013 continue;
55014 spin_lock(&port->lock);
55015- if ((port->port.count || port->netcount) &&
55016+ if ((atomic_read(&port->port.count) || port->netcount) &&
55017 port->pending_bh && !port->bh_running &&
55018 !port->bh_requested) {
55019 DBGISR(("%s bh queued\n", port->device_name));
55020@@ -3302,7 +3302,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
55021 spin_lock_irqsave(&info->lock, flags);
55022 if (!tty_hung_up_p(filp)) {
55023 extra_count = true;
55024- port->count--;
55025+ atomic_dec(&port->count);
55026 }
55027 spin_unlock_irqrestore(&info->lock, flags);
55028 port->blocked_open++;
55029@@ -3339,7 +3339,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
55030 remove_wait_queue(&port->open_wait, &wait);
55031
55032 if (extra_count)
55033- port->count++;
55034+ atomic_inc(&port->count);
55035 port->blocked_open--;
55036
55037 if (!retval)
55038diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
55039index 53ba853..3c30f6d 100644
55040--- a/drivers/tty/synclinkmp.c
55041+++ b/drivers/tty/synclinkmp.c
55042@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
55043
55044 if (debug_level >= DEBUG_LEVEL_INFO)
55045 printk("%s(%d):%s open(), old ref count = %d\n",
55046- __FILE__,__LINE__,tty->driver->name, info->port.count);
55047+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
55048
55049 /* If port is closing, signal caller to try again */
55050 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
55051@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
55052 spin_unlock_irqrestore(&info->netlock, flags);
55053 goto cleanup;
55054 }
55055- info->port.count++;
55056+ atomic_inc(&info->port.count);
55057 spin_unlock_irqrestore(&info->netlock, flags);
55058
55059- if (info->port.count == 1) {
55060+ if (atomic_read(&info->port.count) == 1) {
55061 /* 1st open on this device, init hardware */
55062 retval = startup(info);
55063 if (retval < 0)
55064@@ -796,8 +796,8 @@ cleanup:
55065 if (retval) {
55066 if (tty->count == 1)
55067 info->port.tty = NULL; /* tty layer will release tty struct */
55068- if(info->port.count)
55069- info->port.count--;
55070+ if(atomic_read(&info->port.count))
55071+ atomic_dec(&info->port.count);
55072 }
55073
55074 return retval;
55075@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
55076
55077 if (debug_level >= DEBUG_LEVEL_INFO)
55078 printk("%s(%d):%s close() entry, count=%d\n",
55079- __FILE__,__LINE__, info->device_name, info->port.count);
55080+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
55081
55082 if (tty_port_close_start(&info->port, tty, filp) == 0)
55083 goto cleanup;
55084@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
55085 cleanup:
55086 if (debug_level >= DEBUG_LEVEL_INFO)
55087 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
55088- tty->driver->name, info->port.count);
55089+ tty->driver->name, atomic_read(&info->port.count));
55090 }
55091
55092 /* Called by tty_hangup() when a hangup is signaled.
55093@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
55094 shutdown(info);
55095
55096 spin_lock_irqsave(&info->port.lock, flags);
55097- info->port.count = 0;
55098+ atomic_set(&info->port.count, 0);
55099 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
55100 info->port.tty = NULL;
55101 spin_unlock_irqrestore(&info->port.lock, flags);
55102@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
55103 unsigned short new_crctype;
55104
55105 /* return error if TTY interface open */
55106- if (info->port.count)
55107+ if (atomic_read(&info->port.count))
55108 return -EBUSY;
55109
55110 switch (encoding)
55111@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
55112
55113 /* arbitrate between network and tty opens */
55114 spin_lock_irqsave(&info->netlock, flags);
55115- if (info->port.count != 0 || info->netcount != 0) {
55116+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
55117 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
55118 spin_unlock_irqrestore(&info->netlock, flags);
55119 return -EBUSY;
55120@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
55121 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
55122
55123 /* return error if TTY interface open */
55124- if (info->port.count)
55125+ if (atomic_read(&info->port.count))
55126 return -EBUSY;
55127
55128 if (cmd != SIOCWANDEV)
55129@@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
55130 * do not request bottom half processing if the
55131 * device is not open in a normal mode.
55132 */
55133- if ( port && (port->port.count || port->netcount) &&
55134+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
55135 port->pending_bh && !port->bh_running &&
55136 !port->bh_requested ) {
55137 if ( debug_level >= DEBUG_LEVEL_ISR )
55138@@ -3319,12 +3319,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
55139
55140 if (debug_level >= DEBUG_LEVEL_INFO)
55141 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
55142- __FILE__,__LINE__, tty->driver->name, port->count );
55143+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
55144
55145 spin_lock_irqsave(&info->lock, flags);
55146 if (!tty_hung_up_p(filp)) {
55147 extra_count = true;
55148- port->count--;
55149+ atomic_dec(&port->count);
55150 }
55151 spin_unlock_irqrestore(&info->lock, flags);
55152 port->blocked_open++;
55153@@ -3353,7 +3353,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
55154
55155 if (debug_level >= DEBUG_LEVEL_INFO)
55156 printk("%s(%d):%s block_til_ready() count=%d\n",
55157- __FILE__,__LINE__, tty->driver->name, port->count );
55158+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
55159
55160 tty_unlock(tty);
55161 schedule();
55162@@ -3364,12 +3364,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
55163 remove_wait_queue(&port->open_wait, &wait);
55164
55165 if (extra_count)
55166- port->count++;
55167+ atomic_inc(&port->count);
55168 port->blocked_open--;
55169
55170 if (debug_level >= DEBUG_LEVEL_INFO)
55171 printk("%s(%d):%s block_til_ready() after, count=%d\n",
55172- __FILE__,__LINE__, tty->driver->name, port->count );
55173+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
55174
55175 if (!retval)
55176 port->flags |= ASYNC_NORMAL_ACTIVE;
55177diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
55178index 454b658..57b1430 100644
55179--- a/drivers/tty/sysrq.c
55180+++ b/drivers/tty/sysrq.c
55181@@ -1084,7 +1084,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
55182 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
55183 size_t count, loff_t *ppos)
55184 {
55185- if (count) {
55186+ if (count && capable(CAP_SYS_ADMIN)) {
55187 char c;
55188
55189 if (get_user(c, buf))
55190diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
55191index 3411071..86f2cf2 100644
55192--- a/drivers/tty/tty_io.c
55193+++ b/drivers/tty/tty_io.c
55194@@ -3475,7 +3475,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
55195
55196 void tty_default_fops(struct file_operations *fops)
55197 {
55198- *fops = tty_fops;
55199+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
55200 }
55201
55202 /*
55203diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
55204index 2d822aa..a566234 100644
55205--- a/drivers/tty/tty_ldisc.c
55206+++ b/drivers/tty/tty_ldisc.c
55207@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
55208 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
55209 tty_ldiscs[disc] = new_ldisc;
55210 new_ldisc->num = disc;
55211- new_ldisc->refcount = 0;
55212+ atomic_set(&new_ldisc->refcount, 0);
55213 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
55214
55215 return ret;
55216@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
55217 return -EINVAL;
55218
55219 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
55220- if (tty_ldiscs[disc]->refcount)
55221+ if (atomic_read(&tty_ldiscs[disc]->refcount))
55222 ret = -EBUSY;
55223 else
55224 tty_ldiscs[disc] = NULL;
55225@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
55226 if (ldops) {
55227 ret = ERR_PTR(-EAGAIN);
55228 if (try_module_get(ldops->owner)) {
55229- ldops->refcount++;
55230+ atomic_inc(&ldops->refcount);
55231 ret = ldops;
55232 }
55233 }
55234@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
55235 unsigned long flags;
55236
55237 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
55238- ldops->refcount--;
55239+ atomic_dec(&ldops->refcount);
55240 module_put(ldops->owner);
55241 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
55242 }
55243diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
55244index 3f746c8..2f2fcaa 100644
55245--- a/drivers/tty/tty_port.c
55246+++ b/drivers/tty/tty_port.c
55247@@ -235,7 +235,7 @@ void tty_port_hangup(struct tty_port *port)
55248 unsigned long flags;
55249
55250 spin_lock_irqsave(&port->lock, flags);
55251- port->count = 0;
55252+ atomic_set(&port->count, 0);
55253 port->flags &= ~ASYNC_NORMAL_ACTIVE;
55254 tty = port->tty;
55255 if (tty)
55256@@ -393,7 +393,7 @@ int tty_port_block_til_ready(struct tty_port *port,
55257 /* The port lock protects the port counts */
55258 spin_lock_irqsave(&port->lock, flags);
55259 if (!tty_hung_up_p(filp))
55260- port->count--;
55261+ atomic_dec(&port->count);
55262 port->blocked_open++;
55263 spin_unlock_irqrestore(&port->lock, flags);
55264
55265@@ -435,7 +435,7 @@ int tty_port_block_til_ready(struct tty_port *port,
55266 we must not mess that up further */
55267 spin_lock_irqsave(&port->lock, flags);
55268 if (!tty_hung_up_p(filp))
55269- port->count++;
55270+ atomic_inc(&port->count);
55271 port->blocked_open--;
55272 if (retval == 0)
55273 port->flags |= ASYNC_NORMAL_ACTIVE;
55274@@ -469,19 +469,19 @@ int tty_port_close_start(struct tty_port *port,
55275 return 0;
55276 }
55277
55278- if (tty->count == 1 && port->count != 1) {
55279+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
55280 printk(KERN_WARNING
55281 "tty_port_close_start: tty->count = 1 port count = %d.\n",
55282- port->count);
55283- port->count = 1;
55284+ atomic_read(&port->count));
55285+ atomic_set(&port->count, 1);
55286 }
55287- if (--port->count < 0) {
55288+ if (atomic_dec_return(&port->count) < 0) {
55289 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
55290- port->count);
55291- port->count = 0;
55292+ atomic_read(&port->count));
55293+ atomic_set(&port->count, 0);
55294 }
55295
55296- if (port->count) {
55297+ if (atomic_read(&port->count)) {
55298 spin_unlock_irqrestore(&port->lock, flags);
55299 return 0;
55300 }
55301@@ -563,7 +563,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
55302 {
55303 spin_lock_irq(&port->lock);
55304 if (!tty_hung_up_p(filp))
55305- ++port->count;
55306+ atomic_inc(&port->count);
55307 spin_unlock_irq(&port->lock);
55308 tty_port_tty_set(port, tty);
55309
55310diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
55311index d0e3a44..5f8b754 100644
55312--- a/drivers/tty/vt/keyboard.c
55313+++ b/drivers/tty/vt/keyboard.c
55314@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
55315 kbd->kbdmode == VC_OFF) &&
55316 value != KVAL(K_SAK))
55317 return; /* SAK is allowed even in raw mode */
55318+
55319+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
55320+ {
55321+ void *func = fn_handler[value];
55322+ if (func == fn_show_state || func == fn_show_ptregs ||
55323+ func == fn_show_mem)
55324+ return;
55325+ }
55326+#endif
55327+
55328 fn_handler[value](vc);
55329 }
55330
55331@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
55332 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
55333 return -EFAULT;
55334
55335- if (!capable(CAP_SYS_TTY_CONFIG))
55336- perm = 0;
55337-
55338 switch (cmd) {
55339 case KDGKBENT:
55340 /* Ensure another thread doesn't free it under us */
55341@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
55342 spin_unlock_irqrestore(&kbd_event_lock, flags);
55343 return put_user(val, &user_kbe->kb_value);
55344 case KDSKBENT:
55345+ if (!capable(CAP_SYS_TTY_CONFIG))
55346+ perm = 0;
55347+
55348 if (!perm)
55349 return -EPERM;
55350 if (!i && v == K_NOSUCHMAP) {
55351@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
55352 int i, j, k;
55353 int ret;
55354
55355- if (!capable(CAP_SYS_TTY_CONFIG))
55356- perm = 0;
55357-
55358 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
55359 if (!kbs) {
55360 ret = -ENOMEM;
55361@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
55362 kfree(kbs);
55363 return ((p && *p) ? -EOVERFLOW : 0);
55364 case KDSKBSENT:
55365+ if (!capable(CAP_SYS_TTY_CONFIG))
55366+ perm = 0;
55367+
55368 if (!perm) {
55369 ret = -EPERM;
55370 goto reterr;
55371diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
55372index a673e5b..36e5d32 100644
55373--- a/drivers/uio/uio.c
55374+++ b/drivers/uio/uio.c
55375@@ -25,6 +25,7 @@
55376 #include <linux/kobject.h>
55377 #include <linux/cdev.h>
55378 #include <linux/uio_driver.h>
55379+#include <asm/local.h>
55380
55381 #define UIO_MAX_DEVICES (1U << MINORBITS)
55382
55383@@ -32,7 +33,7 @@ struct uio_device {
55384 struct module *owner;
55385 struct device *dev;
55386 int minor;
55387- atomic_t event;
55388+ atomic_unchecked_t event;
55389 struct fasync_struct *async_queue;
55390 wait_queue_head_t wait;
55391 struct uio_info *info;
55392@@ -243,7 +244,7 @@ static ssize_t event_show(struct device *dev,
55393 struct device_attribute *attr, char *buf)
55394 {
55395 struct uio_device *idev = dev_get_drvdata(dev);
55396- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
55397+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
55398 }
55399 static DEVICE_ATTR_RO(event);
55400
55401@@ -405,7 +406,7 @@ void uio_event_notify(struct uio_info *info)
55402 {
55403 struct uio_device *idev = info->uio_dev;
55404
55405- atomic_inc(&idev->event);
55406+ atomic_inc_unchecked(&idev->event);
55407 wake_up_interruptible(&idev->wait);
55408 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
55409 }
55410@@ -458,7 +459,7 @@ static int uio_open(struct inode *inode, struct file *filep)
55411 }
55412
55413 listener->dev = idev;
55414- listener->event_count = atomic_read(&idev->event);
55415+ listener->event_count = atomic_read_unchecked(&idev->event);
55416 filep->private_data = listener;
55417
55418 if (idev->info->open) {
55419@@ -509,7 +510,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
55420 return -EIO;
55421
55422 poll_wait(filep, &idev->wait, wait);
55423- if (listener->event_count != atomic_read(&idev->event))
55424+ if (listener->event_count != atomic_read_unchecked(&idev->event))
55425 return POLLIN | POLLRDNORM;
55426 return 0;
55427 }
55428@@ -534,7 +535,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
55429 do {
55430 set_current_state(TASK_INTERRUPTIBLE);
55431
55432- event_count = atomic_read(&idev->event);
55433+ event_count = atomic_read_unchecked(&idev->event);
55434 if (event_count != listener->event_count) {
55435 if (copy_to_user(buf, &event_count, count))
55436 retval = -EFAULT;
55437@@ -591,9 +592,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
55438 static int uio_find_mem_index(struct vm_area_struct *vma)
55439 {
55440 struct uio_device *idev = vma->vm_private_data;
55441+ unsigned long size;
55442
55443 if (vma->vm_pgoff < MAX_UIO_MAPS) {
55444- if (idev->info->mem[vma->vm_pgoff].size == 0)
55445+ size = idev->info->mem[vma->vm_pgoff].size;
55446+ if (size == 0)
55447+ return -1;
55448+ if (vma->vm_end - vma->vm_start > size)
55449 return -1;
55450 return (int)vma->vm_pgoff;
55451 }
55452@@ -825,7 +830,7 @@ int __uio_register_device(struct module *owner,
55453 idev->owner = owner;
55454 idev->info = info;
55455 init_waitqueue_head(&idev->wait);
55456- atomic_set(&idev->event, 0);
55457+ atomic_set_unchecked(&idev->event, 0);
55458
55459 ret = uio_get_minor(idev);
55460 if (ret)
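The uio_find_mem_index change above adds the missing upper bound: a mapping request larger than the backing region now fails instead of exposing adjacent memory. The check in isolation, as a hypothetical helper:

static int demo_mem_index(struct vm_area_struct *vma, unsigned long region_size)
{
	if (region_size == 0)
		return -1;
	if (vma->vm_end - vma->vm_start > region_size)
		return -1;	/* would map past the end of the region */
	return (int)vma->vm_pgoff;
}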
55461diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
55462index 813d4d3..a71934f 100644
55463--- a/drivers/usb/atm/cxacru.c
55464+++ b/drivers/usb/atm/cxacru.c
55465@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
55466 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
55467 if (ret < 2)
55468 return -EINVAL;
55469- if (index < 0 || index > 0x7f)
55470+ if (index > 0x7f)
55471 return -EINVAL;
55472 pos += tmp;
55473
55474diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
55475index dada014..1d0d517 100644
55476--- a/drivers/usb/atm/usbatm.c
55477+++ b/drivers/usb/atm/usbatm.c
55478@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55479 if (printk_ratelimit())
55480 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
55481 __func__, vpi, vci);
55482- atomic_inc(&vcc->stats->rx_err);
55483+ atomic_inc_unchecked(&vcc->stats->rx_err);
55484 return;
55485 }
55486
55487@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55488 if (length > ATM_MAX_AAL5_PDU) {
55489 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
55490 __func__, length, vcc);
55491- atomic_inc(&vcc->stats->rx_err);
55492+ atomic_inc_unchecked(&vcc->stats->rx_err);
55493 goto out;
55494 }
55495
55496@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55497 if (sarb->len < pdu_length) {
55498 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
55499 __func__, pdu_length, sarb->len, vcc);
55500- atomic_inc(&vcc->stats->rx_err);
55501+ atomic_inc_unchecked(&vcc->stats->rx_err);
55502 goto out;
55503 }
55504
55505 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
55506 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
55507 __func__, vcc);
55508- atomic_inc(&vcc->stats->rx_err);
55509+ atomic_inc_unchecked(&vcc->stats->rx_err);
55510 goto out;
55511 }
55512
55513@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55514 if (printk_ratelimit())
55515 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
55516 __func__, length);
55517- atomic_inc(&vcc->stats->rx_drop);
55518+ atomic_inc_unchecked(&vcc->stats->rx_drop);
55519 goto out;
55520 }
55521
55522@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55523
55524 vcc->push(vcc, skb);
55525
55526- atomic_inc(&vcc->stats->rx);
55527+ atomic_inc_unchecked(&vcc->stats->rx);
55528 out:
55529 skb_trim(sarb, 0);
55530 }
55531@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
55532 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
55533
55534 usbatm_pop(vcc, skb);
55535- atomic_inc(&vcc->stats->tx);
55536+ atomic_inc_unchecked(&vcc->stats->tx);
55537
55538 skb = skb_dequeue(&instance->sndqueue);
55539 }
55540@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
55541 if (!left--)
55542 return sprintf(page,
55543 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
55544- atomic_read(&atm_dev->stats.aal5.tx),
55545- atomic_read(&atm_dev->stats.aal5.tx_err),
55546- atomic_read(&atm_dev->stats.aal5.rx),
55547- atomic_read(&atm_dev->stats.aal5.rx_err),
55548- atomic_read(&atm_dev->stats.aal5.rx_drop));
55549+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
55550+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
55551+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
55552+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
55553+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
55554
55555 if (!left--) {
55556 if (instance->disconnected)
55557diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
55558index 2a3bbdf..91d72cf 100644
55559--- a/drivers/usb/core/devices.c
55560+++ b/drivers/usb/core/devices.c
55561@@ -126,7 +126,7 @@ static const char format_endpt[] =
55562 * time it gets called.
55563 */
55564 static struct device_connect_event {
55565- atomic_t count;
55566+ atomic_unchecked_t count;
55567 wait_queue_head_t wait;
55568 } device_event = {
55569 .count = ATOMIC_INIT(1),
55570@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
55571
55572 void usbfs_conn_disc_event(void)
55573 {
55574- atomic_add(2, &device_event.count);
55575+ atomic_add_unchecked(2, &device_event.count);
55576 wake_up(&device_event.wait);
55577 }
55578
55579@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
55580
55581 poll_wait(file, &device_event.wait, wait);
55582
55583- event_count = atomic_read(&device_event.count);
55584+ event_count = atomic_read_unchecked(&device_event.count);
55585 if (file->f_version != event_count) {
55586 file->f_version = event_count;
55587 return POLLIN | POLLRDNORM;
55588diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
55589index 0b59731..46ee7d1 100644
55590--- a/drivers/usb/core/devio.c
55591+++ b/drivers/usb/core/devio.c
55592@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
55593 struct usb_dev_state *ps = file->private_data;
55594 struct usb_device *dev = ps->dev;
55595 ssize_t ret = 0;
55596- unsigned len;
55597+ size_t len;
55598 loff_t pos;
55599 int i;
55600
55601@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
55602 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
55603 struct usb_config_descriptor *config =
55604 (struct usb_config_descriptor *)dev->rawdescriptors[i];
55605- unsigned int length = le16_to_cpu(config->wTotalLength);
55606+ size_t length = le16_to_cpu(config->wTotalLength);
55607
55608 if (*ppos < pos + length) {
55609
55610 /* The descriptor may claim to be longer than it
55611 * really is. Here is the actual allocated length. */
55612- unsigned alloclen =
55613+ size_t alloclen =
55614 le16_to_cpu(dev->config[i].desc.wTotalLength);
55615
55616- len = length - (*ppos - pos);
55617+ len = length + pos - *ppos;
55618 if (len > nbytes)
55619 len = nbytes;
55620
55621 /* Simply don't write (skip over) unallocated parts */
55622 if (alloclen > (*ppos - pos)) {
55623- alloclen -= (*ppos - pos);
55624+ alloclen = alloclen + pos - *ppos;
55625 if (copy_to_user(buf,
55626 dev->rawdescriptors[i] + (*ppos - pos),
55627 min(len, alloclen))) {
55628diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
55629index bec31e2..b8091cd 100644
55630--- a/drivers/usb/core/hcd.c
55631+++ b/drivers/usb/core/hcd.c
55632@@ -1554,7 +1554,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
55633 */
55634 usb_get_urb(urb);
55635 atomic_inc(&urb->use_count);
55636- atomic_inc(&urb->dev->urbnum);
55637+ atomic_inc_unchecked(&urb->dev->urbnum);
55638 usbmon_urb_submit(&hcd->self, urb);
55639
55640 /* NOTE requirements on root-hub callers (usbfs and the hub
55641@@ -1581,7 +1581,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
55642 urb->hcpriv = NULL;
55643 INIT_LIST_HEAD(&urb->urb_list);
55644 atomic_dec(&urb->use_count);
55645- atomic_dec(&urb->dev->urbnum);
55646+ atomic_dec_unchecked(&urb->dev->urbnum);
55647 if (atomic_read(&urb->reject))
55648 wake_up(&usb_kill_urb_queue);
55649 usb_put_urb(urb);
55650diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
55651index 27f2171..e3dfc22 100644
55652--- a/drivers/usb/core/hub.c
55653+++ b/drivers/usb/core/hub.c
55654@@ -27,6 +27,7 @@
55655 #include <linux/freezer.h>
55656 #include <linux/random.h>
55657 #include <linux/pm_qos.h>
55658+#include <linux/grsecurity.h>
55659
55660 #include <asm/uaccess.h>
55661 #include <asm/byteorder.h>
55662@@ -4644,6 +4645,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
55663 goto done;
55664 return;
55665 }
55666+
55667+ if (gr_handle_new_usb())
55668+ goto done;
55669+
55670 if (hub_is_superspeed(hub->hdev))
55671 unit_load = 150;
55672 else
55673diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
55674index 0c8a7fc..c45b40a 100644
55675--- a/drivers/usb/core/message.c
55676+++ b/drivers/usb/core/message.c
55677@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
55678 * Return: If successful, the number of bytes transferred. Otherwise, a negative
55679 * error number.
55680 */
55681-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
55682+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
55683 __u8 requesttype, __u16 value, __u16 index, void *data,
55684 __u16 size, int timeout)
55685 {
55686@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
55687 * If successful, 0. Otherwise a negative error number. The number of actual
55688 * bytes transferred will be stored in the @actual_length parameter.
55689 */
55690-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
55691+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
55692 void *data, int len, int *actual_length, int timeout)
55693 {
55694 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
55695@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
55696 * bytes transferred will be stored in the @actual_length parameter.
55697 *
55698 */
55699-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
55700+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
55701 void *data, int len, int *actual_length, int timeout)
55702 {
55703 struct urb *urb;
55704diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
55705index 1236c60..d47a51c 100644
55706--- a/drivers/usb/core/sysfs.c
55707+++ b/drivers/usb/core/sysfs.c
55708@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
55709 struct usb_device *udev;
55710
55711 udev = to_usb_device(dev);
55712- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
55713+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
55714 }
55715 static DEVICE_ATTR_RO(urbnum);
55716
55717diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
55718index 4d11449..f4ccabf 100644
55719--- a/drivers/usb/core/usb.c
55720+++ b/drivers/usb/core/usb.c
55721@@ -433,7 +433,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
55722 set_dev_node(&dev->dev, dev_to_node(bus->controller));
55723 dev->state = USB_STATE_ATTACHED;
55724 dev->lpm_disable_count = 1;
55725- atomic_set(&dev->urbnum, 0);
55726+ atomic_set_unchecked(&dev->urbnum, 0);
55727
55728 INIT_LIST_HEAD(&dev->ep0.urb_list);
55729 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
55730diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
55731index dab7927..6f53afc 100644
55732--- a/drivers/usb/dwc3/gadget.c
55733+++ b/drivers/usb/dwc3/gadget.c
55734@@ -615,8 +615,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
55735 if (!usb_endpoint_xfer_isoc(desc))
55736 return 0;
55737
55738- memset(&trb_link, 0, sizeof(trb_link));
55739-
55740 /* Link TRB for ISOC. The HWO bit is never reset */
55741 trb_st_hw = &dep->trb_pool[0];
55742
55743diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
55744index 8cfc319..4868255 100644
55745--- a/drivers/usb/early/ehci-dbgp.c
55746+++ b/drivers/usb/early/ehci-dbgp.c
55747@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
55748
55749 #ifdef CONFIG_KGDB
55750 static struct kgdb_io kgdbdbgp_io_ops;
55751-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
55752+static struct kgdb_io kgdbdbgp_io_ops_console;
55753+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
55754 #else
55755 #define dbgp_kgdb_mode (0)
55756 #endif
55757@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
55758 .write_char = kgdbdbgp_write_char,
55759 };
55760
55761+static struct kgdb_io kgdbdbgp_io_ops_console = {
55762+ .name = "kgdbdbgp",
55763+ .read_char = kgdbdbgp_read_char,
55764+ .write_char = kgdbdbgp_write_char,
55765+ .is_console = 1
55766+};
55767+
55768 static int kgdbdbgp_wait_time;
55769
55770 static int __init kgdbdbgp_parse_config(char *str)
55771@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
55772 ptr++;
55773 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
55774 }
55775- kgdb_register_io_module(&kgdbdbgp_io_ops);
55776- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
55777+ if (early_dbgp_console.index != -1)
55778+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
55779+ else
55780+ kgdb_register_io_module(&kgdbdbgp_io_ops);
55781
55782 return 0;
55783 }
55784diff --git a/drivers/usb/gadget/f_uac1.c b/drivers/usb/gadget/f_uac1.c
55785index 2b4c82d..06a8ee6 100644
55786--- a/drivers/usb/gadget/f_uac1.c
55787+++ b/drivers/usb/gadget/f_uac1.c
55788@@ -13,6 +13,7 @@
55789 #include <linux/kernel.h>
55790 #include <linux/device.h>
55791 #include <linux/atomic.h>
55792+#include <linux/module.h>
55793
55794 #include "u_uac1.h"
55795
55796diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
55797index ad0aca8..8ff84865 100644
55798--- a/drivers/usb/gadget/u_serial.c
55799+++ b/drivers/usb/gadget/u_serial.c
55800@@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
55801 spin_lock_irq(&port->port_lock);
55802
55803 /* already open? Great. */
55804- if (port->port.count) {
55805+ if (atomic_read(&port->port.count)) {
55806 status = 0;
55807- port->port.count++;
55808+ atomic_inc(&port->port.count);
55809
55810 /* currently opening/closing? wait ... */
55811 } else if (port->openclose) {
55812@@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
55813 tty->driver_data = port;
55814 port->port.tty = tty;
55815
55816- port->port.count = 1;
55817+ atomic_set(&port->port.count, 1);
55818 port->openclose = false;
55819
55820 /* if connected, start the I/O stream */
55821@@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
55822
55823 spin_lock_irq(&port->port_lock);
55824
55825- if (port->port.count != 1) {
55826- if (port->port.count == 0)
55827+ if (atomic_read(&port->port.count) != 1) {
55828+ if (atomic_read(&port->port.count) == 0)
55829 WARN_ON(1);
55830 else
55831- --port->port.count;
55832+ atomic_dec(&port->port.count);
55833 goto exit;
55834 }
55835
55836@@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
55837 * and sleep if necessary
55838 */
55839 port->openclose = true;
55840- port->port.count = 0;
55841+ atomic_set(&port->port.count, 0);
55842
55843 gser = port->port_usb;
55844 if (gser && gser->disconnect)
55845@@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
55846 int cond;
55847
55848 spin_lock_irq(&port->port_lock);
55849- cond = (port->port.count == 0) && !port->openclose;
55850+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
55851 spin_unlock_irq(&port->port_lock);
55852 return cond;
55853 }
55854@@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
55855 /* if it's already open, start I/O ... and notify the serial
55856 * protocol about open/close status (connect/disconnect).
55857 */
55858- if (port->port.count) {
55859+ if (atomic_read(&port->port.count)) {
55860 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
55861 gs_start_io(port);
55862 if (gser->connect)
55863@@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
55864
55865 port->port_usb = NULL;
55866 gser->ioport = NULL;
55867- if (port->port.count > 0 || port->openclose) {
55868+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
55869 wake_up_interruptible(&port->drain_wait);
55870 if (port->port.tty)
55871 tty_hangup(port->port.tty);
55872@@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
55873
55874 /* finally, free any unused/unusable I/O buffers */
55875 spin_lock_irqsave(&port->port_lock, flags);
55876- if (port->port.count == 0 && !port->openclose)
55877+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
55878 gs_buf_free(&port->port_write_buf);
55879 gs_free_requests(gser->out, &port->read_pool, NULL);
55880 gs_free_requests(gser->out, &port->read_queue, NULL);
55881diff --git a/drivers/usb/gadget/u_uac1.c b/drivers/usb/gadget/u_uac1.c
55882index 7a55fea..cc0ed4f 100644
55883--- a/drivers/usb/gadget/u_uac1.c
55884+++ b/drivers/usb/gadget/u_uac1.c
55885@@ -16,6 +16,7 @@
55886 #include <linux/ctype.h>
55887 #include <linux/random.h>
55888 #include <linux/syscalls.h>
55889+#include <linux/module.h>
55890
55891 #include "u_uac1.h"
55892
55893diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
55894index 6130b75..3b60008 100644
55895--- a/drivers/usb/host/ehci-hub.c
55896+++ b/drivers/usb/host/ehci-hub.c
55897@@ -771,7 +771,7 @@ static struct urb *request_single_step_set_feature_urb(
55898 urb->transfer_flags = URB_DIR_IN;
55899 usb_get_urb(urb);
55900 atomic_inc(&urb->use_count);
55901- atomic_inc(&urb->dev->urbnum);
55902+ atomic_inc_unchecked(&urb->dev->urbnum);
55903 urb->setup_dma = dma_map_single(
55904 hcd->self.controller,
55905 urb->setup_packet,
55906@@ -838,7 +838,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
55907 urb->status = -EINPROGRESS;
55908 usb_get_urb(urb);
55909 atomic_inc(&urb->use_count);
55910- atomic_inc(&urb->dev->urbnum);
55911+ atomic_inc_unchecked(&urb->dev->urbnum);
55912 retval = submit_single_step_set_feature(hcd, urb, 0);
55913 if (!retval && !wait_for_completion_timeout(&done,
55914 msecs_to_jiffies(2000))) {
55915diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
55916index d0d8fad..668ef7b 100644
55917--- a/drivers/usb/host/hwa-hc.c
55918+++ b/drivers/usb/host/hwa-hc.c
55919@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
55920 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
55921 struct wahc *wa = &hwahc->wa;
55922 struct device *dev = &wa->usb_iface->dev;
55923- u8 mas_le[UWB_NUM_MAS/8];
55924+ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
55925+
55926+ if (mas_le == NULL)
55927+ return -ENOMEM;
55928
55929 /* Set the stream index */
55930 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
55931@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
55932 WUSB_REQ_SET_WUSB_MAS,
55933 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
55934 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
55935- mas_le, 32, USB_CTRL_SET_TIMEOUT);
55936+ mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
55937 if (result < 0)
55938 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
55939 out:
55940+ kfree(mas_le);
55941+
55942 return result;
55943 }
55944
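Two things change in __hwahc_op_bwa_set above: the bitmap moves from the stack to kmalloc() (buffers handed to usb_control_msg() must be DMA-capable, which an on-stack array is not guaranteed to be), and the transfer length is tied to the buffer size instead of a hard-coded 32. A hedged sketch of the shape, with a made-up request constant:

u8 *bitmap = kmalloc(UWB_NUM_MAS / 8, GFP_KERNEL);
if (!bitmap)
	return -ENOMEM;
/* ... fill bitmap ... */
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			 DEMO_REQ, USB_DIR_OUT | USB_TYPE_CLASS,
			 0, 0, bitmap, UWB_NUM_MAS / 8,
			 USB_CTRL_SET_TIMEOUT);
kfree(bitmap);
return result;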
55945diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
55946index b3d245e..99549ed 100644
55947--- a/drivers/usb/misc/appledisplay.c
55948+++ b/drivers/usb/misc/appledisplay.c
55949@@ -84,7 +84,7 @@ struct appledisplay {
55950 struct mutex sysfslock; /* concurrent read and write */
55951 };
55952
55953-static atomic_t count_displays = ATOMIC_INIT(0);
55954+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
55955 static struct workqueue_struct *wq;
55956
55957 static void appledisplay_complete(struct urb *urb)
55958@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
55959
55960 /* Register backlight device */
55961 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
55962- atomic_inc_return(&count_displays) - 1);
55963+ atomic_inc_return_unchecked(&count_displays) - 1);
55964 memset(&props, 0, sizeof(struct backlight_properties));
55965 props.type = BACKLIGHT_RAW;
55966 props.max_brightness = 0xff;
55967diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
55968index 8d7fc48..01c4986 100644
55969--- a/drivers/usb/serial/console.c
55970+++ b/drivers/usb/serial/console.c
55971@@ -123,7 +123,7 @@ static int usb_console_setup(struct console *co, char *options)
55972
55973 info->port = port;
55974
55975- ++port->port.count;
55976+ atomic_inc(&port->port.count);
55977 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
55978 if (serial->type->set_termios) {
55979 /*
55980@@ -167,7 +167,7 @@ static int usb_console_setup(struct console *co, char *options)
55981 }
55982 /* Now that any required fake tty operations are completed restore
55983 * the tty port count */
55984- --port->port.count;
55985+ atomic_dec(&port->port.count);
55986 /* The console is special in terms of closing the device so
55987 * indicate this port is now acting as a system console. */
55988 port->port.console = 1;
55989@@ -180,7 +180,7 @@ static int usb_console_setup(struct console *co, char *options)
55990 free_tty:
55991 kfree(tty);
55992 reset_open_count:
55993- port->port.count = 0;
55994+ atomic_set(&port->port.count, 0);
55995 usb_autopm_put_interface(serial->interface);
55996 error_get_interface:
55997 usb_serial_put(serial);
55998@@ -191,7 +191,7 @@ static int usb_console_setup(struct console *co, char *options)
55999 static void usb_console_write(struct console *co,
56000 const char *buf, unsigned count)
56001 {
56002- static struct usbcons_info *info = &usbcons_info;
56003+ struct usbcons_info *info = &usbcons_info;
56004 struct usb_serial_port *port = info->port;
56005 struct usb_serial *serial;
56006 int retval = -ENODEV;
56007diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
56008index 307e339..6aa97cb 100644
56009--- a/drivers/usb/storage/usb.h
56010+++ b/drivers/usb/storage/usb.h
56011@@ -63,7 +63,7 @@ struct us_unusual_dev {
56012 __u8 useProtocol;
56013 __u8 useTransport;
56014 int (*initFunction)(struct us_data *);
56015-};
56016+} __do_const;
56017
56018
56019 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
56020diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
56021index f2a8d29..7bc3fe7 100644
56022--- a/drivers/usb/wusbcore/wa-hc.h
56023+++ b/drivers/usb/wusbcore/wa-hc.h
56024@@ -240,7 +240,7 @@ struct wahc {
56025 spinlock_t xfer_list_lock;
56026 struct work_struct xfer_enqueue_work;
56027 struct work_struct xfer_error_work;
56028- atomic_t xfer_id_count;
56029+ atomic_unchecked_t xfer_id_count;
56030
56031 kernel_ulong_t quirks;
56032 };
56033@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
56034 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
56035 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
56036 wa->dto_in_use = 0;
56037- atomic_set(&wa->xfer_id_count, 1);
56038+ atomic_set_unchecked(&wa->xfer_id_count, 1);
56039 /* init the buf in URBs */
56040 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
56041 usb_init_urb(&(wa->buf_in_urbs[index]));
56042diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
56043index 3e2e4ed..060c9b8 100644
56044--- a/drivers/usb/wusbcore/wa-xfer.c
56045+++ b/drivers/usb/wusbcore/wa-xfer.c
56046@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
56047 */
56048 static void wa_xfer_id_init(struct wa_xfer *xfer)
56049 {
56050- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
56051+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
56052 }
56053
56054 /* Return the xfer's ID. */
56055diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
56056index f018d8d..ccab63f 100644
56057--- a/drivers/vfio/vfio.c
56058+++ b/drivers/vfio/vfio.c
56059@@ -481,7 +481,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
56060 return 0;
56061
56062 /* TODO Prevent device auto probing */
56063- WARN("Device %s added to live group %d!\n", dev_name(dev),
56064+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
56065 iommu_group_id(group->iommu_group));
56066
56067 return 0;
56068diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
56069index 5174eba..451e6bc 100644
56070--- a/drivers/vhost/vringh.c
56071+++ b/drivers/vhost/vringh.c
56072@@ -530,17 +530,17 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
56073 /* Userspace access helpers: in this case, addresses are really userspace. */
56074 static inline int getu16_user(u16 *val, const u16 *p)
56075 {
56076- return get_user(*val, (__force u16 __user *)p);
56077+ return get_user(*val, (u16 __force_user *)p);
56078 }
56079
56080 static inline int putu16_user(u16 *p, u16 val)
56081 {
56082- return put_user(val, (__force u16 __user *)p);
56083+ return put_user(val, (u16 __force_user *)p);
56084 }
56085
56086 static inline int copydesc_user(void *dst, const void *src, size_t len)
56087 {
56088- return copy_from_user(dst, (__force void __user *)src, len) ?
56089+ return copy_from_user(dst, (void __force_user *)src, len) ?
56090 -EFAULT : 0;
56091 }
56092
56093@@ -548,19 +548,19 @@ static inline int putused_user(struct vring_used_elem *dst,
56094 const struct vring_used_elem *src,
56095 unsigned int num)
56096 {
56097- return copy_to_user((__force void __user *)dst, src,
56098+ return copy_to_user((void __force_user *)dst, src,
56099 sizeof(*dst) * num) ? -EFAULT : 0;
56100 }
56101
56102 static inline int xfer_from_user(void *src, void *dst, size_t len)
56103 {
56104- return copy_from_user(dst, (__force void __user *)src, len) ?
56105+ return copy_from_user(dst, (void __force_user *)src, len) ?
56106 -EFAULT : 0;
56107 }
56108
56109 static inline int xfer_to_user(void *dst, void *src, size_t len)
56110 {
56111- return copy_to_user((__force void __user *)dst, src, len) ?
56112+ return copy_to_user((void __force_user *)dst, src, len) ?
56113 -EFAULT : 0;
56114 }
56115
56116@@ -596,9 +596,9 @@ int vringh_init_user(struct vringh *vrh, u32 features,
56117 vrh->last_used_idx = 0;
56118 vrh->vring.num = num;
56119 /* vring expects kernel addresses, but only used via accessors. */
56120- vrh->vring.desc = (__force struct vring_desc *)desc;
56121- vrh->vring.avail = (__force struct vring_avail *)avail;
56122- vrh->vring.used = (__force struct vring_used *)used;
56123+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
56124+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
56125+ vrh->vring.used = (__force_kernel struct vring_used *)used;
56126 return 0;
56127 }
56128 EXPORT_SYMBOL(vringh_init_user);
56129@@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val, const u16 *p)
56130
56131 static inline int putu16_kern(u16 *p, u16 val)
56132 {
56133- ACCESS_ONCE(*p) = val;
56134+ ACCESS_ONCE_RW(*p) = val;
56135 return 0;
56136 }
56137
56138diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
56139index 84a110a..96312c3 100644
56140--- a/drivers/video/backlight/kb3886_bl.c
56141+++ b/drivers/video/backlight/kb3886_bl.c
56142@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
56143 static unsigned long kb3886bl_flags;
56144 #define KB3886BL_SUSPENDED 0x01
56145
56146-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
56147+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
56148 {
56149 .ident = "Sahara Touch-iT",
56150 .matches = {
56151diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
56152index 1b0b233..6f34c2c 100644
56153--- a/drivers/video/fbdev/arcfb.c
56154+++ b/drivers/video/fbdev/arcfb.c
56155@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
56156 return -ENOSPC;
56157
56158 err = 0;
56159- if ((count + p) > fbmemlength) {
56160+ if (count > (fbmemlength - p)) {
56161 count = fbmemlength - p;
56162 err = -ENOSPC;
56163 }
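
The arcfb_write() change is an integer-overflow hardening: with unsigned arithmetic, `count + p` can wrap around and compare less than fbmemlength, bypassing the clamp for huge counts. Rewriting the test as `count > fbmemlength - p` cannot wrap, provided p has already been bounded by fbmemlength, as the earlier check in this function ensures. A self-contained illustration (hypothetical values):

    #include <stdio.h>

    int main(void)
    {
            unsigned long fbmemlength = 4096, p = 16;
            unsigned long count = -8UL;             /* huge, e.g. attacker-chosen */

            /* old form: count + p wraps to 8, so the bound check is bypassed */
            printf("old check trips: %d\n", (count + p) > fbmemlength);
            /* new form: compares against the space left, cannot wrap */
            printf("new check trips: %d\n", count > (fbmemlength - p));
            return 0;
    }
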
56164diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
56165index 52108be..c7c110d 100644
56166--- a/drivers/video/fbdev/aty/aty128fb.c
56167+++ b/drivers/video/fbdev/aty/aty128fb.c
56168@@ -149,7 +149,7 @@ enum {
56169 };
56170
56171 /* Must match above enum */
56172-static char * const r128_family[] = {
56173+static const char * const r128_family[] = {
56174 "AGP",
56175 "PCI",
56176 "PRO AGP",
56177diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
56178index c3d0074..0b9077e 100644
56179--- a/drivers/video/fbdev/aty/atyfb_base.c
56180+++ b/drivers/video/fbdev/aty/atyfb_base.c
56181@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
56182 par->accel_flags = var->accel_flags; /* hack */
56183
56184 if (var->accel_flags) {
56185- info->fbops->fb_sync = atyfb_sync;
56186+ pax_open_kernel();
56187+ *(void **)&info->fbops->fb_sync = atyfb_sync;
56188+ pax_close_kernel();
56189 info->flags &= ~FBINFO_HWACCEL_DISABLED;
56190 } else {
56191- info->fbops->fb_sync = NULL;
56192+ pax_open_kernel();
56193+ *(void **)&info->fbops->fb_sync = NULL;
56194+ pax_close_kernel();
56195 info->flags |= FBINFO_HWACCEL_DISABLED;
56196 }
56197
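
This atyfb hunk shows the pattern repeated throughout the fbdev changes below: under PAX_KERNEXEC and the constify plugin, ops tables such as fb_ops live in read-only memory, so runtime reassignment of their members must be bracketed by pax_open_kernel()/pax_close_kernel(), and the `*(void **)&` cast strips the const qualification the plugin applied. On x86 the open/close pair boils down to toggling the CR0 write-protect bit (a simplified assumption, not the verbatim PaX implementation, which also deals with preemption):

    static inline void pax_open_kernel(void)
    {
            write_cr0(read_cr0() & ~X86_CR0_WP);  /* let stores hit RO pages */
            barrier();
    }

    static inline void pax_close_kernel(void)
    {
            barrier();
            write_cr0(read_cr0() | X86_CR0_WP);   /* re-arm write protection */
    }
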
56198diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
56199index 2fa0317..4983f2a 100644
56200--- a/drivers/video/fbdev/aty/mach64_cursor.c
56201+++ b/drivers/video/fbdev/aty/mach64_cursor.c
56202@@ -8,6 +8,7 @@
56203 #include "../core/fb_draw.h"
56204
56205 #include <asm/io.h>
56206+#include <asm/pgtable.h>
56207
56208 #ifdef __sparc__
56209 #include <asm/fbio.h>
56210@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
56211 info->sprite.buf_align = 16; /* and 64 lines tall. */
56212 info->sprite.flags = FB_PIXMAP_IO;
56213
56214- info->fbops->fb_cursor = atyfb_cursor;
56215+ pax_open_kernel();
56216+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
56217+ pax_close_kernel();
56218
56219 return 0;
56220 }
56221diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
56222index 900aa4e..6d49418 100644
56223--- a/drivers/video/fbdev/core/fb_defio.c
56224+++ b/drivers/video/fbdev/core/fb_defio.c
56225@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
56226
56227 BUG_ON(!fbdefio);
56228 mutex_init(&fbdefio->lock);
56229- info->fbops->fb_mmap = fb_deferred_io_mmap;
56230+ pax_open_kernel();
56231+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
56232+ pax_close_kernel();
56233 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
56234 INIT_LIST_HEAD(&fbdefio->pagelist);
56235 if (fbdefio->delay == 0) /* set a default of 1 s */
56236@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
56237 page->mapping = NULL;
56238 }
56239
56240- info->fbops->fb_mmap = NULL;
56241+ *(void **)&info->fbops->fb_mmap = NULL;
56242 mutex_destroy(&fbdefio->lock);
56243 }
56244 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
56245diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
56246index b5e85f6..290f8c7 100644
56247--- a/drivers/video/fbdev/core/fbmem.c
56248+++ b/drivers/video/fbdev/core/fbmem.c
56249@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
56250 __u32 data;
56251 int err;
56252
56253- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
56254+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
56255
56256 data = (__u32) (unsigned long) fix->smem_start;
56257 err |= put_user(data, &fix32->smem_start);
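
The fbmem.c tweak looks cosmetic but is type-directed: `id` is a char array, so `&fix32->id` and `fix32->id` denote the same address with different pointee types. Dropping the `&` gives copy_to_user() the element-pointer form that grsecurity's user-copy and size_overflow checking can reason about; the generated code is unchanged. In isolation (hypothetical struct name):

    struct fix { char id[16]; };

    void demo(struct fix *f)
    {
            char (*whole)[16] = &f->id;  /* what the old call passed   */
            char *first       = f->id;   /* what the fixed call passes */
            /* both point at the same byte; only the type differs */
            (void)whole; (void)first;
    }
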
56258diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
56259index e23392e..8a77540 100644
56260--- a/drivers/video/fbdev/hyperv_fb.c
56261+++ b/drivers/video/fbdev/hyperv_fb.c
56262@@ -235,7 +235,7 @@ static uint screen_fb_size;
56263 static inline int synthvid_send(struct hv_device *hdev,
56264 struct synthvid_msg *msg)
56265 {
56266- static atomic64_t request_id = ATOMIC64_INIT(0);
56267+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
56268 int ret;
56269
56270 msg->pipe_hdr.type = PIPE_MSG_DATA;
56271@@ -243,7 +243,7 @@ static inline int synthvid_send(struct hv_device *hdev,
56272
56273 ret = vmbus_sendpacket(hdev->channel, msg,
56274 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
56275- atomic64_inc_return(&request_id),
56276+ atomic64_inc_return_unchecked(&request_id),
56277 VM_PKT_DATA_INBAND, 0);
56278
56279 if (ret)
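
The hyperv_fb counter is the first of several *_unchecked conversions in this stretch (udlfb below gets the same treatment for its byte/cycle statistics). PAX_REFCOUNT makes the ordinary atomic ops trap on signed overflow to stop refcount-overflow exploits; counters that are mere statistics or rolling IDs are expected to wrap, so they move to the unchecked variants that keep plain wrapping semantics:

    static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);

    /* may wrap by design; no overflow trap wanted here */
    u64 id = atomic64_inc_return_unchecked(&request_id);
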
56280diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
56281index 7672d2e..b56437f 100644
56282--- a/drivers/video/fbdev/i810/i810_accel.c
56283+++ b/drivers/video/fbdev/i810/i810_accel.c
56284@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
56285 }
56286 }
56287 printk("ringbuffer lockup!!!\n");
56288+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
56289 i810_report_error(mmio);
56290 par->dev_flags |= LOCKUP;
56291 info->pixmap.scan_align = 1;
56292diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
56293index a01147f..5d896f8 100644
56294--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
56295+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
56296@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
56297
56298 #ifdef CONFIG_FB_MATROX_MYSTIQUE
56299 struct matrox_switch matrox_mystique = {
56300- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
56301+ .preinit = MGA1064_preinit,
56302+ .reset = MGA1064_reset,
56303+ .init = MGA1064_init,
56304+ .restore = MGA1064_restore,
56305 };
56306 EXPORT_SYMBOL(matrox_mystique);
56307 #endif
56308
56309 #ifdef CONFIG_FB_MATROX_G
56310 struct matrox_switch matrox_G100 = {
56311- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
56312+ .preinit = MGAG100_preinit,
56313+ .reset = MGAG100_reset,
56314+ .init = MGAG100_init,
56315+ .restore = MGAG100_restore,
56316 };
56317 EXPORT_SYMBOL(matrox_G100);
56318 #endif
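
The matrox_switch rewrites (here and in matroxfb_Ti3026.c just below, plus sh_mobile_lcdcfb.c further on) replace positional initializers with designated ones. Beyond readability, this is what grsecurity's structure-layout plugins presumably require: initialization by field name stays correct even if a plugin reorders or pads the struct, whereas positional initializers would silently bind to the wrong slots. The general shape:

    /* binds by name, immune to field reordering: */
    struct matrox_switch example_sw = {
            .preinit = MGA1064_preinit,
            .reset   = MGA1064_reset,
            .init    = MGA1064_init,
            .restore = MGA1064_restore,
    };
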
56319diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
56320index 195ad7c..09743fc 100644
56321--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
56322+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
56323@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
56324 }
56325
56326 struct matrox_switch matrox_millennium = {
56327- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
56328+ .preinit = Ti3026_preinit,
56329+ .reset = Ti3026_reset,
56330+ .init = Ti3026_init,
56331+ .restore = Ti3026_restore
56332 };
56333 EXPORT_SYMBOL(matrox_millennium);
56334 #endif
56335diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
56336index fe92eed..106e085 100644
56337--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
56338+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
56339@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
56340 struct mb862xxfb_par *par = info->par;
56341
56342 if (info->var.bits_per_pixel == 32) {
56343- info->fbops->fb_fillrect = cfb_fillrect;
56344- info->fbops->fb_copyarea = cfb_copyarea;
56345- info->fbops->fb_imageblit = cfb_imageblit;
56346+ pax_open_kernel();
56347+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
56348+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
56349+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
56350+ pax_close_kernel();
56351 } else {
56352 outreg(disp, GC_L0EM, 3);
56353- info->fbops->fb_fillrect = mb86290fb_fillrect;
56354- info->fbops->fb_copyarea = mb86290fb_copyarea;
56355- info->fbops->fb_imageblit = mb86290fb_imageblit;
56356+ pax_open_kernel();
56357+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
56358+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
56359+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
56360+ pax_close_kernel();
56361 }
56362 outreg(draw, GDC_REG_DRAW_BASE, 0);
56363 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
56364diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
56365index def0412..fed6529 100644
56366--- a/drivers/video/fbdev/nvidia/nvidia.c
56367+++ b/drivers/video/fbdev/nvidia/nvidia.c
56368@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
56369 info->fix.line_length = (info->var.xres_virtual *
56370 info->var.bits_per_pixel) >> 3;
56371 if (info->var.accel_flags) {
56372- info->fbops->fb_imageblit = nvidiafb_imageblit;
56373- info->fbops->fb_fillrect = nvidiafb_fillrect;
56374- info->fbops->fb_copyarea = nvidiafb_copyarea;
56375- info->fbops->fb_sync = nvidiafb_sync;
56376+ pax_open_kernel();
56377+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
56378+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
56379+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
56380+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
56381+ pax_close_kernel();
56382 info->pixmap.scan_align = 4;
56383 info->flags &= ~FBINFO_HWACCEL_DISABLED;
56384 info->flags |= FBINFO_READS_FAST;
56385 NVResetGraphics(info);
56386 } else {
56387- info->fbops->fb_imageblit = cfb_imageblit;
56388- info->fbops->fb_fillrect = cfb_fillrect;
56389- info->fbops->fb_copyarea = cfb_copyarea;
56390- info->fbops->fb_sync = NULL;
56391+ pax_open_kernel();
56392+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
56393+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
56394+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
56395+ *(void **)&info->fbops->fb_sync = NULL;
56396+ pax_close_kernel();
56397 info->pixmap.scan_align = 1;
56398 info->flags |= FBINFO_HWACCEL_DISABLED;
56399 info->flags &= ~FBINFO_READS_FAST;
56400@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
56401 info->pixmap.size = 8 * 1024;
56402 info->pixmap.flags = FB_PIXMAP_SYSTEM;
56403
56404- if (!hwcur)
56405- info->fbops->fb_cursor = NULL;
56406+ if (!hwcur) {
56407+ pax_open_kernel();
56408+ *(void **)&info->fbops->fb_cursor = NULL;
56409+ pax_close_kernel();
56410+ }
56411
56412 info->var.accel_flags = (!noaccel);
56413
56414diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
56415index 2412a0d..294215b 100644
56416--- a/drivers/video/fbdev/omap2/dss/display.c
56417+++ b/drivers/video/fbdev/omap2/dss/display.c
56418@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
56419 if (dssdev->name == NULL)
56420 dssdev->name = dssdev->alias;
56421
56422+ pax_open_kernel();
56423 if (drv && drv->get_resolution == NULL)
56424- drv->get_resolution = omapdss_default_get_resolution;
56425+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
56426 if (drv && drv->get_recommended_bpp == NULL)
56427- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
56428+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
56429 if (drv && drv->get_timings == NULL)
56430- drv->get_timings = omapdss_default_get_timings;
56431+ *(void **)&drv->get_timings = omapdss_default_get_timings;
56432+ pax_close_kernel();
56433
56434 mutex_lock(&panel_list_mutex);
56435 list_add_tail(&dssdev->panel_list, &panel_list);
56436diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
56437index 83433cb..71e9b98 100644
56438--- a/drivers/video/fbdev/s1d13xxxfb.c
56439+++ b/drivers/video/fbdev/s1d13xxxfb.c
56440@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
56441
56442 switch(prod_id) {
56443 case S1D13506_PROD_ID: /* activate acceleration */
56444- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
56445- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
56446+ pax_open_kernel();
56447+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
56448+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
56449+ pax_close_kernel();
56450 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
56451 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
56452 break;
56453diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
56454index 2bcc84a..29dd1ea 100644
56455--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
56456+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
56457@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
56458 }
56459
56460 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
56461- lcdc_sys_write_index,
56462- lcdc_sys_write_data,
56463- lcdc_sys_read_data,
56464+ .write_index = lcdc_sys_write_index,
56465+ .write_data = lcdc_sys_write_data,
56466+ .read_data = lcdc_sys_read_data,
56467 };
56468
56469 static int sh_mobile_lcdc_sginit(struct fb_info *info,
56470diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
56471index d513ed6..90b0de9 100644
56472--- a/drivers/video/fbdev/smscufx.c
56473+++ b/drivers/video/fbdev/smscufx.c
56474@@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
56475 fb_deferred_io_cleanup(info);
56476 kfree(info->fbdefio);
56477 info->fbdefio = NULL;
56478- info->fbops->fb_mmap = ufx_ops_mmap;
56479+ pax_open_kernel();
56480+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
56481+ pax_close_kernel();
56482 }
56483
56484 pr_debug("released /dev/fb%d user=%d count=%d",
56485diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
56486index 77b890e..458e666 100644
56487--- a/drivers/video/fbdev/udlfb.c
56488+++ b/drivers/video/fbdev/udlfb.c
56489@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
56490 dlfb_urb_completion(urb);
56491
56492 error:
56493- atomic_add(bytes_sent, &dev->bytes_sent);
56494- atomic_add(bytes_identical, &dev->bytes_identical);
56495- atomic_add(width*height*2, &dev->bytes_rendered);
56496+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56497+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56498+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
56499 end_cycles = get_cycles();
56500- atomic_add(((unsigned int) ((end_cycles - start_cycles)
56501+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56502 >> 10)), /* Kcycles */
56503 &dev->cpu_kcycles_used);
56504
56505@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
56506 dlfb_urb_completion(urb);
56507
56508 error:
56509- atomic_add(bytes_sent, &dev->bytes_sent);
56510- atomic_add(bytes_identical, &dev->bytes_identical);
56511- atomic_add(bytes_rendered, &dev->bytes_rendered);
56512+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56513+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56514+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
56515 end_cycles = get_cycles();
56516- atomic_add(((unsigned int) ((end_cycles - start_cycles)
56517+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56518 >> 10)), /* Kcycles */
56519 &dev->cpu_kcycles_used);
56520 }
56521@@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
56522 fb_deferred_io_cleanup(info);
56523 kfree(info->fbdefio);
56524 info->fbdefio = NULL;
56525- info->fbops->fb_mmap = dlfb_ops_mmap;
56526+ pax_open_kernel();
56527+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
56528+ pax_close_kernel();
56529 }
56530
56531 pr_warn("released /dev/fb%d user=%d count=%d\n",
56532@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
56533 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56534 struct dlfb_data *dev = fb_info->par;
56535 return snprintf(buf, PAGE_SIZE, "%u\n",
56536- atomic_read(&dev->bytes_rendered));
56537+ atomic_read_unchecked(&dev->bytes_rendered));
56538 }
56539
56540 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56541@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56542 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56543 struct dlfb_data *dev = fb_info->par;
56544 return snprintf(buf, PAGE_SIZE, "%u\n",
56545- atomic_read(&dev->bytes_identical));
56546+ atomic_read_unchecked(&dev->bytes_identical));
56547 }
56548
56549 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56550@@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56551 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56552 struct dlfb_data *dev = fb_info->par;
56553 return snprintf(buf, PAGE_SIZE, "%u\n",
56554- atomic_read(&dev->bytes_sent));
56555+ atomic_read_unchecked(&dev->bytes_sent));
56556 }
56557
56558 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56559@@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56560 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56561 struct dlfb_data *dev = fb_info->par;
56562 return snprintf(buf, PAGE_SIZE, "%u\n",
56563- atomic_read(&dev->cpu_kcycles_used));
56564+ atomic_read_unchecked(&dev->cpu_kcycles_used));
56565 }
56566
56567 static ssize_t edid_show(
56568@@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
56569 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56570 struct dlfb_data *dev = fb_info->par;
56571
56572- atomic_set(&dev->bytes_rendered, 0);
56573- atomic_set(&dev->bytes_identical, 0);
56574- atomic_set(&dev->bytes_sent, 0);
56575- atomic_set(&dev->cpu_kcycles_used, 0);
56576+ atomic_set_unchecked(&dev->bytes_rendered, 0);
56577+ atomic_set_unchecked(&dev->bytes_identical, 0);
56578+ atomic_set_unchecked(&dev->bytes_sent, 0);
56579+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
56580
56581 return count;
56582 }
56583diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
56584index 509d452..7c9d2de 100644
56585--- a/drivers/video/fbdev/uvesafb.c
56586+++ b/drivers/video/fbdev/uvesafb.c
56587@@ -19,6 +19,7 @@
56588 #include <linux/io.h>
56589 #include <linux/mutex.h>
56590 #include <linux/slab.h>
56591+#include <linux/moduleloader.h>
56592 #include <video/edid.h>
56593 #include <video/uvesafb.h>
56594 #ifdef CONFIG_X86
56595@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
56596 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
56597 par->pmi_setpal = par->ypan = 0;
56598 } else {
56599+
56600+#ifdef CONFIG_PAX_KERNEXEC
56601+#ifdef CONFIG_MODULES
56602+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
56603+#endif
56604+ if (!par->pmi_code) {
56605+ par->pmi_setpal = par->ypan = 0;
56606+ return 0;
56607+ }
56608+#endif
56609+
56610 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
56611 + task->t.regs.edi);
56612+
56613+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56614+ pax_open_kernel();
56615+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
56616+ pax_close_kernel();
56617+
56618+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
56619+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
56620+#else
56621 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
56622 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
56623+#endif
56624+
56625 printk(KERN_INFO "uvesafb: protected mode interface info at "
56626 "%04x:%04x\n",
56627 (u16)task->t.regs.es, (u16)task->t.regs.edi);
56628@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
56629 par->ypan = ypan;
56630
56631 if (par->pmi_setpal || par->ypan) {
56632+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
56633 if (__supported_pte_mask & _PAGE_NX) {
56634 par->pmi_setpal = par->ypan = 0;
56635 printk(KERN_WARNING "uvesafb: NX protection is active, "
56636 "better not use the PMI.\n");
56637- } else {
56638+ } else
56639+#endif
56640 uvesafb_vbe_getpmi(task, par);
56641- }
56642 }
56643 #else
56644 /* The protected mode interface is not available on non-x86. */
56645@@ -1453,8 +1477,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56646 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
56647
56648 /* Disable blanking if the user requested so. */
56649- if (!blank)
56650- info->fbops->fb_blank = NULL;
56651+ if (!blank) {
56652+ pax_open_kernel();
56653+ *(void **)&info->fbops->fb_blank = NULL;
56654+ pax_close_kernel();
56655+ }
56656
56657 /*
56658 * Find out how much IO memory is required for the mode with
56659@@ -1525,8 +1552,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56660 info->flags = FBINFO_FLAG_DEFAULT |
56661 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
56662
56663- if (!par->ypan)
56664- info->fbops->fb_pan_display = NULL;
56665+ if (!par->ypan) {
56666+ pax_open_kernel();
56667+ *(void **)&info->fbops->fb_pan_display = NULL;
56668+ pax_close_kernel();
56669+ }
56670 }
56671
56672 static void uvesafb_init_mtrr(struct fb_info *info)
56673@@ -1787,6 +1817,11 @@ out_mode:
56674 out:
56675 kfree(par->vbe_modes);
56676
56677+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56678+ if (par->pmi_code)
56679+ module_free_exec(NULL, par->pmi_code);
56680+#endif
56681+
56682 framebuffer_release(info);
56683 return err;
56684 }
56685@@ -1811,6 +1846,11 @@ static int uvesafb_remove(struct platform_device *dev)
56686 kfree(par->vbe_state_orig);
56687 kfree(par->vbe_state_saved);
56688
56689+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56690+ if (par->pmi_code)
56691+ module_free_exec(NULL, par->pmi_code);
56692+#endif
56693+
56694 framebuffer_release(info);
56695 }
56696 return 0;
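
The uvesafb block is the most involved KERNEXEC fix here. The VBE protected-mode interface (PMI) is BIOS code that the driver previously executed straight out of the direct mapping; with KERNEXEC that mapping is non-executable. The patch therefore allocates executable space with module_alloc_exec() (hence the new linux/moduleloader.h include and the CONFIG_MODULES guards), copies the PMI blob into it under pax_open_kernel(), and derives the entry points through ktva_ktla(), which translates the kernel virtual address to its executable alias. If the allocation fails, or on module-less KERNEXEC kernels, the PMI is simply disabled (pmi_setpal = ypan = 0), and the matching module_free_exec() calls in the error and remove paths release the region. Condensed (pmi_size stands in for the (u16)ecx length used in the real code):

    par->pmi_code = module_alloc_exec(pmi_size);       /* executable region */
    pax_open_kernel();
    memcpy(par->pmi_code, par->pmi_base, pmi_size);    /* copy BIOS blob    */
    pax_close_kernel();
    par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
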
56697diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
56698index 6170e7f..dd63031 100644
56699--- a/drivers/video/fbdev/vesafb.c
56700+++ b/drivers/video/fbdev/vesafb.c
56701@@ -9,6 +9,7 @@
56702 */
56703
56704 #include <linux/module.h>
56705+#include <linux/moduleloader.h>
56706 #include <linux/kernel.h>
56707 #include <linux/errno.h>
56708 #include <linux/string.h>
56709@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
56710 static int vram_total; /* Set total amount of memory */
56711 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
56712 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
56713-static void (*pmi_start)(void) __read_mostly;
56714-static void (*pmi_pal) (void) __read_mostly;
56715+static void (*pmi_start)(void) __read_only;
56716+static void (*pmi_pal) (void) __read_only;
56717 static int depth __read_mostly;
56718 static int vga_compat __read_mostly;
56719 /* --------------------------------------------------------------------- */
56720@@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
56721 unsigned int size_remap;
56722 unsigned int size_total;
56723 char *option = NULL;
56724+ void *pmi_code = NULL;
56725
56726 /* ignore error return of fb_get_options */
56727 fb_get_options("vesafb", &option);
56728@@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
56729 size_remap = size_total;
56730 vesafb_fix.smem_len = size_remap;
56731
56732-#ifndef __i386__
56733- screen_info.vesapm_seg = 0;
56734-#endif
56735-
56736 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
56737 printk(KERN_WARNING
56738 "vesafb: cannot reserve video memory at 0x%lx\n",
56739@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
56740 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
56741 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
56742
56743+#ifdef __i386__
56744+
56745+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56746+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
56747+ if (!pmi_code)
56748+#elif !defined(CONFIG_PAX_KERNEXEC)
56749+ if (0)
56750+#endif
56751+
56752+#endif
56753+ screen_info.vesapm_seg = 0;
56754+
56755 if (screen_info.vesapm_seg) {
56756- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
56757- screen_info.vesapm_seg,screen_info.vesapm_off);
56758+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
56759+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
56760 }
56761
56762 if (screen_info.vesapm_seg < 0xc000)
56763@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
56764
56765 if (ypan || pmi_setpal) {
56766 unsigned short *pmi_base;
56767+
56768 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
56769- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
56770- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
56771+
56772+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56773+ pax_open_kernel();
56774+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
56775+#else
56776+ pmi_code = pmi_base;
56777+#endif
56778+
56779+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
56780+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
56781+
56782+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56783+ pmi_start = ktva_ktla(pmi_start);
56784+ pmi_pal = ktva_ktla(pmi_pal);
56785+ pax_close_kernel();
56786+#endif
56787+
56788 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
56789 if (pmi_base[3]) {
56790 printk(KERN_INFO "vesafb: pmi: ports = ");
56791@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
56792 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
56793 (ypan ? FBINFO_HWACCEL_YPAN : 0);
56794
56795- if (!ypan)
56796- info->fbops->fb_pan_display = NULL;
56797+ if (!ypan) {
56798+ pax_open_kernel();
56799+ *(void **)&info->fbops->fb_pan_display = NULL;
56800+ pax_close_kernel();
56801+ }
56802
56803 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
56804 err = -ENOMEM;
56805@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
56806 fb_info(info, "%s frame buffer device\n", info->fix.id);
56807 return 0;
56808 err:
56809+
56810+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56811+ module_free_exec(NULL, pmi_code);
56812+#endif
56813+
56814 if (info->screen_base)
56815 iounmap(info->screen_base);
56816 framebuffer_release(info);
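
vesafb gets the same treatment, with one preprocessor trick worth spelling out: the block ending in `screen_info.vesapm_seg = 0;` is a single statement whose guarding `if` changes per configuration. With MODULES+KERNEXEC it runs only when module_alloc_exec() fails; on i386 without KERNEXEC the `if (0)` makes it dead code, so the PMI stays usable in place; with KERNEXEC but no modules, and on every non-i386 build, it runs unconditionally and the PMI is disabled outright, which also replaces the `#ifndef __i386__` clearing that the hunk removes. Expanded for the failure case it reads:

    pmi_code = module_alloc_exec(screen_info.vesapm_size);
    if (!pmi_code)
            screen_info.vesapm_seg = 0;   /* no exec memory -> no PMI */
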
56817diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
56818index 88714ae..16c2e11 100644
56819--- a/drivers/video/fbdev/via/via_clock.h
56820+++ b/drivers/video/fbdev/via/via_clock.h
56821@@ -56,7 +56,7 @@ struct via_clock {
56822
56823 void (*set_engine_pll_state)(u8 state);
56824 void (*set_engine_pll)(struct via_pll_config config);
56825-};
56826+} __no_const;
56827
56828
56829 static inline u32 get_pll_internal_frequency(u32 ref_freq,
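
Finally, __no_const on struct via_clock opts it out of grsecurity's constify plugin. The plugin automatically const-qualifies structs that consist almost entirely of function pointers, but via_clock is populated at probe time and must stay writable; presumably the annotation expands to the plugin's attribute along the lines of:

    #ifdef CONSTIFY_PLUGIN
    # define __no_const __attribute__((no_const))
    #else
    # define __no_const
    #endif
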
56830diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
56831index 3c14e43..2630570 100644
56832--- a/drivers/video/logo/logo_linux_clut224.ppm
56833+++ b/drivers/video/logo/logo_linux_clut224.ppm
56834@@ -2,1603 +2,1123 @@ P3
56835 # Standard 224-color Linux logo
56836 80 80
56837 255
[logo_linux_clut224.ppm image-data hunk: roughly 1600 rows of raw RGB pixel triplets are removed and 1100 added, replacing the pixel data of the standard 80x80, 224-color Linux boot logo; the numeric rows are collapsed here as they carry no reviewable content]
57746-253 253 253 253 253 253 246 246 246 242 242 242
57747-253 253 253 253 253 253 253 253 253 253 253 253
57748-253 253 253 253 253 253 253 253 253 253 253 253
57749-253 253 253 253 253 253 253 253 253 253 253 253
57750-253 253 253 253 253 253 253 253 253 253 253 253
57751-253 253 253 253 253 253 144 144 144 2 2 6
57752- 2 2 6 2 2 6 2 2 6 46 46 46
57753- 2 2 6 2 2 6 2 2 6 2 2 6
57754- 42 42 42 74 74 74 30 30 30 10 10 10
57755- 0 0 0 0 0 0 0 0 0 0 0 0
57756- 0 0 0 0 0 0 0 0 0 0 0 0
57757- 0 0 0 0 0 0 0 0 0 0 0 0
57758- 0 0 0 0 0 0 0 0 0 0 0 0
57759- 0 0 0 0 0 0 0 0 0 0 0 0
57760- 0 0 0 0 0 0 0 0 0 0 0 0
57761- 0 0 0 0 0 0 0 0 0 0 0 0
57762- 6 6 6 14 14 14 42 42 42 90 90 90
57763- 26 26 26 6 6 6 42 42 42 2 2 6
57764- 74 74 74 250 250 250 253 253 253 253 253 253
57765-253 253 253 253 253 253 253 253 253 253 253 253
57766-253 253 253 253 253 253 242 242 242 242 242 242
57767-253 253 253 253 253 253 253 253 253 253 253 253
57768-253 253 253 253 253 253 253 253 253 253 253 253
57769-253 253 253 253 253 253 253 253 253 253 253 253
57770-253 253 253 253 253 253 253 253 253 253 253 253
57771-253 253 253 253 253 253 182 182 182 2 2 6
57772- 2 2 6 2 2 6 2 2 6 46 46 46
57773- 2 2 6 2 2 6 2 2 6 2 2 6
57774- 10 10 10 86 86 86 38 38 38 10 10 10
57775- 0 0 0 0 0 0 0 0 0 0 0 0
57776- 0 0 0 0 0 0 0 0 0 0 0 0
57777- 0 0 0 0 0 0 0 0 0 0 0 0
57778- 0 0 0 0 0 0 0 0 0 0 0 0
57779- 0 0 0 0 0 0 0 0 0 0 0 0
57780- 0 0 0 0 0 0 0 0 0 0 0 0
57781- 0 0 0 0 0 0 0 0 0 0 0 0
57782- 10 10 10 26 26 26 66 66 66 82 82 82
57783- 2 2 6 22 22 22 18 18 18 2 2 6
57784-149 149 149 253 253 253 253 253 253 253 253 253
57785-253 253 253 253 253 253 253 253 253 253 253 253
57786-253 253 253 253 253 253 234 234 234 242 242 242
57787-253 253 253 253 253 253 253 253 253 253 253 253
57788-253 253 253 253 253 253 253 253 253 253 253 253
57789-253 253 253 253 253 253 253 253 253 253 253 253
57790-253 253 253 253 253 253 253 253 253 253 253 253
57791-253 253 253 253 253 253 206 206 206 2 2 6
57792- 2 2 6 2 2 6 2 2 6 38 38 38
57793- 2 2 6 2 2 6 2 2 6 2 2 6
57794- 6 6 6 86 86 86 46 46 46 14 14 14
57795- 0 0 0 0 0 0 0 0 0 0 0 0
57796- 0 0 0 0 0 0 0 0 0 0 0 0
57797- 0 0 0 0 0 0 0 0 0 0 0 0
57798- 0 0 0 0 0 0 0 0 0 0 0 0
57799- 0 0 0 0 0 0 0 0 0 0 0 0
57800- 0 0 0 0 0 0 0 0 0 0 0 0
57801- 0 0 0 0 0 0 0 0 0 6 6 6
57802- 18 18 18 46 46 46 86 86 86 18 18 18
57803- 2 2 6 34 34 34 10 10 10 6 6 6
57804-210 210 210 253 253 253 253 253 253 253 253 253
57805-253 253 253 253 253 253 253 253 253 253 253 253
57806-253 253 253 253 253 253 234 234 234 242 242 242
57807-253 253 253 253 253 253 253 253 253 253 253 253
57808-253 253 253 253 253 253 253 253 253 253 253 253
57809-253 253 253 253 253 253 253 253 253 253 253 253
57810-253 253 253 253 253 253 253 253 253 253 253 253
57811-253 253 253 253 253 253 221 221 221 6 6 6
57812- 2 2 6 2 2 6 6 6 6 30 30 30
57813- 2 2 6 2 2 6 2 2 6 2 2 6
57814- 2 2 6 82 82 82 54 54 54 18 18 18
57815- 6 6 6 0 0 0 0 0 0 0 0 0
57816- 0 0 0 0 0 0 0 0 0 0 0 0
57817- 0 0 0 0 0 0 0 0 0 0 0 0
57818- 0 0 0 0 0 0 0 0 0 0 0 0
57819- 0 0 0 0 0 0 0 0 0 0 0 0
57820- 0 0 0 0 0 0 0 0 0 0 0 0
57821- 0 0 0 0 0 0 0 0 0 10 10 10
57822- 26 26 26 66 66 66 62 62 62 2 2 6
57823- 2 2 6 38 38 38 10 10 10 26 26 26
57824-238 238 238 253 253 253 253 253 253 253 253 253
57825-253 253 253 253 253 253 253 253 253 253 253 253
57826-253 253 253 253 253 253 231 231 231 238 238 238
57827-253 253 253 253 253 253 253 253 253 253 253 253
57828-253 253 253 253 253 253 253 253 253 253 253 253
57829-253 253 253 253 253 253 253 253 253 253 253 253
57830-253 253 253 253 253 253 253 253 253 253 253 253
57831-253 253 253 253 253 253 231 231 231 6 6 6
57832- 2 2 6 2 2 6 10 10 10 30 30 30
57833- 2 2 6 2 2 6 2 2 6 2 2 6
57834- 2 2 6 66 66 66 58 58 58 22 22 22
57835- 6 6 6 0 0 0 0 0 0 0 0 0
57836- 0 0 0 0 0 0 0 0 0 0 0 0
57837- 0 0 0 0 0 0 0 0 0 0 0 0
57838- 0 0 0 0 0 0 0 0 0 0 0 0
57839- 0 0 0 0 0 0 0 0 0 0 0 0
57840- 0 0 0 0 0 0 0 0 0 0 0 0
57841- 0 0 0 0 0 0 0 0 0 10 10 10
57842- 38 38 38 78 78 78 6 6 6 2 2 6
57843- 2 2 6 46 46 46 14 14 14 42 42 42
57844-246 246 246 253 253 253 253 253 253 253 253 253
57845-253 253 253 253 253 253 253 253 253 253 253 253
57846-253 253 253 253 253 253 231 231 231 242 242 242
57847-253 253 253 253 253 253 253 253 253 253 253 253
57848-253 253 253 253 253 253 253 253 253 253 253 253
57849-253 253 253 253 253 253 253 253 253 253 253 253
57850-253 253 253 253 253 253 253 253 253 253 253 253
57851-253 253 253 253 253 253 234 234 234 10 10 10
57852- 2 2 6 2 2 6 22 22 22 14 14 14
57853- 2 2 6 2 2 6 2 2 6 2 2 6
57854- 2 2 6 66 66 66 62 62 62 22 22 22
57855- 6 6 6 0 0 0 0 0 0 0 0 0
57856- 0 0 0 0 0 0 0 0 0 0 0 0
57857- 0 0 0 0 0 0 0 0 0 0 0 0
57858- 0 0 0 0 0 0 0 0 0 0 0 0
57859- 0 0 0 0 0 0 0 0 0 0 0 0
57860- 0 0 0 0 0 0 0 0 0 0 0 0
57861- 0 0 0 0 0 0 6 6 6 18 18 18
57862- 50 50 50 74 74 74 2 2 6 2 2 6
57863- 14 14 14 70 70 70 34 34 34 62 62 62
57864-250 250 250 253 253 253 253 253 253 253 253 253
57865-253 253 253 253 253 253 253 253 253 253 253 253
57866-253 253 253 253 253 253 231 231 231 246 246 246
57867-253 253 253 253 253 253 253 253 253 253 253 253
57868-253 253 253 253 253 253 253 253 253 253 253 253
57869-253 253 253 253 253 253 253 253 253 253 253 253
57870-253 253 253 253 253 253 253 253 253 253 253 253
57871-253 253 253 253 253 253 234 234 234 14 14 14
57872- 2 2 6 2 2 6 30 30 30 2 2 6
57873- 2 2 6 2 2 6 2 2 6 2 2 6
57874- 2 2 6 66 66 66 62 62 62 22 22 22
57875- 6 6 6 0 0 0 0 0 0 0 0 0
57876- 0 0 0 0 0 0 0 0 0 0 0 0
57877- 0 0 0 0 0 0 0 0 0 0 0 0
57878- 0 0 0 0 0 0 0 0 0 0 0 0
57879- 0 0 0 0 0 0 0 0 0 0 0 0
57880- 0 0 0 0 0 0 0 0 0 0 0 0
57881- 0 0 0 0 0 0 6 6 6 18 18 18
57882- 54 54 54 62 62 62 2 2 6 2 2 6
57883- 2 2 6 30 30 30 46 46 46 70 70 70
57884-250 250 250 253 253 253 253 253 253 253 253 253
57885-253 253 253 253 253 253 253 253 253 253 253 253
57886-253 253 253 253 253 253 231 231 231 246 246 246
57887-253 253 253 253 253 253 253 253 253 253 253 253
57888-253 253 253 253 253 253 253 253 253 253 253 253
57889-253 253 253 253 253 253 253 253 253 253 253 253
57890-253 253 253 253 253 253 253 253 253 253 253 253
57891-253 253 253 253 253 253 226 226 226 10 10 10
57892- 2 2 6 6 6 6 30 30 30 2 2 6
57893- 2 2 6 2 2 6 2 2 6 2 2 6
57894- 2 2 6 66 66 66 58 58 58 22 22 22
57895- 6 6 6 0 0 0 0 0 0 0 0 0
57896- 0 0 0 0 0 0 0 0 0 0 0 0
57897- 0 0 0 0 0 0 0 0 0 0 0 0
57898- 0 0 0 0 0 0 0 0 0 0 0 0
57899- 0 0 0 0 0 0 0 0 0 0 0 0
57900- 0 0 0 0 0 0 0 0 0 0 0 0
57901- 0 0 0 0 0 0 6 6 6 22 22 22
57902- 58 58 58 62 62 62 2 2 6 2 2 6
57903- 2 2 6 2 2 6 30 30 30 78 78 78
57904-250 250 250 253 253 253 253 253 253 253 253 253
57905-253 253 253 253 253 253 253 253 253 253 253 253
57906-253 253 253 253 253 253 231 231 231 246 246 246
57907-253 253 253 253 253 253 253 253 253 253 253 253
57908-253 253 253 253 253 253 253 253 253 253 253 253
57909-253 253 253 253 253 253 253 253 253 253 253 253
57910-253 253 253 253 253 253 253 253 253 253 253 253
57911-253 253 253 253 253 253 206 206 206 2 2 6
57912- 22 22 22 34 34 34 18 14 6 22 22 22
57913- 26 26 26 18 18 18 6 6 6 2 2 6
57914- 2 2 6 82 82 82 54 54 54 18 18 18
57915- 6 6 6 0 0 0 0 0 0 0 0 0
57916- 0 0 0 0 0 0 0 0 0 0 0 0
57917- 0 0 0 0 0 0 0 0 0 0 0 0
57918- 0 0 0 0 0 0 0 0 0 0 0 0
57919- 0 0 0 0 0 0 0 0 0 0 0 0
57920- 0 0 0 0 0 0 0 0 0 0 0 0
57921- 0 0 0 0 0 0 6 6 6 26 26 26
57922- 62 62 62 106 106 106 74 54 14 185 133 11
57923-210 162 10 121 92 8 6 6 6 62 62 62
57924-238 238 238 253 253 253 253 253 253 253 253 253
57925-253 253 253 253 253 253 253 253 253 253 253 253
57926-253 253 253 253 253 253 231 231 231 246 246 246
57927-253 253 253 253 253 253 253 253 253 253 253 253
57928-253 253 253 253 253 253 253 253 253 253 253 253
57929-253 253 253 253 253 253 253 253 253 253 253 253
57930-253 253 253 253 253 253 253 253 253 253 253 253
57931-253 253 253 253 253 253 158 158 158 18 18 18
57932- 14 14 14 2 2 6 2 2 6 2 2 6
57933- 6 6 6 18 18 18 66 66 66 38 38 38
57934- 6 6 6 94 94 94 50 50 50 18 18 18
57935- 6 6 6 0 0 0 0 0 0 0 0 0
57936- 0 0 0 0 0 0 0 0 0 0 0 0
57937- 0 0 0 0 0 0 0 0 0 0 0 0
57938- 0 0 0 0 0 0 0 0 0 0 0 0
57939- 0 0 0 0 0 0 0 0 0 0 0 0
57940- 0 0 0 0 0 0 0 0 0 6 6 6
57941- 10 10 10 10 10 10 18 18 18 38 38 38
57942- 78 78 78 142 134 106 216 158 10 242 186 14
57943-246 190 14 246 190 14 156 118 10 10 10 10
57944- 90 90 90 238 238 238 253 253 253 253 253 253
57945-253 253 253 253 253 253 253 253 253 253 253 253
57946-253 253 253 253 253 253 231 231 231 250 250 250
57947-253 253 253 253 253 253 253 253 253 253 253 253
57948-253 253 253 253 253 253 253 253 253 253 253 253
57949-253 253 253 253 253 253 253 253 253 253 253 253
57950-253 253 253 253 253 253 253 253 253 246 230 190
57951-238 204 91 238 204 91 181 142 44 37 26 9
57952- 2 2 6 2 2 6 2 2 6 2 2 6
57953- 2 2 6 2 2 6 38 38 38 46 46 46
57954- 26 26 26 106 106 106 54 54 54 18 18 18
57955- 6 6 6 0 0 0 0 0 0 0 0 0
57956- 0 0 0 0 0 0 0 0 0 0 0 0
57957- 0 0 0 0 0 0 0 0 0 0 0 0
57958- 0 0 0 0 0 0 0 0 0 0 0 0
57959- 0 0 0 0 0 0 0 0 0 0 0 0
57960- 0 0 0 6 6 6 14 14 14 22 22 22
57961- 30 30 30 38 38 38 50 50 50 70 70 70
57962-106 106 106 190 142 34 226 170 11 242 186 14
57963-246 190 14 246 190 14 246 190 14 154 114 10
57964- 6 6 6 74 74 74 226 226 226 253 253 253
57965-253 253 253 253 253 253 253 253 253 253 253 253
57966-253 253 253 253 253 253 231 231 231 250 250 250
57967-253 253 253 253 253 253 253 253 253 253 253 253
57968-253 253 253 253 253 253 253 253 253 253 253 253
57969-253 253 253 253 253 253 253 253 253 253 253 253
57970-253 253 253 253 253 253 253 253 253 228 184 62
57971-241 196 14 241 208 19 232 195 16 38 30 10
57972- 2 2 6 2 2 6 2 2 6 2 2 6
57973- 2 2 6 6 6 6 30 30 30 26 26 26
57974-203 166 17 154 142 90 66 66 66 26 26 26
57975- 6 6 6 0 0 0 0 0 0 0 0 0
57976- 0 0 0 0 0 0 0 0 0 0 0 0
57977- 0 0 0 0 0 0 0 0 0 0 0 0
57978- 0 0 0 0 0 0 0 0 0 0 0 0
57979- 0 0 0 0 0 0 0 0 0 0 0 0
57980- 6 6 6 18 18 18 38 38 38 58 58 58
57981- 78 78 78 86 86 86 101 101 101 123 123 123
57982-175 146 61 210 150 10 234 174 13 246 186 14
57983-246 190 14 246 190 14 246 190 14 238 190 10
57984-102 78 10 2 2 6 46 46 46 198 198 198
57985-253 253 253 253 253 253 253 253 253 253 253 253
57986-253 253 253 253 253 253 234 234 234 242 242 242
57987-253 253 253 253 253 253 253 253 253 253 253 253
57988-253 253 253 253 253 253 253 253 253 253 253 253
57989-253 253 253 253 253 253 253 253 253 253 253 253
57990-253 253 253 253 253 253 253 253 253 224 178 62
57991-242 186 14 241 196 14 210 166 10 22 18 6
57992- 2 2 6 2 2 6 2 2 6 2 2 6
57993- 2 2 6 2 2 6 6 6 6 121 92 8
57994-238 202 15 232 195 16 82 82 82 34 34 34
57995- 10 10 10 0 0 0 0 0 0 0 0 0
57996- 0 0 0 0 0 0 0 0 0 0 0 0
57997- 0 0 0 0 0 0 0 0 0 0 0 0
57998- 0 0 0 0 0 0 0 0 0 0 0 0
57999- 0 0 0 0 0 0 0 0 0 0 0 0
58000- 14 14 14 38 38 38 70 70 70 154 122 46
58001-190 142 34 200 144 11 197 138 11 197 138 11
58002-213 154 11 226 170 11 242 186 14 246 190 14
58003-246 190 14 246 190 14 246 190 14 246 190 14
58004-225 175 15 46 32 6 2 2 6 22 22 22
58005-158 158 158 250 250 250 253 253 253 253 253 253
58006-253 253 253 253 253 253 253 253 253 253 253 253
58007-253 253 253 253 253 253 253 253 253 253 253 253
58008-253 253 253 253 253 253 253 253 253 253 253 253
58009-253 253 253 253 253 253 253 253 253 253 253 253
58010-253 253 253 250 250 250 242 242 242 224 178 62
58011-239 182 13 236 186 11 213 154 11 46 32 6
58012- 2 2 6 2 2 6 2 2 6 2 2 6
58013- 2 2 6 2 2 6 61 42 6 225 175 15
58014-238 190 10 236 186 11 112 100 78 42 42 42
58015- 14 14 14 0 0 0 0 0 0 0 0 0
58016- 0 0 0 0 0 0 0 0 0 0 0 0
58017- 0 0 0 0 0 0 0 0 0 0 0 0
58018- 0 0 0 0 0 0 0 0 0 0 0 0
58019- 0 0 0 0 0 0 0 0 0 6 6 6
58020- 22 22 22 54 54 54 154 122 46 213 154 11
58021-226 170 11 230 174 11 226 170 11 226 170 11
58022-236 178 12 242 186 14 246 190 14 246 190 14
58023-246 190 14 246 190 14 246 190 14 246 190 14
58024-241 196 14 184 144 12 10 10 10 2 2 6
58025- 6 6 6 116 116 116 242 242 242 253 253 253
58026-253 253 253 253 253 253 253 253 253 253 253 253
58027-253 253 253 253 253 253 253 253 253 253 253 253
58028-253 253 253 253 253 253 253 253 253 253 253 253
58029-253 253 253 253 253 253 253 253 253 253 253 253
58030-253 253 253 231 231 231 198 198 198 214 170 54
58031-236 178 12 236 178 12 210 150 10 137 92 6
58032- 18 14 6 2 2 6 2 2 6 2 2 6
58033- 6 6 6 70 47 6 200 144 11 236 178 12
58034-239 182 13 239 182 13 124 112 88 58 58 58
58035- 22 22 22 6 6 6 0 0 0 0 0 0
58036- 0 0 0 0 0 0 0 0 0 0 0 0
58037- 0 0 0 0 0 0 0 0 0 0 0 0
58038- 0 0 0 0 0 0 0 0 0 0 0 0
58039- 0 0 0 0 0 0 0 0 0 10 10 10
58040- 30 30 30 70 70 70 180 133 36 226 170 11
58041-239 182 13 242 186 14 242 186 14 246 186 14
58042-246 190 14 246 190 14 246 190 14 246 190 14
58043-246 190 14 246 190 14 246 190 14 246 190 14
58044-246 190 14 232 195 16 98 70 6 2 2 6
58045- 2 2 6 2 2 6 66 66 66 221 221 221
58046-253 253 253 253 253 253 253 253 253 253 253 253
58047-253 253 253 253 253 253 253 253 253 253 253 253
58048-253 253 253 253 253 253 253 253 253 253 253 253
58049-253 253 253 253 253 253 253 253 253 253 253 253
58050-253 253 253 206 206 206 198 198 198 214 166 58
58051-230 174 11 230 174 11 216 158 10 192 133 9
58052-163 110 8 116 81 8 102 78 10 116 81 8
58053-167 114 7 197 138 11 226 170 11 239 182 13
58054-242 186 14 242 186 14 162 146 94 78 78 78
58055- 34 34 34 14 14 14 6 6 6 0 0 0
58056- 0 0 0 0 0 0 0 0 0 0 0 0
58057- 0 0 0 0 0 0 0 0 0 0 0 0
58058- 0 0 0 0 0 0 0 0 0 0 0 0
58059- 0 0 0 0 0 0 0 0 0 6 6 6
58060- 30 30 30 78 78 78 190 142 34 226 170 11
58061-239 182 13 246 190 14 246 190 14 246 190 14
58062-246 190 14 246 190 14 246 190 14 246 190 14
58063-246 190 14 246 190 14 246 190 14 246 190 14
58064-246 190 14 241 196 14 203 166 17 22 18 6
58065- 2 2 6 2 2 6 2 2 6 38 38 38
58066-218 218 218 253 253 253 253 253 253 253 253 253
58067-253 253 253 253 253 253 253 253 253 253 253 253
58068-253 253 253 253 253 253 253 253 253 253 253 253
58069-253 253 253 253 253 253 253 253 253 253 253 253
58070-250 250 250 206 206 206 198 198 198 202 162 69
58071-226 170 11 236 178 12 224 166 10 210 150 10
58072-200 144 11 197 138 11 192 133 9 197 138 11
58073-210 150 10 226 170 11 242 186 14 246 190 14
58074-246 190 14 246 186 14 225 175 15 124 112 88
58075- 62 62 62 30 30 30 14 14 14 6 6 6
58076- 0 0 0 0 0 0 0 0 0 0 0 0
58077- 0 0 0 0 0 0 0 0 0 0 0 0
58078- 0 0 0 0 0 0 0 0 0 0 0 0
58079- 0 0 0 0 0 0 0 0 0 10 10 10
58080- 30 30 30 78 78 78 174 135 50 224 166 10
58081-239 182 13 246 190 14 246 190 14 246 190 14
58082-246 190 14 246 190 14 246 190 14 246 190 14
58083-246 190 14 246 190 14 246 190 14 246 190 14
58084-246 190 14 246 190 14 241 196 14 139 102 15
58085- 2 2 6 2 2 6 2 2 6 2 2 6
58086- 78 78 78 250 250 250 253 253 253 253 253 253
58087-253 253 253 253 253 253 253 253 253 253 253 253
58088-253 253 253 253 253 253 253 253 253 253 253 253
58089-253 253 253 253 253 253 253 253 253 253 253 253
58090-250 250 250 214 214 214 198 198 198 190 150 46
58091-219 162 10 236 178 12 234 174 13 224 166 10
58092-216 158 10 213 154 11 213 154 11 216 158 10
58093-226 170 11 239 182 13 246 190 14 246 190 14
58094-246 190 14 246 190 14 242 186 14 206 162 42
58095-101 101 101 58 58 58 30 30 30 14 14 14
58096- 6 6 6 0 0 0 0 0 0 0 0 0
58097- 0 0 0 0 0 0 0 0 0 0 0 0
58098- 0 0 0 0 0 0 0 0 0 0 0 0
58099- 0 0 0 0 0 0 0 0 0 10 10 10
58100- 30 30 30 74 74 74 174 135 50 216 158 10
58101-236 178 12 246 190 14 246 190 14 246 190 14
58102-246 190 14 246 190 14 246 190 14 246 190 14
58103-246 190 14 246 190 14 246 190 14 246 190 14
58104-246 190 14 246 190 14 241 196 14 226 184 13
58105- 61 42 6 2 2 6 2 2 6 2 2 6
58106- 22 22 22 238 238 238 253 253 253 253 253 253
58107-253 253 253 253 253 253 253 253 253 253 253 253
58108-253 253 253 253 253 253 253 253 253 253 253 253
58109-253 253 253 253 253 253 253 253 253 253 253 253
58110-253 253 253 226 226 226 187 187 187 180 133 36
58111-216 158 10 236 178 12 239 182 13 236 178 12
58112-230 174 11 226 170 11 226 170 11 230 174 11
58113-236 178 12 242 186 14 246 190 14 246 190 14
58114-246 190 14 246 190 14 246 186 14 239 182 13
58115-206 162 42 106 106 106 66 66 66 34 34 34
58116- 14 14 14 6 6 6 0 0 0 0 0 0
58117- 0 0 0 0 0 0 0 0 0 0 0 0
58118- 0 0 0 0 0 0 0 0 0 0 0 0
58119- 0 0 0 0 0 0 0 0 0 6 6 6
58120- 26 26 26 70 70 70 163 133 67 213 154 11
58121-236 178 12 246 190 14 246 190 14 246 190 14
58122-246 190 14 246 190 14 246 190 14 246 190 14
58123-246 190 14 246 190 14 246 190 14 246 190 14
58124-246 190 14 246 190 14 246 190 14 241 196 14
58125-190 146 13 18 14 6 2 2 6 2 2 6
58126- 46 46 46 246 246 246 253 253 253 253 253 253
58127-253 253 253 253 253 253 253 253 253 253 253 253
58128-253 253 253 253 253 253 253 253 253 253 253 253
58129-253 253 253 253 253 253 253 253 253 253 253 253
58130-253 253 253 221 221 221 86 86 86 156 107 11
58131-216 158 10 236 178 12 242 186 14 246 186 14
58132-242 186 14 239 182 13 239 182 13 242 186 14
58133-242 186 14 246 186 14 246 190 14 246 190 14
58134-246 190 14 246 190 14 246 190 14 246 190 14
58135-242 186 14 225 175 15 142 122 72 66 66 66
58136- 30 30 30 10 10 10 0 0 0 0 0 0
58137- 0 0 0 0 0 0 0 0 0 0 0 0
58138- 0 0 0 0 0 0 0 0 0 0 0 0
58139- 0 0 0 0 0 0 0 0 0 6 6 6
58140- 26 26 26 70 70 70 163 133 67 210 150 10
58141-236 178 12 246 190 14 246 190 14 246 190 14
58142-246 190 14 246 190 14 246 190 14 246 190 14
58143-246 190 14 246 190 14 246 190 14 246 190 14
58144-246 190 14 246 190 14 246 190 14 246 190 14
58145-232 195 16 121 92 8 34 34 34 106 106 106
58146-221 221 221 253 253 253 253 253 253 253 253 253
58147-253 253 253 253 253 253 253 253 253 253 253 253
58148-253 253 253 253 253 253 253 253 253 253 253 253
58149-253 253 253 253 253 253 253 253 253 253 253 253
58150-242 242 242 82 82 82 18 14 6 163 110 8
58151-216 158 10 236 178 12 242 186 14 246 190 14
58152-246 190 14 246 190 14 246 190 14 246 190 14
58153-246 190 14 246 190 14 246 190 14 246 190 14
58154-246 190 14 246 190 14 246 190 14 246 190 14
58155-246 190 14 246 190 14 242 186 14 163 133 67
58156- 46 46 46 18 18 18 6 6 6 0 0 0
58157- 0 0 0 0 0 0 0 0 0 0 0 0
58158- 0 0 0 0 0 0 0 0 0 0 0 0
58159- 0 0 0 0 0 0 0 0 0 10 10 10
58160- 30 30 30 78 78 78 163 133 67 210 150 10
58161-236 178 12 246 186 14 246 190 14 246 190 14
58162-246 190 14 246 190 14 246 190 14 246 190 14
58163-246 190 14 246 190 14 246 190 14 246 190 14
58164-246 190 14 246 190 14 246 190 14 246 190 14
58165-241 196 14 215 174 15 190 178 144 253 253 253
58166-253 253 253 253 253 253 253 253 253 253 253 253
58167-253 253 253 253 253 253 253 253 253 253 253 253
58168-253 253 253 253 253 253 253 253 253 253 253 253
58169-253 253 253 253 253 253 253 253 253 218 218 218
58170- 58 58 58 2 2 6 22 18 6 167 114 7
58171-216 158 10 236 178 12 246 186 14 246 190 14
58172-246 190 14 246 190 14 246 190 14 246 190 14
58173-246 190 14 246 190 14 246 190 14 246 190 14
58174-246 190 14 246 190 14 246 190 14 246 190 14
58175-246 190 14 246 186 14 242 186 14 190 150 46
58176- 54 54 54 22 22 22 6 6 6 0 0 0
58177- 0 0 0 0 0 0 0 0 0 0 0 0
58178- 0 0 0 0 0 0 0 0 0 0 0 0
58179- 0 0 0 0 0 0 0 0 0 14 14 14
58180- 38 38 38 86 86 86 180 133 36 213 154 11
58181-236 178 12 246 186 14 246 190 14 246 190 14
58182-246 190 14 246 190 14 246 190 14 246 190 14
58183-246 190 14 246 190 14 246 190 14 246 190 14
58184-246 190 14 246 190 14 246 190 14 246 190 14
58185-246 190 14 232 195 16 190 146 13 214 214 214
58186-253 253 253 253 253 253 253 253 253 253 253 253
58187-253 253 253 253 253 253 253 253 253 253 253 253
58188-253 253 253 253 253 253 253 253 253 253 253 253
58189-253 253 253 250 250 250 170 170 170 26 26 26
58190- 2 2 6 2 2 6 37 26 9 163 110 8
58191-219 162 10 239 182 13 246 186 14 246 190 14
58192-246 190 14 246 190 14 246 190 14 246 190 14
58193-246 190 14 246 190 14 246 190 14 246 190 14
58194-246 190 14 246 190 14 246 190 14 246 190 14
58195-246 186 14 236 178 12 224 166 10 142 122 72
58196- 46 46 46 18 18 18 6 6 6 0 0 0
58197- 0 0 0 0 0 0 0 0 0 0 0 0
58198- 0 0 0 0 0 0 0 0 0 0 0 0
58199- 0 0 0 0 0 0 6 6 6 18 18 18
58200- 50 50 50 109 106 95 192 133 9 224 166 10
58201-242 186 14 246 190 14 246 190 14 246 190 14
58202-246 190 14 246 190 14 246 190 14 246 190 14
58203-246 190 14 246 190 14 246 190 14 246 190 14
58204-246 190 14 246 190 14 246 190 14 246 190 14
58205-242 186 14 226 184 13 210 162 10 142 110 46
58206-226 226 226 253 253 253 253 253 253 253 253 253
58207-253 253 253 253 253 253 253 253 253 253 253 253
58208-253 253 253 253 253 253 253 253 253 253 253 253
58209-198 198 198 66 66 66 2 2 6 2 2 6
58210- 2 2 6 2 2 6 50 34 6 156 107 11
58211-219 162 10 239 182 13 246 186 14 246 190 14
58212-246 190 14 246 190 14 246 190 14 246 190 14
58213-246 190 14 246 190 14 246 190 14 246 190 14
58214-246 190 14 246 190 14 246 190 14 242 186 14
58215-234 174 13 213 154 11 154 122 46 66 66 66
58216- 30 30 30 10 10 10 0 0 0 0 0 0
58217- 0 0 0 0 0 0 0 0 0 0 0 0
58218- 0 0 0 0 0 0 0 0 0 0 0 0
58219- 0 0 0 0 0 0 6 6 6 22 22 22
58220- 58 58 58 154 121 60 206 145 10 234 174 13
58221-242 186 14 246 186 14 246 190 14 246 190 14
58222-246 190 14 246 190 14 246 190 14 246 190 14
58223-246 190 14 246 190 14 246 190 14 246 190 14
58224-246 190 14 246 190 14 246 190 14 246 190 14
58225-246 186 14 236 178 12 210 162 10 163 110 8
58226- 61 42 6 138 138 138 218 218 218 250 250 250
58227-253 253 253 253 253 253 253 253 253 250 250 250
58228-242 242 242 210 210 210 144 144 144 66 66 66
58229- 6 6 6 2 2 6 2 2 6 2 2 6
58230- 2 2 6 2 2 6 61 42 6 163 110 8
58231-216 158 10 236 178 12 246 190 14 246 190 14
58232-246 190 14 246 190 14 246 190 14 246 190 14
58233-246 190 14 246 190 14 246 190 14 246 190 14
58234-246 190 14 239 182 13 230 174 11 216 158 10
58235-190 142 34 124 112 88 70 70 70 38 38 38
58236- 18 18 18 6 6 6 0 0 0 0 0 0
58237- 0 0 0 0 0 0 0 0 0 0 0 0
58238- 0 0 0 0 0 0 0 0 0 0 0 0
58239- 0 0 0 0 0 0 6 6 6 22 22 22
58240- 62 62 62 168 124 44 206 145 10 224 166 10
58241-236 178 12 239 182 13 242 186 14 242 186 14
58242-246 186 14 246 190 14 246 190 14 246 190 14
58243-246 190 14 246 190 14 246 190 14 246 190 14
58244-246 190 14 246 190 14 246 190 14 246 190 14
58245-246 190 14 236 178 12 216 158 10 175 118 6
58246- 80 54 7 2 2 6 6 6 6 30 30 30
58247- 54 54 54 62 62 62 50 50 50 38 38 38
58248- 14 14 14 2 2 6 2 2 6 2 2 6
58249- 2 2 6 2 2 6 2 2 6 2 2 6
58250- 2 2 6 6 6 6 80 54 7 167 114 7
58251-213 154 11 236 178 12 246 190 14 246 190 14
58252-246 190 14 246 190 14 246 190 14 246 190 14
58253-246 190 14 242 186 14 239 182 13 239 182 13
58254-230 174 11 210 150 10 174 135 50 124 112 88
58255- 82 82 82 54 54 54 34 34 34 18 18 18
58256- 6 6 6 0 0 0 0 0 0 0 0 0
58257- 0 0 0 0 0 0 0 0 0 0 0 0
58258- 0 0 0 0 0 0 0 0 0 0 0 0
58259- 0 0 0 0 0 0 6 6 6 18 18 18
58260- 50 50 50 158 118 36 192 133 9 200 144 11
58261-216 158 10 219 162 10 224 166 10 226 170 11
58262-230 174 11 236 178 12 239 182 13 239 182 13
58263-242 186 14 246 186 14 246 190 14 246 190 14
58264-246 190 14 246 190 14 246 190 14 246 190 14
58265-246 186 14 230 174 11 210 150 10 163 110 8
58266-104 69 6 10 10 10 2 2 6 2 2 6
58267- 2 2 6 2 2 6 2 2 6 2 2 6
58268- 2 2 6 2 2 6 2 2 6 2 2 6
58269- 2 2 6 2 2 6 2 2 6 2 2 6
58270- 2 2 6 6 6 6 91 60 6 167 114 7
58271-206 145 10 230 174 11 242 186 14 246 190 14
58272-246 190 14 246 190 14 246 186 14 242 186 14
58273-239 182 13 230 174 11 224 166 10 213 154 11
58274-180 133 36 124 112 88 86 86 86 58 58 58
58275- 38 38 38 22 22 22 10 10 10 6 6 6
58276- 0 0 0 0 0 0 0 0 0 0 0 0
58277- 0 0 0 0 0 0 0 0 0 0 0 0
58278- 0 0 0 0 0 0 0 0 0 0 0 0
58279- 0 0 0 0 0 0 0 0 0 14 14 14
58280- 34 34 34 70 70 70 138 110 50 158 118 36
58281-167 114 7 180 123 7 192 133 9 197 138 11
58282-200 144 11 206 145 10 213 154 11 219 162 10
58283-224 166 10 230 174 11 239 182 13 242 186 14
58284-246 186 14 246 186 14 246 186 14 246 186 14
58285-239 182 13 216 158 10 185 133 11 152 99 6
58286-104 69 6 18 14 6 2 2 6 2 2 6
58287- 2 2 6 2 2 6 2 2 6 2 2 6
58288- 2 2 6 2 2 6 2 2 6 2 2 6
58289- 2 2 6 2 2 6 2 2 6 2 2 6
58290- 2 2 6 6 6 6 80 54 7 152 99 6
58291-192 133 9 219 162 10 236 178 12 239 182 13
58292-246 186 14 242 186 14 239 182 13 236 178 12
58293-224 166 10 206 145 10 192 133 9 154 121 60
58294- 94 94 94 62 62 62 42 42 42 22 22 22
58295- 14 14 14 6 6 6 0 0 0 0 0 0
58296- 0 0 0 0 0 0 0 0 0 0 0 0
58297- 0 0 0 0 0 0 0 0 0 0 0 0
58298- 0 0 0 0 0 0 0 0 0 0 0 0
58299- 0 0 0 0 0 0 0 0 0 6 6 6
58300- 18 18 18 34 34 34 58 58 58 78 78 78
58301-101 98 89 124 112 88 142 110 46 156 107 11
58302-163 110 8 167 114 7 175 118 6 180 123 7
58303-185 133 11 197 138 11 210 150 10 219 162 10
58304-226 170 11 236 178 12 236 178 12 234 174 13
58305-219 162 10 197 138 11 163 110 8 130 83 6
58306- 91 60 6 10 10 10 2 2 6 2 2 6
58307- 18 18 18 38 38 38 38 38 38 38 38 38
58308- 38 38 38 38 38 38 38 38 38 38 38 38
58309- 38 38 38 38 38 38 26 26 26 2 2 6
58310- 2 2 6 6 6 6 70 47 6 137 92 6
58311-175 118 6 200 144 11 219 162 10 230 174 11
58312-234 174 13 230 174 11 219 162 10 210 150 10
58313-192 133 9 163 110 8 124 112 88 82 82 82
58314- 50 50 50 30 30 30 14 14 14 6 6 6
58315- 0 0 0 0 0 0 0 0 0 0 0 0
58316- 0 0 0 0 0 0 0 0 0 0 0 0
58317- 0 0 0 0 0 0 0 0 0 0 0 0
58318- 0 0 0 0 0 0 0 0 0 0 0 0
58319- 0 0 0 0 0 0 0 0 0 0 0 0
58320- 6 6 6 14 14 14 22 22 22 34 34 34
58321- 42 42 42 58 58 58 74 74 74 86 86 86
58322-101 98 89 122 102 70 130 98 46 121 87 25
58323-137 92 6 152 99 6 163 110 8 180 123 7
58324-185 133 11 197 138 11 206 145 10 200 144 11
58325-180 123 7 156 107 11 130 83 6 104 69 6
58326- 50 34 6 54 54 54 110 110 110 101 98 89
58327- 86 86 86 82 82 82 78 78 78 78 78 78
58328- 78 78 78 78 78 78 78 78 78 78 78 78
58329- 78 78 78 82 82 82 86 86 86 94 94 94
58330-106 106 106 101 101 101 86 66 34 124 80 6
58331-156 107 11 180 123 7 192 133 9 200 144 11
58332-206 145 10 200 144 11 192 133 9 175 118 6
58333-139 102 15 109 106 95 70 70 70 42 42 42
58334- 22 22 22 10 10 10 0 0 0 0 0 0
58335- 0 0 0 0 0 0 0 0 0 0 0 0
58336- 0 0 0 0 0 0 0 0 0 0 0 0
58337- 0 0 0 0 0 0 0 0 0 0 0 0
58338- 0 0 0 0 0 0 0 0 0 0 0 0
58339- 0 0 0 0 0 0 0 0 0 0 0 0
58340- 0 0 0 0 0 0 6 6 6 10 10 10
58341- 14 14 14 22 22 22 30 30 30 38 38 38
58342- 50 50 50 62 62 62 74 74 74 90 90 90
58343-101 98 89 112 100 78 121 87 25 124 80 6
58344-137 92 6 152 99 6 152 99 6 152 99 6
58345-138 86 6 124 80 6 98 70 6 86 66 30
58346-101 98 89 82 82 82 58 58 58 46 46 46
58347- 38 38 38 34 34 34 34 34 34 34 34 34
58348- 34 34 34 34 34 34 34 34 34 34 34 34
58349- 34 34 34 34 34 34 38 38 38 42 42 42
58350- 54 54 54 82 82 82 94 86 76 91 60 6
58351-134 86 6 156 107 11 167 114 7 175 118 6
58352-175 118 6 167 114 7 152 99 6 121 87 25
58353-101 98 89 62 62 62 34 34 34 18 18 18
58354- 6 6 6 0 0 0 0 0 0 0 0 0
58355- 0 0 0 0 0 0 0 0 0 0 0 0
58356- 0 0 0 0 0 0 0 0 0 0 0 0
58357- 0 0 0 0 0 0 0 0 0 0 0 0
58358- 0 0 0 0 0 0 0 0 0 0 0 0
58359- 0 0 0 0 0 0 0 0 0 0 0 0
58360- 0 0 0 0 0 0 0 0 0 0 0 0
58361- 0 0 0 6 6 6 6 6 6 10 10 10
58362- 18 18 18 22 22 22 30 30 30 42 42 42
58363- 50 50 50 66 66 66 86 86 86 101 98 89
58364-106 86 58 98 70 6 104 69 6 104 69 6
58365-104 69 6 91 60 6 82 62 34 90 90 90
58366- 62 62 62 38 38 38 22 22 22 14 14 14
58367- 10 10 10 10 10 10 10 10 10 10 10 10
58368- 10 10 10 10 10 10 6 6 6 10 10 10
58369- 10 10 10 10 10 10 10 10 10 14 14 14
58370- 22 22 22 42 42 42 70 70 70 89 81 66
58371- 80 54 7 104 69 6 124 80 6 137 92 6
58372-134 86 6 116 81 8 100 82 52 86 86 86
58373- 58 58 58 30 30 30 14 14 14 6 6 6
58374- 0 0 0 0 0 0 0 0 0 0 0 0
58375- 0 0 0 0 0 0 0 0 0 0 0 0
58376- 0 0 0 0 0 0 0 0 0 0 0 0
58377- 0 0 0 0 0 0 0 0 0 0 0 0
58378- 0 0 0 0 0 0 0 0 0 0 0 0
58379- 0 0 0 0 0 0 0 0 0 0 0 0
58380- 0 0 0 0 0 0 0 0 0 0 0 0
58381- 0 0 0 0 0 0 0 0 0 0 0 0
58382- 0 0 0 6 6 6 10 10 10 14 14 14
58383- 18 18 18 26 26 26 38 38 38 54 54 54
58384- 70 70 70 86 86 86 94 86 76 89 81 66
58385- 89 81 66 86 86 86 74 74 74 50 50 50
58386- 30 30 30 14 14 14 6 6 6 0 0 0
58387- 0 0 0 0 0 0 0 0 0 0 0 0
58388- 0 0 0 0 0 0 0 0 0 0 0 0
58389- 0 0 0 0 0 0 0 0 0 0 0 0
58390- 6 6 6 18 18 18 34 34 34 58 58 58
58391- 82 82 82 89 81 66 89 81 66 89 81 66
58392- 94 86 66 94 86 76 74 74 74 50 50 50
58393- 26 26 26 14 14 14 6 6 6 0 0 0
58394- 0 0 0 0 0 0 0 0 0 0 0 0
58395- 0 0 0 0 0 0 0 0 0 0 0 0
58396- 0 0 0 0 0 0 0 0 0 0 0 0
58397- 0 0 0 0 0 0 0 0 0 0 0 0
58398- 0 0 0 0 0 0 0 0 0 0 0 0
58399- 0 0 0 0 0 0 0 0 0 0 0 0
58400- 0 0 0 0 0 0 0 0 0 0 0 0
58401- 0 0 0 0 0 0 0 0 0 0 0 0
58402- 0 0 0 0 0 0 0 0 0 0 0 0
58403- 6 6 6 6 6 6 14 14 14 18 18 18
58404- 30 30 30 38 38 38 46 46 46 54 54 54
58405- 50 50 50 42 42 42 30 30 30 18 18 18
58406- 10 10 10 0 0 0 0 0 0 0 0 0
58407- 0 0 0 0 0 0 0 0 0 0 0 0
58408- 0 0 0 0 0 0 0 0 0 0 0 0
58409- 0 0 0 0 0 0 0 0 0 0 0 0
58410- 0 0 0 6 6 6 14 14 14 26 26 26
58411- 38 38 38 50 50 50 58 58 58 58 58 58
58412- 54 54 54 42 42 42 30 30 30 18 18 18
58413- 10 10 10 0 0 0 0 0 0 0 0 0
58414- 0 0 0 0 0 0 0 0 0 0 0 0
58415- 0 0 0 0 0 0 0 0 0 0 0 0
58416- 0 0 0 0 0 0 0 0 0 0 0 0
58417- 0 0 0 0 0 0 0 0 0 0 0 0
58418- 0 0 0 0 0 0 0 0 0 0 0 0
58419- 0 0 0 0 0 0 0 0 0 0 0 0
58420- 0 0 0 0 0 0 0 0 0 0 0 0
58421- 0 0 0 0 0 0 0 0 0 0 0 0
58422- 0 0 0 0 0 0 0 0 0 0 0 0
58423- 0 0 0 0 0 0 0 0 0 6 6 6
58424- 6 6 6 10 10 10 14 14 14 18 18 18
58425- 18 18 18 14 14 14 10 10 10 6 6 6
58426- 0 0 0 0 0 0 0 0 0 0 0 0
58427- 0 0 0 0 0 0 0 0 0 0 0 0
58428- 0 0 0 0 0 0 0 0 0 0 0 0
58429- 0 0 0 0 0 0 0 0 0 0 0 0
58430- 0 0 0 0 0 0 0 0 0 6 6 6
58431- 14 14 14 18 18 18 22 22 22 22 22 22
58432- 18 18 18 14 14 14 10 10 10 6 6 6
58433- 0 0 0 0 0 0 0 0 0 0 0 0
58434- 0 0 0 0 0 0 0 0 0 0 0 0
58435- 0 0 0 0 0 0 0 0 0 0 0 0
58436- 0 0 0 0 0 0 0 0 0 0 0 0
58437- 0 0 0 0 0 0 0 0 0 0 0 0
58438+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58439+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58440+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58441+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58442+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58444+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58445+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58448+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58449+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58450+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58451+4 4 4 4 4 4
58452+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58453+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58454+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58455+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58456+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58458+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58459+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58460+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58462+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58463+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58464+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58465+4 4 4 4 4 4
58466+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58467+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58468+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58469+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58470+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58471+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58472+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58473+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58474+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58475+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58476+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58477+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58478+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58479+4 4 4 4 4 4
58480+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58481+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58482+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58483+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58484+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58486+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58487+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58488+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58489+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58490+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58491+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58492+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58493+4 4 4 4 4 4
58494+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58495+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58496+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58497+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58498+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58500+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58501+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58503+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58504+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58505+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58506+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58507+4 4 4 4 4 4
58508+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58509+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58510+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58511+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58512+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58514+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58515+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58516+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58517+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58518+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58519+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58520+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58521+4 4 4 4 4 4
58522+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58523+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58524+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58525+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58526+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
58527+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
58528+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58529+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58530+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58531+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
58532+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58533+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
58534+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58535+4 4 4 4 4 4
58536+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58537+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58538+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58539+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58540+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
58541+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
58542+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58543+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58544+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58545+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
58546+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
58547+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
58548+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58549+4 4 4 4 4 4
58550+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58551+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58552+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58553+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58554+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
58555+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
58556+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58557+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58558+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58559+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
58560+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
58561+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
58562+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
58563+4 4 4 4 4 4
58564+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58565+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58566+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58567+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
58568+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
58569+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
58570+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
58571+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58572+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
58573+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
58574+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
58575+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
58576+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
58577+4 4 4 4 4 4
58578+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58579+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58580+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58581+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
58582+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
58583+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
58584+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
58585+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58586+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
58587+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
58588+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
58589+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
58590+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
58591+4 4 4 4 4 4
58592+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58594+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
58595+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
58596+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
58597+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
58598+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
58599+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
58600+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
58601+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
58602+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
58603+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
58604+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
58605+4 4 4 4 4 4
58606+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58608+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
58609+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
58610+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
58611+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
58612+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
58613+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
58614+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
58615+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
58616+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
58617+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
58618+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
58619+4 4 4 4 4 4
58620+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58621+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58622+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
58623+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
58624+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
58625+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
58626+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
58627+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
58628+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
58629+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
58630+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
58631+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
58632+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
58633+4 4 4 4 4 4
58634+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58635+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58636+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
58637+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
58638+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
58639+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
58640+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
58641+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
58642+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
58643+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
58644+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
58645+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
58646+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58647+4 4 4 4 4 4
58648+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58649+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58650+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
58651+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
58652+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
58653+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
58654+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
58655+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
58656+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
58657+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
58658+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
58659+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
58660+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
58661+4 4 4 4 4 4
58662+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58663+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
58664+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
58665+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
58666+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
58667+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
58668+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
58669+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
58670+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
58671+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
58672+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
58673+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
58674+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
58675+4 4 4 4 4 4
58676+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58677+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
58678+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
58679+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
58680+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
58681+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
58682+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
58683+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
58684+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
58685+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
58686+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
58687+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
58688+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
58689+0 0 0 4 4 4
58690+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
58691+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
58692+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
58693+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
58694+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
58695+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
58696+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
58697+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
58698+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
58699+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
58700+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
58701+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
58702+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
58703+2 0 0 0 0 0
58704+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
58705+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
58706+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
58707+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
58708+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
58709+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
58710+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
58711+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
58712+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
58713+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
58714+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
58715+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
58716+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
58717+37 38 37 0 0 0
58718+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
58719+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
58720+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
58721+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
58722+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
58723+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
58724+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
58725+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
58726+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
58727+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
58728+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
58729+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
58730+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
58731+85 115 134 4 0 0
58732+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
58733+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
58734+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
58735+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
58736+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
58737+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
58738+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
58739+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
58740+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
58741+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
58742+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
58743+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
58744+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
58745+60 73 81 4 0 0
58746+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
58747+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
58748+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
58749+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
58750+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
58751+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
58752+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
58753+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
58754+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
58755+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
58756+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
58757+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
58758+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
58759+16 19 21 4 0 0
58760+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
58761+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
58762+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
58763+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
58764+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
58765+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
58766+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
58767+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
58768+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
58769+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
58770+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
58771+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
58772+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
58773+4 0 0 4 3 3
58774+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
58775+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
58776+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
58777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
58778+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
58779+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
58780+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
58781+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
58782+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
58783+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
58784+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
58785+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
58786+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
58787+3 2 2 4 4 4
58788+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
58789+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
58790+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
58791+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
58792+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
58793+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
58794+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
58795+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
58796+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
58797+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
58798+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
58799+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
58800+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
58801+4 4 4 4 4 4
58802+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
58803+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
58804+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
58805+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
58806+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
58807+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
58808+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
58809+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
58810+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
58811+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
58812+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
58813+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
58814+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
58815+4 4 4 4 4 4
58816+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
58817+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
58818+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
58819+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
58820+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
58821+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
58822+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
58823+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
58824+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
58825+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
58826+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
58827+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
58828+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
58829+5 5 5 5 5 5
58830+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
58831+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
58832+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
58833+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
[elided: several thousand added lines of raw "R G B" pixel triplets -- the patch's replacement boot-logo image data (apparently a CLUT224 PPM under drivers/video/logo, judging by where it sits in the diff); no code content]
59558diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
59559index fef20db..d28b1ab 100644
59560--- a/drivers/xen/xenfs/xenstored.c
59561+++ b/drivers/xen/xenfs/xenstored.c
59562@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
59563 static int xsd_kva_open(struct inode *inode, struct file *file)
59564 {
59565 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
59566+#ifdef CONFIG_GRKERNSEC_HIDESYM
59567+ NULL);
59568+#else
59569 xen_store_interface);
59570+#endif
59571+
59572 if (!file->private_data)
59573 return -ENOMEM;
59574 return 0;
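The hunk above makes xsd_kva_open() format NULL instead of the real xen_store_interface pointer when GRKERNSEC_HIDESYM is enabled, so this xenfs file can no longer disclose a live kernel address to userland. A minimal userspace sketch of the idea, assuming nothing beyond libc (asprintf stands in for kasprintf, and HIDESYM for the kernel config option):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

#define HIDESYM 1	/* stand-in for CONFIG_GRKERNSEC_HIDESYM */

int main(void)
{
	int secret;	/* stands in for xen_store_interface */
	const void *shown;
	char *buf;

#if HIDESYM
	shown = NULL;	/* userland sees "0x(nil)"; no address leaks */
#else
	shown = &secret;	/* would disclose a live runtime address */
#endif
	if (asprintf(&buf, "0x%p", shown) < 0)
		return 1;
	puts(buf);
	free(buf);
	return 0;
}

The caller-visible behaviour (a string is still allocated and returned) is unchanged; only the information content of the string differs.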
59575diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
59576index cc1cfae..41158ad 100644
59577--- a/fs/9p/vfs_addr.c
59578+++ b/fs/9p/vfs_addr.c
59579@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
59580
59581 retval = v9fs_file_write_internal(inode,
59582 v9inode->writeback_fid,
59583- (__force const char __user *)buffer,
59584+ (const char __force_user *)buffer,
59585 len, &offset, 0);
59586 if (retval > 0)
59587 retval = 0;
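__force_user is grsecurity shorthand for two sparse annotations at once: under sparse (__CHECKER__) the cast both places the pointer in the user address space and suppresses the address-space warning that a bare cast would trigger, and under a normal gcc build it compiles away entirely. A sketch of the relevant definitions as they appear in the kernel's compiler headers (the combined __force_user spelling itself is added elsewhere in this patch; the demo function is purely illustrative):

/* sparse (__CHECKER__) sees address-space attributes; gcc sees nothing */
#ifdef __CHECKER__
# define __user   __attribute__((noderef, address_space(1)))
# define __force  __attribute__((force))
#else
# define __user
# define __force
#endif

/* grsecurity's combined spelling, so the intent reads as one token */
#define __force_user __force __user

static long copy_len(const char __force_user *buf, long len)
{
	/* sparse now tracks buf as a userland pointer and would flag any
	 * direct dereference; plain gcc compiles the annotation away */
	(void)buf;
	return len;
}

int main(void)
{
	char kbuf[8] = "data";
	return (int)copy_len((const char __force_user *)kbuf, sizeof(kbuf)) - 8;
}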
59588diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
59589index 7fa4f7a..a7ebf8c 100644
59590--- a/fs/9p/vfs_inode.c
59591+++ b/fs/9p/vfs_inode.c
59592@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
59593 void
59594 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
59595 {
59596- char *s = nd_get_link(nd);
59597+ const char *s = nd_get_link(nd);
59598
59599 p9_debug(P9_DEBUG_VFS, " %s %s\n",
59600 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
59601diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
59602index 370b24c..ff0be7b 100644
59603--- a/fs/Kconfig.binfmt
59604+++ b/fs/Kconfig.binfmt
59605@@ -103,7 +103,7 @@ config HAVE_AOUT
59606
59607 config BINFMT_AOUT
59608 tristate "Kernel support for a.out and ECOFF binaries"
59609- depends on HAVE_AOUT
59610+ depends on HAVE_AOUT && BROKEN
59611 ---help---
59612 A.out (Assembler.OUTput) is a set of formats for libraries and
59613 executables used in the earliest versions of UNIX. Linux used
59614diff --git a/fs/afs/inode.c b/fs/afs/inode.c
59615index 2946712..f737435 100644
59616--- a/fs/afs/inode.c
59617+++ b/fs/afs/inode.c
59618@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
59619 struct afs_vnode *vnode;
59620 struct super_block *sb;
59621 struct inode *inode;
59622- static atomic_t afs_autocell_ino;
59623+ static atomic_unchecked_t afs_autocell_ino;
59624
59625 _enter("{%x:%u},%*.*s,",
59626 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
59627@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
59628 data.fid.unique = 0;
59629 data.fid.vnode = 0;
59630
59631- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
59632+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
59633 afs_iget5_autocell_test, afs_iget5_set,
59634 &data);
59635 if (!inode) {
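afs_autocell_ino is only an inode-number cookie, so wraparound is harmless; converting it to atomic_unchecked_t exempts it from PaX's REFCOUNT overflow detection, which would otherwise treat a wrap of this counter as a refcount bug. A hedged userspace analogue using C11 atomics (names are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdio.h>

/* Analogue of atomic_inc_return_unchecked(): a counter whose wraparound
 * is intentional and must not trip overflow/saturation instrumentation. */
static atomic_uint afs_autocell_ino;

static unsigned int next_autocell_ino(void)
{
	/* unsigned wrap is well-defined; even a wrapped value is still a
	 * usable hash cookie for an iget5_locked()-style lookup */
	return atomic_fetch_add(&afs_autocell_ino, 1) + 1;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("autocell ino cookie: %u\n", next_autocell_ino());
	return 0;
}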
59636diff --git a/fs/aio.c b/fs/aio.c
59637index 1c9c5f0..c935d6e 100644
59638--- a/fs/aio.c
59639+++ b/fs/aio.c
59640@@ -141,6 +141,7 @@ struct kioctx {
59641
59642 struct {
59643 unsigned tail;
59644+ unsigned completed_events;
59645 spinlock_t completion_lock;
59646 } ____cacheline_aligned_in_smp;
59647
59648@@ -380,7 +381,7 @@ static int aio_setup_ring(struct kioctx *ctx)
59649 size += sizeof(struct io_event) * nr_events;
59650
59651 nr_pages = PFN_UP(size);
59652- if (nr_pages < 0)
59653+ if (nr_pages <= 0)
59654 return -EINVAL;
59655
59656 file = aio_private_file(ctx, nr_pages);
59657@@ -880,6 +881,68 @@ out:
59658 return ret;
59659 }
59660
59661+/* refill_reqs_available
59662+ * Updates the reqs_available reference counts used for tracking the
59663+ * number of free slots in the completion ring. This can be called
59664+ * from aio_complete() (to optimistically update reqs_available) or
59665+ * from aio_get_req() (the "we're out of events" case). It must be
59666+ * called holding ctx->completion_lock.
59667+ */
59668+static void refill_reqs_available(struct kioctx *ctx, unsigned head,
59669+ unsigned tail)
59670+{
59671+ unsigned events_in_ring, completed;
59672+
59673+ /* Clamp head since userland can write to it. */
59674+ head %= ctx->nr_events;
59675+ if (head <= tail)
59676+ events_in_ring = tail - head;
59677+ else
59678+ events_in_ring = ctx->nr_events - (head - tail);
59679+
59680+ completed = ctx->completed_events;
59681+ if (events_in_ring < completed)
59682+ completed -= events_in_ring;
59683+ else
59684+ completed = 0;
59685+
59686+ if (!completed)
59687+ return;
59688+
59689+ ctx->completed_events -= completed;
59690+ put_reqs_available(ctx, completed);
59691+}
59692+
59693+/* user_refill_reqs_available
59694+ * Called to refill reqs_available when aio_get_req() encounters an
59695+ * out-of-space condition in the completion ring.
59696+ */
59697+static void user_refill_reqs_available(struct kioctx *ctx)
59698+{
59699+ spin_lock_irq(&ctx->completion_lock);
59700+ if (ctx->completed_events) {
59701+ struct aio_ring *ring;
59702+ unsigned head;
59703+
59704+ /* Access of ring->head may race with aio_read_events_ring()
59705+ * here, but that's okay since whether we read the old version
59706+ * or the new version, either will be valid. The important
59707+ * part is that head cannot pass tail since we prevent
59708+ * aio_complete() from updating tail by holding
59709+ * ctx->completion_lock. Even if head is invalid, the check
59710+ * against ctx->completed_events below will make sure we do the
59711+ * safe/right thing.
59712+ */
59713+ ring = kmap_atomic(ctx->ring_pages[0]);
59714+ head = ring->head;
59715+ kunmap_atomic(ring);
59716+
59717+ refill_reqs_available(ctx, head, ctx->tail);
59718+ }
59719+
59720+ spin_unlock_irq(&ctx->completion_lock);
59721+}
59722+
59723 /* aio_get_req
59724 * Allocate a slot for an aio request.
59725 * Returns NULL if no requests are free.
59726@@ -888,8 +951,11 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
59727 {
59728 struct kiocb *req;
59729
59730- if (!get_reqs_available(ctx))
59731- return NULL;
59732+ if (!get_reqs_available(ctx)) {
59733+ user_refill_reqs_available(ctx);
59734+ if (!get_reqs_available(ctx))
59735+ return NULL;
59736+ }
59737
59738 req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
59739 if (unlikely(!req))
59740@@ -948,8 +1014,8 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
59741 struct kioctx *ctx = iocb->ki_ctx;
59742 struct aio_ring *ring;
59743 struct io_event *ev_page, *event;
59744+ unsigned tail, pos, head;
59745 unsigned long flags;
59746- unsigned tail, pos;
59747
59748 /*
59749 * Special case handling for sync iocbs:
59750@@ -1010,10 +1076,14 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
59751 ctx->tail = tail;
59752
59753 ring = kmap_atomic(ctx->ring_pages[0]);
59754+ head = ring->head;
59755 ring->tail = tail;
59756 kunmap_atomic(ring);
59757 flush_dcache_page(ctx->ring_pages[0]);
59758
59759+ ctx->completed_events++;
59760+ if (ctx->completed_events > 1)
59761+ refill_reqs_available(ctx, head, tail);
59762 spin_unlock_irqrestore(&ctx->completion_lock, flags);
59763
59764 pr_debug("added to ring %p at [%u]\n", iocb, tail);
59765@@ -1028,7 +1098,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
59766
59767 /* everything turned out well, dispose of the aiocb. */
59768 kiocb_free(iocb);
59769- put_reqs_available(ctx, 1);
59770
59771 /*
59772 * We have to order our ring_info tail store above and test
59773@@ -1065,6 +1134,12 @@ static long aio_read_events_ring(struct kioctx *ctx,
59774 tail = ring->tail;
59775 kunmap_atomic(ring);
59776
59777+ /*
59778+ * Ensure that once we've read the current tail pointer, that
59779+ * we also see the events that were stored up to the tail.
59780+ */
59781+ smp_rmb();
59782+
59783 pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
59784
59785 if (head == tail)
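The heart of refill_reqs_available() above is modular ring arithmetic: clamp the userland-writable head into range, work out how many events are still sitting in the ring, and release only the completions that have already been consumed. A standalone sketch of that computation, with field names mirroring the patch (the wrapper function and test values are mine):

#include <assert.h>
#include <stdio.h>

/* How many completed events can be returned to reqs_available, given a
 * ring of nr_events slots, a (possibly bogus) head, and the tail. */
static unsigned refill_count(unsigned nr_events, unsigned head,
			     unsigned tail, unsigned completed_events)
{
	unsigned events_in_ring;

	head %= nr_events;	/* clamp: userland can scribble on head */
	if (head <= tail)
		events_in_ring = tail - head;
	else
		events_in_ring = nr_events - (head - tail);

	/* events still unread in the ring are not reclaimable yet */
	return completed_events > events_in_ring
		? completed_events - events_in_ring : 0;
}

int main(void)
{
	/* 128-slot ring, 10 completions recorded, 5 still unread in the
	 * ring between head=120 and tail=125 => 5 slots reclaimable */
	assert(refill_count(128, 120, 125, 10) == 5);
	/* more unread events than recorded completions => nothing to do */
	assert(refill_count(128, 120, 125, 3) == 0);
	puts("ok");
	return 0;
}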
59786diff --git a/fs/attr.c b/fs/attr.c
59787index 6530ced..4a827e2 100644
59788--- a/fs/attr.c
59789+++ b/fs/attr.c
59790@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
59791 unsigned long limit;
59792
59793 limit = rlimit(RLIMIT_FSIZE);
59794+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
59795 if (limit != RLIM_INFINITY && offset > limit)
59796 goto out_sig;
59797 if (offset > inode->i_sb->s_maxbytes)
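gr_learn_resource() is grsecurity's RBAC learning hook: it records the resource request (here, the proposed file size against RLIMIT_FSIZE) before the ordinary limit check runs, so learning mode can later suggest sane limits. A userspace sketch of the check the hook sits in front of (the hook itself is grsecurity-internal and not reproduced):

#include <sys/resource.h>
#include <stdio.h>

/* Mirrors the RLIMIT_FSIZE test in inode_newsize_ok() for a proposed size. */
static int newsize_ok(unsigned long long offset)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_FSIZE, &rl) != 0)
		return -1;
	/* grsec's learning hook would record (resource, requested value) here */
	if (rl.rlim_cur != RLIM_INFINITY && offset > rl.rlim_cur)
		return -1;	/* the kernel raises SIGXFSZ / returns -EFBIG */
	return 0;
}

int main(void)
{
	printf("grow file to 1 MiB: %s\n",
	       newsize_ok(1ULL << 20) == 0 ? "allowed" : "denied");
	return 0;
}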
59798diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
59799index 116fd38..c04182da 100644
59800--- a/fs/autofs4/waitq.c
59801+++ b/fs/autofs4/waitq.c
59802@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
59803 {
59804 unsigned long sigpipe, flags;
59805 mm_segment_t fs;
59806- const char *data = (const char *)addr;
59807+ const char __user *data = (const char __force_user *)addr;
59808 ssize_t wr = 0;
59809
59810 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
59811@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
59812 return 1;
59813 }
59814
59815+#ifdef CONFIG_GRKERNSEC_HIDESYM
59816+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
59817+#endif
59818+
59819 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
59820 enum autofs_notify notify)
59821 {
59822@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
59823
59824 /* If this is a direct mount request create a dummy name */
59825 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
59826+#ifdef CONFIG_GRKERNSEC_HIDESYM
59827+ /* this name does get written to userland via autofs4_write() */
59828+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
59829+#else
59830 qstr.len = sprintf(name, "%p", dentry);
59831+#endif
59832 else {
59833 qstr.len = autofs4_getpath(sbi, dentry, &name);
59834 if (!qstr.len) {
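The autofs4 change swaps the "%p" dentry pointer -- a kernel address that reaches userland through autofs4_write() -- for a monotonically increasing id, which is just as unique per direct-mount request but leaks nothing. A userspace sketch of the two name formats (a plain counter stands in for the atomic_unchecked_t one; the dentry is a local stand-in):

#include <stdio.h>

static unsigned int autofs_dummy_name_id;	/* analogue of the new counter */

int main(void)
{
	char name[32];
	int dentry;	/* stands in for the kernel dentry */
	int len;

	/* old behaviour: the dummy name embeds a raw address */
	len = sprintf(name, "%p", (void *)&dentry);
	printf("pointer-based name (len %d): %s\n", len, name);

	/* HIDESYM behaviour: same uniqueness, no address disclosure */
	len = sprintf(name, "%08x", ++autofs_dummy_name_id);
	printf("counter-based name (len %d): %s\n", len, name);
	return 0;
}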
59835diff --git a/fs/befs/endian.h b/fs/befs/endian.h
59836index 2722387..56059b5 100644
59837--- a/fs/befs/endian.h
59838+++ b/fs/befs/endian.h
59839@@ -11,7 +11,7 @@
59840
59841 #include <asm/byteorder.h>
59842
59843-static inline u64
59844+static inline u64 __intentional_overflow(-1)
59845 fs64_to_cpu(const struct super_block *sb, fs64 n)
59846 {
59847 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
59848@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
59849 return (__force fs64)cpu_to_be64(n);
59850 }
59851
59852-static inline u32
59853+static inline u32 __intentional_overflow(-1)
59854 fs32_to_cpu(const struct super_block *sb, fs32 n)
59855 {
59856 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
59857@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
59858 return (__force fs32)cpu_to_be32(n);
59859 }
59860
59861-static inline u16
59862+static inline u16 __intentional_overflow(-1)
59863 fs16_to_cpu(const struct super_block *sb, fs16 n)
59864 {
59865 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
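The fsXX_to_cpu() helpers convert on-disk BeFS integers to host byte order; the added __intentional_overflow(-1) marker tells the size_overflow gcc plugin that any overflow flowing through these conversion helpers is by design and should not be instrumented. A userspace sketch of the 32-bit case, assuming glibc's <endian.h> helpers (the plugin attribute has no userspace equivalent and is omitted):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define BYTESEX_LE 1

/* mirrors fs32_to_cpu(): the superblock records the volume's byte order */
static inline uint32_t fs32_to_cpu(int byte_order, uint32_t n)
{
	return byte_order == BYTESEX_LE ? le32toh(n) : be32toh(n);
}

int main(void)
{
	uint32_t on_disk = htole32(0x11223344);
	printf("0x%08x\n", fs32_to_cpu(BYTESEX_LE, on_disk));	/* 0x11223344 */
	return 0;
}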
59866diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
59867index ca0ba15..0fa3257 100644
59868--- a/fs/binfmt_aout.c
59869+++ b/fs/binfmt_aout.c
59870@@ -16,6 +16,7 @@
59871 #include <linux/string.h>
59872 #include <linux/fs.h>
59873 #include <linux/file.h>
59874+#include <linux/security.h>
59875 #include <linux/stat.h>
59876 #include <linux/fcntl.h>
59877 #include <linux/ptrace.h>
59878@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
59879 #endif
59880 # define START_STACK(u) ((void __user *)u.start_stack)
59881
59882+ memset(&dump, 0, sizeof(dump));
59883+
59884 fs = get_fs();
59885 set_fs(KERNEL_DS);
59886 has_dumped = 1;
59887@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
59888
59889 /* If the size of the dump file exceeds the rlimit, then see what would happen
59890 if we wrote the stack, but not the data area. */
59891+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
59892 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
59893 dump.u_dsize = 0;
59894
59895 /* Make sure we have enough room to write the stack and data areas. */
59896+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
59897 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
59898 dump.u_ssize = 0;
59899
59900@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
59901 rlim = rlimit(RLIMIT_DATA);
59902 if (rlim >= RLIM_INFINITY)
59903 rlim = ~0;
59904+
59905+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
59906 if (ex.a_data + ex.a_bss > rlim)
59907 return -ENOMEM;
59908
59909@@ -264,6 +271,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
59910
59911 install_exec_creds(bprm);
59912
59913+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59914+ current->mm->pax_flags = 0UL;
59915+#endif
59916+
59917+#ifdef CONFIG_PAX_PAGEEXEC
59918+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
59919+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
59920+
59921+#ifdef CONFIG_PAX_EMUTRAMP
59922+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
59923+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
59924+#endif
59925+
59926+#ifdef CONFIG_PAX_MPROTECT
59927+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
59928+ current->mm->pax_flags |= MF_PAX_MPROTECT;
59929+#endif
59930+
59931+ }
59932+#endif
59933+
59934 if (N_MAGIC(ex) == OMAGIC) {
59935 unsigned long text_addr, map_size;
59936 loff_t pos;
59937@@ -321,7 +349,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
59938 }
59939
59940 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
59941- PROT_READ | PROT_WRITE | PROT_EXEC,
59942+ PROT_READ | PROT_WRITE,
59943 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
59944 fd_offset + ex.a_text);
59945 if (error != N_DATADDR(ex)) {
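Dropping PROT_EXEC from the a.out data-segment mapping enforces W^X on that region: with NX or PaX PAGEEXEC active, writable data can no longer be executed. A userspace mmap sketch of the before/after protections (sizes and the anonymous mappings are illustrative):

#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	/* before the patch: a.out data was mapped RWX */
	void *rwx = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	/* after: RW only -- executing from it faults under NX/PAGEEXEC */
	void *rw = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	printf("rwx=%p rw=%p\n", rwx, rw);
	/* note: under PaX MPROTECT the RWX request above would itself fail */
	if (rwx != MAP_FAILED)
		munmap(rwx, 4096);
	if (rw != MAP_FAILED)
		munmap(rw, 4096);
	return 0;
}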
59946diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
59947index 3892c1a..4e27c04 100644
59948--- a/fs/binfmt_elf.c
59949+++ b/fs/binfmt_elf.c
59950@@ -34,6 +34,7 @@
59951 #include <linux/utsname.h>
59952 #include <linux/coredump.h>
59953 #include <linux/sched.h>
59954+#include <linux/xattr.h>
59955 #include <asm/uaccess.h>
59956 #include <asm/param.h>
59957 #include <asm/page.h>
59958@@ -47,7 +48,7 @@
59959
59960 static int load_elf_binary(struct linux_binprm *bprm);
59961 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
59962- int, int, unsigned long);
59963+ int, int, unsigned long) __intentional_overflow(-1);
59964
59965 #ifdef CONFIG_USELIB
59966 static int load_elf_library(struct file *);
59967@@ -65,6 +66,14 @@ static int elf_core_dump(struct coredump_params *cprm);
59968 #define elf_core_dump NULL
59969 #endif
59970
59971+#ifdef CONFIG_PAX_MPROTECT
59972+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
59973+#endif
59974+
59975+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59976+static void elf_handle_mmap(struct file *file);
59977+#endif
59978+
59979 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
59980 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
59981 #else
59982@@ -84,6 +93,15 @@ static struct linux_binfmt elf_format = {
59983 .load_binary = load_elf_binary,
59984 .load_shlib = load_elf_library,
59985 .core_dump = elf_core_dump,
59986+
59987+#ifdef CONFIG_PAX_MPROTECT
59988+ .handle_mprotect= elf_handle_mprotect,
59989+#endif
59990+
59991+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59992+ .handle_mmap = elf_handle_mmap,
59993+#endif
59994+
59995 .min_coredump = ELF_EXEC_PAGESIZE,
59996 };
59997
59998@@ -91,6 +109,8 @@ static struct linux_binfmt elf_format = {
59999
60000 static int set_brk(unsigned long start, unsigned long end)
60001 {
60002+ unsigned long e = end;
60003+
60004 start = ELF_PAGEALIGN(start);
60005 end = ELF_PAGEALIGN(end);
60006 if (end > start) {
60007@@ -99,7 +119,7 @@ static int set_brk(unsigned long start, unsigned long end)
60008 if (BAD_ADDR(addr))
60009 return addr;
60010 }
60011- current->mm->start_brk = current->mm->brk = end;
60012+ current->mm->start_brk = current->mm->brk = e;
60013 return 0;
60014 }
60015
60016@@ -160,12 +180,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
60017 elf_addr_t __user *u_rand_bytes;
60018 const char *k_platform = ELF_PLATFORM;
60019 const char *k_base_platform = ELF_BASE_PLATFORM;
60020- unsigned char k_rand_bytes[16];
60021+ u32 k_rand_bytes[4];
60022 int items;
60023 elf_addr_t *elf_info;
60024 int ei_index = 0;
60025 const struct cred *cred = current_cred();
60026 struct vm_area_struct *vma;
60027+ unsigned long saved_auxv[AT_VECTOR_SIZE];
60028
60029 /*
60030 * In some cases (e.g. Hyper-Threading), we want to avoid L1
60031@@ -207,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
60032 * Generate 16 random bytes for userspace PRNG seeding.
60033 */
60034 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
60035- u_rand_bytes = (elf_addr_t __user *)
60036- STACK_ALLOC(p, sizeof(k_rand_bytes));
60037+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
60038+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
60039+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
60040+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
60041+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
60042+ u_rand_bytes = (elf_addr_t __user *) p;
60043 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
60044 return -EFAULT;
60045
60046@@ -323,9 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
60047 return -EFAULT;
60048 current->mm->env_end = p;
60049
60050+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
60051+
60052 /* Put the elf_info on the stack in the right place. */
60053 sp = (elf_addr_t __user *)envp + 1;
60054- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
60055+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
60056 return -EFAULT;
60057 return 0;
60058 }
60059@@ -393,15 +420,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
60060 an ELF header */
60061
60062 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
60063- struct file *interpreter, unsigned long *interp_map_addr,
60064- unsigned long no_base)
60065+ struct file *interpreter, unsigned long no_base)
60066 {
60067 struct elf_phdr *elf_phdata;
60068 struct elf_phdr *eppnt;
60069- unsigned long load_addr = 0;
60070+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
60071 int load_addr_set = 0;
60072 unsigned long last_bss = 0, elf_bss = 0;
60073- unsigned long error = ~0UL;
60074+ unsigned long error = -EINVAL;
60075 unsigned long total_size;
60076 int retval, i, size;
60077
60078@@ -447,6 +473,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
60079 goto out_close;
60080 }
60081
60082+#ifdef CONFIG_PAX_SEGMEXEC
60083+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
60084+ pax_task_size = SEGMEXEC_TASK_SIZE;
60085+#endif
60086+
60087 eppnt = elf_phdata;
60088 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
60089 if (eppnt->p_type == PT_LOAD) {
60090@@ -470,8 +501,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
60091 map_addr = elf_map(interpreter, load_addr + vaddr,
60092 eppnt, elf_prot, elf_type, total_size);
60093 total_size = 0;
60094- if (!*interp_map_addr)
60095- *interp_map_addr = map_addr;
60096 error = map_addr;
60097 if (BAD_ADDR(map_addr))
60098 goto out_close;
60099@@ -490,8 +519,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
60100 k = load_addr + eppnt->p_vaddr;
60101 if (BAD_ADDR(k) ||
60102 eppnt->p_filesz > eppnt->p_memsz ||
60103- eppnt->p_memsz > TASK_SIZE ||
60104- TASK_SIZE - eppnt->p_memsz < k) {
60105+ eppnt->p_memsz > pax_task_size ||
60106+ pax_task_size - eppnt->p_memsz < k) {
60107 error = -ENOMEM;
60108 goto out_close;
60109 }
60110@@ -530,9 +559,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
60111 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
60112
60113 /* Map the last of the bss segment */
60114- error = vm_brk(elf_bss, last_bss - elf_bss);
60115- if (BAD_ADDR(error))
60116- goto out_close;
60117+ if (last_bss > elf_bss) {
60118+ error = vm_brk(elf_bss, last_bss - elf_bss);
60119+ if (BAD_ADDR(error))
60120+ goto out_close;
60121+ }
60122 }
60123
60124 error = load_addr;
60125@@ -543,6 +574,336 @@ out:
60126 return error;
60127 }
60128
60129+#ifdef CONFIG_PAX_PT_PAX_FLAGS
60130+#ifdef CONFIG_PAX_SOFTMODE
60131+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
60132+{
60133+ unsigned long pax_flags = 0UL;
60134+
60135+#ifdef CONFIG_PAX_PAGEEXEC
60136+ if (elf_phdata->p_flags & PF_PAGEEXEC)
60137+ pax_flags |= MF_PAX_PAGEEXEC;
60138+#endif
60139+
60140+#ifdef CONFIG_PAX_SEGMEXEC
60141+ if (elf_phdata->p_flags & PF_SEGMEXEC)
60142+ pax_flags |= MF_PAX_SEGMEXEC;
60143+#endif
60144+
60145+#ifdef CONFIG_PAX_EMUTRAMP
60146+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
60147+ pax_flags |= MF_PAX_EMUTRAMP;
60148+#endif
60149+
60150+#ifdef CONFIG_PAX_MPROTECT
60151+ if (elf_phdata->p_flags & PF_MPROTECT)
60152+ pax_flags |= MF_PAX_MPROTECT;
60153+#endif
60154+
60155+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
60156+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
60157+ pax_flags |= MF_PAX_RANDMMAP;
60158+#endif
60159+
60160+ return pax_flags;
60161+}
60162+#endif
60163+
60164+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
60165+{
60166+ unsigned long pax_flags = 0UL;
60167+
60168+#ifdef CONFIG_PAX_PAGEEXEC
60169+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
60170+ pax_flags |= MF_PAX_PAGEEXEC;
60171+#endif
60172+
60173+#ifdef CONFIG_PAX_SEGMEXEC
60174+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
60175+ pax_flags |= MF_PAX_SEGMEXEC;
60176+#endif
60177+
60178+#ifdef CONFIG_PAX_EMUTRAMP
60179+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
60180+ pax_flags |= MF_PAX_EMUTRAMP;
60181+#endif
60182+
60183+#ifdef CONFIG_PAX_MPROTECT
60184+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
60185+ pax_flags |= MF_PAX_MPROTECT;
60186+#endif
60187+
60188+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
60189+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
60190+ pax_flags |= MF_PAX_RANDMMAP;
60191+#endif
60192+
60193+ return pax_flags;
60194+}
60195+#endif
60196+
60197+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
60198+#ifdef CONFIG_PAX_SOFTMODE
60199+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
60200+{
60201+ unsigned long pax_flags = 0UL;
60202+
60203+#ifdef CONFIG_PAX_PAGEEXEC
60204+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
60205+ pax_flags |= MF_PAX_PAGEEXEC;
60206+#endif
60207+
60208+#ifdef CONFIG_PAX_SEGMEXEC
60209+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
60210+ pax_flags |= MF_PAX_SEGMEXEC;
60211+#endif
60212+
60213+#ifdef CONFIG_PAX_EMUTRAMP
60214+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
60215+ pax_flags |= MF_PAX_EMUTRAMP;
60216+#endif
60217+
60218+#ifdef CONFIG_PAX_MPROTECT
60219+ if (pax_flags_softmode & MF_PAX_MPROTECT)
60220+ pax_flags |= MF_PAX_MPROTECT;
60221+#endif
60222+
60223+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
60224+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
60225+ pax_flags |= MF_PAX_RANDMMAP;
60226+#endif
60227+
60228+ return pax_flags;
60229+}
60230+#endif
60231+
60232+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
60233+{
60234+ unsigned long pax_flags = 0UL;
60235+
60236+#ifdef CONFIG_PAX_PAGEEXEC
60237+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
60238+ pax_flags |= MF_PAX_PAGEEXEC;
60239+#endif
60240+
60241+#ifdef CONFIG_PAX_SEGMEXEC
60242+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
60243+ pax_flags |= MF_PAX_SEGMEXEC;
60244+#endif
60245+
60246+#ifdef CONFIG_PAX_EMUTRAMP
60247+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
60248+ pax_flags |= MF_PAX_EMUTRAMP;
60249+#endif
60250+
60251+#ifdef CONFIG_PAX_MPROTECT
60252+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
60253+ pax_flags |= MF_PAX_MPROTECT;
60254+#endif
60255+
60256+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
60257+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
60258+ pax_flags |= MF_PAX_RANDMMAP;
60259+#endif
60260+
60261+ return pax_flags;
60262+}
60263+#endif
60264+
60265+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60266+static unsigned long pax_parse_defaults(void)
60267+{
60268+ unsigned long pax_flags = 0UL;
60269+
60270+#ifdef CONFIG_PAX_SOFTMODE
60271+ if (pax_softmode)
60272+ return pax_flags;
60273+#endif
60274+
60275+#ifdef CONFIG_PAX_PAGEEXEC
60276+ pax_flags |= MF_PAX_PAGEEXEC;
60277+#endif
60278+
60279+#ifdef CONFIG_PAX_SEGMEXEC
60280+ pax_flags |= MF_PAX_SEGMEXEC;
60281+#endif
60282+
60283+#ifdef CONFIG_PAX_MPROTECT
60284+ pax_flags |= MF_PAX_MPROTECT;
60285+#endif
60286+
60287+#ifdef CONFIG_PAX_RANDMMAP
60288+ if (randomize_va_space)
60289+ pax_flags |= MF_PAX_RANDMMAP;
60290+#endif
60291+
60292+ return pax_flags;
60293+}
60294+
60295+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
60296+{
60297+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
60298+
60299+#ifdef CONFIG_PAX_EI_PAX
60300+
60301+#ifdef CONFIG_PAX_SOFTMODE
60302+ if (pax_softmode)
60303+ return pax_flags;
60304+#endif
60305+
60306+ pax_flags = 0UL;
60307+
60308+#ifdef CONFIG_PAX_PAGEEXEC
60309+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
60310+ pax_flags |= MF_PAX_PAGEEXEC;
60311+#endif
60312+
60313+#ifdef CONFIG_PAX_SEGMEXEC
60314+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
60315+ pax_flags |= MF_PAX_SEGMEXEC;
60316+#endif
60317+
60318+#ifdef CONFIG_PAX_EMUTRAMP
60319+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
60320+ pax_flags |= MF_PAX_EMUTRAMP;
60321+#endif
60322+
60323+#ifdef CONFIG_PAX_MPROTECT
60324+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
60325+ pax_flags |= MF_PAX_MPROTECT;
60326+#endif
60327+
60328+#ifdef CONFIG_PAX_ASLR
60329+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
60330+ pax_flags |= MF_PAX_RANDMMAP;
60331+#endif
60332+
60333+#endif
60334+
60335+ return pax_flags;
60336+
60337+}
60338+
60339+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
60340+{
60341+
60342+#ifdef CONFIG_PAX_PT_PAX_FLAGS
60343+ unsigned long i;
60344+
60345+ for (i = 0UL; i < elf_ex->e_phnum; i++)
60346+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
60347+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
60348+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
60349+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
60350+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
60351+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
60352+ return PAX_PARSE_FLAGS_FALLBACK;
60353+
60354+#ifdef CONFIG_PAX_SOFTMODE
60355+ if (pax_softmode)
60356+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
60357+ else
60358+#endif
60359+
60360+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
60361+ break;
60362+ }
60363+#endif
60364+
60365+ return PAX_PARSE_FLAGS_FALLBACK;
60366+}
60367+
60368+static unsigned long pax_parse_xattr_pax(struct file * const file)
60369+{
60370+
60371+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
60372+ ssize_t xattr_size, i;
60373+ unsigned char xattr_value[sizeof("pemrs") - 1];
60374+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
60375+
60376+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
60377+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
60378+ return PAX_PARSE_FLAGS_FALLBACK;
60379+
60380+ for (i = 0; i < xattr_size; i++)
60381+ switch (xattr_value[i]) {
60382+ default:
60383+ return PAX_PARSE_FLAGS_FALLBACK;
60384+
60385+#define parse_flag(option1, option2, flag) \
60386+ case option1: \
60387+ if (pax_flags_hardmode & MF_PAX_##flag) \
60388+ return PAX_PARSE_FLAGS_FALLBACK;\
60389+ pax_flags_hardmode |= MF_PAX_##flag; \
60390+ break; \
60391+ case option2: \
60392+ if (pax_flags_softmode & MF_PAX_##flag) \
60393+ return PAX_PARSE_FLAGS_FALLBACK;\
60394+ pax_flags_softmode |= MF_PAX_##flag; \
60395+ break;
60396+
60397+ parse_flag('p', 'P', PAGEEXEC);
60398+ parse_flag('e', 'E', EMUTRAMP);
60399+ parse_flag('m', 'M', MPROTECT);
60400+ parse_flag('r', 'R', RANDMMAP);
60401+ parse_flag('s', 'S', SEGMEXEC);
60402+
60403+#undef parse_flag
60404+ }
60405+
60406+ if (pax_flags_hardmode & pax_flags_softmode)
60407+ return PAX_PARSE_FLAGS_FALLBACK;
60408+
60409+#ifdef CONFIG_PAX_SOFTMODE
60410+ if (pax_softmode)
60411+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
60412+ else
60413+#endif
60414+
60415+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
60416+#else
60417+ return PAX_PARSE_FLAGS_FALLBACK;
60418+#endif
60419+
60420+}
60421+
60422+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
60423+{
60424+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
60425+
60426+ pax_flags = pax_parse_defaults();
60427+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
60428+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
60429+ xattr_pax_flags = pax_parse_xattr_pax(file);
60430+
60431+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
60432+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
60433+ pt_pax_flags != xattr_pax_flags)
60434+ return -EINVAL;
60435+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
60436+ pax_flags = xattr_pax_flags;
60437+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
60438+ pax_flags = pt_pax_flags;
60439+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
60440+ pax_flags = ei_pax_flags;
60441+
60442+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
60443+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
60444+ if ((__supported_pte_mask & _PAGE_NX))
60445+ pax_flags &= ~MF_PAX_SEGMEXEC;
60446+ else
60447+ pax_flags &= ~MF_PAX_PAGEEXEC;
60448+ }
60449+#endif
60450+
60451+ if (0 > pax_check_flags(&pax_flags))
60452+ return -EINVAL;
60453+
60454+ current->mm->pax_flags = pax_flags;
60455+ return 0;
60456+}
60457+#endif
60458+
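The xattr parser added above folds a short flag string, lowercase "pemrs" for hardmode and uppercase "PEMRS" for softmode, into two bitmasks, rejecting duplicate letters and any flag requested in both modes at once. A minimal userspace sketch of the same parsing discipline, using hypothetical FLAG_* constants in place of the kernel's MF_PAX_* bits:

#include <stdio.h>

enum { FLAG_PAGEEXEC = 1, FLAG_EMUTRAMP = 2, FLAG_MPROTECT = 4,
       FLAG_RANDMMAP = 8, FLAG_SEGMEXEC = 16 };

/* Returns 0 on success, -1 on unknown, duplicate, or conflicting flags. */
static int parse_pax_string(const char *s, unsigned long *hard, unsigned long *soft)
{
	*hard = *soft = 0;
	for (; *s; s++) {
		unsigned long bit, *mask;
		switch (*s) {
		case 'p': case 'P': bit = FLAG_PAGEEXEC; break;
		case 'e': case 'E': bit = FLAG_EMUTRAMP; break;
		case 'm': case 'M': bit = FLAG_MPROTECT; break;
		case 'r': case 'R': bit = FLAG_RANDMMAP; break;
		case 's': case 'S': bit = FLAG_SEGMEXEC; break;
		default: return -1;
		}
		/* lowercase selects hardmode, uppercase softmode, as in the patch */
		mask = (*s >= 'a') ? hard : soft;
		if (*mask & bit)	/* duplicate letter: reject */
			return -1;
		*mask |= bit;
	}
	return (*hard & *soft) ? -1 : 0;	/* same flag in both modes: reject */
}

int main(void)
{
	unsigned long hard, soft;
	if (parse_pax_string("pEm", &hard, &soft) == 0)
		printf("hard=%#lx soft=%#lx\n", hard, soft);
	return 0;
}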
60459 /*
60460 * These are the functions used to load ELF style executables and shared
60461 * libraries. There is no binary dependent code anywhere else.
60462@@ -556,6 +917,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
60463 {
60464 unsigned int random_variable = 0;
60465
60466+#ifdef CONFIG_PAX_RANDUSTACK
60467+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
60468+ return stack_top - current->mm->delta_stack;
60469+#endif
60470+
60471 if ((current->flags & PF_RANDOMIZE) &&
60472 !(current->personality & ADDR_NO_RANDOMIZE)) {
60473 random_variable = get_random_int() & STACK_RND_MASK;
60474@@ -574,7 +940,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
60475 unsigned long load_addr = 0, load_bias = 0;
60476 int load_addr_set = 0;
60477 char * elf_interpreter = NULL;
60478- unsigned long error;
60479+ unsigned long error = 0;
60480 struct elf_phdr *elf_ppnt, *elf_phdata;
60481 unsigned long elf_bss, elf_brk;
60482 int retval, i;
60483@@ -589,6 +955,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
60484 struct elfhdr elf_ex;
60485 struct elfhdr interp_elf_ex;
60486 } *loc;
60487+ unsigned long pax_task_size;
60488
60489 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
60490 if (!loc) {
60491@@ -726,6 +1093,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
60492 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
60493 may depend on the personality. */
60494 SET_PERSONALITY(loc->elf_ex);
60495+
60496+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60497+ current->mm->pax_flags = 0UL;
60498+#endif
60499+
60500+#ifdef CONFIG_PAX_DLRESOLVE
60501+ current->mm->call_dl_resolve = 0UL;
60502+#endif
60503+
60504+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
60505+ current->mm->call_syscall = 0UL;
60506+#endif
60507+
60508+#ifdef CONFIG_PAX_ASLR
60509+ current->mm->delta_mmap = 0UL;
60510+ current->mm->delta_stack = 0UL;
60511+#endif
60512+
60513+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60514+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
60515+ send_sig(SIGKILL, current, 0);
60516+ goto out_free_dentry;
60517+ }
60518+#endif
60519+
60520+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60521+ pax_set_initial_flags(bprm);
60522+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60523+ if (pax_set_initial_flags_func)
60524+ (pax_set_initial_flags_func)(bprm);
60525+#endif
60526+
60527+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
60528+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
60529+ current->mm->context.user_cs_limit = PAGE_SIZE;
60530+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
60531+ }
60532+#endif
60533+
60534+#ifdef CONFIG_PAX_SEGMEXEC
60535+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
60536+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
60537+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
60538+ pax_task_size = SEGMEXEC_TASK_SIZE;
60539+ current->mm->def_flags |= VM_NOHUGEPAGE;
60540+ } else
60541+#endif
60542+
60543+ pax_task_size = TASK_SIZE;
60544+
60545+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
60546+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
60547+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
60548+ put_cpu();
60549+ }
60550+#endif
60551+
60552+#ifdef CONFIG_PAX_ASLR
60553+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
60554+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
60555+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
60556+ }
60557+#endif
60558+
60559+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
60560+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
60561+ executable_stack = EXSTACK_DISABLE_X;
60562+ current->personality &= ~READ_IMPLIES_EXEC;
60563+ } else
60564+#endif
60565+
60566 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
60567 current->personality |= READ_IMPLIES_EXEC;
60568
60569@@ -815,6 +1253,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
60570 #else
60571 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
60572 #endif
60573+
60574+#ifdef CONFIG_PAX_RANDMMAP
60575+ /* PaX: randomize base address at the default exe base if requested */
60576+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
60577+#ifdef CONFIG_SPARC64
60578+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
60579+#else
60580+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
60581+#endif
60582+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
60583+ elf_flags |= MAP_FIXED;
60584+ }
60585+#endif
60586+
60587 }
60588
60589 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
60590@@ -847,9 +1299,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
60591 * allowed task size. Note that p_filesz must always be
60592 * <= p_memsz so it is only necessary to check p_memsz.
60593 */
60594- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
60595- elf_ppnt->p_memsz > TASK_SIZE ||
60596- TASK_SIZE - elf_ppnt->p_memsz < k) {
60597+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
60598+ elf_ppnt->p_memsz > pax_task_size ||
60599+ pax_task_size - elf_ppnt->p_memsz < k) {
60600 /* set_brk can never work. Avoid overflows. */
60601 send_sig(SIGKILL, current, 0);
60602 retval = -EINVAL;
60603@@ -888,17 +1340,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
60604 goto out_free_dentry;
60605 }
60606 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
60607- send_sig(SIGSEGV, current, 0);
60608- retval = -EFAULT; /* Nobody gets to see this, but.. */
60609- goto out_free_dentry;
60610+ /*
60611+ * This bss-zeroing can fail if the ELF
60612+ * file specifies odd protections. So
60613+ * we don't check the return value
60614+ */
60615 }
60616
60617+#ifdef CONFIG_PAX_RANDMMAP
60618+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
60619+ unsigned long start, size, flags;
60620+ vm_flags_t vm_flags;
60621+
60622+ start = ELF_PAGEALIGN(elf_brk);
60623+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
60624+ flags = MAP_FIXED | MAP_PRIVATE;
60625+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
60626+
60627+ down_write(&current->mm->mmap_sem);
60628+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
60629+ retval = -ENOMEM;
60630+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
60631+// if (current->personality & ADDR_NO_RANDOMIZE)
60632+// vm_flags |= VM_READ | VM_MAYREAD;
60633+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
60634+ retval = IS_ERR_VALUE(start) ? start : 0;
60635+ }
60636+ up_write(&current->mm->mmap_sem);
60637+ if (retval == 0)
60638+ retval = set_brk(start + size, start + size + PAGE_SIZE);
60639+ if (retval < 0) {
60640+ send_sig(SIGKILL, current, 0);
60641+ goto out_free_dentry;
60642+ }
60643+ }
60644+#endif
60645+
60646 if (elf_interpreter) {
60647- unsigned long interp_map_addr = 0;
60648-
60649 elf_entry = load_elf_interp(&loc->interp_elf_ex,
60650 interpreter,
60651- &interp_map_addr,
60652 load_bias);
60653 if (!IS_ERR((void *)elf_entry)) {
60654 /*
60655@@ -1130,7 +1610,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
60656 * Decide what to dump of a segment, part, all or none.
60657 */
60658 static unsigned long vma_dump_size(struct vm_area_struct *vma,
60659- unsigned long mm_flags)
60660+ unsigned long mm_flags, long signr)
60661 {
60662 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
60663
60664@@ -1168,7 +1648,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
60665 if (vma->vm_file == NULL)
60666 return 0;
60667
60668- if (FILTER(MAPPED_PRIVATE))
60669+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
60670 goto whole;
60671
60672 /*
60673@@ -1375,9 +1855,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
60674 {
60675 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
60676 int i = 0;
60677- do
60678+ do {
60679 i += 2;
60680- while (auxv[i - 2] != AT_NULL);
60681+ } while (auxv[i - 2] != AT_NULL);
60682 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
60683 }
60684
60685@@ -1386,7 +1866,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
60686 {
60687 mm_segment_t old_fs = get_fs();
60688 set_fs(KERNEL_DS);
60689- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
60690+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
60691 set_fs(old_fs);
60692 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
60693 }
60694@@ -2010,14 +2490,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
60695 }
60696
60697 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
60698- unsigned long mm_flags)
60699+ struct coredump_params *cprm)
60700 {
60701 struct vm_area_struct *vma;
60702 size_t size = 0;
60703
60704 for (vma = first_vma(current, gate_vma); vma != NULL;
60705 vma = next_vma(vma, gate_vma))
60706- size += vma_dump_size(vma, mm_flags);
60707+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
60708 return size;
60709 }
60710
60711@@ -2108,7 +2588,7 @@ static int elf_core_dump(struct coredump_params *cprm)
60712
60713 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
60714
60715- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
60716+ offset += elf_core_vma_data_size(gate_vma, cprm);
60717 offset += elf_core_extra_data_size();
60718 e_shoff = offset;
60719
60720@@ -2136,7 +2616,7 @@ static int elf_core_dump(struct coredump_params *cprm)
60721 phdr.p_offset = offset;
60722 phdr.p_vaddr = vma->vm_start;
60723 phdr.p_paddr = 0;
60724- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
60725+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
60726 phdr.p_memsz = vma->vm_end - vma->vm_start;
60727 offset += phdr.p_filesz;
60728 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
60729@@ -2169,7 +2649,7 @@ static int elf_core_dump(struct coredump_params *cprm)
60730 unsigned long addr;
60731 unsigned long end;
60732
60733- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
60734+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
60735
60736 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
60737 struct page *page;
60738@@ -2210,6 +2690,167 @@ out:
60739
60740 #endif /* CONFIG_ELF_CORE */
60741
60742+#ifdef CONFIG_PAX_MPROTECT
60743+/* PaX: non-PIC ELF libraries need relocations on their executable segments,
60744+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
60745+ * we'll remove VM_MAYWRITE for good on RELRO segments.
60746+ *
60747+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
60748+ * basis because we want to allow the common case and not the special ones.
60749+ */
60750+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
60751+{
60752+ struct elfhdr elf_h;
60753+ struct elf_phdr elf_p;
60754+ unsigned long i;
60755+ unsigned long oldflags;
60756+ bool is_textrel_rw, is_textrel_rx, is_relro;
60757+
60758+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
60759+ return;
60760+
60761+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
60762+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
60763+
60764+#ifdef CONFIG_PAX_ELFRELOCS
60765+ /* possible TEXTREL */
60766+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
60767+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
60768+#else
60769+ is_textrel_rw = false;
60770+ is_textrel_rx = false;
60771+#endif
60772+
60773+ /* possible RELRO */
60774+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
60775+
60776+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
60777+ return;
60778+
60779+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
60780+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
60781+
60782+#ifdef CONFIG_PAX_ETEXECRELOCS
60783+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
60784+#else
60785+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
60786+#endif
60787+
60788+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
60789+ !elf_check_arch(&elf_h) ||
60790+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
60791+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
60792+ return;
60793+
60794+ for (i = 0UL; i < elf_h.e_phnum; i++) {
60795+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
60796+ return;
60797+ switch (elf_p.p_type) {
60798+ case PT_DYNAMIC:
60799+ if (!is_textrel_rw && !is_textrel_rx)
60800+ continue;
60801+ i = 0UL;
60802+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
60803+ elf_dyn dyn;
60804+
60805+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
60806+ break;
60807+ if (dyn.d_tag == DT_NULL)
60808+ break;
60809+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
60810+ gr_log_textrel(vma);
60811+ if (is_textrel_rw)
60812+ vma->vm_flags |= VM_MAYWRITE;
60813+ else
60814+ /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
60815+ vma->vm_flags &= ~VM_MAYWRITE;
60816+ break;
60817+ }
60818+ i++;
60819+ }
60820+ is_textrel_rw = false;
60821+ is_textrel_rx = false;
60822+ continue;
60823+
60824+ case PT_GNU_RELRO:
60825+ if (!is_relro)
60826+ continue;
60827+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
60828+ vma->vm_flags &= ~VM_MAYWRITE;
60829+ is_relro = false;
60830+ continue;
60831+
60832+#ifdef CONFIG_PAX_PT_PAX_FLAGS
60833+ case PT_PAX_FLAGS: {
60834+ const char *msg_mprotect = "", *msg_emutramp = "";
60835+ char *buffer_lib, *buffer_exe;
60836+
60837+ if (elf_p.p_flags & PF_NOMPROTECT)
60838+ msg_mprotect = "MPROTECT disabled";
60839+
60840+#ifdef CONFIG_PAX_EMUTRAMP
60841+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
60842+ msg_emutramp = "EMUTRAMP enabled";
60843+#endif
60844+
60845+ if (!msg_mprotect[0] && !msg_emutramp[0])
60846+ continue;
60847+
60848+ if (!printk_ratelimit())
60849+ continue;
60850+
60851+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
60852+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
60853+ if (buffer_lib && buffer_exe) {
60854+ char *path_lib, *path_exe;
60855+
60856+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
60857+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
60858+
60859+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
60860+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
60861+
60862+ }
60863+ free_page((unsigned long)buffer_exe);
60864+ free_page((unsigned long)buffer_lib);
60865+ continue;
60866+ }
60867+#endif
60868+
60869+ }
60870+ }
60871+}
60872+#endif
60873+
60874+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
60875+
60876+extern int grsec_enable_log_rwxmaps;
60877+
60878+static void elf_handle_mmap(struct file *file)
60879+{
60880+ struct elfhdr elf_h;
60881+ struct elf_phdr elf_p;
60882+ unsigned long i;
60883+
60884+ if (!grsec_enable_log_rwxmaps)
60885+ return;
60886+
60887+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
60888+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
60889+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
60890+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
60891+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
60892+ return;
60893+
60894+ for (i = 0UL; i < elf_h.e_phnum; i++) {
60895+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
60896+ return;
60897+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
60898+ gr_log_ptgnustack(file);
60899+ }
60900+}
60901+#endif
60902+
60903 static int __init init_elf_binfmt(void)
60904 {
60905 register_binfmt(&elf_format);
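The elf_handle_mmap() helper above re-reads the program headers of every mmap()ed ELF object, after validating the magic, phentsize, and phnum bounds, and logs any PT_GNU_STACK entry that requests an executable stack. A rough userspace equivalent of that scan, assuming a 64-bit ELF on disk (the kernel version works on either class through its elf_phdr typedef):

#include <elf.h>
#include <stdio.h>
#include <string.h>

/* Report whether an ELF file asks for an executable stack via PT_GNU_STACK. */
int main(int argc, char **argv)
{
	Elf64_Ehdr eh;
	Elf64_Phdr ph;
	FILE *f;
	int i;

	if (argc != 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	if (fread(&eh, sizeof(eh), 1, f) != 1 ||
	    memcmp(eh.e_ident, ELFMAG, SELFMAG) != 0 ||
	    eh.e_phentsize != sizeof(ph))
		goto out;
	for (i = 0; i < eh.e_phnum; i++) {
		if (fseek(f, (long)(eh.e_phoff + (Elf64_Off)i * sizeof(ph)), SEEK_SET) ||
		    fread(&ph, sizeof(ph), 1, f) != 1)
			break;
		if (ph.p_type == PT_GNU_STACK)
			printf("PT_GNU_STACK: %s\n",
			       (ph.p_flags & PF_X) ? "executable" : "non-executable");
	}
out:
	fclose(f);
	return 0;
}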
60906diff --git a/fs/block_dev.c b/fs/block_dev.c
60907index 6d72746..536d1db 100644
60908--- a/fs/block_dev.c
60909+++ b/fs/block_dev.c
60910@@ -701,7 +701,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
60911 else if (bdev->bd_contains == bdev)
60912 return true; /* is a whole device which isn't held */
60913
60914- else if (whole->bd_holder == bd_may_claim)
60915+ else if (whole->bd_holder == (void *)bd_may_claim)
60916 return true; /* is a partition of a device that is being partitioned */
60917 else if (whole->bd_holder != NULL)
60918 return false; /* is a partition of a held device */
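The block_dev.c hunk only adds a cast: the claiming code uses the address of bd_may_claim() itself as a sentinel "holder" value (set elsewhere, in bd_prepare_to_claim()), and comparing a void * holder against a bare function designator draws warnings from stricter compilers. A minimal sketch of the sentinel-address pattern, compressed into one function for illustration:

#include <stdio.h>

struct device { void *holder; };

static int claim(struct device *d);	/* its address doubles as the sentinel */

static int claim(struct device *d)
{
	if (d->holder == NULL) {
		d->holder = (void *)claim;	/* mark "claim in progress" with our own address */
		return 1;
	}
	return d->holder == (void *)claim;	/* the cast keeps the comparison type-clean */
}

int main(void)
{
	struct device d = { 0 };
	printf("%d %d\n", claim(&d), claim(&d));	/* prints: 1 1 */
	return 0;
}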
60919diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
60920index aeab453..48dbafc 100644
60921--- a/fs/btrfs/ctree.c
60922+++ b/fs/btrfs/ctree.c
60923@@ -1184,9 +1184,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
60924 free_extent_buffer(buf);
60925 add_root_to_dirty_list(root);
60926 } else {
60927- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
60928- parent_start = parent->start;
60929- else
60930+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
60931+ if (parent)
60932+ parent_start = parent->start;
60933+ else
60934+ parent_start = 0;
60935+ } else
60936 parent_start = 0;
60937
60938 WARN_ON(trans->transid != btrfs_header_generation(parent));
60939diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
60940index a2e90f8..5135e5f 100644
60941--- a/fs/btrfs/delayed-inode.c
60942+++ b/fs/btrfs/delayed-inode.c
60943@@ -462,7 +462,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
60944
60945 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
60946 {
60947- int seq = atomic_inc_return(&delayed_root->items_seq);
60948+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
60949 if ((atomic_dec_return(&delayed_root->items) <
60950 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
60951 waitqueue_active(&delayed_root->wait))
60952@@ -1412,7 +1412,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
60953
60954 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
60955 {
60956- int val = atomic_read(&delayed_root->items_seq);
60957+ int val = atomic_read_unchecked(&delayed_root->items_seq);
60958
60959 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
60960 return 1;
60961@@ -1436,7 +1436,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
60962 int seq;
60963 int ret;
60964
60965- seq = atomic_read(&delayed_root->items_seq);
60966+ seq = atomic_read_unchecked(&delayed_root->items_seq);
60967
60968 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
60969 if (ret)
60970diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
60971index f70119f..ab5894d 100644
60972--- a/fs/btrfs/delayed-inode.h
60973+++ b/fs/btrfs/delayed-inode.h
60974@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
60975 */
60976 struct list_head prepare_list;
60977 atomic_t items; /* for delayed items */
60978- atomic_t items_seq; /* for delayed items */
60979+ atomic_unchecked_t items_seq; /* for delayed items */
60980 int nodes; /* for delayed nodes */
60981 wait_queue_head_t wait;
60982 };
60983@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
60984 struct btrfs_delayed_root *delayed_root)
60985 {
60986 atomic_set(&delayed_root->items, 0);
60987- atomic_set(&delayed_root->items_seq, 0);
60988+ atomic_set_unchecked(&delayed_root->items_seq, 0);
60989 delayed_root->nodes = 0;
60990 spin_lock_init(&delayed_root->lock);
60991 init_waitqueue_head(&delayed_root->wait);
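The delayed-inode conversions above switch items_seq to atomic_unchecked_t, the PaX/grsecurity type that opts a counter out of REFCOUNT overflow detection: items_seq is a pure wrap-around sequence number, so saturating it on overflow would be wrong. The analogous "wrapping is fine by design" counter in standard C11, offered as a sketch of the idea rather than the kernel primitive:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* A sequence counter where wrap-around is harmless by design,
 * analogous to what atomic_unchecked_t expresses under PaX REFCOUNT. */
static atomic_uint items_seq;

static unsigned finish_one_item(void)
{
	/* unsigned overflow is well-defined: the counter simply wraps */
	return atomic_fetch_add_explicit(&items_seq, 1, memory_order_relaxed) + 1;
}

int main(void)
{
	atomic_store(&items_seq, UINT_MAX - 1u);
	printf("%u\n", finish_one_item());	/* UINT_MAX */
	printf("%u\n", finish_one_item());	/* 0: wrapped, and that's fine */
	return 0;
}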
60992diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
60993index 47aceb4..7d28b1c 100644
60994--- a/fs/btrfs/ioctl.c
60995+++ b/fs/btrfs/ioctl.c
60996@@ -3965,9 +3965,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
60997 for (i = 0; i < num_types; i++) {
60998 struct btrfs_space_info *tmp;
60999
61000+ /* Don't copy in more than we allocated */
61001 if (!slot_count)
61002 break;
61003
61004+ slot_count--;
61005+
61006 info = NULL;
61007 rcu_read_lock();
61008 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
61009@@ -3989,10 +3992,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
61010 memcpy(dest, &space, sizeof(space));
61011 dest++;
61012 space_args.total_spaces++;
61013- slot_count--;
61014 }
61015- if (!slot_count)
61016- break;
61017 }
61018 up_read(&info->groups_sem);
61019 }
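The ioctl.c hunk moves the slot_count bookkeeping to the top of the loop, so the "did we run out of user-supplied slots" budget is charged before anything is copied out, instead of only after an entry happens to match. A sketch of that bounded-copy discipline with hypothetical names; the point is that the destination buffer can never be overrun:

#include <stdio.h>

struct info { int type; long total; };

/* Copy at most 'slots' entries; never write past the caller's buffer. */
static size_t copy_infos(const struct info *src, size_t n,
			 struct info *dst, size_t slots)
{
	size_t i, copied = 0;

	for (i = 0; i < n; i++) {
		if (!slots)	/* budget checked up front, like the fixed loop */
			break;
		slots--;	/* one slot consumed per iteration, match or not */
		if (src[i].total == 0)
			continue;	/* skipped entries still consumed their slot */
		dst[copied++] = src[i];
	}
	return copied;
}

int main(void)
{
	struct info src[4] = { {1, 10}, {2, 0}, {3, 30}, {4, 40} };
	struct info dst[2];
	printf("copied %zu\n", copy_infos(src, 4, dst, 2));	/* copied 1 */
	return 0;
}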
61020diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
61021index 8e16bca..6eabd9e 100644
61022--- a/fs/btrfs/super.c
61023+++ b/fs/btrfs/super.c
61024@@ -270,7 +270,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
61025 function, line, errstr);
61026 return;
61027 }
61028- ACCESS_ONCE(trans->transaction->aborted) = errno;
61029+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
61030 /* Wake up anybody who may be waiting on this transaction */
61031 wake_up(&root->fs_info->transaction_wait);
61032 wake_up(&root->fs_info->transaction_blocked_wait);
61033diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
61034index 7869936..7e153dc 100644
61035--- a/fs/btrfs/sysfs.c
61036+++ b/fs/btrfs/sysfs.c
61037@@ -475,7 +475,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
61038 for (set = 0; set < FEAT_MAX; set++) {
61039 int i;
61040 struct attribute *attrs[2];
61041- struct attribute_group agroup = {
61042+ attribute_group_no_const agroup = {
61043 .name = "features",
61044 .attrs = attrs,
61045 };
61046diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
61047index 7f5b41b..e589c13 100644
61048--- a/fs/btrfs/tree-log.h
61049+++ b/fs/btrfs/tree-log.h
61050@@ -41,7 +41,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
61051 static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
61052 struct btrfs_trans_handle *trans)
61053 {
61054- ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
61055+ ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
61056 }
61057
61058 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
61059diff --git a/fs/buffer.c b/fs/buffer.c
61060index eba6e4f..af1182c 100644
61061--- a/fs/buffer.c
61062+++ b/fs/buffer.c
61063@@ -3429,7 +3429,7 @@ void __init buffer_init(void)
61064 bh_cachep = kmem_cache_create("buffer_head",
61065 sizeof(struct buffer_head), 0,
61066 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
61067- SLAB_MEM_SPREAD),
61068+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
61069 NULL);
61070
61071 /*
61072diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
61073index d749731..dd333a6 100644
61074--- a/fs/cachefiles/bind.c
61075+++ b/fs/cachefiles/bind.c
61076@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
61077 args);
61078
61079 /* start by checking things over */
61080- ASSERT(cache->fstop_percent >= 0 &&
61081- cache->fstop_percent < cache->fcull_percent &&
61082+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
61083 cache->fcull_percent < cache->frun_percent &&
61084 cache->frun_percent < 100);
61085
61086- ASSERT(cache->bstop_percent >= 0 &&
61087- cache->bstop_percent < cache->bcull_percent &&
61088+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
61089 cache->bcull_percent < cache->brun_percent &&
61090 cache->brun_percent < 100);
61091
61092diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
61093index b078d30..db23012 100644
61094--- a/fs/cachefiles/daemon.c
61095+++ b/fs/cachefiles/daemon.c
61096@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
61097 if (n > buflen)
61098 return -EMSGSIZE;
61099
61100- if (copy_to_user(_buffer, buffer, n) != 0)
61101+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
61102 return -EFAULT;
61103
61104 return n;
61105@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
61106 if (test_bit(CACHEFILES_DEAD, &cache->flags))
61107 return -EIO;
61108
61109- if (datalen < 0 || datalen > PAGE_SIZE - 1)
61110+ if (datalen > PAGE_SIZE - 1)
61111 return -EOPNOTSUPP;
61112
61113 /* drag the command string into the kernel so we can parse it */
61114@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
61115 if (args[0] != '%' || args[1] != '\0')
61116 return -EINVAL;
61117
61118- if (fstop < 0 || fstop >= cache->fcull_percent)
61119+ if (fstop >= cache->fcull_percent)
61120 return cachefiles_daemon_range_error(cache, args);
61121
61122 cache->fstop_percent = fstop;
61123@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
61124 if (args[0] != '%' || args[1] != '\0')
61125 return -EINVAL;
61126
61127- if (bstop < 0 || bstop >= cache->bcull_percent)
61128+ if (bstop >= cache->bcull_percent)
61129 return cachefiles_daemon_range_error(cache, args);
61130
61131 cache->bstop_percent = bstop;
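Several hunks above, the bind.c ASSERTs and daemon.c's datalen, fstop, and bstop checks, drop comparisons of the form x < 0 or x >= 0 where x is unsigned: those tests are compile-time constants, and gcc flags them under -Wtype-limits. A minimal demonstration of why the lower-bound check is dead code and only the upper bound matters:

#include <stdio.h>

int main(void)
{
	unsigned int datalen = (unsigned int)-5;	/* a "negative" value wraps */

	/* With -Wtype-limits gcc warns: comparison of unsigned expression < 0
	 * is always false; this branch can never be taken. */
	if (datalen < 0)
		puts("never printed");

	/* The meaningful bound is the upper one, as in the fixed code. */
	if (datalen > 4096 - 1)
		puts("rejected: too large");
	return 0;
}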
61132diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
61133index 3d50998..0550d67 100644
61134--- a/fs/cachefiles/internal.h
61135+++ b/fs/cachefiles/internal.h
61136@@ -66,7 +66,7 @@ struct cachefiles_cache {
61137 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
61138 struct rb_root active_nodes; /* active nodes (can't be culled) */
61139 rwlock_t active_lock; /* lock for active_nodes */
61140- atomic_t gravecounter; /* graveyard uniquifier */
61141+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
61142 unsigned frun_percent; /* when to stop culling (% files) */
61143 unsigned fcull_percent; /* when to start culling (% files) */
61144 unsigned fstop_percent; /* when to stop allocating (% files) */
61145@@ -178,19 +178,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
61146 * proc.c
61147 */
61148 #ifdef CONFIG_CACHEFILES_HISTOGRAM
61149-extern atomic_t cachefiles_lookup_histogram[HZ];
61150-extern atomic_t cachefiles_mkdir_histogram[HZ];
61151-extern atomic_t cachefiles_create_histogram[HZ];
61152+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
61153+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
61154+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
61155
61156 extern int __init cachefiles_proc_init(void);
61157 extern void cachefiles_proc_cleanup(void);
61158 static inline
61159-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
61160+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
61161 {
61162 unsigned long jif = jiffies - start_jif;
61163 if (jif >= HZ)
61164 jif = HZ - 1;
61165- atomic_inc(&histogram[jif]);
61166+ atomic_inc_unchecked(&histogram[jif]);
61167 }
61168
61169 #else
61170diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
61171index 5bf2b41..85b93f9 100644
61172--- a/fs/cachefiles/namei.c
61173+++ b/fs/cachefiles/namei.c
61174@@ -312,7 +312,7 @@ try_again:
61175 /* first step is to make up a grave dentry in the graveyard */
61176 sprintf(nbuffer, "%08x%08x",
61177 (uint32_t) get_seconds(),
61178- (uint32_t) atomic_inc_return(&cache->gravecounter));
61179+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
61180
61181 /* do the multiway lock magic */
61182 trap = lock_rename(cache->graveyard, dir);
61183diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
61184index eccd339..4c1d995 100644
61185--- a/fs/cachefiles/proc.c
61186+++ b/fs/cachefiles/proc.c
61187@@ -14,9 +14,9 @@
61188 #include <linux/seq_file.h>
61189 #include "internal.h"
61190
61191-atomic_t cachefiles_lookup_histogram[HZ];
61192-atomic_t cachefiles_mkdir_histogram[HZ];
61193-atomic_t cachefiles_create_histogram[HZ];
61194+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
61195+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
61196+atomic_unchecked_t cachefiles_create_histogram[HZ];
61197
61198 /*
61199 * display the latency histogram
61200@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
61201 return 0;
61202 default:
61203 index = (unsigned long) v - 3;
61204- x = atomic_read(&cachefiles_lookup_histogram[index]);
61205- y = atomic_read(&cachefiles_mkdir_histogram[index]);
61206- z = atomic_read(&cachefiles_create_histogram[index]);
61207+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
61208+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
61209+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
61210 if (x == 0 && y == 0 && z == 0)
61211 return 0;
61212
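The cachefiles_hist() helper touched above buckets a latency measured in jiffies into a fixed HZ-entry histogram, clamping anything at or beyond one second into the last bucket. The same shape in plain C, with a hypothetical TICKS_PER_SEC standing in for HZ:

#include <stdio.h>

#define TICKS_PER_SEC 100	/* stand-in for the kernel's HZ */

static unsigned long histogram[TICKS_PER_SEC];

static void hist_record(unsigned long start_tick, unsigned long now_tick)
{
	unsigned long delta = now_tick - start_tick;	/* wrap-safe with unsigned math */

	if (delta >= TICKS_PER_SEC)
		delta = TICKS_PER_SEC - 1;	/* clamp: last bucket means ">= 1 second" */
	histogram[delta]++;
}

int main(void)
{
	hist_record(10, 15);	/* 5 ticks -> bucket 5 */
	hist_record(0, 500);	/* clamped -> last bucket */
	printf("%lu %lu\n", histogram[5], histogram[TICKS_PER_SEC - 1]);
	return 0;
}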
61213diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
61214index 4b1fb5c..0d2a699 100644
61215--- a/fs/cachefiles/rdwr.c
61216+++ b/fs/cachefiles/rdwr.c
61217@@ -943,7 +943,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
61218 old_fs = get_fs();
61219 set_fs(KERNEL_DS);
61220 ret = file->f_op->write(
61221- file, (const void __user *) data, len, &pos);
61222+ file, (const void __force_user *) data, len, &pos);
61223 set_fs(old_fs);
61224 kunmap(page);
61225 file_end_write(file);
61226diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
61227index c29d6ae..719b9bb 100644
61228--- a/fs/ceph/dir.c
61229+++ b/fs/ceph/dir.c
61230@@ -129,6 +129,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
61231 struct dentry *dentry, *last;
61232 struct ceph_dentry_info *di;
61233 int err = 0;
61234+ char d_name[DNAME_INLINE_LEN];
61235+ const unsigned char *name;
61236
61237 /* claim ref on last dentry we returned */
61238 last = fi->dentry;
61239@@ -192,7 +194,12 @@ more:
61240
61241 dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
61242 dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
61243- if (!dir_emit(ctx, dentry->d_name.name,
61244+ name = dentry->d_name.name;
61245+ if (name == dentry->d_iname) {
61246+ memcpy(d_name, name, dentry->d_name.len);
61247+ name = d_name;
61248+ }
61249+ if (!dir_emit(ctx, name,
61250 dentry->d_name.len,
61251 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
61252 dentry->d_inode->i_mode >> 12)) {
61253@@ -250,7 +257,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
61254 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
61255 struct ceph_mds_client *mdsc = fsc->mdsc;
61256 unsigned frag = fpos_frag(ctx->pos);
61257- int off = fpos_off(ctx->pos);
61258+ unsigned int off = fpos_off(ctx->pos);
61259 int err;
61260 u32 ftype;
61261 struct ceph_mds_reply_info_parsed *rinfo;
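The dir.c hunk copies short names out of dentry->d_iname before calling dir_emit(): the inline name storage can be rewritten by a concurrent rename, so the readdir path takes a private snapshot first. A sketch of that snapshot-before-use pattern with hypothetical types:

#include <stdio.h>
#include <string.h>

#define NAME_INLINE_LEN 32

struct entry {
	const char *name;		/* may point at the inline storage below */
	unsigned int len;
	char iname[NAME_INLINE_LEN];	/* mutable under a concurrent rename */
};

static void emit(const char *name, unsigned int len)
{
	printf("%.*s\n", (int)len, name);
}

static void emit_entry(const struct entry *e)
{
	char snap[NAME_INLINE_LEN];
	const char *name = e->name;

	if (name == e->iname) {		/* short name lives in mutable storage */
		memcpy(snap, name, e->len);	/* take a stable private copy */
		name = snap;
	}
	emit(name, e->len);		/* safe even if e->iname changes now */
}

int main(void)
{
	struct entry e = { .len = 5 };
	memcpy(e.iname, "hello", 5);
	e.name = e.iname;
	emit_entry(&e);
	return 0;
}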
61262diff --git a/fs/ceph/super.c b/fs/ceph/super.c
61263index 06150fd..192061b 100644
61264--- a/fs/ceph/super.c
61265+++ b/fs/ceph/super.c
61266@@ -895,7 +895,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
61267 /*
61268 * construct our own bdi so we can control readahead, etc.
61269 */
61270-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
61271+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
61272
61273 static int ceph_register_bdi(struct super_block *sb,
61274 struct ceph_fs_client *fsc)
61275@@ -912,7 +912,7 @@ static int ceph_register_bdi(struct super_block *sb,
61276 default_backing_dev_info.ra_pages;
61277
61278 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
61279- atomic_long_inc_return(&bdi_seq));
61280+ atomic_long_inc_return_unchecked(&bdi_seq));
61281 if (!err)
61282 sb->s_bdi = &fsc->backing_dev_info;
61283 return err;
61284diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
61285index f3ac415..3d2420c 100644
61286--- a/fs/cifs/cifs_debug.c
61287+++ b/fs/cifs/cifs_debug.c
61288@@ -286,8 +286,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
61289
61290 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
61291 #ifdef CONFIG_CIFS_STATS2
61292- atomic_set(&totBufAllocCount, 0);
61293- atomic_set(&totSmBufAllocCount, 0);
61294+ atomic_set_unchecked(&totBufAllocCount, 0);
61295+ atomic_set_unchecked(&totSmBufAllocCount, 0);
61296 #endif /* CONFIG_CIFS_STATS2 */
61297 spin_lock(&cifs_tcp_ses_lock);
61298 list_for_each(tmp1, &cifs_tcp_ses_list) {
61299@@ -300,7 +300,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
61300 tcon = list_entry(tmp3,
61301 struct cifs_tcon,
61302 tcon_list);
61303- atomic_set(&tcon->num_smbs_sent, 0);
61304+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
61305 if (server->ops->clear_stats)
61306 server->ops->clear_stats(tcon);
61307 }
61308@@ -332,8 +332,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
61309 smBufAllocCount.counter, cifs_min_small);
61310 #ifdef CONFIG_CIFS_STATS2
61311 seq_printf(m, "Total Large %d Small %d Allocations\n",
61312- atomic_read(&totBufAllocCount),
61313- atomic_read(&totSmBufAllocCount));
61314+ atomic_read_unchecked(&totBufAllocCount),
61315+ atomic_read_unchecked(&totSmBufAllocCount));
61316 #endif /* CONFIG_CIFS_STATS2 */
61317
61318 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
61319@@ -362,7 +362,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
61320 if (tcon->need_reconnect)
61321 seq_puts(m, "\tDISCONNECTED ");
61322 seq_printf(m, "\nSMBs: %d",
61323- atomic_read(&tcon->num_smbs_sent));
61324+ atomic_read_unchecked(&tcon->num_smbs_sent));
61325 if (server->ops->print_stats)
61326 server->ops->print_stats(m, tcon);
61327 }
61328diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
61329index 8883980..c8ade72 100644
61330--- a/fs/cifs/cifsfs.c
61331+++ b/fs/cifs/cifsfs.c
61332@@ -1072,7 +1072,7 @@ cifs_init_request_bufs(void)
61333 */
61334 cifs_req_cachep = kmem_cache_create("cifs_request",
61335 CIFSMaxBufSize + max_hdr_size, 0,
61336- SLAB_HWCACHE_ALIGN, NULL);
61337+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
61338 if (cifs_req_cachep == NULL)
61339 return -ENOMEM;
61340
61341@@ -1099,7 +1099,7 @@ cifs_init_request_bufs(void)
61342 efficient to alloc 1 per page off the slab compared to 17K (5page)
61343 alloc of large cifs buffers even when page debugging is on */
61344 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
61345- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
61346+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
61347 NULL);
61348 if (cifs_sm_req_cachep == NULL) {
61349 mempool_destroy(cifs_req_poolp);
61350@@ -1184,8 +1184,8 @@ init_cifs(void)
61351 atomic_set(&bufAllocCount, 0);
61352 atomic_set(&smBufAllocCount, 0);
61353 #ifdef CONFIG_CIFS_STATS2
61354- atomic_set(&totBufAllocCount, 0);
61355- atomic_set(&totSmBufAllocCount, 0);
61356+ atomic_set_unchecked(&totBufAllocCount, 0);
61357+ atomic_set_unchecked(&totSmBufAllocCount, 0);
61358 #endif /* CONFIG_CIFS_STATS2 */
61359
61360 atomic_set(&midCount, 0);
61361diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
61362index c97fd86..e4a8274 100644
61363--- a/fs/cifs/cifsglob.h
61364+++ b/fs/cifs/cifsglob.h
61365@@ -804,35 +804,35 @@ struct cifs_tcon {
61366 __u16 Flags; /* optional support bits */
61367 enum statusEnum tidStatus;
61368 #ifdef CONFIG_CIFS_STATS
61369- atomic_t num_smbs_sent;
61370+ atomic_unchecked_t num_smbs_sent;
61371 union {
61372 struct {
61373- atomic_t num_writes;
61374- atomic_t num_reads;
61375- atomic_t num_flushes;
61376- atomic_t num_oplock_brks;
61377- atomic_t num_opens;
61378- atomic_t num_closes;
61379- atomic_t num_deletes;
61380- atomic_t num_mkdirs;
61381- atomic_t num_posixopens;
61382- atomic_t num_posixmkdirs;
61383- atomic_t num_rmdirs;
61384- atomic_t num_renames;
61385- atomic_t num_t2renames;
61386- atomic_t num_ffirst;
61387- atomic_t num_fnext;
61388- atomic_t num_fclose;
61389- atomic_t num_hardlinks;
61390- atomic_t num_symlinks;
61391- atomic_t num_locks;
61392- atomic_t num_acl_get;
61393- atomic_t num_acl_set;
61394+ atomic_unchecked_t num_writes;
61395+ atomic_unchecked_t num_reads;
61396+ atomic_unchecked_t num_flushes;
61397+ atomic_unchecked_t num_oplock_brks;
61398+ atomic_unchecked_t num_opens;
61399+ atomic_unchecked_t num_closes;
61400+ atomic_unchecked_t num_deletes;
61401+ atomic_unchecked_t num_mkdirs;
61402+ atomic_unchecked_t num_posixopens;
61403+ atomic_unchecked_t num_posixmkdirs;
61404+ atomic_unchecked_t num_rmdirs;
61405+ atomic_unchecked_t num_renames;
61406+ atomic_unchecked_t num_t2renames;
61407+ atomic_unchecked_t num_ffirst;
61408+ atomic_unchecked_t num_fnext;
61409+ atomic_unchecked_t num_fclose;
61410+ atomic_unchecked_t num_hardlinks;
61411+ atomic_unchecked_t num_symlinks;
61412+ atomic_unchecked_t num_locks;
61413+ atomic_unchecked_t num_acl_get;
61414+ atomic_unchecked_t num_acl_set;
61415 } cifs_stats;
61416 #ifdef CONFIG_CIFS_SMB2
61417 struct {
61418- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
61419- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
61420+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
61421+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
61422 } smb2_stats;
61423 #endif /* CONFIG_CIFS_SMB2 */
61424 } stats;
61425@@ -1169,7 +1169,7 @@ convert_delimiter(char *path, char delim)
61426 }
61427
61428 #ifdef CONFIG_CIFS_STATS
61429-#define cifs_stats_inc atomic_inc
61430+#define cifs_stats_inc atomic_inc_unchecked
61431
61432 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
61433 unsigned int bytes)
61434@@ -1535,8 +1535,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
61435 /* Various Debug counters */
61436 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
61437 #ifdef CONFIG_CIFS_STATS2
61438-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
61439-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
61440+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
61441+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
61442 #endif
61443 GLOBAL_EXTERN atomic_t smBufAllocCount;
61444 GLOBAL_EXTERN atomic_t midCount;
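Note how cifsglob.h retargets every statistics increment in one line by redefining the cifs_stats_inc macro to the *_unchecked variant; no call site changes. The same single-point-of-swap trick in standard C, sketched with C11 atomics (here the two variants differ only in memory ordering, purely for illustration):

#include <stdatomic.h>
#include <stdio.h>

static inline void counter_inc_checked(atomic_int *c)
{
	atomic_fetch_add(c, 1);
}

static inline void counter_inc_unchecked(atomic_int *c)
{
	atomic_fetch_add_explicit(c, 1, memory_order_relaxed);
}

/* One line retargets every call site, as the patch does with
 * "#define cifs_stats_inc atomic_inc_unchecked". */
#define stats_inc counter_inc_unchecked

static atomic_int num_opens, num_closes;

int main(void)
{
	stats_inc(&num_opens);
	stats_inc(&num_closes);
	printf("%d %d\n", atomic_load(&num_opens), atomic_load(&num_closes));
	return 0;
}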
61445diff --git a/fs/cifs/file.c b/fs/cifs/file.c
61446index 9de08c9..b396124 100644
61447--- a/fs/cifs/file.c
61448+++ b/fs/cifs/file.c
61449@@ -1900,10 +1900,14 @@ static int cifs_writepages(struct address_space *mapping,
61450 index = mapping->writeback_index; /* Start from prev offset */
61451 end = -1;
61452 } else {
61453- index = wbc->range_start >> PAGE_CACHE_SHIFT;
61454- end = wbc->range_end >> PAGE_CACHE_SHIFT;
61455- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
61456+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
61457 range_whole = true;
61458+ index = 0;
61459+ end = ULONG_MAX;
61460+ } else {
61461+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
61462+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
61463+ }
61464 scanned = true;
61465 }
61466 retry:
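The file.c writeback hunk recognises the (0, LLONG_MAX) "whole file" sentinel before deriving page indices from the byte offsets, rather than shifting first and checking afterwards. A sketch of the byte-range to page-index conversion with the sentinel handled up front, using a hypothetical PAGE_SHIFT:

#include <limits.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Map a byte range onto inclusive page indices, recognising the
 * (0, LLONG_MAX) "whole file" sentinel before shifting, as the fix does. */
static void range_to_pages(long long start, long long end,
			   unsigned long *first, unsigned long *last)
{
	if (start == 0 && end == LLONG_MAX) {
		*first = 0;
		*last = ULONG_MAX;	/* scan everything */
	} else {
		*first = (unsigned long)(start >> PAGE_SHIFT);
		*last = (unsigned long)(end >> PAGE_SHIFT);
	}
}

int main(void)
{
	unsigned long first, last;

	range_to_pages(0, LLONG_MAX, &first, &last);
	printf("%lu %lu\n", first, last);
	range_to_pages(8192, 16384, &first, &last);
	printf("%lu %lu\n", first, last);	/* 2 4 */
	return 0;
}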
61467diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
61468index 3b0c62e..f7d090c 100644
61469--- a/fs/cifs/misc.c
61470+++ b/fs/cifs/misc.c
61471@@ -170,7 +170,7 @@ cifs_buf_get(void)
61472 memset(ret_buf, 0, buf_size + 3);
61473 atomic_inc(&bufAllocCount);
61474 #ifdef CONFIG_CIFS_STATS2
61475- atomic_inc(&totBufAllocCount);
61476+ atomic_inc_unchecked(&totBufAllocCount);
61477 #endif /* CONFIG_CIFS_STATS2 */
61478 }
61479
61480@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
61481 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
61482 atomic_inc(&smBufAllocCount);
61483 #ifdef CONFIG_CIFS_STATS2
61484- atomic_inc(&totSmBufAllocCount);
61485+ atomic_inc_unchecked(&totSmBufAllocCount);
61486 #endif /* CONFIG_CIFS_STATS2 */
61487
61488 }
61489diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
61490index 84ca0a4..add8cba 100644
61491--- a/fs/cifs/smb1ops.c
61492+++ b/fs/cifs/smb1ops.c
61493@@ -626,27 +626,27 @@ static void
61494 cifs_clear_stats(struct cifs_tcon *tcon)
61495 {
61496 #ifdef CONFIG_CIFS_STATS
61497- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
61498- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
61499- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
61500- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
61501- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
61502- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
61503- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
61504- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
61505- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
61506- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
61507- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
61508- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
61509- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
61510- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
61511- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
61512- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
61513- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
61514- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
61515- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
61516- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
61517- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
61518+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
61519+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
61520+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
61521+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
61522+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
61523+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
61524+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
61525+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
61526+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
61527+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
61528+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
61529+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
61530+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
61531+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
61532+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
61533+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
61534+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
61535+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
61536+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
61537+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
61538+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
61539 #endif
61540 }
61541
61542@@ -655,36 +655,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
61543 {
61544 #ifdef CONFIG_CIFS_STATS
61545 seq_printf(m, " Oplocks breaks: %d",
61546- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
61547+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
61548 seq_printf(m, "\nReads: %d Bytes: %llu",
61549- atomic_read(&tcon->stats.cifs_stats.num_reads),
61550+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
61551 (long long)(tcon->bytes_read));
61552 seq_printf(m, "\nWrites: %d Bytes: %llu",
61553- atomic_read(&tcon->stats.cifs_stats.num_writes),
61554+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
61555 (long long)(tcon->bytes_written));
61556 seq_printf(m, "\nFlushes: %d",
61557- atomic_read(&tcon->stats.cifs_stats.num_flushes));
61558+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
61559 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
61560- atomic_read(&tcon->stats.cifs_stats.num_locks),
61561- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
61562- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
61563+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
61564+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
61565+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
61566 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
61567- atomic_read(&tcon->stats.cifs_stats.num_opens),
61568- atomic_read(&tcon->stats.cifs_stats.num_closes),
61569- atomic_read(&tcon->stats.cifs_stats.num_deletes));
61570+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
61571+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
61572+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
61573 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
61574- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
61575- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
61576+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
61577+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
61578 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
61579- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
61580- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
61581+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
61582+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
61583 seq_printf(m, "\nRenames: %d T2 Renames %d",
61584- atomic_read(&tcon->stats.cifs_stats.num_renames),
61585- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
61586+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
61587+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
61588 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
61589- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
61590- atomic_read(&tcon->stats.cifs_stats.num_fnext),
61591- atomic_read(&tcon->stats.cifs_stats.num_fclose));
61592+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
61593+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
61594+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
61595 #endif
61596 }
61597
61598diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
61599index f325c59..6bba517 100644
61600--- a/fs/cifs/smb2ops.c
61601+++ b/fs/cifs/smb2ops.c
61602@@ -364,8 +364,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
61603 #ifdef CONFIG_CIFS_STATS
61604 int i;
61605 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
61606- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
61607- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
61608+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
61609+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
61610 }
61611 #endif
61612 }
61613@@ -405,65 +405,65 @@ static void
61614 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
61615 {
61616 #ifdef CONFIG_CIFS_STATS
61617- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
61618- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
61619+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
61620+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
61621 seq_printf(m, "\nNegotiates: %d sent %d failed",
61622- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
61623- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
61624+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
61625+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
61626 seq_printf(m, "\nSessionSetups: %d sent %d failed",
61627- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
61628- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
61629+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
61630+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
61631 seq_printf(m, "\nLogoffs: %d sent %d failed",
61632- atomic_read(&sent[SMB2_LOGOFF_HE]),
61633- atomic_read(&failed[SMB2_LOGOFF_HE]));
61634+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
61635+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
61636 seq_printf(m, "\nTreeConnects: %d sent %d failed",
61637- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
61638- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
61639+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
61640+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
61641 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
61642- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
61643- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
61644+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
61645+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
61646 seq_printf(m, "\nCreates: %d sent %d failed",
61647- atomic_read(&sent[SMB2_CREATE_HE]),
61648- atomic_read(&failed[SMB2_CREATE_HE]));
61649+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
61650+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
61651 seq_printf(m, "\nCloses: %d sent %d failed",
61652- atomic_read(&sent[SMB2_CLOSE_HE]),
61653- atomic_read(&failed[SMB2_CLOSE_HE]));
61654+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
61655+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
61656 seq_printf(m, "\nFlushes: %d sent %d failed",
61657- atomic_read(&sent[SMB2_FLUSH_HE]),
61658- atomic_read(&failed[SMB2_FLUSH_HE]));
61659+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
61660+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
61661 seq_printf(m, "\nReads: %d sent %d failed",
61662- atomic_read(&sent[SMB2_READ_HE]),
61663- atomic_read(&failed[SMB2_READ_HE]));
61664+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
61665+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
61666 seq_printf(m, "\nWrites: %d sent %d failed",
61667- atomic_read(&sent[SMB2_WRITE_HE]),
61668- atomic_read(&failed[SMB2_WRITE_HE]));
61669+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
61670+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
61671 seq_printf(m, "\nLocks: %d sent %d failed",
61672- atomic_read(&sent[SMB2_LOCK_HE]),
61673- atomic_read(&failed[SMB2_LOCK_HE]));
61674+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
61675+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
61676 seq_printf(m, "\nIOCTLs: %d sent %d failed",
61677- atomic_read(&sent[SMB2_IOCTL_HE]),
61678- atomic_read(&failed[SMB2_IOCTL_HE]));
61679+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
61680+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
61681 seq_printf(m, "\nCancels: %d sent %d failed",
61682- atomic_read(&sent[SMB2_CANCEL_HE]),
61683- atomic_read(&failed[SMB2_CANCEL_HE]));
61684+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
61685+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
61686 seq_printf(m, "\nEchos: %d sent %d failed",
61687- atomic_read(&sent[SMB2_ECHO_HE]),
61688- atomic_read(&failed[SMB2_ECHO_HE]));
61689+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
61690+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
61691 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
61692- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
61693- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
61694+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
61695+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
61696 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
61697- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
61698- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
61699+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
61700+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
61701 seq_printf(m, "\nQueryInfos: %d sent %d failed",
61702- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
61703- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
61704+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
61705+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
61706 seq_printf(m, "\nSetInfos: %d sent %d failed",
61707- atomic_read(&sent[SMB2_SET_INFO_HE]),
61708- atomic_read(&failed[SMB2_SET_INFO_HE]));
61709+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
61710+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
61711 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
61712- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
61713- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
61714+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
61715+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
61716 #endif
61717 }
61718
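Aside: every field touched in the smb2ops.c hunks above is a pure statistic. The patch's PAX_REFCOUNT feature instruments ordinary atomic_t arithmetic to catch reference-count overflows, so counters that are allowed to wrap harmlessly, like these per-command SMB2 stats, are moved to *_unchecked variants that skip the check. A simplified sketch of what those variants amount to on x86 (the real definitions live elsewhere in this patch, in the types and arch atomic headers; treat this as illustrative, not the patch's exact code):

typedef struct {
	int counter;
} atomic_unchecked_t;

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return (*(volatile const int *)&v->counter);
}

static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
{
	v->counter = i;
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* same lock-prefixed increment as atomic_inc(), minus the
	 * overflow-detection instrumentation PAX_REFCOUNT adds */
	asm volatile("lock; incl %0" : "+m" (v->counter));
}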
61719diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
61720index 8707755..8f38739 100644
61721--- a/fs/cifs/smb2pdu.c
61722+++ b/fs/cifs/smb2pdu.c
61723@@ -2106,8 +2106,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
61724 default:
61725 cifs_dbg(VFS, "info level %u isn't supported\n",
61726 srch_inf->info_level);
61727- rc = -EINVAL;
61728- goto qdir_exit;
61729+ return -EINVAL;
61730 }
61731
61732 req->FileIndex = cpu_to_le32(index);
61733diff --git a/fs/coda/cache.c b/fs/coda/cache.c
61734index 1da168c..8bc7ff6 100644
61735--- a/fs/coda/cache.c
61736+++ b/fs/coda/cache.c
61737@@ -24,7 +24,7 @@
61738 #include "coda_linux.h"
61739 #include "coda_cache.h"
61740
61741-static atomic_t permission_epoch = ATOMIC_INIT(0);
61742+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
61743
61744 /* replace or extend an acl cache hit */
61745 void coda_cache_enter(struct inode *inode, int mask)
61746@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
61747 struct coda_inode_info *cii = ITOC(inode);
61748
61749 spin_lock(&cii->c_lock);
61750- cii->c_cached_epoch = atomic_read(&permission_epoch);
61751+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
61752 if (!uid_eq(cii->c_uid, current_fsuid())) {
61753 cii->c_uid = current_fsuid();
61754 cii->c_cached_perm = mask;
61755@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
61756 {
61757 struct coda_inode_info *cii = ITOC(inode);
61758 spin_lock(&cii->c_lock);
61759- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
61760+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
61761 spin_unlock(&cii->c_lock);
61762 }
61763
61764 /* remove all acl caches */
61765 void coda_cache_clear_all(struct super_block *sb)
61766 {
61767- atomic_inc(&permission_epoch);
61768+ atomic_inc_unchecked(&permission_epoch);
61769 }
61770
61771
61772@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
61773 spin_lock(&cii->c_lock);
61774 hit = (mask & cii->c_cached_perm) == mask &&
61775 uid_eq(cii->c_uid, current_fsuid()) &&
61776- cii->c_cached_epoch == atomic_read(&permission_epoch);
61777+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
61778 spin_unlock(&cii->c_lock);
61779
61780 return hit;
61781diff --git a/fs/compat.c b/fs/compat.c
61782index 66d3d3c..9c10175 100644
61783--- a/fs/compat.c
61784+++ b/fs/compat.c
61785@@ -54,7 +54,7 @@
61786 #include <asm/ioctls.h>
61787 #include "internal.h"
61788
61789-int compat_log = 1;
61790+int compat_log = 0;
61791
61792 int compat_printk(const char *fmt, ...)
61793 {
61794@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
61795
61796 set_fs(KERNEL_DS);
61797 /* The __user pointer cast is valid because of the set_fs() */
61798- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
61799+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
61800 set_fs(oldfs);
61801 /* truncating is ok because it's a user address */
61802 if (!ret)
61803@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
61804 goto out;
61805
61806 ret = -EINVAL;
61807- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
61808+ if (nr_segs > UIO_MAXIOV)
61809 goto out;
61810 if (nr_segs > fast_segs) {
61811 ret = -ENOMEM;
61812@@ -850,6 +850,7 @@ struct compat_old_linux_dirent {
61813 struct compat_readdir_callback {
61814 struct dir_context ctx;
61815 struct compat_old_linux_dirent __user *dirent;
61816+ struct file * file;
61817 int result;
61818 };
61819
61820@@ -867,6 +868,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
61821 buf->result = -EOVERFLOW;
61822 return -EOVERFLOW;
61823 }
61824+
61825+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61826+ return 0;
61827+
61828 buf->result++;
61829 dirent = buf->dirent;
61830 if (!access_ok(VERIFY_WRITE, dirent,
61831@@ -898,6 +903,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
61832 if (!f.file)
61833 return -EBADF;
61834
61835+ buf.file = f.file;
61836 error = iterate_dir(f.file, &buf.ctx);
61837 if (buf.result)
61838 error = buf.result;
61839@@ -917,6 +923,7 @@ struct compat_getdents_callback {
61840 struct dir_context ctx;
61841 struct compat_linux_dirent __user *current_dir;
61842 struct compat_linux_dirent __user *previous;
61843+ struct file * file;
61844 int count;
61845 int error;
61846 };
61847@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
61848 buf->error = -EOVERFLOW;
61849 return -EOVERFLOW;
61850 }
61851+
61852+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61853+ return 0;
61854+
61855 dirent = buf->previous;
61856 if (dirent) {
61857 if (__put_user(offset, &dirent->d_off))
61858@@ -983,6 +994,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
61859 if (!f.file)
61860 return -EBADF;
61861
61862+ buf.file = f.file;
61863 error = iterate_dir(f.file, &buf.ctx);
61864 if (error >= 0)
61865 error = buf.error;
61866@@ -1003,6 +1015,7 @@ struct compat_getdents_callback64 {
61867 struct dir_context ctx;
61868 struct linux_dirent64 __user *current_dir;
61869 struct linux_dirent64 __user *previous;
61870+ struct file * file;
61871 int count;
61872 int error;
61873 };
61874@@ -1019,6 +1032,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
61875 buf->error = -EINVAL; /* only used if we fail.. */
61876 if (reclen > buf->count)
61877 return -EINVAL;
61878+
61879+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61880+ return 0;
61881+
61882 dirent = buf->previous;
61883
61884 if (dirent) {
61885@@ -1068,6 +1085,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
61886 if (!f.file)
61887 return -EBADF;
61888
61889+ buf.file = f.file;
61890 error = iterate_dir(f.file, &buf.ctx);
61891 if (error >= 0)
61892 error = buf.error;
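All three compat readdir callbacks above gain the same two additions: the struct file is stashed in the callback buffer, and gr_acl_handle_filldir() (implemented elsewhere in this patch, not in this section) gets a chance to return 0 and silently drop an entry the RBAC policy hides from the current subject. A userspace analogy of that filtering pattern, only to make the control flow concrete:

#include <dirent.h>
#include <stdio.h>
#include <string.h>

/* stand-in for gr_acl_handle_filldir(): return 0 to hide an entry */
static int entry_visible(const struct dirent *d)
{
	return strncmp(d->d_name, ".", 1) != 0;	/* toy policy: hide dotfiles */
}

int main(void)
{
	DIR *dir = opendir(".");
	struct dirent *d;

	if (!dir)
		return 1;
	while ((d = readdir(dir)) != NULL)
		if (entry_visible(d))	/* a 0 here skips the entry, like the hook */
			puts(d->d_name);
	closedir(dir);
	return 0;
}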
61893diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
61894index 4d24d17..4f8c09e 100644
61895--- a/fs/compat_binfmt_elf.c
61896+++ b/fs/compat_binfmt_elf.c
61897@@ -30,11 +30,13 @@
61898 #undef elf_phdr
61899 #undef elf_shdr
61900 #undef elf_note
61901+#undef elf_dyn
61902 #undef elf_addr_t
61903 #define elfhdr elf32_hdr
61904 #define elf_phdr elf32_phdr
61905 #define elf_shdr elf32_shdr
61906 #define elf_note elf32_note
61907+#define elf_dyn Elf32_Dyn
61908 #define elf_addr_t Elf32_Addr
61909
61910 /*
61911diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
61912index e822890..fed89d9 100644
61913--- a/fs/compat_ioctl.c
61914+++ b/fs/compat_ioctl.c
61915@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
61916 return -EFAULT;
61917 if (__get_user(udata, &ss32->iomem_base))
61918 return -EFAULT;
61919- ss.iomem_base = compat_ptr(udata);
61920+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
61921 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
61922 __get_user(ss.port_high, &ss32->port_high))
61923 return -EFAULT;
61924@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
61925 for (i = 0; i < nmsgs; i++) {
61926 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
61927 return -EFAULT;
61928- if (get_user(datap, &umsgs[i].buf) ||
61929- put_user(compat_ptr(datap), &tmsgs[i].buf))
61930+ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
61931+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
61932 return -EFAULT;
61933 }
61934 return sys_ioctl(fd, cmd, (unsigned long)tdata);
61935@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
61936 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
61937 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
61938 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
61939- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
61940+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
61941 return -EFAULT;
61942
61943 return ioctl_preallocate(file, p);
61944@@ -1618,8 +1618,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
61945 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
61946 {
61947 unsigned int a, b;
61948- a = *(unsigned int *)p;
61949- b = *(unsigned int *)q;
61950+ a = *(const unsigned int *)p;
61951+ b = *(const unsigned int *)q;
61952 if (a > b)
61953 return 1;
61954 if (a < b)
61955diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
61956index 668dcab..daebcd6 100644
61957--- a/fs/configfs/dir.c
61958+++ b/fs/configfs/dir.c
61959@@ -1548,7 +1548,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
61960 }
61961 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
61962 struct configfs_dirent *next;
61963- const char *name;
61964+ const unsigned char * name;
61965+ char d_name[sizeof(next->s_dentry->d_iname)];
61966 int len;
61967 struct inode *inode = NULL;
61968
61969@@ -1557,7 +1558,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
61970 continue;
61971
61972 name = configfs_get_name(next);
61973- len = strlen(name);
61974+ if (next->s_dentry && name == next->s_dentry->d_iname) {
61975+ len = next->s_dentry->d_name.len;
61976+ memcpy(d_name, name, len);
61977+ name = d_name;
61978+ } else
61979+ len = strlen(name);
61980
61981 /*
61982 * We'll have a dentry and an inode for
61983diff --git a/fs/coredump.c b/fs/coredump.c
61984index a93f7e6..d58bcbe 100644
61985--- a/fs/coredump.c
61986+++ b/fs/coredump.c
61987@@ -442,8 +442,8 @@ static void wait_for_dump_helpers(struct file *file)
61988 struct pipe_inode_info *pipe = file->private_data;
61989
61990 pipe_lock(pipe);
61991- pipe->readers++;
61992- pipe->writers--;
61993+ atomic_inc(&pipe->readers);
61994+ atomic_dec(&pipe->writers);
61995 wake_up_interruptible_sync(&pipe->wait);
61996 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
61997 pipe_unlock(pipe);
61998@@ -452,11 +452,11 @@ static void wait_for_dump_helpers(struct file *file)
61999 * We actually want wait_event_freezable() but then we need
62000 * to clear TIF_SIGPENDING and improve dump_interrupted().
62001 */
62002- wait_event_interruptible(pipe->wait, pipe->readers == 1);
62003+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
62004
62005 pipe_lock(pipe);
62006- pipe->readers--;
62007- pipe->writers++;
62008+ atomic_dec(&pipe->readers);
62009+ atomic_inc(&pipe->writers);
62010 pipe_unlock(pipe);
62011 }
62012
62013@@ -503,7 +503,9 @@ void do_coredump(const siginfo_t *siginfo)
62014 struct files_struct *displaced;
62015 bool need_nonrelative = false;
62016 bool core_dumped = false;
62017- static atomic_t core_dump_count = ATOMIC_INIT(0);
62018+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
62019+ long signr = siginfo->si_signo;
62020+ int dumpable;
62021 struct coredump_params cprm = {
62022 .siginfo = siginfo,
62023 .regs = signal_pt_regs(),
62024@@ -516,12 +518,17 @@ void do_coredump(const siginfo_t *siginfo)
62025 .mm_flags = mm->flags,
62026 };
62027
62028- audit_core_dumps(siginfo->si_signo);
62029+ audit_core_dumps(signr);
62030+
62031+ dumpable = __get_dumpable(cprm.mm_flags);
62032+
62033+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
62034+ gr_handle_brute_attach(dumpable);
62035
62036 binfmt = mm->binfmt;
62037 if (!binfmt || !binfmt->core_dump)
62038 goto fail;
62039- if (!__get_dumpable(cprm.mm_flags))
62040+ if (!dumpable)
62041 goto fail;
62042
62043 cred = prepare_creds();
62044@@ -540,7 +547,7 @@ void do_coredump(const siginfo_t *siginfo)
62045 need_nonrelative = true;
62046 }
62047
62048- retval = coredump_wait(siginfo->si_signo, &core_state);
62049+ retval = coredump_wait(signr, &core_state);
62050 if (retval < 0)
62051 goto fail_creds;
62052
62053@@ -583,7 +590,7 @@ void do_coredump(const siginfo_t *siginfo)
62054 }
62055 cprm.limit = RLIM_INFINITY;
62056
62057- dump_count = atomic_inc_return(&core_dump_count);
62058+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
62059 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
62060 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
62061 task_tgid_vnr(current), current->comm);
62062@@ -615,6 +622,8 @@ void do_coredump(const siginfo_t *siginfo)
62063 } else {
62064 struct inode *inode;
62065
62066+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
62067+
62068 if (cprm.limit < binfmt->min_coredump)
62069 goto fail_unlock;
62070
62071@@ -673,7 +682,7 @@ close_fail:
62072 filp_close(cprm.file, NULL);
62073 fail_dropcount:
62074 if (ispipe)
62075- atomic_dec(&core_dump_count);
62076+ atomic_dec_unchecked(&core_dump_count);
62077 fail_unlock:
62078 kfree(cn.corename);
62079 coredump_finish(mm, core_dumped);
62080@@ -694,6 +703,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
62081 struct file *file = cprm->file;
62082 loff_t pos = file->f_pos;
62083 ssize_t n;
62084+
62085+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
62086 if (cprm->written + nr > cprm->limit)
62087 return 0;
62088 while (nr) {
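Two things happen in the coredump changes above: pipe->readers/writers accesses switch to atomic operations (the patch makes those fields atomic_t in struct pipe_inode_info), and the signal number plus dumpable state are captured early so gr_handle_brute_attach() can apply GRKERNSEC_BRUTE's anti-bruteforcing delay when a crash signal lands. For reference, __get_dumpable() is the stock helper; this is roughly its 3.16 shape from include/linux/sched.h (a paraphrase, not a quote from the tree):

static inline int __get_dumpable(unsigned long mm_flags)
{
	/* extracts SUID_DUMP_DISABLE (0), SUID_DUMP_USER (1) or
	 * SUID_DUMP_ROOT (2) from the mm flags word */
	return mm_flags & MMF_DUMPABLE_MASK;
}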
62089diff --git a/fs/dcache.c b/fs/dcache.c
62090index e1308c5..c9c5b6c 100644
62091--- a/fs/dcache.c
62092+++ b/fs/dcache.c
62093@@ -478,7 +478,7 @@ static void __dentry_kill(struct dentry *dentry)
62094 * dentry_iput drops the locks, at which point nobody (except
62095 * transient RCU lookups) can reach this dentry.
62096 */
62097- BUG_ON((int)dentry->d_lockref.count > 0);
62098+ BUG_ON((int)__lockref_read(&dentry->d_lockref) > 0);
62099 this_cpu_dec(nr_dentry);
62100 if (dentry->d_op && dentry->d_op->d_release)
62101 dentry->d_op->d_release(dentry);
62102@@ -531,7 +531,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
62103 struct dentry *parent = dentry->d_parent;
62104 if (IS_ROOT(dentry))
62105 return NULL;
62106- if (unlikely((int)dentry->d_lockref.count < 0))
62107+ if (unlikely((int)__lockref_read(&dentry->d_lockref) < 0))
62108 return NULL;
62109 if (likely(spin_trylock(&parent->d_lock)))
62110 return parent;
62111@@ -608,7 +608,7 @@ repeat:
62112 dentry->d_flags |= DCACHE_REFERENCED;
62113 dentry_lru_add(dentry);
62114
62115- dentry->d_lockref.count--;
62116+ __lockref_dec(&dentry->d_lockref);
62117 spin_unlock(&dentry->d_lock);
62118 return;
62119
62120@@ -663,7 +663,7 @@ int d_invalidate(struct dentry * dentry)
62121 * We also need to leave mountpoints alone,
62122 * directory or not.
62123 */
62124- if (dentry->d_lockref.count > 1 && dentry->d_inode) {
62125+ if (__lockref_read(&dentry->d_lockref) > 1 && dentry->d_inode) {
62126 if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
62127 spin_unlock(&dentry->d_lock);
62128 return -EBUSY;
62129@@ -679,7 +679,7 @@ EXPORT_SYMBOL(d_invalidate);
62130 /* This must be called with d_lock held */
62131 static inline void __dget_dlock(struct dentry *dentry)
62132 {
62133- dentry->d_lockref.count++;
62134+ __lockref_inc(&dentry->d_lockref);
62135 }
62136
62137 static inline void __dget(struct dentry *dentry)
62138@@ -720,8 +720,8 @@ repeat:
62139 goto repeat;
62140 }
62141 rcu_read_unlock();
62142- BUG_ON(!ret->d_lockref.count);
62143- ret->d_lockref.count++;
62144+ BUG_ON(!__lockref_read(&ret->d_lockref));
62145+ __lockref_inc(&ret->d_lockref);
62146 spin_unlock(&ret->d_lock);
62147 return ret;
62148 }
62149@@ -804,7 +804,7 @@ restart:
62150 spin_lock(&inode->i_lock);
62151 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
62152 spin_lock(&dentry->d_lock);
62153- if (!dentry->d_lockref.count) {
62154+ if (!__lockref_read(&dentry->d_lockref)) {
62155 /*
62156 * inform the fs via d_prune that this dentry
62157 * is about to be unhashed and destroyed.
62158@@ -847,7 +847,7 @@ static void shrink_dentry_list(struct list_head *list)
62159 * We found an inuse dentry which was not removed from
62160 * the LRU because of laziness during lookup. Do not free it.
62161 */
62162- if ((int)dentry->d_lockref.count > 0) {
62163+ if ((int)__lockref_read(&dentry->d_lockref) > 0) {
62164 spin_unlock(&dentry->d_lock);
62165 if (parent)
62166 spin_unlock(&parent->d_lock);
62167@@ -885,8 +885,8 @@ static void shrink_dentry_list(struct list_head *list)
62168 dentry = parent;
62169 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
62170 parent = lock_parent(dentry);
62171- if (dentry->d_lockref.count != 1) {
62172- dentry->d_lockref.count--;
62173+ if (__lockref_read(&dentry->d_lockref) != 1) {
62174+ __lockref_dec(&dentry->d_lockref);
62175 spin_unlock(&dentry->d_lock);
62176 if (parent)
62177 spin_unlock(&parent->d_lock);
62178@@ -926,7 +926,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
62179 * counts, just remove them from the LRU. Otherwise give them
62180 * another pass through the LRU.
62181 */
62182- if (dentry->d_lockref.count) {
62183+ if (__lockref_read(&dentry->d_lockref) > 0) {
62184 d_lru_isolate(dentry);
62185 spin_unlock(&dentry->d_lock);
62186 return LRU_REMOVED;
62187@@ -1261,7 +1261,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
62188 } else {
62189 if (dentry->d_flags & DCACHE_LRU_LIST)
62190 d_lru_del(dentry);
62191- if (!dentry->d_lockref.count) {
62192+ if (!__lockref_read(&dentry->d_lockref)) {
62193 d_shrink_add(dentry, &data->dispose);
62194 data->found++;
62195 }
62196@@ -1309,7 +1309,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
62197 return D_WALK_CONTINUE;
62198
62199 /* root with refcount 1 is fine */
62200- if (dentry == _data && dentry->d_lockref.count == 1)
62201+ if (dentry == _data && __lockref_read(&dentry->d_lockref) == 1)
62202 return D_WALK_CONTINUE;
62203
62204 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
62205@@ -1318,7 +1318,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
62206 dentry->d_inode ?
62207 dentry->d_inode->i_ino : 0UL,
62208 dentry,
62209- dentry->d_lockref.count,
62210+ __lockref_read(&dentry->d_lockref),
62211 dentry->d_sb->s_type->name,
62212 dentry->d_sb->s_id);
62213 WARN_ON(1);
62214@@ -1444,7 +1444,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
62215 */
62216 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
62217 if (name->len > DNAME_INLINE_LEN-1) {
62218- dname = kmalloc(name->len + 1, GFP_KERNEL);
62219+ dname = kmalloc(round_up(name->len + 1, sizeof(unsigned long)), GFP_KERNEL);
62220 if (!dname) {
62221 kmem_cache_free(dentry_cache, dentry);
62222 return NULL;
62223@@ -1462,7 +1462,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
62224 smp_wmb();
62225 dentry->d_name.name = dname;
62226
62227- dentry->d_lockref.count = 1;
62228+ __lockref_set(&dentry->d_lockref, 1);
62229 dentry->d_flags = 0;
62230 spin_lock_init(&dentry->d_lock);
62231 seqcount_init(&dentry->d_seq);
62232@@ -2225,7 +2225,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
62233 goto next;
62234 }
62235
62236- dentry->d_lockref.count++;
62237+ __lockref_inc(&dentry->d_lockref);
62238 found = dentry;
62239 spin_unlock(&dentry->d_lock);
62240 break;
62241@@ -2324,7 +2324,7 @@ again:
62242 spin_lock(&dentry->d_lock);
62243 inode = dentry->d_inode;
62244 isdir = S_ISDIR(inode->i_mode);
62245- if (dentry->d_lockref.count == 1) {
62246+ if (__lockref_read(&dentry->d_lockref) == 1) {
62247 if (!spin_trylock(&inode->i_lock)) {
62248 spin_unlock(&dentry->d_lock);
62249 cpu_relax();
62250@@ -2401,7 +2401,7 @@ void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
62251 }
62252 EXPORT_SYMBOL(dentry_update_name_case);
62253
62254-static void switch_names(struct dentry *dentry, struct dentry *target)
62255+static void switch_names(struct dentry *dentry, struct dentry *target, bool exchange)
62256 {
62257 if (dname_external(target)) {
62258 if (dname_external(dentry)) {
62259@@ -2429,7 +2429,7 @@ static void switch_names(struct dentry *dentry, struct dentry *target)
62260 target->d_name.len + 1);
62261 target->d_name.name = dentry->d_name.name;
62262 dentry->d_name.name = dentry->d_iname;
62263- } else {
62264+ } else if (exchange) {
62265 /*
62266 * Both are internal.
62267 */
62268@@ -2439,6 +2439,14 @@ static void switch_names(struct dentry *dentry, struct dentry *target)
62269 swap(((long *) &dentry->d_iname)[i],
62270 ((long *) &target->d_iname)[i]);
62271 }
62272+ } else {
62273+ /*
62274+ * Both are internal. Just copy target to dentry
62275+ */
62276+ memcpy(dentry->d_iname, target->d_name.name,
62277+ target->d_name.len + 1);
62278+ dentry->d_name.len = target->d_name.len;
62279+ return;
62280 }
62281 }
62282 swap(dentry->d_name.len, target->d_name.len);
62283@@ -2539,7 +2547,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
62284 list_del(&target->d_u.d_child);
62285
62286 /* Switch the names.. */
62287- switch_names(dentry, target);
62288+ switch_names(dentry, target, exchange);
62289 swap(dentry->d_name.hash, target->d_name.hash);
62290
62291 /* ... and switch the parents */
62292@@ -2678,7 +2686,7 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
62293
62294 dparent = dentry->d_parent;
62295
62296- switch_names(dentry, anon);
62297+ switch_names(dentry, anon, false);
62298 swap(dentry->d_name.hash, anon->d_name.hash);
62299
62300 dentry->d_parent = dentry;
62301@@ -3296,7 +3304,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
62302
62303 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
62304 dentry->d_flags |= DCACHE_GENOCIDE;
62305- dentry->d_lockref.count--;
62306+ __lockref_dec(&dentry->d_lockref);
62307 }
62308 }
62309 return D_WALK_CONTINUE;
62310@@ -3412,7 +3420,8 @@ void __init vfs_caches_init(unsigned long mempages)
62311 mempages -= reserve;
62312
62313 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
62314- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
62315+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
62316+ SLAB_NO_SANITIZE, NULL);
62317
62318 dcache_init();
62319 inode_init();
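The dcache hunks above never touch d_lockref.count directly any more; every access goes through __lockref_read()/__lockref_inc()/__lockref_dec()/__lockref_set(). Those accessors are introduced by this patch in include/linux/lockref.h (outside this section); assuming, as the names suggest, that the embedded count becomes an atomic type so PAX_REFCOUNT can police dentry refcounting, they reduce to something like:

static inline int __lockref_read(struct lockref *lockref)
{
	return atomic_read(&lockref->count);
}

static inline void __lockref_inc(struct lockref *lockref)
{
	atomic_inc(&lockref->count);
}

static inline void __lockref_dec(struct lockref *lockref)
{
	atomic_dec(&lockref->count);
}

static inline void __lockref_set(struct lockref *lockref, int count)
{
	atomic_set(&lockref->count, count);
}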
62320diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
62321index 16a46b6..41696fd 100644
62322--- a/fs/debugfs/inode.c
62323+++ b/fs/debugfs/inode.c
62324@@ -416,7 +416,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
62325 */
62326 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
62327 {
62328+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
62329+ return __create_file(name, S_IFDIR | S_IRWXU,
62330+#else
62331 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
62332+#endif
62333 parent, NULL, NULL);
62334 }
62335 EXPORT_SYMBOL_GPL(debugfs_create_dir);
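For clarity, the two modes in that #ifdef work out to:

/* GRKERNSEC_SYSFS_RESTRICT on:  S_IFDIR | S_IRWXU                     = 0700 (drwx------)
 * GRKERNSEC_SYSFS_RESTRICT off: S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO = 0755 (drwxr-xr-x)
 * so debugfs directories become root-only when the option is enabled. */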
62336diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
62337index d4a9431..77f9b2e 100644
62338--- a/fs/ecryptfs/inode.c
62339+++ b/fs/ecryptfs/inode.c
62340@@ -673,7 +673,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
62341 old_fs = get_fs();
62342 set_fs(get_ds());
62343 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
62344- (char __user *)lower_buf,
62345+ (char __force_user *)lower_buf,
62346 PATH_MAX);
62347 set_fs(old_fs);
62348 if (rc < 0)
62349diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
62350index e4141f2..d8263e8 100644
62351--- a/fs/ecryptfs/miscdev.c
62352+++ b/fs/ecryptfs/miscdev.c
62353@@ -304,7 +304,7 @@ check_list:
62354 goto out_unlock_msg_ctx;
62355 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
62356 if (msg_ctx->msg) {
62357- if (copy_to_user(&buf[i], packet_length, packet_length_size))
62358+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
62359 goto out_unlock_msg_ctx;
62360 i += packet_length_size;
62361 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
62362diff --git a/fs/exec.c b/fs/exec.c
62363index a3d33fe..49e9bc9 100644
62364--- a/fs/exec.c
62365+++ b/fs/exec.c
62366@@ -56,8 +56,20 @@
62367 #include <linux/pipe_fs_i.h>
62368 #include <linux/oom.h>
62369 #include <linux/compat.h>
62370+#include <linux/random.h>
62371+#include <linux/seq_file.h>
62372+#include <linux/coredump.h>
62373+#include <linux/mman.h>
62374+
62375+#ifdef CONFIG_PAX_REFCOUNT
62376+#include <linux/kallsyms.h>
62377+#include <linux/kdebug.h>
62378+#endif
62379+
62380+#include <trace/events/fs.h>
62381
62382 #include <asm/uaccess.h>
62383+#include <asm/sections.h>
62384 #include <asm/mmu_context.h>
62385 #include <asm/tlb.h>
62386
62387@@ -66,19 +78,34 @@
62388
62389 #include <trace/events/sched.h>
62390
62391+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62392+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
62393+{
62394+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
62395+}
62396+#endif
62397+
62398+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
62399+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
62400+EXPORT_SYMBOL(pax_set_initial_flags_func);
62401+#endif
62402+
62403 int suid_dumpable = 0;
62404
62405 static LIST_HEAD(formats);
62406 static DEFINE_RWLOCK(binfmt_lock);
62407
62408+extern int gr_process_kernel_exec_ban(void);
62409+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
62410+
62411 void __register_binfmt(struct linux_binfmt * fmt, int insert)
62412 {
62413 BUG_ON(!fmt);
62414 if (WARN_ON(!fmt->load_binary))
62415 return;
62416 write_lock(&binfmt_lock);
62417- insert ? list_add(&fmt->lh, &formats) :
62418- list_add_tail(&fmt->lh, &formats);
62419+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
62420+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
62421 write_unlock(&binfmt_lock);
62422 }
62423
62424@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
62425 void unregister_binfmt(struct linux_binfmt * fmt)
62426 {
62427 write_lock(&binfmt_lock);
62428- list_del(&fmt->lh);
62429+ pax_list_del((struct list_head *)&fmt->lh);
62430 write_unlock(&binfmt_lock);
62431 }
62432
62433@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
62434 int write)
62435 {
62436 struct page *page;
62437- int ret;
62438
62439-#ifdef CONFIG_STACK_GROWSUP
62440- if (write) {
62441- ret = expand_downwards(bprm->vma, pos);
62442- if (ret < 0)
62443- return NULL;
62444- }
62445-#endif
62446- ret = get_user_pages(current, bprm->mm, pos,
62447- 1, write, 1, &page, NULL);
62448- if (ret <= 0)
62449+ if (0 > expand_downwards(bprm->vma, pos))
62450+ return NULL;
62451+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
62452 return NULL;
62453
62454 if (write) {
62455@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
62456 if (size <= ARG_MAX)
62457 return page;
62458
62459+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62460+ // only allow 512KB for argv+env on suid/sgid binaries
62461+ // to prevent easy ASLR exhaustion
62462+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
62463+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
62464+ (size > (512 * 1024))) {
62465+ put_page(page);
62466+ return NULL;
62467+ }
62468+#endif
62469+
62470 /*
62471 * Limit to 1/4-th the stack size for the argv+env strings.
62472 * This ensures that:
62473@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
62474 vma->vm_end = STACK_TOP_MAX;
62475 vma->vm_start = vma->vm_end - PAGE_SIZE;
62476 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
62477+
62478+#ifdef CONFIG_PAX_SEGMEXEC
62479+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
62480+#endif
62481+
62482 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
62483 INIT_LIST_HEAD(&vma->anon_vma_chain);
62484
62485@@ -279,6 +314,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
62486 mm->stack_vm = mm->total_vm = 1;
62487 up_write(&mm->mmap_sem);
62488 bprm->p = vma->vm_end - sizeof(void *);
62489+
62490+#ifdef CONFIG_PAX_RANDUSTACK
62491+ if (randomize_va_space)
62492+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
62493+#endif
62494+
62495 return 0;
62496 err:
62497 up_write(&mm->mmap_sem);
62498@@ -399,7 +440,7 @@ struct user_arg_ptr {
62499 } ptr;
62500 };
62501
62502-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
62503+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
62504 {
62505 const char __user *native;
62506
62507@@ -408,14 +449,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
62508 compat_uptr_t compat;
62509
62510 if (get_user(compat, argv.ptr.compat + nr))
62511- return ERR_PTR(-EFAULT);
62512+ return (const char __force_user *)ERR_PTR(-EFAULT);
62513
62514 return compat_ptr(compat);
62515 }
62516 #endif
62517
62518 if (get_user(native, argv.ptr.native + nr))
62519- return ERR_PTR(-EFAULT);
62520+ return (const char __force_user *)ERR_PTR(-EFAULT);
62521
62522 return native;
62523 }
62524@@ -434,7 +475,7 @@ static int count(struct user_arg_ptr argv, int max)
62525 if (!p)
62526 break;
62527
62528- if (IS_ERR(p))
62529+ if (IS_ERR((const char __force_kernel *)p))
62530 return -EFAULT;
62531
62532 if (i >= max)
62533@@ -469,7 +510,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
62534
62535 ret = -EFAULT;
62536 str = get_user_arg_ptr(argv, argc);
62537- if (IS_ERR(str))
62538+ if (IS_ERR((const char __force_kernel *)str))
62539 goto out;
62540
62541 len = strnlen_user(str, MAX_ARG_STRLEN);
62542@@ -551,7 +592,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
62543 int r;
62544 mm_segment_t oldfs = get_fs();
62545 struct user_arg_ptr argv = {
62546- .ptr.native = (const char __user *const __user *)__argv,
62547+ .ptr.native = (const char __user * const __force_user *)__argv,
62548 };
62549
62550 set_fs(KERNEL_DS);
62551@@ -586,7 +627,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
62552 unsigned long new_end = old_end - shift;
62553 struct mmu_gather tlb;
62554
62555- BUG_ON(new_start > new_end);
62556+ if (new_start >= new_end || new_start < mmap_min_addr)
62557+ return -ENOMEM;
62558
62559 /*
62560 * ensure there are no vmas between where we want to go
62561@@ -595,6 +637,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
62562 if (vma != find_vma(mm, new_start))
62563 return -EFAULT;
62564
62565+#ifdef CONFIG_PAX_SEGMEXEC
62566+ BUG_ON(pax_find_mirror_vma(vma));
62567+#endif
62568+
62569 /*
62570 * cover the whole range: [new_start, old_end)
62571 */
62572@@ -675,10 +721,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
62573 stack_top = arch_align_stack(stack_top);
62574 stack_top = PAGE_ALIGN(stack_top);
62575
62576- if (unlikely(stack_top < mmap_min_addr) ||
62577- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
62578- return -ENOMEM;
62579-
62580 stack_shift = vma->vm_end - stack_top;
62581
62582 bprm->p -= stack_shift;
62583@@ -690,8 +732,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
62584 bprm->exec -= stack_shift;
62585
62586 down_write(&mm->mmap_sem);
62587+
62588+ /* Move stack pages down in memory. */
62589+ if (stack_shift) {
62590+ ret = shift_arg_pages(vma, stack_shift);
62591+ if (ret)
62592+ goto out_unlock;
62593+ }
62594+
62595 vm_flags = VM_STACK_FLAGS;
62596
62597+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
62598+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
62599+ vm_flags &= ~VM_EXEC;
62600+
62601+#ifdef CONFIG_PAX_MPROTECT
62602+ if (mm->pax_flags & MF_PAX_MPROTECT)
62603+ vm_flags &= ~VM_MAYEXEC;
62604+#endif
62605+
62606+ }
62607+#endif
62608+
62609 /*
62610 * Adjust stack execute permissions; explicitly enable for
62611 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
62612@@ -710,13 +772,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
62613 goto out_unlock;
62614 BUG_ON(prev != vma);
62615
62616- /* Move stack pages down in memory. */
62617- if (stack_shift) {
62618- ret = shift_arg_pages(vma, stack_shift);
62619- if (ret)
62620- goto out_unlock;
62621- }
62622-
62623 /* mprotect_fixup is overkill to remove the temporary stack flags */
62624 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
62625
62626@@ -740,6 +795,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
62627 #endif
62628 current->mm->start_stack = bprm->p;
62629 ret = expand_stack(vma, stack_base);
62630+
62631+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
62632+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
62633+ unsigned long size;
62634+ vm_flags_t vm_flags;
62635+
62636+ size = STACK_TOP - vma->vm_end;
62637+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
62638+
62639+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
62640+
62641+#ifdef CONFIG_X86
62642+ if (!ret) {
62643+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
62644+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
62645+ }
62646+#endif
62647+
62648+ }
62649+#endif
62650+
62651 if (ret)
62652 ret = -EFAULT;
62653
62654@@ -775,6 +851,8 @@ static struct file *do_open_exec(struct filename *name)
62655
62656 fsnotify_open(file);
62657
62658+ trace_open_exec(name->name);
62659+
62660 err = deny_write_access(file);
62661 if (err)
62662 goto exit;
62663@@ -804,7 +882,7 @@ int kernel_read(struct file *file, loff_t offset,
62664 old_fs = get_fs();
62665 set_fs(get_ds());
62666 /* The cast to a user pointer is valid due to the set_fs() */
62667- result = vfs_read(file, (void __user *)addr, count, &pos);
62668+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
62669 set_fs(old_fs);
62670 return result;
62671 }
62672@@ -849,6 +927,7 @@ static int exec_mmap(struct mm_struct *mm)
62673 tsk->mm = mm;
62674 tsk->active_mm = mm;
62675 activate_mm(active_mm, mm);
62676+ populate_stack();
62677 tsk->mm->vmacache_seqnum = 0;
62678 vmacache_flush(tsk);
62679 task_unlock(tsk);
62680@@ -1247,7 +1326,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
62681 }
62682 rcu_read_unlock();
62683
62684- if (p->fs->users > n_fs)
62685+ if (atomic_read(&p->fs->users) > n_fs)
62686 bprm->unsafe |= LSM_UNSAFE_SHARE;
62687 else
62688 p->fs->in_exec = 1;
62689@@ -1423,6 +1502,31 @@ static int exec_binprm(struct linux_binprm *bprm)
62690 return ret;
62691 }
62692
62693+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62694+static DEFINE_PER_CPU(u64, exec_counter);
62695+static int __init init_exec_counters(void)
62696+{
62697+ unsigned int cpu;
62698+
62699+ for_each_possible_cpu(cpu) {
62700+ per_cpu(exec_counter, cpu) = (u64)cpu;
62701+ }
62702+
62703+ return 0;
62704+}
62705+early_initcall(init_exec_counters);
62706+static inline void increment_exec_counter(void)
62707+{
62708+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
62709+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
62710+}
62711+#else
62712+static inline void increment_exec_counter(void) {}
62713+#endif
62714+
62715+extern void gr_handle_exec_args(struct linux_binprm *bprm,
62716+ struct user_arg_ptr argv);
62717+
62718 /*
62719 * sys_execve() executes a new program.
62720 */
62721@@ -1430,6 +1534,11 @@ static int do_execve_common(struct filename *filename,
62722 struct user_arg_ptr argv,
62723 struct user_arg_ptr envp)
62724 {
62725+#ifdef CONFIG_GRKERNSEC
62726+ struct file *old_exec_file;
62727+ struct acl_subject_label *old_acl;
62728+ struct rlimit old_rlim[RLIM_NLIMITS];
62729+#endif
62730 struct linux_binprm *bprm;
62731 struct file *file;
62732 struct files_struct *displaced;
62733@@ -1438,6 +1547,8 @@ static int do_execve_common(struct filename *filename,
62734 if (IS_ERR(filename))
62735 return PTR_ERR(filename);
62736
62737+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
62738+
62739 /*
62740 * We move the actual failure in case of RLIMIT_NPROC excess from
62741 * set*uid() to execve() because too many poorly written programs
62742@@ -1475,11 +1586,21 @@ static int do_execve_common(struct filename *filename,
62743 if (IS_ERR(file))
62744 goto out_unmark;
62745
62746+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
62747+ retval = -EPERM;
62748+ goto out_unmark;
62749+ }
62750+
62751 sched_exec();
62752
62753 bprm->file = file;
62754 bprm->filename = bprm->interp = filename->name;
62755
62756+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
62757+ retval = -EACCES;
62758+ goto out_unmark;
62759+ }
62760+
62761 retval = bprm_mm_init(bprm);
62762 if (retval)
62763 goto out_unmark;
62764@@ -1496,24 +1617,70 @@ static int do_execve_common(struct filename *filename,
62765 if (retval < 0)
62766 goto out;
62767
62768+#ifdef CONFIG_GRKERNSEC
62769+ old_acl = current->acl;
62770+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
62771+ old_exec_file = current->exec_file;
62772+ get_file(file);
62773+ current->exec_file = file;
62774+#endif
62775+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62776+ /* limit suid stack to 8MB
62777+ * we saved the old limits above and will restore them if this exec fails
62778+ */
62779+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
62780+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
62781+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
62782+#endif
62783+
62784+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
62785+ retval = -EPERM;
62786+ goto out_fail;
62787+ }
62788+
62789+ if (!gr_tpe_allow(file)) {
62790+ retval = -EACCES;
62791+ goto out_fail;
62792+ }
62793+
62794+ if (gr_check_crash_exec(file)) {
62795+ retval = -EACCES;
62796+ goto out_fail;
62797+ }
62798+
62799+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
62800+ bprm->unsafe);
62801+ if (retval < 0)
62802+ goto out_fail;
62803+
62804 retval = copy_strings_kernel(1, &bprm->filename, bprm);
62805 if (retval < 0)
62806- goto out;
62807+ goto out_fail;
62808
62809 bprm->exec = bprm->p;
62810 retval = copy_strings(bprm->envc, envp, bprm);
62811 if (retval < 0)
62812- goto out;
62813+ goto out_fail;
62814
62815 retval = copy_strings(bprm->argc, argv, bprm);
62816 if (retval < 0)
62817- goto out;
62818+ goto out_fail;
62819+
62820+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
62821+
62822+ gr_handle_exec_args(bprm, argv);
62823
62824 retval = exec_binprm(bprm);
62825 if (retval < 0)
62826- goto out;
62827+ goto out_fail;
62828+#ifdef CONFIG_GRKERNSEC
62829+ if (old_exec_file)
62830+ fput(old_exec_file);
62831+#endif
62832
62833 /* execve succeeded */
62834+
62835+ increment_exec_counter();
62836 current->fs->in_exec = 0;
62837 current->in_execve = 0;
62838 acct_update_integrals(current);
62839@@ -1524,6 +1691,14 @@ static int do_execve_common(struct filename *filename,
62840 put_files_struct(displaced);
62841 return retval;
62842
62843+out_fail:
62844+#ifdef CONFIG_GRKERNSEC
62845+ current->acl = old_acl;
62846+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
62847+ fput(current->exec_file);
62848+ current->exec_file = old_exec_file;
62849+#endif
62850+
62851 out:
62852 if (bprm->mm) {
62853 acct_arg_size(bprm, 0);
62854@@ -1615,3 +1790,312 @@ COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
62855 return compat_do_execve(getname(filename), argv, envp);
62856 }
62857 #endif
62858+
62859+int pax_check_flags(unsigned long *flags)
62860+{
62861+ int retval = 0;
62862+
62863+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
62864+ if (*flags & MF_PAX_SEGMEXEC)
62865+ {
62866+ *flags &= ~MF_PAX_SEGMEXEC;
62867+ retval = -EINVAL;
62868+ }
62869+#endif
62870+
62871+ if ((*flags & MF_PAX_PAGEEXEC)
62872+
62873+#ifdef CONFIG_PAX_PAGEEXEC
62874+ && (*flags & MF_PAX_SEGMEXEC)
62875+#endif
62876+
62877+ )
62878+ {
62879+ *flags &= ~MF_PAX_PAGEEXEC;
62880+ retval = -EINVAL;
62881+ }
62882+
62883+ if ((*flags & MF_PAX_MPROTECT)
62884+
62885+#ifdef CONFIG_PAX_MPROTECT
62886+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
62887+#endif
62888+
62889+ )
62890+ {
62891+ *flags &= ~MF_PAX_MPROTECT;
62892+ retval = -EINVAL;
62893+ }
62894+
62895+ if ((*flags & MF_PAX_EMUTRAMP)
62896+
62897+#ifdef CONFIG_PAX_EMUTRAMP
62898+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
62899+#endif
62900+
62901+ )
62902+ {
62903+ *flags &= ~MF_PAX_EMUTRAMP;
62904+ retval = -EINVAL;
62905+ }
62906+
62907+ return retval;
62908+}
62909+
62910+EXPORT_SYMBOL(pax_check_flags);
62911+
62912+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
62913+char *pax_get_path(const struct path *path, char *buf, int buflen)
62914+{
62915+ char *pathname = d_path(path, buf, buflen);
62916+
62917+ if (IS_ERR(pathname))
62918+ goto toolong;
62919+
62920+ pathname = mangle_path(buf, pathname, "\t\n\\");
62921+ if (!pathname)
62922+ goto toolong;
62923+
62924+ *pathname = 0;
62925+ return buf;
62926+
62927+toolong:
62928+ return "<path too long>";
62929+}
62930+EXPORT_SYMBOL(pax_get_path);
62931+
62932+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
62933+{
62934+ struct task_struct *tsk = current;
62935+ struct mm_struct *mm = current->mm;
62936+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
62937+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
62938+ char *path_exec = NULL;
62939+ char *path_fault = NULL;
62940+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
62941+ siginfo_t info = { };
62942+
62943+ if (buffer_exec && buffer_fault) {
62944+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
62945+
62946+ down_read(&mm->mmap_sem);
62947+ vma = mm->mmap;
62948+ while (vma && (!vma_exec || !vma_fault)) {
62949+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
62950+ vma_exec = vma;
62951+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
62952+ vma_fault = vma;
62953+ vma = vma->vm_next;
62954+ }
62955+ if (vma_exec)
62956+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
62957+ if (vma_fault) {
62958+ start = vma_fault->vm_start;
62959+ end = vma_fault->vm_end;
62960+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
62961+ if (vma_fault->vm_file)
62962+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
62963+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
62964+ path_fault = "<heap>";
62965+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
62966+ path_fault = "<stack>";
62967+ else
62968+ path_fault = "<anonymous mapping>";
62969+ }
62970+ up_read(&mm->mmap_sem);
62971+ }
62972+ if (tsk->signal->curr_ip)
62973+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
62974+ else
62975+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
62976+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
62977+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
62978+ free_page((unsigned long)buffer_exec);
62979+ free_page((unsigned long)buffer_fault);
62980+ pax_report_insns(regs, pc, sp);
62981+ info.si_signo = SIGKILL;
62982+ info.si_errno = 0;
62983+ info.si_code = SI_KERNEL;
62984+ info.si_pid = 0;
62985+ info.si_uid = 0;
62986+ do_coredump(&info);
62987+}
62988+#endif
62989+
62990+#ifdef CONFIG_PAX_REFCOUNT
62991+void pax_report_refcount_overflow(struct pt_regs *regs)
62992+{
62993+ if (current->signal->curr_ip)
62994+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
62995+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
62996+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
62997+ else
62998+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
62999+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
63000+ print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
63001+ preempt_disable();
63002+ show_regs(regs);
63003+ preempt_enable();
63004+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
63005+}
63006+#endif
63007+
63008+#ifdef CONFIG_PAX_USERCOPY
63009+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
63010+static noinline int check_stack_object(const void *obj, unsigned long len)
63011+{
63012+ const void * const stack = task_stack_page(current);
63013+ const void * const stackend = stack + THREAD_SIZE;
63014+
63015+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
63016+ const void *frame = NULL;
63017+ const void *oldframe;
63018+#endif
63019+
63020+ if (obj + len < obj)
63021+ return -1;
63022+
63023+ if (obj + len <= stack || stackend <= obj)
63024+ return 0;
63025+
63026+ if (obj < stack || stackend < obj + len)
63027+ return -1;
63028+
63029+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
63030+ oldframe = __builtin_frame_address(1);
63031+ if (oldframe)
63032+ frame = __builtin_frame_address(2);
63033+ /*
63034+ low ----------------------------------------------> high
63035+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
63036+ ^----------------^
63037+ allow copies only within here
63038+ */
63039+ while (stack <= frame && frame < stackend) {
63040+ /* if obj + len extends past the last frame, this
63041+ check won't pass and the next frame will be 0,
63042+ causing us to bail out and correctly report
63043+ the copy as invalid
63044+ */
63045+ if (obj + len <= frame)
63046+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
63047+ oldframe = frame;
63048+ frame = *(const void * const *)frame;
63049+ }
63050+ return -1;
63051+#else
63052+ return 1;
63053+#endif
63054+}
63055+
63056+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
63057+{
63058+ if (current->signal->curr_ip)
63059+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
63060+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
63061+ else
63062+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
63063+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
63064+ dump_stack();
63065+ gr_handle_kernel_exploit();
63066+ do_group_exit(SIGKILL);
63067+}
63068+#endif
63069+
63070+#ifdef CONFIG_PAX_USERCOPY
63071+
63072+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
63073+{
63074+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63075+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
63076+#ifdef CONFIG_MODULES
63077+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
63078+#else
63079+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
63080+#endif
63081+
63082+#else
63083+ unsigned long textlow = (unsigned long)_stext;
63084+ unsigned long texthigh = (unsigned long)_etext;
63085+
63086+#ifdef CONFIG_X86_64
63087+ /* check against linear mapping as well */
63088+ if (high > (unsigned long)__va(__pa(textlow)) &&
63089+ low < (unsigned long)__va(__pa(texthigh)))
63090+ return true;
63091+#endif
63092+
63093+#endif
63094+
63095+ if (high <= textlow || low >= texthigh)
63096+ return false;
63097+ else
63098+ return true;
63099+}
63100+#endif
63101+
63102+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
63103+{
63104+#ifdef CONFIG_PAX_USERCOPY
63105+ const char *type;
63106+#endif
63107+
63108+#ifndef CONFIG_STACK_GROWSUP
63109+ unsigned long stackstart = (unsigned long)task_stack_page(current);
63110+ unsigned long currentsp = (unsigned long)&stackstart;
63111+ if (unlikely((currentsp < stackstart + 512 ||
63112+ currentsp >= stackstart + THREAD_SIZE) && !in_interrupt()))
63113+ BUG();
63114+#endif
63115+
63116+#ifndef CONFIG_PAX_USERCOPY_DEBUG
63117+ if (const_size)
63118+ return;
63119+#endif
63120+
63121+#ifdef CONFIG_PAX_USERCOPY
63122+ if (!n)
63123+ return;
63124+
63125+ type = check_heap_object(ptr, n);
63126+ if (!type) {
63127+ int ret = check_stack_object(ptr, n);
63128+ if (ret == 1 || ret == 2)
63129+ return;
63130+ if (ret == 0) {
63131+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
63132+ type = "<kernel text>";
63133+ else
63134+ return;
63135+ } else
63136+ type = "<process stack>";
63137+ }
63138+
63139+ pax_report_usercopy(ptr, n, to_user, type);
63140+#endif
63141+
63142+}
63143+EXPORT_SYMBOL(__check_object_size);
63144+
63145+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
63146+void pax_track_stack(void)
63147+{
63148+ unsigned long sp = (unsigned long)&sp;
63149+ if (sp < current_thread_info()->lowest_stack &&
63150+ sp > (unsigned long)task_stack_page(current))
63151+ current_thread_info()->lowest_stack = sp;
63152+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
63153+ BUG();
63154+}
63155+EXPORT_SYMBOL(pax_track_stack);
63156+#endif
63157+
63158+#ifdef CONFIG_PAX_SIZE_OVERFLOW
63159+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
63160+{
63161+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s\n", func, file, line, ssa_name);
63162+ dump_stack();
63163+ do_group_exit(SIGKILL);
63164+}
63165+EXPORT_SYMBOL(report_size_overflow);
63166+#endif
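For readers without kernel context, the frame walk in check_stack_object() above can be reproduced in userspace. A toy sketch (x86, build with -O0 -fno-omit-frame-pointer; the monotonicity check stands in for the kernel's stack <= frame < stackend bound):

#include <stdio.h>

static void __attribute__((noinline)) walk(void)
{
	/* each frame begins with the saved frame pointer, so chasing
	 * *(void **)frame climbs toward the stack base -- the same motion
	 * check_stack_object() uses to confine a copy to one frame */
	const void *frame = __builtin_frame_address(0);
	int depth = 0;

	while (frame && depth < 8) {
		const void *next = *(const void * const *)frame;

		printf("frame %d at %p\n", depth++, frame);
		if (next <= frame)	/* malformed or omitted frame: stop */
			break;
		frame = next;
	}
}

static void __attribute__((noinline)) a(void) { walk(); }
static void __attribute__((noinline)) b(void) { a(); }

int main(void)
{
	b();
	return 0;
}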
63167diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
63168index 9f9992b..8b59411 100644
63169--- a/fs/ext2/balloc.c
63170+++ b/fs/ext2/balloc.c
63171@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
63172
63173 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
63174 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
63175- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
63176+ if (free_blocks < root_blocks + 1 &&
63177 !uid_eq(sbi->s_resuid, current_fsuid()) &&
63178 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
63179- !in_group_p (sbi->s_resgid))) {
63180+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
63181 return 0;
63182 }
63183 return 1;
63184diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
63185index 9142614..97484fa 100644
63186--- a/fs/ext2/xattr.c
63187+++ b/fs/ext2/xattr.c
63188@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
63189 struct buffer_head *bh = NULL;
63190 struct ext2_xattr_entry *entry;
63191 char *end;
63192- size_t rest = buffer_size;
63193+ size_t rest = buffer_size, total_size = 0;
63194 int error;
63195
63196 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
63197@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
63198 buffer += size;
63199 }
63200 rest -= size;
63201+ total_size += size;
63202 }
63203 }
63204- error = buffer_size - rest; /* total size */
63205+ error = total_size;
63206
63207 cleanup:
63208 brelse(bh);
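The total_size rewrite here (and the identical one in fs/ext3/xattr.c below) computes the same result as the old buffer_size - rest, but without relying on unsigned wrap-around: on a size-probe call userspace passes a NULL buffer with buffer_size == 0, so rest underflows and only modular arithmetic brings the subtraction back to the right answer. That intentional wrap is exactly the sort of thing this patch's size_overflow plugin instruments, which is presumably why it is rewritten. A standalone demonstration of the old arithmetic:

#include <stdio.h>

int main(void)
{
	size_t buffer_size = 0;			/* size-probe case */
	size_t rest = buffer_size, total_size = 0;
	size_t sizes[] = { 17, 25, 9 };		/* pretend xattr entry sizes */

	for (int i = 0; i < 3; i++) {
		rest -= sizes[i];	/* wraps below zero immediately */
		total_size += sizes[i];
	}
	/* both lines print 51, but the first got there via wrap-around */
	printf("buffer_size - rest = %zu\n", buffer_size - rest);
	printf("total_size         = %zu\n", total_size);
	return 0;
}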
63209diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
63210index 158b5d4..2432610 100644
63211--- a/fs/ext3/balloc.c
63212+++ b/fs/ext3/balloc.c
63213@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
63214
63215 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
63216 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
63217- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
63218+ if (free_blocks < root_blocks + 1 &&
63219 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
63220 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
63221- !in_group_p (sbi->s_resgid))) {
63222+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
63223 return 0;
63224 }
63225 return 1;
63226diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
63227index c6874be..f8a6ae8 100644
63228--- a/fs/ext3/xattr.c
63229+++ b/fs/ext3/xattr.c
63230@@ -330,7 +330,7 @@ static int
63231 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
63232 char *buffer, size_t buffer_size)
63233 {
63234- size_t rest = buffer_size;
63235+ size_t rest = buffer_size, total_size = 0;
63236
63237 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
63238 const struct xattr_handler *handler =
63239@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
63240 buffer += size;
63241 }
63242 rest -= size;
63243+ total_size += size;
63244 }
63245 }
63246- return buffer_size - rest;
63247+ return total_size;
63248 }
63249
63250 static int
63251diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
63252index fca3820..e1ea241 100644
63253--- a/fs/ext4/balloc.c
63254+++ b/fs/ext4/balloc.c
63255@@ -553,8 +553,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
63256 /* Hm, nope. Are (enough) root reserved clusters available? */
63257 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
63258 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
63259- capable(CAP_SYS_RESOURCE) ||
63260- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
63261+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
63262+ capable_nolog(CAP_SYS_RESOURCE)) {
63263
63264 if (free_clusters >= (nclusters + dirty_clusters +
63265 resv_clusters))
63266diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
63267index 1bbe7c3..6f404a5c 100644
63268--- a/fs/ext4/ext4.h
63269+++ b/fs/ext4/ext4.h
63270@@ -1276,19 +1276,19 @@ struct ext4_sb_info {
63271 unsigned long s_mb_last_start;
63272
63273 /* stats for buddy allocator */
63274- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
63275- atomic_t s_bal_success; /* we found long enough chunks */
63276- atomic_t s_bal_allocated; /* in blocks */
63277- atomic_t s_bal_ex_scanned; /* total extents scanned */
63278- atomic_t s_bal_goals; /* goal hits */
63279- atomic_t s_bal_breaks; /* too long searches */
63280- atomic_t s_bal_2orders; /* 2^order hits */
63281+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
63282+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
63283+ atomic_unchecked_t s_bal_allocated; /* in blocks */
63284+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
63285+ atomic_unchecked_t s_bal_goals; /* goal hits */
63286+ atomic_unchecked_t s_bal_breaks; /* too long searches */
63287+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
63288 spinlock_t s_bal_lock;
63289 unsigned long s_mb_buddies_generated;
63290 unsigned long long s_mb_generation_time;
63291- atomic_t s_mb_lost_chunks;
63292- atomic_t s_mb_preallocated;
63293- atomic_t s_mb_discarded;
63294+ atomic_unchecked_t s_mb_lost_chunks;
63295+ atomic_unchecked_t s_mb_preallocated;
63296+ atomic_unchecked_t s_mb_discarded;
63297 atomic_t s_lock_busy;
63298
63299 /* locality groups */
63300@@ -1826,7 +1826,7 @@ ext4_group_first_block_no(struct super_block *sb, ext4_group_t group_no)
63301 /*
63302 * Special error return code only used by dx_probe() and its callers.
63303 */
63304-#define ERR_BAD_DX_DIR -75000
63305+#define ERR_BAD_DX_DIR (-(MAX_ERRNO - 1))
63306
63307 /*
63308 * Timeout and state flag for lazy initialization inode thread.
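The ERR_BAD_DX_DIR change above exists because the namei.c hunks below start returning ERR_PTR(err) from ext4_find_entry(): the kernel's error-pointer encoding only covers the last MAX_ERRNO (4095) negative values, and the old -75000 sits outside that window, so IS_ERR() would not recognize it after a round trip. A userspace-compilable sketch of the convention (the helpers mirror include/linux/err.h):

#include <stdio.h>

#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }

int main(void)
{
        printf("-75000 recognized?         %d\n",
               IS_ERR_VALUE(ERR_PTR(-75000L)));           /* prints 0 */
        printf("-(MAX_ERRNO-1) recognized? %d\n",
               IS_ERR_VALUE(ERR_PTR(-(MAX_ERRNO - 1L)))); /* prints 1 */
        return 0;
}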
63309diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
63310index c3e7418..f62cab3 100644
63311--- a/fs/ext4/mballoc.c
63312+++ b/fs/ext4/mballoc.c
63313@@ -1901,7 +1901,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
63314 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
63315
63316 if (EXT4_SB(sb)->s_mb_stats)
63317- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
63318+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
63319
63320 break;
63321 }
63322@@ -2211,7 +2211,7 @@ repeat:
63323 ac->ac_status = AC_STATUS_CONTINUE;
63324 ac->ac_flags |= EXT4_MB_HINT_FIRST;
63325 cr = 3;
63326- atomic_inc(&sbi->s_mb_lost_chunks);
63327+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
63328 goto repeat;
63329 }
63330 }
63331@@ -2717,25 +2717,25 @@ int ext4_mb_release(struct super_block *sb)
63332 if (sbi->s_mb_stats) {
63333 ext4_msg(sb, KERN_INFO,
63334 "mballoc: %u blocks %u reqs (%u success)",
63335- atomic_read(&sbi->s_bal_allocated),
63336- atomic_read(&sbi->s_bal_reqs),
63337- atomic_read(&sbi->s_bal_success));
63338+ atomic_read_unchecked(&sbi->s_bal_allocated),
63339+ atomic_read_unchecked(&sbi->s_bal_reqs),
63340+ atomic_read_unchecked(&sbi->s_bal_success));
63341 ext4_msg(sb, KERN_INFO,
63342 "mballoc: %u extents scanned, %u goal hits, "
63343 "%u 2^N hits, %u breaks, %u lost",
63344- atomic_read(&sbi->s_bal_ex_scanned),
63345- atomic_read(&sbi->s_bal_goals),
63346- atomic_read(&sbi->s_bal_2orders),
63347- atomic_read(&sbi->s_bal_breaks),
63348- atomic_read(&sbi->s_mb_lost_chunks));
63349+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
63350+ atomic_read_unchecked(&sbi->s_bal_goals),
63351+ atomic_read_unchecked(&sbi->s_bal_2orders),
63352+ atomic_read_unchecked(&sbi->s_bal_breaks),
63353+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
63354 ext4_msg(sb, KERN_INFO,
63355 "mballoc: %lu generated and it took %Lu",
63356 sbi->s_mb_buddies_generated,
63357 sbi->s_mb_generation_time);
63358 ext4_msg(sb, KERN_INFO,
63359 "mballoc: %u preallocated, %u discarded",
63360- atomic_read(&sbi->s_mb_preallocated),
63361- atomic_read(&sbi->s_mb_discarded));
63362+ atomic_read_unchecked(&sbi->s_mb_preallocated),
63363+ atomic_read_unchecked(&sbi->s_mb_discarded));
63364 }
63365
63366 free_percpu(sbi->s_locality_groups);
63367@@ -3191,16 +3191,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
63368 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
63369
63370 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
63371- atomic_inc(&sbi->s_bal_reqs);
63372- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
63373+ atomic_inc_unchecked(&sbi->s_bal_reqs);
63374+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
63375 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
63376- atomic_inc(&sbi->s_bal_success);
63377- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
63378+ atomic_inc_unchecked(&sbi->s_bal_success);
63379+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
63380 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
63381 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
63382- atomic_inc(&sbi->s_bal_goals);
63383+ atomic_inc_unchecked(&sbi->s_bal_goals);
63384 if (ac->ac_found > sbi->s_mb_max_to_scan)
63385- atomic_inc(&sbi->s_bal_breaks);
63386+ atomic_inc_unchecked(&sbi->s_bal_breaks);
63387 }
63388
63389 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
63390@@ -3627,7 +3627,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
63391 trace_ext4_mb_new_inode_pa(ac, pa);
63392
63393 ext4_mb_use_inode_pa(ac, pa);
63394- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
63395+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
63396
63397 ei = EXT4_I(ac->ac_inode);
63398 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
63399@@ -3687,7 +3687,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
63400 trace_ext4_mb_new_group_pa(ac, pa);
63401
63402 ext4_mb_use_group_pa(ac, pa);
63403- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
63404+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
63405
63406 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
63407 lg = ac->ac_lg;
63408@@ -3776,7 +3776,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
63409 * from the bitmap and continue.
63410 */
63411 }
63412- atomic_add(free, &sbi->s_mb_discarded);
63413+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
63414
63415 return err;
63416 }
63417@@ -3794,7 +3794,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
63418 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
63419 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
63420 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
63421- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
63422+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
63423 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
63424
63425 return 0;
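All of the counters touched in this file are pure statistics. Under PaX's REFCOUNT hardening, plain atomic_t refuses to wrap on overflow (to kill reference-count-overflow exploits), so counters that may legitimately wrap are migrated to atomic_unchecked_t with _unchecked accessors that opt out of the protection. A rough userspace model of the split, assuming C11 atomics; the real kernel types and orderings differ:

#include <stdatomic.h>

typedef struct { atomic_int counter; } atomic_unchecked_t;

/* wraparound tolerated: these never guard an object's lifetime */
static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        atomic_fetch_add_explicit(&v->counter, 1, memory_order_relaxed);
}

static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
{
        atomic_fetch_add_explicit(&v->counter, i, memory_order_relaxed);
}

static inline int atomic_read_unchecked(atomic_unchecked_t *v)
{
        return atomic_load_explicit(&v->counter, memory_order_relaxed);
}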
63426diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
63427index 32bce84..112d969 100644
63428--- a/fs/ext4/mmp.c
63429+++ b/fs/ext4/mmp.c
63430@@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
63431 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
63432 const char *function, unsigned int line, const char *msg)
63433 {
63434- __ext4_warning(sb, function, line, msg);
63435+ __ext4_warning(sb, function, line, "%s", msg);
63436 __ext4_warning(sb, function, line,
63437 "MMP failure info: last update time: %llu, last update "
63438 "node: %s, last update device: %s\n",
63439diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
63440index 9e6eced..5e127be 100644
63441--- a/fs/ext4/namei.c
63442+++ b/fs/ext4/namei.c
63443@@ -1227,7 +1227,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
63444 buffer */
63445 int num = 0;
63446 ext4_lblk_t nblocks;
63447- int i, err;
63448+ int i, err = 0;
63449 int namelen;
63450
63451 *res_dir = NULL;
63452@@ -1264,7 +1264,11 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
63453 * return. Otherwise, fall back to doing a search the
63454 * old fashioned way.
63455 */
63456- if (bh || (err != ERR_BAD_DX_DIR))
63457+ if (err == -ENOENT)
63458+ return NULL;
63459+ if (err && err != ERR_BAD_DX_DIR)
63460+ return ERR_PTR(err);
63461+ if (bh)
63462 return bh;
63463 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
63464 "falling back\n"));
63465@@ -1295,6 +1299,11 @@ restart:
63466 }
63467 num++;
63468 bh = ext4_getblk(NULL, dir, b++, 0, &err);
63469+ if (unlikely(err)) {
63470+ if (ra_max == 0)
63471+ return ERR_PTR(err);
63472+ break;
63473+ }
63474 bh_use[ra_max] = bh;
63475 if (bh)
63476 ll_rw_block(READ | REQ_META | REQ_PRIO,
63477@@ -1417,6 +1426,8 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
63478 return ERR_PTR(-ENAMETOOLONG);
63479
63480 bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
63481+ if (IS_ERR(bh))
63482+ return (struct dentry *) bh;
63483 inode = NULL;
63484 if (bh) {
63485 __u32 ino = le32_to_cpu(de->inode);
63486@@ -1450,6 +1461,8 @@ struct dentry *ext4_get_parent(struct dentry *child)
63487 struct buffer_head *bh;
63488
63489 bh = ext4_find_entry(child->d_inode, &dotdot, &de, NULL);
63490+ if (IS_ERR(bh))
63491+ return (struct dentry *) bh;
63492 if (!bh)
63493 return ERR_PTR(-ENOENT);
63494 ino = le32_to_cpu(de->inode);
63495@@ -2727,6 +2740,8 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
63496
63497 retval = -ENOENT;
63498 bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
63499+ if (IS_ERR(bh))
63500+ return PTR_ERR(bh);
63501 if (!bh)
63502 goto end_rmdir;
63503
63504@@ -2794,6 +2809,8 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
63505
63506 retval = -ENOENT;
63507 bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
63508+ if (IS_ERR(bh))
63509+ return PTR_ERR(bh);
63510 if (!bh)
63511 goto end_unlink;
63512
63513@@ -3121,6 +3138,8 @@ static int ext4_find_delete_entry(handle_t *handle, struct inode *dir,
63514 struct ext4_dir_entry_2 *de;
63515
63516 bh = ext4_find_entry(dir, d_name, &de, NULL);
63517+ if (IS_ERR(bh))
63518+ return PTR_ERR(bh);
63519 if (bh) {
63520 retval = ext4_delete_entry(handle, dir, de, bh);
63521 brelse(bh);
63522@@ -3205,6 +3224,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
63523 dquot_initialize(new.inode);
63524
63525 old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
63526+ if (IS_ERR(old.bh))
63527+ return PTR_ERR(old.bh);
63528 /*
63529 * Check for inode number is _not_ due to possible IO errors.
63530 * We might rmdir the source, keep it as pwd of some process
63531@@ -3217,6 +3238,11 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
63532
63533 new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
63534 &new.de, &new.inlined);
63535+ if (IS_ERR(new.bh)) {
63536+ retval = PTR_ERR(new.bh);
63537+ new.bh = NULL;
63538+ goto end_rename;
63539+ }
63540 if (new.bh) {
63541 if (!new.inode) {
63542 brelse(new.bh);
63543@@ -3345,6 +3371,8 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
63544
63545 old.bh = ext4_find_entry(old.dir, &old.dentry->d_name,
63546 &old.de, &old.inlined);
63547+ if (IS_ERR(old.bh))
63548+ return PTR_ERR(old.bh);
63549 /*
63550 * Check for inode number is _not_ due to possible IO errors.
63551 * We might rmdir the source, keep it as pwd of some process
63552@@ -3357,6 +3385,11 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
63553
63554 new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
63555 &new.de, &new.inlined);
63556+ if (IS_ERR(new.bh)) {
63557+ retval = PTR_ERR(new.bh);
63558+ new.bh = NULL;
63559+ goto end_rename;
63560+ }
63561
63562 /* RENAME_EXCHANGE case: old *and* new must both exist */
63563 if (!new.bh || le32_to_cpu(new.de->inode) != new.inode->i_ino)
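Before these hunks, ext4_find_entry() collapsed real I/O failures into the same NULL that means "no such entry", so a transient read error during unlink or rename could masquerade as a missing file. Returning ERR_PTR(err) forces every call site into a three-way dispatch; the shape of it, reusing the IS_ERR()/PTR_ERR() sketch shown after the ext4.h hunk (bh, find_entry and the locals stand in for the ext4 symbols):

        bh = find_entry(dir, name, &de);
        if (IS_ERR(bh))
                return PTR_ERR(bh);     /* hard error: propagate it  */
        if (!bh)
                return -ENOENT;         /* clean miss: no such entry */
        /* ... use bh, then brelse(bh) ... */

The two rename paths additionally NULL the pointer before jumping to the shared cleanup label, for the reason spelled out after the resize.c hunks below.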
63564diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
63565index bb0e80f..1e43b90 100644
63566--- a/fs/ext4/resize.c
63567+++ b/fs/ext4/resize.c
63568@@ -575,6 +575,7 @@ handle_bb:
63569 bh = bclean(handle, sb, block);
63570 if (IS_ERR(bh)) {
63571 err = PTR_ERR(bh);
63572+ bh = NULL;
63573 goto out;
63574 }
63575 overhead = ext4_group_overhead_blocks(sb, group);
63576@@ -603,6 +604,7 @@ handle_ib:
63577 bh = bclean(handle, sb, block);
63578 if (IS_ERR(bh)) {
63579 err = PTR_ERR(bh);
63580+ bh = NULL;
63581 goto out;
63582 }
63583
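Both resize.c hunks clear bh immediately after harvesting PTR_ERR(bh). The shared out: label brelse()s the buffers it knows about, and brelse() on an ERR_PTR-encoded value would dereference an address a few bytes below the top of the address space; brelse(NULL), by contrast, is a harmless no-op. The defensive shape:

        bh = bclean(handle, sb, block);
        if (IS_ERR(bh)) {
                err = PTR_ERR(bh);
                bh = NULL;      /* make the out: cleanup's brelse(bh)
                                 * a no-op instead of a wild deref   */
                goto out;
        }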
63584diff --git a/fs/ext4/super.c b/fs/ext4/super.c
63585index beeb5c4..998c28d 100644
63586--- a/fs/ext4/super.c
63587+++ b/fs/ext4/super.c
63588@@ -1276,7 +1276,7 @@ static ext4_fsblk_t get_sb_block(void **data)
63589 }
63590
63591 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
63592-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
63593+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
63594 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
63595
63596 #ifdef CONFIG_QUOTA
63597@@ -2464,7 +2464,7 @@ struct ext4_attr {
63598 int offset;
63599 int deprecated_val;
63600 } u;
63601-};
63602+} __do_const;
63603
63604 static int parse_strtoull(const char *buf,
63605 unsigned long long max, unsigned long long *value)
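Two small constification idioms here: deprecated_msg, a printk format template, gains const so its bytes land in .rodata instead of writable .data, and struct ext4_attr is tagged __do_const, a grsecurity/PaX plugin attribute that makes otherwise-writable structures full of function pointers read-only after initialization. The array distinction in miniature:

static char       msg_rw[] = "option \"%s\" will be removed"; /* .data   */
static const char msg_ro[] = "option \"%s\" will be removed"; /* .rodata */

/* a kernel write primitive can retarget every later printk that goes
 * through msg_rw; msg_ro sits on a write-protected page */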
63606diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
63607index e738733..9843a6c 100644
63608--- a/fs/ext4/xattr.c
63609+++ b/fs/ext4/xattr.c
63610@@ -386,7 +386,7 @@ static int
63611 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
63612 char *buffer, size_t buffer_size)
63613 {
63614- size_t rest = buffer_size;
63615+ size_t rest = buffer_size, total_size = 0;
63616
63617 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
63618 const struct xattr_handler *handler =
63619@@ -403,9 +403,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
63620 buffer += size;
63621 }
63622 rest -= size;
63623+ total_size += size;
63624 }
63625 }
63626- return buffer_size - rest;
63627+ return total_size;
63628 }
63629
63630 static int
63631diff --git a/fs/fcntl.c b/fs/fcntl.c
63632index 72c82f6..a18b263 100644
63633--- a/fs/fcntl.c
63634+++ b/fs/fcntl.c
63635@@ -106,6 +106,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
63636 if (err)
63637 return err;
63638
63639+ if (gr_handle_chroot_fowner(pid, type))
63640+ return -ENOENT;
63641+ if (gr_check_protected_task_fowner(pid, type))
63642+ return -EACCES;
63643+
63644 f_modown(filp, pid, type, force);
63645 return 0;
63646 }
63647diff --git a/fs/fhandle.c b/fs/fhandle.c
63648index 999ff5c..ac037c9 100644
63649--- a/fs/fhandle.c
63650+++ b/fs/fhandle.c
63651@@ -8,6 +8,7 @@
63652 #include <linux/fs_struct.h>
63653 #include <linux/fsnotify.h>
63654 #include <linux/personality.h>
63655+#include <linux/grsecurity.h>
63656 #include <asm/uaccess.h>
63657 #include "internal.h"
63658 #include "mount.h"
63659@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
63660 } else
63661 retval = 0;
63662 /* copy the mount id */
63663- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
63664- sizeof(*mnt_id)) ||
63665+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
63666 copy_to_user(ufh, handle,
63667 sizeof(struct file_handle) + handle_bytes))
63668 retval = -EFAULT;
63669@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
63670 * the directory. Ideally we would like CAP_DAC_SEARCH.
63671 * But we don't have that
63672 */
63673- if (!capable(CAP_DAC_READ_SEARCH)) {
63674+ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
63675 retval = -EPERM;
63676 goto out_err;
63677 }
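Besides the gr_chroot_fhandle() gate, this hunk swaps a copy_to_user() of a single int for put_user(), the idiomatic (and cheaper) call when exactly one scalar crosses to userspace; both report failure the same way, so the -EFAULT handling is untouched. The shape, with mnt_id_uptr as a stand-in for the __user pointer argument:

        int mnt_id = real_mount(path->mnt)->mnt_id;   /* kernel-side value */
        if (put_user(mnt_id, mnt_id_uptr))            /* one scalar, checked */
                return -EFAULT;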
63678diff --git a/fs/file.c b/fs/file.c
63679index 66923fe..2849783 100644
63680--- a/fs/file.c
63681+++ b/fs/file.c
63682@@ -16,6 +16,7 @@
63683 #include <linux/slab.h>
63684 #include <linux/vmalloc.h>
63685 #include <linux/file.h>
63686+#include <linux/security.h>
63687 #include <linux/fdtable.h>
63688 #include <linux/bitops.h>
63689 #include <linux/interrupt.h>
63690@@ -139,7 +140,7 @@ out:
63691 * Return <0 error code on error; 1 on successful completion.
63692 * The files->file_lock should be held on entry, and will be held on exit.
63693 */
63694-static int expand_fdtable(struct files_struct *files, int nr)
63695+static int expand_fdtable(struct files_struct *files, unsigned int nr)
63696 __releases(files->file_lock)
63697 __acquires(files->file_lock)
63698 {
63699@@ -184,7 +185,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
63700 * expanded and execution may have blocked.
63701 * The files->file_lock should be held on entry, and will be held on exit.
63702 */
63703-static int expand_files(struct files_struct *files, int nr)
63704+static int expand_files(struct files_struct *files, unsigned int nr)
63705 {
63706 struct fdtable *fdt;
63707
63708@@ -799,6 +800,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
63709 if (!file)
63710 return __close_fd(files, fd);
63711
63712+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
63713 if (fd >= rlimit(RLIMIT_NOFILE))
63714 return -EBADF;
63715
63716@@ -825,6 +827,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
63717 if (unlikely(oldfd == newfd))
63718 return -EINVAL;
63719
63720+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
63721 if (newfd >= rlimit(RLIMIT_NOFILE))
63722 return -EBADF;
63723
63724@@ -880,6 +883,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
63725 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
63726 {
63727 int err;
63728+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
63729 if (from >= rlimit(RLIMIT_NOFILE))
63730 return -EINVAL;
63731 err = alloc_fd(from, flags);
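The pattern of the three fs/file.c additions, observe first and enforce second, is presumably for grsecurity's learning mode: the hook records the descriptor number a task actually asked for even when the request is about to be refused, so generated policy reflects real demand:

        gr_learn_resource(current, RLIMIT_NOFILE, fd, 0); /* record the ask */
        if (fd >= rlimit(RLIMIT_NOFILE))                  /* then enforce   */
                return -EBADF;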
63732diff --git a/fs/filesystems.c b/fs/filesystems.c
63733index 5797d45..7d7d79a 100644
63734--- a/fs/filesystems.c
63735+++ b/fs/filesystems.c
63736@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
63737 int len = dot ? dot - name : strlen(name);
63738
63739 fs = __get_fs_type(name, len);
63740+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63741+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
63742+#else
63743 if (!fs && (request_module("fs-%.*s", len, name) == 0))
63744+#endif
63745 fs = __get_fs_type(name, len);
63746
63747 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
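MODHARDEN reroutes userland-triggerable module auto-loads through ___request_module() with an origin tag ("grsec_modharden_fs"), letting policy distinguish an unprivileged mount pulling in a module from a root-run modprobe. The "fs-%.*s" alias namespace itself is stock kernel practice; a filesystem module opts in with the real MODULE_ALIAS_FS macro:

#include <linux/module.h>
#include <linux/fs.h>

/* expands to MODULE_ALIAS("fs-ext4"), which is what
 * request_module("fs-%.*s", ...) resolves against */
MODULE_ALIAS_FS("ext4");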
63748diff --git a/fs/fs_struct.c b/fs/fs_struct.c
63749index 7dca743..543d620 100644
63750--- a/fs/fs_struct.c
63751+++ b/fs/fs_struct.c
63752@@ -4,6 +4,7 @@
63753 #include <linux/path.h>
63754 #include <linux/slab.h>
63755 #include <linux/fs_struct.h>
63756+#include <linux/grsecurity.h>
63757 #include "internal.h"
63758
63759 /*
63760@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
63761 write_seqcount_begin(&fs->seq);
63762 old_root = fs->root;
63763 fs->root = *path;
63764+ gr_set_chroot_entries(current, path);
63765 write_seqcount_end(&fs->seq);
63766 spin_unlock(&fs->lock);
63767 if (old_root.dentry)
63768@@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
63769 int hits = 0;
63770 spin_lock(&fs->lock);
63771 write_seqcount_begin(&fs->seq);
63772+ /* this root replacement is only done by pivot_root,
63773+ leave grsec's chroot tagging alone for this task
63774+ so that a pivoted root isn't treated as a chroot
63775+ */
63776 hits += replace_path(&fs->root, old_root, new_root);
63777 hits += replace_path(&fs->pwd, old_root, new_root);
63778 write_seqcount_end(&fs->seq);
63779@@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
63780 task_lock(tsk);
63781 spin_lock(&fs->lock);
63782 tsk->fs = NULL;
63783- kill = !--fs->users;
63784+ gr_clear_chroot_entries(tsk);
63785+ kill = !atomic_dec_return(&fs->users);
63786 spin_unlock(&fs->lock);
63787 task_unlock(tsk);
63788 if (kill)
63789@@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
63790 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
63791 /* We don't need to lock fs - think why ;-) */
63792 if (fs) {
63793- fs->users = 1;
63794+ atomic_set(&fs->users, 1);
63795 fs->in_exec = 0;
63796 spin_lock_init(&fs->lock);
63797 seqcount_init(&fs->seq);
63798@@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
63799 spin_lock(&old->lock);
63800 fs->root = old->root;
63801 path_get(&fs->root);
63802+ /* instead of calling gr_set_chroot_entries here,
63803+ we call it from every caller of this function
63804+ */
63805 fs->pwd = old->pwd;
63806 path_get(&fs->pwd);
63807 spin_unlock(&old->lock);
63808@@ -139,8 +149,9 @@ int unshare_fs_struct(void)
63809
63810 task_lock(current);
63811 spin_lock(&fs->lock);
63812- kill = !--fs->users;
63813+ kill = !atomic_dec_return(&fs->users);
63814 current->fs = new_fs;
63815+ gr_set_chroot_entries(current, &new_fs->root);
63816 spin_unlock(&fs->lock);
63817 task_unlock(current);
63818
63819@@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
63820
63821 int current_umask(void)
63822 {
63823- return current->fs->umask;
63824+ return current->fs->umask | gr_acl_umask();
63825 }
63826 EXPORT_SYMBOL(current_umask);
63827
63828 /* to be mentioned only in INIT_TASK */
63829 struct fs_struct init_fs = {
63830- .users = 1,
63831+ .users = ATOMIC_INIT(1),
63832 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
63833 .seq = SEQCNT_ZERO(init_fs.seq),
63834 .umask = 0022,
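fs->users moves from a plain int to atomic_t. Every existing writer already held fs->lock, so the motivation is presumably the added grsecurity chroot bookkeeping touching the count without that lock; once unlocked readers exist, "kill = !--fs->users" has to become an atomic read-modify-write. C11 analogue of the "did I drop the last reference" test:

#include <stdatomic.h>
#include <stdbool.h>

static bool put_fs_ref(atomic_int *users)
{
        /* fetch_sub returns the value *before* the decrement, so a
         * previous value of 1 means this caller took it to zero */
        return atomic_fetch_sub_explicit(users, 1,
                                         memory_order_acq_rel) == 1;
}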
63835diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
63836index aec01be..cf81ff9 100644
63837--- a/fs/fscache/cookie.c
63838+++ b/fs/fscache/cookie.c
63839@@ -19,7 +19,7 @@
63840
63841 struct kmem_cache *fscache_cookie_jar;
63842
63843-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
63844+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
63845
63846 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
63847 static int fscache_alloc_object(struct fscache_cache *cache,
63848@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
63849 parent ? (char *) parent->def->name : "<no-parent>",
63850 def->name, netfs_data, enable);
63851
63852- fscache_stat(&fscache_n_acquires);
63853+ fscache_stat_unchecked(&fscache_n_acquires);
63854
63855 /* if there's no parent cookie, then we don't create one here either */
63856 if (!parent) {
63857- fscache_stat(&fscache_n_acquires_null);
63858+ fscache_stat_unchecked(&fscache_n_acquires_null);
63859 _leave(" [no parent]");
63860 return NULL;
63861 }
63862@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
63863 /* allocate and initialise a cookie */
63864 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
63865 if (!cookie) {
63866- fscache_stat(&fscache_n_acquires_oom);
63867+ fscache_stat_unchecked(&fscache_n_acquires_oom);
63868 _leave(" [ENOMEM]");
63869 return NULL;
63870 }
63871@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
63872
63873 switch (cookie->def->type) {
63874 case FSCACHE_COOKIE_TYPE_INDEX:
63875- fscache_stat(&fscache_n_cookie_index);
63876+ fscache_stat_unchecked(&fscache_n_cookie_index);
63877 break;
63878 case FSCACHE_COOKIE_TYPE_DATAFILE:
63879- fscache_stat(&fscache_n_cookie_data);
63880+ fscache_stat_unchecked(&fscache_n_cookie_data);
63881 break;
63882 default:
63883- fscache_stat(&fscache_n_cookie_special);
63884+ fscache_stat_unchecked(&fscache_n_cookie_special);
63885 break;
63886 }
63887
63888@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
63889 } else {
63890 atomic_dec(&parent->n_children);
63891 __fscache_cookie_put(cookie);
63892- fscache_stat(&fscache_n_acquires_nobufs);
63893+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
63894 _leave(" = NULL");
63895 return NULL;
63896 }
63897@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
63898 }
63899 }
63900
63901- fscache_stat(&fscache_n_acquires_ok);
63902+ fscache_stat_unchecked(&fscache_n_acquires_ok);
63903 _leave(" = %p", cookie);
63904 return cookie;
63905 }
63906@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
63907 cache = fscache_select_cache_for_object(cookie->parent);
63908 if (!cache) {
63909 up_read(&fscache_addremove_sem);
63910- fscache_stat(&fscache_n_acquires_no_cache);
63911+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
63912 _leave(" = -ENOMEDIUM [no cache]");
63913 return -ENOMEDIUM;
63914 }
63915@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
63916 object = cache->ops->alloc_object(cache, cookie);
63917 fscache_stat_d(&fscache_n_cop_alloc_object);
63918 if (IS_ERR(object)) {
63919- fscache_stat(&fscache_n_object_no_alloc);
63920+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
63921 ret = PTR_ERR(object);
63922 goto error;
63923 }
63924
63925- fscache_stat(&fscache_n_object_alloc);
63926+ fscache_stat_unchecked(&fscache_n_object_alloc);
63927
63928- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
63929+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
63930
63931 _debug("ALLOC OBJ%x: %s {%lx}",
63932 object->debug_id, cookie->def->name, object->events);
63933@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
63934
63935 _enter("{%s}", cookie->def->name);
63936
63937- fscache_stat(&fscache_n_invalidates);
63938+ fscache_stat_unchecked(&fscache_n_invalidates);
63939
63940 /* Only permit invalidation of data files. Invalidating an index will
63941 * require the caller to release all its attachments to the tree rooted
63942@@ -477,10 +477,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
63943 {
63944 struct fscache_object *object;
63945
63946- fscache_stat(&fscache_n_updates);
63947+ fscache_stat_unchecked(&fscache_n_updates);
63948
63949 if (!cookie) {
63950- fscache_stat(&fscache_n_updates_null);
63951+ fscache_stat_unchecked(&fscache_n_updates_null);
63952 _leave(" [no cookie]");
63953 return;
63954 }
63955@@ -581,12 +581,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
63956 */
63957 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
63958 {
63959- fscache_stat(&fscache_n_relinquishes);
63960+ fscache_stat_unchecked(&fscache_n_relinquishes);
63961 if (retire)
63962- fscache_stat(&fscache_n_relinquishes_retire);
63963+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
63964
63965 if (!cookie) {
63966- fscache_stat(&fscache_n_relinquishes_null);
63967+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
63968 _leave(" [no cookie]");
63969 return;
63970 }
63971@@ -687,7 +687,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
63972 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
63973 goto inconsistent;
63974
63975- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
63976+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
63977
63978 __fscache_use_cookie(cookie);
63979 if (fscache_submit_op(object, op) < 0)
63980diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
63981index bc6c08f..09c0d96 100644
63982--- a/fs/fscache/internal.h
63983+++ b/fs/fscache/internal.h
63984@@ -139,8 +139,8 @@ extern void fscache_operation_gc(struct work_struct *);
63985 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
63986 extern int fscache_wait_for_operation_activation(struct fscache_object *,
63987 struct fscache_operation *,
63988- atomic_t *,
63989- atomic_t *,
63990+ atomic_unchecked_t *,
63991+ atomic_unchecked_t *,
63992 void (*)(struct fscache_operation *));
63993 extern void fscache_invalidate_writes(struct fscache_cookie *);
63994
63995@@ -159,101 +159,101 @@ extern void fscache_proc_cleanup(void);
63996 * stats.c
63997 */
63998 #ifdef CONFIG_FSCACHE_STATS
63999-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
64000-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
64001+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
64002+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
64003
64004-extern atomic_t fscache_n_op_pend;
64005-extern atomic_t fscache_n_op_run;
64006-extern atomic_t fscache_n_op_enqueue;
64007-extern atomic_t fscache_n_op_deferred_release;
64008-extern atomic_t fscache_n_op_release;
64009-extern atomic_t fscache_n_op_gc;
64010-extern atomic_t fscache_n_op_cancelled;
64011-extern atomic_t fscache_n_op_rejected;
64012+extern atomic_unchecked_t fscache_n_op_pend;
64013+extern atomic_unchecked_t fscache_n_op_run;
64014+extern atomic_unchecked_t fscache_n_op_enqueue;
64015+extern atomic_unchecked_t fscache_n_op_deferred_release;
64016+extern atomic_unchecked_t fscache_n_op_release;
64017+extern atomic_unchecked_t fscache_n_op_gc;
64018+extern atomic_unchecked_t fscache_n_op_cancelled;
64019+extern atomic_unchecked_t fscache_n_op_rejected;
64020
64021-extern atomic_t fscache_n_attr_changed;
64022-extern atomic_t fscache_n_attr_changed_ok;
64023-extern atomic_t fscache_n_attr_changed_nobufs;
64024-extern atomic_t fscache_n_attr_changed_nomem;
64025-extern atomic_t fscache_n_attr_changed_calls;
64026+extern atomic_unchecked_t fscache_n_attr_changed;
64027+extern atomic_unchecked_t fscache_n_attr_changed_ok;
64028+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
64029+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
64030+extern atomic_unchecked_t fscache_n_attr_changed_calls;
64031
64032-extern atomic_t fscache_n_allocs;
64033-extern atomic_t fscache_n_allocs_ok;
64034-extern atomic_t fscache_n_allocs_wait;
64035-extern atomic_t fscache_n_allocs_nobufs;
64036-extern atomic_t fscache_n_allocs_intr;
64037-extern atomic_t fscache_n_allocs_object_dead;
64038-extern atomic_t fscache_n_alloc_ops;
64039-extern atomic_t fscache_n_alloc_op_waits;
64040+extern atomic_unchecked_t fscache_n_allocs;
64041+extern atomic_unchecked_t fscache_n_allocs_ok;
64042+extern atomic_unchecked_t fscache_n_allocs_wait;
64043+extern atomic_unchecked_t fscache_n_allocs_nobufs;
64044+extern atomic_unchecked_t fscache_n_allocs_intr;
64045+extern atomic_unchecked_t fscache_n_allocs_object_dead;
64046+extern atomic_unchecked_t fscache_n_alloc_ops;
64047+extern atomic_unchecked_t fscache_n_alloc_op_waits;
64048
64049-extern atomic_t fscache_n_retrievals;
64050-extern atomic_t fscache_n_retrievals_ok;
64051-extern atomic_t fscache_n_retrievals_wait;
64052-extern atomic_t fscache_n_retrievals_nodata;
64053-extern atomic_t fscache_n_retrievals_nobufs;
64054-extern atomic_t fscache_n_retrievals_intr;
64055-extern atomic_t fscache_n_retrievals_nomem;
64056-extern atomic_t fscache_n_retrievals_object_dead;
64057-extern atomic_t fscache_n_retrieval_ops;
64058-extern atomic_t fscache_n_retrieval_op_waits;
64059+extern atomic_unchecked_t fscache_n_retrievals;
64060+extern atomic_unchecked_t fscache_n_retrievals_ok;
64061+extern atomic_unchecked_t fscache_n_retrievals_wait;
64062+extern atomic_unchecked_t fscache_n_retrievals_nodata;
64063+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
64064+extern atomic_unchecked_t fscache_n_retrievals_intr;
64065+extern atomic_unchecked_t fscache_n_retrievals_nomem;
64066+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
64067+extern atomic_unchecked_t fscache_n_retrieval_ops;
64068+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
64069
64070-extern atomic_t fscache_n_stores;
64071-extern atomic_t fscache_n_stores_ok;
64072-extern atomic_t fscache_n_stores_again;
64073-extern atomic_t fscache_n_stores_nobufs;
64074-extern atomic_t fscache_n_stores_oom;
64075-extern atomic_t fscache_n_store_ops;
64076-extern atomic_t fscache_n_store_calls;
64077-extern atomic_t fscache_n_store_pages;
64078-extern atomic_t fscache_n_store_radix_deletes;
64079-extern atomic_t fscache_n_store_pages_over_limit;
64080+extern atomic_unchecked_t fscache_n_stores;
64081+extern atomic_unchecked_t fscache_n_stores_ok;
64082+extern atomic_unchecked_t fscache_n_stores_again;
64083+extern atomic_unchecked_t fscache_n_stores_nobufs;
64084+extern atomic_unchecked_t fscache_n_stores_oom;
64085+extern atomic_unchecked_t fscache_n_store_ops;
64086+extern atomic_unchecked_t fscache_n_store_calls;
64087+extern atomic_unchecked_t fscache_n_store_pages;
64088+extern atomic_unchecked_t fscache_n_store_radix_deletes;
64089+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
64090
64091-extern atomic_t fscache_n_store_vmscan_not_storing;
64092-extern atomic_t fscache_n_store_vmscan_gone;
64093-extern atomic_t fscache_n_store_vmscan_busy;
64094-extern atomic_t fscache_n_store_vmscan_cancelled;
64095-extern atomic_t fscache_n_store_vmscan_wait;
64096+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
64097+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
64098+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
64099+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
64100+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
64101
64102-extern atomic_t fscache_n_marks;
64103-extern atomic_t fscache_n_uncaches;
64104+extern atomic_unchecked_t fscache_n_marks;
64105+extern atomic_unchecked_t fscache_n_uncaches;
64106
64107-extern atomic_t fscache_n_acquires;
64108-extern atomic_t fscache_n_acquires_null;
64109-extern atomic_t fscache_n_acquires_no_cache;
64110-extern atomic_t fscache_n_acquires_ok;
64111-extern atomic_t fscache_n_acquires_nobufs;
64112-extern atomic_t fscache_n_acquires_oom;
64113+extern atomic_unchecked_t fscache_n_acquires;
64114+extern atomic_unchecked_t fscache_n_acquires_null;
64115+extern atomic_unchecked_t fscache_n_acquires_no_cache;
64116+extern atomic_unchecked_t fscache_n_acquires_ok;
64117+extern atomic_unchecked_t fscache_n_acquires_nobufs;
64118+extern atomic_unchecked_t fscache_n_acquires_oom;
64119
64120-extern atomic_t fscache_n_invalidates;
64121-extern atomic_t fscache_n_invalidates_run;
64122+extern atomic_unchecked_t fscache_n_invalidates;
64123+extern atomic_unchecked_t fscache_n_invalidates_run;
64124
64125-extern atomic_t fscache_n_updates;
64126-extern atomic_t fscache_n_updates_null;
64127-extern atomic_t fscache_n_updates_run;
64128+extern atomic_unchecked_t fscache_n_updates;
64129+extern atomic_unchecked_t fscache_n_updates_null;
64130+extern atomic_unchecked_t fscache_n_updates_run;
64131
64132-extern atomic_t fscache_n_relinquishes;
64133-extern atomic_t fscache_n_relinquishes_null;
64134-extern atomic_t fscache_n_relinquishes_waitcrt;
64135-extern atomic_t fscache_n_relinquishes_retire;
64136+extern atomic_unchecked_t fscache_n_relinquishes;
64137+extern atomic_unchecked_t fscache_n_relinquishes_null;
64138+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
64139+extern atomic_unchecked_t fscache_n_relinquishes_retire;
64140
64141-extern atomic_t fscache_n_cookie_index;
64142-extern atomic_t fscache_n_cookie_data;
64143-extern atomic_t fscache_n_cookie_special;
64144+extern atomic_unchecked_t fscache_n_cookie_index;
64145+extern atomic_unchecked_t fscache_n_cookie_data;
64146+extern atomic_unchecked_t fscache_n_cookie_special;
64147
64148-extern atomic_t fscache_n_object_alloc;
64149-extern atomic_t fscache_n_object_no_alloc;
64150-extern atomic_t fscache_n_object_lookups;
64151-extern atomic_t fscache_n_object_lookups_negative;
64152-extern atomic_t fscache_n_object_lookups_positive;
64153-extern atomic_t fscache_n_object_lookups_timed_out;
64154-extern atomic_t fscache_n_object_created;
64155-extern atomic_t fscache_n_object_avail;
64156-extern atomic_t fscache_n_object_dead;
64157+extern atomic_unchecked_t fscache_n_object_alloc;
64158+extern atomic_unchecked_t fscache_n_object_no_alloc;
64159+extern atomic_unchecked_t fscache_n_object_lookups;
64160+extern atomic_unchecked_t fscache_n_object_lookups_negative;
64161+extern atomic_unchecked_t fscache_n_object_lookups_positive;
64162+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
64163+extern atomic_unchecked_t fscache_n_object_created;
64164+extern atomic_unchecked_t fscache_n_object_avail;
64165+extern atomic_unchecked_t fscache_n_object_dead;
64166
64167-extern atomic_t fscache_n_checkaux_none;
64168-extern atomic_t fscache_n_checkaux_okay;
64169-extern atomic_t fscache_n_checkaux_update;
64170-extern atomic_t fscache_n_checkaux_obsolete;
64171+extern atomic_unchecked_t fscache_n_checkaux_none;
64172+extern atomic_unchecked_t fscache_n_checkaux_okay;
64173+extern atomic_unchecked_t fscache_n_checkaux_update;
64174+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
64175
64176 extern atomic_t fscache_n_cop_alloc_object;
64177 extern atomic_t fscache_n_cop_lookup_object;
64178@@ -278,6 +278,11 @@ static inline void fscache_stat(atomic_t *stat)
64179 atomic_inc(stat);
64180 }
64181
64182+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
64183+{
64184+ atomic_inc_unchecked(stat);
64185+}
64186+
64187 static inline void fscache_stat_d(atomic_t *stat)
64188 {
64189 atomic_dec(stat);
64190@@ -290,6 +295,7 @@ extern const struct file_operations fscache_stats_fops;
64191
64192 #define __fscache_stat(stat) (NULL)
64193 #define fscache_stat(stat) do {} while (0)
64194+#define fscache_stat_unchecked(stat) do {} while (0)
64195 #define fscache_stat_d(stat) do {} while (0)
64196 #endif
64197
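Whenever the CONFIG_FSCACHE_STATS=y branch grows a helper, the =n branch needs a matching no-op or every new fscache_stat_unchecked() call site breaks non-stats builds, which is what the last hunk above provides. The general pairing pattern (the foo_* names are illustrative, not fscache's):

#ifdef CONFIG_FOO_STATS
static inline void foo_stat_unchecked(atomic_unchecked_t *stat)
{
        atomic_inc_unchecked(stat);
}
#else
/* do {} while (0) keeps the no-op statement-shaped, so it still
 * composes safely inside un-braced if/else bodies */
#define foo_stat_unchecked(stat) do {} while (0)
#endif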
64198diff --git a/fs/fscache/object.c b/fs/fscache/object.c
64199index d3b4539..ed0c659 100644
64200--- a/fs/fscache/object.c
64201+++ b/fs/fscache/object.c
64202@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
64203 _debug("LOOKUP \"%s\" in \"%s\"",
64204 cookie->def->name, object->cache->tag->name);
64205
64206- fscache_stat(&fscache_n_object_lookups);
64207+ fscache_stat_unchecked(&fscache_n_object_lookups);
64208 fscache_stat(&fscache_n_cop_lookup_object);
64209 ret = object->cache->ops->lookup_object(object);
64210 fscache_stat_d(&fscache_n_cop_lookup_object);
64211@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
64212 if (ret == -ETIMEDOUT) {
64213 /* probably stuck behind another object, so move this one to
64214 * the back of the queue */
64215- fscache_stat(&fscache_n_object_lookups_timed_out);
64216+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
64217 _leave(" [timeout]");
64218 return NO_TRANSIT;
64219 }
64220@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
64221 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
64222
64223 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
64224- fscache_stat(&fscache_n_object_lookups_negative);
64225+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
64226
64227 /* Allow write requests to begin stacking up and read requests to begin
64228 * returning ENODATA.
64229@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
64230 /* if we were still looking up, then we must have a positive lookup
64231 * result, in which case there may be data available */
64232 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
64233- fscache_stat(&fscache_n_object_lookups_positive);
64234+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
64235
64236 /* We do (presumably) have data */
64237 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
64238@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
64239 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
64240 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
64241 } else {
64242- fscache_stat(&fscache_n_object_created);
64243+ fscache_stat_unchecked(&fscache_n_object_created);
64244 }
64245
64246 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
64247@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
64248 fscache_stat_d(&fscache_n_cop_lookup_complete);
64249
64250 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
64251- fscache_stat(&fscache_n_object_avail);
64252+ fscache_stat_unchecked(&fscache_n_object_avail);
64253
64254 _leave("");
64255 return transit_to(JUMPSTART_DEPS);
64256@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
64257
64258 /* this just shifts the object release to the work processor */
64259 fscache_put_object(object);
64260- fscache_stat(&fscache_n_object_dead);
64261+ fscache_stat_unchecked(&fscache_n_object_dead);
64262
64263 _leave("");
64264 return transit_to(OBJECT_DEAD);
64265@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
64266 enum fscache_checkaux result;
64267
64268 if (!object->cookie->def->check_aux) {
64269- fscache_stat(&fscache_n_checkaux_none);
64270+ fscache_stat_unchecked(&fscache_n_checkaux_none);
64271 return FSCACHE_CHECKAUX_OKAY;
64272 }
64273
64274@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
64275 switch (result) {
64276 /* entry okay as is */
64277 case FSCACHE_CHECKAUX_OKAY:
64278- fscache_stat(&fscache_n_checkaux_okay);
64279+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
64280 break;
64281
64282 /* entry requires update */
64283 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
64284- fscache_stat(&fscache_n_checkaux_update);
64285+ fscache_stat_unchecked(&fscache_n_checkaux_update);
64286 break;
64287
64288 /* entry requires deletion */
64289 case FSCACHE_CHECKAUX_OBSOLETE:
64290- fscache_stat(&fscache_n_checkaux_obsolete);
64291+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
64292 break;
64293
64294 default:
64295@@ -992,7 +992,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
64296 {
64297 const struct fscache_state *s;
64298
64299- fscache_stat(&fscache_n_invalidates_run);
64300+ fscache_stat_unchecked(&fscache_n_invalidates_run);
64301 fscache_stat(&fscache_n_cop_invalidate_object);
64302 s = _fscache_invalidate_object(object, event);
64303 fscache_stat_d(&fscache_n_cop_invalidate_object);
64304@@ -1007,7 +1007,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
64305 {
64306 _enter("{OBJ%x},%d", object->debug_id, event);
64307
64308- fscache_stat(&fscache_n_updates_run);
64309+ fscache_stat_unchecked(&fscache_n_updates_run);
64310 fscache_stat(&fscache_n_cop_update_object);
64311 object->cache->ops->update_object(object);
64312 fscache_stat_d(&fscache_n_cop_update_object);
64313diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
64314index e7b87a0..a85d47a 100644
64315--- a/fs/fscache/operation.c
64316+++ b/fs/fscache/operation.c
64317@@ -17,7 +17,7 @@
64318 #include <linux/slab.h>
64319 #include "internal.h"
64320
64321-atomic_t fscache_op_debug_id;
64322+atomic_unchecked_t fscache_op_debug_id;
64323 EXPORT_SYMBOL(fscache_op_debug_id);
64324
64325 /**
64326@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
64327 ASSERTCMP(atomic_read(&op->usage), >, 0);
64328 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
64329
64330- fscache_stat(&fscache_n_op_enqueue);
64331+ fscache_stat_unchecked(&fscache_n_op_enqueue);
64332 switch (op->flags & FSCACHE_OP_TYPE) {
64333 case FSCACHE_OP_ASYNC:
64334 _debug("queue async");
64335@@ -72,7 +72,7 @@ static void fscache_run_op(struct fscache_object *object,
64336 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
64337 if (op->processor)
64338 fscache_enqueue_operation(op);
64339- fscache_stat(&fscache_n_op_run);
64340+ fscache_stat_unchecked(&fscache_n_op_run);
64341 }
64342
64343 /*
64344@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
64345 if (object->n_in_progress > 0) {
64346 atomic_inc(&op->usage);
64347 list_add_tail(&op->pend_link, &object->pending_ops);
64348- fscache_stat(&fscache_n_op_pend);
64349+ fscache_stat_unchecked(&fscache_n_op_pend);
64350 } else if (!list_empty(&object->pending_ops)) {
64351 atomic_inc(&op->usage);
64352 list_add_tail(&op->pend_link, &object->pending_ops);
64353- fscache_stat(&fscache_n_op_pend);
64354+ fscache_stat_unchecked(&fscache_n_op_pend);
64355 fscache_start_operations(object);
64356 } else {
64357 ASSERTCMP(object->n_in_progress, ==, 0);
64358@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
64359 object->n_exclusive++; /* reads and writes must wait */
64360 atomic_inc(&op->usage);
64361 list_add_tail(&op->pend_link, &object->pending_ops);
64362- fscache_stat(&fscache_n_op_pend);
64363+ fscache_stat_unchecked(&fscache_n_op_pend);
64364 ret = 0;
64365 } else {
64366 /* If we're in any other state, there must have been an I/O
64367@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_object *object,
64368 if (object->n_exclusive > 0) {
64369 atomic_inc(&op->usage);
64370 list_add_tail(&op->pend_link, &object->pending_ops);
64371- fscache_stat(&fscache_n_op_pend);
64372+ fscache_stat_unchecked(&fscache_n_op_pend);
64373 } else if (!list_empty(&object->pending_ops)) {
64374 atomic_inc(&op->usage);
64375 list_add_tail(&op->pend_link, &object->pending_ops);
64376- fscache_stat(&fscache_n_op_pend);
64377+ fscache_stat_unchecked(&fscache_n_op_pend);
64378 fscache_start_operations(object);
64379 } else {
64380 ASSERTCMP(object->n_exclusive, ==, 0);
64381@@ -227,10 +227,10 @@ int fscache_submit_op(struct fscache_object *object,
64382 object->n_ops++;
64383 atomic_inc(&op->usage);
64384 list_add_tail(&op->pend_link, &object->pending_ops);
64385- fscache_stat(&fscache_n_op_pend);
64386+ fscache_stat_unchecked(&fscache_n_op_pend);
64387 ret = 0;
64388 } else if (fscache_object_is_dying(object)) {
64389- fscache_stat(&fscache_n_op_rejected);
64390+ fscache_stat_unchecked(&fscache_n_op_rejected);
64391 op->state = FSCACHE_OP_ST_CANCELLED;
64392 ret = -ENOBUFS;
64393 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
64394@@ -309,7 +309,7 @@ int fscache_cancel_op(struct fscache_operation *op,
64395 ret = -EBUSY;
64396 if (op->state == FSCACHE_OP_ST_PENDING) {
64397 ASSERT(!list_empty(&op->pend_link));
64398- fscache_stat(&fscache_n_op_cancelled);
64399+ fscache_stat_unchecked(&fscache_n_op_cancelled);
64400 list_del_init(&op->pend_link);
64401 if (do_cancel)
64402 do_cancel(op);
64403@@ -341,7 +341,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
64404 while (!list_empty(&object->pending_ops)) {
64405 op = list_entry(object->pending_ops.next,
64406 struct fscache_operation, pend_link);
64407- fscache_stat(&fscache_n_op_cancelled);
64408+ fscache_stat_unchecked(&fscache_n_op_cancelled);
64409 list_del_init(&op->pend_link);
64410
64411 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
64412@@ -413,7 +413,7 @@ void fscache_put_operation(struct fscache_operation *op)
64413 op->state, ==, FSCACHE_OP_ST_CANCELLED);
64414 op->state = FSCACHE_OP_ST_DEAD;
64415
64416- fscache_stat(&fscache_n_op_release);
64417+ fscache_stat_unchecked(&fscache_n_op_release);
64418
64419 if (op->release) {
64420 op->release(op);
64421@@ -432,7 +432,7 @@ void fscache_put_operation(struct fscache_operation *op)
64422 * lock, and defer it otherwise */
64423 if (!spin_trylock(&object->lock)) {
64424 _debug("defer put");
64425- fscache_stat(&fscache_n_op_deferred_release);
64426+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
64427
64428 cache = object->cache;
64429 spin_lock(&cache->op_gc_list_lock);
64430@@ -485,7 +485,7 @@ void fscache_operation_gc(struct work_struct *work)
64431
64432 _debug("GC DEFERRED REL OBJ%x OP%x",
64433 object->debug_id, op->debug_id);
64434- fscache_stat(&fscache_n_op_gc);
64435+ fscache_stat_unchecked(&fscache_n_op_gc);
64436
64437 ASSERTCMP(atomic_read(&op->usage), ==, 0);
64438 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
64439diff --git a/fs/fscache/page.c b/fs/fscache/page.c
64440index ed70714..67f4982 100644
64441--- a/fs/fscache/page.c
64442+++ b/fs/fscache/page.c
64443@@ -61,7 +61,7 @@ try_again:
64444 val = radix_tree_lookup(&cookie->stores, page->index);
64445 if (!val) {
64446 rcu_read_unlock();
64447- fscache_stat(&fscache_n_store_vmscan_not_storing);
64448+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
64449 __fscache_uncache_page(cookie, page);
64450 return true;
64451 }
64452@@ -91,11 +91,11 @@ try_again:
64453 spin_unlock(&cookie->stores_lock);
64454
64455 if (xpage) {
64456- fscache_stat(&fscache_n_store_vmscan_cancelled);
64457- fscache_stat(&fscache_n_store_radix_deletes);
64458+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
64459+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
64460 ASSERTCMP(xpage, ==, page);
64461 } else {
64462- fscache_stat(&fscache_n_store_vmscan_gone);
64463+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
64464 }
64465
64466 wake_up_bit(&cookie->flags, 0);
64467@@ -110,11 +110,11 @@ page_busy:
64468 * sleeping on memory allocation, so we may need to impose a timeout
64469 * too. */
64470 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
64471- fscache_stat(&fscache_n_store_vmscan_busy);
64472+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
64473 return false;
64474 }
64475
64476- fscache_stat(&fscache_n_store_vmscan_wait);
64477+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
64478 __fscache_wait_on_page_write(cookie, page);
64479 gfp &= ~__GFP_WAIT;
64480 goto try_again;
64481@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
64482 FSCACHE_COOKIE_STORING_TAG);
64483 if (!radix_tree_tag_get(&cookie->stores, page->index,
64484 FSCACHE_COOKIE_PENDING_TAG)) {
64485- fscache_stat(&fscache_n_store_radix_deletes);
64486+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
64487 xpage = radix_tree_delete(&cookie->stores, page->index);
64488 }
64489 spin_unlock(&cookie->stores_lock);
64490@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
64491
64492 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
64493
64494- fscache_stat(&fscache_n_attr_changed_calls);
64495+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
64496
64497 if (fscache_object_is_active(object)) {
64498 fscache_stat(&fscache_n_cop_attr_changed);
64499@@ -188,11 +188,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
64500
64501 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
64502
64503- fscache_stat(&fscache_n_attr_changed);
64504+ fscache_stat_unchecked(&fscache_n_attr_changed);
64505
64506 op = kzalloc(sizeof(*op), GFP_KERNEL);
64507 if (!op) {
64508- fscache_stat(&fscache_n_attr_changed_nomem);
64509+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
64510 _leave(" = -ENOMEM");
64511 return -ENOMEM;
64512 }
64513@@ -214,7 +214,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
64514 if (fscache_submit_exclusive_op(object, op) < 0)
64515 goto nobufs;
64516 spin_unlock(&cookie->lock);
64517- fscache_stat(&fscache_n_attr_changed_ok);
64518+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
64519 fscache_put_operation(op);
64520 _leave(" = 0");
64521 return 0;
64522@@ -225,7 +225,7 @@ nobufs:
64523 kfree(op);
64524 if (wake_cookie)
64525 __fscache_wake_unused_cookie(cookie);
64526- fscache_stat(&fscache_n_attr_changed_nobufs);
64527+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
64528 _leave(" = %d", -ENOBUFS);
64529 return -ENOBUFS;
64530 }
64531@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
64532 /* allocate a retrieval operation and attempt to submit it */
64533 op = kzalloc(sizeof(*op), GFP_NOIO);
64534 if (!op) {
64535- fscache_stat(&fscache_n_retrievals_nomem);
64536+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
64537 return NULL;
64538 }
64539
64540@@ -294,13 +294,13 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
64541 return 0;
64542 }
64543
64544- fscache_stat(&fscache_n_retrievals_wait);
64545+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
64546
64547 jif = jiffies;
64548 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
64549 fscache_wait_bit_interruptible,
64550 TASK_INTERRUPTIBLE) != 0) {
64551- fscache_stat(&fscache_n_retrievals_intr);
64552+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
64553 _leave(" = -ERESTARTSYS");
64554 return -ERESTARTSYS;
64555 }
64556@@ -329,8 +329,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
64557 */
64558 int fscache_wait_for_operation_activation(struct fscache_object *object,
64559 struct fscache_operation *op,
64560- atomic_t *stat_op_waits,
64561- atomic_t *stat_object_dead,
64562+ atomic_unchecked_t *stat_op_waits,
64563+ atomic_unchecked_t *stat_object_dead,
64564 void (*do_cancel)(struct fscache_operation *))
64565 {
64566 int ret;
64567@@ -340,7 +340,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
64568
64569 _debug(">>> WT");
64570 if (stat_op_waits)
64571- fscache_stat(stat_op_waits);
64572+ fscache_stat_unchecked(stat_op_waits);
64573 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
64574 fscache_wait_bit_interruptible,
64575 TASK_INTERRUPTIBLE) != 0) {
64576@@ -358,7 +358,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
64577 check_if_dead:
64578 if (op->state == FSCACHE_OP_ST_CANCELLED) {
64579 if (stat_object_dead)
64580- fscache_stat(stat_object_dead);
64581+ fscache_stat_unchecked(stat_object_dead);
64582 _leave(" = -ENOBUFS [cancelled]");
64583 return -ENOBUFS;
64584 }
64585@@ -366,7 +366,7 @@ check_if_dead:
64586 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
64587 fscache_cancel_op(op, do_cancel);
64588 if (stat_object_dead)
64589- fscache_stat(stat_object_dead);
64590+ fscache_stat_unchecked(stat_object_dead);
64591 return -ENOBUFS;
64592 }
64593 return 0;
64594@@ -394,7 +394,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
64595
64596 _enter("%p,%p,,,", cookie, page);
64597
64598- fscache_stat(&fscache_n_retrievals);
64599+ fscache_stat_unchecked(&fscache_n_retrievals);
64600
64601 if (hlist_empty(&cookie->backing_objects))
64602 goto nobufs;
64603@@ -436,7 +436,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
64604 goto nobufs_unlock_dec;
64605 spin_unlock(&cookie->lock);
64606
64607- fscache_stat(&fscache_n_retrieval_ops);
64608+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
64609
64610 /* pin the netfs read context in case we need to do the actual netfs
64611 * read because we've encountered a cache read failure */
64612@@ -467,15 +467,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
64613
64614 error:
64615 if (ret == -ENOMEM)
64616- fscache_stat(&fscache_n_retrievals_nomem);
64617+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
64618 else if (ret == -ERESTARTSYS)
64619- fscache_stat(&fscache_n_retrievals_intr);
64620+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
64621 else if (ret == -ENODATA)
64622- fscache_stat(&fscache_n_retrievals_nodata);
64623+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
64624 else if (ret < 0)
64625- fscache_stat(&fscache_n_retrievals_nobufs);
64626+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
64627 else
64628- fscache_stat(&fscache_n_retrievals_ok);
64629+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
64630
64631 fscache_put_retrieval(op);
64632 _leave(" = %d", ret);
64633@@ -490,7 +490,7 @@ nobufs_unlock:
64634 __fscache_wake_unused_cookie(cookie);
64635 kfree(op);
64636 nobufs:
64637- fscache_stat(&fscache_n_retrievals_nobufs);
64638+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
64639 _leave(" = -ENOBUFS");
64640 return -ENOBUFS;
64641 }
64642@@ -529,7 +529,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
64643
64644 _enter("%p,,%d,,,", cookie, *nr_pages);
64645
64646- fscache_stat(&fscache_n_retrievals);
64647+ fscache_stat_unchecked(&fscache_n_retrievals);
64648
64649 if (hlist_empty(&cookie->backing_objects))
64650 goto nobufs;
64651@@ -567,7 +567,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
64652 goto nobufs_unlock_dec;
64653 spin_unlock(&cookie->lock);
64654
64655- fscache_stat(&fscache_n_retrieval_ops);
64656+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
64657
64658 /* pin the netfs read context in case we need to do the actual netfs
64659 * read because we've encountered a cache read failure */
64660@@ -598,15 +598,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
64661
64662 error:
64663 if (ret == -ENOMEM)
64664- fscache_stat(&fscache_n_retrievals_nomem);
64665+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
64666 else if (ret == -ERESTARTSYS)
64667- fscache_stat(&fscache_n_retrievals_intr);
64668+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
64669 else if (ret == -ENODATA)
64670- fscache_stat(&fscache_n_retrievals_nodata);
64671+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
64672 else if (ret < 0)
64673- fscache_stat(&fscache_n_retrievals_nobufs);
64674+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
64675 else
64676- fscache_stat(&fscache_n_retrievals_ok);
64677+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
64678
64679 fscache_put_retrieval(op);
64680 _leave(" = %d", ret);
64681@@ -621,7 +621,7 @@ nobufs_unlock:
64682 if (wake_cookie)
64683 __fscache_wake_unused_cookie(cookie);
64684 nobufs:
64685- fscache_stat(&fscache_n_retrievals_nobufs);
64686+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
64687 _leave(" = -ENOBUFS");
64688 return -ENOBUFS;
64689 }
64690@@ -646,7 +646,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
64691
64692 _enter("%p,%p,,,", cookie, page);
64693
64694- fscache_stat(&fscache_n_allocs);
64695+ fscache_stat_unchecked(&fscache_n_allocs);
64696
64697 if (hlist_empty(&cookie->backing_objects))
64698 goto nobufs;
64699@@ -680,7 +680,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
64700 goto nobufs_unlock_dec;
64701 spin_unlock(&cookie->lock);
64702
64703- fscache_stat(&fscache_n_alloc_ops);
64704+ fscache_stat_unchecked(&fscache_n_alloc_ops);
64705
64706 ret = fscache_wait_for_operation_activation(
64707 object, &op->op,
64708@@ -697,11 +697,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
64709
64710 error:
64711 if (ret == -ERESTARTSYS)
64712- fscache_stat(&fscache_n_allocs_intr);
64713+ fscache_stat_unchecked(&fscache_n_allocs_intr);
64714 else if (ret < 0)
64715- fscache_stat(&fscache_n_allocs_nobufs);
64716+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
64717 else
64718- fscache_stat(&fscache_n_allocs_ok);
64719+ fscache_stat_unchecked(&fscache_n_allocs_ok);
64720
64721 fscache_put_retrieval(op);
64722 _leave(" = %d", ret);
64723@@ -715,7 +715,7 @@ nobufs_unlock:
64724 if (wake_cookie)
64725 __fscache_wake_unused_cookie(cookie);
64726 nobufs:
64727- fscache_stat(&fscache_n_allocs_nobufs);
64728+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
64729 _leave(" = -ENOBUFS");
64730 return -ENOBUFS;
64731 }
64732@@ -791,7 +791,7 @@ static void fscache_write_op(struct fscache_operation *_op)
64733
64734 spin_lock(&cookie->stores_lock);
64735
64736- fscache_stat(&fscache_n_store_calls);
64737+ fscache_stat_unchecked(&fscache_n_store_calls);
64738
64739 /* find a page to store */
64740 page = NULL;
64741@@ -802,7 +802,7 @@ static void fscache_write_op(struct fscache_operation *_op)
64742 page = results[0];
64743 _debug("gang %d [%lx]", n, page->index);
64744 if (page->index > op->store_limit) {
64745- fscache_stat(&fscache_n_store_pages_over_limit);
64746+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
64747 goto superseded;
64748 }
64749
64750@@ -814,7 +814,7 @@ static void fscache_write_op(struct fscache_operation *_op)
64751 spin_unlock(&cookie->stores_lock);
64752 spin_unlock(&object->lock);
64753
64754- fscache_stat(&fscache_n_store_pages);
64755+ fscache_stat_unchecked(&fscache_n_store_pages);
64756 fscache_stat(&fscache_n_cop_write_page);
64757 ret = object->cache->ops->write_page(op, page);
64758 fscache_stat_d(&fscache_n_cop_write_page);
64759@@ -918,7 +918,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
64760 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
64761 ASSERT(PageFsCache(page));
64762
64763- fscache_stat(&fscache_n_stores);
64764+ fscache_stat_unchecked(&fscache_n_stores);
64765
64766 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
64767 _leave(" = -ENOBUFS [invalidating]");
64768@@ -977,7 +977,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
64769 spin_unlock(&cookie->stores_lock);
64770 spin_unlock(&object->lock);
64771
64772- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
64773+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
64774 op->store_limit = object->store_limit;
64775
64776 __fscache_use_cookie(cookie);
64777@@ -986,8 +986,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
64778
64779 spin_unlock(&cookie->lock);
64780 radix_tree_preload_end();
64781- fscache_stat(&fscache_n_store_ops);
64782- fscache_stat(&fscache_n_stores_ok);
64783+ fscache_stat_unchecked(&fscache_n_store_ops);
64784+ fscache_stat_unchecked(&fscache_n_stores_ok);
64785
64786 /* the work queue now carries its own ref on the object */
64787 fscache_put_operation(&op->op);
64788@@ -995,14 +995,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
64789 return 0;
64790
64791 already_queued:
64792- fscache_stat(&fscache_n_stores_again);
64793+ fscache_stat_unchecked(&fscache_n_stores_again);
64794 already_pending:
64795 spin_unlock(&cookie->stores_lock);
64796 spin_unlock(&object->lock);
64797 spin_unlock(&cookie->lock);
64798 radix_tree_preload_end();
64799 kfree(op);
64800- fscache_stat(&fscache_n_stores_ok);
64801+ fscache_stat_unchecked(&fscache_n_stores_ok);
64802 _leave(" = 0");
64803 return 0;
64804
64805@@ -1024,14 +1024,14 @@ nobufs:
64806 kfree(op);
64807 if (wake_cookie)
64808 __fscache_wake_unused_cookie(cookie);
64809- fscache_stat(&fscache_n_stores_nobufs);
64810+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
64811 _leave(" = -ENOBUFS");
64812 return -ENOBUFS;
64813
64814 nomem_free:
64815 kfree(op);
64816 nomem:
64817- fscache_stat(&fscache_n_stores_oom);
64818+ fscache_stat_unchecked(&fscache_n_stores_oom);
64819 _leave(" = -ENOMEM");
64820 return -ENOMEM;
64821 }
64822@@ -1049,7 +1049,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
64823 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
64824 ASSERTCMP(page, !=, NULL);
64825
64826- fscache_stat(&fscache_n_uncaches);
64827+ fscache_stat_unchecked(&fscache_n_uncaches);
64828
64829 /* cache withdrawal may beat us to it */
64830 if (!PageFsCache(page))
64831@@ -1100,7 +1100,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
64832 struct fscache_cookie *cookie = op->op.object->cookie;
64833
64834 #ifdef CONFIG_FSCACHE_STATS
64835- atomic_inc(&fscache_n_marks);
64836+ atomic_inc_unchecked(&fscache_n_marks);
64837 #endif
64838
64839 _debug("- mark %p{%lx}", page, page->index);
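[annotation] Every statistics bump in the fs/fscache/page.c hunks above swaps fscache_stat() for fscache_stat_unchecked(). These counters only feed the /proc/fs/fscache/stats report and never guard an object's lifetime, so wrap-around is harmless and the PaX REFCOUNT overflow trap would be pure overhead on hot paths. A minimal sketch of the companion helper, assuming the patch defines it next to fscache_stat() in fs/fscache/internal.h (that hunk is not part of this excerpt):

static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
{
	atomic_inc_unchecked(stat);
}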
64840diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
64841index 40d13c7..ddf52b9 100644
64842--- a/fs/fscache/stats.c
64843+++ b/fs/fscache/stats.c
64844@@ -18,99 +18,99 @@
64845 /*
64846 * operation counters
64847 */
64848-atomic_t fscache_n_op_pend;
64849-atomic_t fscache_n_op_run;
64850-atomic_t fscache_n_op_enqueue;
64851-atomic_t fscache_n_op_requeue;
64852-atomic_t fscache_n_op_deferred_release;
64853-atomic_t fscache_n_op_release;
64854-atomic_t fscache_n_op_gc;
64855-atomic_t fscache_n_op_cancelled;
64856-atomic_t fscache_n_op_rejected;
64857+atomic_unchecked_t fscache_n_op_pend;
64858+atomic_unchecked_t fscache_n_op_run;
64859+atomic_unchecked_t fscache_n_op_enqueue;
64860+atomic_unchecked_t fscache_n_op_requeue;
64861+atomic_unchecked_t fscache_n_op_deferred_release;
64862+atomic_unchecked_t fscache_n_op_release;
64863+atomic_unchecked_t fscache_n_op_gc;
64864+atomic_unchecked_t fscache_n_op_cancelled;
64865+atomic_unchecked_t fscache_n_op_rejected;
64866
64867-atomic_t fscache_n_attr_changed;
64868-atomic_t fscache_n_attr_changed_ok;
64869-atomic_t fscache_n_attr_changed_nobufs;
64870-atomic_t fscache_n_attr_changed_nomem;
64871-atomic_t fscache_n_attr_changed_calls;
64872+atomic_unchecked_t fscache_n_attr_changed;
64873+atomic_unchecked_t fscache_n_attr_changed_ok;
64874+atomic_unchecked_t fscache_n_attr_changed_nobufs;
64875+atomic_unchecked_t fscache_n_attr_changed_nomem;
64876+atomic_unchecked_t fscache_n_attr_changed_calls;
64877
64878-atomic_t fscache_n_allocs;
64879-atomic_t fscache_n_allocs_ok;
64880-atomic_t fscache_n_allocs_wait;
64881-atomic_t fscache_n_allocs_nobufs;
64882-atomic_t fscache_n_allocs_intr;
64883-atomic_t fscache_n_allocs_object_dead;
64884-atomic_t fscache_n_alloc_ops;
64885-atomic_t fscache_n_alloc_op_waits;
64886+atomic_unchecked_t fscache_n_allocs;
64887+atomic_unchecked_t fscache_n_allocs_ok;
64888+atomic_unchecked_t fscache_n_allocs_wait;
64889+atomic_unchecked_t fscache_n_allocs_nobufs;
64890+atomic_unchecked_t fscache_n_allocs_intr;
64891+atomic_unchecked_t fscache_n_allocs_object_dead;
64892+atomic_unchecked_t fscache_n_alloc_ops;
64893+atomic_unchecked_t fscache_n_alloc_op_waits;
64894
64895-atomic_t fscache_n_retrievals;
64896-atomic_t fscache_n_retrievals_ok;
64897-atomic_t fscache_n_retrievals_wait;
64898-atomic_t fscache_n_retrievals_nodata;
64899-atomic_t fscache_n_retrievals_nobufs;
64900-atomic_t fscache_n_retrievals_intr;
64901-atomic_t fscache_n_retrievals_nomem;
64902-atomic_t fscache_n_retrievals_object_dead;
64903-atomic_t fscache_n_retrieval_ops;
64904-atomic_t fscache_n_retrieval_op_waits;
64905+atomic_unchecked_t fscache_n_retrievals;
64906+atomic_unchecked_t fscache_n_retrievals_ok;
64907+atomic_unchecked_t fscache_n_retrievals_wait;
64908+atomic_unchecked_t fscache_n_retrievals_nodata;
64909+atomic_unchecked_t fscache_n_retrievals_nobufs;
64910+atomic_unchecked_t fscache_n_retrievals_intr;
64911+atomic_unchecked_t fscache_n_retrievals_nomem;
64912+atomic_unchecked_t fscache_n_retrievals_object_dead;
64913+atomic_unchecked_t fscache_n_retrieval_ops;
64914+atomic_unchecked_t fscache_n_retrieval_op_waits;
64915
64916-atomic_t fscache_n_stores;
64917-atomic_t fscache_n_stores_ok;
64918-atomic_t fscache_n_stores_again;
64919-atomic_t fscache_n_stores_nobufs;
64920-atomic_t fscache_n_stores_oom;
64921-atomic_t fscache_n_store_ops;
64922-atomic_t fscache_n_store_calls;
64923-atomic_t fscache_n_store_pages;
64924-atomic_t fscache_n_store_radix_deletes;
64925-atomic_t fscache_n_store_pages_over_limit;
64926+atomic_unchecked_t fscache_n_stores;
64927+atomic_unchecked_t fscache_n_stores_ok;
64928+atomic_unchecked_t fscache_n_stores_again;
64929+atomic_unchecked_t fscache_n_stores_nobufs;
64930+atomic_unchecked_t fscache_n_stores_oom;
64931+atomic_unchecked_t fscache_n_store_ops;
64932+atomic_unchecked_t fscache_n_store_calls;
64933+atomic_unchecked_t fscache_n_store_pages;
64934+atomic_unchecked_t fscache_n_store_radix_deletes;
64935+atomic_unchecked_t fscache_n_store_pages_over_limit;
64936
64937-atomic_t fscache_n_store_vmscan_not_storing;
64938-atomic_t fscache_n_store_vmscan_gone;
64939-atomic_t fscache_n_store_vmscan_busy;
64940-atomic_t fscache_n_store_vmscan_cancelled;
64941-atomic_t fscache_n_store_vmscan_wait;
64942+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
64943+atomic_unchecked_t fscache_n_store_vmscan_gone;
64944+atomic_unchecked_t fscache_n_store_vmscan_busy;
64945+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
64946+atomic_unchecked_t fscache_n_store_vmscan_wait;
64947
64948-atomic_t fscache_n_marks;
64949-atomic_t fscache_n_uncaches;
64950+atomic_unchecked_t fscache_n_marks;
64951+atomic_unchecked_t fscache_n_uncaches;
64952
64953-atomic_t fscache_n_acquires;
64954-atomic_t fscache_n_acquires_null;
64955-atomic_t fscache_n_acquires_no_cache;
64956-atomic_t fscache_n_acquires_ok;
64957-atomic_t fscache_n_acquires_nobufs;
64958-atomic_t fscache_n_acquires_oom;
64959+atomic_unchecked_t fscache_n_acquires;
64960+atomic_unchecked_t fscache_n_acquires_null;
64961+atomic_unchecked_t fscache_n_acquires_no_cache;
64962+atomic_unchecked_t fscache_n_acquires_ok;
64963+atomic_unchecked_t fscache_n_acquires_nobufs;
64964+atomic_unchecked_t fscache_n_acquires_oom;
64965
64966-atomic_t fscache_n_invalidates;
64967-atomic_t fscache_n_invalidates_run;
64968+atomic_unchecked_t fscache_n_invalidates;
64969+atomic_unchecked_t fscache_n_invalidates_run;
64970
64971-atomic_t fscache_n_updates;
64972-atomic_t fscache_n_updates_null;
64973-atomic_t fscache_n_updates_run;
64974+atomic_unchecked_t fscache_n_updates;
64975+atomic_unchecked_t fscache_n_updates_null;
64976+atomic_unchecked_t fscache_n_updates_run;
64977
64978-atomic_t fscache_n_relinquishes;
64979-atomic_t fscache_n_relinquishes_null;
64980-atomic_t fscache_n_relinquishes_waitcrt;
64981-atomic_t fscache_n_relinquishes_retire;
64982+atomic_unchecked_t fscache_n_relinquishes;
64983+atomic_unchecked_t fscache_n_relinquishes_null;
64984+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
64985+atomic_unchecked_t fscache_n_relinquishes_retire;
64986
64987-atomic_t fscache_n_cookie_index;
64988-atomic_t fscache_n_cookie_data;
64989-atomic_t fscache_n_cookie_special;
64990+atomic_unchecked_t fscache_n_cookie_index;
64991+atomic_unchecked_t fscache_n_cookie_data;
64992+atomic_unchecked_t fscache_n_cookie_special;
64993
64994-atomic_t fscache_n_object_alloc;
64995-atomic_t fscache_n_object_no_alloc;
64996-atomic_t fscache_n_object_lookups;
64997-atomic_t fscache_n_object_lookups_negative;
64998-atomic_t fscache_n_object_lookups_positive;
64999-atomic_t fscache_n_object_lookups_timed_out;
65000-atomic_t fscache_n_object_created;
65001-atomic_t fscache_n_object_avail;
65002-atomic_t fscache_n_object_dead;
65003+atomic_unchecked_t fscache_n_object_alloc;
65004+atomic_unchecked_t fscache_n_object_no_alloc;
65005+atomic_unchecked_t fscache_n_object_lookups;
65006+atomic_unchecked_t fscache_n_object_lookups_negative;
65007+atomic_unchecked_t fscache_n_object_lookups_positive;
65008+atomic_unchecked_t fscache_n_object_lookups_timed_out;
65009+atomic_unchecked_t fscache_n_object_created;
65010+atomic_unchecked_t fscache_n_object_avail;
65011+atomic_unchecked_t fscache_n_object_dead;
65012
65013-atomic_t fscache_n_checkaux_none;
65014-atomic_t fscache_n_checkaux_okay;
65015-atomic_t fscache_n_checkaux_update;
65016-atomic_t fscache_n_checkaux_obsolete;
65017+atomic_unchecked_t fscache_n_checkaux_none;
65018+atomic_unchecked_t fscache_n_checkaux_okay;
65019+atomic_unchecked_t fscache_n_checkaux_update;
65020+atomic_unchecked_t fscache_n_checkaux_obsolete;
65021
65022 atomic_t fscache_n_cop_alloc_object;
65023 atomic_t fscache_n_cop_lookup_object;
65024@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
65025 seq_puts(m, "FS-Cache statistics\n");
65026
65027 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
65028- atomic_read(&fscache_n_cookie_index),
65029- atomic_read(&fscache_n_cookie_data),
65030- atomic_read(&fscache_n_cookie_special));
65031+ atomic_read_unchecked(&fscache_n_cookie_index),
65032+ atomic_read_unchecked(&fscache_n_cookie_data),
65033+ atomic_read_unchecked(&fscache_n_cookie_special));
65034
65035 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
65036- atomic_read(&fscache_n_object_alloc),
65037- atomic_read(&fscache_n_object_no_alloc),
65038- atomic_read(&fscache_n_object_avail),
65039- atomic_read(&fscache_n_object_dead));
65040+ atomic_read_unchecked(&fscache_n_object_alloc),
65041+ atomic_read_unchecked(&fscache_n_object_no_alloc),
65042+ atomic_read_unchecked(&fscache_n_object_avail),
65043+ atomic_read_unchecked(&fscache_n_object_dead));
65044 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
65045- atomic_read(&fscache_n_checkaux_none),
65046- atomic_read(&fscache_n_checkaux_okay),
65047- atomic_read(&fscache_n_checkaux_update),
65048- atomic_read(&fscache_n_checkaux_obsolete));
65049+ atomic_read_unchecked(&fscache_n_checkaux_none),
65050+ atomic_read_unchecked(&fscache_n_checkaux_okay),
65051+ atomic_read_unchecked(&fscache_n_checkaux_update),
65052+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
65053
65054 seq_printf(m, "Pages : mrk=%u unc=%u\n",
65055- atomic_read(&fscache_n_marks),
65056- atomic_read(&fscache_n_uncaches));
65057+ atomic_read_unchecked(&fscache_n_marks),
65058+ atomic_read_unchecked(&fscache_n_uncaches));
65059
65060 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
65061 " oom=%u\n",
65062- atomic_read(&fscache_n_acquires),
65063- atomic_read(&fscache_n_acquires_null),
65064- atomic_read(&fscache_n_acquires_no_cache),
65065- atomic_read(&fscache_n_acquires_ok),
65066- atomic_read(&fscache_n_acquires_nobufs),
65067- atomic_read(&fscache_n_acquires_oom));
65068+ atomic_read_unchecked(&fscache_n_acquires),
65069+ atomic_read_unchecked(&fscache_n_acquires_null),
65070+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
65071+ atomic_read_unchecked(&fscache_n_acquires_ok),
65072+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
65073+ atomic_read_unchecked(&fscache_n_acquires_oom));
65074
65075 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
65076- atomic_read(&fscache_n_object_lookups),
65077- atomic_read(&fscache_n_object_lookups_negative),
65078- atomic_read(&fscache_n_object_lookups_positive),
65079- atomic_read(&fscache_n_object_created),
65080- atomic_read(&fscache_n_object_lookups_timed_out));
65081+ atomic_read_unchecked(&fscache_n_object_lookups),
65082+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
65083+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
65084+ atomic_read_unchecked(&fscache_n_object_created),
65085+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
65086
65087 seq_printf(m, "Invals : n=%u run=%u\n",
65088- atomic_read(&fscache_n_invalidates),
65089- atomic_read(&fscache_n_invalidates_run));
65090+ atomic_read_unchecked(&fscache_n_invalidates),
65091+ atomic_read_unchecked(&fscache_n_invalidates_run));
65092
65093 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
65094- atomic_read(&fscache_n_updates),
65095- atomic_read(&fscache_n_updates_null),
65096- atomic_read(&fscache_n_updates_run));
65097+ atomic_read_unchecked(&fscache_n_updates),
65098+ atomic_read_unchecked(&fscache_n_updates_null),
65099+ atomic_read_unchecked(&fscache_n_updates_run));
65100
65101 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
65102- atomic_read(&fscache_n_relinquishes),
65103- atomic_read(&fscache_n_relinquishes_null),
65104- atomic_read(&fscache_n_relinquishes_waitcrt),
65105- atomic_read(&fscache_n_relinquishes_retire));
65106+ atomic_read_unchecked(&fscache_n_relinquishes),
65107+ atomic_read_unchecked(&fscache_n_relinquishes_null),
65108+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
65109+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
65110
65111 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
65112- atomic_read(&fscache_n_attr_changed),
65113- atomic_read(&fscache_n_attr_changed_ok),
65114- atomic_read(&fscache_n_attr_changed_nobufs),
65115- atomic_read(&fscache_n_attr_changed_nomem),
65116- atomic_read(&fscache_n_attr_changed_calls));
65117+ atomic_read_unchecked(&fscache_n_attr_changed),
65118+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
65119+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
65120+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
65121+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
65122
65123 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
65124- atomic_read(&fscache_n_allocs),
65125- atomic_read(&fscache_n_allocs_ok),
65126- atomic_read(&fscache_n_allocs_wait),
65127- atomic_read(&fscache_n_allocs_nobufs),
65128- atomic_read(&fscache_n_allocs_intr));
65129+ atomic_read_unchecked(&fscache_n_allocs),
65130+ atomic_read_unchecked(&fscache_n_allocs_ok),
65131+ atomic_read_unchecked(&fscache_n_allocs_wait),
65132+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
65133+ atomic_read_unchecked(&fscache_n_allocs_intr));
65134 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
65135- atomic_read(&fscache_n_alloc_ops),
65136- atomic_read(&fscache_n_alloc_op_waits),
65137- atomic_read(&fscache_n_allocs_object_dead));
65138+ atomic_read_unchecked(&fscache_n_alloc_ops),
65139+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
65140+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
65141
65142 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
65143 " int=%u oom=%u\n",
65144- atomic_read(&fscache_n_retrievals),
65145- atomic_read(&fscache_n_retrievals_ok),
65146- atomic_read(&fscache_n_retrievals_wait),
65147- atomic_read(&fscache_n_retrievals_nodata),
65148- atomic_read(&fscache_n_retrievals_nobufs),
65149- atomic_read(&fscache_n_retrievals_intr),
65150- atomic_read(&fscache_n_retrievals_nomem));
65151+ atomic_read_unchecked(&fscache_n_retrievals),
65152+ atomic_read_unchecked(&fscache_n_retrievals_ok),
65153+ atomic_read_unchecked(&fscache_n_retrievals_wait),
65154+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
65155+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
65156+ atomic_read_unchecked(&fscache_n_retrievals_intr),
65157+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
65158 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
65159- atomic_read(&fscache_n_retrieval_ops),
65160- atomic_read(&fscache_n_retrieval_op_waits),
65161- atomic_read(&fscache_n_retrievals_object_dead));
65162+ atomic_read_unchecked(&fscache_n_retrieval_ops),
65163+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
65164+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
65165
65166 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
65167- atomic_read(&fscache_n_stores),
65168- atomic_read(&fscache_n_stores_ok),
65169- atomic_read(&fscache_n_stores_again),
65170- atomic_read(&fscache_n_stores_nobufs),
65171- atomic_read(&fscache_n_stores_oom));
65172+ atomic_read_unchecked(&fscache_n_stores),
65173+ atomic_read_unchecked(&fscache_n_stores_ok),
65174+ atomic_read_unchecked(&fscache_n_stores_again),
65175+ atomic_read_unchecked(&fscache_n_stores_nobufs),
65176+ atomic_read_unchecked(&fscache_n_stores_oom));
65177 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
65178- atomic_read(&fscache_n_store_ops),
65179- atomic_read(&fscache_n_store_calls),
65180- atomic_read(&fscache_n_store_pages),
65181- atomic_read(&fscache_n_store_radix_deletes),
65182- atomic_read(&fscache_n_store_pages_over_limit));
65183+ atomic_read_unchecked(&fscache_n_store_ops),
65184+ atomic_read_unchecked(&fscache_n_store_calls),
65185+ atomic_read_unchecked(&fscache_n_store_pages),
65186+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
65187+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
65188
65189 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
65190- atomic_read(&fscache_n_store_vmscan_not_storing),
65191- atomic_read(&fscache_n_store_vmscan_gone),
65192- atomic_read(&fscache_n_store_vmscan_busy),
65193- atomic_read(&fscache_n_store_vmscan_cancelled),
65194- atomic_read(&fscache_n_store_vmscan_wait));
65195+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
65196+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
65197+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
65198+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
65199+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
65200
65201 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
65202- atomic_read(&fscache_n_op_pend),
65203- atomic_read(&fscache_n_op_run),
65204- atomic_read(&fscache_n_op_enqueue),
65205- atomic_read(&fscache_n_op_cancelled),
65206- atomic_read(&fscache_n_op_rejected));
65207+ atomic_read_unchecked(&fscache_n_op_pend),
65208+ atomic_read_unchecked(&fscache_n_op_run),
65209+ atomic_read_unchecked(&fscache_n_op_enqueue),
65210+ atomic_read_unchecked(&fscache_n_op_cancelled),
65211+ atomic_read_unchecked(&fscache_n_op_rejected));
65212 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
65213- atomic_read(&fscache_n_op_deferred_release),
65214- atomic_read(&fscache_n_op_release),
65215- atomic_read(&fscache_n_op_gc));
65216+ atomic_read_unchecked(&fscache_n_op_deferred_release),
65217+ atomic_read_unchecked(&fscache_n_op_release),
65218+ atomic_read_unchecked(&fscache_n_op_gc));
65219
65220 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
65221 atomic_read(&fscache_n_cop_alloc_object),
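[annotation] The declarations above show how mechanical the conversion is: each atomic_t counter becomes atomic_unchecked_t, and each accessor in fscache_stats_show() gains an _unchecked suffix. The split only means something when PAX_REFCOUNT is enabled; otherwise the unchecked API has to collapse back to the plain one. Assumed fallback definitions (the real ones live in the include/linux/types.h and atomic.h hunks of this patch, outside this excerpt):

#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;
#define atomic_read_unchecked(v)		atomic_read(v)
#define atomic_inc_unchecked(v)			atomic_inc(v)
#define atomic_inc_return_unchecked(v)		atomic_inc_return(v)
#define atomic_add_return_unchecked(i, v)	atomic_add_return((i), (v))
#endif

Note the deliberate exception at the end of the hunk: the fscache_n_cop_* counters stay plain atomic_t; they are paired inc/dec in-flight counts (see the fscache_stat_d() call left untouched in the page.c hunks) rather than monotonic totals.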
65222diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
65223index 966ace8..030a03a 100644
65224--- a/fs/fuse/cuse.c
65225+++ b/fs/fuse/cuse.c
65226@@ -611,10 +611,12 @@ static int __init cuse_init(void)
65227 INIT_LIST_HEAD(&cuse_conntbl[i]);
65228
65229 /* inherit and extend fuse_dev_operations */
65230- cuse_channel_fops = fuse_dev_operations;
65231- cuse_channel_fops.owner = THIS_MODULE;
65232- cuse_channel_fops.open = cuse_channel_open;
65233- cuse_channel_fops.release = cuse_channel_release;
65234+ pax_open_kernel();
65235+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
65236+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
65237+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
65238+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
65239+ pax_close_kernel();
65240
65241 cuse_class = class_create(THIS_MODULE, "cuse");
65242 if (IS_ERR(cuse_class))
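[annotation] The cuse_init() rewrite exists because this patch constifies file_operations structures tree-wide, so cuse_channel_fops can no longer be initialized by plain member assignment and lives on a read-only page. pax_open_kernel()/pax_close_kernel() lift kernel write protection around the one-time init, and the writes go through void-pointer aliases to get past the const qualifier. A userspace model of the aliasing trick (the write-protection toggle itself has no userspace analogue):

#include <stdio.h>

struct ops { void (*open)(void); };

static void channel_open(void) { puts("open"); }

/* const in the patched kernel; left writable here so the demo runs */
static struct ops channel_ops;

int main(void)
{
	*(void **)&channel_ops.open = (void *)channel_open;
	channel_ops.open();
	return 0;
}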
65243diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
65244index ca88731..8e9c55d 100644
65245--- a/fs/fuse/dev.c
65246+++ b/fs/fuse/dev.c
65247@@ -1318,7 +1318,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
65248 ret = 0;
65249 pipe_lock(pipe);
65250
65251- if (!pipe->readers) {
65252+ if (!atomic_read(&pipe->readers)) {
65253 send_sig(SIGPIPE, current, 0);
65254 if (!ret)
65255 ret = -EPIPE;
65256@@ -1347,7 +1347,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
65257 page_nr++;
65258 ret += buf->len;
65259
65260- if (pipe->files)
65261+ if (atomic_read(&pipe->files))
65262 do_wakeup = 1;
65263 }
65264
65265diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
65266index 0c60482..025724f 100644
65267--- a/fs/fuse/dir.c
65268+++ b/fs/fuse/dir.c
65269@@ -1485,7 +1485,7 @@ static char *read_link(struct dentry *dentry)
65270 return link;
65271 }
65272
65273-static void free_link(char *link)
65274+static void free_link(const char *link)
65275 {
65276 if (!IS_ERR(link))
65277 free_page((unsigned long) link);
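[annotation] free_link() here, hostfs_put_link() and kernfs_iop_put_link() below, and kfree_put_link() in fs/libfs.c all receive the same const-correctness fix: the string returned by nd_get_link() is only ever released, never written through, so the local becomes const char *. The qualifier is then dropped exactly once, with an explicit cast at the release call, instead of a mutable pointer leaking through the whole path; in miniature:

#include <stdlib.h>

static void free_link(const char *link)
{
	free((void *)link);	/* the one deliberate const drop */
}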
65278diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
65279index bb529f3..454c253 100644
65280--- a/fs/hostfs/hostfs_kern.c
65281+++ b/fs/hostfs/hostfs_kern.c
65282@@ -898,7 +898,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
65283
65284 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
65285 {
65286- char *s = nd_get_link(nd);
65287+ const char *s = nd_get_link(nd);
65288 if (!IS_ERR(s))
65289 __putname(s);
65290 }
65291diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
65292index 1e2872b..7aea000 100644
65293--- a/fs/hugetlbfs/inode.c
65294+++ b/fs/hugetlbfs/inode.c
65295@@ -154,6 +154,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
65296 struct mm_struct *mm = current->mm;
65297 struct vm_area_struct *vma;
65298 struct hstate *h = hstate_file(file);
65299+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
65300 struct vm_unmapped_area_info info;
65301
65302 if (len & ~huge_page_mask(h))
65303@@ -167,17 +168,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
65304 return addr;
65305 }
65306
65307+#ifdef CONFIG_PAX_RANDMMAP
65308+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
65309+#endif
65310+
65311 if (addr) {
65312 addr = ALIGN(addr, huge_page_size(h));
65313 vma = find_vma(mm, addr);
65314- if (TASK_SIZE - len >= addr &&
65315- (!vma || addr + len <= vma->vm_start))
65316+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
65317 return addr;
65318 }
65319
65320 info.flags = 0;
65321 info.length = len;
65322 info.low_limit = TASK_UNMAPPED_BASE;
65323+
65324+#ifdef CONFIG_PAX_RANDMMAP
65325+ if (mm->pax_flags & MF_PAX_RANDMMAP)
65326+ info.low_limit += mm->delta_mmap;
65327+#endif
65328+
65329 info.high_limit = TASK_SIZE;
65330 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
65331 info.align_offset = 0;
65332@@ -919,7 +929,7 @@ static struct file_system_type hugetlbfs_fs_type = {
65333 };
65334 MODULE_ALIAS_FS("hugetlbfs");
65335
65336-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
65337+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
65338
65339 static int can_do_hugetlb_shm(void)
65340 {
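[annotation] Two things change in hugetlb_get_unmapped_area() above. Under PAX_RANDMMAP the caller's address hint is ignored (the #ifdef guards the whole if (addr) block) and the bottom of the unmapped-area search rises by the per-mm mm->delta_mmap, so hugepage mappings inherit mmap randomization. Independently, the hint-acceptance test becomes check_heap_stack_gap(), which also demands a gap, randomized per thread stack via gr_rand_threadstack_offset(), before the next VMA. An illustrative model only; the real helpers handle growsdown stacks and more:

#include <stdbool.h>

static unsigned long search_floor(unsigned long base, bool randmmap,
				  unsigned long delta_mmap)
{
	return randmmap ? base + delta_mmap : base;
}

static bool hint_ok(unsigned long hint, unsigned long len,
		    unsigned long next_vma_start, unsigned long gap)
{
	return hint + len + gap <= next_vma_start;	/* keep clear of the next VMA */
}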
65341diff --git a/fs/inode.c b/fs/inode.c
65342index 6eecb7f..abec305 100644
65343--- a/fs/inode.c
65344+++ b/fs/inode.c
65345@@ -839,16 +839,20 @@ unsigned int get_next_ino(void)
65346 unsigned int *p = &get_cpu_var(last_ino);
65347 unsigned int res = *p;
65348
65349+start:
65350+
65351 #ifdef CONFIG_SMP
65352 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
65353- static atomic_t shared_last_ino;
65354- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
65355+ static atomic_unchecked_t shared_last_ino;
65356+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
65357
65358 res = next - LAST_INO_BATCH;
65359 }
65360 #endif
65361
65362- *p = ++res;
65363+ if (unlikely(!++res))
65364+ goto start; /* never zero */
65365+ *p = res;
65366 put_cpu_var(last_ino);
65367 return res;
65368 }
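[annotation] get_next_ino() gains two properties. The shared batch counter becomes unchecked because it is meant to wrap, and the per-CPU counter now skips zero, so i_ino 0, which userspace tools and parts of the VFS treat as "no inode", is never handed out after the 32-bit space wraps. The skip-zero increment in isolation:

/* minimal model of the skip-zero increment added above */
static unsigned int next_ino(unsigned int *p)
{
	unsigned int res = *p;
start:
	if (!++res)		/* wrapped to zero: take the next value */
		goto start;
	return *p = res;
}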
65369diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
65370index 4a6cf28..d3a29d3 100644
65371--- a/fs/jffs2/erase.c
65372+++ b/fs/jffs2/erase.c
65373@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
65374 struct jffs2_unknown_node marker = {
65375 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
65376 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
65377- .totlen = cpu_to_je32(c->cleanmarker_size)
65378+ .totlen = cpu_to_je32(c->cleanmarker_size),
65379+ .hdr_crc = cpu_to_je32(0)
65380 };
65381
65382 jffs2_prealloc_raw_node_refs(c, jeb, 1);
65383diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
65384index a6597d6..41b30ec 100644
65385--- a/fs/jffs2/wbuf.c
65386+++ b/fs/jffs2/wbuf.c
65387@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
65388 {
65389 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
65390 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
65391- .totlen = constant_cpu_to_je32(8)
65392+ .totlen = constant_cpu_to_je32(8),
65393+ .hdr_crc = constant_cpu_to_je32(0)
65394 };
65395
65396 /*
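[annotation] Both jffs2 cleanmarker initializers gain an explicit .hdr_crc = 0 (note the comma added to the previous member). In C, members omitted from a designated initializer are zero-initialized anyway, so the on-flash bytes do not change; the addition spells the zero CRC out rather than leaving it implied. A quick check that the two spellings agree:

#include <assert.h>

struct node { unsigned short magic, nodetype; unsigned int totlen, hdr_crc; };

int main(void)
{
	struct node a = { .magic = 0x1985, .totlen = 8 };		/* hdr_crc implied */
	struct node b = { .magic = 0x1985, .totlen = 8, .hdr_crc = 0 };	/* hdr_crc explicit */
	assert(a.hdr_crc == 0 && b.hdr_crc == 0);
	return 0;
}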
65397diff --git a/fs/jfs/super.c b/fs/jfs/super.c
65398index adf8cb0..bb935fa 100644
65399--- a/fs/jfs/super.c
65400+++ b/fs/jfs/super.c
65401@@ -893,7 +893,7 @@ static int __init init_jfs_fs(void)
65402
65403 jfs_inode_cachep =
65404 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
65405- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
65406+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
65407 init_once);
65408 if (jfs_inode_cachep == NULL)
65409 return -ENOMEM;
65410diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
65411index a693f5b..82276a1 100644
65412--- a/fs/kernfs/dir.c
65413+++ b/fs/kernfs/dir.c
65414@@ -182,7 +182,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
65415 *
65416 * Returns 31 bit hash of ns + name (so it fits in an off_t )
65417 */
65418-static unsigned int kernfs_name_hash(const char *name, const void *ns)
65419+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
65420 {
65421 unsigned long hash = init_name_hash();
65422 unsigned int len = strlen(name);
65423diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
65424index d895b4b..0b8af77 100644
65425--- a/fs/kernfs/file.c
65426+++ b/fs/kernfs/file.c
65427@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
65428
65429 struct kernfs_open_node {
65430 atomic_t refcnt;
65431- atomic_t event;
65432+ atomic_unchecked_t event;
65433 wait_queue_head_t poll;
65434 struct list_head files; /* goes through kernfs_open_file.list */
65435 };
65436@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
65437 {
65438 struct kernfs_open_file *of = sf->private;
65439
65440- of->event = atomic_read(&of->kn->attr.open->event);
65441+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
65442
65443 return of->kn->attr.ops->seq_show(sf, v);
65444 }
65445@@ -375,12 +375,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
65446 return ret;
65447 }
65448
65449-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
65450- void *buf, int len, int write)
65451+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
65452+ void *buf, size_t len, int write)
65453 {
65454 struct file *file = vma->vm_file;
65455 struct kernfs_open_file *of = kernfs_of(file);
65456- int ret;
65457+ ssize_t ret;
65458
65459 if (!of->vm_ops)
65460 return -EINVAL;
65461@@ -581,7 +581,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
65462 return -ENOMEM;
65463
65464 atomic_set(&new_on->refcnt, 0);
65465- atomic_set(&new_on->event, 1);
65466+ atomic_set_unchecked(&new_on->event, 1);
65467 init_waitqueue_head(&new_on->poll);
65468 INIT_LIST_HEAD(&new_on->files);
65469 goto retry;
65470@@ -787,7 +787,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
65471
65472 kernfs_put_active(kn);
65473
65474- if (of->event != atomic_read(&on->event))
65475+ if (of->event != atomic_read_unchecked(&on->event))
65476 goto trigger;
65477
65478 return DEFAULT_POLLMASK;
65479@@ -818,7 +818,7 @@ repeat:
65480
65481 on = kn->attr.open;
65482 if (on) {
65483- atomic_inc(&on->event);
65484+ atomic_inc_unchecked(&on->event);
65485 wake_up_interruptible(&on->poll);
65486 }
65487
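[annotation] kernfs_open_node.event is a notification generation counter: kernfs_notify() (the final hunk) bumps it, and a poller compares its snapshot in of->event against the live value to decide whether anything changed since it last looked. Overflow merely aliases one generation, so the counter is exempted from REFCOUNT checking. The kernfs_vma_access() retyping to ssize_t/size_t presumably tracks this patch's widening of the vm_operations_struct ->access hook elsewhere. The generation idiom in userspace form:

#include <stdatomic.h>
#include <stdbool.h>

struct open_node { atomic_uint event; };

static void notify(struct open_node *on)
{
	atomic_fetch_add(&on->event, 1);	/* wraps silently; harmless */
}

static bool changed_since(unsigned int snapshot, struct open_node *on)
{
	return snapshot != atomic_load(&on->event);
}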
65488diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
65489index 8a19889..4c3069a 100644
65490--- a/fs/kernfs/symlink.c
65491+++ b/fs/kernfs/symlink.c
65492@@ -128,7 +128,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
65493 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
65494 void *cookie)
65495 {
65496- char *page = nd_get_link(nd);
65497+ const char *page = nd_get_link(nd);
65498 if (!IS_ERR(page))
65499 free_page((unsigned long)page);
65500 }
65501diff --git a/fs/libfs.c b/fs/libfs.c
65502index 88e3e00..979c262 100644
65503--- a/fs/libfs.c
65504+++ b/fs/libfs.c
65505@@ -160,6 +160,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
65506
65507 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
65508 struct dentry *next = list_entry(p, struct dentry, d_u.d_child);
65509+ char d_name[sizeof(next->d_iname)];
65510+ const unsigned char *name;
65511+
65512 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
65513 if (!simple_positive(next)) {
65514 spin_unlock(&next->d_lock);
65515@@ -168,7 +171,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
65516
65517 spin_unlock(&next->d_lock);
65518 spin_unlock(&dentry->d_lock);
65519- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
65520+ name = next->d_name.name;
65521+ if (name == next->d_iname) {
65522+ memcpy(d_name, name, next->d_name.len);
65523+ name = d_name;
65524+ }
65525+ if (!dir_emit(ctx, name, next->d_name.len,
65526 next->d_inode->i_ino, dt_type(next->d_inode)))
65527 return 0;
65528 spin_lock(&dentry->d_lock);
65529@@ -1027,7 +1035,7 @@ EXPORT_SYMBOL(noop_fsync);
65530 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
65531 void *cookie)
65532 {
65533- char *s = nd_get_link(nd);
65534+ const char *s = nd_get_link(nd);
65535 if (!IS_ERR(s))
65536 kfree(s);
65537 }
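[annotation] The dcache_readdir() change snapshots short names stored inline in the dentry (d_iname) into a stack buffer before calling dir_emit(). Both spinlocks are dropped before emitting, and dir_emit() can fault while copying to userspace, so a concurrent rename could rewrite the inline name mid-copy; staging it on the stack keeps the emitted bytes stable. A hedged standalone sketch of the pattern:

#include <string.h>

#define DNAME_INLINE_LEN 32	/* stand-in for sizeof(dentry->d_iname) */

struct dentry_model {
	const unsigned char *name;	/* points at iname for short names */
	unsigned int len;
	unsigned char iname[DNAME_INLINE_LEN];
};

static const unsigned char *stable_name(const struct dentry_model *d,
					unsigned char buf[DNAME_INLINE_LEN])
{
	if (d->name != d->iname)
		return d->name;			/* external allocation: stable enough */
	memcpy(buf, d->name, d->len);		/* inline: copy out from under a rename */
	return buf;
}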
65538diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
65539index acd3947..1f896e2 100644
65540--- a/fs/lockd/clntproc.c
65541+++ b/fs/lockd/clntproc.c
65542@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
65543 /*
65544 * Cookie counter for NLM requests
65545 */
65546-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
65547+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
65548
65549 void nlmclnt_next_cookie(struct nlm_cookie *c)
65550 {
65551- u32 cookie = atomic_inc_return(&nlm_cookie);
65552+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
65553
65554 memcpy(c->data, &cookie, 4);
65555 c->len=4;
65556diff --git a/fs/locks.c b/fs/locks.c
65557index 717fbc4..74628c3 100644
65558--- a/fs/locks.c
65559+++ b/fs/locks.c
65560@@ -2327,7 +2327,7 @@ void locks_remove_file(struct file *filp)
65561 locks_remove_posix(filp, (fl_owner_t)filp);
65562
65563 if (filp->f_op->flock) {
65564- struct file_lock fl = {
65565+ struct file_lock flock = {
65566 .fl_owner = (fl_owner_t)filp,
65567 .fl_pid = current->tgid,
65568 .fl_file = filp,
65569@@ -2335,9 +2335,9 @@ void locks_remove_file(struct file *filp)
65570 .fl_type = F_UNLCK,
65571 .fl_end = OFFSET_MAX,
65572 };
65573- filp->f_op->flock(filp, F_SETLKW, &fl);
65574- if (fl.fl_ops && fl.fl_ops->fl_release_private)
65575- fl.fl_ops->fl_release_private(&fl);
65576+ filp->f_op->flock(filp, F_SETLKW, &flock);
65577+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
65578+ flock.fl_ops->fl_release_private(&flock);
65579 }
65580
65581 spin_lock(&inode->i_lock);
65582diff --git a/fs/mount.h b/fs/mount.h
65583index d55297f..f5b28c5 100644
65584--- a/fs/mount.h
65585+++ b/fs/mount.h
65586@@ -11,7 +11,7 @@ struct mnt_namespace {
65587 u64 seq; /* Sequence number to prevent loops */
65588 wait_queue_head_t poll;
65589 u64 event;
65590-};
65591+} __randomize_layout;
65592
65593 struct mnt_pcp {
65594 int mnt_count;
65595@@ -57,7 +57,7 @@ struct mount {
65596 int mnt_expiry_mark; /* true if marked for expiry */
65597 int mnt_pinned;
65598 struct path mnt_ex_mountpoint;
65599-};
65600+} __randomize_layout;
65601
65602 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
65603
65604diff --git a/fs/namei.c b/fs/namei.c
65605index 17ca8b8..2de9500 100644
65606--- a/fs/namei.c
65607+++ b/fs/namei.c
65608@@ -331,17 +331,32 @@ int generic_permission(struct inode *inode, int mask)
65609 if (ret != -EACCES)
65610 return ret;
65611
65612+#ifdef CONFIG_GRKERNSEC
65613+ /* we'll block if we have to log due to a denied capability use */
65614+ if (mask & MAY_NOT_BLOCK)
65615+ return -ECHILD;
65616+#endif
65617+
65618 if (S_ISDIR(inode->i_mode)) {
65619 /* DACs are overridable for directories */
65620- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
65621- return 0;
65622 if (!(mask & MAY_WRITE))
65623- if (capable_wrt_inode_uidgid(inode,
65624- CAP_DAC_READ_SEARCH))
65625+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
65626+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
65627 return 0;
65628+ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
65629+ return 0;
65630 return -EACCES;
65631 }
65632 /*
65633+ * Searching includes executable on directories, else just read.
65634+ */
65635+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
65636+ if (mask == MAY_READ)
65637+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
65638+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
65639+ return 0;
65640+
65641+ /*
65642 * Read/write DACs are always overridable.
65643 * Executable DACs are overridable when there is
65644 * at least one exec bit set.
65645@@ -350,14 +365,6 @@ int generic_permission(struct inode *inode, int mask)
65646 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
65647 return 0;
65648
65649- /*
65650- * Searching includes executable on directories, else just read.
65651- */
65652- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
65653- if (mask == MAY_READ)
65654- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
65655- return 0;
65656-
65657 return -EACCES;
65658 }
65659 EXPORT_SYMBOL(generic_permission);
65660@@ -825,7 +832,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
65661 {
65662 struct dentry *dentry = link->dentry;
65663 int error;
65664- char *s;
65665+ const char *s;
65666
65667 BUG_ON(nd->flags & LOOKUP_RCU);
65668
65669@@ -846,6 +853,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
65670 if (error)
65671 goto out_put_nd_path;
65672
65673+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
65674+ dentry->d_inode, dentry, nd->path.mnt)) {
65675+ error = -EACCES;
65676+ goto out_put_nd_path;
65677+ }
65678+
65679 nd->last_type = LAST_BIND;
65680 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
65681 error = PTR_ERR(*p);
65682@@ -1597,6 +1610,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
65683 if (res)
65684 break;
65685 res = walk_component(nd, path, LOOKUP_FOLLOW);
65686+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
65687+ res = -EACCES;
65688 put_link(nd, &link, cookie);
65689 } while (res > 0);
65690
65691@@ -1669,7 +1684,7 @@ EXPORT_SYMBOL(full_name_hash);
65692 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
65693 {
65694 unsigned long a, b, adata, bdata, mask, hash, len;
65695- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
65696+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
65697
65698 hash = a = 0;
65699 len = -sizeof(unsigned long);
65700@@ -1953,6 +1968,8 @@ static int path_lookupat(int dfd, const char *name,
65701 if (err)
65702 break;
65703 err = lookup_last(nd, &path);
65704+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
65705+ err = -EACCES;
65706 put_link(nd, &link, cookie);
65707 }
65708 }
65709@@ -1960,6 +1977,13 @@ static int path_lookupat(int dfd, const char *name,
65710 if (!err)
65711 err = complete_walk(nd);
65712
65713+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
65714+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
65715+ path_put(&nd->path);
65716+ err = -ENOENT;
65717+ }
65718+ }
65719+
65720 if (!err && nd->flags & LOOKUP_DIRECTORY) {
65721 if (!d_can_lookup(nd->path.dentry)) {
65722 path_put(&nd->path);
65723@@ -1987,8 +2011,15 @@ static int filename_lookup(int dfd, struct filename *name,
65724 retval = path_lookupat(dfd, name->name,
65725 flags | LOOKUP_REVAL, nd);
65726
65727- if (likely(!retval))
65728+ if (likely(!retval)) {
65729 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
65730+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
65731+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
65732+ path_put(&nd->path);
65733+ return -ENOENT;
65734+ }
65735+ }
65736+ }
65737 return retval;
65738 }
65739
65740@@ -2570,6 +2601,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
65741 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
65742 return -EPERM;
65743
65744+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
65745+ return -EPERM;
65746+ if (gr_handle_rawio(inode))
65747+ return -EPERM;
65748+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
65749+ return -EACCES;
65750+
65751 return 0;
65752 }
65753
65754@@ -2801,7 +2839,7 @@ looked_up:
65755 * cleared otherwise prior to returning.
65756 */
65757 static int lookup_open(struct nameidata *nd, struct path *path,
65758- struct file *file,
65759+ struct path *link, struct file *file,
65760 const struct open_flags *op,
65761 bool got_write, int *opened)
65762 {
65763@@ -2836,6 +2874,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
65764 /* Negative dentry, just create the file */
65765 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
65766 umode_t mode = op->mode;
65767+
65768+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
65769+ error = -EACCES;
65770+ goto out_dput;
65771+ }
65772+
65773+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
65774+ error = -EACCES;
65775+ goto out_dput;
65776+ }
65777+
65778 if (!IS_POSIXACL(dir->d_inode))
65779 mode &= ~current_umask();
65780 /*
65781@@ -2857,6 +2906,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
65782 nd->flags & LOOKUP_EXCL);
65783 if (error)
65784 goto out_dput;
65785+ else
65786+ gr_handle_create(dentry, nd->path.mnt);
65787 }
65788 out_no_open:
65789 path->dentry = dentry;
65790@@ -2871,7 +2922,7 @@ out_dput:
65791 /*
65792 * Handle the last step of open()
65793 */
65794-static int do_last(struct nameidata *nd, struct path *path,
65795+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
65796 struct file *file, const struct open_flags *op,
65797 int *opened, struct filename *name)
65798 {
65799@@ -2921,6 +2972,15 @@ static int do_last(struct nameidata *nd, struct path *path,
65800 if (error)
65801 return error;
65802
65803+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
65804+ error = -ENOENT;
65805+ goto out;
65806+ }
65807+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
65808+ error = -EACCES;
65809+ goto out;
65810+ }
65811+
65812 audit_inode(name, dir, LOOKUP_PARENT);
65813 error = -EISDIR;
65814 /* trailing slashes? */
65815@@ -2940,7 +3000,7 @@ retry_lookup:
65816 */
65817 }
65818 mutex_lock(&dir->d_inode->i_mutex);
65819- error = lookup_open(nd, path, file, op, got_write, opened);
65820+ error = lookup_open(nd, path, link, file, op, got_write, opened);
65821 mutex_unlock(&dir->d_inode->i_mutex);
65822
65823 if (error <= 0) {
65824@@ -2964,11 +3024,28 @@ retry_lookup:
65825 goto finish_open_created;
65826 }
65827
65828+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
65829+ error = -ENOENT;
65830+ goto exit_dput;
65831+ }
65832+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
65833+ error = -EACCES;
65834+ goto exit_dput;
65835+ }
65836+
65837 /*
65838 * create/update audit record if it already exists.
65839 */
65840- if (d_is_positive(path->dentry))
65841+ if (d_is_positive(path->dentry)) {
65842+ /* only check if O_CREAT is specified, all other checks need to go
65843+ into may_open */
65844+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
65845+ error = -EACCES;
65846+ goto exit_dput;
65847+ }
65848+
65849 audit_inode(name, path->dentry, 0);
65850+ }
65851
65852 /*
65853 * If atomic_open() acquired write access it is dropped now due to
65854@@ -3009,6 +3086,11 @@ finish_lookup:
65855 }
65856 }
65857 BUG_ON(inode != path->dentry->d_inode);
65858+ /* if we're resolving a symlink to another symlink */
65859+ if (link && gr_handle_symlink_owner(link, inode)) {
65860+ error = -EACCES;
65861+ goto out;
65862+ }
65863 return 1;
65864 }
65865
65866@@ -3018,7 +3100,6 @@ finish_lookup:
65867 save_parent.dentry = nd->path.dentry;
65868 save_parent.mnt = mntget(path->mnt);
65869 nd->path.dentry = path->dentry;
65870-
65871 }
65872 nd->inode = inode;
65873 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
65874@@ -3028,7 +3109,18 @@ finish_open:
65875 path_put(&save_parent);
65876 return error;
65877 }
65878+
65879+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
65880+ error = -ENOENT;
65881+ goto out;
65882+ }
65883+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
65884+ error = -EACCES;
65885+ goto out;
65886+ }
65887+
65888 audit_inode(name, nd->path.dentry, 0);
65889+
65890 error = -EISDIR;
65891 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
65892 goto out;
65893@@ -3191,7 +3283,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
65894 if (unlikely(error))
65895 goto out;
65896
65897- error = do_last(nd, &path, file, op, &opened, pathname);
65898+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
65899 while (unlikely(error > 0)) { /* trailing symlink */
65900 struct path link = path;
65901 void *cookie;
65902@@ -3209,7 +3301,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
65903 error = follow_link(&link, nd, &cookie);
65904 if (unlikely(error))
65905 break;
65906- error = do_last(nd, &path, file, op, &opened, pathname);
65907+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
65908 put_link(nd, &link, cookie);
65909 }
65910 out:
65911@@ -3309,9 +3401,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
65912 goto unlock;
65913
65914 error = -EEXIST;
65915- if (d_is_positive(dentry))
65916+ if (d_is_positive(dentry)) {
65917+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
65918+ error = -ENOENT;
65919 goto fail;
65920-
65921+ }
65922 /*
65923 * Special case - lookup gave negative, but... we had foo/bar/
65924 * From the vfs_mknod() POV we just have a negative dentry -
65925@@ -3363,6 +3457,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
65926 }
65927 EXPORT_SYMBOL(user_path_create);
65928
65929+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
65930+{
65931+ struct filename *tmp = getname(pathname);
65932+ struct dentry *res;
65933+ if (IS_ERR(tmp))
65934+ return ERR_CAST(tmp);
65935+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
65936+ if (IS_ERR(res))
65937+ putname(tmp);
65938+ else
65939+ *to = tmp;
65940+ return res;
65941+}
65942+
65943 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
65944 {
65945 int error = may_create(dir, dentry);
65946@@ -3426,6 +3534,17 @@ retry:
65947
65948 if (!IS_POSIXACL(path.dentry->d_inode))
65949 mode &= ~current_umask();
65950+
65951+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
65952+ error = -EPERM;
65953+ goto out;
65954+ }
65955+
65956+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
65957+ error = -EACCES;
65958+ goto out;
65959+ }
65960+
65961 error = security_path_mknod(&path, dentry, mode, dev);
65962 if (error)
65963 goto out;
65964@@ -3442,6 +3561,8 @@ retry:
65965 break;
65966 }
65967 out:
65968+ if (!error)
65969+ gr_handle_create(dentry, path.mnt);
65970 done_path_create(&path, dentry);
65971 if (retry_estale(error, lookup_flags)) {
65972 lookup_flags |= LOOKUP_REVAL;
65973@@ -3495,9 +3616,16 @@ retry:
65974
65975 if (!IS_POSIXACL(path.dentry->d_inode))
65976 mode &= ~current_umask();
65977+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
65978+ error = -EACCES;
65979+ goto out;
65980+ }
65981 error = security_path_mkdir(&path, dentry, mode);
65982 if (!error)
65983 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
65984+ if (!error)
65985+ gr_handle_create(dentry, path.mnt);
65986+out:
65987 done_path_create(&path, dentry);
65988 if (retry_estale(error, lookup_flags)) {
65989 lookup_flags |= LOOKUP_REVAL;
65990@@ -3580,6 +3708,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
65991 struct filename *name;
65992 struct dentry *dentry;
65993 struct nameidata nd;
65994+ ino_t saved_ino = 0;
65995+ dev_t saved_dev = 0;
65996 unsigned int lookup_flags = 0;
65997 retry:
65998 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
65999@@ -3612,10 +3742,21 @@ retry:
66000 error = -ENOENT;
66001 goto exit3;
66002 }
66003+
66004+ saved_ino = dentry->d_inode->i_ino;
66005+ saved_dev = gr_get_dev_from_dentry(dentry);
66006+
66007+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
66008+ error = -EACCES;
66009+ goto exit3;
66010+ }
66011+
66012 error = security_path_rmdir(&nd.path, dentry);
66013 if (error)
66014 goto exit3;
66015 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
66016+ if (!error && (saved_dev || saved_ino))
66017+ gr_handle_delete(saved_ino, saved_dev);
66018 exit3:
66019 dput(dentry);
66020 exit2:
66021@@ -3706,6 +3847,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
66022 struct nameidata nd;
66023 struct inode *inode = NULL;
66024 struct inode *delegated_inode = NULL;
66025+ ino_t saved_ino = 0;
66026+ dev_t saved_dev = 0;
66027 unsigned int lookup_flags = 0;
66028 retry:
66029 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
66030@@ -3732,10 +3875,22 @@ retry_deleg:
66031 if (d_is_negative(dentry))
66032 goto slashes;
66033 ihold(inode);
66034+
66035+ if (inode->i_nlink <= 1) {
66036+ saved_ino = inode->i_ino;
66037+ saved_dev = gr_get_dev_from_dentry(dentry);
66038+ }
66039+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
66040+ error = -EACCES;
66041+ goto exit2;
66042+ }
66043+
66044 error = security_path_unlink(&nd.path, dentry);
66045 if (error)
66046 goto exit2;
66047 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
66048+ if (!error && (saved_ino || saved_dev))
66049+ gr_handle_delete(saved_ino, saved_dev);
66050 exit2:
66051 dput(dentry);
66052 }
66053@@ -3824,9 +3979,17 @@ retry:
66054 if (IS_ERR(dentry))
66055 goto out_putname;
66056
66057+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
66058+ error = -EACCES;
66059+ goto out;
66060+ }
66061+
66062 error = security_path_symlink(&path, dentry, from->name);
66063 if (!error)
66064 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
66065+ if (!error)
66066+ gr_handle_create(dentry, path.mnt);
66067+out:
66068 done_path_create(&path, dentry);
66069 if (retry_estale(error, lookup_flags)) {
66070 lookup_flags |= LOOKUP_REVAL;
66071@@ -3930,6 +4093,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
66072 struct dentry *new_dentry;
66073 struct path old_path, new_path;
66074 struct inode *delegated_inode = NULL;
66075+ struct filename *to = NULL;
66076 int how = 0;
66077 int error;
66078
66079@@ -3953,7 +4117,7 @@ retry:
66080 if (error)
66081 return error;
66082
66083- new_dentry = user_path_create(newdfd, newname, &new_path,
66084+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
66085 (how & LOOKUP_REVAL));
66086 error = PTR_ERR(new_dentry);
66087 if (IS_ERR(new_dentry))
66088@@ -3965,11 +4129,28 @@ retry:
66089 error = may_linkat(&old_path);
66090 if (unlikely(error))
66091 goto out_dput;
66092+
66093+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
66094+ old_path.dentry->d_inode,
66095+ old_path.dentry->d_inode->i_mode, to)) {
66096+ error = -EACCES;
66097+ goto out_dput;
66098+ }
66099+
66100+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
66101+ old_path.dentry, old_path.mnt, to)) {
66102+ error = -EACCES;
66103+ goto out_dput;
66104+ }
66105+
66106 error = security_path_link(old_path.dentry, &new_path, new_dentry);
66107 if (error)
66108 goto out_dput;
66109 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
66110+ if (!error)
66111+ gr_handle_create(new_dentry, new_path.mnt);
66112 out_dput:
66113+ putname(to);
66114 done_path_create(&new_path, new_dentry);
66115 if (delegated_inode) {
66116 error = break_deleg_wait(&delegated_inode);
66117@@ -4279,6 +4460,12 @@ retry_deleg:
66118 if (new_dentry == trap)
66119 goto exit5;
66120
66121+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
66122+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
66123+ to, flags);
66124+ if (error)
66125+ goto exit5;
66126+
66127 error = security_path_rename(&oldnd.path, old_dentry,
66128 &newnd.path, new_dentry, flags);
66129 if (error)
66130@@ -4286,6 +4473,9 @@ retry_deleg:
66131 error = vfs_rename(old_dir->d_inode, old_dentry,
66132 new_dir->d_inode, new_dentry,
66133 &delegated_inode, flags);
66134+ if (!error)
66135+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
66136+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0, flags);
66137 exit5:
66138 dput(new_dentry);
66139 exit4:
66140@@ -4328,14 +4518,24 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
66141
66142 int readlink_copy(char __user *buffer, int buflen, const char *link)
66143 {
66144+ char tmpbuf[64];
66145+ const char *newlink;
66146 int len = PTR_ERR(link);
66147+
66148 if (IS_ERR(link))
66149 goto out;
66150
66151 len = strlen(link);
66152 if (len > (unsigned) buflen)
66153 len = buflen;
66154- if (copy_to_user(buffer, link, len))
66155+
66156+ if (len < sizeof(tmpbuf)) {
66157+ memcpy(tmpbuf, link, len);
66158+ newlink = tmpbuf;
66159+ } else
66160+ newlink = link;
66161+
66162+ if (copy_to_user(buffer, newlink, len))
66163 len = -EFAULT;
66164 out:
66165 return len;
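[annotation] readlink_copy() gains a 64-byte stack bounce buffer: targets shorter than that are memcpy'd to the stack and copied to userspace from there, while longer ones take the old path. Presumably this keeps copy_to_user() from reading straight out of slab-resident objects (link text often lives in inodes or page-cache pages), which the patch's USERCOPY checking treats restrictively. As a userspace model:

#include <string.h>

static long bounce_readlink(char *user_buf, unsigned int buflen,
			    const char *link)
{
	char tmp[64];
	const char *src = link;
	unsigned int len = (unsigned int)strlen(link);

	if (len > buflen)
		len = buflen;
	if (len < sizeof(tmp)) {		/* short target: stage on the stack */
		memcpy(tmp, link, len);
		src = tmp;
	}
	memcpy(user_buf, src, len);		/* stands in for copy_to_user() */
	return len;
}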
66166diff --git a/fs/namespace.c b/fs/namespace.c
66167index 140d177..cef9c30 100644
66168--- a/fs/namespace.c
66169+++ b/fs/namespace.c
66170@@ -1378,6 +1378,9 @@ static int do_umount(struct mount *mnt, int flags)
66171 if (!(sb->s_flags & MS_RDONLY))
66172 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
66173 up_write(&sb->s_umount);
66174+
66175+ gr_log_remount(mnt->mnt_devname, retval);
66176+
66177 return retval;
66178 }
66179
66180@@ -1400,6 +1403,9 @@ static int do_umount(struct mount *mnt, int flags)
66181 }
66182 unlock_mount_hash();
66183 namespace_unlock();
66184+
66185+ gr_log_unmount(mnt->mnt_devname, retval);
66186+
66187 return retval;
66188 }
66189
66190@@ -1419,7 +1425,7 @@ static inline bool may_mount(void)
66191 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
66192 */
66193
66194-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
66195+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
66196 {
66197 struct path path;
66198 struct mount *mnt;
66199@@ -1461,7 +1467,7 @@ out:
66200 /*
66201 * The 2.0 compatible umount. No flags.
66202 */
66203-SYSCALL_DEFINE1(oldumount, char __user *, name)
66204+SYSCALL_DEFINE1(oldumount, const char __user *, name)
66205 {
66206 return sys_umount(name, 0);
66207 }
66208@@ -2510,6 +2516,16 @@ long do_mount(const char *dev_name, const char *dir_name,
66209 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
66210 MS_STRICTATIME);
66211
66212+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
66213+ retval = -EPERM;
66214+ goto dput_out;
66215+ }
66216+
66217+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
66218+ retval = -EPERM;
66219+ goto dput_out;
66220+ }
66221+
66222 if (flags & MS_REMOUNT)
66223 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
66224 data_page);
66225@@ -2524,6 +2540,9 @@ long do_mount(const char *dev_name, const char *dir_name,
66226 dev_name, data_page);
66227 dput_out:
66228 path_put(&path);
66229+
66230+ gr_log_mount(dev_name, dir_name, retval);
66231+
66232 return retval;
66233 }
66234
66235@@ -2541,7 +2560,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
66236 * number incrementing at 10Ghz will take 12,427 years to wrap which
66237 * is effectively never, so we can ignore the possibility.
66238 */
66239-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
66240+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
66241
66242 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
66243 {
66244@@ -2556,7 +2575,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
66245 kfree(new_ns);
66246 return ERR_PTR(ret);
66247 }
66248- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
66249+ new_ns->seq = atomic64_inc_return_unchecked(&mnt_ns_seq);
66250 atomic_set(&new_ns->count, 1);
66251 new_ns->root = NULL;
66252 INIT_LIST_HEAD(&new_ns->list);
66253@@ -2566,7 +2585,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
66254 return new_ns;
66255 }
66256
66257-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
66258+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
66259 struct user_namespace *user_ns, struct fs_struct *new_fs)
66260 {
66261 struct mnt_namespace *new_ns;
66262@@ -2687,8 +2706,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
66263 }
66264 EXPORT_SYMBOL(mount_subtree);
66265
66266-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
66267- char __user *, type, unsigned long, flags, void __user *, data)
66268+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
66269+ const char __user *, type, unsigned long, flags, void __user *, data)
66270 {
66271 int ret;
66272 char *kernel_type;
66273@@ -2801,6 +2820,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
66274 if (error)
66275 goto out2;
66276
66277+ if (gr_handle_chroot_pivot()) {
66278+ error = -EPERM;
66279+ goto out2;
66280+ }
66281+
66282 get_fs_root(current->fs, &root);
66283 old_mp = lock_mount(&old);
66284 error = PTR_ERR(old_mp);
66285@@ -3069,7 +3093,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
66286 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
66287 return -EPERM;
66288
66289- if (fs->users != 1)
66290+ if (atomic_read(&fs->users) != 1)
66291 return -EINVAL;
66292
66293 get_mnt_ns(mnt_ns);
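
The fs/namespace.c hunks above show two recurring patterns in this patch: gr_log_*() audit hooks added after each operation completes, and the 64-bit namespace sequence counter converted to atomic64_unchecked_t, opting it out of PaX refcount-overflow detection since (per the in-tree comment) a 64-bit counter effectively never wraps. A minimal userspace sketch of that unchecked monotonic-counter idiom, with C11 atomics standing in for the kernel atomic64 API (names illustrative, not kernel code):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Monotonic 64-bit sequence number, analogous to mnt_ns_seq.  Overflow
 * checking is deliberately absent: even at one increment per nanosecond
 * a 64-bit counter takes centuries to wrap. */
static _Atomic uint64_t ns_seq = 1;

static uint64_t alloc_ns_seq(void)
{
        /* same effect as atomic64_inc_return_unchecked(): add 1, return new */
        return atomic_fetch_add(&ns_seq, 1) + 1;
}

int main(void)
{
        printf("seq %llu\n", (unsigned long long)alloc_ns_seq());
        printf("seq %llu\n", (unsigned long long)alloc_ns_seq());
        return 0;
}
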
66294diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
66295index f4ccfe6..a5cf064 100644
66296--- a/fs/nfs/callback_xdr.c
66297+++ b/fs/nfs/callback_xdr.c
66298@@ -51,7 +51,7 @@ struct callback_op {
66299 callback_decode_arg_t decode_args;
66300 callback_encode_res_t encode_res;
66301 long res_maxsize;
66302-};
66303+} __do_const;
66304
66305 static struct callback_op callback_ops[];
66306
66307diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
66308index 9927913..faffc5c 100644
66309--- a/fs/nfs/inode.c
66310+++ b/fs/nfs/inode.c
66311@@ -1219,16 +1219,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
66312 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
66313 }
66314
66315-static atomic_long_t nfs_attr_generation_counter;
66316+static atomic_long_unchecked_t nfs_attr_generation_counter;
66317
66318 static unsigned long nfs_read_attr_generation_counter(void)
66319 {
66320- return atomic_long_read(&nfs_attr_generation_counter);
66321+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
66322 }
66323
66324 unsigned long nfs_inc_attr_generation_counter(void)
66325 {
66326- return atomic_long_inc_return(&nfs_attr_generation_counter);
66327+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
66328 }
66329
66330 void nfs_fattr_init(struct nfs_fattr *fattr)
66331diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
66332index 8f029db..3688b84 100644
66333--- a/fs/nfsd/nfs4proc.c
66334+++ b/fs/nfsd/nfs4proc.c
66335@@ -1157,7 +1157,7 @@ struct nfsd4_operation {
66336 nfsd4op_rsize op_rsize_bop;
66337 stateid_getter op_get_currentstateid;
66338 stateid_setter op_set_currentstateid;
66339-};
66340+} __do_const;
66341
66342 static struct nfsd4_operation nfsd4_ops[];
66343
66344diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
66345index 944275c..6fc40a7 100644
66346--- a/fs/nfsd/nfs4xdr.c
66347+++ b/fs/nfsd/nfs4xdr.c
66348@@ -1539,7 +1539,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
66349
66350 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
66351
66352-static nfsd4_dec nfsd4_dec_ops[] = {
66353+static const nfsd4_dec nfsd4_dec_ops[] = {
66354 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
66355 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
66356 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
66357diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
66358index 6040da8..4348565 100644
66359--- a/fs/nfsd/nfscache.c
66360+++ b/fs/nfsd/nfscache.c
66361@@ -518,17 +518,20 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
66362 {
66363 struct svc_cacherep *rp = rqstp->rq_cacherep;
66364 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
66365- int len;
66366+ long len;
66367 size_t bufsize = 0;
66368
66369 if (!rp)
66370 return;
66371
66372- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
66373- len >>= 2;
66374+ if (statp) {
66375+ len = (char*)statp - (char*)resv->iov_base;
66376+ len = resv->iov_len - len;
66377+ len >>= 2;
66378+ }
66379
66380 /* Don't cache excessive amounts of data and XDR failures */
66381- if (!statp || len > (256 >> 2)) {
66382+ if (!statp || len > (256 >> 2) || len < 0) {
66383 nfsd_reply_cache_free(rp);
66384 return;
66385 }
66386@@ -536,7 +539,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
66387 switch (cachetype) {
66388 case RC_REPLSTAT:
66389 if (len != 1)
66390- printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
66391+ printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
66392 rp->c_replstat = *statp;
66393 break;
66394 case RC_REPLBUFF:
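
The nfsd/nfscache.c change above recomputes the cached reply length in a signed long and adds a len < 0 rejection, so a statp that does not point inside the reply iovec can no longer produce a huge unsigned length. A hedged userspace sketch of the signed-arithmetic guard (the 256-byte cap comes from the hunk; everything else is illustrative):

#include <stdio.h>

/* Stand-in for the nfsd reply-length computation: statp points somewhere
 * relative to the iovec; doing the subtraction in a signed type lets a
 * corrupt pointer surface as len < 0 instead of a giant unsigned value. */
static long reply_len_words(const char *iov_base, unsigned long iov_len,
                            const char *statp)
{
        long len = (long)(statp - iov_base);    /* bytes before the status word */
        len = (long)iov_len - len;              /* bytes from statp to the end */
        return len >> 2;                        /* 32-bit XDR words */
}

int main(void)
{
        char buf[64];
        long len = reply_len_words(buf, sizeof(buf), buf + 8);

        if (len > (256 >> 2) || len < 0)
                puts("reject: oversized or corrupt reply");
        else
                printf("cache %ld words\n", len);
        return 0;
}
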
66395diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
66396index 140c496..e9cbf14 100644
66397--- a/fs/nfsd/vfs.c
66398+++ b/fs/nfsd/vfs.c
66399@@ -855,7 +855,7 @@ int nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
66400
66401 oldfs = get_fs();
66402 set_fs(KERNEL_DS);
66403- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
66404+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
66405 set_fs(oldfs);
66406 return nfsd_finish_read(file, count, host_err);
66407 }
66408@@ -943,7 +943,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
66409
66410 /* Write the data. */
66411 oldfs = get_fs(); set_fs(KERNEL_DS);
66412- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
66413+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
66414 set_fs(oldfs);
66415 if (host_err < 0)
66416 goto out_nfserr;
66417@@ -1482,7 +1482,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
66418 */
66419
66420 oldfs = get_fs(); set_fs(KERNEL_DS);
66421- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
66422+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
66423 set_fs(oldfs);
66424
66425 if (host_err < 0)
66426diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
66427index 52ccd34..7a6b202 100644
66428--- a/fs/nls/nls_base.c
66429+++ b/fs/nls/nls_base.c
66430@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
66431
66432 int __register_nls(struct nls_table *nls, struct module *owner)
66433 {
66434- struct nls_table ** tmp = &tables;
66435+ struct nls_table *tmp = tables;
66436
66437 if (nls->next)
66438 return -EBUSY;
66439
66440- nls->owner = owner;
66441+ pax_open_kernel();
66442+ *(void **)&nls->owner = owner;
66443+ pax_close_kernel();
66444 spin_lock(&nls_lock);
66445- while (*tmp) {
66446- if (nls == *tmp) {
66447+ while (tmp) {
66448+ if (nls == tmp) {
66449 spin_unlock(&nls_lock);
66450 return -EBUSY;
66451 }
66452- tmp = &(*tmp)->next;
66453+ tmp = tmp->next;
66454 }
66455- nls->next = tables;
66456+ pax_open_kernel();
66457+ *(struct nls_table **)&nls->next = tables;
66458+ pax_close_kernel();
66459 tables = nls;
66460 spin_unlock(&nls_lock);
66461 return 0;
66462@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
66463
66464 int unregister_nls(struct nls_table * nls)
66465 {
66466- struct nls_table ** tmp = &tables;
66467+ struct nls_table * const * tmp = &tables;
66468
66469 spin_lock(&nls_lock);
66470 while (*tmp) {
66471 if (nls == *tmp) {
66472- *tmp = nls->next;
66473+ pax_open_kernel();
66474+ *(struct nls_table **)tmp = nls->next;
66475+ pax_close_kernel();
66476 spin_unlock(&nls_lock);
66477 return 0;
66478 }
66479@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
66480 return -EINVAL;
66481 }
66482
66483-static struct nls_table *find_nls(char *charset)
66484+static struct nls_table *find_nls(const char *charset)
66485 {
66486 struct nls_table *nls;
66487 spin_lock(&nls_lock);
66488@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
66489 return nls;
66490 }
66491
66492-struct nls_table *load_nls(char *charset)
66493+struct nls_table *load_nls(const char *charset)
66494 {
66495 return try_then_request_module(find_nls(charset), "nls_%s", charset);
66496 }
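
The nls_base.c hunks route every store to the nls_table list through pax_open_kernel()/pax_close_kernel(), the PaX primitive for briefly lifting write protection on otherwise read-only kernel data (the casts through void ** exist only to defeat const). A loose userspace analogue built on mprotect(2) -- Linux-specific, with the page-alignment trick and all names illustrative rather than the kernel mechanism:

#include <stdio.h>
#include <sys/mman.h>

/* A registry kept read-only in steady state; writes are bracketed by
 * calls that flip page protection, mimicking the open/close pairing. */
struct table { const char *name; struct table *next; };

static struct table registry __attribute__((aligned(4096))) = { "base", NULL };

static void protect(int writable)
{
        /* error handling elided for brevity */
        mprotect(&registry, sizeof(registry),
                 writable ? PROT_READ | PROT_WRITE : PROT_READ);
}

int main(void)
{
        static struct table extra = { "extra", NULL };

        protect(0);                     /* sealed in steady state */
        protect(1);                     /* pax_open_kernel() analogue */
        registry.next = &extra;         /* the one sanctioned write */
        protect(0);                     /* pax_close_kernel() analogue */
        printf("%s -> %s\n", registry.name, registry.next->name);
        return 0;
}
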
66497diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
66498index 162b3f1..6076a7c 100644
66499--- a/fs/nls/nls_euc-jp.c
66500+++ b/fs/nls/nls_euc-jp.c
66501@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
66502 p_nls = load_nls("cp932");
66503
66504 if (p_nls) {
66505- table.charset2upper = p_nls->charset2upper;
66506- table.charset2lower = p_nls->charset2lower;
66507+ pax_open_kernel();
66508+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
66509+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
66510+ pax_close_kernel();
66511 return register_nls(&table);
66512 }
66513
66514diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
66515index a80a741..7b96e1b 100644
66516--- a/fs/nls/nls_koi8-ru.c
66517+++ b/fs/nls/nls_koi8-ru.c
66518@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
66519 p_nls = load_nls("koi8-u");
66520
66521 if (p_nls) {
66522- table.charset2upper = p_nls->charset2upper;
66523- table.charset2lower = p_nls->charset2lower;
66524+ pax_open_kernel();
66525+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
66526+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
66527+ pax_close_kernel();
66528 return register_nls(&table);
66529 }
66530
66531diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
66532index 2685bc9..f3462c7 100644
66533--- a/fs/notify/fanotify/fanotify_user.c
66534+++ b/fs/notify/fanotify/fanotify_user.c
66535@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
66536
66537 fd = fanotify_event_metadata.fd;
66538 ret = -EFAULT;
66539- if (copy_to_user(buf, &fanotify_event_metadata,
66540- fanotify_event_metadata.event_len))
66541+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
66542+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
66543 goto out_close_fd;
66544
66545 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
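
The fanotify hunk adds a sanity bound before copy_to_user(): the length field lives inside the very object being copied, so it must be clamped against the object's real size before it is trusted. The same shape in plain C (struct layout and names are illustrative):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Never trust a length embedded in the payload; clamp it against the
 * actual size of the source object first. */
struct event_metadata { uint32_t event_len; int32_t fd; };

static int copy_event(void *dst, size_t dst_len, const struct event_metadata *ev)
{
        if (ev->event_len > sizeof(*ev))        /* corrupt or hostile length */
                return -1;
        if (ev->event_len > dst_len)
                return -1;
        memcpy(dst, ev, ev->event_len);
        return 0;
}

int main(void)
{
        struct event_metadata ev = { sizeof(ev), 3 };
        char buf[64];

        printf("copy ok: %d\n", copy_event(buf, sizeof(buf), &ev) == 0);
        return 0;
}
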
66546diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
66547index 238a593..9d7e2b9 100644
66548--- a/fs/notify/fdinfo.c
66549+++ b/fs/notify/fdinfo.c
66550@@ -42,7 +42,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
66551 {
66552 struct {
66553 struct file_handle handle;
66554- u8 pad[64];
66555+ u8 pad[MAX_HANDLE_SZ];
66556 } f;
66557 int size, ret, i;
66558
66559@@ -50,7 +50,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
66560 size = f.handle.handle_bytes >> 2;
66561
66562 ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, 0);
66563- if ((ret == 255) || (ret == -ENOSPC)) {
66564+ if ((ret == FILEID_INVALID) || (ret < 0)) {
66565 WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret);
66566 return 0;
66567 }
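
The fdinfo.c hunk sizes the on-stack handle buffer with the same constant the encoder honors (MAX_HANDLE_SZ instead of a bare 64) and tests failure via FILEID_INVALID or ret < 0 rather than magic numbers. A rough sketch of the shape (the constant values and encoder stub below are illustrative stand-ins, not the exportfs API):

#include <stdint.h>
#include <stdio.h>

#define MAX_HANDLE_SZ  128      /* illustrative stand-in */
#define FILEID_INVALID 0xff     /* illustrative stand-in */

struct file_handle_sketch {
        uint32_t handle_bytes;
        uint8_t f_handle[MAX_HANDLE_SZ];        /* was: u8 pad[64] */
};

static int encode_fh(struct file_handle_sketch *h)
{
        h->handle_bytes = 8;    /* pretend-encode */
        return 1;               /* fh type; FILEID_INVALID or <0 on failure */
}

int main(void)
{
        struct file_handle_sketch h;
        int ret = encode_fh(&h);

        if (ret == FILEID_INVALID || ret < 0)
                puts("cannot encode handle");
        else
                printf("encoded %u bytes, type %d\n", h.handle_bytes, ret);
        return 0;
}
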
66568diff --git a/fs/notify/notification.c b/fs/notify/notification.c
66569index 25a07c7..4f1163c 100644
66570--- a/fs/notify/notification.c
66571+++ b/fs/notify/notification.c
66572@@ -48,7 +48,7 @@
66573 #include <linux/fsnotify_backend.h>
66574 #include "fsnotify.h"
66575
66576-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
66577+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
66578
66579 /**
66580 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
66581@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
66582 */
66583 u32 fsnotify_get_cookie(void)
66584 {
66585- return atomic_inc_return(&fsnotify_sync_cookie);
66586+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
66587 }
66588 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
66589
66590diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
66591index 9e38daf..5727cae 100644
66592--- a/fs/ntfs/dir.c
66593+++ b/fs/ntfs/dir.c
66594@@ -1310,7 +1310,7 @@ find_next_index_buffer:
66595 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
66596 ~(s64)(ndir->itype.index.block_size - 1)));
66597 /* Bounds checks. */
66598- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
66599+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
66600 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
66601 "inode 0x%lx or driver bug.", vdir->i_ino);
66602 goto err_out;
66603diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
66604index 5c9e2c8..96e4ba0 100644
66605--- a/fs/ntfs/file.c
66606+++ b/fs/ntfs/file.c
66607@@ -1282,7 +1282,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
66608 char *addr;
66609 size_t total = 0;
66610 unsigned len;
66611- int left;
66612+ unsigned left;
66613
66614 do {
66615 len = PAGE_CACHE_SIZE - ofs;
66616diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
66617index 6c3296e..c0b99f0 100644
66618--- a/fs/ntfs/super.c
66619+++ b/fs/ntfs/super.c
66620@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
66621 if (!silent)
66622 ntfs_error(sb, "Primary boot sector is invalid.");
66623 } else if (!silent)
66624- ntfs_error(sb, read_err_str, "primary");
66625+ ntfs_error(sb, read_err_str, "%s", "primary");
66626 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
66627 if (bh_primary)
66628 brelse(bh_primary);
66629@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
66630 goto hotfix_primary_boot_sector;
66631 brelse(bh_backup);
66632 } else if (!silent)
66633- ntfs_error(sb, read_err_str, "backup");
66634+ ntfs_error(sb, read_err_str, "%s", "backup");
66635 /* Try to read NT3.51- backup boot sector. */
66636 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
66637 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
66638@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
66639 "sector.");
66640 brelse(bh_backup);
66641 } else if (!silent)
66642- ntfs_error(sb, read_err_str, "backup");
66643+ ntfs_error(sb, read_err_str, "%s", "backup");
66644 /* We failed. Cleanup and return. */
66645 if (bh_primary)
66646 brelse(bh_primary);
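
The ntfs/super.c hunks rework the ntfs_error() calls so the format argument seen by printf-style checking is a literal. The general idiom being enforced -- variable data must never act as the format string -- in a userspace sketch (this is the generic motivation, not a claim about ntfs_error() internals):

#include <stdio.h>

int main(void)
{
        const char *untrusted = "100%s done";   /* pretend this came from disk */

        /* Hazard: a non-literal format string makes printf pull a vararg
         * that was never passed.  (Commented out on purpose.) */
        /* printf(untrusted); */

        /* Safe: demote the data to an argument of a literal "%s" format. */
        printf("%s\n", untrusted);
        return 0;
}
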
66647diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
66648index 0440134..d52c93a 100644
66649--- a/fs/ocfs2/localalloc.c
66650+++ b/fs/ocfs2/localalloc.c
66651@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
66652 goto bail;
66653 }
66654
66655- atomic_inc(&osb->alloc_stats.moves);
66656+ atomic_inc_unchecked(&osb->alloc_stats.moves);
66657
66658 bail:
66659 if (handle)
66660diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
66661index bbec539..7b266d5 100644
66662--- a/fs/ocfs2/ocfs2.h
66663+++ b/fs/ocfs2/ocfs2.h
66664@@ -236,11 +236,11 @@ enum ocfs2_vol_state
66665
66666 struct ocfs2_alloc_stats
66667 {
66668- atomic_t moves;
66669- atomic_t local_data;
66670- atomic_t bitmap_data;
66671- atomic_t bg_allocs;
66672- atomic_t bg_extends;
66673+ atomic_unchecked_t moves;
66674+ atomic_unchecked_t local_data;
66675+ atomic_unchecked_t bitmap_data;
66676+ atomic_unchecked_t bg_allocs;
66677+ atomic_unchecked_t bg_extends;
66678 };
66679
66680 enum ocfs2_local_alloc_state
66681diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
66682index 0cb889a..6a26b24 100644
66683--- a/fs/ocfs2/suballoc.c
66684+++ b/fs/ocfs2/suballoc.c
66685@@ -867,7 +867,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
66686 mlog_errno(status);
66687 goto bail;
66688 }
66689- atomic_inc(&osb->alloc_stats.bg_extends);
66690+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
66691
66692 /* You should never ask for this much metadata */
66693 BUG_ON(bits_wanted >
66694@@ -2014,7 +2014,7 @@ int ocfs2_claim_metadata(handle_t *handle,
66695 mlog_errno(status);
66696 goto bail;
66697 }
66698- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66699+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66700
66701 *suballoc_loc = res.sr_bg_blkno;
66702 *suballoc_bit_start = res.sr_bit_offset;
66703@@ -2180,7 +2180,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
66704 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
66705 res->sr_bits);
66706
66707- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66708+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66709
66710 BUG_ON(res->sr_bits != 1);
66711
66712@@ -2222,7 +2222,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
66713 mlog_errno(status);
66714 goto bail;
66715 }
66716- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66717+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66718
66719 BUG_ON(res.sr_bits != 1);
66720
66721@@ -2326,7 +2326,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
66722 cluster_start,
66723 num_clusters);
66724 if (!status)
66725- atomic_inc(&osb->alloc_stats.local_data);
66726+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
66727 } else {
66728 if (min_clusters > (osb->bitmap_cpg - 1)) {
66729 /* The only paths asking for contiguousness
66730@@ -2352,7 +2352,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
66731 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
66732 res.sr_bg_blkno,
66733 res.sr_bit_offset);
66734- atomic_inc(&osb->alloc_stats.bitmap_data);
66735+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
66736 *num_clusters = res.sr_bits;
66737 }
66738 }
66739diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
66740index ddb662b..f701c83 100644
66741--- a/fs/ocfs2/super.c
66742+++ b/fs/ocfs2/super.c
66743@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
66744 "%10s => GlobalAllocs: %d LocalAllocs: %d "
66745 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
66746 "Stats",
66747- atomic_read(&osb->alloc_stats.bitmap_data),
66748- atomic_read(&osb->alloc_stats.local_data),
66749- atomic_read(&osb->alloc_stats.bg_allocs),
66750- atomic_read(&osb->alloc_stats.moves),
66751- atomic_read(&osb->alloc_stats.bg_extends));
66752+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
66753+ atomic_read_unchecked(&osb->alloc_stats.local_data),
66754+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
66755+ atomic_read_unchecked(&osb->alloc_stats.moves),
66756+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
66757
66758 out += snprintf(buf + out, len - out,
66759 "%10s => State: %u Descriptor: %llu Size: %u bits "
66760@@ -2100,11 +2100,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
66761
66762 mutex_init(&osb->system_file_mutex);
66763
66764- atomic_set(&osb->alloc_stats.moves, 0);
66765- atomic_set(&osb->alloc_stats.local_data, 0);
66766- atomic_set(&osb->alloc_stats.bitmap_data, 0);
66767- atomic_set(&osb->alloc_stats.bg_allocs, 0);
66768- atomic_set(&osb->alloc_stats.bg_extends, 0);
66769+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
66770+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
66771+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
66772+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
66773+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
66774
66775 /* Copy the blockcheck stats from the superblock probe */
66776 osb->osb_ecc_stats = *stats;
66777diff --git a/fs/open.c b/fs/open.c
66778index d6fd3ac..6ccf474 100644
66779--- a/fs/open.c
66780+++ b/fs/open.c
66781@@ -32,6 +32,8 @@
66782 #include <linux/dnotify.h>
66783 #include <linux/compat.h>
66784
66785+#define CREATE_TRACE_POINTS
66786+#include <trace/events/fs.h>
66787 #include "internal.h"
66788
66789 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
66790@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
66791 error = locks_verify_truncate(inode, NULL, length);
66792 if (!error)
66793 error = security_path_truncate(path);
66794+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
66795+ error = -EACCES;
66796 if (!error)
66797 error = do_truncate(path->dentry, length, 0, NULL);
66798
66799@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
66800 error = locks_verify_truncate(inode, f.file, length);
66801 if (!error)
66802 error = security_path_truncate(&f.file->f_path);
66803+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
66804+ error = -EACCES;
66805 if (!error)
66806 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
66807 sb_end_write(inode->i_sb);
66808@@ -380,6 +386,9 @@ retry:
66809 if (__mnt_is_readonly(path.mnt))
66810 res = -EROFS;
66811
66812+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
66813+ res = -EACCES;
66814+
66815 out_path_release:
66816 path_put(&path);
66817 if (retry_estale(res, lookup_flags)) {
66818@@ -411,6 +420,8 @@ retry:
66819 if (error)
66820 goto dput_and_out;
66821
66822+ gr_log_chdir(path.dentry, path.mnt);
66823+
66824 set_fs_pwd(current->fs, &path);
66825
66826 dput_and_out:
66827@@ -440,6 +451,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
66828 goto out_putf;
66829
66830 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
66831+
66832+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
66833+ error = -EPERM;
66834+
66835+ if (!error)
66836+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
66837+
66838 if (!error)
66839 set_fs_pwd(current->fs, &f.file->f_path);
66840 out_putf:
66841@@ -469,7 +487,13 @@ retry:
66842 if (error)
66843 goto dput_and_out;
66844
66845+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
66846+ goto dput_and_out;
66847+
66848 set_fs_root(current->fs, &path);
66849+
66850+ gr_handle_chroot_chdir(&path);
66851+
66852 error = 0;
66853 dput_and_out:
66854 path_put(&path);
66855@@ -493,6 +517,16 @@ static int chmod_common(struct path *path, umode_t mode)
66856 return error;
66857 retry_deleg:
66858 mutex_lock(&inode->i_mutex);
66859+
66860+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
66861+ error = -EACCES;
66862+ goto out_unlock;
66863+ }
66864+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
66865+ error = -EACCES;
66866+ goto out_unlock;
66867+ }
66868+
66869 error = security_path_chmod(path, mode);
66870 if (error)
66871 goto out_unlock;
66872@@ -558,6 +592,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
66873 uid = make_kuid(current_user_ns(), user);
66874 gid = make_kgid(current_user_ns(), group);
66875
66876+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
66877+ return -EACCES;
66878+
66879 newattrs.ia_valid = ATTR_CTIME;
66880 if (user != (uid_t) -1) {
66881 if (!uid_valid(uid))
66882@@ -983,6 +1020,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
66883 } else {
66884 fsnotify_open(f);
66885 fd_install(fd, f);
66886+ trace_do_sys_open(tmp->name, flags, mode);
66887 }
66888 }
66889 putname(tmp);
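
Every fs/open.c hunk above has the same shape: after the stock DAC/LSM checks succeed, one extra grsecurity predicate runs and may only tighten the result, turning success into -EACCES or -EPERM, never the reverse. A compact sketch of that stacked-gate pattern (both checks stubbed; the return conventions follow the hunk):

#include <stdio.h>
#include <errno.h>

/* Stubs: the LSM hook returns 0 on success; the gr_acl hook returns
 * nonzero to allow, 0 to deny -- matching the hunk's "!gr_acl_..." test. */
static int security_path_truncate(void) { return 0; }
static int gr_acl_handle_truncate(void) { return 0; }   /* deny, for demo */

static int vfs_truncate_sketch(void)
{
        int error = security_path_truncate();

        if (!error && !gr_acl_handle_truncate())
                error = -EACCES;        /* the extra gate can only deny */
        return error;
}

int main(void)
{
        printf("truncate -> %d\n", vfs_truncate_sketch());      /* -13 */
        return 0;
}
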
66890diff --git a/fs/pipe.c b/fs/pipe.c
66891index 21981e5..3d5f55c 100644
66892--- a/fs/pipe.c
66893+++ b/fs/pipe.c
66894@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
66895
66896 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
66897 {
66898- if (pipe->files)
66899+ if (atomic_read(&pipe->files))
66900 mutex_lock_nested(&pipe->mutex, subclass);
66901 }
66902
66903@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
66904
66905 void pipe_unlock(struct pipe_inode_info *pipe)
66906 {
66907- if (pipe->files)
66908+ if (atomic_read(&pipe->files))
66909 mutex_unlock(&pipe->mutex);
66910 }
66911 EXPORT_SYMBOL(pipe_unlock);
66912@@ -292,9 +292,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
66913 }
66914 if (bufs) /* More to do? */
66915 continue;
66916- if (!pipe->writers)
66917+ if (!atomic_read(&pipe->writers))
66918 break;
66919- if (!pipe->waiting_writers) {
66920+ if (!atomic_read(&pipe->waiting_writers)) {
66921 /* syscall merging: Usually we must not sleep
66922 * if O_NONBLOCK is set, or if we got some data.
66923 * But if a writer sleeps in kernel space, then
66924@@ -351,7 +351,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
66925
66926 __pipe_lock(pipe);
66927
66928- if (!pipe->readers) {
66929+ if (!atomic_read(&pipe->readers)) {
66930 send_sig(SIGPIPE, current, 0);
66931 ret = -EPIPE;
66932 goto out;
66933@@ -387,7 +387,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
66934 for (;;) {
66935 int bufs;
66936
66937- if (!pipe->readers) {
66938+ if (!atomic_read(&pipe->readers)) {
66939 send_sig(SIGPIPE, current, 0);
66940 if (!ret)
66941 ret = -EPIPE;
66942@@ -455,9 +455,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
66943 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
66944 do_wakeup = 0;
66945 }
66946- pipe->waiting_writers++;
66947+ atomic_inc(&pipe->waiting_writers);
66948 pipe_wait(pipe);
66949- pipe->waiting_writers--;
66950+ atomic_dec(&pipe->waiting_writers);
66951 }
66952 out:
66953 __pipe_unlock(pipe);
66954@@ -512,7 +512,7 @@ pipe_poll(struct file *filp, poll_table *wait)
66955 mask = 0;
66956 if (filp->f_mode & FMODE_READ) {
66957 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
66958- if (!pipe->writers && filp->f_version != pipe->w_counter)
66959+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
66960 mask |= POLLHUP;
66961 }
66962
66963@@ -522,7 +522,7 @@ pipe_poll(struct file *filp, poll_table *wait)
66964 * Most Unices do not set POLLERR for FIFOs but on Linux they
66965 * behave exactly like pipes for poll().
66966 */
66967- if (!pipe->readers)
66968+ if (!atomic_read(&pipe->readers))
66969 mask |= POLLERR;
66970 }
66971
66972@@ -534,7 +534,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
66973 int kill = 0;
66974
66975 spin_lock(&inode->i_lock);
66976- if (!--pipe->files) {
66977+ if (atomic_dec_and_test(&pipe->files)) {
66978 inode->i_pipe = NULL;
66979 kill = 1;
66980 }
66981@@ -551,11 +551,11 @@ pipe_release(struct inode *inode, struct file *file)
66982
66983 __pipe_lock(pipe);
66984 if (file->f_mode & FMODE_READ)
66985- pipe->readers--;
66986+ atomic_dec(&pipe->readers);
66987 if (file->f_mode & FMODE_WRITE)
66988- pipe->writers--;
66989+ atomic_dec(&pipe->writers);
66990
66991- if (pipe->readers || pipe->writers) {
66992+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
66993 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
66994 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
66995 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
66996@@ -620,7 +620,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
66997 kfree(pipe);
66998 }
66999
67000-static struct vfsmount *pipe_mnt __read_mostly;
67001+struct vfsmount *pipe_mnt __read_mostly;
67002
67003 /*
67004 * pipefs_dname() is called from d_path().
67005@@ -650,8 +650,9 @@ static struct inode * get_pipe_inode(void)
67006 goto fail_iput;
67007
67008 inode->i_pipe = pipe;
67009- pipe->files = 2;
67010- pipe->readers = pipe->writers = 1;
67011+ atomic_set(&pipe->files, 2);
67012+ atomic_set(&pipe->readers, 1);
67013+ atomic_set(&pipe->writers, 1);
67014 inode->i_fop = &pipefifo_fops;
67015
67016 /*
67017@@ -830,17 +831,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
67018 spin_lock(&inode->i_lock);
67019 if (inode->i_pipe) {
67020 pipe = inode->i_pipe;
67021- pipe->files++;
67022+ atomic_inc(&pipe->files);
67023 spin_unlock(&inode->i_lock);
67024 } else {
67025 spin_unlock(&inode->i_lock);
67026 pipe = alloc_pipe_info();
67027 if (!pipe)
67028 return -ENOMEM;
67029- pipe->files = 1;
67030+ atomic_set(&pipe->files, 1);
67031 spin_lock(&inode->i_lock);
67032 if (unlikely(inode->i_pipe)) {
67033- inode->i_pipe->files++;
67034+ atomic_inc(&inode->i_pipe->files);
67035 spin_unlock(&inode->i_lock);
67036 free_pipe_info(pipe);
67037 pipe = inode->i_pipe;
67038@@ -865,10 +866,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
67039 * opened, even when there is no process writing the FIFO.
67040 */
67041 pipe->r_counter++;
67042- if (pipe->readers++ == 0)
67043+ if (atomic_inc_return(&pipe->readers) == 1)
67044 wake_up_partner(pipe);
67045
67046- if (!is_pipe && !pipe->writers) {
67047+ if (!is_pipe && !atomic_read(&pipe->writers)) {
67048 if ((filp->f_flags & O_NONBLOCK)) {
67049 /* suppress POLLHUP until we have
67050 * seen a writer */
67051@@ -887,14 +888,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
67052 * errno=ENXIO when there is no process reading the FIFO.
67053 */
67054 ret = -ENXIO;
67055- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
67056+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
67057 goto err;
67058
67059 pipe->w_counter++;
67060- if (!pipe->writers++)
67061+ if (atomic_inc_return(&pipe->writers) == 1)
67062 wake_up_partner(pipe);
67063
67064- if (!is_pipe && !pipe->readers) {
67065+ if (!is_pipe && !atomic_read(&pipe->readers)) {
67066 if (wait_for_partner(pipe, &pipe->r_counter))
67067 goto err_wr;
67068 }
67069@@ -908,11 +909,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
67070 * the process can at least talk to itself.
67071 */
67072
67073- pipe->readers++;
67074- pipe->writers++;
67075+ atomic_inc(&pipe->readers);
67076+ atomic_inc(&pipe->writers);
67077 pipe->r_counter++;
67078 pipe->w_counter++;
67079- if (pipe->readers == 1 || pipe->writers == 1)
67080+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
67081 wake_up_partner(pipe);
67082 break;
67083
67084@@ -926,13 +927,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
67085 return 0;
67086
67087 err_rd:
67088- if (!--pipe->readers)
67089+ if (atomic_dec_and_test(&pipe->readers))
67090 wake_up_interruptible(&pipe->wait);
67091 ret = -ERESTARTSYS;
67092 goto err;
67093
67094 err_wr:
67095- if (!--pipe->writers)
67096+ if (atomic_dec_and_test(&pipe->writers))
67097 wake_up_interruptible(&pipe->wait);
67098 ret = -ERESTARTSYS;
67099 goto err;
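
The fs/pipe.c conversion turns bare int reader/writer/file counts into atomics so concurrent open/release paths cannot lose an update; note the idiom shift from post-increment tests ("pipe->readers++ == 0") to inc-and-return tests ("atomic_inc_return(...) == 1"). The equivalent in C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int readers;

static int open_reader(void)
{
        /* increment and read the new value as one atomic step */
        return atomic_fetch_add(&readers, 1) + 1 == 1;  /* true for first reader */
}

static int close_reader(void)
{
        return atomic_fetch_sub(&readers, 1) - 1 == 0;  /* true for last reader */
}

int main(void)
{
        printf("first? %d\n", open_reader());   /* 1 */
        printf("first? %d\n", open_reader());   /* 0 */
        printf("last?  %d\n", close_reader());  /* 0 */
        printf("last?  %d\n", close_reader());  /* 1 */
        return 0;
}
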
67100diff --git a/fs/posix_acl.c b/fs/posix_acl.c
67101index 0855f77..6787d50 100644
67102--- a/fs/posix_acl.c
67103+++ b/fs/posix_acl.c
67104@@ -20,6 +20,7 @@
67105 #include <linux/xattr.h>
67106 #include <linux/export.h>
67107 #include <linux/user_namespace.h>
67108+#include <linux/grsecurity.h>
67109
67110 struct posix_acl **acl_by_type(struct inode *inode, int type)
67111 {
67112@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
67113 }
67114 }
67115 if (mode_p)
67116- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
67117+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
67118 return not_equiv;
67119 }
67120 EXPORT_SYMBOL(posix_acl_equiv_mode);
67121@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
67122 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
67123 }
67124
67125- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
67126+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
67127 return not_equiv;
67128 }
67129
67130@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
67131 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
67132 int err = -ENOMEM;
67133 if (clone) {
67134+ *mode_p &= ~gr_acl_umask();
67135+
67136 err = posix_acl_create_masq(clone, mode_p);
67137 if (err < 0) {
67138 posix_acl_release(clone);
67139@@ -659,11 +662,12 @@ struct posix_acl *
67140 posix_acl_from_xattr(struct user_namespace *user_ns,
67141 const void *value, size_t size)
67142 {
67143- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
67144- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
67145+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
67146+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
67147 int count;
67148 struct posix_acl *acl;
67149 struct posix_acl_entry *acl_e;
67150+ umode_t umask = gr_acl_umask();
67151
67152 if (!value)
67153 return NULL;
67154@@ -689,12 +693,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
67155
67156 switch(acl_e->e_tag) {
67157 case ACL_USER_OBJ:
67158+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
67159+ break;
67160 case ACL_GROUP_OBJ:
67161 case ACL_MASK:
67162+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
67163+ break;
67164 case ACL_OTHER:
67165+ acl_e->e_perm &= ~(umask & S_IRWXO);
67166 break;
67167
67168 case ACL_USER:
67169+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
67170 acl_e->e_uid =
67171 make_kuid(user_ns,
67172 le32_to_cpu(entry->e_id));
67173@@ -702,6 +712,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
67174 goto fail;
67175 break;
67176 case ACL_GROUP:
67177+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
67178 acl_e->e_gid =
67179 make_kgid(user_ns,
67180 le32_to_cpu(entry->e_id));
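
The posix_acl.c hunks make ACLs honor a grsecurity-enforced umask: each entry decoded from the xattr loses the permission bits the umask strips for its class (owner bits for USER/USER_OBJ, group bits for GROUP/GROUP_OBJ/MASK, other bits for OTHER). The bit arithmetic in isolation (a sketch; gr_acl_umask() is stubbed by a constant):

#include <stdio.h>
#include <sys/stat.h>

enum acl_tag { TAG_USER, TAG_GROUP, TAG_OTHER };

/* Strip the class-appropriate umask bits from a 3-bit rwx permission. */
static unsigned short mask_perm(enum acl_tag tag, unsigned short perm,
                                mode_t umask_bits)
{
        switch (tag) {
        case TAG_USER:  return perm & ~((umask_bits & S_IRWXU) >> 6);
        case TAG_GROUP: return perm & ~((umask_bits & S_IRWXG) >> 3);
        default:        return perm & ~(umask_bits & S_IRWXO);
        }
}

int main(void)
{
        /* umask 027: group loses write, other loses everything */
        printf("user  rwx -> %o\n", mask_perm(TAG_USER, 07, 0027));  /* 7 */
        printf("group rwx -> %o\n", mask_perm(TAG_GROUP, 07, 0027)); /* 5 */
        printf("other rwx -> %o\n", mask_perm(TAG_OTHER, 07, 0027)); /* 0 */
        return 0;
}
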
67181diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
67182index 2183fcf..3c32a98 100644
67183--- a/fs/proc/Kconfig
67184+++ b/fs/proc/Kconfig
67185@@ -30,7 +30,7 @@ config PROC_FS
67186
67187 config PROC_KCORE
67188 bool "/proc/kcore support" if !ARM
67189- depends on PROC_FS && MMU
67190+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
67191 help
67192 Provides a virtual ELF core file of the live kernel. This can
67193 be read with gdb and other ELF tools. No modifications can be
67194@@ -38,8 +38,8 @@ config PROC_KCORE
67195
67196 config PROC_VMCORE
67197 bool "/proc/vmcore support"
67198- depends on PROC_FS && CRASH_DUMP
67199- default y
67200+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
67201+ default n
67202 help
67203 Exports the dump image of crashed kernel in ELF format.
67204
67205@@ -63,8 +63,8 @@ config PROC_SYSCTL
67206 limited in memory.
67207
67208 config PROC_PAGE_MONITOR
67209- default y
67210- depends on PROC_FS && MMU
67211+ default n
67212+ depends on PROC_FS && MMU && !GRKERNSEC
67213 bool "Enable /proc page monitoring" if EXPERT
67214 help
67215 Various /proc files exist to monitor process memory utilization:
67216diff --git a/fs/proc/array.c b/fs/proc/array.c
67217index 3e1290b..7ebe5b5 100644
67218--- a/fs/proc/array.c
67219+++ b/fs/proc/array.c
67220@@ -60,6 +60,7 @@
67221 #include <linux/tty.h>
67222 #include <linux/string.h>
67223 #include <linux/mman.h>
67224+#include <linux/grsecurity.h>
67225 #include <linux/proc_fs.h>
67226 #include <linux/ioport.h>
67227 #include <linux/uaccess.h>
67228@@ -347,6 +348,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
67229 seq_putc(m, '\n');
67230 }
67231
67232+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
67233+static inline void task_pax(struct seq_file *m, struct task_struct *p)
67234+{
67235+ if (p->mm)
67236+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
67237+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
67238+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
67239+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
67240+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
67241+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
67242+ else
67243+ seq_printf(m, "PaX:\t-----\n");
67244+}
67245+#endif
67246+
67247 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
67248 struct pid *pid, struct task_struct *task)
67249 {
67250@@ -365,9 +381,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
67251 task_cpus_allowed(m, task);
67252 cpuset_task_status_allowed(m, task);
67253 task_context_switch_counts(m, task);
67254+
67255+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
67256+ task_pax(m, task);
67257+#endif
67258+
67259+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
67260+ task_grsec_rbac(m, task);
67261+#endif
67262+
67263 return 0;
67264 }
67265
67266+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67267+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
67268+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
67269+ _mm->pax_flags & MF_PAX_SEGMEXEC))
67270+#endif
67271+
67272 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
67273 struct pid *pid, struct task_struct *task, int whole)
67274 {
67275@@ -389,6 +420,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
67276 char tcomm[sizeof(task->comm)];
67277 unsigned long flags;
67278
67279+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67280+ if (current->exec_id != m->exec_id) {
67281+ gr_log_badprocpid("stat");
67282+ return 0;
67283+ }
67284+#endif
67285+
67286 state = *get_task_state(task);
67287 vsize = eip = esp = 0;
67288 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
67289@@ -459,6 +497,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
67290 gtime = task_gtime(task);
67291 }
67292
67293+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67294+ if (PAX_RAND_FLAGS(mm)) {
67295+ eip = 0;
67296+ esp = 0;
67297+ wchan = 0;
67298+ }
67299+#endif
67300+#ifdef CONFIG_GRKERNSEC_HIDESYM
67301+ wchan = 0;
67302+ eip =0;
67303+ esp =0;
67304+#endif
67305+
67306 /* scale priority and nice values from timeslices to -20..20 */
67307 /* to make it look like a "normal" Unix priority/nice value */
67308 priority = task_prio(task);
67309@@ -495,9 +546,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
67310 seq_put_decimal_ull(m, ' ', vsize);
67311 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
67312 seq_put_decimal_ull(m, ' ', rsslim);
67313+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67314+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
67315+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
67316+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
67317+#else
67318 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
67319 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
67320 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
67321+#endif
67322 seq_put_decimal_ull(m, ' ', esp);
67323 seq_put_decimal_ull(m, ' ', eip);
67324 /* The signal information here is obsolete.
67325@@ -519,7 +576,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
67326 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
67327 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
67328
67329- if (mm && permitted) {
67330+ if (mm && permitted
67331+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67332+ && !PAX_RAND_FLAGS(mm)
67333+#endif
67334+ ) {
67335 seq_put_decimal_ull(m, ' ', mm->start_data);
67336 seq_put_decimal_ull(m, ' ', mm->end_data);
67337 seq_put_decimal_ull(m, ' ', mm->start_brk);
67338@@ -557,8 +618,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
67339 struct pid *pid, struct task_struct *task)
67340 {
67341 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
67342- struct mm_struct *mm = get_task_mm(task);
67343+ struct mm_struct *mm;
67344
67345+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67346+ if (current->exec_id != m->exec_id) {
67347+ gr_log_badprocpid("statm");
67348+ return 0;
67349+ }
67350+#endif
67351+ mm = get_task_mm(task);
67352 if (mm) {
67353 size = task_statm(mm, &shared, &text, &data, &resident);
67354 mmput(mm);
67355@@ -581,6 +649,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
67356 return 0;
67357 }
67358
67359+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
67360+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
67361+{
67362+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
67363+}
67364+#endif
67365+
67366 #ifdef CONFIG_CHECKPOINT_RESTORE
67367 static struct pid *
67368 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
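
The proc/array.c additions above do two things: print a "PaX:" status line in /proc/<pid>/status (uppercase letter = flag enabled) and blank out eip/esp/wchan and the code/stack addresses when GRKERNSEC_PROC_MEMMAP hides layout details. The flag-line formatting in isolation (the flag values below are illustrative, not the kernel's MF_PAX_* constants):

#include <stdio.h>

#define PF_PAGEEXEC 0x01
#define PF_EMUTRAMP 0x02
#define PF_MPROTECT 0x04
#define PF_RANDMMAP 0x08
#define PF_SEGMEXEC 0x10

/* One letter per flag, uppercase when set, mirroring task_pax(). */
static void print_pax(unsigned flags)
{
        printf("PaX:\t%c%c%c%c%c\n",
               flags & PF_PAGEEXEC ? 'P' : 'p',
               flags & PF_EMUTRAMP ? 'E' : 'e',
               flags & PF_MPROTECT ? 'M' : 'm',
               flags & PF_RANDMMAP ? 'R' : 'r',
               flags & PF_SEGMEXEC ? 'S' : 's');
}

int main(void)
{
        print_pax(PF_PAGEEXEC | PF_MPROTECT | PF_RANDMMAP);     /* "PeMRs" */
        return 0;
}
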
67369diff --git a/fs/proc/base.c b/fs/proc/base.c
67370index 2d696b0..b9da447 100644
67371--- a/fs/proc/base.c
67372+++ b/fs/proc/base.c
67373@@ -113,6 +113,14 @@ struct pid_entry {
67374 union proc_op op;
67375 };
67376
67377+struct getdents_callback {
67378+ struct linux_dirent __user * current_dir;
67379+ struct linux_dirent __user * previous;
67380+ struct file * file;
67381+ int count;
67382+ int error;
67383+};
67384+
67385 #define NOD(NAME, MODE, IOP, FOP, OP) { \
67386 .name = (NAME), \
67387 .len = sizeof(NAME) - 1, \
67388@@ -205,12 +213,28 @@ static int proc_pid_cmdline(struct task_struct *task, char *buffer)
67389 return get_cmdline(task, buffer, PAGE_SIZE);
67390 }
67391
67392+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67393+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
67394+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
67395+ _mm->pax_flags & MF_PAX_SEGMEXEC))
67396+#endif
67397+
67398 static int proc_pid_auxv(struct task_struct *task, char *buffer)
67399 {
67400 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
67401 int res = PTR_ERR(mm);
67402 if (mm && !IS_ERR(mm)) {
67403 unsigned int nwords = 0;
67404+
67405+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67406+ /* allow if we're currently ptracing this task */
67407+ if (PAX_RAND_FLAGS(mm) &&
67408+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
67409+ mmput(mm);
67410+ return 0;
67411+ }
67412+#endif
67413+
67414 do {
67415 nwords += 2;
67416 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
67417@@ -224,7 +248,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
67418 }
67419
67420
67421-#ifdef CONFIG_KALLSYMS
67422+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67423 /*
67424 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
67425 * Returns the resolved symbol. If that fails, simply return the address.
67426@@ -263,7 +287,7 @@ static void unlock_trace(struct task_struct *task)
67427 mutex_unlock(&task->signal->cred_guard_mutex);
67428 }
67429
67430-#ifdef CONFIG_STACKTRACE
67431+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67432
67433 #define MAX_STACK_TRACE_DEPTH 64
67434
67435@@ -486,7 +510,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
67436 return count;
67437 }
67438
67439-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
67440+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
67441 static int proc_pid_syscall(struct task_struct *task, char *buffer)
67442 {
67443 long nr;
67444@@ -515,7 +539,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
67445 /************************************************************************/
67446
67447 /* permission checks */
67448-static int proc_fd_access_allowed(struct inode *inode)
67449+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
67450 {
67451 struct task_struct *task;
67452 int allowed = 0;
67453@@ -525,7 +549,10 @@ static int proc_fd_access_allowed(struct inode *inode)
67454 */
67455 task = get_proc_task(inode);
67456 if (task) {
67457- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
67458+ if (log)
67459+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
67460+ else
67461+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
67462 put_task_struct(task);
67463 }
67464 return allowed;
67465@@ -556,10 +583,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
67466 struct task_struct *task,
67467 int hide_pid_min)
67468 {
67469+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
67470+ return false;
67471+
67472+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67473+ rcu_read_lock();
67474+ {
67475+ const struct cred *tmpcred = current_cred();
67476+ const struct cred *cred = __task_cred(task);
67477+
67478+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
67479+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67480+ || in_group_p(grsec_proc_gid)
67481+#endif
67482+ ) {
67483+ rcu_read_unlock();
67484+ return true;
67485+ }
67486+ }
67487+ rcu_read_unlock();
67488+
67489+ if (!pid->hide_pid)
67490+ return false;
67491+#endif
67492+
67493 if (pid->hide_pid < hide_pid_min)
67494 return true;
67495 if (in_group_p(pid->pid_gid))
67496 return true;
67497+
67498 return ptrace_may_access(task, PTRACE_MODE_READ);
67499 }
67500
67501@@ -577,7 +629,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
67502 put_task_struct(task);
67503
67504 if (!has_perms) {
67505+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67506+ {
67507+#else
67508 if (pid->hide_pid == 2) {
67509+#endif
67510 /*
67511 * Let's make getdents(), stat(), and open()
67512 * consistent with each other. If a process
67513@@ -675,6 +731,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
67514 if (!task)
67515 return -ESRCH;
67516
67517+ if (gr_acl_handle_procpidmem(task)) {
67518+ put_task_struct(task);
67519+ return -EPERM;
67520+ }
67521+
67522 mm = mm_access(task, mode);
67523 put_task_struct(task);
67524
67525@@ -690,6 +751,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
67526
67527 file->private_data = mm;
67528
67529+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67530+ file->f_version = current->exec_id;
67531+#endif
67532+
67533 return 0;
67534 }
67535
67536@@ -711,6 +776,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
67537 ssize_t copied;
67538 char *page;
67539
67540+#ifdef CONFIG_GRKERNSEC
67541+ if (write)
67542+ return -EPERM;
67543+#endif
67544+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67545+ if (file->f_version != current->exec_id) {
67546+ gr_log_badprocpid("mem");
67547+ return 0;
67548+ }
67549+#endif
67550+
67551 if (!mm)
67552 return 0;
67553
67554@@ -723,7 +799,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
67555 goto free;
67556
67557 while (count > 0) {
67558- int this_len = min_t(int, count, PAGE_SIZE);
67559+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
67560
67561 if (write && copy_from_user(page, buf, this_len)) {
67562 copied = -EFAULT;
67563@@ -815,6 +891,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
67564 if (!mm)
67565 return 0;
67566
67567+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67568+ if (file->f_version != current->exec_id) {
67569+ gr_log_badprocpid("environ");
67570+ return 0;
67571+ }
67572+#endif
67573+
67574 page = (char *)__get_free_page(GFP_TEMPORARY);
67575 if (!page)
67576 return -ENOMEM;
67577@@ -824,7 +907,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
67578 goto free;
67579 while (count > 0) {
67580 size_t this_len, max_len;
67581- int retval;
67582+ ssize_t retval;
67583
67584 if (src >= (mm->env_end - mm->env_start))
67585 break;
67586@@ -1438,7 +1521,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
67587 int error = -EACCES;
67588
67589 /* Are we allowed to snoop on the tasks file descriptors? */
67590- if (!proc_fd_access_allowed(inode))
67591+ if (!proc_fd_access_allowed(inode, 0))
67592 goto out;
67593
67594 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
67595@@ -1482,8 +1565,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
67596 struct path path;
67597
67598 /* Are we allowed to snoop on the tasks file descriptors? */
67599- if (!proc_fd_access_allowed(inode))
67600- goto out;
67601+ /* logging this is needed for learning on chromium to work properly,
67602+ but we don't want to flood the logs from 'ps' which does a readlink
67603+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
67604+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
67605+ */
67606+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
67607+ if (!proc_fd_access_allowed(inode,0))
67608+ goto out;
67609+ } else {
67610+ if (!proc_fd_access_allowed(inode,1))
67611+ goto out;
67612+ }
67613
67614 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
67615 if (error)
67616@@ -1533,7 +1626,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
67617 rcu_read_lock();
67618 cred = __task_cred(task);
67619 inode->i_uid = cred->euid;
67620+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67621+ inode->i_gid = grsec_proc_gid;
67622+#else
67623 inode->i_gid = cred->egid;
67624+#endif
67625 rcu_read_unlock();
67626 }
67627 security_task_to_inode(task, inode);
67628@@ -1569,10 +1666,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
67629 return -ENOENT;
67630 }
67631 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
67632+#ifdef CONFIG_GRKERNSEC_PROC_USER
67633+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
67634+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67635+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
67636+#endif
67637 task_dumpable(task)) {
67638 cred = __task_cred(task);
67639 stat->uid = cred->euid;
67640+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67641+ stat->gid = grsec_proc_gid;
67642+#else
67643 stat->gid = cred->egid;
67644+#endif
67645 }
67646 }
67647 rcu_read_unlock();
67648@@ -1610,11 +1716,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
67649
67650 if (task) {
67651 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
67652+#ifdef CONFIG_GRKERNSEC_PROC_USER
67653+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
67654+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67655+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
67656+#endif
67657 task_dumpable(task)) {
67658 rcu_read_lock();
67659 cred = __task_cred(task);
67660 inode->i_uid = cred->euid;
67661+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67662+ inode->i_gid = grsec_proc_gid;
67663+#else
67664 inode->i_gid = cred->egid;
67665+#endif
67666 rcu_read_unlock();
67667 } else {
67668 inode->i_uid = GLOBAL_ROOT_UID;
67669@@ -2149,6 +2264,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
67670 if (!task)
67671 goto out_no_task;
67672
67673+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
67674+ goto out;
67675+
67676 /*
67677 * Yes, it does not scale. And it should not. Don't add
67678 * new entries into /proc/<tgid>/ without very good reasons.
67679@@ -2179,6 +2297,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
67680 if (!task)
67681 return -ENOENT;
67682
67683+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
67684+ goto out;
67685+
67686 if (!dir_emit_dots(file, ctx))
67687 goto out;
67688
67689@@ -2568,7 +2689,7 @@ static const struct pid_entry tgid_base_stuff[] = {
67690 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
67691 #endif
67692 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
67693-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
67694+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
67695 INF("syscall", S_IRUSR, proc_pid_syscall),
67696 #endif
67697 INF("cmdline", S_IRUGO, proc_pid_cmdline),
67698@@ -2593,10 +2714,10 @@ static const struct pid_entry tgid_base_stuff[] = {
67699 #ifdef CONFIG_SECURITY
67700 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
67701 #endif
67702-#ifdef CONFIG_KALLSYMS
67703+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67704 INF("wchan", S_IRUGO, proc_pid_wchan),
67705 #endif
67706-#ifdef CONFIG_STACKTRACE
67707+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67708 ONE("stack", S_IRUSR, proc_pid_stack),
67709 #endif
67710 #ifdef CONFIG_SCHEDSTATS
67711@@ -2630,6 +2751,9 @@ static const struct pid_entry tgid_base_stuff[] = {
67712 #ifdef CONFIG_HARDWALL
67713 INF("hardwall", S_IRUGO, proc_pid_hardwall),
67714 #endif
67715+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
67716+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
67717+#endif
67718 #ifdef CONFIG_USER_NS
67719 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
67720 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
67721@@ -2760,7 +2884,14 @@ static int proc_pid_instantiate(struct inode *dir,
67722 if (!inode)
67723 goto out;
67724
67725+#ifdef CONFIG_GRKERNSEC_PROC_USER
67726+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
67727+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67728+ inode->i_gid = grsec_proc_gid;
67729+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
67730+#else
67731 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
67732+#endif
67733 inode->i_op = &proc_tgid_base_inode_operations;
67734 inode->i_fop = &proc_tgid_base_operations;
67735 inode->i_flags|=S_IMMUTABLE;
67736@@ -2798,7 +2929,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
67737 if (!task)
67738 goto out;
67739
67740+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
67741+ goto out_put_task;
67742+
67743 result = proc_pid_instantiate(dir, dentry, task, NULL);
67744+out_put_task:
67745 put_task_struct(task);
67746 out:
67747 return ERR_PTR(result);
67748@@ -2904,7 +3039,7 @@ static const struct pid_entry tid_base_stuff[] = {
67749 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
67750 #endif
67751 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
67752-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
67753+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
67754 INF("syscall", S_IRUSR, proc_pid_syscall),
67755 #endif
67756 INF("cmdline", S_IRUGO, proc_pid_cmdline),
67757@@ -2931,10 +3066,10 @@ static const struct pid_entry tid_base_stuff[] = {
67758 #ifdef CONFIG_SECURITY
67759 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
67760 #endif
67761-#ifdef CONFIG_KALLSYMS
67762+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67763 INF("wchan", S_IRUGO, proc_pid_wchan),
67764 #endif
67765-#ifdef CONFIG_STACKTRACE
67766+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67767 ONE("stack", S_IRUSR, proc_pid_stack),
67768 #endif
67769 #ifdef CONFIG_SCHEDSTATS
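
A thread running through the proc/base.c hunks is the exec_id check: __mem_open() stamps file->f_version with the opener's exec generation, and later reads return nothing if the task has execve()'d since, so an fd that survives a privilege-changing exec leaks nothing. A toy model of the stamp-and-compare (names illustrative):

#include <stdio.h>

struct pseudo_file { unsigned long stamped_exec_id; };

static unsigned long current_exec_id;   /* bumped on every execve() */

static long pseudo_read(const struct pseudo_file *f)
{
        if (f->stamped_exec_id != current_exec_id)
                return 0;       /* stale fd from before the exec: refuse */
        return 42;              /* normal read path */
}

int main(void)
{
        struct pseudo_file f = { current_exec_id };

        printf("before exec: %ld\n", pseudo_read(&f));  /* 42 */
        current_exec_id++;      /* simulate execve() of a new image */
        printf("after exec:  %ld\n", pseudo_read(&f));  /* 0 */
        return 0;
}
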
67770diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
67771index cbd82df..c0407d2 100644
67772--- a/fs/proc/cmdline.c
67773+++ b/fs/proc/cmdline.c
67774@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
67775
67776 static int __init proc_cmdline_init(void)
67777 {
67778+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67779+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
67780+#else
67781 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
67782+#endif
67783 return 0;
67784 }
67785 fs_initcall(proc_cmdline_init);
67786diff --git a/fs/proc/devices.c b/fs/proc/devices.c
67787index 50493ed..248166b 100644
67788--- a/fs/proc/devices.c
67789+++ b/fs/proc/devices.c
67790@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
67791
67792 static int __init proc_devices_init(void)
67793 {
67794+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67795+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
67796+#else
67797 proc_create("devices", 0, NULL, &proc_devinfo_operations);
67798+#endif
67799 return 0;
67800 }
67801 fs_initcall(proc_devices_init);
67802diff --git a/fs/proc/fd.c b/fs/proc/fd.c
67803index 0788d09..9cc1385 100644
67804--- a/fs/proc/fd.c
67805+++ b/fs/proc/fd.c
67806@@ -26,7 +26,8 @@ static int seq_show(struct seq_file *m, void *v)
67807 if (!task)
67808 return -ENOENT;
67809
67810- files = get_files_struct(task);
67811+ if (!gr_acl_handle_procpidmem(task))
67812+ files = get_files_struct(task);
67813 put_task_struct(task);
67814
67815 if (files) {
67816@@ -285,11 +286,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
67817 */
67818 int proc_fd_permission(struct inode *inode, int mask)
67819 {
67820+ struct task_struct *task;
67821 int rv = generic_permission(inode, mask);
67822- if (rv == 0)
67823- return 0;
67824+
67825 if (task_tgid(current) == proc_pid(inode))
67826 rv = 0;
67827+
67828+ task = get_proc_task(inode);
67829+ if (task == NULL)
67830+ return rv;
67831+
67832+ if (gr_acl_handle_procpidmem(task))
67833+ rv = -EACCES;
67834+
67835+ put_task_struct(task);
67836+
67837 return rv;
67838 }
67839
67840diff --git a/fs/proc/generic.c b/fs/proc/generic.c
67841index b7f268e..3bea6b7 100644
67842--- a/fs/proc/generic.c
67843+++ b/fs/proc/generic.c
67844@@ -23,6 +23,7 @@
67845 #include <linux/bitops.h>
67846 #include <linux/spinlock.h>
67847 #include <linux/completion.h>
67848+#include <linux/grsecurity.h>
67849 #include <asm/uaccess.h>
67850
67851 #include "internal.h"
67852@@ -207,6 +208,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
67853 return proc_lookup_de(PDE(dir), dir, dentry);
67854 }
67855
67856+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
67857+ unsigned int flags)
67858+{
67859+ if (gr_proc_is_restricted())
67860+ return ERR_PTR(-EACCES);
67861+
67862+ return proc_lookup_de(PDE(dir), dir, dentry);
67863+}
67864+
67865 /*
67866 * This returns non-zero if at EOF, so that the /proc
67867 * root directory can use this and check if it should
67868@@ -264,6 +274,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
67869 return proc_readdir_de(PDE(inode), file, ctx);
67870 }
67871
67872+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
67873+{
67874+ struct inode *inode = file_inode(file);
67875+
67876+ if (gr_proc_is_restricted())
67877+ return -EACCES;
67878+
67879+ return proc_readdir_de(PDE(inode), file, ctx);
67880+}
67881+
67882 /*
67883 * These are the generic /proc directory operations. They
67884 * use the in-memory "struct proc_dir_entry" tree to parse
67885@@ -275,6 +295,12 @@ static const struct file_operations proc_dir_operations = {
67886 .iterate = proc_readdir,
67887 };
67888
67889+static const struct file_operations proc_dir_restricted_operations = {
67890+ .llseek = generic_file_llseek,
67891+ .read = generic_read_dir,
67892+ .iterate = proc_readdir_restrict,
67893+};
67894+
67895 /*
67896 * proc directories can do almost nothing..
67897 */
67898@@ -284,6 +310,12 @@ static const struct inode_operations proc_dir_inode_operations = {
67899 .setattr = proc_notify_change,
67900 };
67901
67902+static const struct inode_operations proc_dir_restricted_inode_operations = {
67903+ .lookup = proc_lookup_restrict,
67904+ .getattr = proc_getattr,
67905+ .setattr = proc_notify_change,
67906+};
67907+
67908 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
67909 {
67910 struct proc_dir_entry *tmp;
67911@@ -294,8 +326,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
67912 return ret;
67913
67914 if (S_ISDIR(dp->mode)) {
67915- dp->proc_fops = &proc_dir_operations;
67916- dp->proc_iops = &proc_dir_inode_operations;
67917+ if (dp->restricted) {
67918+ dp->proc_fops = &proc_dir_restricted_operations;
67919+ dp->proc_iops = &proc_dir_restricted_inode_operations;
67920+ } else {
67921+ dp->proc_fops = &proc_dir_operations;
67922+ dp->proc_iops = &proc_dir_inode_operations;
67923+ }
67924 dir->nlink++;
67925 } else if (S_ISLNK(dp->mode)) {
67926 dp->proc_iops = &proc_link_inode_operations;
67927@@ -407,6 +444,27 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
67928 }
67929 EXPORT_SYMBOL_GPL(proc_mkdir_data);
67930
67931+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
67932+ struct proc_dir_entry *parent, void *data)
67933+{
67934+ struct proc_dir_entry *ent;
67935+
67936+ if (mode == 0)
67937+ mode = S_IRUGO | S_IXUGO;
67938+
67939+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
67940+ if (ent) {
67941+ ent->data = data;
67942+ ent->restricted = 1;
67943+ if (proc_register(parent, ent) < 0) {
67944+ kfree(ent);
67945+ ent = NULL;
67946+ }
67947+ }
67948+ return ent;
67949+}
67950+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
67951+
67952 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
67953 struct proc_dir_entry *parent)
67954 {
67955@@ -421,6 +479,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
67956 }
67957 EXPORT_SYMBOL(proc_mkdir);
67958
67959+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
67960+ struct proc_dir_entry *parent)
67961+{
67962+ return proc_mkdir_data_restrict(name, 0, parent, NULL);
67963+}
67964+EXPORT_SYMBOL(proc_mkdir_restrict);
67965+
67966 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
67967 struct proc_dir_entry *parent,
67968 const struct file_operations *proc_fops,
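
Note: proc_mkdir_restrict()/proc_mkdir_data_restrict() above create directories whose restricted lookup/iterate ops fail with -EACCES once gr_proc_is_restricted() says so. A hedged usage sketch (the "example" name is illustrative):

static struct proc_dir_entry *example_dir;

static int __init example_init(void)
{
	/* entries under this directory become invisible to restricted
	 * callers via proc_lookup_restrict()/proc_readdir_restrict() */
	example_dir = proc_mkdir_restrict("example", NULL);
	return example_dir ? 0 : -ENOMEM;
}
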
67969diff --git a/fs/proc/inode.c b/fs/proc/inode.c
67970index 0adbc02..bee4d0b 100644
67971--- a/fs/proc/inode.c
67972+++ b/fs/proc/inode.c
67973@@ -23,11 +23,17 @@
67974 #include <linux/slab.h>
67975 #include <linux/mount.h>
67976 #include <linux/magic.h>
67977+#include <linux/grsecurity.h>
67978
67979 #include <asm/uaccess.h>
67980
67981 #include "internal.h"
67982
67983+#ifdef CONFIG_PROC_SYSCTL
67984+extern const struct inode_operations proc_sys_inode_operations;
67985+extern const struct inode_operations proc_sys_dir_operations;
67986+#endif
67987+
67988 static void proc_evict_inode(struct inode *inode)
67989 {
67990 struct proc_dir_entry *de;
67991@@ -55,6 +61,13 @@ static void proc_evict_inode(struct inode *inode)
67992 ns = PROC_I(inode)->ns.ns;
67993 if (ns_ops && ns)
67994 ns_ops->put(ns);
67995+
67996+#ifdef CONFIG_PROC_SYSCTL
67997+ if (inode->i_op == &proc_sys_inode_operations ||
67998+ inode->i_op == &proc_sys_dir_operations)
67999+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
68000+#endif
68001+
68002 }
68003
68004 static struct kmem_cache * proc_inode_cachep;
68005@@ -413,7 +426,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
68006 if (de->mode) {
68007 inode->i_mode = de->mode;
68008 inode->i_uid = de->uid;
68009+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
68010+ inode->i_gid = grsec_proc_gid;
68011+#else
68012 inode->i_gid = de->gid;
68013+#endif
68014 }
68015 if (de->size)
68016 inode->i_size = de->size;
68017diff --git a/fs/proc/internal.h b/fs/proc/internal.h
68018index 3ab6d14..b26174e 100644
68019--- a/fs/proc/internal.h
68020+++ b/fs/proc/internal.h
68021@@ -46,9 +46,10 @@ struct proc_dir_entry {
68022 struct completion *pde_unload_completion;
68023 struct list_head pde_openers; /* who did ->open, but not ->release */
68024 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
68025+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
68026 u8 namelen;
68027 char name[];
68028-};
68029+} __randomize_layout;
68030
68031 union proc_op {
68032 int (*proc_get_link)(struct dentry *, struct path *);
68033@@ -67,7 +68,7 @@ struct proc_inode {
68034 struct ctl_table *sysctl_entry;
68035 struct proc_ns ns;
68036 struct inode vfs_inode;
68037-};
68038+} __randomize_layout;
68039
68040 /*
68041 * General functions
68042@@ -155,6 +156,9 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
68043 struct pid *, struct task_struct *);
68044 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
68045 struct pid *, struct task_struct *);
68046+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
68047+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
68048+#endif
68049
68050 /*
68051 * base.c
68052@@ -181,9 +185,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
68053 extern spinlock_t proc_subdir_lock;
68054
68055 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
68056+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
68057 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
68058 struct dentry *);
68059 extern int proc_readdir(struct file *, struct dir_context *);
68060+extern int proc_readdir_restrict(struct file *, struct dir_context *);
68061 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
68062
68063 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
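
Note on the __randomize_layout annotations above: under the structure-layout randomization plugin the member order of proc_dir_entry and proc_inode is shuffled per build, so an exploit cannot hard-code field offsets into these structures. A sketch of the fallback definition, assuming the usual compiler.h plumbing added elsewhere in this patch:

#ifndef __randomize_layout
#define __randomize_layout	/* no-op when the randomization plugin is absent */
#endif
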
68064diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
68065index a352d57..cb94a5c 100644
68066--- a/fs/proc/interrupts.c
68067+++ b/fs/proc/interrupts.c
68068@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
68069
68070 static int __init proc_interrupts_init(void)
68071 {
68072+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68073+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
68074+#else
68075 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
68076+#endif
68077 return 0;
68078 }
68079 fs_initcall(proc_interrupts_init);
68080diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
68081index 39e6ef3..2f9cb5e 100644
68082--- a/fs/proc/kcore.c
68083+++ b/fs/proc/kcore.c
68084@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
68085 * the addresses in the elf_phdr on our list.
68086 */
68087 start = kc_offset_to_vaddr(*fpos - elf_buflen);
68088- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
68089+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
68090+ if (tsz > buflen)
68091 tsz = buflen;
68092-
68093+
68094 while (buflen) {
68095 struct kcore_list *m;
68096
68097@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
68098 kfree(elf_buf);
68099 } else {
68100 if (kern_addr_valid(start)) {
68101- unsigned long n;
68102+ char *elf_buf;
68103+ mm_segment_t oldfs;
68104
68105- n = copy_to_user(buffer, (char *)start, tsz);
68106- /*
68107- * We cannot distinguish between fault on source
68108- * and fault on destination. When this happens
68109- * we clear too and hope it will trigger the
68110- * EFAULT again.
68111- */
68112- if (n) {
68113- if (clear_user(buffer + tsz - n,
68114- n))
68115+ elf_buf = kmalloc(tsz, GFP_KERNEL);
68116+ if (!elf_buf)
68117+ return -ENOMEM;
68118+ oldfs = get_fs();
68119+ set_fs(KERNEL_DS);
68120+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
68121+ set_fs(oldfs);
68122+ if (copy_to_user(buffer, elf_buf, tsz)) {
68123+ kfree(elf_buf);
68124 return -EFAULT;
68125+ }
68126 }
68127+ set_fs(oldfs);
68128+ kfree(elf_buf);
68129 } else {
68130 if (clear_user(buffer, tsz))
68131 return -EFAULT;
68132@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
68133
68134 static int open_kcore(struct inode *inode, struct file *filp)
68135 {
68136+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
68137+ return -EPERM;
68138+#endif
68139 if (!capable(CAP_SYS_RAWIO))
68140 return -EPERM;
68141 if (kcore_need_update)
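
Note on the read_kcore() rewrite above: copying straight from an arbitrary kernel virtual address to user space cannot tell a fault on the source apart from one on the destination (the deleted comment said as much). The new code bounces through a kmalloc() buffer: __copy_from_user() under KERNEL_DS probes the kernel-side source first, then copy_to_user() moves the validated bytes out. Condensed, the pattern is:

char *bounce = kmalloc(tsz, GFP_KERNEL);
mm_segment_t oldfs;
int err = 0;

if (!bounce)
	return -ENOMEM;
oldfs = get_fs();
set_fs(KERNEL_DS);		/* let the *_user accessors take kernel addresses */
if (!__copy_from_user(bounce, (const void __user *)start, tsz) &&
    copy_to_user(buffer, bounce, tsz))
	err = -EFAULT;		/* destination faulted; source faults are skipped */
set_fs(oldfs);			/* always restore the old segment limit */
kfree(bounce);
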
68142diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
68143index 7445af0..7c5113c 100644
68144--- a/fs/proc/meminfo.c
68145+++ b/fs/proc/meminfo.c
68146@@ -187,7 +187,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
68147 vmi.used >> 10,
68148 vmi.largest_chunk >> 10
68149 #ifdef CONFIG_MEMORY_FAILURE
68150- ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
68151+ ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
68152 #endif
68153 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
68154 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
68155diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
68156index d4a3574..b421ce9 100644
68157--- a/fs/proc/nommu.c
68158+++ b/fs/proc/nommu.c
68159@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
68160
68161 if (file) {
68162 seq_pad(m, ' ');
68163- seq_path(m, &file->f_path, "");
68164+ seq_path(m, &file->f_path, "\n\\");
68165 }
68166
68167 seq_putc(m, '\n');
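
Note on the seq_path() escape-set change above (and the matching ones in task_mmu.c and task_nommu.c below): seq_path() octal-escapes any byte listed in its third argument. With "" nothing is escaped, so a pathname containing a newline could inject a fake record into the maps-style output; with "\n\\" a name like "a\nb" stays on one line:

seq_pad(m, ' ');
seq_path(m, &file->f_path, "\n\\");	/* '\n' and '\\' render as \ooo, e.g. a\012b */
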
68168diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
68169index 4677bb7..dad3045 100644
68170--- a/fs/proc/proc_net.c
68171+++ b/fs/proc/proc_net.c
68172@@ -23,9 +23,27 @@
68173 #include <linux/nsproxy.h>
68174 #include <net/net_namespace.h>
68175 #include <linux/seq_file.h>
68176+#include <linux/grsecurity.h>
68177
68178 #include "internal.h"
68179
68180+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
68181+static struct seq_operations *ipv6_seq_ops_addr;
68182+
68183+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
68184+{
68185+ ipv6_seq_ops_addr = addr;
68186+}
68187+
68188+void unregister_ipv6_seq_ops_addr(void)
68189+{
68190+ ipv6_seq_ops_addr = NULL;
68191+}
68192+
68193+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
68194+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
68195+#endif
68196+
68197 static inline struct net *PDE_NET(struct proc_dir_entry *pde)
68198 {
68199 return pde->parent->data;
68200@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
68201 return maybe_get_net(PDE_NET(PDE(inode)));
68202 }
68203
68204+extern const struct seq_operations dev_seq_ops;
68205+
68206 int seq_open_net(struct inode *ino, struct file *f,
68207 const struct seq_operations *ops, int size)
68208 {
68209@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
68210
68211 BUG_ON(size < sizeof(*p));
68212
68213+ /* only permit access to /proc/net/dev */
68214+ if (
68215+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
68216+ ops != ipv6_seq_ops_addr &&
68217+#endif
68218+ ops != &dev_seq_ops && gr_proc_is_restricted())
68219+ return -EACCES;
68220+
68221 net = get_proc_net(ino);
68222 if (net == NULL)
68223 return -ENXIO;
68224@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
68225 int err;
68226 struct net *net;
68227
68228+ if (gr_proc_is_restricted())
68229+ return -EACCES;
68230+
68231 err = -ENXIO;
68232 net = get_proc_net(inode);
68233 if (net == NULL)
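
Note: with the hunks above, every /proc/net seq_file open is vetoed by gr_proc_is_restricted() unless its seq_operations are the whitelisted dev_seq_ops (or the registered IPv6 address ops). This is observable from user space; a small probe, assuming a hardened kernel and an unprivileged caller:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static void try_open(const char *path)
{
	FILE *f = fopen(path, "r");

	if (!f) {
		printf("%-16s -> %s\n", path, strerror(errno));
		return;
	}
	printf("%-16s -> readable\n", path);
	fclose(f);
}

int main(void)
{
	try_open("/proc/net/dev");	/* whitelisted: should stay readable */
	try_open("/proc/net/tcp");	/* expected: EACCES when restricted */
	return 0;
}
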
68234diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
68235index 7129046..6914844 100644
68236--- a/fs/proc/proc_sysctl.c
68237+++ b/fs/proc/proc_sysctl.c
68238@@ -11,13 +11,21 @@
68239 #include <linux/namei.h>
68240 #include <linux/mm.h>
68241 #include <linux/module.h>
68242+#include <linux/nsproxy.h>
68243+#ifdef CONFIG_GRKERNSEC
68244+#include <net/net_namespace.h>
68245+#endif
68246 #include "internal.h"
68247
68248+extern int gr_handle_chroot_sysctl(const int op);
68249+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
68250+ const int op);
68251+
68252 static const struct dentry_operations proc_sys_dentry_operations;
68253 static const struct file_operations proc_sys_file_operations;
68254-static const struct inode_operations proc_sys_inode_operations;
68255+const struct inode_operations proc_sys_inode_operations;
68256 static const struct file_operations proc_sys_dir_file_operations;
68257-static const struct inode_operations proc_sys_dir_operations;
68258+const struct inode_operations proc_sys_dir_operations;
68259
68260 void proc_sys_poll_notify(struct ctl_table_poll *poll)
68261 {
68262@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
68263
68264 err = NULL;
68265 d_set_d_op(dentry, &proc_sys_dentry_operations);
68266+
68267+ gr_handle_proc_create(dentry, inode);
68268+
68269 d_add(dentry, inode);
68270
68271 out:
68272@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
68273 struct inode *inode = file_inode(filp);
68274 struct ctl_table_header *head = grab_header(inode);
68275 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
68276+ int op = write ? MAY_WRITE : MAY_READ;
68277 ssize_t error;
68278 size_t res;
68279
68280@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
68281 * and won't be until we finish.
68282 */
68283 error = -EPERM;
68284- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
68285+ if (sysctl_perm(head, table, op))
68286 goto out;
68287
68288 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
68289@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
68290 if (!table->proc_handler)
68291 goto out;
68292
68293+#ifdef CONFIG_GRKERNSEC
68294+ error = -EPERM;
68295+ if (gr_handle_chroot_sysctl(op))
68296+ goto out;
68297+ dget(filp->f_path.dentry);
68298+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
68299+ dput(filp->f_path.dentry);
68300+ goto out;
68301+ }
68302+ dput(filp->f_path.dentry);
68303+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
68304+ goto out;
68305+ if (write) {
68306+ if (current->nsproxy->net_ns != table->extra2) {
68307+ if (!capable(CAP_SYS_ADMIN))
68308+ goto out;
68309+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
68310+ goto out;
68311+ }
68312+#endif
68313+
68314 /* careful: calling conventions are nasty here */
68315 res = count;
68316 error = table->proc_handler(table, write, buf, &res, ppos);
68317@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
68318 return false;
68319 } else {
68320 d_set_d_op(child, &proc_sys_dentry_operations);
68321+
68322+ gr_handle_proc_create(child, inode);
68323+
68324 d_add(child, inode);
68325 }
68326 } else {
68327@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
68328 if ((*pos)++ < ctx->pos)
68329 return true;
68330
68331+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
68332+ return 0;
68333+
68334 if (unlikely(S_ISLNK(table->mode)))
68335 res = proc_sys_link_fill_cache(file, ctx, head, table);
68336 else
68337@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
68338 if (IS_ERR(head))
68339 return PTR_ERR(head);
68340
68341+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
68342+ return -ENOENT;
68343+
68344 generic_fillattr(inode, stat);
68345 if (table)
68346 stat->mode = (stat->mode & S_IFMT) | table->mode;
68347@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
68348 .llseek = generic_file_llseek,
68349 };
68350
68351-static const struct inode_operations proc_sys_inode_operations = {
68352+const struct inode_operations proc_sys_inode_operations = {
68353 .permission = proc_sys_permission,
68354 .setattr = proc_sys_setattr,
68355 .getattr = proc_sys_getattr,
68356 };
68357
68358-static const struct inode_operations proc_sys_dir_operations = {
68359+const struct inode_operations proc_sys_dir_operations = {
68360 .lookup = proc_sys_lookup,
68361 .permission = proc_sys_permission,
68362 .setattr = proc_sys_setattr,
68363@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
68364 static struct ctl_dir *new_dir(struct ctl_table_set *set,
68365 const char *name, int namelen)
68366 {
68367- struct ctl_table *table;
68368+ ctl_table_no_const *table;
68369 struct ctl_dir *new;
68370 struct ctl_node *node;
68371 char *new_name;
68372@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
68373 return NULL;
68374
68375 node = (struct ctl_node *)(new + 1);
68376- table = (struct ctl_table *)(node + 1);
68377+ table = (ctl_table_no_const *)(node + 1);
68378 new_name = (char *)(table + 2);
68379 memcpy(new_name, name, namelen);
68380 new_name[namelen] = '\0';
68381@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
68382 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
68383 struct ctl_table_root *link_root)
68384 {
68385- struct ctl_table *link_table, *entry, *link;
68386+ ctl_table_no_const *link_table, *link;
68387+ struct ctl_table *entry;
68388 struct ctl_table_header *links;
68389 struct ctl_node *node;
68390 char *link_name;
68391@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
68392 return NULL;
68393
68394 node = (struct ctl_node *)(links + 1);
68395- link_table = (struct ctl_table *)(node + nr_entries);
68396+ link_table = (ctl_table_no_const *)(node + nr_entries);
68397 link_name = (char *)&link_table[nr_entries + 1];
68398
68399 for (link = link_table, entry = table; entry->procname; link++, entry++) {
68400@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
68401 struct ctl_table_header ***subheader, struct ctl_table_set *set,
68402 struct ctl_table *table)
68403 {
68404- struct ctl_table *ctl_table_arg = NULL;
68405- struct ctl_table *entry, *files;
68406+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
68407+ struct ctl_table *entry;
68408 int nr_files = 0;
68409 int nr_dirs = 0;
68410 int err = -ENOMEM;
68411@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
68412 nr_files++;
68413 }
68414
68415- files = table;
68416 /* If there are mixed files and directories we need a new table */
68417 if (nr_dirs && nr_files) {
68418- struct ctl_table *new;
68419+ ctl_table_no_const *new;
68420 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
68421 GFP_KERNEL);
68422 if (!files)
68423@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
68424 /* Register everything except a directory full of subdirectories */
68425 if (nr_files || !nr_dirs) {
68426 struct ctl_table_header *header;
68427- header = __register_sysctl_table(set, path, files);
68428+ header = __register_sysctl_table(set, path, files ? files : table);
68429 if (!header) {
68430 kfree(ctl_table_arg);
68431 goto out;
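
Note on ctl_table_no_const above: with the PaX constify plugin, ops-like structures such as ctl_table become implicitly const, so the few places that genuinely build tables at runtime (new_dir(), new_links(), register_leaf_sysctl_tables()) switch their local pointers to a non-const typedef. A sketch of that typedef, assuming the definition this patch adds to sysctl.h:

typedef struct ctl_table __no_const ctl_table_no_const;	/* opts back out of constification */
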
68432diff --git a/fs/proc/root.c b/fs/proc/root.c
68433index 5dbadec..473af2f 100644
68434--- a/fs/proc/root.c
68435+++ b/fs/proc/root.c
68436@@ -185,7 +185,15 @@ void __init proc_root_init(void)
68437 proc_mkdir("openprom", NULL);
68438 #endif
68439 proc_tty_init();
68440+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68441+#ifdef CONFIG_GRKERNSEC_PROC_USER
68442+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
68443+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68444+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
68445+#endif
68446+#else
68447 proc_mkdir("bus", NULL);
68448+#endif
68449 proc_sys_init();
68450 }
68451
68452diff --git a/fs/proc/stat.c b/fs/proc/stat.c
68453index bf2d03f..f058f9c 100644
68454--- a/fs/proc/stat.c
68455+++ b/fs/proc/stat.c
68456@@ -11,6 +11,7 @@
68457 #include <linux/irqnr.h>
68458 #include <linux/cputime.h>
68459 #include <linux/tick.h>
68460+#include <linux/grsecurity.h>
68461
68462 #ifndef arch_irq_stat_cpu
68463 #define arch_irq_stat_cpu(cpu) 0
68464@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
68465 u64 sum_softirq = 0;
68466 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
68467 struct timespec boottime;
68468+ int unrestricted = 1;
68469+
68470+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68471+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68472+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
68473+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
68474+ && !in_group_p(grsec_proc_gid)
68475+#endif
68476+ )
68477+ unrestricted = 0;
68478+#endif
68479+#endif
68480
68481 user = nice = system = idle = iowait =
68482 irq = softirq = steal = 0;
68483@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
68484 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
68485 system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
68486 idle += get_idle_time(i);
68487- iowait += get_iowait_time(i);
68488- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
68489- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
68490- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
68491- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
68492- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
68493- sum += kstat_cpu_irqs_sum(i);
68494- sum += arch_irq_stat_cpu(i);
68495+ if (unrestricted) {
68496+ iowait += get_iowait_time(i);
68497+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
68498+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
68499+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
68500+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
68501+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
68502+ sum += kstat_cpu_irqs_sum(i);
68503+ sum += arch_irq_stat_cpu(i);
68504+ for (j = 0; j < NR_SOFTIRQS; j++) {
68505+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
68506
68507- for (j = 0; j < NR_SOFTIRQS; j++) {
68508- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
68509-
68510- per_softirq_sums[j] += softirq_stat;
68511- sum_softirq += softirq_stat;
68512+ per_softirq_sums[j] += softirq_stat;
68513+ sum_softirq += softirq_stat;
68514+ }
68515 }
68516 }
68517- sum += arch_irq_stat();
68518+ if (unrestricted)
68519+ sum += arch_irq_stat();
68520
68521 seq_puts(p, "cpu ");
68522 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
68523@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
68524 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
68525 system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
68526 idle = get_idle_time(i);
68527- iowait = get_iowait_time(i);
68528- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
68529- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
68530- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
68531- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
68532- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
68533+ if (unrestricted) {
68534+ iowait = get_iowait_time(i);
68535+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
68536+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
68537+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
68538+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
68539+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
68540+ }
68541 seq_printf(p, "cpu%d", i);
68542 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
68543 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
68544@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
68545
68546 /* sum again ? it could be updated? */
68547 for_each_irq_nr(j)
68548- seq_put_decimal_ull(p, ' ', kstat_irqs(j));
68549+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs(j) : 0ULL);
68550
68551 seq_printf(p,
68552 "\nctxt %llu\n"
68553@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
68554 "processes %lu\n"
68555 "procs_running %lu\n"
68556 "procs_blocked %lu\n",
68557- nr_context_switches(),
68558+ unrestricted ? nr_context_switches() : 0ULL,
68559 (unsigned long)jif,
68560- total_forks,
68561- nr_running(),
68562- nr_iowait());
68563+ unrestricted ? total_forks : 0UL,
68564+ unrestricted ? nr_running() : 0UL,
68565+ unrestricted ? nr_iowait() : 0UL);
68566
68567 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
68568
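
Note: the /proc/stat hunks zero the sensitive counters for unauthorized readers instead of hiding the file, so parsers keep seeing the same column layout. The access test from the hunks above, pulled together in one place:

static int show_stat_unrestricted(void)
{
#ifdef CONFIG_GRKERNSEC_PROC_ADD
#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
	if (uid_eq(current_uid(), GLOBAL_ROOT_UID))
		return 1;
#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
	if (in_group_p(grsec_proc_gid))
		return 1;	/* members of the configured proc group */
#endif
	return 0;
#endif
#endif
	return 1;		/* no restriction configured */
}
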
68569diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
68570index cfa63ee..fce112e 100644
68571--- a/fs/proc/task_mmu.c
68572+++ b/fs/proc/task_mmu.c
68573@@ -13,12 +13,19 @@
68574 #include <linux/swap.h>
68575 #include <linux/swapops.h>
68576 #include <linux/mmu_notifier.h>
68577+#include <linux/grsecurity.h>
68578
68579 #include <asm/elf.h>
68580 #include <asm/uaccess.h>
68581 #include <asm/tlbflush.h>
68582 #include "internal.h"
68583
68584+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68585+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
68586+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
68587+ _mm->pax_flags & MF_PAX_SEGMEXEC))
68588+#endif
68589+
68590 void task_mem(struct seq_file *m, struct mm_struct *mm)
68591 {
68592 unsigned long data, text, lib, swap;
68593@@ -54,8 +61,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
68594 "VmExe:\t%8lu kB\n"
68595 "VmLib:\t%8lu kB\n"
68596 "VmPTE:\t%8lu kB\n"
68597- "VmSwap:\t%8lu kB\n",
68598- hiwater_vm << (PAGE_SHIFT-10),
68599+ "VmSwap:\t%8lu kB\n"
68600+
68601+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
68602+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
68603+#endif
68604+
68605+ ,hiwater_vm << (PAGE_SHIFT-10),
68606 total_vm << (PAGE_SHIFT-10),
68607 mm->locked_vm << (PAGE_SHIFT-10),
68608 mm->pinned_vm << (PAGE_SHIFT-10),
68609@@ -65,7 +77,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
68610 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
68611 (PTRS_PER_PTE * sizeof(pte_t) *
68612 atomic_long_read(&mm->nr_ptes)) >> 10,
68613- swap << (PAGE_SHIFT-10));
68614+ swap << (PAGE_SHIFT-10)
68615+
68616+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
68617+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68618+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
68619+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
68620+#else
68621+ , mm->context.user_cs_base
68622+ , mm->context.user_cs_limit
68623+#endif
68624+#endif
68625+
68626+ );
68627 }
68628
68629 unsigned long task_vsize(struct mm_struct *mm)
68630@@ -271,13 +295,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
68631 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
68632 }
68633
68634- /* We don't show the stack guard page in /proc/maps */
68635+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68636+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
68637+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
68638+#else
68639 start = vma->vm_start;
68640- if (stack_guard_page_start(vma, start))
68641- start += PAGE_SIZE;
68642 end = vma->vm_end;
68643- if (stack_guard_page_end(vma, end))
68644- end -= PAGE_SIZE;
68645+#endif
68646
68647 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
68648 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
68649@@ -287,7 +311,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
68650 flags & VM_WRITE ? 'w' : '-',
68651 flags & VM_EXEC ? 'x' : '-',
68652 flags & VM_MAYSHARE ? 's' : 'p',
68653+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68654+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
68655+#else
68656 pgoff,
68657+#endif
68658 MAJOR(dev), MINOR(dev), ino);
68659
68660 /*
68661@@ -296,7 +324,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
68662 */
68663 if (file) {
68664 seq_pad(m, ' ');
68665- seq_path(m, &file->f_path, "\n");
68666+ seq_path(m, &file->f_path, "\n\\");
68667 goto done;
68668 }
68669
68670@@ -328,8 +356,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
68671 * Thread stack in /proc/PID/task/TID/maps or
68672 * the main process stack.
68673 */
68674- if (!is_pid || (vma->vm_start <= mm->start_stack &&
68675- vma->vm_end >= mm->start_stack)) {
68676+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
68677+ (vma->vm_start <= mm->start_stack &&
68678+ vma->vm_end >= mm->start_stack)) {
68679 name = "[stack]";
68680 } else {
68681 /* Thread stack in /proc/PID/maps */
68682@@ -353,6 +382,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
68683 struct proc_maps_private *priv = m->private;
68684 struct task_struct *task = priv->task;
68685
68686+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68687+ if (current->exec_id != m->exec_id) {
68688+ gr_log_badprocpid("maps");
68689+ return 0;
68690+ }
68691+#endif
68692+
68693 show_map_vma(m, vma, is_pid);
68694
68695 if (m->count < m->size) /* vma is copied successfully */
68696@@ -593,12 +629,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
68697 .private = &mss,
68698 };
68699
68700+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68701+ if (current->exec_id != m->exec_id) {
68702+ gr_log_badprocpid("smaps");
68703+ return 0;
68704+ }
68705+#endif
68706 memset(&mss, 0, sizeof mss);
68707- mss.vma = vma;
68708- /* mmap_sem is held in m_start */
68709- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
68710- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
68711-
68712+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68713+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
68714+#endif
68715+ mss.vma = vma;
68716+ /* mmap_sem is held in m_start */
68717+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
68718+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
68719+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68720+ }
68721+#endif
68722 show_map_vma(m, vma, is_pid);
68723
68724 seq_printf(m,
68725@@ -616,7 +663,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
68726 "KernelPageSize: %8lu kB\n"
68727 "MMUPageSize: %8lu kB\n"
68728 "Locked: %8lu kB\n",
68729+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68730+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
68731+#else
68732 (vma->vm_end - vma->vm_start) >> 10,
68733+#endif
68734 mss.resident >> 10,
68735 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
68736 mss.shared_clean >> 10,
68737@@ -1398,6 +1449,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
68738 char buffer[64];
68739 int nid;
68740
68741+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68742+ if (current->exec_id != m->exec_id) {
68743+ gr_log_badprocpid("numa_maps");
68744+ return 0;
68745+ }
68746+#endif
68747+
68748 if (!mm)
68749 return 0;
68750
68751@@ -1415,11 +1473,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
68752 mpol_to_str(buffer, sizeof(buffer), pol);
68753 mpol_cond_put(pol);
68754
68755+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68756+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
68757+#else
68758 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
68759+#endif
68760
68761 if (file) {
68762 seq_puts(m, " file=");
68763- seq_path(m, &file->f_path, "\n\t= ");
68764+ seq_path(m, &file->f_path, "\n\t\\= ");
68765 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
68766 seq_puts(m, " heap");
68767 } else {
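
Note on the task_mmu.c hunks above: two independent leak controls. PAX_RAND_FLAGS() blanks addresses and offsets for another task's mm when that mm is ASLR-protected (RANDMMAP/SEGMEXEC), and the exec_id comparison stops a stale fd from reading a freshly exec'd image's layout; seq_open() records the opener's generation (see the fs/seq_file.c hunk below). The gate itself:

#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
	if (current->exec_id != m->exec_id) {
		gr_log_badprocpid("maps");	/* log the attempt, show nothing */
		return 0;
	}
#endif
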
68768diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
68769index 678455d..ebd3245 100644
68770--- a/fs/proc/task_nommu.c
68771+++ b/fs/proc/task_nommu.c
68772@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
68773 else
68774 bytes += kobjsize(mm);
68775
68776- if (current->fs && current->fs->users > 1)
68777+ if (current->fs && atomic_read(&current->fs->users) > 1)
68778 sbytes += kobjsize(current->fs);
68779 else
68780 bytes += kobjsize(current->fs);
68781@@ -161,7 +161,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
68782
68783 if (file) {
68784 seq_pad(m, ' ');
68785- seq_path(m, &file->f_path, "");
68786+ seq_path(m, &file->f_path, "\n\\");
68787 } else if (mm) {
68788 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
68789
68790diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
68791index 382aa89..6b03974 100644
68792--- a/fs/proc/vmcore.c
68793+++ b/fs/proc/vmcore.c
68794@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
68795 nr_bytes = count;
68796
68797 /* If pfn is not ram, return zeros for sparse dump files */
68798- if (pfn_is_ram(pfn) == 0)
68799- memset(buf, 0, nr_bytes);
68800- else {
68801+ if (pfn_is_ram(pfn) == 0) {
68802+ if (userbuf) {
68803+ if (clear_user((char __force_user *)buf, nr_bytes))
68804+ return -EFAULT;
68805+ } else
68806+ memset(buf, 0, nr_bytes);
68807+ } else {
68808 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
68809 offset, userbuf);
68810 if (tmp < 0)
68811@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
68812 static int copy_to(void *target, void *src, size_t size, int userbuf)
68813 {
68814 if (userbuf) {
68815- if (copy_to_user((char __user *) target, src, size))
68816+ if (copy_to_user((char __force_user *) target, src, size))
68817 return -EFAULT;
68818 } else {
68819 memcpy(target, src, size);
68820@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
68821 if (*fpos < m->offset + m->size) {
68822 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
68823 start = m->paddr + *fpos - m->offset;
68824- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
68825+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
68826 if (tmp < 0)
68827 return tmp;
68828 buflen -= tsz;
68829@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
68830 static ssize_t read_vmcore(struct file *file, char __user *buffer,
68831 size_t buflen, loff_t *fpos)
68832 {
68833- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
68834+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
68835 }
68836
68837 /*
68838diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
68839index b00fcc9..e0c6381 100644
68840--- a/fs/qnx6/qnx6.h
68841+++ b/fs/qnx6/qnx6.h
68842@@ -74,7 +74,7 @@ enum {
68843 BYTESEX_BE,
68844 };
68845
68846-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
68847+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
68848 {
68849 if (sbi->s_bytesex == BYTESEX_LE)
68850 return le64_to_cpu((__force __le64)n);
68851@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
68852 return (__force __fs64)cpu_to_be64(n);
68853 }
68854
68855-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
68856+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
68857 {
68858 if (sbi->s_bytesex == BYTESEX_LE)
68859 return le32_to_cpu((__force __le32)n);
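
Note: __intentional_overflow(-1) here (and on the sysv and ubifs helpers below) tells the size_overflow plugin to skip its arithmetic-overflow instrumentation for the whole function; endianness converters reinterpret raw on-disk words and would otherwise trip it. A sketch of the fallback, assuming the usual compiler.h plumbing:

#ifndef __intentional_overflow
#define __intentional_overflow(...)	/* no-op without the size_overflow plugin */
#endif
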
68860diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
68861index 72d2917..c917c12 100644
68862--- a/fs/quota/netlink.c
68863+++ b/fs/quota/netlink.c
68864@@ -45,7 +45,7 @@ static struct genl_family quota_genl_family = {
68865 void quota_send_warning(struct kqid qid, dev_t dev,
68866 const char warntype)
68867 {
68868- static atomic_t seq;
68869+ static atomic_unchecked_t seq;
68870 struct sk_buff *skb;
68871 void *msg_head;
68872 int ret;
68873@@ -61,7 +61,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
68874 "VFS: Not enough memory to send quota warning.\n");
68875 return;
68876 }
68877- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
68878+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
68879 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
68880 if (!msg_head) {
68881 printk(KERN_ERR
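
Note on atomic_unchecked_t above (and in the reiserfs hunks below): with PAX_REFCOUNT, plain atomic_t operations detect signed overflow so reference counts cannot wrap; the _unchecked variants keep ordinary wrapping arithmetic for counters where a wrap is harmless by design, like this netlink sequence number or a filesystem generation counter. A hedged sketch:

static atomic_unchecked_t example_seq = ATOMIC_INIT(0);

static u32 next_msg_seq(void)
{
	/* wrapping is fine for a message sequence number */
	return atomic_add_return_unchecked(1, &example_seq);
}
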
68882diff --git a/fs/read_write.c b/fs/read_write.c
68883index 009d854..16ce214 100644
68884--- a/fs/read_write.c
68885+++ b/fs/read_write.c
68886@@ -495,7 +495,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
68887
68888 old_fs = get_fs();
68889 set_fs(get_ds());
68890- p = (__force const char __user *)buf;
68891+ p = (const char __force_user *)buf;
68892 if (count > MAX_RW_COUNT)
68893 count = MAX_RW_COUNT;
68894 if (file->f_op->write)
68895diff --git a/fs/readdir.c b/fs/readdir.c
68896index 33fd922..e0d6094 100644
68897--- a/fs/readdir.c
68898+++ b/fs/readdir.c
68899@@ -18,6 +18,7 @@
68900 #include <linux/security.h>
68901 #include <linux/syscalls.h>
68902 #include <linux/unistd.h>
68903+#include <linux/namei.h>
68904
68905 #include <asm/uaccess.h>
68906
68907@@ -71,6 +72,7 @@ struct old_linux_dirent {
68908 struct readdir_callback {
68909 struct dir_context ctx;
68910 struct old_linux_dirent __user * dirent;
68911+ struct file * file;
68912 int result;
68913 };
68914
68915@@ -88,6 +90,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
68916 buf->result = -EOVERFLOW;
68917 return -EOVERFLOW;
68918 }
68919+
68920+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68921+ return 0;
68922+
68923 buf->result++;
68924 dirent = buf->dirent;
68925 if (!access_ok(VERIFY_WRITE, dirent,
68926@@ -119,6 +125,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
68927 if (!f.file)
68928 return -EBADF;
68929
68930+ buf.file = f.file;
68931 error = iterate_dir(f.file, &buf.ctx);
68932 if (buf.result)
68933 error = buf.result;
68934@@ -144,6 +151,7 @@ struct getdents_callback {
68935 struct dir_context ctx;
68936 struct linux_dirent __user * current_dir;
68937 struct linux_dirent __user * previous;
68938+ struct file * file;
68939 int count;
68940 int error;
68941 };
68942@@ -165,6 +173,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
68943 buf->error = -EOVERFLOW;
68944 return -EOVERFLOW;
68945 }
68946+
68947+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68948+ return 0;
68949+
68950 dirent = buf->previous;
68951 if (dirent) {
68952 if (__put_user(offset, &dirent->d_off))
68953@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
68954 if (!f.file)
68955 return -EBADF;
68956
68957+ buf.file = f.file;
68958 error = iterate_dir(f.file, &buf.ctx);
68959 if (error >= 0)
68960 error = buf.error;
68961@@ -228,6 +241,7 @@ struct getdents_callback64 {
68962 struct dir_context ctx;
68963 struct linux_dirent64 __user * current_dir;
68964 struct linux_dirent64 __user * previous;
68965+ struct file *file;
68966 int count;
68967 int error;
68968 };
68969@@ -243,6 +257,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
68970 buf->error = -EINVAL; /* only used if we fail.. */
68971 if (reclen > buf->count)
68972 return -EINVAL;
68973+
68974+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68975+ return 0;
68976+
68977 dirent = buf->previous;
68978 if (dirent) {
68979 if (__put_user(offset, &dirent->d_off))
68980@@ -290,6 +308,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
68981 if (!f.file)
68982 return -EBADF;
68983
68984+ buf.file = f.file;
68985 error = iterate_dir(f.file, &buf.ctx);
68986 if (error >= 0)
68987 error = buf.error;
68988diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
68989index 4d5e529..ccdbc84 100644
68990--- a/fs/reiserfs/do_balan.c
68991+++ b/fs/reiserfs/do_balan.c
68992@@ -1887,7 +1887,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
68993 return;
68994 }
68995
68996- atomic_inc(&fs_generation(tb->tb_sb));
68997+ atomic_inc_unchecked(&fs_generation(tb->tb_sb));
68998 do_balance_starts(tb);
68999
69000 /*
69001diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
69002index cfaee91..b9d0d60 100644
69003--- a/fs/reiserfs/item_ops.c
69004+++ b/fs/reiserfs/item_ops.c
69005@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
69006 }
69007
69008 static struct item_operations errcatch_ops = {
69009- errcatch_bytes_number,
69010- errcatch_decrement_key,
69011- errcatch_is_left_mergeable,
69012- errcatch_print_item,
69013- errcatch_check_item,
69014+ .bytes_number = errcatch_bytes_number,
69015+ .decrement_key = errcatch_decrement_key,
69016+ .is_left_mergeable = errcatch_is_left_mergeable,
69017+ .print_item = errcatch_print_item,
69018+ .check_item = errcatch_check_item,
69019
69020- errcatch_create_vi,
69021- errcatch_check_left,
69022- errcatch_check_right,
69023- errcatch_part_size,
69024- errcatch_unit_num,
69025- errcatch_print_vi
69026+ .create_vi = errcatch_create_vi,
69027+ .check_left = errcatch_check_left,
69028+ .check_right = errcatch_check_right,
69029+ .part_size = errcatch_part_size,
69030+ .unit_num = errcatch_unit_num,
69031+ .print_vi = errcatch_print_vi
69032 };
69033
69034 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
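
Note: the errcatch_ops change above is purely syntactic. Positional initializers bind by field order; designated ones bind by name, which is what keeps such tables correct when a structure's layout is randomized (cf. the __randomize_layout annotations earlier in this patch). A standalone illustration:

#include <stdio.h>

struct ops {
	int (*add)(int, int);
	int (*sub)(int, int);
};

static int add(int a, int b) { return a + b; }
static int sub(int a, int b) { return a - b; }

/* bound by name: still correct if the members of struct ops are reordered */
static const struct ops math_ops = {
	.add = add,
	.sub = sub,
};

int main(void)
{
	printf("%d %d\n", math_ops.add(2, 3), math_ops.sub(2, 3));
	return 0;
}
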
69035diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
69036index 02b0b7d..c85018b 100644
69037--- a/fs/reiserfs/procfs.c
69038+++ b/fs/reiserfs/procfs.c
69039@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
69040 "SMALL_TAILS " : "NO_TAILS ",
69041 replay_only(sb) ? "REPLAY_ONLY " : "",
69042 convert_reiserfs(sb) ? "CONV " : "",
69043- atomic_read(&r->s_generation_counter),
69044+ atomic_read_unchecked(&r->s_generation_counter),
69045 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
69046 SF(s_do_balance), SF(s_unneeded_left_neighbor),
69047 SF(s_good_search_by_key_reada), SF(s_bmaps),
69048diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
69049index 735c2c2..81b91af 100644
69050--- a/fs/reiserfs/reiserfs.h
69051+++ b/fs/reiserfs/reiserfs.h
69052@@ -573,7 +573,7 @@ struct reiserfs_sb_info {
69053 /* Comment? -Hans */
69054 wait_queue_head_t s_wait;
69055 /* increased by one every time the tree gets re-balanced */
69056- atomic_t s_generation_counter;
69057+ atomic_unchecked_t s_generation_counter;
69058
69059 /* File system properties. Currently holds on-disk FS format */
69060 unsigned long s_properties;
69061@@ -2294,7 +2294,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
69062 #define REISERFS_USER_MEM 1 /* user memory mode */
69063
69064 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
69065-#define get_generation(s) atomic_read (&fs_generation(s))
69066+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
69067 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
69068 #define __fs_changed(gen,s) (gen != get_generation (s))
69069 #define fs_changed(gen,s) \
69070diff --git a/fs/select.c b/fs/select.c
69071index 467bb1c..cf9d65a 100644
69072--- a/fs/select.c
69073+++ b/fs/select.c
69074@@ -20,6 +20,7 @@
69075 #include <linux/export.h>
69076 #include <linux/slab.h>
69077 #include <linux/poll.h>
69078+#include <linux/security.h>
69079 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
69080 #include <linux/file.h>
69081 #include <linux/fdtable.h>
69082@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
69083 struct poll_list *walk = head;
69084 unsigned long todo = nfds;
69085
69086+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
69087 if (nfds > rlimit(RLIMIT_NOFILE))
69088 return -EINVAL;
69089
69090diff --git a/fs/seq_file.c b/fs/seq_file.c
69091index 3857b72..0b7281e 100644
69092--- a/fs/seq_file.c
69093+++ b/fs/seq_file.c
69094@@ -12,6 +12,8 @@
69095 #include <linux/slab.h>
69096 #include <linux/cred.h>
69097 #include <linux/mm.h>
69098+#include <linux/sched.h>
69099+#include <linux/grsecurity.h>
69100
69101 #include <asm/uaccess.h>
69102 #include <asm/page.h>
69103@@ -34,12 +36,7 @@ static void seq_set_overflow(struct seq_file *m)
69104
69105 static void *seq_buf_alloc(unsigned long size)
69106 {
69107- void *buf;
69108-
69109- buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
69110- if (!buf && size > PAGE_SIZE)
69111- buf = vmalloc(size);
69112- return buf;
69113+ return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
69114 }
69115
69116 /**
69117@@ -72,6 +69,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
69118 #ifdef CONFIG_USER_NS
69119 p->user_ns = file->f_cred->user_ns;
69120 #endif
69121+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69122+ p->exec_id = current->exec_id;
69123+#endif
69124
69125 /*
69126 * Wrappers around seq_open(e.g. swaps_open) need to be
69127@@ -94,6 +94,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
69128 }
69129 EXPORT_SYMBOL(seq_open);
69130
69131+
69132+int seq_open_restrict(struct file *file, const struct seq_operations *op)
69133+{
69134+ if (gr_proc_is_restricted())
69135+ return -EACCES;
69136+
69137+ return seq_open(file, op);
69138+}
69139+EXPORT_SYMBOL(seq_open_restrict);
69140+
69141 static int traverse(struct seq_file *m, loff_t offset)
69142 {
69143 loff_t pos = 0, index;
69144@@ -165,7 +175,7 @@ Eoverflow:
69145 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
69146 {
69147 struct seq_file *m = file->private_data;
69148- size_t copied = 0;
69149+ ssize_t copied = 0;
69150 loff_t pos;
69151 size_t n;
69152 void *p;
69153@@ -596,7 +606,7 @@ static void single_stop(struct seq_file *p, void *v)
69154 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
69155 void *data)
69156 {
69157- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
69158+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
69159 int res = -ENOMEM;
69160
69161 if (op) {
69162@@ -632,6 +642,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
69163 }
69164 EXPORT_SYMBOL(single_open_size);
69165
69166+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
69167+ void *data)
69168+{
69169+ if (gr_proc_is_restricted())
69170+ return -EACCES;
69171+
69172+ return single_open(file, show, data);
69173+}
69174+EXPORT_SYMBOL(single_open_restrict);
69175+
69176+
69177 int single_release(struct inode *inode, struct file *file)
69178 {
69179 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
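
Note: seq_open_restrict()/single_open_restrict() above give /proc/net-style files a one-line opt-in to the GRKERNSEC_PROC policy. A hedged usage sketch (the "example" names are illustrative):

static int example_show(struct seq_file *m, void *v)
{
	seq_puts(m, "example\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	/* fails with -EACCES for restricted callers, else behaves
	 * exactly like single_open() */
	return single_open_restrict(file, example_show, NULL);
}
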
69180diff --git a/fs/splice.c b/fs/splice.c
69181index f5cb9ba..8ddb1e9 100644
69182--- a/fs/splice.c
69183+++ b/fs/splice.c
69184@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
69185 pipe_lock(pipe);
69186
69187 for (;;) {
69188- if (!pipe->readers) {
69189+ if (!atomic_read(&pipe->readers)) {
69190 send_sig(SIGPIPE, current, 0);
69191 if (!ret)
69192 ret = -EPIPE;
69193@@ -216,7 +216,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
69194 page_nr++;
69195 ret += buf->len;
69196
69197- if (pipe->files)
69198+ if (atomic_read(&pipe->files))
69199 do_wakeup = 1;
69200
69201 if (!--spd->nr_pages)
69202@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
69203 do_wakeup = 0;
69204 }
69205
69206- pipe->waiting_writers++;
69207+ atomic_inc(&pipe->waiting_writers);
69208 pipe_wait(pipe);
69209- pipe->waiting_writers--;
69210+ atomic_dec(&pipe->waiting_writers);
69211 }
69212
69213 pipe_unlock(pipe);
69214@@ -576,7 +576,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
69215 old_fs = get_fs();
69216 set_fs(get_ds());
69217 /* The cast to a user pointer is valid due to the set_fs() */
69218- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
69219+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
69220 set_fs(old_fs);
69221
69222 return res;
69223@@ -591,7 +591,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
69224 old_fs = get_fs();
69225 set_fs(get_ds());
69226 /* The cast to a user pointer is valid due to the set_fs() */
69227- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
69228+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
69229 set_fs(old_fs);
69230
69231 return res;
69232@@ -644,7 +644,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
69233 goto err;
69234
69235 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
69236- vec[i].iov_base = (void __user *) page_address(page);
69237+ vec[i].iov_base = (void __force_user *) page_address(page);
69238 vec[i].iov_len = this_len;
69239 spd.pages[i] = page;
69240 spd.nr_pages++;
69241@@ -783,7 +783,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
69242 ops->release(pipe, buf);
69243 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
69244 pipe->nrbufs--;
69245- if (pipe->files)
69246+ if (atomic_read(&pipe->files))
69247 sd->need_wakeup = true;
69248 }
69249
69250@@ -807,10 +807,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
69251 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
69252 {
69253 while (!pipe->nrbufs) {
69254- if (!pipe->writers)
69255+ if (!atomic_read(&pipe->writers))
69256 return 0;
69257
69258- if (!pipe->waiting_writers && sd->num_spliced)
69259+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
69260 return 0;
69261
69262 if (sd->flags & SPLICE_F_NONBLOCK)
69263@@ -1040,7 +1040,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
69264 ops->release(pipe, buf);
69265 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
69266 pipe->nrbufs--;
69267- if (pipe->files)
69268+ if (atomic_read(&pipe->files))
69269 sd.need_wakeup = true;
69270 } else {
69271 buf->offset += ret;
69272@@ -1200,7 +1200,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
69273 * out of the pipe right after the splice_to_pipe(). So set
69274 * PIPE_READERS appropriately.
69275 */
69276- pipe->readers = 1;
69277+ atomic_set(&pipe->readers, 1);
69278
69279 current->splice_pipe = pipe;
69280 }
69281@@ -1496,6 +1496,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
69282
69283 partial[buffers].offset = off;
69284 partial[buffers].len = plen;
69285+ partial[buffers].private = 0;
69286
69287 off = 0;
69288 len -= plen;
69289@@ -1732,9 +1733,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
69290 ret = -ERESTARTSYS;
69291 break;
69292 }
69293- if (!pipe->writers)
69294+ if (!atomic_read(&pipe->writers))
69295 break;
69296- if (!pipe->waiting_writers) {
69297+ if (!atomic_read(&pipe->waiting_writers)) {
69298 if (flags & SPLICE_F_NONBLOCK) {
69299 ret = -EAGAIN;
69300 break;
69301@@ -1766,7 +1767,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
69302 pipe_lock(pipe);
69303
69304 while (pipe->nrbufs >= pipe->buffers) {
69305- if (!pipe->readers) {
69306+ if (!atomic_read(&pipe->readers)) {
69307 send_sig(SIGPIPE, current, 0);
69308 ret = -EPIPE;
69309 break;
69310@@ -1779,9 +1780,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
69311 ret = -ERESTARTSYS;
69312 break;
69313 }
69314- pipe->waiting_writers++;
69315+ atomic_inc(&pipe->waiting_writers);
69316 pipe_wait(pipe);
69317- pipe->waiting_writers--;
69318+ atomic_dec(&pipe->waiting_writers);
69319 }
69320
69321 pipe_unlock(pipe);
69322@@ -1817,14 +1818,14 @@ retry:
69323 pipe_double_lock(ipipe, opipe);
69324
69325 do {
69326- if (!opipe->readers) {
69327+ if (!atomic_read(&opipe->readers)) {
69328 send_sig(SIGPIPE, current, 0);
69329 if (!ret)
69330 ret = -EPIPE;
69331 break;
69332 }
69333
69334- if (!ipipe->nrbufs && !ipipe->writers)
69335+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
69336 break;
69337
69338 /*
69339@@ -1921,7 +1922,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
69340 pipe_double_lock(ipipe, opipe);
69341
69342 do {
69343- if (!opipe->readers) {
69344+ if (!atomic_read(&opipe->readers)) {
69345 send_sig(SIGPIPE, current, 0);
69346 if (!ret)
69347 ret = -EPIPE;
69348@@ -1966,7 +1967,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
69349 * return EAGAIN if we have the potential of some data in the
69350 * future, otherwise just return 0
69351 */
69352- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
69353+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
69354 ret = -EAGAIN;
69355
69356 pipe_unlock(ipipe);
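
Note: the splice.c conversions above track a struct pipe_inode_info change made elsewhere in this patch: readers, writers, files and waiting_writers become atomic_t, so under PAX_REFCOUNT their counts cannot overflow undetected, and every call site moves to the atomic accessors. The mechanical shape of each conversion:

-	pipe->waiting_writers++;
+	atomic_inc(&pipe->waiting_writers);
-	if (!pipe->readers)
+	if (!atomic_read(&pipe->readers))
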
69357diff --git a/fs/stat.c b/fs/stat.c
69358index ae0c3ce..9ee641c 100644
69359--- a/fs/stat.c
69360+++ b/fs/stat.c
69361@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
69362 stat->gid = inode->i_gid;
69363 stat->rdev = inode->i_rdev;
69364 stat->size = i_size_read(inode);
69365- stat->atime = inode->i_atime;
69366- stat->mtime = inode->i_mtime;
69367+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
69368+ stat->atime = inode->i_ctime;
69369+ stat->mtime = inode->i_ctime;
69370+ } else {
69371+ stat->atime = inode->i_atime;
69372+ stat->mtime = inode->i_mtime;
69373+ }
69374 stat->ctime = inode->i_ctime;
69375 stat->blksize = (1 << inode->i_blkbits);
69376 stat->blocks = inode->i_blocks;
69377@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
69378 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
69379 {
69380 struct inode *inode = path->dentry->d_inode;
69381+ int retval;
69382
69383- if (inode->i_op->getattr)
69384- return inode->i_op->getattr(path->mnt, path->dentry, stat);
69385+ if (inode->i_op->getattr) {
69386+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
69387+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
69388+ stat->atime = stat->ctime;
69389+ stat->mtime = stat->ctime;
69390+ }
69391+ return retval;
69392+ }
69393
69394 generic_fillattr(inode, stat);
69395 return 0;
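
Note: the fs/stat.c hunks above blunt timestamp side channels. For callers without CAP_MKNOD, stat() on a device flagged by is_sidechannel_device() (defined elsewhere in this patch) reports ctime in place of atime/mtime, hiding when another user last touched e.g. a terminal. A user-space probe; /dev/ptmx is only an illustrative guess at a node the predicate matches:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	if (stat("/dev/ptmx", &st) != 0) {	/* illustrative device node */
		perror("stat");
		return 1;
	}
	/* on a hardened kernel, unprivileged callers should see
	 * atime == mtime == ctime here */
	printf("atime=%ld mtime=%ld ctime=%ld\n",
	       (long)st.st_atime, (long)st.st_mtime, (long)st.st_ctime);
	return 0;
}
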
69396diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
69397index 0b45ff4..847de5b 100644
69398--- a/fs/sysfs/dir.c
69399+++ b/fs/sysfs/dir.c
69400@@ -41,9 +41,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
69401 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
69402 {
69403 struct kernfs_node *parent, *kn;
69404+ const char *name;
69405+ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
69406+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
69407+ const char *parent_name;
69408+#endif
69409
69410 BUG_ON(!kobj);
69411
69412+ name = kobject_name(kobj);
69413+
69414 if (kobj->parent)
69415 parent = kobj->parent->sd;
69416 else
69417@@ -52,11 +59,22 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
69418 if (!parent)
69419 return -ENOENT;
69420
69421- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
69422- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
69423+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
69424+ parent_name = parent->name;
69425+ mode = S_IRWXU;
69426+
69427+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
69428+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
69429+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
69430+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
69431+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
69432+#endif
69433+
69434+ kn = kernfs_create_dir_ns(parent, name,
69435+ mode, kobj, ns);
69436 if (IS_ERR(kn)) {
69437 if (PTR_ERR(kn) == -EEXIST)
69438- sysfs_warn_dup(parent, kobject_name(kobj));
69439+ sysfs_warn_dup(parent, name);
69440 return PTR_ERR(kn);
69441 }
69442
69443diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
69444index 69d4889..a810bd4 100644
69445--- a/fs/sysv/sysv.h
69446+++ b/fs/sysv/sysv.h
69447@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
69448 #endif
69449 }
69450
69451-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
69452+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
69453 {
69454 if (sbi->s_bytesex == BYTESEX_PDP)
69455 return PDP_swab((__force __u32)n);
69456diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
69457index 2290d58..7791371 100644
69458--- a/fs/ubifs/io.c
69459+++ b/fs/ubifs/io.c
69460@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
69461 return err;
69462 }
69463
69464-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
69465+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
69466 {
69467 int err;
69468
69469diff --git a/fs/udf/inode.c b/fs/udf/inode.c
69470index 236cd48..a6a4053 100644
69471--- a/fs/udf/inode.c
69472+++ b/fs/udf/inode.c
69473@@ -51,7 +51,6 @@ MODULE_LICENSE("GPL");
69474
69475 static umode_t udf_convert_permissions(struct fileEntry *);
69476 static int udf_update_inode(struct inode *, int);
69477-static void udf_fill_inode(struct inode *, struct buffer_head *);
69478 static int udf_sync_inode(struct inode *inode);
69479 static int udf_alloc_i_data(struct inode *inode, size_t size);
69480 static sector_t inode_getblk(struct inode *, sector_t, int *, int *);
69481@@ -1271,13 +1270,25 @@ update_time:
69482 return 0;
69483 }
69484
69485+/*
69486+ * Maximum length of linked list formed by ICB hierarchy. The chosen number is
69487+ * arbitrary - just that we hopefully don't limit any real use of rewritten
69488+ * inode on write-once media but avoid looping for too long on corrupted media.
69489+ */
69490+#define UDF_MAX_ICB_NESTING 1024
69491+
69492 static void __udf_read_inode(struct inode *inode)
69493 {
69494 struct buffer_head *bh = NULL;
69495 struct fileEntry *fe;
69496+ struct extendedFileEntry *efe;
69497 uint16_t ident;
69498 struct udf_inode_info *iinfo = UDF_I(inode);
69499+ struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
69500+ unsigned int link_count;
69501+ unsigned int indirections = 0;
69502
69503+reread:
69504 /*
69505 * Set defaults, but the inode is still incomplete!
69506 * Note: get_new_inode() sets the following on a new inode:
69507@@ -1307,6 +1318,7 @@ static void __udf_read_inode(struct inode *inode)
69508 }
69509
69510 fe = (struct fileEntry *)bh->b_data;
69511+ efe = (struct extendedFileEntry *)bh->b_data;
69512
69513 if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
69514 struct buffer_head *ibh;
69515@@ -1314,28 +1326,26 @@ static void __udf_read_inode(struct inode *inode)
69516 ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1,
69517 &ident);
69518 if (ident == TAG_IDENT_IE && ibh) {
69519- struct buffer_head *nbh = NULL;
69520 struct kernel_lb_addr loc;
69521 struct indirectEntry *ie;
69522
69523 ie = (struct indirectEntry *)ibh->b_data;
69524 loc = lelb_to_cpu(ie->indirectICB.extLocation);
69525
69526- if (ie->indirectICB.extLength &&
69527- (nbh = udf_read_ptagged(inode->i_sb, &loc, 0,
69528- &ident))) {
69529- if (ident == TAG_IDENT_FE ||
69530- ident == TAG_IDENT_EFE) {
69531- memcpy(&iinfo->i_location,
69532- &loc,
69533- sizeof(struct kernel_lb_addr));
69534- brelse(bh);
69535- brelse(ibh);
69536- brelse(nbh);
69537- __udf_read_inode(inode);
69538+ if (ie->indirectICB.extLength) {
69539+ brelse(bh);
69540+ brelse(ibh);
69541+ memcpy(&iinfo->i_location, &loc,
69542+ sizeof(struct kernel_lb_addr));
69543+ if (++indirections > UDF_MAX_ICB_NESTING) {
69544+ udf_err(inode->i_sb,
69545+ "too many ICBs in ICB hierarchy"
69546+ " (max %d supported)\n",
69547+ UDF_MAX_ICB_NESTING);
69548+ make_bad_inode(inode);
69549 return;
69550 }
69551- brelse(nbh);
69552+ goto reread;
69553 }
69554 }
69555 brelse(ibh);
69556@@ -1346,22 +1356,6 @@ static void __udf_read_inode(struct inode *inode)
69557 make_bad_inode(inode);
69558 return;
69559 }
69560- udf_fill_inode(inode, bh);
69561-
69562- brelse(bh);
69563-}
69564-
69565-static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
69566-{
69567- struct fileEntry *fe;
69568- struct extendedFileEntry *efe;
69569- struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
69570- struct udf_inode_info *iinfo = UDF_I(inode);
69571- unsigned int link_count;
69572-
69573- fe = (struct fileEntry *)bh->b_data;
69574- efe = (struct extendedFileEntry *)bh->b_data;
69575-
69576 if (fe->icbTag.strategyType == cpu_to_le16(4))
69577 iinfo->i_strat4096 = 0;
69578 else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
69579@@ -1551,6 +1545,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
69580 } else
69581 make_bad_inode(inode);
69582 }
69583+ brelse(bh);
69584 }
69585
69586 static int udf_alloc_i_data(struct inode *inode, size_t size)
69587@@ -1664,7 +1659,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
69588 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
69589 fe->permissions = cpu_to_le32(udfperms);
69590
69591- if (S_ISDIR(inode->i_mode))
69592+ if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0)
69593 fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
69594 else
69595 fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
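
The rewrite above replaces recursion through indirect ICBs with an iterative reread loop capped at UDF_MAX_ICB_NESTING, so a corrupted or malicious filesystem can no longer drive unbounded recursion or an endless loop. A toy model of the bounded-indirection pattern, with a hypothetical node type standing in for the ICB chain:

#include <stdio.h>

#define MAX_NESTING 1024	/* mirrors UDF_MAX_ICB_NESTING */

struct node {
	struct node *indirect;	/* NULL for a real entry */
	int payload;
};

/* Follow indirections iteratively; returns NULL when the chain is too
 * deep, the moral equivalent of make_bad_inode() in the hunk above. */
static struct node *resolve(struct node *n)
{
	unsigned int indirections = 0;

	while (n && n->indirect) {
		if (++indirections > MAX_NESTING)
			return NULL;
		n = n->indirect;
	}
	return n;
}

int main(void)
{
	struct node leaf = { 0, 42 }, mid = { &leaf, 0 }, top = { &mid, 0 };
	struct node *r = resolve(&top);

	printf("payload: %d\n", r ? r->payload : -1);
	return 0;
}
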
69596diff --git a/fs/udf/misc.c b/fs/udf/misc.c
69597index c175b4d..8f36a16 100644
69598--- a/fs/udf/misc.c
69599+++ b/fs/udf/misc.c
69600@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
69601
69602 u8 udf_tag_checksum(const struct tag *t)
69603 {
69604- u8 *data = (u8 *)t;
69605+ const u8 *data = (const u8 *)t;
69606 u8 checksum = 0;
69607 int i;
69608 for (i = 0; i < sizeof(struct tag); ++i)
69609diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
69610index 8d974c4..b82f6ec 100644
69611--- a/fs/ufs/swab.h
69612+++ b/fs/ufs/swab.h
69613@@ -22,7 +22,7 @@ enum {
69614 BYTESEX_BE
69615 };
69616
69617-static inline u64
69618+static inline u64 __intentional_overflow(-1)
69619 fs64_to_cpu(struct super_block *sbp, __fs64 n)
69620 {
69621 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
69622@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
69623 return (__force __fs64)cpu_to_be64(n);
69624 }
69625
69626-static inline u32
69627+static inline u32 __intentional_overflow(-1)
69628 fs32_to_cpu(struct super_block *sbp, __fs32 n)
69629 {
69630 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
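
This hunk, like the sysv and ubifs ones above, adds an __intentional_overflow(-1) annotation. The marker is consumed by the PaX size_overflow gcc plugin and flags helpers whose integer arithmetic may wrap by design, such as byte-order conversions, so the plugin's overflow instrumentation leaves them alone. A hedged sketch of keeping such an annotation buildable outside the patched tree; the no-op fallback #define is an assumption, the real definition lives in the patch's compiler headers:

#include <stdint.h>
#include <stdio.h>

/* Assumed fallback: without the size_overflow plugin the annotation
 * expands to nothing. */
#ifndef __intentional_overflow
#define __intentional_overflow(...)
#endif

/* Byte-swap helper in the spirit of fs32_to_cpu(): the shifts and ORs
 * here may wrap 32-bit values by design, hence the annotation. */
static inline uint32_t __intentional_overflow(-1) demo_swab32(uint32_t x)
{
	return (x << 24) | ((x & 0xff00u) << 8) |
	       ((x >> 8) & 0xff00u) | (x >> 24);
}

int main(void)
{
	printf("%08x\n", demo_swab32(0x11223344));	/* prints 44332211 */
	return 0;
}
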
69631diff --git a/fs/utimes.c b/fs/utimes.c
69632index aa138d6..5f3a811 100644
69633--- a/fs/utimes.c
69634+++ b/fs/utimes.c
69635@@ -1,6 +1,7 @@
69636 #include <linux/compiler.h>
69637 #include <linux/file.h>
69638 #include <linux/fs.h>
69639+#include <linux/security.h>
69640 #include <linux/linkage.h>
69641 #include <linux/mount.h>
69642 #include <linux/namei.h>
69643@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
69644 }
69645 }
69646 retry_deleg:
69647+
69648+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
69649+ error = -EACCES;
69650+ goto mnt_drop_write_and_out;
69651+ }
69652+
69653 mutex_lock(&inode->i_mutex);
69654 error = notify_change(path->dentry, &newattrs, &delegated_inode);
69655 mutex_unlock(&inode->i_mutex);
69656diff --git a/fs/xattr.c b/fs/xattr.c
69657index c69e6d4..cc56af5 100644
69658--- a/fs/xattr.c
69659+++ b/fs/xattr.c
69660@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
69661 return rc;
69662 }
69663
69664+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
69665+ssize_t
69666+pax_getxattr(struct dentry *dentry, void *value, size_t size)
69667+{
69668+ struct inode *inode = dentry->d_inode;
69669+ ssize_t error;
69670+
69671+ error = inode_permission(inode, MAY_EXEC);
69672+ if (error)
69673+ return error;
69674+
69675+ if (inode->i_op->getxattr)
69676+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
69677+ else
69678+ error = -EOPNOTSUPP;
69679+
69680+ return error;
69681+}
69682+EXPORT_SYMBOL(pax_getxattr);
69683+#endif
69684+
69685 ssize_t
69686 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
69687 {
69688@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
69689 * Extended attribute SET operations
69690 */
69691 static long
69692-setxattr(struct dentry *d, const char __user *name, const void __user *value,
69693+setxattr(struct path *path, const char __user *name, const void __user *value,
69694 size_t size, int flags)
69695 {
69696 int error;
69697@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
69698 posix_acl_fix_xattr_from_user(kvalue, size);
69699 }
69700
69701- error = vfs_setxattr(d, kname, kvalue, size, flags);
69702+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
69703+ error = -EACCES;
69704+ goto out;
69705+ }
69706+
69707+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
69708 out:
69709 if (vvalue)
69710 vfree(vvalue);
69711@@ -377,7 +403,7 @@ retry:
69712 return error;
69713 error = mnt_want_write(path.mnt);
69714 if (!error) {
69715- error = setxattr(path.dentry, name, value, size, flags);
69716+ error = setxattr(&path, name, value, size, flags);
69717 mnt_drop_write(path.mnt);
69718 }
69719 path_put(&path);
69720@@ -401,7 +427,7 @@ retry:
69721 return error;
69722 error = mnt_want_write(path.mnt);
69723 if (!error) {
69724- error = setxattr(path.dentry, name, value, size, flags);
69725+ error = setxattr(&path, name, value, size, flags);
69726 mnt_drop_write(path.mnt);
69727 }
69728 path_put(&path);
69729@@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
69730 const void __user *,value, size_t, size, int, flags)
69731 {
69732 struct fd f = fdget(fd);
69733- struct dentry *dentry;
69734 int error = -EBADF;
69735
69736 if (!f.file)
69737 return error;
69738- dentry = f.file->f_path.dentry;
69739- audit_inode(NULL, dentry, 0);
69740+ audit_inode(NULL, f.file->f_path.dentry, 0);
69741 error = mnt_want_write_file(f.file);
69742 if (!error) {
69743- error = setxattr(dentry, name, value, size, flags);
69744+ error = setxattr(&f.file->f_path, name, value, size, flags);
69745 mnt_drop_write_file(f.file);
69746 }
69747 fdput(f);
69748@@ -626,7 +650,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
69749 * Extended attribute REMOVE operations
69750 */
69751 static long
69752-removexattr(struct dentry *d, const char __user *name)
69753+removexattr(struct path *path, const char __user *name)
69754 {
69755 int error;
69756 char kname[XATTR_NAME_MAX + 1];
69757@@ -637,7 +661,10 @@ removexattr(struct dentry *d, const char __user *name)
69758 if (error < 0)
69759 return error;
69760
69761- return vfs_removexattr(d, kname);
69762+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
69763+ return -EACCES;
69764+
69765+ return vfs_removexattr(path->dentry, kname);
69766 }
69767
69768 SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
69769@@ -652,7 +679,7 @@ retry:
69770 return error;
69771 error = mnt_want_write(path.mnt);
69772 if (!error) {
69773- error = removexattr(path.dentry, name);
69774+ error = removexattr(&path, name);
69775 mnt_drop_write(path.mnt);
69776 }
69777 path_put(&path);
69778@@ -675,7 +702,7 @@ retry:
69779 return error;
69780 error = mnt_want_write(path.mnt);
69781 if (!error) {
69782- error = removexattr(path.dentry, name);
69783+ error = removexattr(&path, name);
69784 mnt_drop_write(path.mnt);
69785 }
69786 path_put(&path);
69787@@ -689,16 +716,16 @@ retry:
69788 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
69789 {
69790 struct fd f = fdget(fd);
69791- struct dentry *dentry;
69792+ struct path *path;
69793 int error = -EBADF;
69794
69795 if (!f.file)
69796 return error;
69797- dentry = f.file->f_path.dentry;
69798- audit_inode(NULL, dentry, 0);
69799+ path = &f.file->f_path;
69800+ audit_inode(NULL, path->dentry, 0);
69801 error = mnt_want_write_file(f.file);
69802 if (!error) {
69803- error = removexattr(dentry, name);
69804+ error = removexattr(path, name);
69805 mnt_drop_write_file(f.file);
69806 }
69807 fdput(f);
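
pax_getxattr() above exposes the PaX flags attribute after an exec-permission check on the inode. The same attribute can be inspected from userspace with getxattr(2); the attribute name "user.pax.flags" is assumed here from the patch's XATTR_NAME_PAX_FLAGS definition, and /bin/ls is only an example target:

#include <stdio.h>
#include <sys/types.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/bin/ls"; /* example */
	char buf[32];
	ssize_t len;

	len = getxattr(path, "user.pax.flags", buf, sizeof(buf) - 1);
	if (len < 0) {
		perror("getxattr");	/* ENODATA when no flags are set */
		return 1;
	}
	buf[len] = '\0';
	printf("PaX flags on %s: %s\n", path, buf);
	return 0;
}
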
69808diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
69809index 75c3fe5..b0f6bbe 100644
69810--- a/fs/xfs/xfs_bmap.c
69811+++ b/fs/xfs/xfs_bmap.c
69812@@ -583,7 +583,7 @@ xfs_bmap_validate_ret(
69813
69814 #else
69815 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
69816-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
69817+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
69818 #endif /* DEBUG */
69819
69820 /*
69821diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
69822index 48e99af..54ebae3 100644
69823--- a/fs/xfs/xfs_dir2_readdir.c
69824+++ b/fs/xfs/xfs_dir2_readdir.c
69825@@ -159,7 +159,12 @@ xfs_dir2_sf_getdents(
69826 ino = dp->d_ops->sf_get_ino(sfp, sfep);
69827 filetype = dp->d_ops->sf_get_ftype(sfep);
69828 ctx->pos = off & 0x7fffffff;
69829- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
69830+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
69831+ char name[sfep->namelen];
69832+ memcpy(name, sfep->name, sfep->namelen);
69833+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(dp->i_mount, filetype)))
69834+ return 0;
69835+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
69836 xfs_dir3_get_dtype(dp->i_mount, filetype)))
69837 return 0;
69838 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
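
The readdir change above bounces a short-form entry name through a stack buffer before dir_emit() whenever the name still resides in the inline inode fork, presumably so the eventual copy to userspace does not point into inode-embedded data that a usercopy-hardening check such as PAX_USERCOPY would reject. A generic sketch of that bounce-buffer pattern, with a hypothetical callback rather than XFS code:

#include <stdio.h>
#include <string.h>

/* Hand 'name' to a callback that may copy it onward; when it points into
 * a guarded object, bounce it through a stack buffer first. */
static int emit_name(const char *name, size_t len, int name_is_inline,
		     int (*emit)(const char *, size_t))
{
	if (name_is_inline) {
		char bounce[256];

		if (len > sizeof(bounce))
			return -1;
		memcpy(bounce, name, len);
		return emit(bounce, len);
	}
	return emit(name, len);
}

static int print_emit(const char *name, size_t len)
{
	printf("%.*s\n", (int)len, name);
	return 0;
}

int main(void)
{
	return emit_name("example", 7, 1, print_emit);
}
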
69839diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
69840index 8bc1bbc..0d6911b 100644
69841--- a/fs/xfs/xfs_ioctl.c
69842+++ b/fs/xfs/xfs_ioctl.c
69843@@ -122,7 +122,7 @@ xfs_find_handle(
69844 }
69845
69846 error = -EFAULT;
69847- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
69848+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
69849 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
69850 goto out_put;
69851
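
The xfs_find_handle() change adds a sanity bound: never copy more bytes to userspace than the kernel-side handle object holds. The same validate-then-copy pattern in a userspace sketch, with memcpy() standing in for copy_to_user():

#include <stdio.h>
#include <string.h>

struct handle {
	char data[24];
};

/* Reject oversized requests instead of letting the copy read past the
 * source object, mirroring the added 'hsize > sizeof handle' check. */
static int copy_handle(void *dst, const struct handle *src, size_t hsize)
{
	if (hsize > sizeof(*src))
		return -1;
	memcpy(dst, src, hsize);	/* copy_to_user() in the kernel */
	return 0;
}

int main(void)
{
	struct handle h = { "example" };
	char out[64];

	printf("ok: %d\n", copy_handle(out, &h, sizeof(h)) == 0);
	printf("rejected: %d\n", copy_handle(out, &h, sizeof(out)) != 0);
	return 0;
}
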
69852diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
69853new file mode 100644
69854index 0000000..27cec32
69855--- /dev/null
69856+++ b/grsecurity/Kconfig
69857@@ -0,0 +1,1166 @@
69858+#
69859+# grsecurity configuration
69860+#
69861+menu "Memory Protections"
69862+depends on GRKERNSEC
69863+
69864+config GRKERNSEC_KMEM
69865+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
69866+ default y if GRKERNSEC_CONFIG_AUTO
69867+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
69868+ help
69869+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
69870+ be written to or read from to modify or leak the contents of the running
69871+ kernel. /dev/port will also not be allowed to be opened, writing to
69872+ /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
69873+ If you have module support disabled, enabling this will close up several
69874+ ways that are currently used to insert malicious code into the running
69875+ kernel.
69876+
69877+ Even with this feature enabled, we still highly recommend that
69878+ you use the RBAC system, as it is still possible for an attacker to
69879+ modify the running kernel through other more obscure methods.
69880+
69881+ It is highly recommended that you say Y here if you meet all the
69882+ conditions above.
69883+
69884+config GRKERNSEC_VM86
69885+ bool "Restrict VM86 mode"
69886+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
69887+ depends on X86_32
69888+
69889+ help
69890+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
69891+ make use of a special execution mode on 32bit x86 processors called
69892+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
69893+ video cards and will still work with this option enabled. The purpose
69894+ of the option is to prevent exploitation of emulation errors in
69895+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
69896+ Nearly all users should be able to enable this option.
69897+
69898+config GRKERNSEC_IO
69899+ bool "Disable privileged I/O"
69900+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
69901+ depends on X86
69902+ select RTC_CLASS
69903+ select RTC_INTF_DEV
69904+ select RTC_DRV_CMOS
69905+
69906+ help
69907+ If you say Y here, all ioperm and iopl calls will return an error.
69908+ Ioperm and iopl can be used to modify the running kernel.
69909+ Unfortunately, some programs need this access to operate properly,
69910+ the most notable of which are XFree86 and hwclock. hwclock can be
69911+ remedied by having RTC support in the kernel, so real-time
69912+ clock support is enabled if this option is enabled, to ensure
69913+ that hwclock operates correctly. If hwclock still does not work,
69914+ either update udev or symlink /dev/rtc to /dev/rtc0.
69915+
69916+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
69917+ you may not be able to boot into a graphical environment with this
69918+ option enabled. In this case, you should use the RBAC system instead.
69919+
69920+config GRKERNSEC_JIT_HARDEN
69921+ bool "Harden BPF JIT against spray attacks"
69922+ default y if GRKERNSEC_CONFIG_AUTO
69923+ depends on BPF_JIT && X86
69924+ help
69925+ If you say Y here, the native code generated by the kernel's Berkeley
69926+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
69927+ attacks that attempt to fit attacker-beneficial instructions in
69928+ 32bit immediate fields of JIT-generated native instructions. The
69929+ attacker will generally aim to cause an unintended instruction sequence
69930+ of JIT-generated native code to execute by jumping into the middle of
69931+ a generated instruction. This feature effectively randomizes the 32bit
69932+ immediate constants present in the generated code to thwart such attacks.
69933+
69934+ If you're using KERNEXEC, it's recommended that you enable this option
69935+ to supplement the hardening of the kernel.
69936+
69937+config GRKERNSEC_PERF_HARDEN
69938+ bool "Disable unprivileged PERF_EVENTS usage by default"
69939+ default y if GRKERNSEC_CONFIG_AUTO
69940+ depends on PERF_EVENTS
69941+ help
69942+ If you say Y here, the range of acceptable values for the
69943+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
69944+ default to a new value: 3. When the sysctl is set to this value, no
69945+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
69946+
69947+ Though PERF_EVENTS can be used legitimately for performance monitoring
69948+ and low-level application profiling, it is forced on regardless of
69949+ configuration, has been at fault for several vulnerabilities, and
69950+ creates new opportunities for side channels and other information leaks.
69951+
69952+ This feature puts PERF_EVENTS into a secure default state and permits
69953+ the administrator to change out of it temporarily if unprivileged
69954+ application profiling is needed.
69955+
69956+config GRKERNSEC_RAND_THREADSTACK
69957+ bool "Insert random gaps between thread stacks"
69958+ default y if GRKERNSEC_CONFIG_AUTO
69959+ depends on PAX_RANDMMAP && !PPC
69960+ help
69961+ If you say Y here, a random-sized gap will be enforced between allocated
69962+ thread stacks. Glibc's NPTL and other threading libraries that
69963+ pass MAP_STACK to the kernel for thread stack allocation are supported.
69964+ The implementation currently provides 8 bits of entropy for the gap.
69965+
69966+ Many distributions do not compile threaded remote services with the
69967+ -fstack-check argument to GCC, causing the variable-sized stack-based
69968+ allocator, alloca(), to not probe the stack on allocation. This
69969+ permits an unbounded alloca() to skip over any guard page and potentially
69970+ modify another thread's stack reliably. An enforced random gap
69971+ reduces the reliability of such an attack and increases the chance
69972+ that such a read/write to another thread's stack instead lands in
69973+ an unmapped area, causing a crash and triggering grsecurity's
69974+ anti-bruteforcing logic.
69975+
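
A small probe of the mechanism described above; MAP_STACK is the flag NPTL passes when allocating thread stacks. On a kernel with this option the gap between two successive MAP_STACK mappings should vary from run to run, while on other kernels it is typically constant. The sizes and the fallback MAP_STACK value are illustrative:

#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_STACK
#define MAP_STACK 0x20000	/* Linux value, assumed fallback */
#endif

int main(void)
{
	size_t len = 64 * 1024;
	unsigned long a, b;
	void *pa = mmap(NULL, len, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
	void *pb = mmap(NULL, len, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);

	if (pa == MAP_FAILED || pb == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	a = (unsigned long)pa;
	b = (unsigned long)pb;
	/* With random thread-stack gaps this delta changes across runs. */
	printf("gap: %lu bytes\n", (a > b ? a - b : b - a) - len);
	return 0;
}
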
69976+config GRKERNSEC_PROC_MEMMAP
69977+ bool "Harden ASLR against information leaks and entropy reduction"
69978+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
69979+ depends on PAX_NOEXEC || PAX_ASLR
69980+ help
69981+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
69982+ give no information about the addresses of its mappings if
69983+ PaX features that rely on random addresses are enabled on the task.
69984+ In addition to sanitizing this information and disabling other
69985+ dangerous sources of information, this option causes reads of sensitive
69986+ /proc/<pid> entries to be rejected where the file descriptor was opened
69987+ in a different task than the one performing the read. Such attempts are logged.
69988+ This option also limits argv/env strings for suid/sgid binaries
69989+ to 512KB to prevent a complete exhaustion of the stack entropy provided
69990+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
69991+ binaries to prevent alternative mmap layouts from being abused.
69992+
69993+ If you use PaX it is essential that you say Y here as it closes up
69994+ several holes that make full ASLR useless locally.
69995+
69996+
69997+config GRKERNSEC_KSTACKOVERFLOW
69998+ bool "Prevent kernel stack overflows"
69999+ default y if GRKERNSEC_CONFIG_AUTO
70000+ depends on !IA64 && 64BIT
70001+ help
70002+ If you say Y here, the kernel's process stacks will be allocated
70003+ with vmalloc instead of the kernel's default allocator. This
70004+ introduces guard pages that in combination with the alloca checking
70005+ of the STACKLEAK feature prevents all forms of kernel process stack
70006+ overflow abuse. Note that this is different from kernel stack
70007+ buffer overflows.
70008+
70009+config GRKERNSEC_BRUTE
70010+ bool "Deter exploit bruteforcing"
70011+ default y if GRKERNSEC_CONFIG_AUTO
70012+ help
70013+ If you say Y here, attempts to bruteforce exploits against forking
70014+ daemons such as apache or sshd, as well as against suid/sgid binaries
70015+ will be deterred. When a child of a forking daemon is killed by PaX
70016+ or crashes due to an illegal instruction or other suspicious signal,
70017+ the parent process will be delayed 30 seconds upon every subsequent
70018+ fork until the administrator is able to assess the situation and
70019+ restart the daemon.
70020+ In the suid/sgid case, the attempt is logged, the user has all their
70021+ existing instances of the suid/sgid binary terminated and will
70022+ be unable to execute any suid/sgid binaries for 15 minutes.
70023+
70024+ It is recommended that you also enable signal logging in the auditing
70025+ section so that logs are generated when a process triggers a suspicious
70026+ signal.
70027+ If the sysctl option is enabled, a sysctl option with name
70028+ "deter_bruteforce" is created.
70029+
70030+config GRKERNSEC_MODHARDEN
70031+ bool "Harden module auto-loading"
70032+ default y if GRKERNSEC_CONFIG_AUTO
70033+ depends on MODULES
70034+ help
70035+ If you say Y here, module auto-loading in response to use of some
70036+ feature implemented by an unloaded module will be restricted to
70037+ root users. Enabling this option helps defend against attacks
70038+ by unprivileged users who abuse the auto-loading behavior to
70039+ cause a vulnerable module to load that is then exploited.
70040+
70041+ If this option prevents a legitimate use of auto-loading for a
70042+ non-root user, the administrator can execute modprobe manually
70043+ with the exact name of the module mentioned in the alert log.
70044+ Alternatively, the administrator can add the module to the list
70045+ of modules loaded at boot by modifying init scripts.
70046+
70047+ Modification of init scripts will most likely be needed on
70048+ Ubuntu servers with encrypted home directory support enabled,
70049+ as the first non-root user logging in will cause the ecb(aes),
70050+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
70051+
70052+config GRKERNSEC_HIDESYM
70053+ bool "Hide kernel symbols"
70054+ default y if GRKERNSEC_CONFIG_AUTO
70055+ select PAX_USERCOPY_SLABS
70056+ help
70057+ If you say Y here, getting information on loaded modules, and
70058+ displaying all kernel symbols through a syscall will be restricted
70059+ to users with CAP_SYS_MODULE. For software compatibility reasons,
70060+ /proc/kallsyms will be restricted to the root user. The RBAC
70061+ system can hide that entry even from root.
70062+
70063+ This option also prevents leaking of kernel addresses through
70064+ several /proc entries.
70065+
70066+ Note that this option is only effective provided the following
70067+ conditions are met:
70068+ 1) The kernel using grsecurity is not precompiled by some distribution
70069+ 2) You have also enabled GRKERNSEC_DMESG
70070+ 3) You are using the RBAC system and hiding other files such as your
70071+ kernel image and System.map. Alternatively, enabling this option
70072+ causes the permissions on /boot, /lib/modules, and the kernel
70073+ source directory to change at compile time to prevent
70074+ reading by non-root users.
70075+ If the above conditions are met, this option will aid in providing a
70076+ useful protection against local kernel exploitation of overflows
70077+ and arbitrary read/write vulnerabilities.
70078+
70079+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
70080+ in addition to this feature.
70081+
70082+config GRKERNSEC_RANDSTRUCT
70083+ bool "Randomize layout of sensitive kernel structures"
70084+ default y if GRKERNSEC_CONFIG_AUTO
70085+ select GRKERNSEC_HIDESYM
70086+ select MODVERSIONS if MODULES
70087+ help
70088+ If you say Y here, the layouts of a number of sensitive kernel
70089+ structures (task, fs, cred, etc) and all structures composed entirely
70090+ of function pointers (aka "ops" structs) will be randomized at compile-time.
70091+ This can introduce the requirement of an additional infoleak
70092+ vulnerability for exploits targeting these structure types.
70093+
70094+ Enabling this feature will introduce some performance impact, slightly
70095+ increase memory usage, and prevent the use of forensic tools like
70096+ Volatility against the system (unless the kernel source tree, and with
70097+ it the randomization seed, is left uncleaned after kernel installation).
70098+
70099+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
70100+ It remains after a make clean to allow for external modules to be compiled
70101+ with the existing seed and will be removed by a make mrproper or
70102+ make distclean.
70103+
70104+ Note that the implementation requires gcc 4.6.4 or newer. You may need
70105+ to install the supporting headers explicitly in addition to the normal
70106+ gcc package.
70107+
70108+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
70109+ bool "Use cacheline-aware structure randomization"
70110+ depends on GRKERNSEC_RANDSTRUCT
70111+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
70112+ help
70113+ If you say Y here, the RANDSTRUCT randomization will make a best effort
70114+ at restricting randomization to cacheline-sized groups of elements. It
70115+ will further not randomize bitfields in structures. This reduces the
70116+ performance hit of RANDSTRUCT at the cost of weakened randomization.
70117+
70118+config GRKERNSEC_KERN_LOCKOUT
70119+ bool "Active kernel exploit response"
70120+ default y if GRKERNSEC_CONFIG_AUTO
70121+ depends on X86 || ARM || PPC || SPARC
70122+ help
70123+ If you say Y here, when a PaX alert is triggered due to suspicious
70124+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
70125+ or an OOPS occurs due to bad memory accesses, instead of just
70126+ terminating the offending process (and potentially allowing
70127+ a subsequent exploit from the same user), we will take one of two
70128+ actions:
70129+ If the user was root, we will panic the system
70130+ If the user was non-root, we will log the attempt, terminate
70131+ all processes owned by the user, then prevent them from creating
70132+ any new processes until the system is restarted
70133+ This deters repeated kernel exploitation/bruteforcing attempts
70134+ and is useful for later forensics.
70135+
70136+config GRKERNSEC_OLD_ARM_USERLAND
70137+ bool "Old ARM userland compatibility"
70138+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
70139+ help
70140+ If you say Y here, stubs of executable code to perform such operations
70141+ as "compare-exchange" will be placed at fixed locations in the ARM vector
70142+ table. This is unfortunately needed for old ARM userland meant to run
70143+ across a wide range of processors. Without this option enabled,
70144+ the get_tls and data memory barrier stubs will be emulated by the kernel,
70145+ which is enough for Linaro userlands or other userlands designed for v6
70146+ and newer ARM CPUs. It's recommended that you try without this option enabled
70147+ first, and only enable it if your userland does not boot (it will likely fail
70148+ at init time).
70149+
70150+endmenu
70151+menu "Role Based Access Control Options"
70152+depends on GRKERNSEC
70153+
70154+config GRKERNSEC_RBAC_DEBUG
70155+ bool
70156+
70157+config GRKERNSEC_NO_RBAC
70158+ bool "Disable RBAC system"
70159+ help
70160+ If you say Y here, the /dev/grsec device will be removed from the kernel,
70161+ preventing the RBAC system from being enabled. You should only say Y
70162+ here if you have no intention of using the RBAC system, so as to prevent
70163+ an attacker with root access from misusing the RBAC system to hide files
70164+ and processes when loadable module support and /dev/[k]mem have been
70165+ locked down.
70166+
70167+config GRKERNSEC_ACL_HIDEKERN
70168+ bool "Hide kernel processes"
70169+ help
70170+ If you say Y here, all kernel threads will be hidden to all
70171+ processes but those whose subject has the "view hidden processes"
70172+ flag.
70173+
70174+config GRKERNSEC_ACL_MAXTRIES
70175+ int "Maximum tries before password lockout"
70176+ default 3
70177+ help
70178+ This option enforces the maximum number of times a user can attempt
70179+ to authorize themselves with the grsecurity RBAC system before being
70180+ denied the ability to attempt authorization again for a specified time.
70181+ The lower the number, the harder it will be to brute-force a password.
70182+
70183+config GRKERNSEC_ACL_TIMEOUT
70184+ int "Time to wait after max password tries, in seconds"
70185+ default 30
70186+ help
70187+ This option specifies the time the user must wait after attempting to
70188+ authorize to the RBAC system with the maximum number of invalid
70189+ passwords. The higher the number, the harder it will be to brute-force
70190+ a password.
70191+
70192+endmenu
70193+menu "Filesystem Protections"
70194+depends on GRKERNSEC
70195+
70196+config GRKERNSEC_PROC
70197+ bool "Proc restrictions"
70198+ default y if GRKERNSEC_CONFIG_AUTO
70199+ help
70200+ If you say Y here, the permissions of the /proc filesystem
70201+ will be altered to enhance system security and privacy. You MUST
70202+ choose either a user only restriction or a user and group restriction.
70203+ Depending upon the option you choose, you can either restrict users to
70204+ see only the processes they themselves run, or choose a group that can
70205+ view all processes and files normally restricted to root if you choose
70206+ the "restrict to user only" option. NOTE: If you're running identd or
70207+ ntpd as a non-root user, you will have to run it as the group you
70208+ specify here.
70209+
70210+config GRKERNSEC_PROC_USER
70211+ bool "Restrict /proc to user only"
70212+ depends on GRKERNSEC_PROC
70213+ help
70214+ If you say Y here, non-root users will only be able to view their own
70215+ processes, and will be restricted from viewing network-related
70216+ information and from viewing kernel symbol and module information.
70217+
70218+config GRKERNSEC_PROC_USERGROUP
70219+ bool "Allow special group"
70220+ default y if GRKERNSEC_CONFIG_AUTO
70221+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
70222+ help
70223+ If you say Y here, you will be able to select a group that will be
70224+ able to view all processes and network-related information. If you've
70225+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
70226+ remain hidden. This option is useful if you want to run identd as
70227+ a non-root user. The group you select may also be chosen at boot time
70228+ via "grsec_proc_gid=" on the kernel commandline.
70229+
70230+config GRKERNSEC_PROC_GID
70231+ int "GID for special group"
70232+ depends on GRKERNSEC_PROC_USERGROUP
70233+ default 1001
70234+
70235+config GRKERNSEC_PROC_ADD
70236+ bool "Additional restrictions"
70237+ default y if GRKERNSEC_CONFIG_AUTO
70238+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
70239+ help
70240+ If you say Y here, additional restrictions will be placed on
70241+ /proc that keep normal users from viewing device information and
70242+ slabinfo information that could be useful for exploits.
70243+
70244+config GRKERNSEC_LINK
70245+ bool "Linking restrictions"
70246+ default y if GRKERNSEC_CONFIG_AUTO
70247+ help
70248+ If you say Y here, /tmp race exploits will be prevented, since users
70249+ will no longer be able to follow symlinks owned by other users in
70250+ world-writable +t directories (e.g. /tmp), unless the owner of the
70251+ symlink is the owner of the directory. Users will also not be
70252+ able to hardlink to files they do not own. If the sysctl option is
70253+ enabled, a sysctl option with name "linking_restrictions" is created.
70254+
70255+config GRKERNSEC_SYMLINKOWN
70256+ bool "Kernel-enforced SymlinksIfOwnerMatch"
70257+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
70258+ help
70259+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
70260+ that prevents it from being used as a security feature. As Apache
70261+ verifies the symlink by performing a stat() against the target of
70262+ the symlink before it is followed, an attacker can setup a symlink
70263+ to point to a same-owned file, then replace the symlink with one
70264+ that targets another user's file just after Apache "validates" the
70265+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
70266+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
70267+ will be in place for the group you specify. If the sysctl option
70268+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
70269+ created.
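
The enforcement is race-free because the kernel applies it atomically during path lookup; the rule itself is just an owner comparison. A sketch of the predicate using stat(2) uids; note that this userspace lstat()+stat() pairing is itself racy, which is exactly why the check belongs in the kernel:

#include <stdio.h>
#include <sys/stat.h>

/* SymlinksIfOwnerMatch as a predicate: following the link is allowed
 * only when the link's owner matches the target's owner. */
static int symlink_follow_allowed(const char *link_path)
{
	struct stat lnk, tgt;

	if (lstat(link_path, &lnk) != 0 || stat(link_path, &tgt) != 0)
		return 0;
	return lnk.st_uid == tgt.st_uid;
}

int main(int argc, char **argv)
{
	const char *p = argc > 1 ? argv[1] : "/tmp/example-link"; /* example */

	printf("%s: %s\n", p, symlink_follow_allowed(p) ? "follow" : "deny");
	return 0;
}
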
70270+
70271+config GRKERNSEC_SYMLINKOWN_GID
70272+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
70273+ depends on GRKERNSEC_SYMLINKOWN
70274+ default 1006
70275+ help
70276+ Setting this GID determines what group kernel-enforced
70277+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
70278+ is enabled, a sysctl option with name "symlinkown_gid" is created.
70279+
70280+config GRKERNSEC_FIFO
70281+ bool "FIFO restrictions"
70282+ default y if GRKERNSEC_CONFIG_AUTO
70283+ help
70284+ If you say Y here, users will not be able to write to FIFOs they don't
70285+ own in world-writable +t directories (e.g. /tmp), unless the owner of
70286+ the FIFO is also the owner of the directory it's held in. If the sysctl
70287+ option is enabled, a sysctl option with name "fifo_restrictions" is
70288+ created.
70289+
70290+config GRKERNSEC_SYSFS_RESTRICT
70291+ bool "Sysfs/debugfs restriction"
70292+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
70293+ depends on SYSFS
70294+ help
70295+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
70296+ any filesystem normally mounted under it (e.g. debugfs) will be
70297+ mostly accessible only by root. These filesystems generally provide access
70298+ to hardware and debug information that isn't appropriate for unprivileged
70299+ users of the system. Sysfs and debugfs have also become a large source
70300+ of new vulnerabilities, ranging from infoleaks to local compromise.
70301+ There has been very little oversight with an eye toward security involved
70302+ in adding new exporters of information to these filesystems, so their
70303+ use is discouraged.
70304+ For reasons of compatibility, a few directories have been whitelisted
70305+ for access by non-root users:
70306+ /sys/fs/selinux
70307+ /sys/fs/fuse
70308+ /sys/devices/system/cpu
70309+
70310+config GRKERNSEC_ROFS
70311+ bool "Runtime read-only mount protection"
70312+ depends on SYSCTL
70313+ help
70314+ If you say Y here, a sysctl option with name "romount_protect" will
70315+ be created. By setting this option to 1 at runtime, filesystems
70316+ will be protected in the following ways:
70317+ * No new writable mounts will be allowed
70318+ * Existing read-only mounts won't be able to be remounted read/write
70319+ * Write operations will be denied on all block devices
70320+ This option acts independently of grsec_lock: once it is set to 1,
70321+ it cannot be turned off. Therefore, please be mindful of the resulting
70322+ behavior if this option is enabled in an init script on a read-only
70323+ filesystem.
70324+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
70325+ and GRKERNSEC_IO should be enabled and module loading disabled via
70326+ config or at runtime.
70327+ This feature is mainly intended for secure embedded systems.
70328+
70329+
70330+config GRKERNSEC_DEVICE_SIDECHANNEL
70331+ bool "Eliminate stat/notify-based device sidechannels"
70332+ default y if GRKERNSEC_CONFIG_AUTO
70333+ help
70334+ If you say Y here, timing analyses on block or character
70335+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
70336+ will be thwarted for unprivileged users. If a process without
70337+ CAP_MKNOD stats such a device, the last access and last modify times
70338+ will match the device's create time. No access or modify events
70339+ will be triggered through inotify/dnotify/fanotify for such devices.
70340+ This feature will prevent attacks that may at a minimum
70341+ allow an attacker to determine the administrator's password length.
70342+
70343+config GRKERNSEC_CHROOT
70344+ bool "Chroot jail restrictions"
70345+ default y if GRKERNSEC_CONFIG_AUTO
70346+ help
70347+ If you say Y here, you will be able to choose several options that will
70348+ make breaking out of a chrooted jail much more difficult. If you
70349+ encounter no software incompatibilities with the following options, it
70350+ is recommended that you enable each one.
70351+
70352+ Note that the chroot restrictions are not intended to apply to "chroots"
70353+ to directories that are simple bind mounts of the global root filesystem.
70354+ For several other reasons, a user shouldn't expect any significant
70355+ security by performing such a chroot.
70356+
70357+config GRKERNSEC_CHROOT_MOUNT
70358+ bool "Deny mounts"
70359+ default y if GRKERNSEC_CONFIG_AUTO
70360+ depends on GRKERNSEC_CHROOT
70361+ help
70362+ If you say Y here, processes inside a chroot will not be able to
70363+ mount or remount filesystems. If the sysctl option is enabled, a
70364+ sysctl option with name "chroot_deny_mount" is created.
70365+
70366+config GRKERNSEC_CHROOT_DOUBLE
70367+ bool "Deny double-chroots"
70368+ default y if GRKERNSEC_CONFIG_AUTO
70369+ depends on GRKERNSEC_CHROOT
70370+ help
70371+ If you say Y here, processes inside a chroot will not be able to chroot
70372+ again outside the chroot. This is a widely used method of breaking
70373+ out of a chroot jail and should not be allowed. If the sysctl
70374+ option is enabled, a sysctl option with name
70375+ "chroot_deny_chroot" is created.
70376+
70377+config GRKERNSEC_CHROOT_PIVOT
70378+ bool "Deny pivot_root in chroot"
70379+ default y if GRKERNSEC_CONFIG_AUTO
70380+ depends on GRKERNSEC_CHROOT
70381+ help
70382+ If you say Y here, processes inside a chroot will not be able to use
70383+ a function called pivot_root() that was introduced in Linux 2.3.41. It
70384+ works similarly to chroot in that it changes the root filesystem. This
70385+ function could be misused in a chrooted process to attempt to break out
70386+ of the chroot, and therefore should not be allowed. If the sysctl
70387+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
70388+ created.
70389+
70390+config GRKERNSEC_CHROOT_CHDIR
70391+ bool "Enforce chdir(\"/\") on all chroots"
70392+ default y if GRKERNSEC_CONFIG_AUTO
70393+ depends on GRKERNSEC_CHROOT
70394+ help
70395+ If you say Y here, the current working directory of all newly-chrooted
70396+ applications will be set to the root directory of the chroot.
70397+ The man page on chroot(2) states:
70398+ Note that this call does not change the current working
70399+ directory, so that `.' can be outside the tree rooted at
70400+ `/'. In particular, the super-user can escape from a
70401+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
70402+
70403+ It is recommended that you say Y here, since it's not known to break
70404+ any software. If the sysctl option is enabled, a sysctl option with
70405+ name "chroot_enforce_chdir" is created.
70406+
70407+config GRKERNSEC_CHROOT_CHMOD
70408+ bool "Deny (f)chmod +s"
70409+ default y if GRKERNSEC_CONFIG_AUTO
70410+ depends on GRKERNSEC_CHROOT
70411+ help
70412+ If you say Y here, processes inside a chroot will not be able to chmod
70413+ or fchmod files to make them have suid or sgid bits. This protects
70414+ against another published method of breaking a chroot. If the sysctl
70415+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
70416+ created.
70417+
70418+config GRKERNSEC_CHROOT_FCHDIR
70419+ bool "Deny fchdir and fhandle out of chroot"
70420+ default y if GRKERNSEC_CONFIG_AUTO
70421+ depends on GRKERNSEC_CHROOT
70422+ help
70423+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
70424+ to a file descriptor of the chrooting process that points to a directory
70425+ outside the filesystem will be stopped. Additionally, this option prevents
70426+ use of the recently-created syscall for opening files by a guessable "file
70427+ handle" inside a chroot. If the sysctl option is enabled, a sysctl option
70428+ with name "chroot_deny_fchdir" is created.
70429+
70430+config GRKERNSEC_CHROOT_MKNOD
70431+ bool "Deny mknod"
70432+ default y if GRKERNSEC_CONFIG_AUTO
70433+ depends on GRKERNSEC_CHROOT
70434+ help
70435+ If you say Y here, processes inside a chroot will not be allowed to
70436+ mknod. The problem with using mknod inside a chroot is that it
70437+ would allow an attacker to create a device entry that is the same
70438+ as one on the physical root of your system, which could be anything
70439+ from the console device to a device for your hard drive (which
70440+ they could then use to wipe the drive or steal data). It is recommended
70441+ that you say Y here, unless you run into software incompatibilities.
70442+ If the sysctl option is enabled, a sysctl option with name
70443+ "chroot_deny_mknod" is created.
70444+
70445+config GRKERNSEC_CHROOT_SHMAT
70446+ bool "Deny shmat() out of chroot"
70447+ default y if GRKERNSEC_CONFIG_AUTO
70448+ depends on GRKERNSEC_CHROOT
70449+ help
70450+ If you say Y here, processes inside a chroot will not be able to attach
70451+ to shared memory segments that were created outside of the chroot jail.
70452+ It is recommended that you say Y here. If the sysctl option is enabled,
70453+ a sysctl option with name "chroot_deny_shmat" is created.
70454+
70455+config GRKERNSEC_CHROOT_UNIX
70456+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
70457+ default y if GRKERNSEC_CONFIG_AUTO
70458+ depends on GRKERNSEC_CHROOT
70459+ help
70460+ If you say Y here, processes inside a chroot will not be able to
70461+ connect to abstract (meaning not belonging to a filesystem) Unix
70462+ domain sockets that were bound outside of a chroot. It is recommended
70463+ that you say Y here. If the sysctl option is enabled, a sysctl option
70464+ with name "chroot_deny_unix" is created.
70465+
70466+config GRKERNSEC_CHROOT_FINDTASK
70467+ bool "Protect outside processes"
70468+ default y if GRKERNSEC_CONFIG_AUTO
70469+ depends on GRKERNSEC_CHROOT
70470+ help
70471+ If you say Y here, processes inside a chroot will not be able to
70472+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
70473+ getsid, or view any process outside of the chroot. If the sysctl
70474+ option is enabled, a sysctl option with name "chroot_findtask" is
70475+ created.
70476+
70477+config GRKERNSEC_CHROOT_NICE
70478+ bool "Restrict priority changes"
70479+ default y if GRKERNSEC_CONFIG_AUTO
70480+ depends on GRKERNSEC_CHROOT
70481+ help
70482+ If you say Y here, processes inside a chroot will not be able to raise
70483+ the priority of processes in the chroot, or alter the priority of
70484+ processes outside the chroot. This provides more security than simply
70485+ removing CAP_SYS_NICE from the process' capability set. If the
70486+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
70487+ is created.
70488+
70489+config GRKERNSEC_CHROOT_SYSCTL
70490+ bool "Deny sysctl writes"
70491+ default y if GRKERNSEC_CONFIG_AUTO
70492+ depends on GRKERNSEC_CHROOT
70493+ help
70494+ If you say Y here, an attacker in a chroot will not be able to
70495+ write to sysctl entries, either by sysctl(2) or through a /proc
70496+ interface. It is strongly recommended that you say Y here. If the
70497+ sysctl option is enabled, a sysctl option with name
70498+ "chroot_deny_sysctl" is created.
70499+
70500+config GRKERNSEC_CHROOT_CAPS
70501+ bool "Capability restrictions"
70502+ default y if GRKERNSEC_CONFIG_AUTO
70503+ depends on GRKERNSEC_CHROOT
70504+ help
70505+ If you say Y here, the capabilities on all processes within a
70506+ chroot jail will be lowered to stop module insertion, raw i/o,
70507+ system and net admin tasks, rebooting the system, modifying immutable
70508+ files, modifying IPC owned by another, and changing the system time.
70509+ This is left an option because it can break some apps. Disable this
70510+ if your chrooted apps are having problems performing those kinds of
70511+ tasks. If the sysctl option is enabled, a sysctl option with
70512+ name "chroot_caps" is created.
70513+
70514+config GRKERNSEC_CHROOT_INITRD
70515+ bool "Exempt initrd tasks from restrictions"
70516+ default y if GRKERNSEC_CONFIG_AUTO
70517+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
70518+ help
70519+ If you say Y here, tasks started prior to init will be exempted from
70520+ grsecurity's chroot restrictions. This option is mainly meant to
70521+ resolve Plymouth's performing privileged operations unnecessarily
70522+ in a chroot.
70523+
70524+endmenu
70525+menu "Kernel Auditing"
70526+depends on GRKERNSEC
70527+
70528+config GRKERNSEC_AUDIT_GROUP
70529+ bool "Single group for auditing"
70530+ help
70531+ If you say Y here, the exec and chdir logging features will only operate
70532+ on a group you specify. This option is recommended if you only want to
70533+ watch certain users instead of having a large amount of logs from the
70534+ entire system. If the sysctl option is enabled, a sysctl option with
70535+ name "audit_group" is created.
70536+
70537+config GRKERNSEC_AUDIT_GID
70538+ int "GID for auditing"
70539+ depends on GRKERNSEC_AUDIT_GROUP
70540+ default 1007
70541+
70542+config GRKERNSEC_EXECLOG
70543+ bool "Exec logging"
70544+ help
70545+ If you say Y here, all execve() calls will be logged (since the
70546+ other exec*() calls are frontends to execve(), all execution
70547+ will be logged). Useful for shell-servers that like to keep track
70548+ of their users. If the sysctl option is enabled, a sysctl option with
70549+ name "exec_logging" is created.
70550+ WARNING: When enabled, this option will produce a LOT of logs, especially
70551+ on an active system.
70552+
70553+config GRKERNSEC_RESLOG
70554+ bool "Resource logging"
70555+ default y if GRKERNSEC_CONFIG_AUTO
70556+ help
70557+ If you say Y here, all attempts to overstep resource limits will
70558+ be logged with the resource name, the requested size, and the current
70559+ limit. It is highly recommended that you say Y here. If the sysctl
70560+ option is enabled, a sysctl option with name "resource_logging" is
70561+ created. If the RBAC system is enabled, the sysctl value is ignored.
70562+
70563+config GRKERNSEC_CHROOT_EXECLOG
70564+ bool "Log execs within chroot"
70565+ help
70566+ If you say Y here, all executions inside a chroot jail will be logged
70567+ to syslog. This can cause a large amount of logs if certain
70568+ applications (e.g. djb's daemontools) are installed on the system, and
70569+ is therefore left as an option. If the sysctl option is enabled, a
70570+ sysctl option with name "chroot_execlog" is created.
70571+
70572+config GRKERNSEC_AUDIT_PTRACE
70573+ bool "Ptrace logging"
70574+ help
70575+ If you say Y here, all attempts to attach to a process via ptrace
70576+ will be logged. If the sysctl option is enabled, a sysctl option
70577+ with name "audit_ptrace" is created.
70578+
70579+config GRKERNSEC_AUDIT_CHDIR
70580+ bool "Chdir logging"
70581+ help
70582+ If you say Y here, all chdir() calls will be logged. If the sysctl
70583+ option is enabled, a sysctl option with name "audit_chdir" is created.
70584+
70585+config GRKERNSEC_AUDIT_MOUNT
70586+ bool "(Un)Mount logging"
70587+ help
70588+ If you say Y here, all mounts and unmounts will be logged. If the
70589+ sysctl option is enabled, a sysctl option with name "audit_mount" is
70590+ created.
70591+
70592+config GRKERNSEC_SIGNAL
70593+ bool "Signal logging"
70594+ default y if GRKERNSEC_CONFIG_AUTO
70595+ help
70596+ If you say Y here, certain important signals will be logged, such as
70597+ SIGSEGV, which will as a result inform you when an error in a program
70598+ occurred, which in some cases could mean a possible exploit attempt.
70599+ If the sysctl option is enabled, a sysctl option with name
70600+ "signal_logging" is created.
70601+
70602+config GRKERNSEC_FORKFAIL
70603+ bool "Fork failure logging"
70604+ help
70605+ If you say Y here, all failed fork() attempts will be logged.
70606+ This could suggest a fork bomb, or someone attempting to overstep
70607+ their process limit. If the sysctl option is enabled, a sysctl option
70608+ with name "forkfail_logging" is created.
70609+
70610+config GRKERNSEC_TIME
70611+ bool "Time change logging"
70612+ default y if GRKERNSEC_CONFIG_AUTO
70613+ help
70614+ If you say Y here, any changes of the system clock will be logged.
70615+ If the sysctl option is enabled, a sysctl option with name
70616+ "timechange_logging" is created.
70617+
70618+config GRKERNSEC_PROC_IPADDR
70619+ bool "/proc/<pid>/ipaddr support"
70620+ default y if GRKERNSEC_CONFIG_AUTO
70621+ help
70622+ If you say Y here, a new entry will be added to each /proc/<pid>
70623+ directory that contains the IP address of the person using the task.
70624+ The IP is carried across local TCP and AF_UNIX stream sockets.
70625+ This information can be useful for IDS/IPSes to perform remote response
70626+ to a local attack. The entry is readable by only the owner of the
70627+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
70628+ the RBAC system), and thus does not create privacy concerns.
70629+
70630+config GRKERNSEC_RWXMAP_LOG
70631+ bool 'Denied RWX mmap/mprotect logging'
70632+ default y if GRKERNSEC_CONFIG_AUTO
70633+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
70634+ help
70635+ If you say Y here, calls to mmap() and mprotect() with explicit
70636+ usage of PROT_WRITE and PROT_EXEC together will be logged when
70637+ denied by the PAX_MPROTECT feature. This feature will also
70638+ log other problematic scenarios that can occur when PAX_MPROTECT
70639+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
70640+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
70641+ is created.
70642+
70643+endmenu
70644+
70645+menu "Executable Protections"
70646+depends on GRKERNSEC
70647+
70648+config GRKERNSEC_DMESG
70649+ bool "Dmesg(8) restriction"
70650+ default y if GRKERNSEC_CONFIG_AUTO
70651+ help
70652+ If you say Y here, non-root users will not be able to use dmesg(8)
70653+ to view the contents of the kernel's circular log buffer.
70654+ The kernel's log buffer often contains kernel addresses and other
70655+ identifying information useful to an attacker in fingerprinting a
70656+ system for a targeted exploit.
70657+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
70658+ created.
70659+
70660+config GRKERNSEC_HARDEN_PTRACE
70661+ bool "Deter ptrace-based process snooping"
70662+ default y if GRKERNSEC_CONFIG_AUTO
70663+ help
70664+ If you say Y here, TTY sniffers and other malicious monitoring
70665+ programs implemented through ptrace will be defeated. If you
70666+ have been using the RBAC system, this option has already been
70667+ enabled for several years for all users, with the ability to make
70668+ fine-grained exceptions.
70669+
70670+ This option only affects the ability of non-root users to ptrace
70671+ processes that are not a descendent of the ptracing process.
70672+ This means that strace ./binary and gdb ./binary will still work,
70673+ but attaching to arbitrary processes will not. If the sysctl
70674+ option is enabled, a sysctl option with name "harden_ptrace" is
70675+ created.
70676+
70677+config GRKERNSEC_PTRACE_READEXEC
70678+ bool "Require read access to ptrace sensitive binaries"
70679+ default y if GRKERNSEC_CONFIG_AUTO
70680+ help
70681+ If you say Y here, unprivileged users will not be able to ptrace unreadable
70682+ binaries. This option is useful in environments that
70683+ remove the read bits (e.g. file mode 4711) from suid binaries to
70684+ prevent infoleaking of their contents. This option adds
70685+ consistency to the use of that file mode, as the binary could normally
70686+ be read out when run without privileges while ptracing.
70687+
70688+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
70689+ is created.
70690+
70691+config GRKERNSEC_SETXID
70692+ bool "Enforce consistent multithreaded privileges"
70693+ default y if GRKERNSEC_CONFIG_AUTO
70694+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
70695+ help
70696+ If you say Y here, a change from a root uid to a non-root uid
70697+ in a multithreaded application will cause the resulting uids,
70698+ gids, supplementary groups, and capabilities in that thread
70699+ to be propagated to the other threads of the process. In most
70700+ cases this is unnecessary, as glibc will emulate this behavior
70701+ on behalf of the application. Other libcs do not act in the
70702+ same way, allowing the other threads of the process to continue
70703+ running with root privileges. If the sysctl option is enabled,
70704+ a sysctl option with name "consistent_setxid" is created.
70705+
70706+config GRKERNSEC_HARDEN_IPC
70707+ bool "Disallow access to overly-permissive IPC objects"
70708+ default y if GRKERNSEC_CONFIG_AUTO
70709+ depends on SYSVIPC
70710+ help
70711+ If you say Y here, access to overly-permissive IPC objects (shared
70712+ memory, message queues, and semaphores) will be denied for processes
70713+ given the following criteria beyond normal permission checks:
70714+ 1) If the IPC object is world-accessible and the euid doesn't match
70715+ that of the creator or current uid for the IPC object
70716+ 2) If the IPC object is group-accessible and the egid doesn't
70717+ match that of the creator or current gid for the IPC object
70718+ It's a common error to grant too much permission to these objects,
70719+ with impact ranging from denial of service and information leaking to
70720+ privilege escalation. This feature was developed in response to
70721+ research by Tim Brown:
70722+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
70723+ who found hundreds of such insecure usages. Processes with
70724+ CAP_IPC_OWNER are still permitted to access these IPC objects.
70725+ If the sysctl option is enabled, a sysctl option with name
70726+ "harden_ipc" is created.
70727+
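
The two criteria reduce to a predicate over the object's mode bits and the caller's effective ids versus the creator/owner ids. A sketch with plain integers standing in for the kernel's ipc_perm fields; the helper name is illustrative:

#include <stdio.h>
#include <sys/stat.h>

/* HARDEN_IPC as a predicate: deny when the object is world- or
 * group-accessible and the caller's euid/egid matches neither the
 * creator nor the current owner. CAP_IPC_OWNER would bypass this. */
static int ipc_access_denied(unsigned int mode,
			     unsigned int euid, unsigned int egid,
			     unsigned int cuid, unsigned int uid,
			     unsigned int cgid, unsigned int gid)
{
	if ((mode & S_IRWXO) && euid != cuid && euid != uid)
		return 1;
	if ((mode & S_IRWXG) && egid != cgid && egid != gid)
		return 1;
	return 0;
}

int main(void)
{
	/* A 0666 segment created and owned by uid/gid 1000, probed by
	 * an unrelated uid/gid 1001: denied under the hardened rules. */
	printf("denied: %d\n", ipc_access_denied(0666, 1001, 1001,
						 1000, 1000, 1000, 1000));
	return 0;
}
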
70728+config GRKERNSEC_TPE
70729+ bool "Trusted Path Execution (TPE)"
70730+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
70731+ help
70732+ If you say Y here, you will be able to choose a gid to add to the
70733+ supplementary groups of users you want to mark as "untrusted."
70734+ These users will not be able to execute any files that are not in
70735+ root-owned directories writable only by root. If the sysctl option
70736+ is enabled, a sysctl option with name "tpe" is created.
70737+
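
The base policy reduces to a trust test on the directory holding the binary: owned by root and writable by nobody else. A sketch of that predicate; the helper name and test paths are illustrative, and the weaker rule added by GRKERNSEC_TPE_ALL below additionally accepts user-owned, non-group/world-writable directories:

#include <stdio.h>
#include <sys/stat.h>

/* Base TPE rule for untrusted users: execution is allowed only from
 * root-owned directories that are not group- or world-writable. */
static int tpe_dir_trusted(const char *dir)
{
	struct stat st;

	if (stat(dir, &st) != 0 || !S_ISDIR(st.st_mode))
		return 0;
	return st.st_uid == 0 && !(st.st_mode & (S_IWGRP | S_IWOTH));
}

int main(void)
{
	printf("/usr/bin: %d\n", tpe_dir_trusted("/usr/bin"));
	printf("/tmp:     %d\n", tpe_dir_trusted("/tmp"));
	return 0;
}
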
70738+config GRKERNSEC_TPE_ALL
70739+ bool "Partially restrict all non-root users"
70740+ depends on GRKERNSEC_TPE
70741+ help
70742+ If you say Y here, all non-root users will be covered under
70743+ a weaker TPE restriction. This is separate from, and in addition to,
70744+ the main TPE options that you have selected elsewhere. Thus, if a
70745+ "trusted" GID is chosen, this restriction applies to even that GID.
70746+ Under this restriction, all non-root users will only be allowed to
70747+ execute files in directories they own that are not group or
70748+ world-writable, or in directories owned by root and writable only by
70749+ root. If the sysctl option is enabled, a sysctl option with name
70750+ "tpe_restrict_all" is created.
70751+
70752+config GRKERNSEC_TPE_INVERT
70753+ bool "Invert GID option"
70754+ depends on GRKERNSEC_TPE
70755+ help
70756+	  If you say Y here, the group you specify in the TPE configuration will
70757+	  be the group for which TPE restrictions are *disabled*. This
70758+ option is useful if you want TPE restrictions to be applied to most
70759+ users on the system. If the sysctl option is enabled, a sysctl option
70760+ with name "tpe_invert" is created. Unlike other sysctl options, this
70761+ entry will default to on for backward-compatibility.
70762+
70763+config GRKERNSEC_TPE_GID
70764+ int
70765+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
70766+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
70767+
70768+config GRKERNSEC_TPE_UNTRUSTED_GID
70769+ int "GID for TPE-untrusted users"
70770+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
70771+ default 1005
70772+ help
70773+ Setting this GID determines what group TPE restrictions will be
70774+ *enabled* for. If the sysctl option is enabled, a sysctl option
70775+ with name "tpe_gid" is created.
70776+
70777+config GRKERNSEC_TPE_TRUSTED_GID
70778+ int "GID for TPE-trusted users"
70779+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
70780+ default 1005
70781+ help
70782+ Setting this GID determines what group TPE restrictions will be
70783+ *disabled* for. If the sysctl option is enabled, a sysctl option
70784+ with name "tpe_gid" is created.
70785+
70786+endmenu
70787+menu "Network Protections"
70788+depends on GRKERNSEC
70789+
70790+config GRKERNSEC_BLACKHOLE
70791+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
70792+ default y if GRKERNSEC_CONFIG_AUTO
70793+ depends on NET
70794+ help
70795+ If you say Y here, neither TCP resets nor ICMP
70796+ destination-unreachable packets will be sent in response to packets
70797+ sent to ports for which no associated listening process exists.
70798+	  This feature supports both IPv4 and IPv6 and exempts the
70799+ loopback interface from blackholing. Enabling this feature
70800+ makes a host more resilient to DoS attacks and reduces network
70801+ visibility against scanners.
70802+
70803+	  The blackhole feature, as implemented, is equivalent to the FreeBSD
70804+ blackhole feature, as it prevents RST responses to all packets, not
70805+ just SYNs. Under most application behavior this causes no
70806+ problems, but applications (like haproxy) may not close certain
70807+ connections in a way that cleanly terminates them on the remote
70808+ end, leaving the remote host in LAST_ACK state. Because of this
70809+ side-effect and to prevent intentional LAST_ACK DoSes, this
70810+ feature also adds automatic mitigation against such attacks.
70811+ The mitigation drastically reduces the amount of time a socket
70812+ can spend in LAST_ACK state. If you're using haproxy and not
70813+ all servers it connects to have this option enabled, consider
70814+ disabling this feature on the haproxy host.
70815+
70816+ If the sysctl option is enabled, two sysctl options with names
70817+ "ip_blackhole" and "lastack_retries" will be created.
70818+ While "ip_blackhole" takes the standard zero/non-zero on/off
70819+ toggle, "lastack_retries" uses the same kinds of values as
70820+ "tcp_retries1" and "tcp_retries2". The default value of 4
70821+ prevents a socket from lasting more than 45 seconds in LAST_ACK
70822+ state.
70823+
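Toggling the feature at runtime is an ordinary /proc write, shown below in C (requires root, this option, and Sysctl Support from the menu further down; it is equivalent to echoing 1 into the file):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/kernel/grsecurity/ip_blackhole", "w");

            if (!f) {
                    perror("ip_blackhole");
                    return 1;
            }
            fputs("1\n", f);
            return fclose(f) ? 1 : 0;
    }
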
70824+config GRKERNSEC_NO_SIMULT_CONNECT
70825+ bool "Disable TCP Simultaneous Connect"
70826+ default y if GRKERNSEC_CONFIG_AUTO
70827+ depends on NET
70828+ help
70829+ If you say Y here, a feature by Willy Tarreau will be enabled that
70830+ removes a weakness in Linux's strict implementation of TCP that
70831+ allows two clients to connect to each other without either entering
70832+ a listening state. The weakness allows an attacker to easily prevent
70833+ a client from connecting to a known server provided the source port
70834+ for the connection is guessed correctly.
70835+
70836+ As the weakness could be used to prevent an antivirus or IPS from
70837+ fetching updates, or prevent an SSL gateway from fetching a CRL,
70838+ it should be eliminated by enabling this option. Though Linux is
70839+	  one of the few operating systems supporting simultaneous connect, it
70840+ has no legitimate use in practice and is rarely supported by firewalls.
70841+
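The weakness is straightforward to reproduce on a stock kernel: two sockets, neither listening, each connect()ing to the other's bound port, can complete a TCP simultaneous open. A sketch follows (ports 40001/40002 and the 2-second timeout are arbitrary choices for the example; with this option enabled, the connect should fail instead):

    #include <arpa/inet.h>
    #include <fcntl.h>
    #include <netinet/in.h>
    #include <poll.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    static int bound_socket(unsigned short port)
    {
            struct sockaddr_in a = { .sin_family = AF_INET, .sin_port = htons(port) };
            int one = 1, s = socket(AF_INET, SOCK_STREAM, 0);

            inet_pton(AF_INET, "127.0.0.1", &a.sin_addr);
            setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
            bind(s, (struct sockaddr *) &a, sizeof(a));
            fcntl(s, F_SETFL, O_NONBLOCK);
            return s;
    }

    static void start_connect(int s, unsigned short port)
    {
            struct sockaddr_in a = { .sin_family = AF_INET, .sin_port = htons(port) };

            inet_pton(AF_INET, "127.0.0.1", &a.sin_addr);
            connect(s, (struct sockaddr *) &a, sizeof(a)); /* EINPROGRESS expected */
    }

    int main(void)
    {
            int err = 0, s1 = bound_socket(40001), s2 = bound_socket(40002);
            socklen_t len = sizeof(err);
            struct pollfd p = { .fd = s1, .events = POLLOUT };

            start_connect(s1, 40002);
            start_connect(s2, 40001);      /* note: no listen() anywhere */
            poll(&p, 1, 2000);
            getsockopt(s1, SOL_SOCKET, SO_ERROR, &err, &len);
            printf("simultaneous connect %s\n", err ? "failed" : "succeeded");
            close(s1);
            close(s2);
            return 0;
    }
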
70842+config GRKERNSEC_SOCKET
70843+ bool "Socket restrictions"
70844+ depends on NET
70845+ help
70846+ If you say Y here, you will be able to choose from several options.
70847+ If you assign a GID on your system and add it to the supplementary
70848+ groups of users you want to restrict socket access to, this patch
70849+	  will apply up to three restrictions, based on the option(s) you choose.
70850+
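Each restriction below keys off supplementary group membership. A userspace sketch of that test (the in-kernel check uses the kernel's own credential helpers; gid 1004 here is just the GRKERNSEC_SOCKET_ALL_GID default):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/types.h>

    static int in_group(gid_t gid)
    {
            gid_t groups[64];       /* enough for typical setups */
            int i, n = getgroups(64, groups);

            for (i = 0; i < n; i++)
                    if (groups[i] == gid)
                            return 1;
            return getegid() == gid;
    }

    int main(void)
    {
            printf("in gid 1004: %d\n", in_group(1004));
            return 0;
    }
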
70851+config GRKERNSEC_SOCKET_ALL
70852+ bool "Deny any sockets to group"
70853+ depends on GRKERNSEC_SOCKET
70854+ help
70855+	  If you say Y here, you will be able to choose a GID whose users will
70856+ be unable to connect to other hosts from your machine or run server
70857+ applications from your machine. If the sysctl option is enabled, a
70858+ sysctl option with name "socket_all" is created.
70859+
70860+config GRKERNSEC_SOCKET_ALL_GID
70861+ int "GID to deny all sockets for"
70862+ depends on GRKERNSEC_SOCKET_ALL
70863+ default 1004
70864+ help
70865+ Here you can choose the GID to disable socket access for. Remember to
70866+ add the users you want socket access disabled for to the GID
70867+ specified here. If the sysctl option is enabled, a sysctl option
70868+ with name "socket_all_gid" is created.
70869+
70870+config GRKERNSEC_SOCKET_CLIENT
70871+ bool "Deny client sockets to group"
70872+ depends on GRKERNSEC_SOCKET
70873+ help
70874+	  If you say Y here, you will be able to choose a GID whose users will
70875+ be unable to connect to other hosts from your machine, but will be
70876+ able to run servers. If this option is enabled, all users in the group
70877+	  you specify will have to use passive mode when initiating FTP transfers
70878+ from the shell on your machine. If the sysctl option is enabled, a
70879+ sysctl option with name "socket_client" is created.
70880+
70881+config GRKERNSEC_SOCKET_CLIENT_GID
70882+ int "GID to deny client sockets for"
70883+ depends on GRKERNSEC_SOCKET_CLIENT
70884+ default 1003
70885+ help
70886+ Here you can choose the GID to disable client socket access for.
70887+ Remember to add the users you want client socket access disabled for to
70888+ the GID specified here. If the sysctl option is enabled, a sysctl
70889+ option with name "socket_client_gid" is created.
70890+
70891+config GRKERNSEC_SOCKET_SERVER
70892+ bool "Deny server sockets to group"
70893+ depends on GRKERNSEC_SOCKET
70894+ help
70895+	  If you say Y here, you will be able to choose a GID whose users will
70896+ be unable to run server applications from your machine. If the sysctl
70897+ option is enabled, a sysctl option with name "socket_server" is created.
70898+
70899+config GRKERNSEC_SOCKET_SERVER_GID
70900+ int "GID to deny server sockets for"
70901+ depends on GRKERNSEC_SOCKET_SERVER
70902+ default 1002
70903+ help
70904+ Here you can choose the GID to disable server socket access for.
70905+ Remember to add the users you want server socket access disabled for to
70906+ the GID specified here. If the sysctl option is enabled, a sysctl
70907+ option with name "socket_server_gid" is created.
70908+
70909+endmenu
70910+
70911+menu "Physical Protections"
70912+depends on GRKERNSEC
70913+
70914+config GRKERNSEC_DENYUSB
70915+ bool "Deny new USB connections after toggle"
70916+ default y if GRKERNSEC_CONFIG_AUTO
70917+ depends on SYSCTL && USB_SUPPORT
70918+ help
70919+ If you say Y here, a new sysctl option with name "deny_new_usb"
70920+ will be created. Setting its value to 1 will prevent any new
70921+ USB devices from being recognized by the OS. Any attempted USB
70922+ device insertion will be logged. This option is intended to be
70923+ used against custom USB devices designed to exploit vulnerabilities
70924+ in various USB device drivers.
70925+
70926+ For greatest effectiveness, this sysctl should be set after any
70927+ relevant init scripts. This option is safe to enable in distros
70928+ as each user can choose whether or not to toggle the sysctl.
70929+
70930+config GRKERNSEC_DENYUSB_FORCE
70931+ bool "Reject all USB devices not connected at boot"
70932+ select USB
70933+ depends on GRKERNSEC_DENYUSB
70934+ help
70935+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
70936+ that doesn't involve a sysctl entry. This option should only be
70937+ enabled if you're sure you want to deny all new USB connections
70938+ at runtime and don't want to modify init scripts. This should not
70939+ be enabled by distros. It forces the core USB code to be built
70940+ into the kernel image so that all devices connected at boot time
70941+ can be recognized and new USB device connections can be prevented
70942+ prior to init running.
70943+
70944+endmenu
70945+
70946+menu "Sysctl Support"
70947+depends on GRKERNSEC && SYSCTL
70948+
70949+config GRKERNSEC_SYSCTL
70950+ bool "Sysctl support"
70951+ default y if GRKERNSEC_CONFIG_AUTO
70952+ help
70953+ If you say Y here, you will be able to change the options that
70954+ grsecurity runs with at bootup, without having to recompile your
70955+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
70956+ to enable (1) or disable (0) various features. All the sysctl entries
70957+ are mutable until the "grsec_lock" entry is set to a non-zero value.
70958+ All features enabled in the kernel configuration are disabled at boot
70959+ if you do not say Y to the "Turn on features by default" option.
70960+ All options should be set at startup, and the grsec_lock entry should
70961+ be set to a non-zero value after all the options are set.
70962+ *THIS IS EXTREMELY IMPORTANT*
70963+
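A minimal sketch of the recommended startup sequence (assumes root; the entries written are examples from this menu and exist only if the matching options are compiled in):

    #include <stdio.h>

    static int set_grsec(const char *name, const char *val)
    {
            char path[128];
            FILE *f;

            snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
            f = fopen(path, "w");
            if (!f)
                    return -1;
            fputs(val, f);
            return fclose(f);
    }

    int main(void)
    {
            set_grsec("deny_new_usb", "1"); /* from GRKERNSEC_DENYUSB, if enabled */
            set_grsec("grsec_lock", "1");   /* must come last: locks all entries */
            return 0;
    }
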
70964+config GRKERNSEC_SYSCTL_DISTRO
70965+ bool "Extra sysctl support for distro makers (READ HELP)"
70966+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
70967+ help
70968+ If you say Y here, additional sysctl options will be created
70969+ for features that affect processes running as root. Therefore,
70970+ it is critical when using this option that the grsec_lock entry be
70971+	  enabled after boot. Only distros that ship prebuilt kernel packages
70972+	  with this option enabled and that can ensure grsec_lock is set
70973+	  after boot should use this option.
70974+ *Failure to set grsec_lock after boot makes all grsec features
70975+ this option covers useless*
70976+
70977+ Currently this option creates the following sysctl entries:
70978+ "Disable Privileged I/O": "disable_priv_io"
70979+
70980+config GRKERNSEC_SYSCTL_ON
70981+ bool "Turn on features by default"
70982+ default y if GRKERNSEC_CONFIG_AUTO
70983+ depends on GRKERNSEC_SYSCTL
70984+ help
70985+	  If you say Y here, the features enabled in your kernel configuration
70986+	  will be active at boot time rather than starting disabled.
70987+	  It is recommended you say Y here unless
70988+ there is some reason you would want all sysctl-tunable features to
70989+ be disabled by default. As mentioned elsewhere, it is important
70990+ to enable the grsec_lock entry once you have finished modifying
70991+ the sysctl entries.
70992+
70993+endmenu
70994+menu "Logging Options"
70995+depends on GRKERNSEC
70996+
70997+config GRKERNSEC_FLOODTIME
70998+ int "Seconds in between log messages (minimum)"
70999+ default 10
71000+ help
71001+	  This option allows you to enforce a minimum number of seconds between
71002+	  grsecurity log messages. The default should be suitable for most
71003+	  people. If you choose to change it, pick a value small enough
71004+ to allow informative logs to be produced, but large enough to
71005+ prevent flooding.
71006+
71007+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
71008+ any rate limiting on grsecurity log messages.
71009+
71010+config GRKERNSEC_FLOODBURST
71011+ int "Number of messages in a burst (maximum)"
71012+ default 6
71013+ help
71014+ This option allows you to choose the maximum number of messages allowed
71015+	  within the flood time interval you chose in the previous option. The
71016+	  default should be suitable for most people. However, if you find that
71017+	  many of your logs are being suppressed as flooding, you may want to
71018+ raise this value.
71019+
71020+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
71021+ any rate limiting on grsecurity log messages.
71022+
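Taken together, the two options describe a fixed-window rate limiter. A userspace sketch of the semantics (an illustration only, not the patch's logging code):

    #include <stdio.h>
    #include <time.h>

    /* At most `burst` messages per `floodtime`-second window; 0/0 disables. */
    static int should_log(int floodtime, int burst)
    {
            static time_t window;
            static int count;
            time_t now = time(NULL);

            if (!floodtime && !burst)
                    return 1;
            if (now - window >= floodtime) {
                    window = now;
                    count = 0;
            }
            return count++ < burst;
    }

    int main(void)
    {
            int i, logged = 0;

            for (i = 0; i < 20; i++)
                    logged += should_log(10, 6);
            printf("%d of 20 logged\n", logged); /* expect 6 with the defaults */
            return 0;
    }
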
71023+endmenu
71024diff --git a/grsecurity/Makefile b/grsecurity/Makefile
71025new file mode 100644
71026index 0000000..30ababb
71027--- /dev/null
71028+++ b/grsecurity/Makefile
71029@@ -0,0 +1,54 @@
71030+# grsecurity – access control and security hardening for Linux
71031+# All code in this directory and various hooks located throughout the Linux kernel are
71032+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
71033+# http://www.grsecurity.net spender@grsecurity.net
71034+#
71035+# This program is free software; you can redistribute it and/or
71036+# modify it under the terms of the GNU General Public License version 2
71037+# as published by the Free Software Foundation.
71038+#
71039+# This program is distributed in the hope that it will be useful,
71040+# but WITHOUT ANY WARRANTY; without even the implied warranty of
71041+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
71042+# GNU General Public License for more details.
71043+#
71044+# You should have received a copy of the GNU General Public License
71045+# along with this program; if not, write to the Free Software
71046+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
71047+
71048+KBUILD_CFLAGS += -Werror
71049+
71050+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
71051+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
71052+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
71053+ grsec_usb.o grsec_ipc.o grsec_proc.o
71054+
71055+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
71056+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
71057+ gracl_learn.o grsec_log.o gracl_policy.o
71058+ifdef CONFIG_COMPAT
71059+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
71060+endif
71061+
71062+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
71063+
71064+ifdef CONFIG_NET
71065+obj-y += grsec_sock.o
71066+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
71067+endif
71068+
71069+ifndef CONFIG_GRKERNSEC
71070+obj-y += grsec_disabled.o
71071+endif
71072+
71073+ifdef CONFIG_GRKERNSEC_HIDESYM
71074+extra-y := grsec_hidesym.o
71075+$(obj)/grsec_hidesym.o:
71076+ @-chmod -f 500 /boot
71077+ @-chmod -f 500 /lib/modules
71078+ @-chmod -f 500 /lib64/modules
71079+ @-chmod -f 500 /lib32/modules
71080+ @-chmod -f 700 .
71081+ @-chmod -f 700 $(objtree)
71082+ @echo ' grsec: protected kernel image paths'
71083+endif
71084diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
71085new file mode 100644
71086index 0000000..58223f6
71087--- /dev/null
71088+++ b/grsecurity/gracl.c
71089@@ -0,0 +1,2702 @@
71090+#include <linux/kernel.h>
71091+#include <linux/module.h>
71092+#include <linux/sched.h>
71093+#include <linux/mm.h>
71094+#include <linux/file.h>
71095+#include <linux/fs.h>
71096+#include <linux/namei.h>
71097+#include <linux/mount.h>
71098+#include <linux/tty.h>
71099+#include <linux/proc_fs.h>
71100+#include <linux/lglock.h>
71101+#include <linux/slab.h>
71102+#include <linux/vmalloc.h>
71103+#include <linux/types.h>
71104+#include <linux/sysctl.h>
71105+#include <linux/netdevice.h>
71106+#include <linux/ptrace.h>
71107+#include <linux/gracl.h>
71108+#include <linux/gralloc.h>
71109+#include <linux/security.h>
71110+#include <linux/grinternal.h>
71111+#include <linux/pid_namespace.h>
71112+#include <linux/stop_machine.h>
71113+#include <linux/fdtable.h>
71114+#include <linux/percpu.h>
71115+#include <linux/lglock.h>
71116+#include <linux/hugetlb.h>
71117+#include <linux/posix-timers.h>
71118+#include <linux/prefetch.h>
71119+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
71120+#include <linux/magic.h>
71121+#include <linux/pagemap.h>
71122+#include "../fs/btrfs/async-thread.h"
71123+#include "../fs/btrfs/ctree.h"
71124+#include "../fs/btrfs/btrfs_inode.h"
71125+#endif
71126+#include "../fs/mount.h"
71127+
71128+#include <asm/uaccess.h>
71129+#include <asm/errno.h>
71130+#include <asm/mman.h>
71131+
71132+#define FOR_EACH_ROLE_START(role) \
71133+ role = running_polstate.role_list; \
71134+ while (role) {
71135+
71136+#define FOR_EACH_ROLE_END(role) \
71137+ role = role->prev; \
71138+ }
71139+
71140+extern struct path gr_real_root;
71141+
71142+static struct gr_policy_state running_polstate;
71143+struct gr_policy_state *polstate = &running_polstate;
71144+extern struct gr_alloc_state *current_alloc_state;
71145+
71146+extern char *gr_shared_page[4];
71147+DEFINE_RWLOCK(gr_inode_lock);
71148+
71149+static unsigned int gr_status __read_only = GR_STATUS_INIT;
71150+
71151+#ifdef CONFIG_NET
71152+extern struct vfsmount *sock_mnt;
71153+#endif
71154+
71155+extern struct vfsmount *pipe_mnt;
71156+extern struct vfsmount *shm_mnt;
71157+
71158+#ifdef CONFIG_HUGETLBFS
71159+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
71160+#endif
71161+
71162+extern u16 acl_sp_role_value;
71163+extern struct acl_object_label *fakefs_obj_rw;
71164+extern struct acl_object_label *fakefs_obj_rwx;
71165+
71166+int gr_acl_is_enabled(void)
71167+{
71168+ return (gr_status & GR_READY);
71169+}
71170+
71171+void gr_enable_rbac_system(void)
71172+{
71173+ pax_open_kernel();
71174+ gr_status |= GR_READY;
71175+ pax_close_kernel();
71176+}
71177+
71178+int gr_rbac_disable(void *unused)
71179+{
71180+ pax_open_kernel();
71181+ gr_status &= ~GR_READY;
71182+ pax_close_kernel();
71183+
71184+ return 0;
71185+}
71186+
71187+static inline dev_t __get_dev(const struct dentry *dentry)
71188+{
71189+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
71190+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
71191+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
71192+ else
71193+#endif
71194+ return dentry->d_sb->s_dev;
71195+}
71196+
71197+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
71198+{
71199+ return __get_dev(dentry);
71200+}
71201+
71202+static char gr_task_roletype_to_char(struct task_struct *task)
71203+{
71204+ switch (task->role->roletype &
71205+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
71206+ GR_ROLE_SPECIAL)) {
71207+ case GR_ROLE_DEFAULT:
71208+ return 'D';
71209+ case GR_ROLE_USER:
71210+ return 'U';
71211+ case GR_ROLE_GROUP:
71212+ return 'G';
71213+ case GR_ROLE_SPECIAL:
71214+ return 'S';
71215+ }
71216+
71217+ return 'X';
71218+}
71219+
71220+char gr_roletype_to_char(void)
71221+{
71222+ return gr_task_roletype_to_char(current);
71223+}
71224+
71225+__inline__ int
71226+gr_acl_tpe_check(void)
71227+{
71228+ if (unlikely(!(gr_status & GR_READY)))
71229+ return 0;
71230+ if (current->role->roletype & GR_ROLE_TPE)
71231+ return 1;
71232+ else
71233+ return 0;
71234+}
71235+
71236+int
71237+gr_handle_rawio(const struct inode *inode)
71238+{
71239+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
71240+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
71241+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
71242+ !capable(CAP_SYS_RAWIO))
71243+ return 1;
71244+#endif
71245+ return 0;
71246+}
71247+
71248+int
71249+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
71250+{
71251+ if (likely(lena != lenb))
71252+ return 0;
71253+
71254+ return !memcmp(a, b, lena);
71255+}
71256+
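/* prepend() and the helpers below build pathnames right to left: the
 * cursor starts at the end of the buffer and each component is copied
 * in front of the previous one ("" -> "/name" -> "/dir/name"),
 * mirroring the technique used by fs/dcache.c. */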
71257+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
71258+{
71259+ *buflen -= namelen;
71260+ if (*buflen < 0)
71261+ return -ENAMETOOLONG;
71262+ *buffer -= namelen;
71263+ memcpy(*buffer, str, namelen);
71264+ return 0;
71265+}
71266+
71267+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
71268+{
71269+ return prepend(buffer, buflen, name->name, name->len);
71270+}
71271+
71272+static int prepend_path(const struct path *path, struct path *root,
71273+ char **buffer, int *buflen)
71274+{
71275+ struct dentry *dentry = path->dentry;
71276+ struct vfsmount *vfsmnt = path->mnt;
71277+ struct mount *mnt = real_mount(vfsmnt);
71278+ bool slash = false;
71279+ int error = 0;
71280+
71281+ while (dentry != root->dentry || vfsmnt != root->mnt) {
71282+ struct dentry * parent;
71283+
71284+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
71285+ /* Global root? */
71286+ if (!mnt_has_parent(mnt)) {
71287+ goto out;
71288+ }
71289+ dentry = mnt->mnt_mountpoint;
71290+ mnt = mnt->mnt_parent;
71291+ vfsmnt = &mnt->mnt;
71292+ continue;
71293+ }
71294+ parent = dentry->d_parent;
71295+ prefetch(parent);
71296+ spin_lock(&dentry->d_lock);
71297+ error = prepend_name(buffer, buflen, &dentry->d_name);
71298+ spin_unlock(&dentry->d_lock);
71299+ if (!error)
71300+ error = prepend(buffer, buflen, "/", 1);
71301+ if (error)
71302+ break;
71303+
71304+ slash = true;
71305+ dentry = parent;
71306+ }
71307+
71308+out:
71309+ if (!error && !slash)
71310+ error = prepend(buffer, buflen, "/", 1);
71311+
71312+ return error;
71313+}
71314+
71315+/* this must be called with mount_lock and rename_lock held */
71316+
71317+static char *__our_d_path(const struct path *path, struct path *root,
71318+ char *buf, int buflen)
71319+{
71320+ char *res = buf + buflen;
71321+ int error;
71322+
71323+ prepend(&res, &buflen, "\0", 1);
71324+ error = prepend_path(path, root, &res, &buflen);
71325+ if (error)
71326+ return ERR_PTR(error);
71327+
71328+ return res;
71329+}
71330+
71331+static char *
71332+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
71333+{
71334+ char *retval;
71335+
71336+ retval = __our_d_path(path, root, buf, buflen);
71337+ if (unlikely(IS_ERR(retval)))
71338+ retval = strcpy(buf, "<path too long>");
71339+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
71340+ retval[1] = '\0';
71341+
71342+ return retval;
71343+}
71344+
71345+static char *
71346+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
71347+ char *buf, int buflen)
71348+{
71349+ struct path path;
71350+ char *res;
71351+
71352+ path.dentry = (struct dentry *)dentry;
71353+ path.mnt = (struct vfsmount *)vfsmnt;
71354+
71355+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
71356+ by the RBAC system */
71357+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
71358+
71359+ return res;
71360+}
71361+
71362+static char *
71363+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
71364+ char *buf, int buflen)
71365+{
71366+ char *res;
71367+ struct path path;
71368+ struct path root;
71369+ struct task_struct *reaper = init_pid_ns.child_reaper;
71370+
71371+ path.dentry = (struct dentry *)dentry;
71372+ path.mnt = (struct vfsmount *)vfsmnt;
71373+
71374+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
71375+ get_fs_root(reaper->fs, &root);
71376+
71377+ read_seqlock_excl(&mount_lock);
71378+ write_seqlock(&rename_lock);
71379+ res = gen_full_path(&path, &root, buf, buflen);
71380+ write_sequnlock(&rename_lock);
71381+ read_sequnlock_excl(&mount_lock);
71382+
71383+ path_put(&root);
71384+ return res;
71385+}
71386+
71387+char *
71388+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
71389+{
71390+ char *ret;
71391+ read_seqlock_excl(&mount_lock);
71392+ write_seqlock(&rename_lock);
71393+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
71394+ PAGE_SIZE);
71395+ write_sequnlock(&rename_lock);
71396+ read_sequnlock_excl(&mount_lock);
71397+ return ret;
71398+}
71399+
71400+static char *
71401+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
71402+{
71403+ char *ret;
71404+ char *buf;
71405+ int buflen;
71406+
71407+ read_seqlock_excl(&mount_lock);
71408+ write_seqlock(&rename_lock);
71409+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
71410+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
71411+ buflen = (int)(ret - buf);
71412+ if (buflen >= 5)
71413+ prepend(&ret, &buflen, "/proc", 5);
71414+ else
71415+ ret = strcpy(buf, "<path too long>");
71416+ write_sequnlock(&rename_lock);
71417+ read_sequnlock_excl(&mount_lock);
71418+ return ret;
71419+}
71420+
71421+char *
71422+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
71423+{
71424+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
71425+ PAGE_SIZE);
71426+}
71427+
71428+char *
71429+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
71430+{
71431+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
71432+ PAGE_SIZE);
71433+}
71434+
71435+char *
71436+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
71437+{
71438+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
71439+ PAGE_SIZE);
71440+}
71441+
71442+char *
71443+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
71444+{
71445+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
71446+ PAGE_SIZE);
71447+}
71448+
71449+char *
71450+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
71451+{
71452+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
71453+ PAGE_SIZE);
71454+}
71455+
71456+__inline__ __u32
71457+to_gr_audit(const __u32 reqmode)
71458+{
71459+ /* masks off auditable permission flags, then shifts them to create
71460+ auditing flags, and adds the special case of append auditing if
71461+ we're requesting write */
71462+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
71463+}
71464+
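/* Role resolution order for the lookup below: first a user role (or a
 * user domain containing this uid); if the winning role carries an
 * allowed-IP list that does not cover the task's saved IP, fall back
 * to a group role (or group domain), and finally to the default
 * role. */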
71465+struct acl_role_label *
71466+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
71467+ const gid_t gid)
71468+{
71469+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
71470+ struct acl_role_label *match;
71471+ struct role_allowed_ip *ipp;
71472+ unsigned int x;
71473+ u32 curr_ip = task->signal->saved_ip;
71474+
71475+ match = state->acl_role_set.r_hash[index];
71476+
71477+ while (match) {
71478+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
71479+ for (x = 0; x < match->domain_child_num; x++) {
71480+ if (match->domain_children[x] == uid)
71481+ goto found;
71482+ }
71483+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
71484+ break;
71485+ match = match->next;
71486+ }
71487+found:
71488+ if (match == NULL) {
71489+ try_group:
71490+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
71491+ match = state->acl_role_set.r_hash[index];
71492+
71493+ while (match) {
71494+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
71495+ for (x = 0; x < match->domain_child_num; x++) {
71496+ if (match->domain_children[x] == gid)
71497+ goto found2;
71498+ }
71499+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
71500+ break;
71501+ match = match->next;
71502+ }
71503+found2:
71504+ if (match == NULL)
71505+ match = state->default_role;
71506+ if (match->allowed_ips == NULL)
71507+ return match;
71508+ else {
71509+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
71510+ if (likely
71511+ ((ntohl(curr_ip) & ipp->netmask) ==
71512+ (ntohl(ipp->addr) & ipp->netmask)))
71513+ return match;
71514+ }
71515+ match = state->default_role;
71516+ }
71517+ } else if (match->allowed_ips == NULL) {
71518+ return match;
71519+ } else {
71520+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
71521+ if (likely
71522+ ((ntohl(curr_ip) & ipp->netmask) ==
71523+ (ntohl(ipp->addr) & ipp->netmask)))
71524+ return match;
71525+ }
71526+ goto try_group;
71527+ }
71528+
71529+ return match;
71530+}
71531+
71532+static struct acl_role_label *
71533+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
71534+ const gid_t gid)
71535+{
71536+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
71537+}
71538+
71539+struct acl_subject_label *
71540+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
71541+ const struct acl_role_label *role)
71542+{
71543+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
71544+ struct acl_subject_label *match;
71545+
71546+ match = role->subj_hash[index];
71547+
71548+ while (match && (match->inode != ino || match->device != dev ||
71549+ (match->mode & GR_DELETED))) {
71550+ match = match->next;
71551+ }
71552+
71553+ if (match && !(match->mode & GR_DELETED))
71554+ return match;
71555+ else
71556+ return NULL;
71557+}
71558+
71559+struct acl_subject_label *
71560+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
71561+ const struct acl_role_label *role)
71562+{
71563+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
71564+ struct acl_subject_label *match;
71565+
71566+ match = role->subj_hash[index];
71567+
71568+ while (match && (match->inode != ino || match->device != dev ||
71569+ !(match->mode & GR_DELETED))) {
71570+ match = match->next;
71571+ }
71572+
71573+ if (match && (match->mode & GR_DELETED))
71574+ return match;
71575+ else
71576+ return NULL;
71577+}
71578+
71579+static struct acl_object_label *
71580+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
71581+ const struct acl_subject_label *subj)
71582+{
71583+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
71584+ struct acl_object_label *match;
71585+
71586+ match = subj->obj_hash[index];
71587+
71588+ while (match && (match->inode != ino || match->device != dev ||
71589+ (match->mode & GR_DELETED))) {
71590+ match = match->next;
71591+ }
71592+
71593+ if (match && !(match->mode & GR_DELETED))
71594+ return match;
71595+ else
71596+ return NULL;
71597+}
71598+
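/* Create-time variant: prefer an object entry flagged GR_DELETED (the
 * policy entry for a pathname whose file was deleted and is now being
 * re-created), then fall back to a live entry for the same
 * inode/device pair. */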
71599+static struct acl_object_label *
71600+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
71601+ const struct acl_subject_label *subj)
71602+{
71603+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
71604+ struct acl_object_label *match;
71605+
71606+ match = subj->obj_hash[index];
71607+
71608+ while (match && (match->inode != ino || match->device != dev ||
71609+ !(match->mode & GR_DELETED))) {
71610+ match = match->next;
71611+ }
71612+
71613+ if (match && (match->mode & GR_DELETED))
71614+ return match;
71615+
71616+ match = subj->obj_hash[index];
71617+
71618+ while (match && (match->inode != ino || match->device != dev ||
71619+ (match->mode & GR_DELETED))) {
71620+ match = match->next;
71621+ }
71622+
71623+ if (match && !(match->mode & GR_DELETED))
71624+ return match;
71625+ else
71626+ return NULL;
71627+}
71628+
71629+struct name_entry *
71630+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
71631+{
71632+ unsigned int len = strlen(name);
71633+ unsigned int key = full_name_hash(name, len);
71634+ unsigned int index = key % state->name_set.n_size;
71635+ struct name_entry *match;
71636+
71637+ match = state->name_set.n_hash[index];
71638+
71639+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
71640+ match = match->next;
71641+
71642+ return match;
71643+}
71644+
71645+static struct name_entry *
71646+lookup_name_entry(const char *name)
71647+{
71648+ return __lookup_name_entry(&running_polstate, name);
71649+}
71650+
71651+static struct name_entry *
71652+lookup_name_entry_create(const char *name)
71653+{
71654+ unsigned int len = strlen(name);
71655+ unsigned int key = full_name_hash(name, len);
71656+ unsigned int index = key % running_polstate.name_set.n_size;
71657+ struct name_entry *match;
71658+
71659+ match = running_polstate.name_set.n_hash[index];
71660+
71661+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
71662+ !match->deleted))
71663+ match = match->next;
71664+
71665+ if (match && match->deleted)
71666+ return match;
71667+
71668+ match = running_polstate.name_set.n_hash[index];
71669+
71670+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
71671+ match->deleted))
71672+ match = match->next;
71673+
71674+ if (match && !match->deleted)
71675+ return match;
71676+ else
71677+ return NULL;
71678+}
71679+
71680+static struct inodev_entry *
71681+lookup_inodev_entry(const ino_t ino, const dev_t dev)
71682+{
71683+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
71684+ struct inodev_entry *match;
71685+
71686+ match = running_polstate.inodev_set.i_hash[index];
71687+
71688+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
71689+ match = match->next;
71690+
71691+ return match;
71692+}
71693+
71694+void
71695+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
71696+{
71697+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
71698+ state->inodev_set.i_size);
71699+ struct inodev_entry **curr;
71700+
71701+ entry->prev = NULL;
71702+
71703+ curr = &state->inodev_set.i_hash[index];
71704+ if (*curr != NULL)
71705+ (*curr)->prev = entry;
71706+
71707+ entry->next = *curr;
71708+ *curr = entry;
71709+
71710+ return;
71711+}
71712+
71713+static void
71714+insert_inodev_entry(struct inodev_entry *entry)
71715+{
71716+ __insert_inodev_entry(&running_polstate, entry);
71717+}
71718+
71719+void
71720+insert_acl_obj_label(struct acl_object_label *obj,
71721+ struct acl_subject_label *subj)
71722+{
71723+ unsigned int index =
71724+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
71725+ struct acl_object_label **curr;
71726+
71727+ obj->prev = NULL;
71728+
71729+ curr = &subj->obj_hash[index];
71730+ if (*curr != NULL)
71731+ (*curr)->prev = obj;
71732+
71733+ obj->next = *curr;
71734+ *curr = obj;
71735+
71736+ return;
71737+}
71738+
71739+void
71740+insert_acl_subj_label(struct acl_subject_label *obj,
71741+ struct acl_role_label *role)
71742+{
71743+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
71744+ struct acl_subject_label **curr;
71745+
71746+ obj->prev = NULL;
71747+
71748+ curr = &role->subj_hash[index];
71749+ if (*curr != NULL)
71750+ (*curr)->prev = obj;
71751+
71752+ obj->next = *curr;
71753+ *curr = obj;
71754+
71755+ return;
71756+}
71757+
71758+/* derived from glibc fnmatch(); returns 0: match, 1: no match */
71759+
71760+static int
71761+glob_match(const char *p, const char *n)
71762+{
71763+ char c;
71764+
71765+ while ((c = *p++) != '\0') {
71766+ switch (c) {
71767+ case '?':
71768+ if (*n == '\0')
71769+ return 1;
71770+ else if (*n == '/')
71771+ return 1;
71772+ break;
71773+ case '\\':
71774+ if (*n != c)
71775+ return 1;
71776+ break;
71777+ case '*':
71778+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
71779+ if (*n == '/')
71780+ return 1;
71781+ else if (c == '?') {
71782+ if (*n == '\0')
71783+ return 1;
71784+ else
71785+ ++n;
71786+ }
71787+ }
71788+ if (c == '\0') {
71789+ return 0;
71790+ } else {
71791+ const char *endp;
71792+
71793+ if ((endp = strchr(n, '/')) == NULL)
71794+ endp = n + strlen(n);
71795+
71796+ if (c == '[') {
71797+ for (--p; n < endp; ++n)
71798+ if (!glob_match(p, n))
71799+ return 0;
71800+ } else if (c == '/') {
71801+ while (*n != '\0' && *n != '/')
71802+ ++n;
71803+ if (*n == '/' && !glob_match(p, n + 1))
71804+ return 0;
71805+ } else {
71806+ for (--p; n < endp; ++n)
71807+ if (*n == c && !glob_match(p, n))
71808+ return 0;
71809+ }
71810+
71811+ return 1;
71812+ }
71813+ case '[':
71814+ {
71815+ int not;
71816+ char cold;
71817+
71818+ if (*n == '\0' || *n == '/')
71819+ return 1;
71820+
71821+ not = (*p == '!' || *p == '^');
71822+ if (not)
71823+ ++p;
71824+
71825+ c = *p++;
71826+ for (;;) {
71827+ unsigned char fn = (unsigned char)*n;
71828+
71829+ if (c == '\0')
71830+ return 1;
71831+ else {
71832+ if (c == fn)
71833+ goto matched;
71834+ cold = c;
71835+ c = *p++;
71836+
71837+ if (c == '-' && *p != ']') {
71838+ unsigned char cend = *p++;
71839+
71840+ if (cend == '\0')
71841+ return 1;
71842+
71843+ if (cold <= fn && fn <= cend)
71844+ goto matched;
71845+
71846+ c = *p++;
71847+ }
71848+ }
71849+
71850+ if (c == ']')
71851+ break;
71852+ }
71853+ if (!not)
71854+ return 1;
71855+ break;
71856+ matched:
71857+ while (c != ']') {
71858+ if (c == '\0')
71859+ return 1;
71860+
71861+ c = *p++;
71862+ }
71863+ if (not)
71864+ return 1;
71865+ }
71866+ break;
71867+ default:
71868+ if (c != *n)
71869+ return 1;
71870+ }
71871+
71872+ ++n;
71873+ }
71874+
71875+ if (*n == '\0')
71876+ return 0;
71877+
71878+ if (*n == '/')
71879+ return 0;
71880+
71881+ return 1;
71882+}
71883+
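/* Worked examples for the matcher above (hypothetical calls, since the
 * function is static to this file):
 *   glob_match("*.log", "error.log")         == 0   match
 *   glob_match("a?c", "abc")                 == 0   match
 *   glob_match("/tmp/*.log", "/tmp/a/b.log") == 1   no match; '*' and
 *   '?' do not match across a '/' in the middle of a pattern. */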
71884+static struct acl_object_label *
71885+chk_glob_label(struct acl_object_label *globbed,
71886+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
71887+{
71888+ struct acl_object_label *tmp;
71889+
71890+ if (*path == NULL)
71891+ *path = gr_to_filename_nolock(dentry, mnt);
71892+
71893+ tmp = globbed;
71894+
71895+ while (tmp) {
71896+ if (!glob_match(tmp->filename, *path))
71897+ return tmp;
71898+ tmp = tmp->next;
71899+ }
71900+
71901+ return NULL;
71902+}
71903+
71904+static struct acl_object_label *
71905+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
71906+ const ino_t curr_ino, const dev_t curr_dev,
71907+ const struct acl_subject_label *subj, char **path, const int checkglob)
71908+{
71909+ struct acl_subject_label *tmpsubj;
71910+ struct acl_object_label *retval;
71911+ struct acl_object_label *retval2;
71912+
71913+ tmpsubj = (struct acl_subject_label *) subj;
71914+ read_lock(&gr_inode_lock);
71915+ do {
71916+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
71917+ if (retval) {
71918+ if (checkglob && retval->globbed) {
71919+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
71920+ if (retval2)
71921+ retval = retval2;
71922+ }
71923+ break;
71924+ }
71925+ } while ((tmpsubj = tmpsubj->parent_subject));
71926+ read_unlock(&gr_inode_lock);
71927+
71928+ return retval;
71929+}
71930+
71931+static __inline__ struct acl_object_label *
71932+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
71933+ struct dentry *curr_dentry,
71934+ const struct acl_subject_label *subj, char **path, const int checkglob)
71935+{
71936+ int newglob = checkglob;
71937+ ino_t inode;
71938+ dev_t device;
71939+
71940+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
71941+ as we don't want a / * rule to match instead of the / object
71942+ don't do this for create lookups that call this function though, since they're looking up
71943+ on the parent and thus need globbing checks on all paths
71944+ */
71945+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
71946+ newglob = GR_NO_GLOB;
71947+
71948+ spin_lock(&curr_dentry->d_lock);
71949+ inode = curr_dentry->d_inode->i_ino;
71950+ device = __get_dev(curr_dentry);
71951+ spin_unlock(&curr_dentry->d_lock);
71952+
71953+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
71954+}
71955+
71956+#ifdef CONFIG_HUGETLBFS
71957+static inline bool
71958+is_hugetlbfs_mnt(const struct vfsmount *mnt)
71959+{
71960+ int i;
71961+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
71962+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
71963+ return true;
71964+ }
71965+
71966+ return false;
71967+}
71968+#endif
71969+
71970+static struct acl_object_label *
71971+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71972+ const struct acl_subject_label *subj, char *path, const int checkglob)
71973+{
71974+ struct dentry *dentry = (struct dentry *) l_dentry;
71975+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
71976+ struct mount *real_mnt = real_mount(mnt);
71977+ struct acl_object_label *retval;
71978+ struct dentry *parent;
71979+
71980+ read_seqlock_excl(&mount_lock);
71981+ write_seqlock(&rename_lock);
71982+
71983+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
71984+#ifdef CONFIG_NET
71985+ mnt == sock_mnt ||
71986+#endif
71987+#ifdef CONFIG_HUGETLBFS
71988+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
71989+#endif
71990+ /* ignore Eric Biederman */
71991+ IS_PRIVATE(l_dentry->d_inode))) {
71992+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
71993+ goto out;
71994+ }
71995+
71996+ for (;;) {
71997+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
71998+ break;
71999+
72000+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
72001+ if (!mnt_has_parent(real_mnt))
72002+ break;
72003+
72004+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
72005+ if (retval != NULL)
72006+ goto out;
72007+
72008+ dentry = real_mnt->mnt_mountpoint;
72009+ real_mnt = real_mnt->mnt_parent;
72010+ mnt = &real_mnt->mnt;
72011+ continue;
72012+ }
72013+
72014+ parent = dentry->d_parent;
72015+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
72016+ if (retval != NULL)
72017+ goto out;
72018+
72019+ dentry = parent;
72020+ }
72021+
72022+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
72023+
72024+ /* gr_real_root is pinned so we don't have to hold a reference */
72025+ if (retval == NULL)
72026+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
72027+out:
72028+ write_sequnlock(&rename_lock);
72029+ read_sequnlock_excl(&mount_lock);
72030+
72031+ BUG_ON(retval == NULL);
72032+
72033+ return retval;
72034+}
72035+
72036+static __inline__ struct acl_object_label *
72037+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
72038+ const struct acl_subject_label *subj)
72039+{
72040+ char *path = NULL;
72041+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
72042+}
72043+
72044+static __inline__ struct acl_object_label *
72045+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
72046+ const struct acl_subject_label *subj)
72047+{
72048+ char *path = NULL;
72049+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
72050+}
72051+
72052+static __inline__ struct acl_object_label *
72053+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
72054+ const struct acl_subject_label *subj, char *path)
72055+{
72056+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
72057+}
72058+
72059+struct acl_subject_label *
72060+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
72061+ const struct acl_role_label *role)
72062+{
72063+ struct dentry *dentry = (struct dentry *) l_dentry;
72064+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
72065+ struct mount *real_mnt = real_mount(mnt);
72066+ struct acl_subject_label *retval;
72067+ struct dentry *parent;
72068+
72069+ read_seqlock_excl(&mount_lock);
72070+ write_seqlock(&rename_lock);
72071+
72072+ for (;;) {
72073+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
72074+ break;
72075+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
72076+ if (!mnt_has_parent(real_mnt))
72077+ break;
72078+
72079+ spin_lock(&dentry->d_lock);
72080+ read_lock(&gr_inode_lock);
72081+ retval =
72082+ lookup_acl_subj_label(dentry->d_inode->i_ino,
72083+ __get_dev(dentry), role);
72084+ read_unlock(&gr_inode_lock);
72085+ spin_unlock(&dentry->d_lock);
72086+ if (retval != NULL)
72087+ goto out;
72088+
72089+ dentry = real_mnt->mnt_mountpoint;
72090+ real_mnt = real_mnt->mnt_parent;
72091+ mnt = &real_mnt->mnt;
72092+ continue;
72093+ }
72094+
72095+ spin_lock(&dentry->d_lock);
72096+ read_lock(&gr_inode_lock);
72097+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
72098+ __get_dev(dentry), role);
72099+ read_unlock(&gr_inode_lock);
72100+ parent = dentry->d_parent;
72101+ spin_unlock(&dentry->d_lock);
72102+
72103+ if (retval != NULL)
72104+ goto out;
72105+
72106+ dentry = parent;
72107+ }
72108+
72109+ spin_lock(&dentry->d_lock);
72110+ read_lock(&gr_inode_lock);
72111+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
72112+ __get_dev(dentry), role);
72113+ read_unlock(&gr_inode_lock);
72114+ spin_unlock(&dentry->d_lock);
72115+
72116+ if (unlikely(retval == NULL)) {
72117+ /* gr_real_root is pinned, we don't need to hold a reference */
72118+ read_lock(&gr_inode_lock);
72119+ retval = lookup_acl_subj_label(gr_real_root.dentry->d_inode->i_ino,
72120+ __get_dev(gr_real_root.dentry), role);
72121+ read_unlock(&gr_inode_lock);
72122+ }
72123+out:
72124+ write_sequnlock(&rename_lock);
72125+ read_sequnlock_excl(&mount_lock);
72126+
72127+ BUG_ON(retval == NULL);
72128+
72129+ return retval;
72130+}
72131+
72132+void
72133+assign_special_role(const char *rolename)
72134+{
72135+ struct acl_object_label *obj;
72136+ struct acl_role_label *r;
72137+ struct acl_role_label *assigned = NULL;
72138+ struct task_struct *tsk;
72139+ struct file *filp;
72140+
72141+ FOR_EACH_ROLE_START(r)
72142+ if (!strcmp(rolename, r->rolename) &&
72143+ (r->roletype & GR_ROLE_SPECIAL)) {
72144+ assigned = r;
72145+ break;
72146+ }
72147+ FOR_EACH_ROLE_END(r)
72148+
72149+ if (!assigned)
72150+ return;
72151+
72152+ read_lock(&tasklist_lock);
72153+ read_lock(&grsec_exec_file_lock);
72154+
72155+ tsk = current->real_parent;
72156+ if (tsk == NULL)
72157+ goto out_unlock;
72158+
72159+ filp = tsk->exec_file;
72160+ if (filp == NULL)
72161+ goto out_unlock;
72162+
72163+ tsk->is_writable = 0;
72164+ tsk->inherited = 0;
72165+
72166+ tsk->acl_sp_role = 1;
72167+ tsk->acl_role_id = ++acl_sp_role_value;
72168+ tsk->role = assigned;
72169+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
72170+
72171+ /* ignore additional mmap checks for processes that are writable
72172+ by the default ACL */
72173+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
72174+ if (unlikely(obj->mode & GR_WRITE))
72175+ tsk->is_writable = 1;
72176+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
72177+ if (unlikely(obj->mode & GR_WRITE))
72178+ tsk->is_writable = 1;
72179+
72180+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
72181+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
72182+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
72183+#endif
72184+
72185+out_unlock:
72186+ read_unlock(&grsec_exec_file_lock);
72187+ read_unlock(&tasklist_lock);
72188+ return;
72189+}
72190+
72191+
72192+static void
72193+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
72194+{
72195+ struct task_struct *task = current;
72196+ const struct cred *cred = current_cred();
72197+
72198+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
72199+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
72200+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
72201+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
72202+
72203+ return;
72204+}
72205+
72206+static void
72207+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
72208+{
72209+ struct task_struct *task = current;
72210+ const struct cred *cred = current_cred();
72211+
72212+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
72213+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
72214+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
72215+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
72216+
72217+ return;
72218+}
72219+
72220+static void
72221+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
72222+{
72223+ struct task_struct *task = current;
72224+ const struct cred *cred = current_cred();
72225+
72226+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
72227+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
72228+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
72229+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
72230+
72231+ return;
72232+}
72233+
72234+static void
72235+gr_set_proc_res(struct task_struct *task)
72236+{
72237+ struct acl_subject_label *proc;
72238+ unsigned short i;
72239+
72240+ proc = task->acl;
72241+
72242+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
72243+ return;
72244+
72245+ for (i = 0; i < RLIM_NLIMITS; i++) {
72246+ if (!(proc->resmask & (1U << i)))
72247+ continue;
72248+
72249+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
72250+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
72251+
72252+ if (i == RLIMIT_CPU)
72253+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
72254+ }
72255+
72256+ return;
72257+}
72258+
72259+/* both of the below must be called with
72260+ rcu_read_lock();
72261+ read_lock(&tasklist_lock);
72262+ read_lock(&grsec_exec_file_lock);
72263+*/
72264+
72265+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename)
72266+{
72267+ char *tmpname;
72268+ struct acl_subject_label *tmpsubj;
72269+ struct file *filp;
72270+ struct name_entry *nmatch;
72271+
72272+ filp = task->exec_file;
72273+ if (filp == NULL)
72274+ return NULL;
72275+
72276+ /* the following is to apply the correct subject
72277+ on binaries running when the RBAC system
72278+ is enabled, when the binaries have been
72279+ replaced or deleted since their execution
72280+ -----
72281+ when the RBAC system starts, the inode/dev
72282+ from exec_file will be one the RBAC system
72283+ is unaware of. It only knows the inode/dev
72284+ of the present file on disk, or the absence
72285+ of it.
72286+ */
72287+
72288+ if (filename)
72289+ nmatch = __lookup_name_entry(state, filename);
72290+ else {
72291+ preempt_disable();
72292+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
72293+
72294+ nmatch = __lookup_name_entry(state, tmpname);
72295+ preempt_enable();
72296+ }
72297+ tmpsubj = NULL;
72298+ if (nmatch) {
72299+ if (nmatch->deleted)
72300+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
72301+ else
72302+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
72303+ }
72304+ /* this also works for the reload case -- if we don't match a potentially inherited subject
72305+ then we fall back to a normal lookup based on the binary's ino/dev
72306+ */
72307+ if (tmpsubj == NULL)
72308+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
72309+
72310+ return tmpsubj;
72311+}
72312+
72313+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename)
72314+{
72315+ return __gr_get_subject_for_task(&running_polstate, task, filename);
72316+}
72317+
72318+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
72319+{
72320+ struct acl_object_label *obj;
72321+ struct file *filp;
72322+
72323+ filp = task->exec_file;
72324+
72325+ task->acl = subj;
72326+ task->is_writable = 0;
72327+ /* ignore additional mmap checks for processes that are writable
72328+ by the default ACL */
72329+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
72330+ if (unlikely(obj->mode & GR_WRITE))
72331+ task->is_writable = 1;
72332+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
72333+ if (unlikely(obj->mode & GR_WRITE))
72334+ task->is_writable = 1;
72335+
72336+ gr_set_proc_res(task);
72337+
72338+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
72339+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
72340+#endif
72341+}
72342+
72343+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
72344+{
72345+ __gr_apply_subject_to_task(&running_polstate, task, subj);
72346+}
72347+
72348+__u32
72349+gr_search_file(const struct dentry * dentry, const __u32 mode,
72350+ const struct vfsmount * mnt)
72351+{
72352+ __u32 retval = mode;
72353+ struct acl_subject_label *curracl;
72354+ struct acl_object_label *currobj;
72355+
72356+ if (unlikely(!(gr_status & GR_READY)))
72357+ return (mode & ~GR_AUDITS);
72358+
72359+ curracl = current->acl;
72360+
72361+ currobj = chk_obj_label(dentry, mnt, curracl);
72362+ retval = currobj->mode & mode;
72363+
72364+ /* if we're opening a specified transfer file for writing
72365+ (e.g. /dev/initctl), then transfer our role to init
72366+ */
72367+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
72368+ current->role->roletype & GR_ROLE_PERSIST)) {
72369+ struct task_struct *task = init_pid_ns.child_reaper;
72370+
72371+ if (task->role != current->role) {
72372+ struct acl_subject_label *subj;
72373+
72374+ task->acl_sp_role = 0;
72375+ task->acl_role_id = current->acl_role_id;
72376+ task->role = current->role;
72377+ rcu_read_lock();
72378+ read_lock(&grsec_exec_file_lock);
72379+ subj = gr_get_subject_for_task(task, NULL);
72380+ gr_apply_subject_to_task(task, subj);
72381+ read_unlock(&grsec_exec_file_lock);
72382+ rcu_read_unlock();
72383+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
72384+ }
72385+ }
72386+
72387+ if (unlikely
72388+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
72389+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
72390+ __u32 new_mode = mode;
72391+
72392+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
72393+
72394+ retval = new_mode;
72395+
72396+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
72397+ new_mode |= GR_INHERIT;
72398+
72399+ if (!(mode & GR_NOLEARN))
72400+ gr_log_learn(dentry, mnt, new_mode);
72401+ }
72402+
72403+ return retval;
72404+}
72405+
72406+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
72407+ const struct dentry *parent,
72408+ const struct vfsmount *mnt)
72409+{
72410+ struct name_entry *match;
72411+ struct acl_object_label *matchpo;
72412+ struct acl_subject_label *curracl;
72413+ char *path;
72414+
72415+ if (unlikely(!(gr_status & GR_READY)))
72416+ return NULL;
72417+
72418+ preempt_disable();
72419+ path = gr_to_filename_rbac(new_dentry, mnt);
72420+ match = lookup_name_entry_create(path);
72421+
72422+ curracl = current->acl;
72423+
72424+ if (match) {
72425+ read_lock(&gr_inode_lock);
72426+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
72427+ read_unlock(&gr_inode_lock);
72428+
72429+ if (matchpo) {
72430+ preempt_enable();
72431+ return matchpo;
72432+ }
72433+ }
72434+
72435+ // lookup parent
72436+
72437+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
72438+
72439+ preempt_enable();
72440+ return matchpo;
72441+}
72442+
72443+__u32
72444+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
72445+ const struct vfsmount * mnt, const __u32 mode)
72446+{
72447+ struct acl_object_label *matchpo;
72448+ __u32 retval;
72449+
72450+ if (unlikely(!(gr_status & GR_READY)))
72451+ return (mode & ~GR_AUDITS);
72452+
72453+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
72454+
72455+ retval = matchpo->mode & mode;
72456+
72457+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
72458+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
72459+ __u32 new_mode = mode;
72460+
72461+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
72462+
72463+ gr_log_learn(new_dentry, mnt, new_mode);
72464+ return new_mode;
72465+ }
72466+
72467+ return retval;
72468+}
72469+
72470+__u32
72471+gr_check_link(const struct dentry * new_dentry,
72472+ const struct dentry * parent_dentry,
72473+ const struct vfsmount * parent_mnt,
72474+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
72475+{
72476+ struct acl_object_label *obj;
72477+ __u32 oldmode, newmode;
72478+ __u32 needmode;
72479+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
72480+ GR_DELETE | GR_INHERIT;
72481+
72482+ if (unlikely(!(gr_status & GR_READY)))
72483+ return (GR_CREATE | GR_LINK);
72484+
72485+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
72486+ oldmode = obj->mode;
72487+
72488+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
72489+ newmode = obj->mode;
72490+
72491+ needmode = newmode & checkmodes;
72492+
72493+ // old name for hardlink must have at least the permissions of the new name
72494+ if ((oldmode & needmode) != needmode)
72495+ goto bad;
72496+
72497+ // if old name had restrictions/auditing, make sure the new name does as well
72498+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
72499+
72500+ // don't allow hardlinking of suid/sgid/fcapped files without permission
72501+ if (is_privileged_binary(old_dentry))
72502+ needmode |= GR_SETID;
72503+
72504+ if ((newmode & needmode) != needmode)
72505+ goto bad;
72506+
72507+ // enforce minimum permissions
72508+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
72509+ return newmode;
72510+bad:
72511+ needmode = oldmode;
72512+ if (is_privileged_binary(old_dentry))
72513+ needmode |= GR_SETID;
72514+
72515+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
72516+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
72517+ return (GR_CREATE | GR_LINK);
72518+ } else if (newmode & GR_SUPPRESS)
72519+ return GR_SUPPRESS;
72520+ else
72521+ return 0;
72522+}
72523+
72524+int
72525+gr_check_hidden_task(const struct task_struct *task)
72526+{
72527+ if (unlikely(!(gr_status & GR_READY)))
72528+ return 0;
72529+
72530+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
72531+ return 1;
72532+
72533+ return 0;
72534+}
72535+
72536+int
72537+gr_check_protected_task(const struct task_struct *task)
72538+{
72539+ if (unlikely(!(gr_status & GR_READY) || !task))
72540+ return 0;
72541+
72542+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
72543+ task->acl != current->acl)
72544+ return 1;
72545+
72546+ return 0;
72547+}
72548+
72549+int
72550+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
72551+{
72552+ struct task_struct *p;
72553+ int ret = 0;
72554+
72555+ if (unlikely(!(gr_status & GR_READY) || !pid))
72556+ return ret;
72557+
72558+ read_lock(&tasklist_lock);
72559+ do_each_pid_task(pid, type, p) {
72560+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
72561+ p->acl != current->acl) {
72562+ ret = 1;
72563+ goto out;
72564+ }
72565+ } while_each_pid_task(pid, type, p);
72566+out:
72567+ read_unlock(&tasklist_lock);
72568+
72569+ return ret;
72570+}
72571+
72572+void
72573+gr_copy_label(struct task_struct *tsk)
72574+{
72575+ struct task_struct *p = current;
72576+
72577+ tsk->inherited = p->inherited;
72578+ tsk->acl_sp_role = 0;
72579+ tsk->acl_role_id = p->acl_role_id;
72580+ tsk->acl = p->acl;
72581+ tsk->role = p->role;
72582+ tsk->signal->used_accept = 0;
72583+ tsk->signal->curr_ip = p->signal->curr_ip;
72584+ tsk->signal->saved_ip = p->signal->saved_ip;
72585+ if (p->exec_file)
72586+ get_file(p->exec_file);
72587+ tsk->exec_file = p->exec_file;
72588+ tsk->is_writable = p->is_writable;
72589+ if (unlikely(p->signal->used_accept)) {
72590+ p->signal->curr_ip = 0;
72591+ p->signal->saved_ip = 0;
72592+ }
72593+
72594+ return;
72595+}
72596+
72597+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
72598+
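+/* returns 1 (deny) if the user is banned via the kernel-exploit lockout,
+   or if the requested real/effective/fs uid transition is not permitted
+   by the subject's transition list: GR_ID_ALLOW lists act as a whitelist,
+   GR_ID_DENY lists as a blacklist
+*/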
72599+int
72600+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
72601+{
72602+ unsigned int i;
72603+ __u16 num;
72604+ uid_t *uidlist;
72605+ uid_t curuid;
72606+ int realok = 0;
72607+ int effectiveok = 0;
72608+ int fsok = 0;
72609+ uid_t globalreal, globaleffective, globalfs;
72610+
72611+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
72612+ struct user_struct *user;
72613+
72614+ if (!uid_valid(real))
72615+ goto skipit;
72616+
72617+ /* find user based on global namespace */
72618+
72619+ globalreal = GR_GLOBAL_UID(real);
72620+
72621+ user = find_user(make_kuid(&init_user_ns, globalreal));
72622+ if (user == NULL)
72623+ goto skipit;
72624+
72625+ if (gr_process_kernel_setuid_ban(user)) {
72626+ /* for find_user */
72627+ free_uid(user);
72628+ return 1;
72629+ }
72630+
72631+ /* for find_user */
72632+ free_uid(user);
72633+
72634+skipit:
72635+#endif
72636+
72637+ if (unlikely(!(gr_status & GR_READY)))
72638+ return 0;
72639+
72640+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
72641+ gr_log_learn_uid_change(real, effective, fs);
72642+
72643+ num = current->acl->user_trans_num;
72644+ uidlist = current->acl->user_transitions;
72645+
72646+ if (uidlist == NULL)
72647+ return 0;
72648+
72649+ if (!uid_valid(real)) {
72650+ realok = 1;
72651+ globalreal = (uid_t)-1;
72652+ } else {
72653+ globalreal = GR_GLOBAL_UID(real);
72654+ }
72655+ if (!uid_valid(effective)) {
72656+ effectiveok = 1;
72657+ globaleffective = (uid_t)-1;
72658+ } else {
72659+ globaleffective = GR_GLOBAL_UID(effective);
72660+ }
72661+ if (!uid_valid(fs)) {
72662+ fsok = 1;
72663+ globalfs = (uid_t)-1;
72664+ } else {
72665+ globalfs = GR_GLOBAL_UID(fs);
72666+ }
72667+
72668+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
72669+ for (i = 0; i < num; i++) {
72670+ curuid = uidlist[i];
72671+ if (globalreal == curuid)
72672+ realok = 1;
72673+ if (globaleffective == curuid)
72674+ effectiveok = 1;
72675+ if (globalfs == curuid)
72676+ fsok = 1;
72677+ }
72678+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
72679+ for (i = 0; i < num; i++) {
72680+ curuid = uidlist[i];
72681+ if (globalreal == curuid)
72682+ break;
72683+ if (globaleffective == curuid)
72684+ break;
72685+ if (globalfs == curuid)
72686+ break;
72687+ }
72688+ /* not in deny list */
72689+ if (i == num) {
72690+ realok = 1;
72691+ effectiveok = 1;
72692+ fsok = 1;
72693+ }
72694+ }
72695+
72696+ if (realok && effectiveok && fsok)
72697+ return 0;
72698+ else {
72699+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
72700+ return 1;
72701+ }
72702+}
72703+
72704+int
72705+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
72706+{
72707+ unsigned int i;
72708+ __u16 num;
72709+ gid_t *gidlist;
72710+ gid_t curgid;
72711+ int realok = 0;
72712+ int effectiveok = 0;
72713+ int fsok = 0;
72714+ gid_t globalreal, globaleffective, globalfs;
72715+
72716+ if (unlikely(!(gr_status & GR_READY)))
72717+ return 0;
72718+
72719+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
72720+ gr_log_learn_gid_change(real, effective, fs);
72721+
72722+ num = current->acl->group_trans_num;
72723+ gidlist = current->acl->group_transitions;
72724+
72725+ if (gidlist == NULL)
72726+ return 0;
72727+
72728+ if (!gid_valid(real)) {
72729+ realok = 1;
72730+ globalreal = (gid_t)-1;
72731+ } else {
72732+ globalreal = GR_GLOBAL_GID(real);
72733+ }
72734+ if (!gid_valid(effective)) {
72735+ effectiveok = 1;
72736+ globaleffective = (gid_t)-1;
72737+ } else {
72738+ globaleffective = GR_GLOBAL_GID(effective);
72739+ }
72740+ if (!gid_valid(fs)) {
72741+ fsok = 1;
72742+ globalfs = (gid_t)-1;
72743+ } else {
72744+ globalfs = GR_GLOBAL_GID(fs);
72745+ }
72746+
72747+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
72748+ for (i = 0; i < num; i++) {
72749+ curgid = gidlist[i];
72750+ if (globalreal == curgid)
72751+ realok = 1;
72752+ if (globaleffective == curgid)
72753+ effectiveok = 1;
72754+ if (globalfs == curgid)
72755+ fsok = 1;
72756+ }
72757+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
72758+ for (i = 0; i < num; i++) {
72759+ curgid = gidlist[i];
72760+ if (globalreal == curgid)
72761+ break;
72762+ if (globaleffective == curgid)
72763+ break;
72764+ if (globalfs == curgid)
72765+ break;
72766+ }
72767+ /* not in deny list */
72768+ if (i == num) {
72769+ realok = 1;
72770+ effectiveok = 1;
72771+ fsok = 1;
72772+ }
72773+ }
72774+
72775+ if (realok && effectiveok && fsok)
72776+ return 0;
72777+ else {
72778+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
72779+ return 1;
72780+ }
72781+}
72782+
72783+extern int gr_acl_is_capable(const int cap);
72784+
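+/* called on uid/gid changes: look up the role matching the new ids and,
+   provided the task is privileged enough to change role (CAP_SETUID for
+   user roles, CAP_SETGID for group roles), apply the role and the subject
+   found for the task's executable within it
+*/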
72785+void
72786+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
72787+{
72788+ struct acl_role_label *role = task->role;
72789+ struct acl_subject_label *subj = NULL;
72790+ struct acl_object_label *obj;
72791+ struct file *filp;
72792+ uid_t uid;
72793+ gid_t gid;
72794+
72795+ if (unlikely(!(gr_status & GR_READY)))
72796+ return;
72797+
72798+ uid = GR_GLOBAL_UID(kuid);
72799+ gid = GR_GLOBAL_GID(kgid);
72800+
72801+ filp = task->exec_file;
72802+
72803+ /* kernel process, we'll give them the kernel role */
72804+ if (unlikely(!filp)) {
72805+ task->role = running_polstate.kernel_role;
72806+ task->acl = running_polstate.kernel_role->root_label;
72807+ return;
72808+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
72809+ /* save the current ip at time of role lookup so that the proper
72810+ IP will be learned for role_allowed_ip */
72811+ task->signal->saved_ip = task->signal->curr_ip;
72812+ role = lookup_acl_role_label(task, uid, gid);
72813+ }
72814+
72815+ /* don't change the role if we're not a privileged process */
72816+ if (role && task->role != role &&
72817+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
72818+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
72819+ return;
72820+
72821+ /* perform subject lookup in possibly new role
72822+ we can use this result below in the case where role == task->role
72823+ */
72824+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
72825+
72826+	/* if the uid/gid change results in the same role and we are
72827+	   using inheritance, don't lose the inherited subject: if the
72828+	   current subject differs from what a normal lookup would
72829+	   yield, we arrived at it via inheritance, so keep that
72830+	   subject
72831+	*/
72832+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
72833+ (subj == task->acl)))
72834+ task->acl = subj;
72835+
72836+ /* leave task->inherited unaffected */
72837+
72838+ task->role = role;
72839+
72840+ task->is_writable = 0;
72841+
72842+ /* ignore additional mmap checks for processes that are writable
72843+ by the default ACL */
72844+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
72845+ if (unlikely(obj->mode & GR_WRITE))
72846+ task->is_writable = 1;
72847+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
72848+ if (unlikely(obj->mode & GR_WRITE))
72849+ task->is_writable = 1;
72850+
72851+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
72852+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
72853+#endif
72854+
72855+ gr_set_proc_res(task);
72856+
72857+ return;
72858+}
72859+
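+/* called at exec time: compute the new subject for the binary being
+   executed, deny the exec if a ptraced or shared task would transition
+   subjects without permission, and otherwise install the new subject
+   (or keep the current one when inheritance applies)
+*/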
72860+int
72861+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
72862+ const int unsafe_flags)
72863+{
72864+ struct task_struct *task = current;
72865+ struct acl_subject_label *newacl;
72866+ struct acl_object_label *obj;
72867+ __u32 retmode;
72868+
72869+ if (unlikely(!(gr_status & GR_READY)))
72870+ return 0;
72871+
72872+ newacl = chk_subj_label(dentry, mnt, task->role);
72873+
72874+	/* special handling for the case where we ran strace -f -p <pid> from an admin role,
72875+	   and the traced pid then performed an exec
72876+	*/
72877+ rcu_read_lock();
72878+ read_lock(&tasklist_lock);
72879+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
72880+ (task->parent->acl->mode & GR_POVERRIDE))) {
72881+ read_unlock(&tasklist_lock);
72882+ rcu_read_unlock();
72883+ goto skip_check;
72884+ }
72885+ read_unlock(&tasklist_lock);
72886+ rcu_read_unlock();
72887+
72888+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
72889+ !(task->role->roletype & GR_ROLE_GOD) &&
72890+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
72891+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
72892+ if (unsafe_flags & LSM_UNSAFE_SHARE)
72893+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
72894+ else
72895+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
72896+ return -EACCES;
72897+ }
72898+
72899+skip_check:
72900+
72901+ obj = chk_obj_label(dentry, mnt, task->acl);
72902+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
72903+
72904+ if (!(task->acl->mode & GR_INHERITLEARN) &&
72905+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
72906+ if (obj->nested)
72907+ task->acl = obj->nested;
72908+ else
72909+ task->acl = newacl;
72910+ task->inherited = 0;
72911+ } else {
72912+ task->inherited = 1;
72913+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
72914+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
72915+ }
72916+
72917+ task->is_writable = 0;
72918+
72919+ /* ignore additional mmap checks for processes that are writable
72920+ by the default ACL */
72921+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
72922+ if (unlikely(obj->mode & GR_WRITE))
72923+ task->is_writable = 1;
72924+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
72925+ if (unlikely(obj->mode & GR_WRITE))
72926+ task->is_writable = 1;
72927+
72928+ gr_set_proc_res(task);
72929+
72930+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
72931+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
72932+#endif
72933+ return 0;
72934+}
72935+
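+/* mark every object and subject label matching this inode/dev pair as
+   GR_DELETED across all roles, so a later recycled inode cannot match
+   stale policy entries
+*/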
72936+/* always called with valid inodev ptr */
72937+static void
72938+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
72939+{
72940+ struct acl_object_label *matchpo;
72941+ struct acl_subject_label *matchps;
72942+ struct acl_subject_label *subj;
72943+ struct acl_role_label *role;
72944+ unsigned int x;
72945+
72946+ FOR_EACH_ROLE_START(role)
72947+ FOR_EACH_SUBJECT_START(role, subj, x)
72948+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
72949+ matchpo->mode |= GR_DELETED;
72950+ FOR_EACH_SUBJECT_END(subj,x)
72951+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
72952+ /* nested subjects aren't in the role's subj_hash table */
72953+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
72954+ matchpo->mode |= GR_DELETED;
72955+ FOR_EACH_NESTED_SUBJECT_END(subj)
72956+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
72957+ matchps->mode |= GR_DELETED;
72958+ FOR_EACH_ROLE_END(role)
72959+
72960+ inodev->nentry->deleted = 1;
72961+
72962+ return;
72963+}
72964+
72965+void
72966+gr_handle_delete(const ino_t ino, const dev_t dev)
72967+{
72968+ struct inodev_entry *inodev;
72969+
72970+ if (unlikely(!(gr_status & GR_READY)))
72971+ return;
72972+
72973+ write_lock(&gr_inode_lock);
72974+ inodev = lookup_inodev_entry(ino, dev);
72975+ if (inodev != NULL)
72976+ do_handle_delete(inodev, ino, dev);
72977+ write_unlock(&gr_inode_lock);
72978+
72979+ return;
72980+}
72981+
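+/* the three update_* helpers below share one pattern: find the entry
+   hashed under the old inode/dev that is marked deleted, unlink it from
+   its hash chain, rewrite it with the new inode/dev, clear the deleted
+   flag, and reinsert it
+*/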
72982+static void
72983+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
72984+ const ino_t newinode, const dev_t newdevice,
72985+ struct acl_subject_label *subj)
72986+{
72987+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
72988+ struct acl_object_label *match;
72989+
72990+ match = subj->obj_hash[index];
72991+
72992+ while (match && (match->inode != oldinode ||
72993+ match->device != olddevice ||
72994+ !(match->mode & GR_DELETED)))
72995+ match = match->next;
72996+
72997+ if (match && (match->inode == oldinode)
72998+ && (match->device == olddevice)
72999+ && (match->mode & GR_DELETED)) {
73000+ if (match->prev == NULL) {
73001+ subj->obj_hash[index] = match->next;
73002+ if (match->next != NULL)
73003+ match->next->prev = NULL;
73004+ } else {
73005+ match->prev->next = match->next;
73006+ if (match->next != NULL)
73007+ match->next->prev = match->prev;
73008+ }
73009+ match->prev = NULL;
73010+ match->next = NULL;
73011+ match->inode = newinode;
73012+ match->device = newdevice;
73013+ match->mode &= ~GR_DELETED;
73014+
73015+ insert_acl_obj_label(match, subj);
73016+ }
73017+
73018+ return;
73019+}
73020+
73021+static void
73022+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
73023+ const ino_t newinode, const dev_t newdevice,
73024+ struct acl_role_label *role)
73025+{
73026+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
73027+ struct acl_subject_label *match;
73028+
73029+ match = role->subj_hash[index];
73030+
73031+ while (match && (match->inode != oldinode ||
73032+ match->device != olddevice ||
73033+ !(match->mode & GR_DELETED)))
73034+ match = match->next;
73035+
73036+ if (match && (match->inode == oldinode)
73037+ && (match->device == olddevice)
73038+ && (match->mode & GR_DELETED)) {
73039+ if (match->prev == NULL) {
73040+ role->subj_hash[index] = match->next;
73041+ if (match->next != NULL)
73042+ match->next->prev = NULL;
73043+ } else {
73044+ match->prev->next = match->next;
73045+ if (match->next != NULL)
73046+ match->next->prev = match->prev;
73047+ }
73048+ match->prev = NULL;
73049+ match->next = NULL;
73050+ match->inode = newinode;
73051+ match->device = newdevice;
73052+ match->mode &= ~GR_DELETED;
73053+
73054+ insert_acl_subj_label(match, role);
73055+ }
73056+
73057+ return;
73058+}
73059+
73060+static void
73061+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
73062+ const ino_t newinode, const dev_t newdevice)
73063+{
73064+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
73065+ struct inodev_entry *match;
73066+
73067+ match = running_polstate.inodev_set.i_hash[index];
73068+
73069+ while (match && (match->nentry->inode != oldinode ||
73070+ match->nentry->device != olddevice || !match->nentry->deleted))
73071+ match = match->next;
73072+
73073+ if (match && (match->nentry->inode == oldinode)
73074+ && (match->nentry->device == olddevice) &&
73075+ match->nentry->deleted) {
73076+ if (match->prev == NULL) {
73077+ running_polstate.inodev_set.i_hash[index] = match->next;
73078+ if (match->next != NULL)
73079+ match->next->prev = NULL;
73080+ } else {
73081+ match->prev->next = match->next;
73082+ if (match->next != NULL)
73083+ match->next->prev = match->prev;
73084+ }
73085+ match->prev = NULL;
73086+ match->next = NULL;
73087+ match->nentry->inode = newinode;
73088+ match->nentry->device = newdevice;
73089+ match->nentry->deleted = 0;
73090+
73091+ insert_inodev_entry(match);
73092+ }
73093+
73094+ return;
73095+}
73096+
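+/* a file matching a configured pathname was created: remap any labels
+   recorded under the pathname's old inode/dev to the new file's
+   inode/dev, across every role and subject
+*/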
73097+static void
73098+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
73099+{
73100+ struct acl_subject_label *subj;
73101+ struct acl_role_label *role;
73102+ unsigned int x;
73103+
73104+ FOR_EACH_ROLE_START(role)
73105+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
73106+
73107+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
73108+			if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
73109+				subj->inode = ino;
73110+				subj->device = dev;
73111+			}
73112+ /* nested subjects aren't in the role's subj_hash table */
73113+ update_acl_obj_label(matchn->inode, matchn->device,
73114+ ino, dev, subj);
73115+ FOR_EACH_NESTED_SUBJECT_END(subj)
73116+ FOR_EACH_SUBJECT_START(role, subj, x)
73117+ update_acl_obj_label(matchn->inode, matchn->device,
73118+ ino, dev, subj);
73119+ FOR_EACH_SUBJECT_END(subj,x)
73120+ FOR_EACH_ROLE_END(role)
73121+
73122+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
73123+
73124+ return;
73125+}
73126+
73127+static void
73128+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
73129+ const struct vfsmount *mnt)
73130+{
73131+ ino_t ino = dentry->d_inode->i_ino;
73132+ dev_t dev = __get_dev(dentry);
73133+
73134+ __do_handle_create(matchn, ino, dev);
73135+
73136+ return;
73137+}
73138+
73139+void
73140+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
73141+{
73142+ struct name_entry *matchn;
73143+
73144+ if (unlikely(!(gr_status & GR_READY)))
73145+ return;
73146+
73147+ preempt_disable();
73148+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
73149+
73150+ if (unlikely((unsigned long)matchn)) {
73151+ write_lock(&gr_inode_lock);
73152+ do_handle_create(matchn, dentry, mnt);
73153+ write_unlock(&gr_inode_lock);
73154+ }
73155+ preempt_enable();
73156+
73157+ return;
73158+}
73159+
73160+void
73161+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
73162+{
73163+ struct name_entry *matchn;
73164+
73165+ if (unlikely(!(gr_status & GR_READY)))
73166+ return;
73167+
73168+ preempt_disable();
73169+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
73170+
73171+ if (unlikely((unsigned long)matchn)) {
73172+ write_lock(&gr_inode_lock);
73173+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
73174+ write_unlock(&gr_inode_lock);
73175+ }
73176+ preempt_enable();
73177+
73178+ return;
73179+}
73180+
73181+void
73182+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
73183+ struct dentry *old_dentry,
73184+ struct dentry *new_dentry,
73185+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
73186+{
73187+ struct name_entry *matchn;
73188+ struct name_entry *matchn2 = NULL;
73189+ struct inodev_entry *inodev;
73190+ struct inode *inode = new_dentry->d_inode;
73191+ ino_t old_ino = old_dentry->d_inode->i_ino;
73192+ dev_t old_dev = __get_dev(old_dentry);
73193+ unsigned int exchange = flags & RENAME_EXCHANGE;
73194+
73195+	/* vfs_rename swaps the name and parent link for old_dentry and
73196+	   new_dentry.
73197+	   at this point, old_dentry has the new name, parent link, and inode
73198+	   for the renamed file.
73199+	   if a file is being replaced by a rename, new_dentry has the inode
73200+	   and name for the replaced file
73201+	*/
73202+
73203+ if (unlikely(!(gr_status & GR_READY)))
73204+ return;
73205+
73206+ preempt_disable();
73207+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
73208+
73209+ /* exchange cases:
73210+ a filename exists for the source, but not dest
73211+ do a recreate on source
73212+ a filename exists for the dest, but not source
73213+ do a recreate on dest
73214+ a filename exists for both source and dest
73215+ delete source and dest, then create source and dest
73216+ a filename exists for neither source nor dest
73217+ no updates needed
73218+
73219+ the name entry lookups get us the old inode/dev associated with
73220+ each name, so do the deletes first (if possible) so that when
73221+ we do the create, we pick up on the right entries
73222+ */
73223+
73224+ if (exchange)
73225+ matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt));
73226+
73227+ /* we wouldn't have to check d_inode if it weren't for
73228+ NFS silly-renaming
73229+ */
73230+
73231+ write_lock(&gr_inode_lock);
73232+ if (unlikely((replace || exchange) && inode)) {
73233+ ino_t new_ino = inode->i_ino;
73234+ dev_t new_dev = __get_dev(new_dentry);
73235+
73236+ inodev = lookup_inodev_entry(new_ino, new_dev);
73237+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
73238+ do_handle_delete(inodev, new_ino, new_dev);
73239+ }
73240+
73241+ inodev = lookup_inodev_entry(old_ino, old_dev);
73242+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
73243+ do_handle_delete(inodev, old_ino, old_dev);
73244+
73245+ if (unlikely(matchn != NULL))
73246+ do_handle_create(matchn, old_dentry, mnt);
73247+
73248+ if (unlikely(matchn2 != NULL))
73249+ do_handle_create(matchn2, new_dentry, mnt);
73250+
73251+ write_unlock(&gr_inode_lock);
73252+ preempt_enable();
73253+
73254+ return;
73255+}
73256+
73257+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
73258+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
73259+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
73260+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
73261+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
73262+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
73263+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
73264+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
73265+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
73266+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
73267+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
73268+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
73269+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
73270+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
73271+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
73272+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
73273+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
73274+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
73275+};
73276+
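+/* when a learning subject requests more than its current soft limit for
+   a resource, raise the learned soft (and if needed, hard) limit to the
+   wanted value plus a per-resource headroom bump from res_learn_bumps,
+   and log the new limits
+*/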
73277+void
73278+gr_learn_resource(const struct task_struct *task,
73279+ const int res, const unsigned long wanted, const int gt)
73280+{
73281+ struct acl_subject_label *acl;
73282+ const struct cred *cred;
73283+
73284+ if (unlikely((gr_status & GR_READY) &&
73285+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
73286+ goto skip_reslog;
73287+
73288+ gr_log_resource(task, res, wanted, gt);
73289+skip_reslog:
73290+
73291+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
73292+ return;
73293+
73294+ acl = task->acl;
73295+
73296+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
73297+ !(acl->resmask & (1U << (unsigned short) res))))
73298+ return;
73299+
73300+ if (wanted >= acl->res[res].rlim_cur) {
73301+ unsigned long res_add;
73302+
73303+ res_add = wanted + res_learn_bumps[res];
73304+
73305+ acl->res[res].rlim_cur = res_add;
73306+
73307+ if (wanted > acl->res[res].rlim_max)
73308+ acl->res[res].rlim_max = res_add;
73309+
73310+ /* only log the subject filename, since resource logging is supported for
73311+ single-subject learning only */
73312+ rcu_read_lock();
73313+ cred = __task_cred(task);
73314+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
73315+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
73316+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
73317+ "", (unsigned long) res, &task->signal->saved_ip);
73318+ rcu_read_unlock();
73319+ }
73320+
73321+ return;
73322+}
73323+EXPORT_SYMBOL_GPL(gr_learn_resource);
73324+#endif
73325+
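+/* with CONFIG_PAX_HAVE_ACL_FLAGS, the RBAC subject can force individual
+   PaX features on or off for the task at exec time via its pax_flags
+*/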
73326+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
73327+void
73328+pax_set_initial_flags(struct linux_binprm *bprm)
73329+{
73330+ struct task_struct *task = current;
73331+ struct acl_subject_label *proc;
73332+ unsigned long flags;
73333+
73334+ if (unlikely(!(gr_status & GR_READY)))
73335+ return;
73336+
73337+ flags = pax_get_flags(task);
73338+
73339+ proc = task->acl;
73340+
73341+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
73342+ flags &= ~MF_PAX_PAGEEXEC;
73343+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
73344+ flags &= ~MF_PAX_SEGMEXEC;
73345+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
73346+ flags &= ~MF_PAX_RANDMMAP;
73347+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
73348+ flags &= ~MF_PAX_EMUTRAMP;
73349+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
73350+ flags &= ~MF_PAX_MPROTECT;
73351+
73352+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
73353+ flags |= MF_PAX_PAGEEXEC;
73354+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
73355+ flags |= MF_PAX_SEGMEXEC;
73356+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
73357+ flags |= MF_PAX_RANDMMAP;
73358+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
73359+ flags |= MF_PAX_EMUTRAMP;
73360+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
73361+ flags |= MF_PAX_MPROTECT;
73362+
73363+ pax_set_flags(task, flags);
73364+
73365+ return;
73366+}
73367+#endif
73368+
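+/* deny (return 1) /proc-based ptrace-style access unless current is an
+   ancestor of the target task or the policy relaxes the check; also deny
+   when the target's binary is marked GR_NOPTRACE, or when the subjects
+   differ and current has no override
+*/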
73369+int
73370+gr_handle_proc_ptrace(struct task_struct *task)
73371+{
73372+ struct file *filp;
73373+ struct task_struct *tmp = task;
73374+ struct task_struct *curtemp = current;
73375+ __u32 retmode;
73376+
73377+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
73378+ if (unlikely(!(gr_status & GR_READY)))
73379+ return 0;
73380+#endif
73381+
73382+ read_lock(&tasklist_lock);
73383+ read_lock(&grsec_exec_file_lock);
73384+ filp = task->exec_file;
73385+
73386+ while (task_pid_nr(tmp) > 0) {
73387+ if (tmp == curtemp)
73388+ break;
73389+ tmp = tmp->real_parent;
73390+ }
73391+
73392+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
73393+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
73394+ read_unlock(&grsec_exec_file_lock);
73395+ read_unlock(&tasklist_lock);
73396+ return 1;
73397+ }
73398+
73399+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
73400+ if (!(gr_status & GR_READY)) {
73401+ read_unlock(&grsec_exec_file_lock);
73402+ read_unlock(&tasklist_lock);
73403+ return 0;
73404+ }
73405+#endif
73406+
73407+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
73408+ read_unlock(&grsec_exec_file_lock);
73409+ read_unlock(&tasklist_lock);
73410+
73411+ if (retmode & GR_NOPTRACE)
73412+ return 1;
73413+
73414+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
73415+ && (current->acl != task->acl || (current->acl != current->role->root_label
73416+ && task_pid_nr(current) != task_pid_nr(task))))
73417+ return 1;
73418+
73419+ return 0;
73420+}
73421+
73422+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
73423+{
73424+ if (unlikely(!(gr_status & GR_READY)))
73425+ return;
73426+
73427+ if (!(current->role->roletype & GR_ROLE_GOD))
73428+ return;
73429+
73430+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
73431+ p->role->rolename, gr_task_roletype_to_char(p),
73432+ p->acl->filename);
73433+}
73434+
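+/* enforce the ancestor rule for PTRACE_ATTACH/PTRACE_SEIZE; GR_PTRACERD
+   on the tracee's binary permits read-only ptrace, so requests that would
+   modify the tracee (poke, setregs, etc.) are still denied
+*/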
73435+int
73436+gr_handle_ptrace(struct task_struct *task, const long request)
73437+{
73438+ struct task_struct *tmp = task;
73439+ struct task_struct *curtemp = current;
73440+ __u32 retmode;
73441+
73442+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
73443+ if (unlikely(!(gr_status & GR_READY)))
73444+ return 0;
73445+#endif
73446+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
73447+ read_lock(&tasklist_lock);
73448+ while (task_pid_nr(tmp) > 0) {
73449+ if (tmp == curtemp)
73450+ break;
73451+ tmp = tmp->real_parent;
73452+ }
73453+
73454+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
73455+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
73456+ read_unlock(&tasklist_lock);
73457+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
73458+ return 1;
73459+ }
73460+ read_unlock(&tasklist_lock);
73461+ }
73462+
73463+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
73464+ if (!(gr_status & GR_READY))
73465+ return 0;
73466+#endif
73467+
73468+ read_lock(&grsec_exec_file_lock);
73469+ if (unlikely(!task->exec_file)) {
73470+ read_unlock(&grsec_exec_file_lock);
73471+ return 0;
73472+ }
73473+
73474+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
73475+ read_unlock(&grsec_exec_file_lock);
73476+
73477+ if (retmode & GR_NOPTRACE) {
73478+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
73479+ return 1;
73480+ }
73481+
73482+ if (retmode & GR_PTRACERD) {
73483+ switch (request) {
73484+ case PTRACE_SEIZE:
73485+ case PTRACE_POKETEXT:
73486+ case PTRACE_POKEDATA:
73487+ case PTRACE_POKEUSR:
73488+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
73489+ case PTRACE_SETREGS:
73490+ case PTRACE_SETFPREGS:
73491+#endif
73492+#ifdef CONFIG_X86
73493+ case PTRACE_SETFPXREGS:
73494+#endif
73495+#ifdef CONFIG_ALTIVEC
73496+ case PTRACE_SETVRREGS:
73497+#endif
73498+ return 1;
73499+ default:
73500+ return 0;
73501+ }
73502+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
73503+ !(current->role->roletype & GR_ROLE_GOD) &&
73504+ (current->acl != task->acl)) {
73505+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
73506+ return 1;
73507+ }
73508+
73509+ return 0;
73510+}
73511+
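+/* a regular file is treated as a writable library if either the default
+   role's policy or the task's role-default policy grants write to it;
+   executable mappings of such files are refused and logged
+*/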
73512+static int is_writable_mmap(const struct file *filp)
73513+{
73514+ struct task_struct *task = current;
73515+ struct acl_object_label *obj, *obj2;
73516+
73517+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
73518+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
73519+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
73520+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
73521+ task->role->root_label);
73522+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
73523+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
73524+ return 1;
73525+ }
73526+ }
73527+ return 0;
73528+}
73529+
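+/* mmap/mprotect with PROT_EXEC requires GR_EXEC on the backing file,
+   subject to the TPE check and the writable-library check above;
+   GR_SUPPRESS squelches the denial log and GR_AUDIT_EXEC logs grants
+*/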
73530+int
73531+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
73532+{
73533+ __u32 mode;
73534+
73535+ if (unlikely(!file || !(prot & PROT_EXEC)))
73536+ return 1;
73537+
73538+ if (is_writable_mmap(file))
73539+ return 0;
73540+
73541+ mode =
73542+ gr_search_file(file->f_path.dentry,
73543+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
73544+ file->f_path.mnt);
73545+
73546+ if (!gr_tpe_allow(file))
73547+ return 0;
73548+
73549+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
73550+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
73551+ return 0;
73552+ } else if (unlikely(!(mode & GR_EXEC))) {
73553+ return 0;
73554+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
73555+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
73556+ return 1;
73557+ }
73558+
73559+ return 1;
73560+}
73561+
73562+int
73563+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
73564+{
73565+ __u32 mode;
73566+
73567+ if (unlikely(!file || !(prot & PROT_EXEC)))
73568+ return 1;
73569+
73570+ if (is_writable_mmap(file))
73571+ return 0;
73572+
73573+ mode =
73574+ gr_search_file(file->f_path.dentry,
73575+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
73576+ file->f_path.mnt);
73577+
73578+ if (!gr_tpe_allow(file))
73579+ return 0;
73580+
73581+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
73582+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
73583+ return 0;
73584+ } else if (unlikely(!(mode & GR_EXEC))) {
73585+ return 0;
73586+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
73587+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
73588+ return 1;
73589+ }
73590+
73591+ return 1;
73592+}
73593+
73594+void
73595+gr_acl_handle_psacct(struct task_struct *task, const long code)
73596+{
73597+ unsigned long runtime, cputime;
73598+ cputime_t utime, stime;
73599+ unsigned int wday, cday;
73600+ __u8 whr, chr;
73601+ __u8 wmin, cmin;
73602+ __u8 wsec, csec;
73603+ struct timespec timeval;
73604+
73605+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
73606+ !(task->acl->mode & GR_PROCACCT)))
73607+ return;
73608+
73609+ do_posix_clock_monotonic_gettime(&timeval);
73610+ runtime = timeval.tv_sec - task->start_time.tv_sec;
73611+ wday = runtime / (60 * 60 * 24);
73612+ runtime -= wday * (60 * 60 * 24);
73613+ whr = runtime / (60 * 60);
73614+ runtime -= whr * (60 * 60);
73615+ wmin = runtime / 60;
73616+ runtime -= wmin * 60;
73617+ wsec = runtime;
73618+
73619+ task_cputime(task, &utime, &stime);
73620+ cputime = cputime_to_secs(utime + stime);
73621+ cday = cputime / (60 * 60 * 24);
73622+ cputime -= cday * (60 * 60 * 24);
73623+ chr = cputime / (60 * 60);
73624+ cputime -= chr * (60 * 60);
73625+ cmin = cputime / 60;
73626+ cputime -= cmin * 60;
73627+ csec = cputime;
73628+
73629+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
73630+
73631+ return;
73632+}
73633+
73634+#ifdef CONFIG_TASKSTATS
73635+int gr_is_taskstats_denied(int pid)
73636+{
73637+ struct task_struct *task;
73638+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73639+ const struct cred *cred;
73640+#endif
73641+ int ret = 0;
73642+
73643+ /* restrict taskstats viewing to un-chrooted root users
73644+ who have the 'view' subject flag if the RBAC system is enabled
73645+ */
73646+
73647+ rcu_read_lock();
73648+ read_lock(&tasklist_lock);
73649+ task = find_task_by_vpid(pid);
73650+ if (task) {
73651+#ifdef CONFIG_GRKERNSEC_CHROOT
73652+ if (proc_is_chrooted(task))
73653+ ret = -EACCES;
73654+#endif
73655+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73656+ cred = __task_cred(task);
73657+#ifdef CONFIG_GRKERNSEC_PROC_USER
73658+ if (gr_is_global_nonroot(cred->uid))
73659+ ret = -EACCES;
73660+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73661+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
73662+ ret = -EACCES;
73663+#endif
73664+#endif
73665+ if (gr_status & GR_READY) {
73666+ if (!(task->acl->mode & GR_VIEW))
73667+ ret = -EACCES;
73668+ }
73669+ } else
73670+ ret = -ENOENT;
73671+
73672+ read_unlock(&tasklist_lock);
73673+ rcu_read_unlock();
73674+
73675+ return ret;
73676+}
73677+#endif
73678+
73679+/* AUXV entries are filled via a descendant of search_binary_handler
73680+ after we've already applied the subject for the target
73681+*/
73682+int gr_acl_enable_at_secure(void)
73683+{
73684+ if (unlikely(!(gr_status & GR_READY)))
73685+ return 0;
73686+
73687+ if (current->acl->mode & GR_ATSECURE)
73688+ return 1;
73689+
73690+ return 0;
73691+}
73692+
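+/* decide whether a directory entry is visible during readdir: search the
+   subject chain for an object label on the entry's inode, then fall back
+   to glob matching the constructed full path against the directory
+   object's globbed entries
+*/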
73693+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
73694+{
73695+ struct task_struct *task = current;
73696+ struct dentry *dentry = file->f_path.dentry;
73697+ struct vfsmount *mnt = file->f_path.mnt;
73698+ struct acl_object_label *obj, *tmp;
73699+ struct acl_subject_label *subj;
73700+ unsigned int bufsize;
73701+ int is_not_root;
73702+ char *path;
73703+ dev_t dev = __get_dev(dentry);
73704+
73705+ if (unlikely(!(gr_status & GR_READY)))
73706+ return 1;
73707+
73708+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
73709+ return 1;
73710+
73711+ /* ignore Eric Biederman */
73712+ if (IS_PRIVATE(dentry->d_inode))
73713+ return 1;
73714+
73715+ subj = task->acl;
73716+ read_lock(&gr_inode_lock);
73717+ do {
73718+ obj = lookup_acl_obj_label(ino, dev, subj);
73719+ if (obj != NULL) {
73720+ read_unlock(&gr_inode_lock);
73721+ return (obj->mode & GR_FIND) ? 1 : 0;
73722+ }
73723+ } while ((subj = subj->parent_subject));
73724+ read_unlock(&gr_inode_lock);
73725+
73726+	/* this is purely an optimization, since we're looking for an object
73727+	   for the directory we're doing a readdir on.
73728+	   if it's possible for any globbed object to match the entry we're
73729+	   filling into the directory, then the object we find here will be
73730+	   an anchor point with attached globbed objects
73731+	*/
73732+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
73733+ if (obj->globbed == NULL)
73734+ return (obj->mode & GR_FIND) ? 1 : 0;
73735+
73736+ is_not_root = ((obj->filename[0] == '/') &&
73737+ (obj->filename[1] == '\0')) ? 0 : 1;
73738+ bufsize = PAGE_SIZE - namelen - is_not_root;
73739+
73740+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
73741+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
73742+ return 1;
73743+
73744+ preempt_disable();
73745+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
73746+ bufsize);
73747+
73748+ bufsize = strlen(path);
73749+
73750+ /* if base is "/", don't append an additional slash */
73751+ if (is_not_root)
73752+ *(path + bufsize) = '/';
73753+ memcpy(path + bufsize + is_not_root, name, namelen);
73754+ *(path + bufsize + namelen + is_not_root) = '\0';
73755+
73756+ tmp = obj->globbed;
73757+ while (tmp) {
73758+ if (!glob_match(tmp->filename, path)) {
73759+ preempt_enable();
73760+ return (tmp->mode & GR_FIND) ? 1 : 0;
73761+ }
73762+ tmp = tmp->next;
73763+ }
73764+ preempt_enable();
73765+ return (obj->mode & GR_FIND) ? 1 : 0;
73766+}
73767+
73768+void gr_put_exec_file(struct task_struct *task)
73769+{
73770+ struct file *filp;
73771+
73772+ write_lock(&grsec_exec_file_lock);
73773+ filp = task->exec_file;
73774+ task->exec_file = NULL;
73775+ write_unlock(&grsec_exec_file_lock);
73776+
73777+ if (filp)
73778+ fput(filp);
73779+
73780+ return;
73781+}
73782+
73783+
73784+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
73785+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
73786+#endif
73787+#ifdef CONFIG_SECURITY
73788+EXPORT_SYMBOL_GPL(gr_check_user_change);
73789+EXPORT_SYMBOL_GPL(gr_check_group_change);
73790+#endif
73791+
73792diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
73793new file mode 100644
73794index 0000000..18ffbbd
73795--- /dev/null
73796+++ b/grsecurity/gracl_alloc.c
73797@@ -0,0 +1,105 @@
73798+#include <linux/kernel.h>
73799+#include <linux/mm.h>
73800+#include <linux/slab.h>
73801+#include <linux/vmalloc.h>
73802+#include <linux/gracl.h>
73803+#include <linux/grsecurity.h>
73804+
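+/* a simple stack of kmalloc'd buffers: policy loading pushes every
+   allocation so that acl_free_all() can release the entire policy in one
+   pass, for teardown or for unwinding a failed load
+*/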
73805+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
73806+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
73807+
73808+static __inline__ int
73809+alloc_pop(void)
73810+{
73811+ if (current_alloc_state->alloc_stack_next == 1)
73812+ return 0;
73813+
73814+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
73815+
73816+ current_alloc_state->alloc_stack_next--;
73817+
73818+ return 1;
73819+}
73820+
73821+static __inline__ int
73822+alloc_push(void *buf)
73823+{
73824+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
73825+ return 1;
73826+
73827+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
73828+
73829+ current_alloc_state->alloc_stack_next++;
73830+
73831+ return 0;
73832+}
73833+
73834+void *
73835+acl_alloc(unsigned long len)
73836+{
73837+ void *ret = NULL;
73838+
73839+ if (!len || len > PAGE_SIZE)
73840+ goto out;
73841+
73842+ ret = kmalloc(len, GFP_KERNEL);
73843+
73844+ if (ret) {
73845+ if (alloc_push(ret)) {
73846+ kfree(ret);
73847+ ret = NULL;
73848+ }
73849+ }
73850+
73851+out:
73852+ return ret;
73853+}
73854+
73855+void *
73856+acl_alloc_num(unsigned long num, unsigned long len)
73857+{
73858+ if (!len || (num > (PAGE_SIZE / len)))
73859+ return NULL;
73860+
73861+ return acl_alloc(num * len);
73862+}
73863+
73864+void
73865+acl_free_all(void)
73866+{
73867+ if (!current_alloc_state->alloc_stack)
73868+ return;
73869+
73870+ while (alloc_pop()) ;
73871+
73872+ if (current_alloc_state->alloc_stack) {
73873+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
73874+ kfree(current_alloc_state->alloc_stack);
73875+ else
73876+ vfree(current_alloc_state->alloc_stack);
73877+ }
73878+
73879+ current_alloc_state->alloc_stack = NULL;
73880+ current_alloc_state->alloc_stack_size = 1;
73881+ current_alloc_state->alloc_stack_next = 1;
73882+
73883+ return;
73884+}
73885+
73886+int
73887+acl_alloc_stack_init(unsigned long size)
73888+{
73889+ if ((size * sizeof (void *)) <= PAGE_SIZE)
73890+ current_alloc_state->alloc_stack =
73891+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
73892+ else
73893+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
73894+
73895+ current_alloc_state->alloc_stack_size = size;
73896+ current_alloc_state->alloc_stack_next = 1;
73897+
73898+ if (!current_alloc_state->alloc_stack)
73899+ return 0;
73900+ else
73901+ return 1;
73902+}
73903diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
73904new file mode 100644
73905index 0000000..1a94c11
73906--- /dev/null
73907+++ b/grsecurity/gracl_cap.c
73908@@ -0,0 +1,127 @@
73909+#include <linux/kernel.h>
73910+#include <linux/module.h>
73911+#include <linux/sched.h>
73912+#include <linux/gracl.h>
73913+#include <linux/grsecurity.h>
73914+#include <linux/grinternal.h>
73915+
73916+extern const char *captab_log[];
73917+extern int captab_log_entries;
73918+
73919+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
73920+{
73921+ struct acl_subject_label *curracl;
73922+
73923+ if (!gr_acl_is_enabled())
73924+ return 1;
73925+
73926+ curracl = task->acl;
73927+
73928+ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
73929+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
73930+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
73931+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
73932+ gr_to_filename(task->exec_file->f_path.dentry,
73933+ task->exec_file->f_path.mnt) : curracl->filename,
73934+ curracl->filename, 0UL,
73935+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
73936+ return 1;
73937+ }
73938+
73939+ return 0;
73940+}
73941+
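+/* capability decisions walk the subject's parent chain: the nearest
+   level that specifies a capability in its cap_mask determines whether
+   it is lowered (and whether its use is audited); levels that leave the
+   capability unspecified defer to their parents
+*/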
73942+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
73943+{
73944+ struct acl_subject_label *curracl;
73945+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
73946+ kernel_cap_t cap_audit = __cap_empty_set;
73947+
73948+ if (!gr_acl_is_enabled())
73949+ return 1;
73950+
73951+ curracl = task->acl;
73952+
73953+ cap_drop = curracl->cap_lower;
73954+ cap_mask = curracl->cap_mask;
73955+ cap_audit = curracl->cap_invert_audit;
73956+
73957+ while ((curracl = curracl->parent_subject)) {
73958+		/* if the cap isn't yet specified in the computed mask but is
73959+		   specified at this subject level, record this level's setting:
73960+		   mark the cap as specified in the mask, and if this level
73961+		   lowers (or audit-inverts) the cap, carry that over as well
73962+		*/
73963+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
73964+ cap_raise(cap_mask, cap);
73965+ if (cap_raised(curracl->cap_lower, cap))
73966+ cap_raise(cap_drop, cap);
73967+ if (cap_raised(curracl->cap_invert_audit, cap))
73968+ cap_raise(cap_audit, cap);
73969+ }
73970+ }
73971+
73972+ if (!cap_raised(cap_drop, cap)) {
73973+ if (cap_raised(cap_audit, cap))
73974+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
73975+ return 1;
73976+ }
73977+
73978+	/* in the general case, only learn the capability use if the process
73979+	   actually has the capability; the two uses of gr_learn_cap in sys.c
73980+	   are an exception to this rule, ensuring any role transition involves
73981+	   what the full-learned policy believes in a privileged process
73982+	*/
73983+ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap))
73984+ return 1;
73985+
73986+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
73987+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
73988+
73989+ return 0;
73990+}
73991+
73992+int
73993+gr_acl_is_capable(const int cap)
73994+{
73995+ return gr_task_acl_is_capable(current, current_cred(), cap);
73996+}
73997+
73998+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
73999+{
74000+ struct acl_subject_label *curracl;
74001+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
74002+
74003+ if (!gr_acl_is_enabled())
74004+ return 1;
74005+
74006+ curracl = task->acl;
74007+
74008+ cap_drop = curracl->cap_lower;
74009+ cap_mask = curracl->cap_mask;
74010+
74011+ while ((curracl = curracl->parent_subject)) {
74012+		/* if the cap isn't yet specified in the computed mask but is
74013+		   specified at this subject level, record this level's setting:
74014+		   mark the cap as specified in the mask, and if this level
74015+		   lowers the cap, add it to the set of dropped capabilities
74016+		*/
74017+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
74018+ cap_raise(cap_mask, cap);
74019+ if (cap_raised(curracl->cap_lower, cap))
74020+ cap_raise(cap_drop, cap);
74021+ }
74022+ }
74023+
74024+ if (!cap_raised(cap_drop, cap))
74025+ return 1;
74026+
74027+ return 0;
74028+}
74029+
74030+int
74031+gr_acl_is_capable_nolog(const int cap)
74032+{
74033+ return gr_task_acl_is_capable_nolog(current, cap);
74034+}
74035+
74036diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
74037new file mode 100644
74038index 0000000..ca25605
74039--- /dev/null
74040+++ b/grsecurity/gracl_compat.c
74041@@ -0,0 +1,270 @@
74042+#include <linux/kernel.h>
74043+#include <linux/gracl.h>
74044+#include <linux/compat.h>
74045+#include <linux/gracl_compat.h>
74046+
74047+#include <asm/uaccess.h>
74048+
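+/* helpers for loading a policy from a 32-bit gradm on a 64-bit kernel:
+   each copies the compat layout from userland and widens it field by
+   field into the native structure, converting compat_uptr_t pointers
+   with compat_ptr()
+*/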
74049+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
74050+{
74051+ struct gr_arg_wrapper_compat uwrapcompat;
74052+
74053+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
74054+ return -EFAULT;
74055+
74056+ if (((uwrapcompat.version != GRSECURITY_VERSION) &&
74057+ (uwrapcompat.version != 0x2901)) ||
74058+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
74059+ return -EINVAL;
74060+
74061+ uwrap->arg = compat_ptr(uwrapcompat.arg);
74062+ uwrap->version = uwrapcompat.version;
74063+ uwrap->size = sizeof(struct gr_arg);
74064+
74065+ return 0;
74066+}
74067+
74068+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
74069+{
74070+ struct gr_arg_compat argcompat;
74071+
74072+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
74073+ return -EFAULT;
74074+
74075+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
74076+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
74077+ arg->role_db.num_roles = argcompat.role_db.num_roles;
74078+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
74079+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
74080+ arg->role_db.num_objects = argcompat.role_db.num_objects;
74081+
74082+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
74083+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
74084+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
74085+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
74086+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
74087+ arg->segv_device = argcompat.segv_device;
74088+ arg->segv_inode = argcompat.segv_inode;
74089+ arg->segv_uid = argcompat.segv_uid;
74090+ arg->num_sprole_pws = argcompat.num_sprole_pws;
74091+ arg->mode = argcompat.mode;
74092+
74093+ return 0;
74094+}
74095+
74096+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
74097+{
74098+ struct acl_object_label_compat objcompat;
74099+
74100+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
74101+ return -EFAULT;
74102+
74103+ obj->filename = compat_ptr(objcompat.filename);
74104+ obj->inode = objcompat.inode;
74105+ obj->device = objcompat.device;
74106+ obj->mode = objcompat.mode;
74107+
74108+ obj->nested = compat_ptr(objcompat.nested);
74109+ obj->globbed = compat_ptr(objcompat.globbed);
74110+
74111+ obj->prev = compat_ptr(objcompat.prev);
74112+ obj->next = compat_ptr(objcompat.next);
74113+
74114+ return 0;
74115+}
74116+
74117+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
74118+{
74119+ unsigned int i;
74120+ struct acl_subject_label_compat subjcompat;
74121+
74122+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
74123+ return -EFAULT;
74124+
74125+ subj->filename = compat_ptr(subjcompat.filename);
74126+ subj->inode = subjcompat.inode;
74127+ subj->device = subjcompat.device;
74128+ subj->mode = subjcompat.mode;
74129+ subj->cap_mask = subjcompat.cap_mask;
74130+ subj->cap_lower = subjcompat.cap_lower;
74131+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
74132+
74133+ for (i = 0; i < GR_NLIMITS; i++) {
74134+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
74135+ subj->res[i].rlim_cur = RLIM_INFINITY;
74136+ else
74137+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
74138+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
74139+ subj->res[i].rlim_max = RLIM_INFINITY;
74140+ else
74141+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
74142+ }
74143+ subj->resmask = subjcompat.resmask;
74144+
74145+ subj->user_trans_type = subjcompat.user_trans_type;
74146+ subj->group_trans_type = subjcompat.group_trans_type;
74147+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
74148+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
74149+ subj->user_trans_num = subjcompat.user_trans_num;
74150+ subj->group_trans_num = subjcompat.group_trans_num;
74151+
74152+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
74153+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
74154+ subj->ip_type = subjcompat.ip_type;
74155+ subj->ips = compat_ptr(subjcompat.ips);
74156+ subj->ip_num = subjcompat.ip_num;
74157+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
74158+
74159+ subj->crashes = subjcompat.crashes;
74160+ subj->expires = subjcompat.expires;
74161+
74162+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
74163+ subj->hash = compat_ptr(subjcompat.hash);
74164+ subj->prev = compat_ptr(subjcompat.prev);
74165+ subj->next = compat_ptr(subjcompat.next);
74166+
74167+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
74168+ subj->obj_hash_size = subjcompat.obj_hash_size;
74169+ subj->pax_flags = subjcompat.pax_flags;
74170+
74171+ return 0;
74172+}
74173+
74174+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
74175+{
74176+ struct acl_role_label_compat rolecompat;
74177+
74178+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
74179+ return -EFAULT;
74180+
74181+ role->rolename = compat_ptr(rolecompat.rolename);
74182+ role->uidgid = rolecompat.uidgid;
74183+ role->roletype = rolecompat.roletype;
74184+
74185+ role->auth_attempts = rolecompat.auth_attempts;
74186+ role->expires = rolecompat.expires;
74187+
74188+ role->root_label = compat_ptr(rolecompat.root_label);
74189+ role->hash = compat_ptr(rolecompat.hash);
74190+
74191+ role->prev = compat_ptr(rolecompat.prev);
74192+ role->next = compat_ptr(rolecompat.next);
74193+
74194+ role->transitions = compat_ptr(rolecompat.transitions);
74195+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
74196+ role->domain_children = compat_ptr(rolecompat.domain_children);
74197+ role->domain_child_num = rolecompat.domain_child_num;
74198+
74199+ role->umask = rolecompat.umask;
74200+
74201+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
74202+ role->subj_hash_size = rolecompat.subj_hash_size;
74203+
74204+ return 0;
74205+}
74206+
74207+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
74208+{
74209+ struct role_allowed_ip_compat roleip_compat;
74210+
74211+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
74212+ return -EFAULT;
74213+
74214+ roleip->addr = roleip_compat.addr;
74215+ roleip->netmask = roleip_compat.netmask;
74216+
74217+ roleip->prev = compat_ptr(roleip_compat.prev);
74218+ roleip->next = compat_ptr(roleip_compat.next);
74219+
74220+ return 0;
74221+}
74222+
74223+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
74224+{
74225+ struct role_transition_compat trans_compat;
74226+
74227+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
74228+ return -EFAULT;
74229+
74230+ trans->rolename = compat_ptr(trans_compat.rolename);
74231+
74232+ trans->prev = compat_ptr(trans_compat.prev);
74233+ trans->next = compat_ptr(trans_compat.next);
74234+
74235+ return 0;
74236+
74237+}
74238+
74239+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
74240+{
74241+ struct gr_hash_struct_compat hash_compat;
74242+
74243+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
74244+ return -EFAULT;
74245+
74246+ hash->table = compat_ptr(hash_compat.table);
74247+ hash->nametable = compat_ptr(hash_compat.nametable);
74248+ hash->first = compat_ptr(hash_compat.first);
74249+
74250+ hash->table_size = hash_compat.table_size;
74251+ hash->used_size = hash_compat.used_size;
74252+
74253+ hash->type = hash_compat.type;
74254+
74255+ return 0;
74256+}
74257+
74258+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
74259+{
74260+ compat_uptr_t ptrcompat;
74261+
74262+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
74263+ return -EFAULT;
74264+
74265+ *(void **)ptr = compat_ptr(ptrcompat);
74266+
74267+ return 0;
74268+}
74269+
74270+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
74271+{
74272+ struct acl_ip_label_compat ip_compat;
74273+
74274+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
74275+ return -EFAULT;
74276+
74277+ ip->iface = compat_ptr(ip_compat.iface);
74278+ ip->addr = ip_compat.addr;
74279+ ip->netmask = ip_compat.netmask;
74280+ ip->low = ip_compat.low;
74281+ ip->high = ip_compat.high;
74282+ ip->mode = ip_compat.mode;
74283+ ip->type = ip_compat.type;
74284+
74285+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
74286+
74287+ ip->prev = compat_ptr(ip_compat.prev);
74288+ ip->next = compat_ptr(ip_compat.next);
74289+
74290+ return 0;
74291+}
74292+
74293+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
74294+{
74295+ struct sprole_pw_compat pw_compat;
74296+
74297+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
74298+ return -EFAULT;
74299+
74300+ pw->rolename = compat_ptr(pw_compat.rolename);
74301+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
74302+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
74303+
74304+ return 0;
74305+}
74306+
74307+size_t get_gr_arg_wrapper_size_compat(void)
74308+{
74309+ return sizeof(struct gr_arg_wrapper_compat);
74310+}
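+
+/* Editor's sketch (not part of the original patch): every copy_*_compat
+   helper above follows the same 32-bit -> native widening pattern.  Given a
+   hypothetical pair struct foo / struct foo_compat, it reduces to: */
+#if 0	/* illustrative only; struct foo and its members are assumptions */
+struct foo { void *p; unsigned int n; };
+struct foo_compat { compat_uptr_t p; unsigned int n; };
+
+static int copy_foo_compat(struct foo *f, const struct foo_compat __user *userp)
+{
+	struct foo_compat fc;
+
+	if (copy_from_user(&fc, userp, sizeof(fc)))
+		return -EFAULT;
+
+	f->p = compat_ptr(fc.p);	/* widen the 32-bit user pointer */
+	f->n = fc.n;			/* scalars copy through unchanged */
+	return 0;
+}
+#endif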
74311+
74312diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
74313new file mode 100644
74314index 0000000..4008fdc
74315--- /dev/null
74316+++ b/grsecurity/gracl_fs.c
74317@@ -0,0 +1,445 @@
74318+#include <linux/kernel.h>
74319+#include <linux/sched.h>
74320+#include <linux/types.h>
74321+#include <linux/fs.h>
74322+#include <linux/file.h>
74323+#include <linux/stat.h>
74324+#include <linux/grsecurity.h>
74325+#include <linux/grinternal.h>
74326+#include <linux/gracl.h>
74327+
74328+umode_t
74329+gr_acl_umask(void)
74330+{
74331+ if (unlikely(!gr_acl_is_enabled()))
74332+ return 0;
74333+
74334+ return current->role->umask;
74335+}
74336+
74337+__u32
74338+gr_acl_handle_hidden_file(const struct dentry * dentry,
74339+ const struct vfsmount * mnt)
74340+{
74341+ __u32 mode;
74342+
74343+ if (unlikely(d_is_negative(dentry)))
74344+ return GR_FIND;
74345+
74346+ mode =
74347+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
74348+
74349+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
74350+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
74351+ return mode;
74352+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
74353+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
74354+ return 0;
74355+ } else if (unlikely(!(mode & GR_FIND)))
74356+ return 0;
74357+
74358+ return GR_FIND;
74359+}
74360+
74361+__u32
74362+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
74363+ int acc_mode)
74364+{
74365+ __u32 reqmode = GR_FIND;
74366+ __u32 mode;
74367+
74368+ if (unlikely(d_is_negative(dentry)))
74369+ return reqmode;
74370+
74371+ if (acc_mode & MAY_APPEND)
74372+ reqmode |= GR_APPEND;
74373+ else if (acc_mode & MAY_WRITE)
74374+ reqmode |= GR_WRITE;
74375+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
74376+ reqmode |= GR_READ;
74377+
74378+ mode =
74379+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
74380+ mnt);
74381+
74382+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
74383+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
74384+ reqmode & GR_READ ? " reading" : "",
74385+ reqmode & GR_WRITE ? " writing" : reqmode &
74386+ GR_APPEND ? " appending" : "");
74387+ return reqmode;
74388+ } else
74389+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
74390+ {
74391+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
74392+ reqmode & GR_READ ? " reading" : "",
74393+ reqmode & GR_WRITE ? " writing" : reqmode &
74394+ GR_APPEND ? " appending" : "");
74395+ return 0;
74396+ } else if (unlikely((mode & reqmode) != reqmode))
74397+ return 0;
74398+
74399+ return reqmode;
74400+}
74401+
74402+__u32
74403+gr_acl_handle_creat(const struct dentry * dentry,
74404+ const struct dentry * p_dentry,
74405+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
74406+ const int imode)
74407+{
74408+ __u32 reqmode = GR_WRITE | GR_CREATE;
74409+ __u32 mode;
74410+
74411+ if (acc_mode & MAY_APPEND)
74412+ reqmode |= GR_APPEND;
74413+ // if a directory was required or the directory already exists, then
74414+ // don't count this open as a read
74415+ if ((acc_mode & MAY_READ) &&
74416+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
74417+ reqmode |= GR_READ;
74418+ if ((open_flags & O_CREAT) &&
74419+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
74420+ reqmode |= GR_SETID;
74421+
74422+ mode =
74423+ gr_check_create(dentry, p_dentry, p_mnt,
74424+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
74425+
74426+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
74427+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
74428+ reqmode & GR_READ ? " reading" : "",
74429+ reqmode & GR_WRITE ? " writing" : reqmode &
74430+ GR_APPEND ? " appending" : "");
74431+ return reqmode;
74432+ } else
74433+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
74434+ {
74435+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
74436+ reqmode & GR_READ ? " reading" : "",
74437+ reqmode & GR_WRITE ? " writing" : reqmode &
74438+ GR_APPEND ? " appending" : "");
74439+ return 0;
74440+ } else if (unlikely((mode & reqmode) != reqmode))
74441+ return 0;
74442+
74443+ return reqmode;
74444+}
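+
+/* Editor's note (not part of the original patch): the GR_SETID test above
+   fires for setuid files and for setgid files that are also group-executable.
+   S_ISGID without S_IXGRP is excluded on purpose: on Linux that combination
+   marks a mandatory-locking candidate, not a privilege grant. */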
74445+
74446+__u32
74447+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
74448+ const int fmode)
74449+{
74450+ __u32 mode, reqmode = GR_FIND;
74451+
74452+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
74453+ reqmode |= GR_EXEC;
74454+ if (fmode & S_IWOTH)
74455+ reqmode |= GR_WRITE;
74456+ if (fmode & S_IROTH)
74457+ reqmode |= GR_READ;
74458+
74459+ mode =
74460+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
74461+ mnt);
74462+
74463+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
74464+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
74465+ reqmode & GR_READ ? " reading" : "",
74466+ reqmode & GR_WRITE ? " writing" : "",
74467+ reqmode & GR_EXEC ? " executing" : "");
74468+ return reqmode;
74469+ } else
74470+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
74471+ {
74472+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
74473+ reqmode & GR_READ ? " reading" : "",
74474+ reqmode & GR_WRITE ? " writing" : "",
74475+ reqmode & GR_EXEC ? " executing" : "");
74476+ return 0;
74477+ } else if (unlikely((mode & reqmode) != reqmode))
74478+ return 0;
74479+
74480+ return reqmode;
74481+}
74482+
74483+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
74484+{
74485+ __u32 mode;
74486+
74487+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
74488+
74489+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
74490+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
74491+ return mode;
74492+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
74493+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
74494+ return 0;
74495+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
74496+ return 0;
74497+
74498+ return (reqmode);
74499+}
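+
+/* Editor's note (not part of the original patch): generic_fs_handler()
+   encodes the decision table this whole file repeats:
+
+	granted, audit bit set   -> log success (GR_DO_AUDIT), allow
+	denied, not suppressed   -> log denial (GR_DONT_AUDIT), deny
+	denied, GR_SUPPRESS set  -> deny silently
+	granted, no audit bit    -> allow silently
+*/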
74500+
74501+__u32
74502+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
74503+{
74504+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
74505+}
74506+
74507+__u32
74508+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
74509+{
74510+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
74511+}
74512+
74513+__u32
74514+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
74515+{
74516+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
74517+}
74518+
74519+__u32
74520+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
74521+{
74522+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
74523+}
74524+
74525+__u32
74526+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
74527+ umode_t *modeptr)
74528+{
74529+ umode_t mode;
74530+
74531+ *modeptr &= ~gr_acl_umask();
74532+ mode = *modeptr;
74533+
74534+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
74535+ return 1;
74536+
74537+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
74538+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
74539+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
74540+ GR_CHMOD_ACL_MSG);
74541+ } else {
74542+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
74543+ }
74544+}
74545+
74546+__u32
74547+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
74548+{
74549+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
74550+}
74551+
74552+__u32
74553+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
74554+{
74555+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
74556+}
74557+
74558+__u32
74559+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
74560+{
74561+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
74562+}
74563+
74564+__u32
74565+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
74566+{
74567+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
74568+}
74569+
74570+__u32
74571+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
74572+{
74573+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
74574+ GR_UNIXCONNECT_ACL_MSG);
74575+}
74576+
74577+/* hardlinks require at minimum create and link permission;
74578+ any additional privilege required depends on the
74579+ privilege of the file being linked to
74580+*/
74581+__u32
74582+gr_acl_handle_link(const struct dentry * new_dentry,
74583+ const struct dentry * parent_dentry,
74584+ const struct vfsmount * parent_mnt,
74585+ const struct dentry * old_dentry,
74586+ const struct vfsmount * old_mnt, const struct filename *to)
74587+{
74588+ __u32 mode;
74589+ __u32 needmode = GR_CREATE | GR_LINK;
74590+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
74591+
74592+ mode =
74593+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
74594+ old_mnt);
74595+
74596+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
74597+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
74598+ return mode;
74599+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
74600+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
74601+ return 0;
74602+ } else if (unlikely((mode & needmode) != needmode))
74603+ return 0;
74604+
74605+ return 1;
74606+}
74607+
74608+__u32
74609+gr_acl_handle_symlink(const struct dentry * new_dentry,
74610+ const struct dentry * parent_dentry,
74611+ const struct vfsmount * parent_mnt, const struct filename *from)
74612+{
74613+ __u32 needmode = GR_WRITE | GR_CREATE;
74614+ __u32 mode;
74615+
74616+ mode =
74617+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
74618+ GR_CREATE | GR_AUDIT_CREATE |
74619+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
74620+
74621+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
74622+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
74623+ return mode;
74624+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
74625+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
74626+ return 0;
74627+ } else if (unlikely((mode & needmode) != needmode))
74628+ return 0;
74629+
74630+ return (GR_WRITE | GR_CREATE);
74631+}
74632+
74633+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
74634+{
74635+ __u32 mode;
74636+
74637+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
74638+
74639+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
74640+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
74641+ return mode;
74642+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
74643+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
74644+ return 0;
74645+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
74646+ return 0;
74647+
74648+ return (reqmode);
74649+}
74650+
74651+__u32
74652+gr_acl_handle_mknod(const struct dentry * new_dentry,
74653+ const struct dentry * parent_dentry,
74654+ const struct vfsmount * parent_mnt,
74655+ const int mode)
74656+{
74657+ __u32 reqmode = GR_WRITE | GR_CREATE;
74658+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
74659+ reqmode |= GR_SETID;
74660+
74661+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
74662+ reqmode, GR_MKNOD_ACL_MSG);
74663+}
74664+
74665+__u32
74666+gr_acl_handle_mkdir(const struct dentry *new_dentry,
74667+ const struct dentry *parent_dentry,
74668+ const struct vfsmount *parent_mnt)
74669+{
74670+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
74671+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
74672+}
74673+
74674+#define RENAME_CHECK_SUCCESS(old, new) \
74675+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
74676+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
74677+
74678+int
74679+gr_acl_handle_rename(struct dentry *new_dentry,
74680+ struct dentry *parent_dentry,
74681+ const struct vfsmount *parent_mnt,
74682+ struct dentry *old_dentry,
74683+ struct inode *old_parent_inode,
74684+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags)
74685+{
74686+ __u32 comp1, comp2;
74687+ int error = 0;
74688+
74689+ if (unlikely(!gr_acl_is_enabled()))
74690+ return 0;
74691+
74692+ if (flags & RENAME_EXCHANGE) {
74693+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
74694+ GR_AUDIT_READ | GR_AUDIT_WRITE |
74695+ GR_SUPPRESS, parent_mnt);
74696+ comp2 =
74697+ gr_search_file(old_dentry,
74698+ GR_READ | GR_WRITE | GR_AUDIT_READ |
74699+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
74700+ } else if (d_is_negative(new_dentry)) {
74701+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
74702+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
74703+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
74704+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
74705+ GR_DELETE | GR_AUDIT_DELETE |
74706+ GR_AUDIT_READ | GR_AUDIT_WRITE |
74707+ GR_SUPPRESS, old_mnt);
74708+ } else {
74709+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
74710+ GR_CREATE | GR_DELETE |
74711+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
74712+ GR_AUDIT_READ | GR_AUDIT_WRITE |
74713+ GR_SUPPRESS, parent_mnt);
74714+ comp2 =
74715+ gr_search_file(old_dentry,
74716+ GR_READ | GR_WRITE | GR_AUDIT_READ |
74717+ GR_DELETE | GR_AUDIT_DELETE |
74718+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
74719+ }
74720+
74721+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
74722+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
74723+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
74724+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
74725+ && !(comp2 & GR_SUPPRESS)) {
74726+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
74727+ error = -EACCES;
74728+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
74729+ error = -EACCES;
74730+
74731+ return error;
74732+}
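+
+/* Editor's note (not part of the original patch): the three branches above
+   cover the three rename shapes:
+
+	RENAME_EXCHANGE   read/write on both ends (the two names swap)
+	negative target   create check on the target, read/write/delete on
+	                  the source
+	existing target   read/write/create/delete on the target,
+	                  read/write/delete on the source
+*/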
74733+
74734+void
74735+gr_acl_handle_exit(void)
74736+{
74737+ u16 id;
74738+ char *rolename;
74739+
74740+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
74741+ !(current->role->roletype & GR_ROLE_PERSIST))) {
74742+ id = current->acl_role_id;
74743+ rolename = current->role->rolename;
74744+ gr_set_acls(1);
74745+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
74746+ }
74747+
74748+ gr_put_exec_file(current);
74749+ return;
74750+}
74751+
74752+int
74753+gr_acl_handle_procpidmem(const struct task_struct *task)
74754+{
74755+ if (unlikely(!gr_acl_is_enabled()))
74756+ return 0;
74757+
74758+ if (task != current && task->acl->mode & GR_PROTPROCFD)
74759+ return -EACCES;
74760+
74761+ return 0;
74762+}
74763diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
74764new file mode 100644
74765index 0000000..f056b81
74766--- /dev/null
74767+++ b/grsecurity/gracl_ip.c
74768@@ -0,0 +1,386 @@
74769+#include <linux/kernel.h>
74770+#include <asm/uaccess.h>
74771+#include <asm/errno.h>
74772+#include <net/sock.h>
74773+#include <linux/file.h>
74774+#include <linux/fs.h>
74775+#include <linux/net.h>
74776+#include <linux/in.h>
74777+#include <linux/skbuff.h>
74778+#include <linux/ip.h>
74779+#include <linux/udp.h>
74780+#include <linux/types.h>
74781+#include <linux/sched.h>
74782+#include <linux/netdevice.h>
74783+#include <linux/inetdevice.h>
74784+#include <linux/gracl.h>
74785+#include <linux/grsecurity.h>
74786+#include <linux/grinternal.h>
74787+
74788+#define GR_BIND 0x01
74789+#define GR_CONNECT 0x02
74790+#define GR_INVERT 0x04
74791+#define GR_BINDOVERRIDE 0x08
74792+#define GR_CONNECTOVERRIDE 0x10
74793+#define GR_SOCK_FAMILY 0x20
74794+
74795+static const char * gr_protocols[IPPROTO_MAX] = {
74796+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
74797+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
74798+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
74799+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
74800+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
74801+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
74802+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
74803+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
74804+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
74805+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
74806+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
74807+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
74808+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
74809+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
74810+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
74811+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
74812+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
74813+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
74814+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
74815+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
74816+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
74817+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
74818+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
74819+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
74820+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
74821+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
74822+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
74823+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
74824+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
74825+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
74826+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
74827+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
74828+ };
74829+
74830+static const char * gr_socktypes[SOCK_MAX] = {
74831+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
74832+ "unknown:7", "unknown:8", "unknown:9", "packet"
74833+ };
74834+
74835+static const char * gr_sockfamilies[AF_MAX+1] = {
74836+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
74837+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
74838+ "econet", "atmsvc", "rds", "sna", "irda", "pppox", "wanpipe", "llc", "fam_27", "fam_28",
74839+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "caif"
74840+ };
74841+
74842+const char *
74843+gr_proto_to_name(unsigned char proto)
74844+{
74845+ return gr_protocols[proto];
74846+}
74847+
74848+const char *
74849+gr_socktype_to_name(unsigned char type)
74850+{
74851+ return gr_socktypes[type];
74852+}
74853+
74854+const char *
74855+gr_sockfamily_to_name(unsigned char family)
74856+{
74857+ return gr_sockfamilies[family];
74858+}
74859+
74860+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
74861+
74862+int
74863+gr_search_socket(const int domain, const int type, const int protocol)
74864+{
74865+ struct acl_subject_label *curr;
74866+ const struct cred *cred = current_cred();
74867+
74868+ if (unlikely(!gr_acl_is_enabled()))
74869+ goto exit;
74870+
74871+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
74872+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
74873+ goto exit; // let the kernel handle it
74874+
74875+ curr = current->acl;
74876+
74877+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
74878+ /* the family is allowed; if this is PF_INET, allow it only if
74879+ the extra sock type/protocol checks pass */
74880+ if (domain == PF_INET)
74881+ goto inet_check;
74882+ goto exit;
74883+ } else {
74884+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
74885+ __u32 fakeip = 0;
74886+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74887+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74888+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74889+ gr_to_filename(current->exec_file->f_path.dentry,
74890+ current->exec_file->f_path.mnt) :
74891+ curr->filename, curr->filename,
74892+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
74893+ &current->signal->saved_ip);
74894+ goto exit;
74895+ }
74896+ goto exit_fail;
74897+ }
74898+
74899+inet_check:
74900+ /* the rest of this checking is for IPv4 only */
74901+ if (!curr->ips)
74902+ goto exit;
74903+
74904+ if ((curr->ip_type & (1U << type)) &&
74905+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
74906+ goto exit;
74907+
74908+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
74909+ /* we don't place acls on raw sockets, and sometimes
74910+ dgram/ip sockets are opened for ioctl and not
74911+ bind/connect, so we fake a bind or connect learn log */
74912+ if (type == SOCK_RAW || type == SOCK_PACKET) {
74913+ __u32 fakeip = 0;
74914+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74915+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74916+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74917+ gr_to_filename(current->exec_file->f_path.dentry,
74918+ current->exec_file->f_path.mnt) :
74919+ curr->filename, curr->filename,
74920+ &fakeip, 0, type,
74921+ protocol, GR_CONNECT, &current->signal->saved_ip);
74922+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
74923+ __u32 fakeip = 0;
74924+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74925+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74926+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74927+ gr_to_filename(current->exec_file->f_path.dentry,
74928+ current->exec_file->f_path.mnt) :
74929+ curr->filename, curr->filename,
74930+ &fakeip, 0, type,
74931+ protocol, GR_BIND, &current->signal->saved_ip);
74932+ }
74933+ /* we'll log when they use connect or bind */
74934+ goto exit;
74935+ }
74936+
74937+exit_fail:
74938+ if (domain == PF_INET)
74939+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
74940+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
74941+ else if (rcu_access_pointer(net_families[domain]) != NULL)
74942+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
74943+ gr_socktype_to_name(type), protocol);
74944+
74945+ return 0;
74946+exit:
74947+ return 1;
74948+}
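+
+/* Editor's sketch (not part of the original patch): the sock_families and
+   ip_proto checks above are plain bitmap membership tests over 32-bit words;
+   spelled out as a helper (gr_bitmap_test is an assumption, not a grsecurity
+   symbol): */
+#if 0	/* illustrative only */
+static inline int gr_bitmap_test(const __u32 *map, unsigned int nr)
+{
+	return map[nr / 32] & (1U << (nr % 32));	/* pick word, then bit */
+}
+/* e.g. gr_bitmap_test(curr->ip_proto, IPPROTO_TCP) tests protocol 6 */
+#endif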
74949+
74950+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
74951+{
74952+ if ((ip->mode & mode) &&
74953+ (ip_port >= ip->low) &&
74954+ (ip_port <= ip->high) &&
74955+ ((ntohl(ip_addr) & our_netmask) ==
74956+ (ntohl(our_addr) & our_netmask))
74957+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
74958+ && (ip->type & (1U << type))) {
74959+ if (ip->mode & GR_INVERT)
74960+ return 2; // specifically denied
74961+ else
74962+ return 1; // allowed
74963+ }
74964+
74965+ return 0; // not specifically allowed, may continue parsing
74966+}
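+
+/* Editor's note (not part of the original patch): the address test in
+   check_ip_policy() is a masked compare in host byte order.  For a
+   10.1.0.0/16 rule (our_netmask == 0xffff0000):
+
+	10.1.2.3 -> 0x0a010203 & 0xffff0000 == 0x0a010000   match
+	10.2.0.1 -> 0x0a020001 & 0xffff0000 == 0x0a020000   no match
+
+   and the return value is tri-state: 1 allows, 2 is an explicit GR_INVERT
+   deny, 0 means "keep scanning the subject's remaining rules". */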
74967+
74968+static int
74969+gr_search_connectbind(const int full_mode, struct sock *sk,
74970+ struct sockaddr_in *addr, const int type)
74971+{
74972+ char iface[IFNAMSIZ] = {0};
74973+ struct acl_subject_label *curr;
74974+ struct acl_ip_label *ip;
74975+ struct inet_sock *isk;
74976+ struct net_device *dev;
74977+ struct in_device *idev;
74978+ unsigned long i;
74979+ int ret;
74980+ int mode = full_mode & (GR_BIND | GR_CONNECT);
74981+ __u32 ip_addr = 0;
74982+ __u32 our_addr;
74983+ __u32 our_netmask;
74984+ char *p;
74985+ __u16 ip_port = 0;
74986+ const struct cred *cred = current_cred();
74987+
74988+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
74989+ return 0;
74990+
74991+ curr = current->acl;
74992+ isk = inet_sk(sk);
74993+
74994+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
74995+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
74996+ addr->sin_addr.s_addr = curr->inaddr_any_override;
74997+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
74998+ struct sockaddr_in saddr;
74999+ int err;
75000+
75001+ saddr.sin_family = AF_INET;
75002+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
75003+ saddr.sin_port = isk->inet_sport;
75004+
75005+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
75006+ if (err)
75007+ return err;
75008+
75009+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
75010+ if (err)
75011+ return err;
75012+ }
75013+
75014+ if (!curr->ips)
75015+ return 0;
75016+
75017+ ip_addr = addr->sin_addr.s_addr;
75018+ ip_port = ntohs(addr->sin_port);
75019+
75020+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
75021+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
75022+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
75023+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
75024+ gr_to_filename(current->exec_file->f_path.dentry,
75025+ current->exec_file->f_path.mnt) :
75026+ curr->filename, curr->filename,
75027+ &ip_addr, ip_port, type,
75028+ sk->sk_protocol, mode, &current->signal->saved_ip);
75029+ return 0;
75030+ }
75031+
75032+ for (i = 0; i < curr->ip_num; i++) {
75033+ ip = *(curr->ips + i);
75034+ if (ip->iface != NULL) {
75035+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
75036+ p = strchr(iface, ':');
75037+ if (p != NULL)
75038+ *p = '\0';
75039+ dev = dev_get_by_name(sock_net(sk), iface);
75040+ if (dev == NULL)
75041+ continue;
75042+ idev = in_dev_get(dev);
75043+ if (idev == NULL) {
75044+ dev_put(dev);
75045+ continue;
75046+ }
75047+ rcu_read_lock();
75048+ for_ifa(idev) {
75049+ if (!strcmp(ip->iface, ifa->ifa_label)) {
75050+ our_addr = ifa->ifa_address;
75051+ our_netmask = 0xffffffff;
75052+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
75053+ if (ret == 1) {
75054+ rcu_read_unlock();
75055+ in_dev_put(idev);
75056+ dev_put(dev);
75057+ return 0;
75058+ } else if (ret == 2) {
75059+ rcu_read_unlock();
75060+ in_dev_put(idev);
75061+ dev_put(dev);
75062+ goto denied;
75063+ }
75064+ }
75065+ } endfor_ifa(idev);
75066+ rcu_read_unlock();
75067+ in_dev_put(idev);
75068+ dev_put(dev);
75069+ } else {
75070+ our_addr = ip->addr;
75071+ our_netmask = ip->netmask;
75072+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
75073+ if (ret == 1)
75074+ return 0;
75075+ else if (ret == 2)
75076+ goto denied;
75077+ }
75078+ }
75079+
75080+denied:
75081+ if (mode == GR_BIND)
75082+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
75083+ else if (mode == GR_CONNECT)
75084+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
75085+
75086+ return -EACCES;
75087+}
75088+
75089+int
75090+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
75091+{
75092+ /* always allow disconnection of dgram sockets with connect */
75093+ if (addr->sin_family == AF_UNSPEC)
75094+ return 0;
75095+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
75096+}
75097+
75098+int
75099+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
75100+{
75101+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
75102+}
75103+
75104+int gr_search_listen(struct socket *sock)
75105+{
75106+ struct sock *sk = sock->sk;
75107+ struct sockaddr_in addr;
75108+
75109+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
75110+ addr.sin_port = inet_sk(sk)->inet_sport;
75111+
75112+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
75113+}
75114+
75115+int gr_search_accept(struct socket *sock)
75116+{
75117+ struct sock *sk = sock->sk;
75118+ struct sockaddr_in addr;
75119+
75120+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
75121+ addr.sin_port = inet_sk(sk)->inet_sport;
75122+
75123+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
75124+}
75125+
75126+int
75127+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
75128+{
75129+ if (addr)
75130+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
75131+ else {
75132+ struct sockaddr_in sin;
75133+ const struct inet_sock *inet = inet_sk(sk);
75134+
75135+ sin.sin_addr.s_addr = inet->inet_daddr;
75136+ sin.sin_port = inet->inet_dport;
75137+
75138+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
75139+ }
75140+}
75141+
75142+int
75143+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
75144+{
75145+ struct sockaddr_in sin;
75146+
75147+ if (unlikely(skb->len < sizeof (struct udphdr)))
75148+ return 0; // skip this packet
75149+
75150+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
75151+ sin.sin_port = udp_hdr(skb)->source;
75152+
75153+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
75154+}
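+
+/* Editor's note (not part of the original patch): for unconnected UDP the
+   policy is enforced per packet -- the peer's sockaddr is rebuilt from the
+   skb's IP/UDP headers and run through the same connect-style check that an
+   explicit connect() would get. */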
75155diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
75156new file mode 100644
75157index 0000000..25f54ef
75158--- /dev/null
75159+++ b/grsecurity/gracl_learn.c
75160@@ -0,0 +1,207 @@
75161+#include <linux/kernel.h>
75162+#include <linux/mm.h>
75163+#include <linux/sched.h>
75164+#include <linux/poll.h>
75165+#include <linux/string.h>
75166+#include <linux/file.h>
75167+#include <linux/types.h>
75168+#include <linux/vmalloc.h>
75169+#include <linux/grinternal.h>
75170+
75171+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
75172+ size_t count, loff_t *ppos);
75173+extern int gr_acl_is_enabled(void);
75174+
75175+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
75176+static int gr_learn_attached;
75177+
75178+/* use a 512k buffer */
75179+#define LEARN_BUFFER_SIZE (512 * 1024)
75180+
75181+static DEFINE_SPINLOCK(gr_learn_lock);
75182+static DEFINE_MUTEX(gr_learn_user_mutex);
75183+
75184+/* we need to maintain two buffers: the kernel context of grlearn
75185+ uses a mutex around the userspace copying, and the other kernel contexts
75186+ use a spinlock when copying into the buffer, since they cannot sleep
75187+*/
75188+static char *learn_buffer;
75189+static char *learn_buffer_user;
75190+static int learn_buffer_len;
75191+static int learn_buffer_user_len;
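+
+/* Editor's note (not part of the original patch): the locking split is a
+   two-stage pipeline --
+
+	producers (atomic ctx):  spin_lock, append into learn_buffer
+	consumer  (grlearn):     mutex_lock, snapshot into learn_buffer_user
+	                         under the spinlock, drop the spinlock, then
+	                         copy_to_user() (which may sleep)
+
+   so copy_to_user() never runs with gr_learn_lock held. */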
75192+
75193+static ssize_t
75194+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
75195+{
75196+ DECLARE_WAITQUEUE(wait, current);
75197+ ssize_t retval = 0;
75198+
75199+ add_wait_queue(&learn_wait, &wait);
75200+ set_current_state(TASK_INTERRUPTIBLE);
75201+ do {
75202+ mutex_lock(&gr_learn_user_mutex);
75203+ spin_lock(&gr_learn_lock);
75204+ if (learn_buffer_len)
75205+ break;
75206+ spin_unlock(&gr_learn_lock);
75207+ mutex_unlock(&gr_learn_user_mutex);
75208+ if (file->f_flags & O_NONBLOCK) {
75209+ retval = -EAGAIN;
75210+ goto out;
75211+ }
75212+ if (signal_pending(current)) {
75213+ retval = -ERESTARTSYS;
75214+ goto out;
75215+ }
75216+
75217+ schedule();
75218+ } while (1);
75219+
75220+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
75221+ learn_buffer_user_len = learn_buffer_len;
75222+ retval = learn_buffer_len;
75223+ learn_buffer_len = 0;
75224+
75225+ spin_unlock(&gr_learn_lock);
75226+
75227+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
75228+ retval = -EFAULT;
75229+
75230+ mutex_unlock(&gr_learn_user_mutex);
75231+out:
75232+ set_current_state(TASK_RUNNING);
75233+ remove_wait_queue(&learn_wait, &wait);
75234+ return retval;
75235+}
75236+
75237+static unsigned int
75238+poll_learn(struct file * file, poll_table * wait)
75239+{
75240+ poll_wait(file, &learn_wait, wait);
75241+
75242+ if (learn_buffer_len)
75243+ return (POLLIN | POLLRDNORM);
75244+
75245+ return 0;
75246+}
75247+
75248+void
75249+gr_clear_learn_entries(void)
75250+{
75251+ char *tmp;
75252+
75253+ mutex_lock(&gr_learn_user_mutex);
75254+ spin_lock(&gr_learn_lock);
75255+ tmp = learn_buffer;
75256+ learn_buffer = NULL;
75257+ spin_unlock(&gr_learn_lock);
75258+ if (tmp)
75259+ vfree(tmp);
75260+ if (learn_buffer_user != NULL) {
75261+ vfree(learn_buffer_user);
75262+ learn_buffer_user = NULL;
75263+ }
75264+ learn_buffer_len = 0;
75265+ mutex_unlock(&gr_learn_user_mutex);
75266+
75267+ return;
75268+}
75269+
75270+void
75271+gr_add_learn_entry(const char *fmt, ...)
75272+{
75273+ va_list args;
75274+ unsigned int len;
75275+
75276+ if (!gr_learn_attached)
75277+ return;
75278+
75279+ spin_lock(&gr_learn_lock);
75280+
75281+ /* leave a gap at the end so we know when it's "full" but don't have to
75282+ compute the exact length of the string we're trying to append
75283+ */
75284+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
75285+ spin_unlock(&gr_learn_lock);
75286+ wake_up_interruptible(&learn_wait);
75287+ return;
75288+ }
75289+ if (learn_buffer == NULL) {
75290+ spin_unlock(&gr_learn_lock);
75291+ return;
75292+ }
75293+
75294+ va_start(args, fmt);
75295+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
75296+ va_end(args);
75297+
75298+ learn_buffer_len += len + 1;
75299+
75300+ spin_unlock(&gr_learn_lock);
75301+ wake_up_interruptible(&learn_wait);
75302+
75303+ return;
75304+}
75305+
75306+static int
75307+open_learn(struct inode *inode, struct file *file)
75308+{
75309+ if (file->f_mode & FMODE_READ && gr_learn_attached)
75310+ return -EBUSY;
75311+ if (file->f_mode & FMODE_READ) {
75312+ int retval = 0;
75313+ mutex_lock(&gr_learn_user_mutex);
75314+ if (learn_buffer == NULL)
75315+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
75316+ if (learn_buffer_user == NULL)
75317+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
75318+ if (learn_buffer == NULL) {
75319+ retval = -ENOMEM;
75320+ goto out_error;
75321+ }
75322+ if (learn_buffer_user == NULL) {
75323+ retval = -ENOMEM;
75324+ goto out_error;
75325+ }
75326+ learn_buffer_len = 0;
75327+ learn_buffer_user_len = 0;
75328+ gr_learn_attached = 1;
75329+out_error:
75330+ mutex_unlock(&gr_learn_user_mutex);
75331+ return retval;
75332+ }
75333+ return 0;
75334+}
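+
+/* Editor's note (not part of the original patch): the -EBUSY check above
+   enforces a single reader -- while gr_learn_attached is set, a second
+   read-mode open fails, so the two learn buffers have exactly one consumer
+   by construction. */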
75335+
75336+static int
75337+close_learn(struct inode *inode, struct file *file)
75338+{
75339+ if (file->f_mode & FMODE_READ) {
75340+ char *tmp = NULL;
75341+ mutex_lock(&gr_learn_user_mutex);
75342+ spin_lock(&gr_learn_lock);
75343+ tmp = learn_buffer;
75344+ learn_buffer = NULL;
75345+ spin_unlock(&gr_learn_lock);
75346+ if (tmp)
75347+ vfree(tmp);
75348+ if (learn_buffer_user != NULL) {
75349+ vfree(learn_buffer_user);
75350+ learn_buffer_user = NULL;
75351+ }
75352+ learn_buffer_len = 0;
75353+ learn_buffer_user_len = 0;
75354+ gr_learn_attached = 0;
75355+ mutex_unlock(&gr_learn_user_mutex);
75356+ }
75357+
75358+ return 0;
75359+}
75360+
75361+const struct file_operations grsec_fops = {
75362+ .read = read_learn,
75363+ .write = write_grsec_handler,
75364+ .open = open_learn,
75365+ .release = close_learn,
75366+ .poll = poll_learn,
75367+};
75368diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
75369new file mode 100644
75370index 0000000..3f8ade0
75371--- /dev/null
75372+++ b/grsecurity/gracl_policy.c
75373@@ -0,0 +1,1782 @@
75374+#include <linux/kernel.h>
75375+#include <linux/module.h>
75376+#include <linux/sched.h>
75377+#include <linux/mm.h>
75378+#include <linux/file.h>
75379+#include <linux/fs.h>
75380+#include <linux/namei.h>
75381+#include <linux/mount.h>
75382+#include <linux/tty.h>
75383+#include <linux/proc_fs.h>
75384+#include <linux/lglock.h>
75385+#include <linux/slab.h>
75386+#include <linux/vmalloc.h>
75387+#include <linux/types.h>
75388+#include <linux/sysctl.h>
75389+#include <linux/netdevice.h>
75390+#include <linux/ptrace.h>
75391+#include <linux/gracl.h>
75392+#include <linux/gralloc.h>
75393+#include <linux/security.h>
75394+#include <linux/grinternal.h>
75395+#include <linux/pid_namespace.h>
75396+#include <linux/stop_machine.h>
75397+#include <linux/fdtable.h>
75398+#include <linux/percpu.h>
75399+#include <linux/lglock.h>
75400+#include <linux/hugetlb.h>
75401+#include <linux/posix-timers.h>
75402+#include "../fs/mount.h"
75403+
75404+#include <asm/uaccess.h>
75405+#include <asm/errno.h>
75406+#include <asm/mman.h>
75407+
75408+extern struct gr_policy_state *polstate;
75409+
75410+#define FOR_EACH_ROLE_START(role) \
75411+ role = polstate->role_list; \
75412+ while (role) {
75413+
75414+#define FOR_EACH_ROLE_END(role) \
75415+ role = role->prev; \
75416+ }
75417+
75418+struct path gr_real_root;
75419+
75420+extern struct gr_alloc_state *current_alloc_state;
75421+
75422+u16 acl_sp_role_value;
75423+
75424+static DEFINE_MUTEX(gr_dev_mutex);
75425+
75426+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
75427+extern void gr_clear_learn_entries(void);
75428+
75429+struct gr_arg *gr_usermode __read_only;
75430+unsigned char *gr_system_salt __read_only;
75431+unsigned char *gr_system_sum __read_only;
75432+
75433+static unsigned int gr_auth_attempts = 0;
75434+static unsigned long gr_auth_expires = 0UL;
75435+
75436+struct acl_object_label *fakefs_obj_rw;
75437+struct acl_object_label *fakefs_obj_rwx;
75438+
75439+extern int gr_init_uidset(void);
75440+extern void gr_free_uidset(void);
75441+extern void gr_remove_uid(uid_t uid);
75442+extern int gr_find_uid(uid_t uid);
75443+
75444+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename);
75445+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
75446+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
75447+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
75448+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
75449+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
75450+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
75451+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
75452+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
75453+extern struct acl_subject_label *lookup_acl_subj_label(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
75454+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
75455+extern void assign_special_role(const char *rolename);
75456+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
75457+extern int gr_rbac_disable(void *unused);
75458+extern void gr_enable_rbac_system(void);
75459+
75460+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
75461+{
75462+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
75463+ return -EFAULT;
75464+
75465+ return 0;
75466+}
75467+
75468+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
75469+{
75470+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
75471+ return -EFAULT;
75472+
75473+ return 0;
75474+}
75475+
75476+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
75477+{
75478+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
75479+ return -EFAULT;
75480+
75481+ return 0;
75482+}
75483+
75484+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
75485+{
75486+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
75487+ return -EFAULT;
75488+
75489+ return 0;
75490+}
75491+
75492+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
75493+{
75494+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
75495+ return -EFAULT;
75496+
75497+ return 0;
75498+}
75499+
75500+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
75501+{
75502+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
75503+ return -EFAULT;
75504+
75505+ return 0;
75506+}
75507+
75508+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
75509+{
75510+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
75511+ return -EFAULT;
75512+
75513+ return 0;
75514+}
75515+
75516+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
75517+{
75518+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
75519+ return -EFAULT;
75520+
75521+ return 0;
75522+}
75523+
75524+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
75525+{
75526+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
75527+ return -EFAULT;
75528+
75529+ return 0;
75530+}
75531+
75532+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
75533+{
75534+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
75535+ return -EFAULT;
75536+
75537+ if (((uwrap->version != GRSECURITY_VERSION) &&
75538+ (uwrap->version != 0x2901)) ||
75539+ (uwrap->size != sizeof(struct gr_arg)))
75540+ return -EINVAL;
75541+
75542+ return 0;
75543+}
75544+
75545+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
75546+{
75547+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
75548+ return -EFAULT;
75549+
75550+ return 0;
75551+}
75552+
75553+static size_t get_gr_arg_wrapper_size_normal(void)
75554+{
75555+ return sizeof(struct gr_arg_wrapper);
75556+}
75557+
75558+#ifdef CONFIG_COMPAT
75559+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
75560+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
75561+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
75562+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
75563+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
75564+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
75565+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
75566+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
75567+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
75568+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
75569+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
75570+extern size_t get_gr_arg_wrapper_size_compat(void);
75571+
75572+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
75573+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
75574+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
75575+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
75576+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
75577+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
75578+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
75579+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
75580+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
75581+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
75582+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
75583+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
75584+
75585+#else
75586+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
75587+#define copy_gr_arg copy_gr_arg_normal
75588+#define copy_gr_hash_struct copy_gr_hash_struct_normal
75589+#define copy_acl_object_label copy_acl_object_label_normal
75590+#define copy_acl_subject_label copy_acl_subject_label_normal
75591+#define copy_acl_role_label copy_acl_role_label_normal
75592+#define copy_acl_ip_label copy_acl_ip_label_normal
75593+#define copy_pointer_from_array copy_pointer_from_array_normal
75594+#define copy_sprole_pw copy_sprole_pw_normal
75595+#define copy_role_transition copy_role_transition_normal
75596+#define copy_role_allowed_ip copy_role_allowed_ip_normal
75597+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
75598+#endif
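+
+/* Editor's sketch (not part of the original patch): with CONFIG_COMPAT the
+   copy_* names above are function pointers, so the policy loader can pick a
+   decoder per request.  A plausible selection site (the function name and
+   its caller are assumptions; is_compat_task() is the stock kernel helper):
+*/
+#if 0	/* illustrative only */
+static void gr_select_copy_funcs(void)
+{
+	if (is_compat_task()) {
+		copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
+		copy_acl_object_label = &copy_acl_object_label_compat;
+		/* ...the remaining pointers follow the same pattern... */
+	} else {
+		copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
+		copy_acl_object_label = &copy_acl_object_label_normal;
+	}
+}
+#endif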
75599+
75600+static struct acl_subject_label *
75601+lookup_subject_map(const struct acl_subject_label *userp)
75602+{
75603+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
75604+ struct subject_map *match;
75605+
75606+ match = polstate->subj_map_set.s_hash[index];
75607+
75608+ while (match && match->user != userp)
75609+ match = match->next;
75610+
75611+ if (match != NULL)
75612+ return match->kernel;
75613+ else
75614+ return NULL;
75615+}
75616+
75617+static void
75618+insert_subj_map_entry(struct subject_map *subjmap)
75619+{
75620+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
75621+ struct subject_map **curr;
75622+
75623+ subjmap->prev = NULL;
75624+
75625+ curr = &polstate->subj_map_set.s_hash[index];
75626+ if (*curr != NULL)
75627+ (*curr)->prev = subjmap;
75628+
75629+ subjmap->next = *curr;
75630+ *curr = subjmap;
75631+
75632+ return;
75633+}
75634+
75635+static void
75636+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
75637+{
75638+ unsigned int index =
75639+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
75640+ struct acl_role_label **curr;
75641+ struct acl_role_label *tmp, *tmp2;
75642+
75643+ curr = &polstate->acl_role_set.r_hash[index];
75644+
75645+ /* simple case, slot is empty, just set it to our role */
75646+ if (*curr == NULL) {
75647+ *curr = role;
75648+ } else {
75649+ /* example:
75650+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
75651+ 2 -> 3
75652+ */
75653+ /* first check to see if we can already be reached via this slot */
75654+ tmp = *curr;
75655+ while (tmp && tmp != role)
75656+ tmp = tmp->next;
75657+ if (tmp == role) {
75658+ /* we don't need to add ourselves to this slot's chain */
75659+ return;
75660+ }
75661+ /* we need to add ourselves to this chain, two cases */
75662+ if (role->next == NULL) {
75663+ /* simple case, append the current chain to our role */
75664+ role->next = *curr;
75665+ *curr = role;
75666+ } else {
75667+ /* 1 -> 2 -> 3 -> 4
75668+ 2 -> 3 -> 4
75669+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
75670+ */
75671+ /* trickier case: walk our role's chain until we find
75672+ the role for the start of the current slot's chain */
75673+ tmp = role;
75674+ tmp2 = *curr;
75675+ while (tmp->next && tmp->next != tmp2)
75676+ tmp = tmp->next;
75677+ if (tmp->next == tmp2) {
75678+ /* from example above, we found 3, so just
75679+ replace this slot's chain with ours */
75680+ *curr = role;
75681+ } else {
75682+ /* we didn't find a subset of our role's chain
75683+ in the current slot's chain, so append their
75684+ chain to ours, and set us as the first role in
75685+ the slot's chain
75686+
75687+ we could fold this case with the case above,
75688+ but making it explicit for clarity
75689+ */
75690+ tmp->next = tmp2;
75691+ *curr = role;
75692+ }
75693+ }
75694+ }
75695+
75696+ return;
75697+}
75698+
75699+static void
75700+insert_acl_role_label(struct acl_role_label *role)
75701+{
75702+ int i;
75703+
75704+ if (polstate->role_list == NULL) {
75705+ polstate->role_list = role;
75706+ role->prev = NULL;
75707+ } else {
75708+ role->prev = polstate->role_list;
75709+ polstate->role_list = role;
75710+ }
75711+
75712+ /* used for hash chains */
75713+ role->next = NULL;
75714+
75715+ if (role->roletype & GR_ROLE_DOMAIN) {
75716+ for (i = 0; i < role->domain_child_num; i++)
75717+ __insert_acl_role_label(role, role->domain_children[i]);
75718+ } else
75719+ __insert_acl_role_label(role, role->uidgid);
75720+}
75721+
75722+static int
75723+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
75724+{
75725+ struct name_entry **curr, *nentry;
75726+ struct inodev_entry *ientry;
75727+ unsigned int len = strlen(name);
75728+ unsigned int key = full_name_hash(name, len);
75729+ unsigned int index = key % polstate->name_set.n_size;
75730+
75731+ curr = &polstate->name_set.n_hash[index];
75732+
75733+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
75734+ curr = &((*curr)->next);
75735+
75736+ if (*curr != NULL)
75737+ return 1;
75738+
75739+ nentry = acl_alloc(sizeof (struct name_entry));
75740+ if (nentry == NULL)
75741+ return 0;
75742+ ientry = acl_alloc(sizeof (struct inodev_entry));
75743+ if (ientry == NULL)
75744+ return 0;
75745+ ientry->nentry = nentry;
75746+
75747+ nentry->key = key;
75748+ nentry->name = name;
75749+ nentry->inode = inode;
75750+ nentry->device = device;
75751+ nentry->len = len;
75752+ nentry->deleted = deleted;
75753+
75754+ nentry->prev = NULL;
75755+ curr = &polstate->name_set.n_hash[index];
75756+ if (*curr != NULL)
75757+ (*curr)->prev = nentry;
75758+ nentry->next = *curr;
75759+ *curr = nentry;
75760+
75761+ /* insert us into the table searchable by inode/dev */
75762+ __insert_inodev_entry(polstate, ientry);
75763+
75764+ return 1;
75765+}
75766+
75767+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
75768+
75769+static void *
75770+create_table(__u32 * len, int elementsize)
75771+{
75772+ unsigned int table_sizes[] = {
75773+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
75774+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
75775+ 4194301, 8388593, 16777213, 33554393, 67108859
75776+ };
75777+ void *newtable = NULL;
75778+ unsigned int pwr = 0;
75779+
75780+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
75781+ table_sizes[pwr] <= *len)
75782+ pwr++;
75783+
75784+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
75785+ return newtable;
75786+
75787+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
75788+ newtable =
75789+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
75790+ else
75791+ newtable = vmalloc(table_sizes[pwr] * elementsize);
75792+
75793+ *len = table_sizes[pwr];
75794+
75795+ return newtable;
75796+}
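+
+/* Editor's note (not part of the original patch): create_table() rounds the
+   requested size up to the next prime in the list and writes it back through
+   *len, keeping the chained-hash load factor (lambda) near 1.  A request for
+   1000 slots, for instance, yields a 1021-entry table; a request beyond the
+   largest prime fails and returns NULL. */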
75797+
75798+static int
75799+init_variables(const struct gr_arg *arg, bool reload)
75800+{
75801+ struct task_struct *reaper = init_pid_ns.child_reaper;
75802+ unsigned int stacksize;
75803+
75804+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
75805+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
75806+ polstate->name_set.n_size = arg->role_db.num_objects;
75807+ polstate->inodev_set.i_size = arg->role_db.num_objects;
75808+
75809+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
75810+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
75811+ return 1;
75812+
75813+ if (!reload) {
75814+ if (!gr_init_uidset())
75815+ return 1;
75816+ }
75817+
75818+ /* set up the stack that holds allocation info */
75819+
75820+ stacksize = arg->role_db.num_pointers + 5;
75821+
75822+ if (!acl_alloc_stack_init(stacksize))
75823+ return 1;
75824+
75825+ if (!reload) {
75826+ /* grab reference for the real root dentry and vfsmount */
75827+ get_fs_root(reaper->fs, &gr_real_root);
75828+
75829+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
75830+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
75831+#endif
75832+
75833+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
75834+ if (fakefs_obj_rw == NULL)
75835+ return 1;
75836+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
75837+
75838+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
75839+ if (fakefs_obj_rwx == NULL)
75840+ return 1;
75841+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
75842+ }
75843+
75844+ polstate->subj_map_set.s_hash =
75845+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
75846+ polstate->acl_role_set.r_hash =
75847+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
75848+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
75849+ polstate->inodev_set.i_hash =
75850+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
75851+
75852+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
75853+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
75854+ return 1;
75855+
75856+ memset(polstate->subj_map_set.s_hash, 0,
75857+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
75858+ memset(polstate->acl_role_set.r_hash, 0,
75859+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
75860+ memset(polstate->name_set.n_hash, 0,
75861+ sizeof (struct name_entry *) * polstate->name_set.n_size);
75862+ memset(polstate->inodev_set.i_hash, 0,
75863+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
75864+
75865+ return 0;
75866+}
75867+
75868+/* free information not needed after startup;
75869+   currently this is the user->kernel pointer mapping for subjects
75870+*/
75871+
75872+static void
75873+free_init_variables(void)
75874+{
75875+ __u32 i;
75876+
75877+ if (polstate->subj_map_set.s_hash) {
75878+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
75879+ if (polstate->subj_map_set.s_hash[i]) {
75880+ kfree(polstate->subj_map_set.s_hash[i]);
75881+ polstate->subj_map_set.s_hash[i] = NULL;
75882+ }
75883+ }
75884+
75885+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
75886+ PAGE_SIZE)
75887+ kfree(polstate->subj_map_set.s_hash);
75888+ else
75889+ vfree(polstate->subj_map_set.s_hash);
75890+ }
75891+
75892+ return;
75893+}
75894+
75895+static void
75896+free_variables(bool reload)
75897+{
75898+ struct acl_subject_label *s;
75899+ struct acl_role_label *r;
75900+ struct task_struct *task, *task2;
75901+ unsigned int x;
75902+
75903+ if (!reload) {
75904+ gr_clear_learn_entries();
75905+
75906+ read_lock(&tasklist_lock);
75907+ do_each_thread(task2, task) {
75908+ task->acl_sp_role = 0;
75909+ task->acl_role_id = 0;
75910+ task->inherited = 0;
75911+ task->acl = NULL;
75912+ task->role = NULL;
75913+ } while_each_thread(task2, task);
75914+ read_unlock(&tasklist_lock);
75915+
75916+ kfree(fakefs_obj_rw);
75917+ fakefs_obj_rw = NULL;
75918+ kfree(fakefs_obj_rwx);
75919+ fakefs_obj_rwx = NULL;
75920+
75921+ /* release the reference to the real root dentry and vfsmount */
75922+ path_put(&gr_real_root);
75923+ memset(&gr_real_root, 0, sizeof(gr_real_root));
75924+ }
75925+
75926+ /* free all object hash tables */
75927+
75928+ FOR_EACH_ROLE_START(r)
75929+ if (r->subj_hash == NULL)
75930+ goto next_role;
75931+ FOR_EACH_SUBJECT_START(r, s, x)
75932+ if (s->obj_hash == NULL)
75933+ break;
75934+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75935+ kfree(s->obj_hash);
75936+ else
75937+ vfree(s->obj_hash);
75938+ FOR_EACH_SUBJECT_END(s, x)
75939+ FOR_EACH_NESTED_SUBJECT_START(r, s)
75940+ if (s->obj_hash == NULL)
75941+ break;
75942+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75943+ kfree(s->obj_hash);
75944+ else
75945+ vfree(s->obj_hash);
75946+ FOR_EACH_NESTED_SUBJECT_END(s)
75947+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
75948+ kfree(r->subj_hash);
75949+ else
75950+ vfree(r->subj_hash);
75951+ r->subj_hash = NULL;
75952+next_role:
75953+ FOR_EACH_ROLE_END(r)
75954+
75955+ acl_free_all();
75956+
75957+ if (polstate->acl_role_set.r_hash) {
75958+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
75959+ PAGE_SIZE)
75960+ kfree(polstate->acl_role_set.r_hash);
75961+ else
75962+ vfree(polstate->acl_role_set.r_hash);
75963+ }
75964+ if (polstate->name_set.n_hash) {
75965+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
75966+ PAGE_SIZE)
75967+ kfree(polstate->name_set.n_hash);
75968+ else
75969+ vfree(polstate->name_set.n_hash);
75970+ }
75971+
75972+ if (polstate->inodev_set.i_hash) {
75973+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
75974+ PAGE_SIZE)
75975+ kfree(polstate->inodev_set.i_hash);
75976+ else
75977+ vfree(polstate->inodev_set.i_hash);
75978+ }
75979+
75980+ if (!reload)
75981+ gr_free_uidset();
75982+
75983+ memset(&polstate->name_set, 0, sizeof (struct name_db));
75984+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
75985+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
75986+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
75987+
75988+ polstate->default_role = NULL;
75989+ polstate->kernel_role = NULL;
75990+ polstate->role_list = NULL;
75991+
75992+ return;
75993+}
75994+
75995+static struct acl_subject_label *
75996+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
75997+
75998+static int alloc_and_copy_string(char **name, unsigned int maxlen)
75999+{
76000+ unsigned int len = strnlen_user(*name, maxlen);
76001+ char *tmp;
76002+
76003+ if (!len || len >= maxlen)
76004+ return -EINVAL;
76005+
76006+ if ((tmp = (char *) acl_alloc(len)) == NULL)
76007+ return -ENOMEM;
76008+
76009+ if (copy_from_user(tmp, *name, len))
76010+ return -EFAULT;
76011+
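+	/* strnlen_user() counts the trailing NUL, so len-1 indexes the last
+	   byte; re-terminate here in case userland modified the string between
+	   the length check and the copy */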
76012+ tmp[len-1] = '\0';
76013+ *name = tmp;
76014+
76015+ return 0;
76016+}
76017+
76018+static int
76019+copy_user_glob(struct acl_object_label *obj)
76020+{
76021+ struct acl_object_label *g_tmp, **guser;
76022+ int error;
76023+
76024+ if (obj->globbed == NULL)
76025+ return 0;
76026+
76027+ guser = &obj->globbed;
76028+ while (*guser) {
76029+ g_tmp = (struct acl_object_label *)
76030+ acl_alloc(sizeof (struct acl_object_label));
76031+ if (g_tmp == NULL)
76032+ return -ENOMEM;
76033+
76034+ if (copy_acl_object_label(g_tmp, *guser))
76035+ return -EFAULT;
76036+
76037+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
76038+ if (error)
76039+ return error;
76040+
76041+ *guser = g_tmp;
76042+ guser = &(g_tmp->next);
76043+ }
76044+
76045+ return 0;
76046+}
76047+
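+/* walk the userspace-chained object list of a subject, copying each object
+   label into ACL-allocator memory, hashing it into the subject's object
+   table, and recursing into any nested subject attached to an object */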
76048+static int
76049+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
76050+ struct acl_role_label *role)
76051+{
76052+ struct acl_object_label *o_tmp;
76053+ int ret;
76054+
76055+ while (userp) {
76056+ if ((o_tmp = (struct acl_object_label *)
76057+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
76058+ return -ENOMEM;
76059+
76060+ if (copy_acl_object_label(o_tmp, userp))
76061+ return -EFAULT;
76062+
76063+ userp = o_tmp->prev;
76064+
76065+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
76066+ if (ret)
76067+ return ret;
76068+
76069+ insert_acl_obj_label(o_tmp, subj);
76070+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
76071+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
76072+ return -ENOMEM;
76073+
76074+ ret = copy_user_glob(o_tmp);
76075+ if (ret)
76076+ return ret;
76077+
76078+ if (o_tmp->nested) {
76079+ int already_copied;
76080+
76081+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
76082+ if (IS_ERR(o_tmp->nested))
76083+ return PTR_ERR(o_tmp->nested);
76084+
76085+ /* insert into nested subject list if we haven't copied this one yet
76086+ to prevent duplicate entries */
76087+ if (!already_copied) {
76088+ o_tmp->nested->next = role->hash->first;
76089+ role->hash->first = o_tmp->nested;
76090+ }
76091+ }
76092+ }
76093+
76094+ return 0;
76095+}
76096+
76097+static __u32
76098+count_user_subjs(struct acl_subject_label *userp)
76099+{
76100+ struct acl_subject_label s_tmp;
76101+ __u32 num = 0;
76102+
76103+ while (userp) {
76104+ if (copy_acl_subject_label(&s_tmp, userp))
76105+ break;
76106+
76107+		userp = s_tmp.prev;
+		num++;
76108+ }
76109+
76110+ return num;
76111+}
76112+
76113+static int
76114+copy_user_allowedips(struct acl_role_label *rolep)
76115+{
76116+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
76117+
76118+ ruserip = rolep->allowed_ips;
76119+
76120+ while (ruserip) {
76121+ rlast = rtmp;
76122+
76123+ if ((rtmp = (struct role_allowed_ip *)
76124+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
76125+ return -ENOMEM;
76126+
76127+ if (copy_role_allowed_ip(rtmp, ruserip))
76128+ return -EFAULT;
76129+
76130+ ruserip = rtmp->prev;
76131+
76132+ if (!rlast) {
76133+ rtmp->prev = NULL;
76134+ rolep->allowed_ips = rtmp;
76135+ } else {
76136+ rlast->next = rtmp;
76137+ rtmp->prev = rlast;
76138+ }
76139+
76140+ if (!ruserip)
76141+ rtmp->next = NULL;
76142+ }
76143+
76144+ return 0;
76145+}
76146+
76147+static int
76148+copy_user_transitions(struct acl_role_label *rolep)
76149+{
76150+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
76151+ int error;
76152+
76153+ rusertp = rolep->transitions;
76154+
76155+ while (rusertp) {
76156+ rlast = rtmp;
76157+
76158+ if ((rtmp = (struct role_transition *)
76159+ acl_alloc(sizeof (struct role_transition))) == NULL)
76160+ return -ENOMEM;
76161+
76162+ if (copy_role_transition(rtmp, rusertp))
76163+ return -EFAULT;
76164+
76165+ rusertp = rtmp->prev;
76166+
76167+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
76168+ if (error)
76169+ return error;
76170+
76171+ if (!rlast) {
76172+ rtmp->prev = NULL;
76173+ rolep->transitions = rtmp;
76174+ } else {
76175+ rlast->next = rtmp;
76176+ rtmp->prev = rlast;
76177+ }
76178+
76179+ if (!rusertp)
76180+ rtmp->next = NULL;
76181+ }
76182+
76183+ return 0;
76184+}
76185+
76186+static __u32 count_user_objs(const struct acl_object_label __user *userp)
76187+{
76188+ struct acl_object_label o_tmp;
76189+ __u32 num = 0;
76190+
76191+ while (userp) {
76192+ if (copy_acl_object_label(&o_tmp, userp))
76193+ break;
76194+
76195+ userp = o_tmp.prev;
76196+ num++;
76197+ }
76198+
76199+ return num;
76200+}
76201+
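+/* copy a single subject from userland into kernel memory; the user->kernel
+   pointer mapping is registered before the copy so that recursive calls
+   (for parent and nested subjects) find the partially-built subject and
+   don't copy it twice or loop forever */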
76202+static struct acl_subject_label *
76203+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
76204+{
76205+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
76206+ __u32 num_objs;
76207+ struct acl_ip_label **i_tmp, *i_utmp2;
76208+ struct gr_hash_struct ghash;
76209+ struct subject_map *subjmap;
76210+ unsigned int i_num;
76211+ int err;
76212+
76213+ if (already_copied != NULL)
76214+ *already_copied = 0;
76215+
76216+ s_tmp = lookup_subject_map(userp);
76217+
76218+	/* we've already copied this subject into the kernel; just return
76219+	   the reference to it and don't copy it over again
76220+ */
76221+ if (s_tmp) {
76222+ if (already_copied != NULL)
76223+ *already_copied = 1;
76224+		return s_tmp;
76225+ }
76226+
76227+ if ((s_tmp = (struct acl_subject_label *)
76228+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
76229+ return ERR_PTR(-ENOMEM);
76230+
76231+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
76232+ if (subjmap == NULL)
76233+ return ERR_PTR(-ENOMEM);
76234+
76235+ subjmap->user = userp;
76236+ subjmap->kernel = s_tmp;
76237+ insert_subj_map_entry(subjmap);
76238+
76239+ if (copy_acl_subject_label(s_tmp, userp))
76240+ return ERR_PTR(-EFAULT);
76241+
76242+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
76243+ if (err)
76244+ return ERR_PTR(err);
76245+
76246+ if (!strcmp(s_tmp->filename, "/"))
76247+ role->root_label = s_tmp;
76248+
76249+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
76250+ return ERR_PTR(-EFAULT);
76251+
76252+ /* copy user and group transition tables */
76253+
76254+ if (s_tmp->user_trans_num) {
76255+ uid_t *uidlist;
76256+
76257+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
76258+ if (uidlist == NULL)
76259+ return ERR_PTR(-ENOMEM);
76260+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
76261+ return ERR_PTR(-EFAULT);
76262+
76263+ s_tmp->user_transitions = uidlist;
76264+ }
76265+
76266+ if (s_tmp->group_trans_num) {
76267+ gid_t *gidlist;
76268+
76269+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
76270+ if (gidlist == NULL)
76271+ return ERR_PTR(-ENOMEM);
76272+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
76273+ return ERR_PTR(-EFAULT);
76274+
76275+ s_tmp->group_transitions = gidlist;
76276+ }
76277+
76278+ /* set up object hash table */
76279+ num_objs = count_user_objs(ghash.first);
76280+
76281+ s_tmp->obj_hash_size = num_objs;
76282+ s_tmp->obj_hash =
76283+ (struct acl_object_label **)
76284+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
76285+
76286+ if (!s_tmp->obj_hash)
76287+ return ERR_PTR(-ENOMEM);
76288+
76289+ memset(s_tmp->obj_hash, 0,
76290+ s_tmp->obj_hash_size *
76291+ sizeof (struct acl_object_label *));
76292+
76293+ /* add in objects */
76294+ err = copy_user_objs(ghash.first, s_tmp, role);
76295+
76296+ if (err)
76297+ return ERR_PTR(err);
76298+
76299+ /* set pointer for parent subject */
76300+ if (s_tmp->parent_subject) {
76301+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
76302+
76303+ if (IS_ERR(s_tmp2))
76304+ return s_tmp2;
76305+
76306+ s_tmp->parent_subject = s_tmp2;
76307+ }
76308+
76309+ /* add in ip acls */
76310+
76311+ if (!s_tmp->ip_num) {
76312+ s_tmp->ips = NULL;
76313+ goto insert;
76314+ }
76315+
76316+ i_tmp =
76317+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
76318+ sizeof (struct acl_ip_label *));
76319+
76320+ if (!i_tmp)
76321+ return ERR_PTR(-ENOMEM);
76322+
76323+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
76324+ *(i_tmp + i_num) =
76325+ (struct acl_ip_label *)
76326+ acl_alloc(sizeof (struct acl_ip_label));
76327+ if (!*(i_tmp + i_num))
76328+ return ERR_PTR(-ENOMEM);
76329+
76330+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
76331+ return ERR_PTR(-EFAULT);
76332+
76333+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
76334+ return ERR_PTR(-EFAULT);
76335+
76336+ if ((*(i_tmp + i_num))->iface == NULL)
76337+ continue;
76338+
76339+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
76340+ if (err)
76341+ return ERR_PTR(err);
76342+ }
76343+
76344+ s_tmp->ips = i_tmp;
76345+
76346+insert:
76347+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
76348+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
76349+ return ERR_PTR(-ENOMEM);
76350+
76351+ return s_tmp;
76352+}
76353+
76354+static int
76355+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
76356+{
76357+ struct acl_subject_label s_pre;
76358+ struct acl_subject_label * ret;
76359+ int err;
76360+
76361+ while (userp) {
76362+ if (copy_acl_subject_label(&s_pre, userp))
76363+ return -EFAULT;
76364+
76365+ ret = do_copy_user_subj(userp, role, NULL);
76366+
76367+ err = PTR_ERR(ret);
76368+ if (IS_ERR(ret))
76369+ return err;
76370+
76371+ insert_acl_subj_label(ret, role);
76372+
76373+ userp = s_pre.prev;
76374+ }
76375+
76376+ return 0;
76377+}
76378+
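+/* top-level copy of the userspace policy: special-role passwords first,
+   then each role along with its allowed IPs, domain members, transitions,
+   and subject/object trees */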
76379+static int
76380+copy_user_acl(struct gr_arg *arg)
76381+{
76382+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
76383+ struct acl_subject_label *subj_list;
76384+ struct sprole_pw *sptmp;
76385+ struct gr_hash_struct *ghash;
76386+ uid_t *domainlist;
76387+ unsigned int r_num;
76388+ int err = 0;
76389+ __u16 i;
76390+ __u32 num_subjs;
76391+
76392+ /* we need a default and kernel role */
76393+ if (arg->role_db.num_roles < 2)
76394+ return -EINVAL;
76395+
76396+ /* copy special role authentication info from userspace */
76397+
76398+ polstate->num_sprole_pws = arg->num_sprole_pws;
76399+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
76400+
76401+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
76402+ return -ENOMEM;
76403+
76404+ for (i = 0; i < polstate->num_sprole_pws; i++) {
76405+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
76406+ if (!sptmp)
76407+ return -ENOMEM;
76408+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
76409+ return -EFAULT;
76410+
76411+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
76412+ if (err)
76413+ return err;
76414+
76415+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
76416+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
76417+#endif
76418+
76419+ polstate->acl_special_roles[i] = sptmp;
76420+ }
76421+
76422+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
76423+
76424+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
76425+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
76426+
76427+ if (!r_tmp)
76428+ return -ENOMEM;
76429+
76430+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
76431+ return -EFAULT;
76432+
76433+ if (copy_acl_role_label(r_tmp, r_utmp2))
76434+ return -EFAULT;
76435+
76436+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
76437+ if (err)
76438+ return err;
76439+
76440+ if (!strcmp(r_tmp->rolename, "default")
76441+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
76442+ polstate->default_role = r_tmp;
76443+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
76444+ polstate->kernel_role = r_tmp;
76445+ }
76446+
76447+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
76448+ return -ENOMEM;
76449+
76450+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
76451+ return -EFAULT;
76452+
76453+ r_tmp->hash = ghash;
76454+
76455+ num_subjs = count_user_subjs(r_tmp->hash->first);
76456+
76457+ r_tmp->subj_hash_size = num_subjs;
76458+ r_tmp->subj_hash =
76459+ (struct acl_subject_label **)
76460+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
76461+
76462+ if (!r_tmp->subj_hash)
76463+ return -ENOMEM;
76464+
76465+ err = copy_user_allowedips(r_tmp);
76466+ if (err)
76467+ return err;
76468+
76469+ /* copy domain info */
76470+ if (r_tmp->domain_children != NULL) {
76471+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
76472+ if (domainlist == NULL)
76473+ return -ENOMEM;
76474+
76475+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
76476+ return -EFAULT;
76477+
76478+ r_tmp->domain_children = domainlist;
76479+ }
76480+
76481+ err = copy_user_transitions(r_tmp);
76482+ if (err)
76483+ return err;
76484+
76485+ memset(r_tmp->subj_hash, 0,
76486+ r_tmp->subj_hash_size *
76487+ sizeof (struct acl_subject_label *));
76488+
76489+ /* acquire the list of subjects, then NULL out
76490+ the list prior to parsing the subjects for this role,
76491+ as during this parsing the list is replaced with a list
76492+ of *nested* subjects for the role
76493+ */
76494+ subj_list = r_tmp->hash->first;
76495+
76496+ /* set nested subject list to null */
76497+ r_tmp->hash->first = NULL;
76498+
76499+ err = copy_user_subjs(subj_list, r_tmp);
76500+
76501+ if (err)
76502+ return err;
76503+
76504+ insert_acl_role_label(r_tmp);
76505+ }
76506+
76507+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
76508+ return -EINVAL;
76509+
76510+ return err;
76511+}
76512+
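+/* runs under stop_machine() from gracl_reload(): the first pass verifies
+   every task can be mapped onto a role/subject in the new policy without
+   modifying anything; the second pass actually rebinds the tasks */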
76513+static int gracl_reload_apply_policies(void *reload)
76514+{
76515+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
76516+ struct task_struct *task, *task2;
76517+ struct acl_role_label *role, *rtmp;
76518+ struct acl_subject_label *subj;
76519+ const struct cred *cred;
76520+ int role_applied;
76521+ int ret = 0;
76522+
76523+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
76524+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
76525+
76526+ /* first make sure we'll be able to apply the new policy cleanly */
76527+ do_each_thread(task2, task) {
76528+ if (task->exec_file == NULL)
76529+ continue;
76530+ role_applied = 0;
76531+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
76532+ /* preserve special roles */
76533+ FOR_EACH_ROLE_START(role)
76534+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
76535+ rtmp = task->role;
76536+ task->role = role;
76537+ role_applied = 1;
76538+ break;
76539+ }
76540+ FOR_EACH_ROLE_END(role)
76541+ }
76542+ if (!role_applied) {
76543+ cred = __task_cred(task);
76544+ rtmp = task->role;
76545+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
76546+ }
76547+		/* this handles non-nested inherited subjects; nested subjects will
76548+		   still be dropped currently */
76549+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
76550+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL);
76551+ /* change the role back so that we've made no modifications to the policy */
76552+ task->role = rtmp;
76553+
76554+ if (subj == NULL || task->tmpacl == NULL) {
76555+ ret = -EINVAL;
76556+ goto out;
76557+ }
76558+ } while_each_thread(task2, task);
76559+
76560+ /* now actually apply the policy */
76561+
76562+ do_each_thread(task2, task) {
76563+ if (task->exec_file) {
76564+ role_applied = 0;
76565+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
76566+ /* preserve special roles */
76567+ FOR_EACH_ROLE_START(role)
76568+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
76569+ task->role = role;
76570+ role_applied = 1;
76571+ break;
76572+ }
76573+ FOR_EACH_ROLE_END(role)
76574+ }
76575+ if (!role_applied) {
76576+ cred = __task_cred(task);
76577+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
76578+ }
76579+			/* this handles non-nested inherited subjects; nested subjects
76580+			   will still be dropped currently */
76581+ if (!reload_state->oldmode && task->inherited)
76582+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
76583+ else {
76584+ /* looked up and tagged to the task previously */
76585+ subj = task->tmpacl;
76586+ }
76587+ /* subj will be non-null */
76588+ __gr_apply_subject_to_task(polstate, task, subj);
76589+ if (reload_state->oldmode) {
76590+ task->acl_role_id = 0;
76591+ task->acl_sp_role = 0;
76592+ task->inherited = 0;
76593+ }
76594+ } else {
76595+ // it's a kernel process
76596+ task->role = polstate->kernel_role;
76597+ task->acl = polstate->kernel_role->root_label;
76598+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
76599+ task->acl->mode &= ~GR_PROCFIND;
76600+#endif
76601+ }
76602+ } while_each_thread(task2, task);
76603+
76604+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
76605+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
76606+
76607+out:
76608+
76609+ return ret;
76610+}
76611+
76612+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
76613+{
76614+ struct gr_reload_state new_reload_state = { };
76615+ int err;
76616+
76617+ new_reload_state.oldpolicy_ptr = polstate;
76618+ new_reload_state.oldalloc_ptr = current_alloc_state;
76619+ new_reload_state.oldmode = oldmode;
76620+
76621+ current_alloc_state = &new_reload_state.newalloc;
76622+ polstate = &new_reload_state.newpolicy;
76623+
76624+ /* everything relevant is now saved off, copy in the new policy */
76625+ if (init_variables(args, true)) {
76626+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
76627+ err = -ENOMEM;
76628+ goto error;
76629+ }
76630+
76631+ err = copy_user_acl(args);
76632+ free_init_variables();
76633+ if (err)
76634+ goto error;
76635+	/* the new policy is copied in, with the old policy available via saved_state;
76636+	   first go through applying roles, making sure to preserve special roles,
76637+	   then apply new subjects, making sure to preserve inherited and nested
76638+	   subjects, though currently only inherited subjects will be preserved
76639+ */
76640+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
76641+ if (err)
76642+ goto error;
76643+
76644+ /* we've now applied the new policy, so restore the old policy state to free it */
76645+ polstate = &new_reload_state.oldpolicy;
76646+ current_alloc_state = &new_reload_state.oldalloc;
76647+ free_variables(true);
76648+
76649+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
76650+ to running_polstate/current_alloc_state inside stop_machine
76651+ */
76652+ err = 0;
76653+ goto out;
76654+error:
76655+ /* on error of loading the new policy, we'll just keep the previous
76656+ policy set around
76657+ */
76658+ free_variables(true);
76659+
76660+ /* doesn't affect runtime, but maintains consistent state */
76661+out:
76662+ polstate = new_reload_state.oldpolicy_ptr;
76663+ current_alloc_state = new_reload_state.oldalloc_ptr;
76664+
76665+ return err;
76666+}
76667+
76668+static int
76669+gracl_init(struct gr_arg *args)
76670+{
76671+ int error = 0;
76672+
76673+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
76674+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
76675+
76676+ if (init_variables(args, false)) {
76677+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
76678+ error = -ENOMEM;
76679+ goto out;
76680+ }
76681+
76682+ error = copy_user_acl(args);
76683+ free_init_variables();
76684+ if (error)
76685+ goto out;
76686+
76687+ error = gr_set_acls(0);
76688+ if (error)
76689+ goto out;
76690+
76691+ gr_enable_rbac_system();
76692+
76693+ return 0;
76694+
76695+out:
76696+ free_variables(false);
76697+ return error;
76698+}
76699+
76700+static int
76701+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
76702+ unsigned char **sum)
76703+{
76704+ struct acl_role_label *r;
76705+ struct role_allowed_ip *ipp;
76706+ struct role_transition *trans;
76707+ unsigned int i;
76708+ int found = 0;
76709+ u32 curr_ip = current->signal->curr_ip;
76710+
76711+ current->signal->saved_ip = curr_ip;
76712+
76713+ /* check transition table */
76714+
76715+ for (trans = current->role->transitions; trans; trans = trans->next) {
76716+ if (!strcmp(rolename, trans->rolename)) {
76717+ found = 1;
76718+ break;
76719+ }
76720+ }
76721+
76722+ if (!found)
76723+ return 0;
76724+
76725+ /* handle special roles that do not require authentication
76726+ and check ip */
76727+
76728+ FOR_EACH_ROLE_START(r)
76729+ if (!strcmp(rolename, r->rolename) &&
76730+ (r->roletype & GR_ROLE_SPECIAL)) {
76731+ found = 0;
76732+ if (r->allowed_ips != NULL) {
76733+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
76734+ if ((ntohl(curr_ip) & ipp->netmask) ==
76735+ (ntohl(ipp->addr) & ipp->netmask))
76736+ found = 1;
76737+ }
76738+ } else
76739+ found = 2;
76740+ if (!found)
76741+ return 0;
76742+
76743+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
76744+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
76745+ *salt = NULL;
76746+ *sum = NULL;
76747+ return 1;
76748+ }
76749+ }
76750+ FOR_EACH_ROLE_END(r)
76751+
76752+ for (i = 0; i < polstate->num_sprole_pws; i++) {
76753+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
76754+ *salt = polstate->acl_special_roles[i]->salt;
76755+ *sum = polstate->acl_special_roles[i]->sum;
76756+ return 1;
76757+ }
76758+ }
76759+
76760+ return 0;
76761+}
76762+
76763+int gr_check_secure_terminal(struct task_struct *task)
76764+{
76765+ struct task_struct *p, *p2, *p3;
76766+ struct files_struct *files;
76767+ struct fdtable *fdt;
76768+ struct file *our_file = NULL, *file;
76769+ int i;
76770+
76771+ if (task->signal->tty == NULL)
76772+ return 1;
76773+
76774+ files = get_files_struct(task);
76775+ if (files != NULL) {
76776+ rcu_read_lock();
76777+ fdt = files_fdtable(files);
76778+ for (i=0; i < fdt->max_fds; i++) {
76779+ file = fcheck_files(files, i);
76780+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
76781+ get_file(file);
76782+ our_file = file;
76783+ }
76784+ }
76785+ rcu_read_unlock();
76786+ put_files_struct(files);
76787+ }
76788+
76789+ if (our_file == NULL)
76790+ return 1;
76791+
76792+ read_lock(&tasklist_lock);
76793+ do_each_thread(p2, p) {
76794+ files = get_files_struct(p);
76795+ if (files == NULL ||
76796+ (p->signal && p->signal->tty == task->signal->tty)) {
76797+ if (files != NULL)
76798+ put_files_struct(files);
76799+ continue;
76800+ }
76801+ rcu_read_lock();
76802+ fdt = files_fdtable(files);
76803+ for (i=0; i < fdt->max_fds; i++) {
76804+ file = fcheck_files(files, i);
76805+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
76806+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
76807+ p3 = task;
76808+ while (task_pid_nr(p3) > 0) {
76809+ if (p3 == p)
76810+ break;
76811+ p3 = p3->real_parent;
76812+ }
76813+ if (p3 == p)
76814+ break;
76815+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
76816+ gr_handle_alertkill(p);
76817+ rcu_read_unlock();
76818+ put_files_struct(files);
76819+ read_unlock(&tasklist_lock);
76820+ fput(our_file);
76821+ return 0;
76822+ }
76823+ }
76824+ rcu_read_unlock();
76825+ put_files_struct(files);
76826+ } while_each_thread(p2, p);
76827+ read_unlock(&tasklist_lock);
76828+
76829+ fput(our_file);
76830+ return 1;
76831+}
76832+
76833+ssize_t
76834+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
76835+{
76836+ struct gr_arg_wrapper uwrap;
76837+ unsigned char *sprole_salt = NULL;
76838+ unsigned char *sprole_sum = NULL;
76839+ int error = 0;
76840+ int error2 = 0;
76841+ size_t req_count = 0;
76842+ unsigned char oldmode = 0;
76843+
76844+ mutex_lock(&gr_dev_mutex);
76845+
76846+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
76847+ error = -EPERM;
76848+ goto out;
76849+ }
76850+
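+	/* select the copy helpers matching the caller's ABI: a 32-bit task on
+	   a 64-bit kernel must use the compat structure layouts;
+	   pax_open_kernel() briefly permits writing these otherwise read-only
+	   function pointers */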
76851+#ifdef CONFIG_COMPAT
76852+ pax_open_kernel();
76853+ if (is_compat_task()) {
76854+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
76855+ copy_gr_arg = &copy_gr_arg_compat;
76856+ copy_acl_object_label = &copy_acl_object_label_compat;
76857+ copy_acl_subject_label = &copy_acl_subject_label_compat;
76858+ copy_acl_role_label = &copy_acl_role_label_compat;
76859+ copy_acl_ip_label = &copy_acl_ip_label_compat;
76860+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
76861+ copy_role_transition = &copy_role_transition_compat;
76862+ copy_sprole_pw = &copy_sprole_pw_compat;
76863+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
76864+ copy_pointer_from_array = &copy_pointer_from_array_compat;
76865+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
76866+ } else {
76867+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
76868+ copy_gr_arg = &copy_gr_arg_normal;
76869+ copy_acl_object_label = &copy_acl_object_label_normal;
76870+ copy_acl_subject_label = &copy_acl_subject_label_normal;
76871+ copy_acl_role_label = &copy_acl_role_label_normal;
76872+ copy_acl_ip_label = &copy_acl_ip_label_normal;
76873+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
76874+ copy_role_transition = &copy_role_transition_normal;
76875+ copy_sprole_pw = &copy_sprole_pw_normal;
76876+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
76877+ copy_pointer_from_array = &copy_pointer_from_array_normal;
76878+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
76879+ }
76880+ pax_close_kernel();
76881+#endif
76882+
76883+ req_count = get_gr_arg_wrapper_size();
76884+
76885+ if (count != req_count) {
76886+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
76887+ error = -EINVAL;
76888+ goto out;
76889+ }
76890+
76892+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
76893+ gr_auth_expires = 0;
76894+ gr_auth_attempts = 0;
76895+ }
76896+
76897+ error = copy_gr_arg_wrapper(buf, &uwrap);
76898+ if (error)
76899+ goto out;
76900+
76901+ error = copy_gr_arg(uwrap.arg, gr_usermode);
76902+ if (error)
76903+ goto out;
76904+
76905+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
76906+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
76907+ time_after(gr_auth_expires, get_seconds())) {
76908+ error = -EBUSY;
76909+ goto out;
76910+ }
76911+
76912+	/* if a non-root user is trying to do anything other than use a special
76913+	   role, do not attempt authentication and do not count the attempt
76914+	   toward authentication lockout
76915+ */
76916+
76917+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
76918+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
76919+ gr_is_global_nonroot(current_uid())) {
76920+ error = -EPERM;
76921+ goto out;
76922+ }
76923+
76924+ /* ensure pw and special role name are null terminated */
76925+
76926+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
76927+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
76928+
76929+ /* Okay.
76930+	 * We have enough of the argument structure (we have yet
76931+	 * to copy_from_user the tables themselves). Copy the tables
76932+ * only if we need them, i.e. for loading operations. */
76933+
76934+ switch (gr_usermode->mode) {
76935+ case GR_STATUS:
76936+ if (gr_acl_is_enabled()) {
76937+ error = 1;
76938+ if (!gr_check_secure_terminal(current))
76939+ error = 3;
76940+ } else
76941+ error = 2;
76942+ goto out;
76943+ case GR_SHUTDOWN:
76944+ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
76945+ stop_machine(gr_rbac_disable, NULL, NULL);
76946+ free_variables(false);
76947+ memset(gr_usermode, 0, sizeof(struct gr_arg));
76948+ memset(gr_system_salt, 0, GR_SALT_LEN);
76949+ memset(gr_system_sum, 0, GR_SHA_LEN);
76950+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
76951+ } else if (gr_acl_is_enabled()) {
76952+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
76953+ error = -EPERM;
76954+ } else {
76955+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
76956+ error = -EAGAIN;
76957+ }
76958+ break;
76959+ case GR_ENABLE:
76960+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
76961+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
76962+ else {
76963+ if (gr_acl_is_enabled())
76964+ error = -EAGAIN;
76965+ else
76966+ error = error2;
76967+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
76968+ }
76969+ break;
76970+ case GR_OLDRELOAD:
76971+ oldmode = 1;
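+		/* fall through to GR_RELOAD with oldmode set */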
76972+ case GR_RELOAD:
76973+ if (!gr_acl_is_enabled()) {
76974+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
76975+ error = -EAGAIN;
76976+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
76977+ error2 = gracl_reload(gr_usermode, oldmode);
76978+ if (!error2)
76979+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
76980+ else {
76981+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
76982+ error = error2;
76983+ }
76984+ } else {
76985+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
76986+ error = -EPERM;
76987+ }
76988+ break;
76989+ case GR_SEGVMOD:
76990+ if (unlikely(!gr_acl_is_enabled())) {
76991+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
76992+ error = -EAGAIN;
76993+ break;
76994+ }
76995+
76996+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
76997+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
76998+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
76999+ struct acl_subject_label *segvacl;
77000+ segvacl =
77001+ lookup_acl_subj_label(gr_usermode->segv_inode,
77002+ gr_usermode->segv_device,
77003+ current->role);
77004+ if (segvacl) {
77005+ segvacl->crashes = 0;
77006+ segvacl->expires = 0;
77007+ }
77008+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
77009+ gr_remove_uid(gr_usermode->segv_uid);
77010+ }
77011+ } else {
77012+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
77013+ error = -EPERM;
77014+ }
77015+ break;
77016+ case GR_SPROLE:
77017+ case GR_SPROLEPAM:
77018+ if (unlikely(!gr_acl_is_enabled())) {
77019+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
77020+ error = -EAGAIN;
77021+ break;
77022+ }
77023+
77024+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
77025+ current->role->expires = 0;
77026+ current->role->auth_attempts = 0;
77027+ }
77028+
77029+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
77030+ time_after(current->role->expires, get_seconds())) {
77031+ error = -EBUSY;
77032+ goto out;
77033+ }
77034+
77035+ if (lookup_special_role_auth
77036+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
77037+ && ((!sprole_salt && !sprole_sum)
77038+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
77039+ char *p = "";
77040+ assign_special_role(gr_usermode->sp_role);
77041+ read_lock(&tasklist_lock);
77042+ if (current->real_parent)
77043+ p = current->real_parent->role->rolename;
77044+ read_unlock(&tasklist_lock);
77045+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
77046+ p, acl_sp_role_value);
77047+ } else {
77048+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
77049+ error = -EPERM;
77050+			if (!(current->role->auth_attempts++))
77051+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
77052+
77053+ goto out;
77054+ }
77055+ break;
77056+ case GR_UNSPROLE:
77057+ if (unlikely(!gr_acl_is_enabled())) {
77058+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
77059+ error = -EAGAIN;
77060+ break;
77061+ }
77062+
77063+ if (current->role->roletype & GR_ROLE_SPECIAL) {
77064+ char *p = "";
77065+ int i = 0;
77066+
77067+ read_lock(&tasklist_lock);
77068+ if (current->real_parent) {
77069+ p = current->real_parent->role->rolename;
77070+ i = current->real_parent->acl_role_id;
77071+ }
77072+ read_unlock(&tasklist_lock);
77073+
77074+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
77075+ gr_set_acls(1);
77076+ } else {
77077+ error = -EPERM;
77078+ goto out;
77079+ }
77080+ break;
77081+ default:
77082+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
77083+ error = -EINVAL;
77084+ break;
77085+ }
77086+
77087+ if (error != -EPERM)
77088+ goto out;
77089+
77090+	if (!(gr_auth_attempts++))
77091+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
77092+
77093+ out:
77094+ mutex_unlock(&gr_dev_mutex);
77095+
77096+ if (!error)
77097+ error = req_count;
77098+
77099+ return error;
77100+}
77101+
77102+int
77103+gr_set_acls(const int type)
77104+{
77105+ struct task_struct *task, *task2;
77106+ struct acl_role_label *role = current->role;
77107+ struct acl_subject_label *subj;
77108+ __u16 acl_role_id = current->acl_role_id;
77109+ const struct cred *cred;
77110+ int ret;
77111+
77112+ rcu_read_lock();
77113+ read_lock(&tasklist_lock);
77114+ read_lock(&grsec_exec_file_lock);
77115+ do_each_thread(task2, task) {
77116+		/* check to see if we're called from the exit handler;
77117+ if so, only replace ACLs that have inherited the admin
77118+ ACL */
77119+
77120+ if (type && (task->role != role ||
77121+ task->acl_role_id != acl_role_id))
77122+ continue;
77123+
77124+ task->acl_role_id = 0;
77125+ task->acl_sp_role = 0;
77126+ task->inherited = 0;
77127+
77128+ if (task->exec_file) {
77129+ cred = __task_cred(task);
77130+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
77131+ subj = __gr_get_subject_for_task(polstate, task, NULL);
77132+ if (subj == NULL) {
77133+ ret = -EINVAL;
77134+ read_unlock(&grsec_exec_file_lock);
77135+ read_unlock(&tasklist_lock);
77136+ rcu_read_unlock();
77137+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
77138+ return ret;
77139+ }
77140+ __gr_apply_subject_to_task(polstate, task, subj);
77141+ } else {
77142+ // it's a kernel process
77143+ task->role = polstate->kernel_role;
77144+ task->acl = polstate->kernel_role->root_label;
77145+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
77146+ task->acl->mode &= ~GR_PROCFIND;
77147+#endif
77148+ }
77149+ } while_each_thread(task2, task);
77150+ read_unlock(&grsec_exec_file_lock);
77151+ read_unlock(&tasklist_lock);
77152+ rcu_read_unlock();
77153+
77154+ return 0;
77155+}
77156diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
77157new file mode 100644
77158index 0000000..39645c9
77159--- /dev/null
77160+++ b/grsecurity/gracl_res.c
77161@@ -0,0 +1,68 @@
77162+#include <linux/kernel.h>
77163+#include <linux/sched.h>
77164+#include <linux/gracl.h>
77165+#include <linux/grinternal.h>
77166+
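+/* resource names indexed by rlimit number for logging; GR_CRASH_RES is
+   grsecurity's pseudo-rlimit used by the RES_CRASH anti-bruteforce code */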
77167+static const char *restab_log[] = {
77168+ [RLIMIT_CPU] = "RLIMIT_CPU",
77169+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
77170+ [RLIMIT_DATA] = "RLIMIT_DATA",
77171+ [RLIMIT_STACK] = "RLIMIT_STACK",
77172+ [RLIMIT_CORE] = "RLIMIT_CORE",
77173+ [RLIMIT_RSS] = "RLIMIT_RSS",
77174+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
77175+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
77176+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
77177+ [RLIMIT_AS] = "RLIMIT_AS",
77178+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
77179+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
77180+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
77181+ [RLIMIT_NICE] = "RLIMIT_NICE",
77182+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
77183+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
77184+ [GR_CRASH_RES] = "RLIMIT_CRASH"
77185+};
77186+
77187+void
77188+gr_log_resource(const struct task_struct *task,
77189+ const int res, const unsigned long wanted, const int gt)
77190+{
77191+ const struct cred *cred;
77192+ unsigned long rlim;
77193+
77194+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
77195+ return;
77196+
77197+ // not yet supported resource
77198+ if (unlikely(!restab_log[res]))
77199+ return;
77200+
77201+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
77202+ rlim = task_rlimit_max(task, res);
77203+ else
77204+ rlim = task_rlimit(task, res);
77205+
77206+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
77207+ return;
77208+
77209+ rcu_read_lock();
77210+ cred = __task_cred(task);
77211+
77212+ if (res == RLIMIT_NPROC &&
77213+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
77214+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
77215+ goto out_rcu_unlock;
77216+ else if (res == RLIMIT_MEMLOCK &&
77217+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
77218+ goto out_rcu_unlock;
77219+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
77220+ goto out_rcu_unlock;
77221+ rcu_read_unlock();
77222+
77223+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
77224+
77225+ return;
77226+out_rcu_unlock:
77227+ rcu_read_unlock();
77228+ return;
77229+}
77230diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
77231new file mode 100644
77232index 0000000..2040e61
77233--- /dev/null
77234+++ b/grsecurity/gracl_segv.c
77235@@ -0,0 +1,313 @@
77236+#include <linux/kernel.h>
77237+#include <linux/mm.h>
77238+#include <asm/uaccess.h>
77239+#include <asm/errno.h>
77240+#include <asm/mman.h>
77241+#include <net/sock.h>
77242+#include <linux/file.h>
77243+#include <linux/fs.h>
77244+#include <linux/net.h>
77245+#include <linux/in.h>
77246+#include <linux/slab.h>
77247+#include <linux/types.h>
77248+#include <linux/sched.h>
77249+#include <linux/timer.h>
77250+#include <linux/gracl.h>
77251+#include <linux/grsecurity.h>
77252+#include <linux/grinternal.h>
77253+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
77254+#include <linux/magic.h>
77255+#include <linux/pagemap.h>
77256+#include "../fs/btrfs/async-thread.h"
77257+#include "../fs/btrfs/ctree.h"
77258+#include "../fs/btrfs/btrfs_inode.h"
77259+#endif
77260+
77261+static struct crash_uid *uid_set;
77262+static unsigned short uid_used;
77263+static DEFINE_SPINLOCK(gr_uid_lock);
77264+extern rwlock_t gr_inode_lock;
77265+extern struct acl_subject_label *
77266+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
77267+ struct acl_role_label *role);
77268+
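+/* btrfs subvolumes share one superblock s_dev, so use the subvolume's
+   anonymous device to tell files on different subvolumes apart */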
77269+static inline dev_t __get_dev(const struct dentry *dentry)
77270+{
77271+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
77272+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
77273+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
77274+ else
77275+#endif
77276+ return dentry->d_sb->s_dev;
77277+}
77278+
77279+int
77280+gr_init_uidset(void)
77281+{
77282+ uid_set =
77283+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
77284+ uid_used = 0;
77285+
77286+ return uid_set ? 1 : 0;
77287+}
77288+
77289+void
77290+gr_free_uidset(void)
77291+{
77292+ if (uid_set) {
77293+ struct crash_uid *tmpset;
77294+ spin_lock(&gr_uid_lock);
77295+ tmpset = uid_set;
77296+ uid_set = NULL;
77297+ uid_used = 0;
77298+ spin_unlock(&gr_uid_lock);
77299+		kfree(tmpset);
77301+ }
77302+
77303+ return;
77304+}
77305+
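+/* binary search of uid_set, which gr_insertsort() keeps ordered by uid;
+   returns the slot index, or -1 if the uid is not present */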
77306+int
77307+gr_find_uid(const uid_t uid)
77308+{
77309+ struct crash_uid *tmp = uid_set;
77310+ uid_t buid;
77311+ int low = 0, high = uid_used - 1, mid;
77312+
77313+ while (high >= low) {
77314+ mid = (low + high) >> 1;
77315+ buid = tmp[mid].uid;
77316+ if (buid == uid)
77317+ return mid;
77318+ if (buid > uid)
77319+ high = mid - 1;
77320+ if (buid < uid)
77321+ low = mid + 1;
77322+ }
77323+
77324+ return -1;
77325+}
77326+
77327+static __inline__ void
77328+gr_insertsort(void)
77329+{
77330+ unsigned short i, j;
77331+ struct crash_uid index;
77332+
77333+ for (i = 1; i < uid_used; i++) {
77334+ index = uid_set[i];
77335+ j = i;
77336+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
77337+ uid_set[j] = uid_set[j - 1];
77338+ j--;
77339+ }
77340+ uid_set[j] = index;
77341+ }
77342+
77343+ return;
77344+}
77345+
77346+static __inline__ void
77347+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
77348+{
77349+ int loc;
77350+ uid_t uid = GR_GLOBAL_UID(kuid);
77351+
77352+ if (uid_used == GR_UIDTABLE_MAX)
77353+ return;
77354+
77355+ loc = gr_find_uid(uid);
77356+
77357+ if (loc >= 0) {
77358+ uid_set[loc].expires = expires;
77359+ return;
77360+ }
77361+
77362+ uid_set[uid_used].uid = uid;
77363+ uid_set[uid_used].expires = expires;
77364+ uid_used++;
77365+
77366+ gr_insertsort();
77367+
77368+ return;
77369+}
77370+
77371+void
77372+gr_remove_uid(const unsigned short loc)
77373+{
77374+ unsigned short i;
77375+
77376+ for (i = loc + 1; i < uid_used; i++)
77377+ uid_set[i - 1] = uid_set[i];
77378+
77379+ uid_used--;
77380+
77381+ return;
77382+}
77383+
77384+int
77385+gr_check_crash_uid(const kuid_t kuid)
77386+{
77387+ int loc;
77388+ int ret = 0;
77389+ uid_t uid;
77390+
77391+ if (unlikely(!gr_acl_is_enabled()))
77392+ return 0;
77393+
77394+ uid = GR_GLOBAL_UID(kuid);
77395+
77396+ spin_lock(&gr_uid_lock);
77397+ loc = gr_find_uid(uid);
77398+
77399+ if (loc < 0)
77400+ goto out_unlock;
77401+
77402+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
77403+ gr_remove_uid(loc);
77404+ else
77405+ ret = 1;
77406+
77407+out_unlock:
77408+ spin_unlock(&gr_uid_lock);
77409+ return ret;
77410+}
77411+
77412+static __inline__ int
77413+proc_is_setxid(const struct cred *cred)
77414+{
77415+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
77416+ !uid_eq(cred->uid, cred->fsuid))
77417+ return 1;
77418+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
77419+ !gid_eq(cred->gid, cred->fsgid))
77420+ return 1;
77421+
77422+ return 0;
77423+}
77424+
77425+extern int gr_fake_force_sig(int sig, struct task_struct *t);
77426+
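+/* invoked on fatal signals: once a subject with RES_CRASH set exceeds its
+   configured crash rate, either ban the (setxid) uid and kill its other
+   processes, or kill every task running the same binary under this subject */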
77427+void
77428+gr_handle_crash(struct task_struct *task, const int sig)
77429+{
77430+ struct acl_subject_label *curr;
77431+ struct task_struct *tsk, *tsk2;
77432+ const struct cred *cred;
77433+ const struct cred *cred2;
77434+
77435+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
77436+ return;
77437+
77438+ if (unlikely(!gr_acl_is_enabled()))
77439+ return;
77440+
77441+ curr = task->acl;
77442+
77443+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
77444+ return;
77445+
77446+ if (time_before_eq(curr->expires, get_seconds())) {
77447+ curr->expires = 0;
77448+ curr->crashes = 0;
77449+ }
77450+
77451+ curr->crashes++;
77452+
77453+ if (!curr->expires)
77454+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
77455+
77456+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
77457+ time_after(curr->expires, get_seconds())) {
77458+ rcu_read_lock();
77459+ cred = __task_cred(task);
77460+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
77461+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
77462+ spin_lock(&gr_uid_lock);
77463+ gr_insert_uid(cred->uid, curr->expires);
77464+ spin_unlock(&gr_uid_lock);
77465+ curr->expires = 0;
77466+ curr->crashes = 0;
77467+ read_lock(&tasklist_lock);
77468+ do_each_thread(tsk2, tsk) {
77469+ cred2 = __task_cred(tsk);
77470+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
77471+ gr_fake_force_sig(SIGKILL, tsk);
77472+ } while_each_thread(tsk2, tsk);
77473+ read_unlock(&tasklist_lock);
77474+ } else {
77475+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
77476+ read_lock(&tasklist_lock);
77477+ read_lock(&grsec_exec_file_lock);
77478+ do_each_thread(tsk2, tsk) {
77479+ if (likely(tsk != task)) {
77480+ // if this thread has the same subject as the one that triggered
77481+ // RES_CRASH and it's the same binary, kill it
77482+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
77483+ gr_fake_force_sig(SIGKILL, tsk);
77484+ }
77485+ } while_each_thread(tsk2, tsk);
77486+ read_unlock(&grsec_exec_file_lock);
77487+ read_unlock(&tasklist_lock);
77488+ }
77489+ rcu_read_unlock();
77490+ }
77491+
77492+ return;
77493+}
77494+
77495+int
77496+gr_check_crash_exec(const struct file *filp)
77497+{
77498+ struct acl_subject_label *curr;
77499+
77500+ if (unlikely(!gr_acl_is_enabled()))
77501+ return 0;
77502+
77503+ read_lock(&gr_inode_lock);
77504+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
77505+ __get_dev(filp->f_path.dentry),
77506+ current->role);
77507+ read_unlock(&gr_inode_lock);
77508+
77509+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
77510+ (!curr->crashes && !curr->expires))
77511+ return 0;
77512+
77513+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
77514+ time_after(curr->expires, get_seconds()))
77515+ return 1;
77516+ else if (time_before_eq(curr->expires, get_seconds())) {
77517+ curr->crashes = 0;
77518+ curr->expires = 0;
77519+ }
77520+
77521+ return 0;
77522+}
77523+
77524+void
77525+gr_handle_alertkill(struct task_struct *task)
77526+{
77527+ struct acl_subject_label *curracl;
77528+ __u32 curr_ip;
77529+ struct task_struct *p, *p2;
77530+
77531+ if (unlikely(!gr_acl_is_enabled()))
77532+ return;
77533+
77534+ curracl = task->acl;
77535+ curr_ip = task->signal->curr_ip;
77536+
77537+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
77538+ read_lock(&tasklist_lock);
77539+ do_each_thread(p2, p) {
77540+ if (p->signal->curr_ip == curr_ip)
77541+ gr_fake_force_sig(SIGKILL, p);
77542+ } while_each_thread(p2, p);
77543+ read_unlock(&tasklist_lock);
77544+ } else if (curracl->mode & GR_KILLPROC)
77545+ gr_fake_force_sig(SIGKILL, task);
77546+
77547+ return;
77548+}
77549diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
77550new file mode 100644
77551index 0000000..98011b0
77552--- /dev/null
77553+++ b/grsecurity/gracl_shm.c
77554@@ -0,0 +1,40 @@
77555+#include <linux/kernel.h>
77556+#include <linux/mm.h>
77557+#include <linux/sched.h>
77558+#include <linux/file.h>
77559+#include <linux/ipc.h>
77560+#include <linux/gracl.h>
77561+#include <linux/grsecurity.h>
77562+#include <linux/grinternal.h>
77563+
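+/* deny shmat() to a segment whose creating (or last-attaching) task is
+   still alive, predates the segment, runs under a GR_PROTSHM subject, and
+   uses a different subject than the caller */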
77564+int
77565+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
77566+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
77567+{
77568+ struct task_struct *task;
77569+
77570+ if (!gr_acl_is_enabled())
77571+ return 1;
77572+
77573+ rcu_read_lock();
77574+ read_lock(&tasklist_lock);
77575+
77576+ task = find_task_by_vpid(shm_cprid);
77577+
77578+ if (unlikely(!task))
77579+ task = find_task_by_vpid(shm_lapid);
77580+
77581+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
77582+ (task_pid_nr(task) == shm_lapid)) &&
77583+ (task->acl->mode & GR_PROTSHM) &&
77584+ (task->acl != current->acl))) {
77585+ read_unlock(&tasklist_lock);
77586+ rcu_read_unlock();
77587+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
77588+ return 0;
77589+ }
77590+ read_unlock(&tasklist_lock);
77591+ rcu_read_unlock();
77592+
77593+ return 1;
77594+}
77595diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
77596new file mode 100644
77597index 0000000..bc0be01
77598--- /dev/null
77599+++ b/grsecurity/grsec_chdir.c
77600@@ -0,0 +1,19 @@
77601+#include <linux/kernel.h>
77602+#include <linux/sched.h>
77603+#include <linux/fs.h>
77604+#include <linux/file.h>
77605+#include <linux/grsecurity.h>
77606+#include <linux/grinternal.h>
77607+
77608+void
77609+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
77610+{
77611+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
77612+ if ((grsec_enable_chdir && grsec_enable_group &&
77613+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
77614+ !grsec_enable_group)) {
77615+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
77616+ }
77617+#endif
77618+ return;
77619+}
77620diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
77621new file mode 100644
77622index 0000000..baa635c
77623--- /dev/null
77624+++ b/grsecurity/grsec_chroot.c
77625@@ -0,0 +1,387 @@
77626+#include <linux/kernel.h>
77627+#include <linux/module.h>
77628+#include <linux/sched.h>
77629+#include <linux/file.h>
77630+#include <linux/fs.h>
77631+#include <linux/mount.h>
77632+#include <linux/types.h>
77633+#include "../fs/mount.h"
77634+#include <linux/grsecurity.h>
77635+#include <linux/grinternal.h>
77636+
77637+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
77638+int gr_init_ran;
77639+#endif
77640+
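+/* a task counts as chrooted when its root differs from both init's root and
+   its mount namespace root; with CHROOT_INITRD this is deferred until init
+   has actually run, so early initrd tasks aren't flagged */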
77641+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
77642+{
77643+#ifdef CONFIG_GRKERNSEC
77644+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
77645+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
77646+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
77647+ && gr_init_ran
77648+#endif
77649+ )
77650+ task->gr_is_chrooted = 1;
77651+ else {
77652+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
77653+ if (task_pid_nr(task) == 1 && !gr_init_ran)
77654+ gr_init_ran = 1;
77655+#endif
77656+ task->gr_is_chrooted = 0;
77657+ }
77658+
77659+ task->gr_chroot_dentry = path->dentry;
77660+#endif
77661+ return;
77662+}
77663+
77664+void gr_clear_chroot_entries(struct task_struct *task)
77665+{
77666+#ifdef CONFIG_GRKERNSEC
77667+ task->gr_is_chrooted = 0;
77668+ task->gr_chroot_dentry = NULL;
77669+#endif
77670+ return;
77671+}
77672+
77673+int
77674+gr_handle_chroot_unix(const pid_t pid)
77675+{
77676+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
77677+ struct task_struct *p;
77678+
77679+ if (unlikely(!grsec_enable_chroot_unix))
77680+ return 1;
77681+
77682+ if (likely(!proc_is_chrooted(current)))
77683+ return 1;
77684+
77685+ rcu_read_lock();
77686+ read_lock(&tasklist_lock);
77687+ p = find_task_by_vpid_unrestricted(pid);
77688+ if (unlikely(p && !have_same_root(current, p))) {
77689+ read_unlock(&tasklist_lock);
77690+ rcu_read_unlock();
77691+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
77692+ return 0;
77693+ }
77694+ read_unlock(&tasklist_lock);
77695+ rcu_read_unlock();
77696+#endif
77697+ return 1;
77698+}
77699+
77700+int
77701+gr_handle_chroot_nice(void)
77702+{
77703+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
77704+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
77705+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
77706+ return -EPERM;
77707+ }
77708+#endif
77709+ return 0;
77710+}
77711+
77712+int
77713+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
77714+{
77715+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
77716+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
77717+ && proc_is_chrooted(current)) {
77718+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
77719+ return -EACCES;
77720+ }
77721+#endif
77722+ return 0;
77723+}
77724+
77725+int
77726+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
77727+{
77728+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
77729+ struct task_struct *p;
77730+ int ret = 0;
77731+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
77732+ return ret;
77733+
77734+ read_lock(&tasklist_lock);
77735+ do_each_pid_task(pid, type, p) {
77736+ if (!have_same_root(current, p)) {
77737+ ret = 1;
77738+ goto out;
77739+ }
77740+ } while_each_pid_task(pid, type, p);
77741+out:
77742+ read_unlock(&tasklist_lock);
77743+ return ret;
77744+#endif
77745+ return 0;
77746+}
77747+
77748+int
77749+gr_pid_is_chrooted(struct task_struct *p)
77750+{
77751+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
77752+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
77753+ return 0;
77754+
77755+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
77756+ !have_same_root(current, p)) {
77757+ return 1;
77758+ }
77759+#endif
77760+ return 0;
77761+}
77762+
77763+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
77764+
77765+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
77766+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
77767+{
77768+ struct path path, currentroot;
77769+ int ret = 0;
77770+
77771+ path.dentry = (struct dentry *)u_dentry;
77772+ path.mnt = (struct vfsmount *)u_mnt;
77773+ get_fs_root(current->fs, &currentroot);
77774+ if (path_is_under(&path, &currentroot))
77775+ ret = 1;
77776+ path_put(&currentroot);
77777+
77778+ return ret;
77779+}
77780+#endif
77781+
77782+int
77783+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
77784+{
77785+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
77786+ if (!grsec_enable_chroot_fchdir)
77787+ return 1;
77788+
77789+ if (!proc_is_chrooted(current))
77790+ return 1;
77791+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
77792+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
77793+ return 0;
77794+ }
77795+#endif
77796+ return 1;
77797+}
77798+
77799+int
77800+gr_chroot_fhandle(void)
77801+{
77802+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
77803+ if (!grsec_enable_chroot_fchdir)
77804+ return 1;
77805+
77806+ if (!proc_is_chrooted(current))
77807+ return 1;
77808+ else {
77809+ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
77810+ return 0;
77811+ }
77812+#endif
77813+ return 1;
77814+}
77815+
77816+int
77817+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
77818+ const time_t shm_createtime)
77819+{
77820+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
77821+ struct task_struct *p;
77822+ time_t starttime;
77823+
77824+ if (unlikely(!grsec_enable_chroot_shmat))
77825+ return 1;
77826+
77827+ if (likely(!proc_is_chrooted(current)))
77828+ return 1;
77829+
77830+ rcu_read_lock();
77831+ read_lock(&tasklist_lock);
77832+
77833+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
77834+ starttime = p->start_time.tv_sec;
77835+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
77836+ if (have_same_root(current, p)) {
77837+ goto allow;
77838+ } else {
77839+ read_unlock(&tasklist_lock);
77840+ rcu_read_unlock();
77841+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
77842+ return 0;
77843+ }
77844+ }
77845+ /* the creator may have exited and its pid been reused; fall through to the next check */
77846+ }
77847+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
77848+ if (unlikely(!have_same_root(current, p))) {
77849+ read_unlock(&tasklist_lock);
77850+ rcu_read_unlock();
77851+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
77852+ return 0;
77853+ }
77854+ }
77855+
77856+allow:
77857+ read_unlock(&tasklist_lock);
77858+ rcu_read_unlock();
77859+#endif
77860+ return 1;
77861+}
77862+
77863+void
77864+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
77865+{
77866+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
77867+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
77868+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
77869+#endif
77870+ return;
77871+}
77872+
77873+int
77874+gr_handle_chroot_mknod(const struct dentry *dentry,
77875+ const struct vfsmount *mnt, const int mode)
77876+{
77877+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
77878+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
77879+ proc_is_chrooted(current)) {
77880+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
77881+ return -EPERM;
77882+ }
77883+#endif
77884+ return 0;
77885+}
77886+
77887+int
77888+gr_handle_chroot_mount(const struct dentry *dentry,
77889+ const struct vfsmount *mnt, const char *dev_name)
77890+{
77891+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
77892+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
77893+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
77894+ return -EPERM;
77895+ }
77896+#endif
77897+ return 0;
77898+}
77899+
77900+int
77901+gr_handle_chroot_pivot(void)
77902+{
77903+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
77904+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
77905+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
77906+ return -EPERM;
77907+ }
77908+#endif
77909+ return 0;
77910+}
77911+
77912+int
77913+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
77914+{
77915+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
77916+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
77917+ !gr_is_outside_chroot(dentry, mnt)) {
77918+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
77919+ return -EPERM;
77920+ }
77921+#endif
77922+ return 0;
77923+}
77924+
77925+extern const char *captab_log[];
77926+extern int captab_log_entries;
77927+
77928+int
77929+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
77930+{
77931+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77932+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
77933+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
77934+ if (cap_raised(chroot_caps, cap)) {
77935+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
77936+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
77937+ }
77938+ return 0;
77939+ }
77940+ }
77941+#endif
77942+ return 1;
77943+}
77944+
77945+int
77946+gr_chroot_is_capable(const int cap)
77947+{
77948+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77949+ return gr_task_chroot_is_capable(current, current_cred(), cap);
77950+#endif
77951+ return 1;
77952+}
77953+
77954+int
77955+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
77956+{
77957+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77958+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
77959+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
77960+ if (cap_raised(chroot_caps, cap)) {
77961+ return 0;
77962+ }
77963+ }
77964+#endif
77965+ return 1;
77966+}
77967+
77968+int
77969+gr_chroot_is_capable_nolog(const int cap)
77970+{
77971+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77972+ return gr_task_chroot_is_capable_nolog(current, cap);
77973+#endif
77974+ return 1;
77975+}
77976+
77977+int
77978+gr_handle_chroot_sysctl(const int op)
77979+{
77980+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
77981+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
77982+ proc_is_chrooted(current))
77983+ return -EACCES;
77984+#endif
77985+ return 0;
77986+}
77987+
77988+void
77989+gr_handle_chroot_chdir(const struct path *path)
77990+{
77991+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
77992+ if (grsec_enable_chroot_chdir)
77993+ set_fs_pwd(current->fs, path);
77994+#endif
77995+ return;
77996+}
77997+
77998+int
77999+gr_handle_chroot_chmod(const struct dentry *dentry,
78000+ const struct vfsmount *mnt, const int mode)
78001+{
78002+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
78003+ /* allow chmod +s on directories, but not files */
78004+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
78005+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
78006+ proc_is_chrooted(current)) {
78007+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
78008+ return -EPERM;
78009+ }
78010+#endif
78011+ return 0;
78012+}
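
The chmod restriction above comes down to a single mode test. Below is a minimal
userspace sketch of that predicate (illustrative C, not part of the patch), showing
which requests a chrooted process loses; setgid without group-execute is deliberately
left alone, since that combination historically marks mandatory locking rather than
a privileged binary.

#include <stdio.h>
#include <sys/stat.h>

/* mirrors the test in gr_handle_chroot_chmod(): deny +s on non-directories */
static int chmod_denied_in_chroot(mode_t mode, int is_dir)
{
	if (is_dir)	/* chmod +s on directories stays allowed */
		return 0;
	return (mode & S_ISUID) ||
	       ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP));
}

int main(void)
{
	printf("04755 on file: %s\n", chmod_denied_in_chroot(04755, 0) ? "denied" : "allowed");
	printf("02755 on file: %s\n", chmod_denied_in_chroot(02755, 0) ? "denied" : "allowed");
	printf("02644 on file: %s\n", chmod_denied_in_chroot(02644, 0) ? "denied" : "allowed");
	printf("04755 on dir:  %s\n", chmod_denied_in_chroot(04755, 1) ? "denied" : "allowed");
	return 0;
}
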
78013diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
78014new file mode 100644
78015index 0000000..2d3bcb7
78016--- /dev/null
78017+++ b/grsecurity/grsec_disabled.c
78018@@ -0,0 +1,440 @@
78019+#include <linux/kernel.h>
78020+#include <linux/module.h>
78021+#include <linux/sched.h>
78022+#include <linux/file.h>
78023+#include <linux/fs.h>
78024+#include <linux/kdev_t.h>
78025+#include <linux/net.h>
78026+#include <linux/in.h>
78027+#include <linux/ip.h>
78028+#include <linux/skbuff.h>
78029+#include <linux/sysctl.h>
78030+
78031+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
78032+void
78033+pax_set_initial_flags(struct linux_binprm *bprm)
78034+{
78035+ return;
78036+}
78037+#endif
78038+
78039+#ifdef CONFIG_SYSCTL
78040+__u32
78041+gr_handle_sysctl(const struct ctl_table * table, const int op)
78042+{
78043+ return 0;
78044+}
78045+#endif
78046+
78047+#ifdef CONFIG_TASKSTATS
78048+int gr_is_taskstats_denied(int pid)
78049+{
78050+ return 0;
78051+}
78052+#endif
78053+
78054+int
78055+gr_acl_is_enabled(void)
78056+{
78057+ return 0;
78058+}
78059+
78060+int
78061+gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
78062+{
78063+ return 0;
78064+}
78065+
78066+void
78067+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
78068+{
78069+ return;
78070+}
78071+
78072+int
78073+gr_handle_rawio(const struct inode *inode)
78074+{
78075+ return 0;
78076+}
78077+
78078+void
78079+gr_acl_handle_psacct(struct task_struct *task, const long code)
78080+{
78081+ return;
78082+}
78083+
78084+int
78085+gr_handle_ptrace(struct task_struct *task, const long request)
78086+{
78087+ return 0;
78088+}
78089+
78090+int
78091+gr_handle_proc_ptrace(struct task_struct *task)
78092+{
78093+ return 0;
78094+}
78095+
78096+int
78097+gr_set_acls(const int type)
78098+{
78099+ return 0;
78100+}
78101+
78102+int
78103+gr_check_hidden_task(const struct task_struct *tsk)
78104+{
78105+ return 0;
78106+}
78107+
78108+int
78109+gr_check_protected_task(const struct task_struct *task)
78110+{
78111+ return 0;
78112+}
78113+
78114+int
78115+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
78116+{
78117+ return 0;
78118+}
78119+
78120+void
78121+gr_copy_label(struct task_struct *tsk)
78122+{
78123+ return;
78124+}
78125+
78126+void
78127+gr_set_pax_flags(struct task_struct *task)
78128+{
78129+ return;
78130+}
78131+
78132+int
78133+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
78134+ const int unsafe_share)
78135+{
78136+ return 0;
78137+}
78138+
78139+void
78140+gr_handle_delete(const ino_t ino, const dev_t dev)
78141+{
78142+ return;
78143+}
78144+
78145+void
78146+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
78147+{
78148+ return;
78149+}
78150+
78151+void
78152+gr_handle_crash(struct task_struct *task, const int sig)
78153+{
78154+ return;
78155+}
78156+
78157+int
78158+gr_check_crash_exec(const struct file *filp)
78159+{
78160+ return 0;
78161+}
78162+
78163+int
78164+gr_check_crash_uid(const kuid_t uid)
78165+{
78166+ return 0;
78167+}
78168+
78169+void
78170+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
78171+ struct dentry *old_dentry,
78172+ struct dentry *new_dentry,
78173+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
78174+{
78175+ return;
78176+}
78177+
78178+int
78179+gr_search_socket(const int family, const int type, const int protocol)
78180+{
78181+ return 1;
78182+}
78183+
78184+int
78185+gr_search_connectbind(const int mode, const struct socket *sock,
78186+ const struct sockaddr_in *addr)
78187+{
78188+ return 0;
78189+}
78190+
78191+void
78192+gr_handle_alertkill(struct task_struct *task)
78193+{
78194+ return;
78195+}
78196+
78197+__u32
78198+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
78199+{
78200+ return 1;
78201+}
78202+
78203+__u32
78204+gr_acl_handle_hidden_file(const struct dentry * dentry,
78205+ const struct vfsmount * mnt)
78206+{
78207+ return 1;
78208+}
78209+
78210+__u32
78211+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
78212+ int acc_mode)
78213+{
78214+ return 1;
78215+}
78216+
78217+__u32
78218+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
78219+{
78220+ return 1;
78221+}
78222+
78223+__u32
78224+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
78225+{
78226+ return 1;
78227+}
78228+
78229+int
78230+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
78231+ unsigned int *vm_flags)
78232+{
78233+ return 1;
78234+}
78235+
78236+__u32
78237+gr_acl_handle_truncate(const struct dentry * dentry,
78238+ const struct vfsmount * mnt)
78239+{
78240+ return 1;
78241+}
78242+
78243+__u32
78244+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
78245+{
78246+ return 1;
78247+}
78248+
78249+__u32
78250+gr_acl_handle_access(const struct dentry * dentry,
78251+ const struct vfsmount * mnt, const int fmode)
78252+{
78253+ return 1;
78254+}
78255+
78256+__u32
78257+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
78258+ umode_t *mode)
78259+{
78260+ return 1;
78261+}
78262+
78263+__u32
78264+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
78265+{
78266+ return 1;
78267+}
78268+
78269+__u32
78270+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
78271+{
78272+ return 1;
78273+}
78274+
78275+__u32
78276+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
78277+{
78278+ return 1;
78279+}
78280+
78281+void
78282+grsecurity_init(void)
78283+{
78284+ return;
78285+}
78286+
78287+umode_t gr_acl_umask(void)
78288+{
78289+ return 0;
78290+}
78291+
78292+__u32
78293+gr_acl_handle_mknod(const struct dentry * new_dentry,
78294+ const struct dentry * parent_dentry,
78295+ const struct vfsmount * parent_mnt,
78296+ const int mode)
78297+{
78298+ return 1;
78299+}
78300+
78301+__u32
78302+gr_acl_handle_mkdir(const struct dentry * new_dentry,
78303+ const struct dentry * parent_dentry,
78304+ const struct vfsmount * parent_mnt)
78305+{
78306+ return 1;
78307+}
78308+
78309+__u32
78310+gr_acl_handle_symlink(const struct dentry * new_dentry,
78311+ const struct dentry * parent_dentry,
78312+ const struct vfsmount * parent_mnt, const struct filename *from)
78313+{
78314+ return 1;
78315+}
78316+
78317+__u32
78318+gr_acl_handle_link(const struct dentry * new_dentry,
78319+ const struct dentry * parent_dentry,
78320+ const struct vfsmount * parent_mnt,
78321+ const struct dentry * old_dentry,
78322+ const struct vfsmount * old_mnt, const struct filename *to)
78323+{
78324+ return 1;
78325+}
78326+
78327+int
78328+gr_acl_handle_rename(const struct dentry *new_dentry,
78329+ const struct dentry *parent_dentry,
78330+ const struct vfsmount *parent_mnt,
78331+ const struct dentry *old_dentry,
78332+ const struct inode *old_parent_inode,
78333+ const struct vfsmount *old_mnt, const struct filename *newname,
78334+ unsigned int flags)
78335+{
78336+ return 0;
78337+}
78338+
78339+int
78340+gr_acl_handle_filldir(const struct file *file, const char *name,
78341+ const int namelen, const ino_t ino)
78342+{
78343+ return 1;
78344+}
78345+
78346+int
78347+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
78348+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
78349+{
78350+ return 1;
78351+}
78352+
78353+int
78354+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
78355+{
78356+ return 0;
78357+}
78358+
78359+int
78360+gr_search_accept(const struct socket *sock)
78361+{
78362+ return 0;
78363+}
78364+
78365+int
78366+gr_search_listen(const struct socket *sock)
78367+{
78368+ return 0;
78369+}
78370+
78371+int
78372+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
78373+{
78374+ return 0;
78375+}
78376+
78377+__u32
78378+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
78379+{
78380+ return 1;
78381+}
78382+
78383+__u32
78384+gr_acl_handle_creat(const struct dentry * dentry,
78385+ const struct dentry * p_dentry,
78386+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
78387+ const int imode)
78388+{
78389+ return 1;
78390+}
78391+
78392+void
78393+gr_acl_handle_exit(void)
78394+{
78395+ return;
78396+}
78397+
78398+int
78399+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
78400+{
78401+ return 1;
78402+}
78403+
78404+void
78405+gr_set_role_label(const kuid_t uid, const kgid_t gid)
78406+{
78407+ return;
78408+}
78409+
78410+int
78411+gr_acl_handle_procpidmem(const struct task_struct *task)
78412+{
78413+ return 0;
78414+}
78415+
78416+int
78417+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
78418+{
78419+ return 0;
78420+}
78421+
78422+int
78423+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
78424+{
78425+ return 0;
78426+}
78427+
78428+int
78429+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
78430+{
78431+ return 0;
78432+}
78433+
78434+int
78435+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
78436+{
78437+ return 0;
78438+}
78439+
78440+int gr_acl_enable_at_secure(void)
78441+{
78442+ return 0;
78443+}
78444+
78445+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
78446+{
78447+ return dentry->d_sb->s_dev;
78448+}
78449+
78450+void gr_put_exec_file(struct task_struct *task)
78451+{
78452+ return;
78453+}
78454+
78455+#ifdef CONFIG_SECURITY
78456+EXPORT_SYMBOL_GPL(gr_check_user_change);
78457+EXPORT_SYMBOL_GPL(gr_check_group_change);
78458+#endif
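
grsec_disabled.c exists so the rest of the kernel can call the grsecurity hooks
unconditionally: the __u32 "acl_handle" hooks return nonzero to allow, so their
stubs answer 1, while the int "handle"/"check" hooks return 0 or -errno, so their
stubs answer 0; either way, every path reduces to "permitted" when grsecurity is
compiled out. A tiny userspace model of the two return conventions (simplified
types, illustrative only):

#include <stdio.h>

static unsigned int acl_handle_unlink_stub(void) { return 1; } /* nonzero == allow */
static int handle_chroot_mknod_stub(void)        { return 0; } /* zero == no error */

int main(void)
{
	int err;

	if (!acl_handle_unlink_stub())
		printf("unlink denied by RBAC\n");
	else
		printf("unlink allowed: the disabled stub always returns 1\n");

	err = handle_chroot_mknod_stub();
	printf("mknod hook -> %d (%s)\n", err, err ? "denied" : "imposes no restriction");
	return 0;
}
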
78459diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
78460new file mode 100644
78461index 0000000..14638ff
78462--- /dev/null
78463+++ b/grsecurity/grsec_exec.c
78464@@ -0,0 +1,188 @@
78465+#include <linux/kernel.h>
78466+#include <linux/sched.h>
78467+#include <linux/file.h>
78468+#include <linux/binfmts.h>
78469+#include <linux/fs.h>
78470+#include <linux/types.h>
78471+#include <linux/grdefs.h>
78472+#include <linux/grsecurity.h>
78473+#include <linux/grinternal.h>
78474+#include <linux/capability.h>
78475+#include <linux/module.h>
78476+#include <linux/compat.h>
78477+
78478+#include <asm/uaccess.h>
78479+
78480+#ifdef CONFIG_GRKERNSEC_EXECLOG
78481+static char gr_exec_arg_buf[132];
78482+static DEFINE_MUTEX(gr_exec_arg_mutex);
78483+#endif
78484+
78485+struct user_arg_ptr {
78486+#ifdef CONFIG_COMPAT
78487+ bool is_compat;
78488+#endif
78489+ union {
78490+ const char __user *const __user *native;
78491+#ifdef CONFIG_COMPAT
78492+ const compat_uptr_t __user *compat;
78493+#endif
78494+ } ptr;
78495+};
78496+
78497+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
78498+
78499+void
78500+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
78501+{
78502+#ifdef CONFIG_GRKERNSEC_EXECLOG
78503+ char *grarg = gr_exec_arg_buf;
78504+ unsigned int i, x, execlen = 0;
78505+ char c;
78506+
78507+ if (!((grsec_enable_execlog && grsec_enable_group &&
78508+ in_group_p(grsec_audit_gid))
78509+ || (grsec_enable_execlog && !grsec_enable_group)))
78510+ return;
78511+
78512+ mutex_lock(&gr_exec_arg_mutex);
78513+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
78514+
78515+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
78516+ const char __user *p;
78517+ unsigned int len;
78518+
78519+ p = get_user_arg_ptr(argv, i);
78520+ if (IS_ERR(p))
78521+ goto log;
78522+
78523+ len = strnlen_user(p, 128 - execlen);
78524+ if (len > 128 - execlen)
78525+ len = 128 - execlen;
78526+ else if (len > 0)
78527+ len--;
78528+ if (copy_from_user(grarg + execlen, p, len))
78529+ goto log;
78530+
78531+ /* rewrite unprintable characters */
78532+ for (x = 0; x < len; x++) {
78533+ c = *(grarg + execlen + x);
78534+ if (c < 32 || c > 126)
78535+ *(grarg + execlen + x) = ' ';
78536+ }
78537+
78538+ execlen += len;
78539+ *(grarg + execlen) = ' ';
78540+ *(grarg + execlen + 1) = '\0';
78541+ execlen++;
78542+ }
78543+
78544+ log:
78545+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
78546+ bprm->file->f_path.mnt, grarg);
78547+ mutex_unlock(&gr_exec_arg_mutex);
78548+#endif
78549+ return;
78550+}
78551+
78552+#ifdef CONFIG_GRKERNSEC
78553+extern int gr_acl_is_capable(const int cap);
78554+extern int gr_acl_is_capable_nolog(const int cap);
78555+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
78556+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
78557+extern int gr_chroot_is_capable(const int cap);
78558+extern int gr_chroot_is_capable_nolog(const int cap);
78559+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
78560+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
78561+#endif
78562+
78563+const char *captab_log[] = {
78564+ "CAP_CHOWN",
78565+ "CAP_DAC_OVERRIDE",
78566+ "CAP_DAC_READ_SEARCH",
78567+ "CAP_FOWNER",
78568+ "CAP_FSETID",
78569+ "CAP_KILL",
78570+ "CAP_SETGID",
78571+ "CAP_SETUID",
78572+ "CAP_SETPCAP",
78573+ "CAP_LINUX_IMMUTABLE",
78574+ "CAP_NET_BIND_SERVICE",
78575+ "CAP_NET_BROADCAST",
78576+ "CAP_NET_ADMIN",
78577+ "CAP_NET_RAW",
78578+ "CAP_IPC_LOCK",
78579+ "CAP_IPC_OWNER",
78580+ "CAP_SYS_MODULE",
78581+ "CAP_SYS_RAWIO",
78582+ "CAP_SYS_CHROOT",
78583+ "CAP_SYS_PTRACE",
78584+ "CAP_SYS_PACCT",
78585+ "CAP_SYS_ADMIN",
78586+ "CAP_SYS_BOOT",
78587+ "CAP_SYS_NICE",
78588+ "CAP_SYS_RESOURCE",
78589+ "CAP_SYS_TIME",
78590+ "CAP_SYS_TTY_CONFIG",
78591+ "CAP_MKNOD",
78592+ "CAP_LEASE",
78593+ "CAP_AUDIT_WRITE",
78594+ "CAP_AUDIT_CONTROL",
78595+ "CAP_SETFCAP",
78596+ "CAP_MAC_OVERRIDE",
78597+ "CAP_MAC_ADMIN",
78598+ "CAP_SYSLOG",
78599+ "CAP_WAKE_ALARM",
78600+ "CAP_BLOCK_SUSPEND"
78601+};
78602+
78603+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
78604+
78605+int gr_is_capable(const int cap)
78606+{
78607+#ifdef CONFIG_GRKERNSEC
78608+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
78609+ return 1;
78610+ return 0;
78611+#else
78612+ return 1;
78613+#endif
78614+}
78615+
78616+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
78617+{
78618+#ifdef CONFIG_GRKERNSEC
78619+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
78620+ return 1;
78621+ return 0;
78622+#else
78623+ return 1;
78624+#endif
78625+}
78626+
78627+int gr_is_capable_nolog(const int cap)
78628+{
78629+#ifdef CONFIG_GRKERNSEC
78630+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
78631+ return 1;
78632+ return 0;
78633+#else
78634+ return 1;
78635+#endif
78636+}
78637+
78638+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
78639+{
78640+#ifdef CONFIG_GRKERNSEC
78641+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
78642+ return 1;
78643+ return 0;
78644+#else
78645+ return 1;
78646+#endif
78647+}
78648+
78649+EXPORT_SYMBOL_GPL(gr_is_capable);
78650+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
78651+EXPORT_SYMBOL_GPL(gr_task_is_capable);
78652+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
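
gr_handle_exec_args() above flattens the whole command line into a fixed 132-byte
buffer: at most 128 payload bytes, unprintable bytes rewritten to spaces, arguments
joined by single spaces. A self-contained userspace rendition of that loop
(assumptions: strlen() stands in for strnlen_user(), memcpy() for copy_from_user()):

#include <stdio.h>
#include <string.h>

static char buf[132];	/* same size as gr_exec_arg_buf */

static void flatten_args(int argc, char **argv)
{
	unsigned int i, x, execlen = 0;

	memset(buf, 0, sizeof(buf));
	for (i = 0; i < (unsigned int)argc && execlen < 128; i++) {
		unsigned int len = strlen(argv[i]);

		if (len > 128 - execlen)
			len = 128 - execlen;
		memcpy(buf + execlen, argv[i], len);
		/* rewrite unprintable characters, as the kernel loop does */
		for (x = 0; x < len; x++)
			if (buf[execlen + x] < 32 || buf[execlen + x] > 126)
				buf[execlen + x] = ' ';
		execlen += len;
		buf[execlen] = ' ';
		buf[execlen + 1] = '\0';
		execlen++;
	}
}

int main(int argc, char **argv)
{
	flatten_args(argc, argv);
	printf("log payload: \"%s\"\n", buf);
	return 0;
}

Arguments beyond the 128-byte cap are silently truncated, matching the kernel's
best-effort prefix logging.
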
78653diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
78654new file mode 100644
78655index 0000000..06cc6ea
78656--- /dev/null
78657+++ b/grsecurity/grsec_fifo.c
78658@@ -0,0 +1,24 @@
78659+#include <linux/kernel.h>
78660+#include <linux/sched.h>
78661+#include <linux/fs.h>
78662+#include <linux/file.h>
78663+#include <linux/grinternal.h>
78664+
78665+int
78666+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
78667+ const struct dentry *dir, const int flag, const int acc_mode)
78668+{
78669+#ifdef CONFIG_GRKERNSEC_FIFO
78670+ const struct cred *cred = current_cred();
78671+
78672+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
78673+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
78674+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
78675+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
78676+ if (!inode_permission(dentry->d_inode, acc_mode))
78677+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
78678+ return -EACCES; /* deny unconditionally; the log above fires only when DAC would otherwise have allowed the open */
78679+ }
78680+#endif
78681+ return 0;
78682+}
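
The FIFO check above targets the classic /tmp attack: a victim is lured into opening
an attacker-owned FIFO sitting in a sticky, world-writable directory. A userspace
sketch of the same predicate (geteuid() approximates cred->fsuid; illustrative, not
patch code):

#include <stdio.h>
#include <unistd.h>
#include <sys/stat.h>

static int fifo_open_denied(const char *fifo, const char *dir, int o_excl)
{
	struct stat fs, ds;

	if (stat(fifo, &fs) || stat(dir, &ds))
		return 0;
	return S_ISFIFO(fs.st_mode) && !o_excl &&
	       (ds.st_mode & S_ISVTX) &&	/* sticky parent, e.g. /tmp */
	       fs.st_uid != ds.st_uid &&	/* FIFO not owned by the dir owner */
	       geteuid() != fs.st_uid;		/* nor by the opener itself */
}

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <fifo> <parent-dir>\n", argv[0]);
		return 1;
	}
	printf("%s\n", fifo_open_denied(argv[1], argv[2], 0) ?
	       "open would be denied (-EACCES)" : "open would be allowed");
	return 0;
}
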
78683diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
78684new file mode 100644
78685index 0000000..8ca18bf
78686--- /dev/null
78687+++ b/grsecurity/grsec_fork.c
78688@@ -0,0 +1,23 @@
78689+#include <linux/kernel.h>
78690+#include <linux/sched.h>
78691+#include <linux/grsecurity.h>
78692+#include <linux/grinternal.h>
78693+#include <linux/errno.h>
78694+
78695+void
78696+gr_log_forkfail(const int retval)
78697+{
78698+#ifdef CONFIG_GRKERNSEC_FORKFAIL
78699+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
78700+ switch (retval) {
78701+ case -EAGAIN:
78702+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
78703+ break;
78704+ case -ENOMEM:
78705+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
78706+ break;
78707+ }
78708+ }
78709+#endif
78710+ return;
78711+}
78712diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
78713new file mode 100644
78714index 0000000..b7cb191
78715--- /dev/null
78716+++ b/grsecurity/grsec_init.c
78717@@ -0,0 +1,286 @@
78718+#include <linux/kernel.h>
78719+#include <linux/sched.h>
78720+#include <linux/mm.h>
78721+#include <linux/gracl.h>
78722+#include <linux/slab.h>
78723+#include <linux/vmalloc.h>
78724+#include <linux/percpu.h>
78725+#include <linux/module.h>
78726+
78727+int grsec_enable_ptrace_readexec;
78728+int grsec_enable_setxid;
78729+int grsec_enable_symlinkown;
78730+kgid_t grsec_symlinkown_gid;
78731+int grsec_enable_brute;
78732+int grsec_enable_link;
78733+int grsec_enable_dmesg;
78734+int grsec_enable_harden_ptrace;
78735+int grsec_enable_harden_ipc;
78736+int grsec_enable_fifo;
78737+int grsec_enable_execlog;
78738+int grsec_enable_signal;
78739+int grsec_enable_forkfail;
78740+int grsec_enable_audit_ptrace;
78741+int grsec_enable_time;
78742+int grsec_enable_group;
78743+kgid_t grsec_audit_gid;
78744+int grsec_enable_chdir;
78745+int grsec_enable_mount;
78746+int grsec_enable_rofs;
78747+int grsec_deny_new_usb;
78748+int grsec_enable_chroot_findtask;
78749+int grsec_enable_chroot_mount;
78750+int grsec_enable_chroot_shmat;
78751+int grsec_enable_chroot_fchdir;
78752+int grsec_enable_chroot_double;
78753+int grsec_enable_chroot_pivot;
78754+int grsec_enable_chroot_chdir;
78755+int grsec_enable_chroot_chmod;
78756+int grsec_enable_chroot_mknod;
78757+int grsec_enable_chroot_nice;
78758+int grsec_enable_chroot_execlog;
78759+int grsec_enable_chroot_caps;
78760+int grsec_enable_chroot_sysctl;
78761+int grsec_enable_chroot_unix;
78762+int grsec_enable_tpe;
78763+kgid_t grsec_tpe_gid;
78764+int grsec_enable_blackhole;
78765+#ifdef CONFIG_IPV6_MODULE
78766+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
78767+#endif
78768+int grsec_lastack_retries;
78769+int grsec_enable_tpe_all;
78770+int grsec_enable_tpe_invert;
78771+int grsec_enable_socket_all;
78772+kgid_t grsec_socket_all_gid;
78773+int grsec_enable_socket_client;
78774+kgid_t grsec_socket_client_gid;
78775+int grsec_enable_socket_server;
78776+kgid_t grsec_socket_server_gid;
78777+int grsec_resource_logging;
78778+int grsec_disable_privio;
78779+int grsec_enable_log_rwxmaps;
78780+int grsec_lock;
78781+
78782+DEFINE_SPINLOCK(grsec_alert_lock);
78783+unsigned long grsec_alert_wtime = 0;
78784+unsigned long grsec_alert_fyet = 0;
78785+
78786+DEFINE_SPINLOCK(grsec_audit_lock);
78787+
78788+DEFINE_RWLOCK(grsec_exec_file_lock);
78789+
78790+char *gr_shared_page[4];
78791+
78792+char *gr_alert_log_fmt;
78793+char *gr_audit_log_fmt;
78794+char *gr_alert_log_buf;
78795+char *gr_audit_log_buf;
78796+
78797+extern struct gr_arg *gr_usermode;
78798+extern unsigned char *gr_system_salt;
78799+extern unsigned char *gr_system_sum;
78800+
78801+void __init
78802+grsecurity_init(void)
78803+{
78804+ int j;
78805+ /* create the per-cpu shared pages */
78806+
78807+#ifdef CONFIG_X86
78808+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36); /* wipe the BIOS keyboard buffer in the BIOS data area so boot-time keystrokes (e.g. firmware passwords) don't linger */
78809+#endif
78810+
78811+ for (j = 0; j < 4; j++) {
78812+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
78813+ if (gr_shared_page[j] == NULL) {
78814+ panic("Unable to allocate grsecurity shared page");
78815+ return;
78816+ }
78817+ }
78818+
78819+ /* allocate log buffers */
78820+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
78821+ if (!gr_alert_log_fmt) {
78822+ panic("Unable to allocate grsecurity alert log format buffer");
78823+ return;
78824+ }
78825+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
78826+ if (!gr_audit_log_fmt) {
78827+ panic("Unable to allocate grsecurity audit log format buffer");
78828+ return;
78829+ }
78830+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
78831+ if (!gr_alert_log_buf) {
78832+ panic("Unable to allocate grsecurity alert log buffer");
78833+ return;
78834+ }
78835+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
78836+ if (!gr_audit_log_buf) {
78837+ panic("Unable to allocate grsecurity audit log buffer");
78838+ return;
78839+ }
78840+
78841+ /* allocate memory for authentication structure */
78842+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
78843+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
78844+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
78845+
78846+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
78847+ panic("Unable to allocate grsecurity authentication structure");
78848+ return;
78849+ }
78850+
78851+#ifdef CONFIG_GRKERNSEC_IO
78852+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
78853+ grsec_disable_privio = 1;
78854+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
78855+ grsec_disable_privio = 1;
78856+#else
78857+ grsec_disable_privio = 0;
78858+#endif
78859+#endif
78860+
78861+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
78862+ /* for backward compatibility, tpe_invert always defaults to on if
78863+ * enabled in the kernel
78864+ */
78865+ grsec_enable_tpe_invert = 1;
78866+#endif
78867+
78868+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
78869+#ifndef CONFIG_GRKERNSEC_SYSCTL
78870+ grsec_lock = 1;
78871+#endif
78872+
78873+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78874+ grsec_enable_log_rwxmaps = 1;
78875+#endif
78876+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
78877+ grsec_enable_group = 1;
78878+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
78879+#endif
78880+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78881+ grsec_enable_ptrace_readexec = 1;
78882+#endif
78883+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
78884+ grsec_enable_chdir = 1;
78885+#endif
78886+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
78887+ grsec_enable_harden_ptrace = 1;
78888+#endif
78889+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
78890+ grsec_enable_harden_ipc = 1;
78891+#endif
78892+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78893+ grsec_enable_mount = 1;
78894+#endif
78895+#ifdef CONFIG_GRKERNSEC_LINK
78896+ grsec_enable_link = 1;
78897+#endif
78898+#ifdef CONFIG_GRKERNSEC_BRUTE
78899+ grsec_enable_brute = 1;
78900+#endif
78901+#ifdef CONFIG_GRKERNSEC_DMESG
78902+ grsec_enable_dmesg = 1;
78903+#endif
78904+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
78905+ grsec_enable_blackhole = 1;
78906+ grsec_lastack_retries = 4;
78907+#endif
78908+#ifdef CONFIG_GRKERNSEC_FIFO
78909+ grsec_enable_fifo = 1;
78910+#endif
78911+#ifdef CONFIG_GRKERNSEC_EXECLOG
78912+ grsec_enable_execlog = 1;
78913+#endif
78914+#ifdef CONFIG_GRKERNSEC_SETXID
78915+ grsec_enable_setxid = 1;
78916+#endif
78917+#ifdef CONFIG_GRKERNSEC_SIGNAL
78918+ grsec_enable_signal = 1;
78919+#endif
78920+#ifdef CONFIG_GRKERNSEC_FORKFAIL
78921+ grsec_enable_forkfail = 1;
78922+#endif
78923+#ifdef CONFIG_GRKERNSEC_TIME
78924+ grsec_enable_time = 1;
78925+#endif
78926+#ifdef CONFIG_GRKERNSEC_RESLOG
78927+ grsec_resource_logging = 1;
78928+#endif
78929+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
78930+ grsec_enable_chroot_findtask = 1;
78931+#endif
78932+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
78933+ grsec_enable_chroot_unix = 1;
78934+#endif
78935+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
78936+ grsec_enable_chroot_mount = 1;
78937+#endif
78938+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
78939+ grsec_enable_chroot_fchdir = 1;
78940+#endif
78941+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
78942+ grsec_enable_chroot_shmat = 1;
78943+#endif
78944+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
78945+ grsec_enable_audit_ptrace = 1;
78946+#endif
78947+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
78948+ grsec_enable_chroot_double = 1;
78949+#endif
78950+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
78951+ grsec_enable_chroot_pivot = 1;
78952+#endif
78953+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
78954+ grsec_enable_chroot_chdir = 1;
78955+#endif
78956+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
78957+ grsec_enable_chroot_chmod = 1;
78958+#endif
78959+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
78960+ grsec_enable_chroot_mknod = 1;
78961+#endif
78962+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
78963+ grsec_enable_chroot_nice = 1;
78964+#endif
78965+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
78966+ grsec_enable_chroot_execlog = 1;
78967+#endif
78968+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
78969+ grsec_enable_chroot_caps = 1;
78970+#endif
78971+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
78972+ grsec_enable_chroot_sysctl = 1;
78973+#endif
78974+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78975+ grsec_enable_symlinkown = 1;
78976+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
78977+#endif
78978+#ifdef CONFIG_GRKERNSEC_TPE
78979+ grsec_enable_tpe = 1;
78980+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
78981+#ifdef CONFIG_GRKERNSEC_TPE_ALL
78982+ grsec_enable_tpe_all = 1;
78983+#endif
78984+#endif
78985+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78986+ grsec_enable_socket_all = 1;
78987+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
78988+#endif
78989+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78990+ grsec_enable_socket_client = 1;
78991+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
78992+#endif
78993+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78994+ grsec_enable_socket_server = 1;
78995+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
78996+#endif
78997+#endif
78998+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
78999+ grsec_deny_new_usb = 1;
79000+#endif
79001+
79002+ return;
79003+}
79004diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
79005new file mode 100644
79006index 0000000..1773300
79007--- /dev/null
79008+++ b/grsecurity/grsec_ipc.c
79009@@ -0,0 +1,48 @@
79010+#include <linux/kernel.h>
79011+#include <linux/mm.h>
79012+#include <linux/sched.h>
79013+#include <linux/file.h>
79014+#include <linux/ipc.h>
79015+#include <linux/ipc_namespace.h>
79016+#include <linux/grsecurity.h>
79017+#include <linux/grinternal.h>
79018+
79019+int
79020+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
79021+{
79022+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
79023+ int write;
79024+ int orig_granted_mode;
79025+ kuid_t euid;
79026+ kgid_t egid;
79027+
79028+ if (!grsec_enable_harden_ipc)
79029+ return 1;
79030+
79031+ euid = current_euid();
79032+ egid = current_egid();
79033+
79034+ write = requested_mode & 00002;
79035+ orig_granted_mode = ipcp->mode;
79036+
79037+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
79038+ orig_granted_mode >>= 6;
79039+ else {
79040+ /* world-accessible bits suggest misconfigured permissions; lock access down to the owner */
79041+ if (orig_granted_mode & 0007)
79042+ orig_granted_mode = 0;
79043+ /* otherwise do an egid-only check */
79044+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
79045+ orig_granted_mode >>= 3;
79046+ /* otherwise, no access */
79047+ else
79048+ orig_granted_mode = 0;
79049+ }
79050+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
79051+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
79052+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
79053+ return 0;
79054+ }
79055+#endif
79056+ return 1;
79057+}
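
The IPC recheck above recomputes the grant against the owner/creator classes only,
so group and world bits can never widen access for a non-owner, and a
world-accessible object is treated as misconfigured. A userspace model of the mode
arithmetic (plain unsigned ints; the CAP_IPC_OWNER escape hatch is omitted for
brevity):

#include <stdio.h>

static int ipc_recheck_denies(unsigned int requested, unsigned int granted,
			      unsigned int ipc_mode,
			      unsigned int euid, unsigned int cuid, unsigned int uid,
			      unsigned int egid, unsigned int cgid, unsigned int gid)
{
	unsigned int orig = ipc_mode;

	if (euid == cuid || euid == uid)
		orig >>= 6;		/* owner class */
	else if (orig & 0007)
		orig = 0;		/* world bits set: lock to owner */
	else if (egid == cgid || egid == gid)
		orig >>= 3;		/* group class */
	else
		orig = 0;
	/* deny when the normal check granted access but the stricter one would not */
	return !(requested & ~granted & 0007) && (requested & ~orig & 0007);
}

int main(void)
{
	/* uid 1000 asks to write to a 0666 segment created and owned by uid 0 */
	printf("0666 segment, non-owner write: %s\n",
	       ipc_recheck_denies(0002, 0002, 0666, 1000, 0, 0, 1000, 0, 0) ?
	       "denied" : "allowed");
	return 0;
}
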
79058diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
79059new file mode 100644
79060index 0000000..5e05e20
79061--- /dev/null
79062+++ b/grsecurity/grsec_link.c
79063@@ -0,0 +1,58 @@
79064+#include <linux/kernel.h>
79065+#include <linux/sched.h>
79066+#include <linux/fs.h>
79067+#include <linux/file.h>
79068+#include <linux/grinternal.h>
79069+
79070+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
79071+{
79072+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
79073+ const struct inode *link_inode = link->dentry->d_inode;
79074+
79075+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
79076+ /* ignore root-owned links, e.g. /proc/self */
79077+ gr_is_global_nonroot(link_inode->i_uid) && target &&
79078+ !uid_eq(link_inode->i_uid, target->i_uid)) {
79079+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
79080+ return 1;
79081+ }
79082+#endif
79083+ return 0;
79084+}
79085+
79086+int
79087+gr_handle_follow_link(const struct inode *parent,
79088+ const struct inode *inode,
79089+ const struct dentry *dentry, const struct vfsmount *mnt)
79090+{
79091+#ifdef CONFIG_GRKERNSEC_LINK
79092+ const struct cred *cred = current_cred();
79093+
79094+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
79095+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
79096+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
79097+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
79098+ return -EACCES;
79099+ }
79100+#endif
79101+ return 0;
79102+}
79103+
79104+int
79105+gr_handle_hardlink(const struct dentry *dentry,
79106+ const struct vfsmount *mnt,
79107+ struct inode *inode, const int mode, const struct filename *to)
79108+{
79109+#ifdef CONFIG_GRKERNSEC_LINK
79110+ const struct cred *cred = current_cred();
79111+
79112+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
79113+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
79114+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
79115+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
79116+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
79117+ return -EPERM;
79118+ }
79119+#endif
79120+ return 0;
79121+}
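
gr_handle_follow_link() implements the well-known sticky-symlink protection: in a
sticky, world-writable directory, a symlink is only followed if it is owned by the
directory owner or by the process doing the lookup. The same predicate as a runnable
userspace checker (geteuid() approximating cred->fsuid; illustrative only):

#include <stdio.h>
#include <unistd.h>
#include <sys/stat.h>

static int follow_denied(const char *link, const char *dir)
{
	struct stat ls, ds;

	if (lstat(link, &ls) || stat(dir, &ds) || !S_ISLNK(ls.st_mode))
		return 0;
	return (ds.st_mode & S_ISVTX) && (ds.st_mode & S_IWOTH) &&
	       ls.st_uid != ds.st_uid && geteuid() != ls.st_uid;
}

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <symlink> <parent-dir>\n", argv[0]);
		return 1;
	}
	printf("%s\n", follow_denied(argv[1], argv[2]) ?
	       "follow would be denied (-EACCES)" : "follow would be allowed");
	return 0;
}
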
79122diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
79123new file mode 100644
79124index 0000000..dbe0a6b
79125--- /dev/null
79126+++ b/grsecurity/grsec_log.c
79127@@ -0,0 +1,341 @@
79128+#include <linux/kernel.h>
79129+#include <linux/sched.h>
79130+#include <linux/file.h>
79131+#include <linux/tty.h>
79132+#include <linux/fs.h>
79133+#include <linux/mm.h>
79134+#include <linux/grinternal.h>
79135+
79136+#ifdef CONFIG_TREE_PREEMPT_RCU
79137+#define DISABLE_PREEMPT() preempt_disable()
79138+#define ENABLE_PREEMPT() preempt_enable()
79139+#else
79140+#define DISABLE_PREEMPT()
79141+#define ENABLE_PREEMPT()
79142+#endif
79143+
79144+#define BEGIN_LOCKS(x) \
79145+ DISABLE_PREEMPT(); \
79146+ rcu_read_lock(); \
79147+ read_lock(&tasklist_lock); \
79148+ read_lock(&grsec_exec_file_lock); \
79149+ if (x != GR_DO_AUDIT) \
79150+ spin_lock(&grsec_alert_lock); \
79151+ else \
79152+ spin_lock(&grsec_audit_lock)
79153+
79154+#define END_LOCKS(x) \
79155+ if (x != GR_DO_AUDIT) \
79156+ spin_unlock(&grsec_alert_lock); \
79157+ else \
79158+ spin_unlock(&grsec_audit_lock); \
79159+ read_unlock(&grsec_exec_file_lock); \
79160+ read_unlock(&tasklist_lock); \
79161+ rcu_read_unlock(); \
79162+ ENABLE_PREEMPT(); \
79163+ if (x == GR_DONT_AUDIT) \
79164+ gr_handle_alertkill(current)
79165+
79166+enum {
79167+ FLOODING,
79168+ NO_FLOODING
79169+};
79170+
79171+extern char *gr_alert_log_fmt;
79172+extern char *gr_audit_log_fmt;
79173+extern char *gr_alert_log_buf;
79174+extern char *gr_audit_log_buf;
79175+
79176+static int gr_log_start(int audit)
79177+{
79178+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
79179+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
79180+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
79181+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
79182+ unsigned long curr_secs = get_seconds();
79183+
79184+ if (audit == GR_DO_AUDIT)
79185+ goto set_fmt;
79186+
79187+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
79188+ grsec_alert_wtime = curr_secs;
79189+ grsec_alert_fyet = 0;
79190+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
79191+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
79192+ grsec_alert_fyet++;
79193+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
79194+ grsec_alert_wtime = curr_secs;
79195+ grsec_alert_fyet++;
79196+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
79197+ return FLOODING;
79198+ }
79199+ else return FLOODING;
79200+
79201+set_fmt:
79202+#endif
79203+ memset(buf, 0, PAGE_SIZE);
79204+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
79205+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
79206+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
79207+ } else if (current->signal->curr_ip) {
79208+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
79209+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
79210+ } else if (gr_acl_is_enabled()) {
79211+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
79212+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
79213+ } else {
79214+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
79215+ strcpy(buf, fmt);
79216+ }
79217+
79218+ return NO_FLOODING;
79219+}
79220+
79221+static void gr_log_middle(int audit, const char *msg, va_list ap)
79222+ __attribute__ ((format (printf, 2, 0)));
79223+
79224+static void gr_log_middle(int audit, const char *msg, va_list ap)
79225+{
79226+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
79227+ unsigned int len = strlen(buf);
79228+
79229+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
79230+
79231+ return;
79232+}
79233+
79234+static void gr_log_middle_varargs(int audit, const char *msg, ...)
79235+ __attribute__ ((format (printf, 2, 3)));
79236+
79237+static void gr_log_middle_varargs(int audit, const char *msg, ...)
79238+{
79239+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
79240+ unsigned int len = strlen(buf);
79241+ va_list ap;
79242+
79243+ va_start(ap, msg);
79244+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
79245+ va_end(ap);
79246+
79247+ return;
79248+}
79249+
79250+static void gr_log_end(int audit, int append_default)
79251+{
79252+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
79253+ if (append_default) {
79254+ struct task_struct *task = current;
79255+ struct task_struct *parent = task->real_parent;
79256+ const struct cred *cred = __task_cred(task);
79257+ const struct cred *pcred = __task_cred(parent);
79258+ unsigned int len = strlen(buf);
79259+
79260+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
79261+ }
79262+
79263+ printk("%s\n", buf);
79264+
79265+ return;
79266+}
79267+
79268+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
79269+{
79270+ int logtype;
79271+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
79272+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
79273+ void *voidptr = NULL;
79274+ int num1 = 0, num2 = 0;
79275+ unsigned long ulong1 = 0, ulong2 = 0;
79276+ struct dentry *dentry = NULL;
79277+ struct vfsmount *mnt = NULL;
79278+ struct file *file = NULL;
79279+ struct task_struct *task = NULL;
79280+ struct vm_area_struct *vma = NULL;
79281+ const struct cred *cred, *pcred;
79282+ va_list ap;
79283+
79284+ BEGIN_LOCKS(audit);
79285+ logtype = gr_log_start(audit);
79286+ if (logtype == FLOODING) {
79287+ END_LOCKS(audit);
79288+ return;
79289+ }
79290+ va_start(ap, argtypes);
79291+ switch (argtypes) {
79292+ case GR_TTYSNIFF:
79293+ task = va_arg(ap, struct task_struct *);
79294+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
79295+ break;
79296+ case GR_SYSCTL_HIDDEN:
79297+ str1 = va_arg(ap, char *);
79298+ gr_log_middle_varargs(audit, msg, result, str1);
79299+ break;
79300+ case GR_RBAC:
79301+ dentry = va_arg(ap, struct dentry *);
79302+ mnt = va_arg(ap, struct vfsmount *);
79303+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
79304+ break;
79305+ case GR_RBAC_STR:
79306+ dentry = va_arg(ap, struct dentry *);
79307+ mnt = va_arg(ap, struct vfsmount *);
79308+ str1 = va_arg(ap, char *);
79309+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
79310+ break;
79311+ case GR_STR_RBAC:
79312+ str1 = va_arg(ap, char *);
79313+ dentry = va_arg(ap, struct dentry *);
79314+ mnt = va_arg(ap, struct vfsmount *);
79315+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
79316+ break;
79317+ case GR_RBAC_MODE2:
79318+ dentry = va_arg(ap, struct dentry *);
79319+ mnt = va_arg(ap, struct vfsmount *);
79320+ str1 = va_arg(ap, char *);
79321+ str2 = va_arg(ap, char *);
79322+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
79323+ break;
79324+ case GR_RBAC_MODE3:
79325+ dentry = va_arg(ap, struct dentry *);
79326+ mnt = va_arg(ap, struct vfsmount *);
79327+ str1 = va_arg(ap, char *);
79328+ str2 = va_arg(ap, char *);
79329+ str3 = va_arg(ap, char *);
79330+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
79331+ break;
79332+ case GR_FILENAME:
79333+ dentry = va_arg(ap, struct dentry *);
79334+ mnt = va_arg(ap, struct vfsmount *);
79335+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
79336+ break;
79337+ case GR_STR_FILENAME:
79338+ str1 = va_arg(ap, char *);
79339+ dentry = va_arg(ap, struct dentry *);
79340+ mnt = va_arg(ap, struct vfsmount *);
79341+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
79342+ break;
79343+ case GR_FILENAME_STR:
79344+ dentry = va_arg(ap, struct dentry *);
79345+ mnt = va_arg(ap, struct vfsmount *);
79346+ str1 = va_arg(ap, char *);
79347+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
79348+ break;
79349+ case GR_FILENAME_TWO_INT:
79350+ dentry = va_arg(ap, struct dentry *);
79351+ mnt = va_arg(ap, struct vfsmount *);
79352+ num1 = va_arg(ap, int);
79353+ num2 = va_arg(ap, int);
79354+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
79355+ break;
79356+ case GR_FILENAME_TWO_INT_STR:
79357+ dentry = va_arg(ap, struct dentry *);
79358+ mnt = va_arg(ap, struct vfsmount *);
79359+ num1 = va_arg(ap, int);
79360+ num2 = va_arg(ap, int);
79361+ str1 = va_arg(ap, char *);
79362+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
79363+ break;
79364+ case GR_TEXTREL:
79365+ file = va_arg(ap, struct file *);
79366+ ulong1 = va_arg(ap, unsigned long);
79367+ ulong2 = va_arg(ap, unsigned long);
79368+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
79369+ break;
79370+ case GR_PTRACE:
79371+ task = va_arg(ap, struct task_struct *);
79372+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
79373+ break;
79374+ case GR_RESOURCE:
79375+ task = va_arg(ap, struct task_struct *);
79376+ cred = __task_cred(task);
79377+ pcred = __task_cred(task->real_parent);
79378+ ulong1 = va_arg(ap, unsigned long);
79379+ str1 = va_arg(ap, char *);
79380+ ulong2 = va_arg(ap, unsigned long);
79381+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
79382+ break;
79383+ case GR_CAP:
79384+ task = va_arg(ap, struct task_struct *);
79385+ cred = __task_cred(task);
79386+ pcred = __task_cred(task->real_parent);
79387+ str1 = va_arg(ap, char *);
79388+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
79389+ break;
79390+ case GR_SIG:
79391+ str1 = va_arg(ap, char *);
79392+ voidptr = va_arg(ap, void *);
79393+ gr_log_middle_varargs(audit, msg, str1, voidptr);
79394+ break;
79395+ case GR_SIG2:
79396+ task = va_arg(ap, struct task_struct *);
79397+ cred = __task_cred(task);
79398+ pcred = __task_cred(task->real_parent);
79399+ num1 = va_arg(ap, int);
79400+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
79401+ break;
79402+ case GR_CRASH1:
79403+ task = va_arg(ap, struct task_struct *);
79404+ cred = __task_cred(task);
79405+ pcred = __task_cred(task->real_parent);
79406+ ulong1 = va_arg(ap, unsigned long);
79407+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
79408+ break;
79409+ case GR_CRASH2:
79410+ task = va_arg(ap, struct task_struct *);
79411+ cred = __task_cred(task);
79412+ pcred = __task_cred(task->real_parent);
79413+ ulong1 = va_arg(ap, unsigned long);
79414+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
79415+ break;
79416+ case GR_RWXMAP:
79417+ file = va_arg(ap, struct file *);
79418+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
79419+ break;
79420+ case GR_RWXMAPVMA:
79421+ vma = va_arg(ap, struct vm_area_struct *);
79422+ if (vma->vm_file)
79423+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
79424+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
79425+ str1 = "<stack>";
79426+ else if (vma->vm_start <= current->mm->brk &&
79427+ vma->vm_end >= current->mm->start_brk)
79428+ str1 = "<heap>";
79429+ else
79430+ str1 = "<anonymous mapping>";
79431+ gr_log_middle_varargs(audit, msg, str1);
79432+ break;
79433+ case GR_PSACCT:
79434+ {
79435+ unsigned int wday, cday;
79436+ __u8 whr, chr;
79437+ __u8 wmin, cmin;
79438+ __u8 wsec, csec;
79439+ char cur_tty[64] = { 0 };
79440+ char parent_tty[64] = { 0 };
79441+
79442+ task = va_arg(ap, struct task_struct *);
79443+ wday = va_arg(ap, unsigned int);
79444+ cday = va_arg(ap, unsigned int);
79445+ whr = va_arg(ap, int);
79446+ chr = va_arg(ap, int);
79447+ wmin = va_arg(ap, int);
79448+ cmin = va_arg(ap, int);
79449+ wsec = va_arg(ap, int);
79450+ csec = va_arg(ap, int);
79451+ ulong1 = va_arg(ap, unsigned long);
79452+ cred = __task_cred(task);
79453+ pcred = __task_cred(task->real_parent);
79454+
79455+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
79456+ }
79457+ break;
79458+ default:
79459+ gr_log_middle(audit, msg, ap);
79460+ }
79461+ va_end(ap);
79462+ // these don't need DEFAULTSECARGS printed on the end
79463+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
79464+ gr_log_end(audit, 0);
79465+ else
79466+ gr_log_end(audit, 1);
79467+ END_LOCKS(audit);
79468+}
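
The alert path in gr_log_start() is rate-limited so a hostile process cannot flood
the kernel log: roughly FLOODBURST alerts are tolerated per FLOODTIME-second window,
after which alerts are muted and a single notice is printed. A self-contained
userspace model of that limiter (10 and 6 are assumed here as the usual Kconfig
defaults):

#include <stdio.h>
#include <time.h>

#define FLOODTIME  10	/* seconds per window */
#define FLOODBURST 6	/* alerts tolerated per window */

static unsigned long wtime;	/* window start */
static unsigned long fyet;	/* alerts seen in this window */

static int flooding(unsigned long now)
{
	if (!wtime || now > wtime + FLOODTIME) {
		wtime = now;	/* new window */
		fyet = 0;
	} else if (fyet < FLOODBURST) {
		fyet++;
	} else if (fyet == FLOODBURST) {
		wtime = now;	/* restart the window at the moment of muting */
		fyet++;
		printf("grsec: more alerts, logging disabled for %d seconds\n", FLOODTIME);
		return 1;
	} else
		return 1;
	return 0;
}

int main(void)
{
	unsigned long t0 = (unsigned long)time(NULL);
	int i;

	for (i = 0; i < 10; i++)
		printf("alert %d: %s\n", i, flooding(t0) ? "suppressed" : "logged");
	return 0;
}
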
79469diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
79470new file mode 100644
79471index 0000000..0e39d8c
79472--- /dev/null
79473+++ b/grsecurity/grsec_mem.c
79474@@ -0,0 +1,48 @@
79475+#include <linux/kernel.h>
79476+#include <linux/sched.h>
79477+#include <linux/mm.h>
79478+#include <linux/mman.h>
79479+#include <linux/module.h>
79480+#include <linux/grinternal.h>
79481+
79482+void gr_handle_msr_write(void)
79483+{
79484+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
79485+ return;
79486+}
79487+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
79488+
79489+void
79490+gr_handle_ioperm(void)
79491+{
79492+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
79493+ return;
79494+}
79495+
79496+void
79497+gr_handle_iopl(void)
79498+{
79499+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
79500+ return;
79501+}
79502+
79503+void
79504+gr_handle_mem_readwrite(u64 from, u64 to)
79505+{
79506+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
79507+ return;
79508+}
79509+
79510+void
79511+gr_handle_vm86(void)
79512+{
79513+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
79514+ return;
79515+}
79516+
79517+void
79518+gr_log_badprocpid(const char *entry)
79519+{
79520+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
79521+ return;
79522+}
79523diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
79524new file mode 100644
79525index 0000000..cd9e124
79526--- /dev/null
79527+++ b/grsecurity/grsec_mount.c
79528@@ -0,0 +1,65 @@
79529+#include <linux/kernel.h>
79530+#include <linux/sched.h>
79531+#include <linux/mount.h>
79532+#include <linux/major.h>
79533+#include <linux/grsecurity.h>
79534+#include <linux/grinternal.h>
79535+
79536+void
79537+gr_log_remount(const char *devname, const int retval)
79538+{
79539+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
79540+ if (grsec_enable_mount && (retval >= 0))
79541+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
79542+#endif
79543+ return;
79544+}
79545+
79546+void
79547+gr_log_unmount(const char *devname, const int retval)
79548+{
79549+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
79550+ if (grsec_enable_mount && (retval >= 0))
79551+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
79552+#endif
79553+ return;
79554+}
79555+
79556+void
79557+gr_log_mount(const char *from, const char *to, const int retval)
79558+{
79559+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
79560+ if (grsec_enable_mount && (retval >= 0))
79561+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
79562+#endif
79563+ return;
79564+}
79565+
79566+int
79567+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
79568+{
79569+#ifdef CONFIG_GRKERNSEC_ROFS
79570+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
79571+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
79572+ return -EPERM;
79573+ } else
79574+ return 0;
79575+#endif
79576+ return 0;
79577+}
79578+
79579+int
79580+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
79581+{
79582+#ifdef CONFIG_GRKERNSEC_ROFS
79583+ struct inode *inode = dentry->d_inode;
79584+
79585+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
79586+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
79587+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
79588+ return -EPERM;
79589+ } else
79590+ return 0;
79591+#endif
79592+ return 0;
79593+}
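
gr_handle_rofs_blockwrite() is the half of the runtime read-only feature that keeps
even root from bypassing a read-only mount by writing to the underlying device node.
A userspace sketch of its predicate (W_OK stands in for MAY_WRITE; RAW_MAJOR is
assumed to be the Linux raw char device major, 162):

#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <unistd.h>

#define RAW_MAJOR 162	/* assumption: drivers/char/raw.c major */

static int rofs_write_denied(const char *dev, int acc_mode)
{
	struct stat st;

	if (stat(dev, &st))
		return 0;
	return (acc_mode & W_OK) &&
	       (S_ISBLK(st.st_mode) ||
	        (S_ISCHR(st.st_mode) && major(st.st_rdev) == RAW_MAJOR));
}

int main(void)
{
	printf("write-open of /dev/sda: %s\n",
	       rofs_write_denied("/dev/sda", W_OK) ?
	       "denied while ROFS is armed" : "allowed");
	return 0;
}
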
79594diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
79595new file mode 100644
79596index 0000000..6ee9d50
79597--- /dev/null
79598+++ b/grsecurity/grsec_pax.c
79599@@ -0,0 +1,45 @@
79600+#include <linux/kernel.h>
79601+#include <linux/sched.h>
79602+#include <linux/mm.h>
79603+#include <linux/file.h>
79604+#include <linux/grinternal.h>
79605+#include <linux/grsecurity.h>
79606+
79607+void
79608+gr_log_textrel(struct vm_area_struct * vma)
79609+{
79610+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79611+ if (grsec_enable_log_rwxmaps)
79612+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
79613+#endif
79614+ return;
79615+}
79616+
79617+void gr_log_ptgnustack(struct file *file)
79618+{
79619+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79620+ if (grsec_enable_log_rwxmaps)
79621+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
79622+#endif
79623+ return;
79624+}
79625+
79626+void
79627+gr_log_rwxmmap(struct file *file)
79628+{
79629+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79630+ if (grsec_enable_log_rwxmaps)
79631+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
79632+#endif
79633+ return;
79634+}
79635+
79636+void
79637+gr_log_rwxmprotect(struct vm_area_struct *vma)
79638+{
79639+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79640+ if (grsec_enable_log_rwxmaps)
79641+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
79642+#endif
79643+ return;
79644+}
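All four hooks above log the creation of writable-plus-executable memory in one form or another: text relocations, executable GNU_STACK program headers, RWX mmap(), and mprotect() to RWX. The condition they watch for is easy to produce from userspace; a snippet that requests exactly such a mapping, which RWXMAP_LOG would record and PaX MPROTECT would refuse outright:

/* Sketch: an allocation gr_log_rwxmmap() is designed to flag, an
 * anonymous mapping requested writable and executable at once. */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");	/* refused outright under PaX MPROTECT */
		return 1;
	}
	printf("got W|X mapping at %p (logged under RWXMAP_LOG)\n", p);
	munmap(p, 4096);
	return 0;
}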
79645diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
79646new file mode 100644
79647index 0000000..2005a3a
79648--- /dev/null
79649+++ b/grsecurity/grsec_proc.c
79650@@ -0,0 +1,20 @@
79651+#include <linux/kernel.h>
79652+#include <linux/sched.h>
79653+#include <linux/grsecurity.h>
79654+#include <linux/grinternal.h>
79655+
79656+int gr_proc_is_restricted(void)
79657+{
79658+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
79659+ const struct cred *cred = current_cred();
79660+#endif
79661+
79662+#ifdef CONFIG_GRKERNSEC_PROC_USER
79663+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
79664+ return -EACCES;
79665+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
79666+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
79667+ return -EACCES;
79668+#endif
79669+ return 0;
79670+}
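The gate is fsuid-based: root always passes, and under PROC_USERGROUP membership in the configured grsec_proc_gid passes as well. A rough userspace analogue follows; fsuid has no direct userspace getter, so geteuid() stands in, and the gid value 10 is only a stand-in for grsec_proc_gid:

/* Sketch of the gr_proc_is_restricted() gate in userspace terms. */
#include <stdio.h>
#include <unistd.h>

static int proc_is_restricted(gid_t proc_gid)
{
	gid_t groups[128];	/* enough for typical memberships */
	int n;

	if (geteuid() == 0)
		return 0;	/* the GLOBAL_ROOT_UID case */
	n = getgroups(128, groups);
	for (int i = 0; i < n; i++)
		if (groups[i] == proc_gid)
			return 0;	/* the in_group_p() case */
	return -1;		/* -EACCES in the kernel */
}

int main(void)
{
	printf("restricted: %s\n", proc_is_restricted(10) ? "yes" : "no");
	return 0;
}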
79671diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
79672new file mode 100644
79673index 0000000..f7f29aa
79674--- /dev/null
79675+++ b/grsecurity/grsec_ptrace.c
79676@@ -0,0 +1,30 @@
79677+#include <linux/kernel.h>
79678+#include <linux/sched.h>
79679+#include <linux/grinternal.h>
79680+#include <linux/security.h>
79681+
79682+void
79683+gr_audit_ptrace(struct task_struct *task)
79684+{
79685+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
79686+ if (grsec_enable_audit_ptrace)
79687+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
79688+#endif
79689+ return;
79690+}
79691+
79692+int
79693+gr_ptrace_readexec(struct file *file, int unsafe_flags)
79694+{
79695+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
79696+ const struct dentry *dentry = file->f_path.dentry;
79697+ const struct vfsmount *mnt = file->f_path.mnt;
79698+
79699+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
79700+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
79701+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
79702+ return -EACCES;
79703+ }
79704+#endif
79705+ return 0;
79706+}
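The readexec check closes a hole where a tracer could dump the text of an execute-only binary, one it may run but not open for reading, via ptrace. A sketch that classifies a path the way the hook would, using access(2); the sample path below is only a placeholder:

/* Sketch: the case PTRACE_READEXEC exists for, an execute-only binary
 * whose text a tracer could otherwise dump with PTRACE_PEEKTEXT. */
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	/* placeholder path; point it at any mode-0111 binary */
	const char *target = argc > 1 ? argv[1] : "/usr/local/bin/exec-only";

	if (access(target, X_OK) == 0 && access(target, R_OK) != 0)
		printf("%s is exec-only: ptracing it returns -EACCES "
		       "under PTRACE_READEXEC\n", target);
	else
		printf("%s is readable or not executable; the hook allows it\n",
		       target);
	return 0;
}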
79707diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
79708new file mode 100644
79709index 0000000..3860c7e
79710--- /dev/null
79711+++ b/grsecurity/grsec_sig.c
79712@@ -0,0 +1,236 @@
79713+#include <linux/kernel.h>
79714+#include <linux/sched.h>
79715+#include <linux/fs.h>
79716+#include <linux/delay.h>
79717+#include <linux/grsecurity.h>
79718+#include <linux/grinternal.h>
79719+#include <linux/hardirq.h>
79720+
79721+char *signames[] = {
79722+ [SIGSEGV] = "Segmentation fault",
79723+ [SIGILL] = "Illegal instruction",
79724+ [SIGABRT] = "Abort",
79725+ [SIGBUS] = "Invalid alignment/Bus error"
79726+};
79727+
79728+void
79729+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
79730+{
79731+#ifdef CONFIG_GRKERNSEC_SIGNAL
79732+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
79733+ (sig == SIGABRT) || (sig == SIGBUS))) {
79734+ if (task_pid_nr(t) == task_pid_nr(current)) {
79735+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
79736+ } else {
79737+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
79738+ }
79739+ }
79740+#endif
79741+ return;
79742+}
79743+
79744+int
79745+gr_handle_signal(const struct task_struct *p, const int sig)
79746+{
79747+#ifdef CONFIG_GRKERNSEC
79748+ /* ignore the 0 signal for protected task checks */
79749+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
79750+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
79751+ return -EPERM;
79752+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
79753+ return -EPERM;
79754+ }
79755+#endif
79756+ return 0;
79757+}
79758+
79759+#ifdef CONFIG_GRKERNSEC
79760+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
79761+
79762+int gr_fake_force_sig(int sig, struct task_struct *t)
79763+{
79764+ unsigned long int flags;
79765+ int ret, blocked, ignored;
79766+ struct k_sigaction *action;
79767+
79768+ spin_lock_irqsave(&t->sighand->siglock, flags);
79769+ action = &t->sighand->action[sig-1];
79770+ ignored = action->sa.sa_handler == SIG_IGN;
79771+ blocked = sigismember(&t->blocked, sig);
79772+ if (blocked || ignored) {
79773+ action->sa.sa_handler = SIG_DFL;
79774+ if (blocked) {
79775+ sigdelset(&t->blocked, sig);
79776+ recalc_sigpending_and_wake(t);
79777+ }
79778+ }
79779+ if (action->sa.sa_handler == SIG_DFL)
79780+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
79781+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
79782+
79783+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
79784+
79785+ return ret;
79786+}
79787+#endif
79788+
79789+#define GR_USER_BAN_TIME (15 * 60)
79790+#define GR_DAEMON_BRUTE_TIME (30 * 60)
79791+
79792+void gr_handle_brute_attach(int dumpable)
79793+{
79794+#ifdef CONFIG_GRKERNSEC_BRUTE
79795+ struct task_struct *p = current;
79796+ kuid_t uid = GLOBAL_ROOT_UID;
79797+ int daemon = 0;
79798+
79799+ if (!grsec_enable_brute)
79800+ return;
79801+
79802+ rcu_read_lock();
79803+ read_lock(&tasklist_lock);
79804+ read_lock(&grsec_exec_file_lock);
79805+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
79806+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
79807+ p->real_parent->brute = 1;
79808+ daemon = 1;
79809+ } else {
79810+ const struct cred *cred = __task_cred(p), *cred2;
79811+ struct task_struct *tsk, *tsk2;
79812+
79813+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
79814+ struct user_struct *user;
79815+
79816+ uid = cred->uid;
79817+
79818+			/* the reference taken by find_user() is put at exec time once the ban expires (see suid_ban_expired()) */
79819+ user = find_user(uid);
79820+ if (user == NULL)
79821+ goto unlock;
79822+ user->suid_banned = 1;
79823+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
79824+ if (user->suid_ban_expires == ~0UL)
79825+ user->suid_ban_expires--;
79826+
79827+ /* only kill other threads of the same binary, from the same user */
79828+ do_each_thread(tsk2, tsk) {
79829+ cred2 = __task_cred(tsk);
79830+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
79831+ gr_fake_force_sig(SIGKILL, tsk);
79832+ } while_each_thread(tsk2, tsk);
79833+ }
79834+ }
79835+unlock:
79836+ read_unlock(&grsec_exec_file_lock);
79837+ read_unlock(&tasklist_lock);
79838+ rcu_read_unlock();
79839+
79840+ if (gr_is_global_nonroot(uid))
79841+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
79842+ else if (daemon)
79843+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
79844+
79845+#endif
79846+ return;
79847+}
79848+
79849+void gr_handle_brute_check(void)
79850+{
79851+#ifdef CONFIG_GRKERNSEC_BRUTE
79852+ struct task_struct *p = current;
79853+
79854+ if (unlikely(p->brute)) {
79855+ if (!grsec_enable_brute)
79856+ p->brute = 0;
79857+ else if (time_before(get_seconds(), p->brute_expires))
79858+ msleep(30 * 1000);
79859+ }
79860+#endif
79861+ return;
79862+}
79863+
79864+void gr_handle_kernel_exploit(void)
79865+{
79866+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
79867+ const struct cred *cred;
79868+ struct task_struct *tsk, *tsk2;
79869+ struct user_struct *user;
79870+ kuid_t uid;
79871+
79872+ if (in_irq() || in_serving_softirq() || in_nmi())
79873+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
79874+
79875+ uid = current_uid();
79876+
79877+ if (gr_is_global_root(uid))
79878+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
79879+ else {
79880+ /* kill all the processes of this user, hold a reference
79881+ to their creds struct, and prevent them from creating
79882+ another process until system reset
79883+ */
79884+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
79885+ GR_GLOBAL_UID(uid));
79886+ /* we intentionally leak this ref */
79887+ user = get_uid(current->cred->user);
79888+ if (user)
79889+ user->kernel_banned = 1;
79890+
79891+ /* kill all processes of this user */
79892+ read_lock(&tasklist_lock);
79893+ do_each_thread(tsk2, tsk) {
79894+ cred = __task_cred(tsk);
79895+ if (uid_eq(cred->uid, uid))
79896+ gr_fake_force_sig(SIGKILL, tsk);
79897+ } while_each_thread(tsk2, tsk);
79898+ read_unlock(&tasklist_lock);
79899+ }
79900+#endif
79901+}
79902+
79903+#ifdef CONFIG_GRKERNSEC_BRUTE
79904+static bool suid_ban_expired(struct user_struct *user)
79905+{
79906+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
79907+ user->suid_banned = 0;
79908+ user->suid_ban_expires = 0;
79909+ free_uid(user);
79910+ return true;
79911+ }
79912+
79913+ return false;
79914+}
79915+#endif
79916+
79917+int gr_process_kernel_exec_ban(void)
79918+{
79919+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
79920+ if (unlikely(current->cred->user->kernel_banned))
79921+ return -EPERM;
79922+#endif
79923+ return 0;
79924+}
79925+
79926+int gr_process_kernel_setuid_ban(struct user_struct *user)
79927+{
79928+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
79929+ if (unlikely(user->kernel_banned))
79930+ gr_fake_force_sig(SIGKILL, current);
79931+#endif
79932+ return 0;
79933+}
79934+
79935+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
79936+{
79937+#ifdef CONFIG_GRKERNSEC_BRUTE
79938+ struct user_struct *user = current->cred->user;
79939+ if (unlikely(user->suid_banned)) {
79940+ if (suid_ban_expired(user))
79941+ return 0;
79942+ /* disallow execution of suid binaries only */
79943+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
79944+ return -EPERM;
79945+ }
79946+#endif
79947+ return 0;
79948+}
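Two windows drive the brute-force logic above: a crashing suid binary earns its user a 15-minute exec ban (GR_USER_BAN_TIME), while a crashing daemon marks its parent for 30 minutes (GR_DAEMON_BRUTE_TIME), during which gr_handle_brute_check() delays each fork by 30 seconds. A sketch of the ban-window arithmetic, with time(2) standing in for get_seconds():

/* Sketch: the suid ban bookkeeping from gr_handle_brute_attach() and
 * suid_ban_expired(). */
#include <stdio.h>
#include <time.h>

#define GR_USER_BAN_TIME (15 * 60)

int main(void)
{
	unsigned long now = (unsigned long)time(NULL);
	unsigned long expires = now + GR_USER_BAN_TIME;

	if (expires == ~0UL)	/* ~0UL is reserved to mean "never expires" */
		expires--;

	printf("banned at %lu, suid exec refused until %lu (%d min)\n",
	       now, expires, GR_USER_BAN_TIME / 60);
	printf("expired already? %s\n", now >= expires ? "yes" : "no");
	return 0;
}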
79949diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
79950new file mode 100644
79951index 0000000..c0aef3a
79952--- /dev/null
79953+++ b/grsecurity/grsec_sock.c
79954@@ -0,0 +1,244 @@
79955+#include <linux/kernel.h>
79956+#include <linux/module.h>
79957+#include <linux/sched.h>
79958+#include <linux/file.h>
79959+#include <linux/net.h>
79960+#include <linux/in.h>
79961+#include <linux/ip.h>
79962+#include <net/sock.h>
79963+#include <net/inet_sock.h>
79964+#include <linux/grsecurity.h>
79965+#include <linux/grinternal.h>
79966+#include <linux/gracl.h>
79967+
79968+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
79969+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
79970+
79971+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
79972+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
79973+
79974+#ifdef CONFIG_UNIX_MODULE
79975+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
79976+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
79977+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
79978+EXPORT_SYMBOL_GPL(gr_handle_create);
79979+#endif
79980+
79981+#ifdef CONFIG_GRKERNSEC
79982+#define gr_conn_table_size 32749
79983+struct conn_table_entry {
79984+ struct conn_table_entry *next;
79985+ struct signal_struct *sig;
79986+};
79987+
79988+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
79989+DEFINE_SPINLOCK(gr_conn_table_lock);
79990+
79991+extern const char * gr_socktype_to_name(unsigned char type);
79992+extern const char * gr_proto_to_name(unsigned char proto);
79993+extern const char * gr_sockfamily_to_name(unsigned char family);
79994+
79995+static __inline__ int
79996+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
79997+{
79998+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
79999+}
80000+
80001+static __inline__ int
80002+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
80003+ __u16 sport, __u16 dport)
80004+{
80005+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
80006+ sig->gr_sport == sport && sig->gr_dport == dport))
80007+ return 1;
80008+ else
80009+ return 0;
80010+}
80011+
80012+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
80013+{
80014+ struct conn_table_entry **match;
80015+ unsigned int index;
80016+
80017+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
80018+ sig->gr_sport, sig->gr_dport,
80019+ gr_conn_table_size);
80020+
80021+ newent->sig = sig;
80022+
80023+ match = &gr_conn_table[index];
80024+ newent->next = *match;
80025+ *match = newent;
80026+
80027+ return;
80028+}
80029+
80030+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
80031+{
80032+ struct conn_table_entry *match, *last = NULL;
80033+ unsigned int index;
80034+
80035+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
80036+ sig->gr_sport, sig->gr_dport,
80037+ gr_conn_table_size);
80038+
80039+ match = gr_conn_table[index];
80040+ while (match && !conn_match(match->sig,
80041+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
80042+ sig->gr_dport)) {
80043+ last = match;
80044+ match = match->next;
80045+ }
80046+
80047+ if (match) {
80048+ if (last)
80049+ last->next = match->next;
80050+ else
80051+ gr_conn_table[index] = NULL;
80052+ kfree(match);
80053+ }
80054+
80055+ return;
80056+}
80057+
80058+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
80059+ __u16 sport, __u16 dport)
80060+{
80061+ struct conn_table_entry *match;
80062+ unsigned int index;
80063+
80064+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
80065+
80066+ match = gr_conn_table[index];
80067+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
80068+ match = match->next;
80069+
80070+ if (match)
80071+ return match->sig;
80072+ else
80073+ return NULL;
80074+}
80075+
80076+#endif
80077+
80078+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
80079+{
80080+#ifdef CONFIG_GRKERNSEC
80081+ struct signal_struct *sig = task->signal;
80082+ struct conn_table_entry *newent;
80083+
80084+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
80085+ if (newent == NULL)
80086+ return;
80087+ /* no bh lock needed since we are called with bh disabled */
80088+ spin_lock(&gr_conn_table_lock);
80089+ gr_del_task_from_ip_table_nolock(sig);
80090+ sig->gr_saddr = inet->inet_rcv_saddr;
80091+ sig->gr_daddr = inet->inet_daddr;
80092+ sig->gr_sport = inet->inet_sport;
80093+ sig->gr_dport = inet->inet_dport;
80094+ gr_add_to_task_ip_table_nolock(sig, newent);
80095+ spin_unlock(&gr_conn_table_lock);
80096+#endif
80097+ return;
80098+}
80099+
80100+void gr_del_task_from_ip_table(struct task_struct *task)
80101+{
80102+#ifdef CONFIG_GRKERNSEC
80103+ spin_lock_bh(&gr_conn_table_lock);
80104+ gr_del_task_from_ip_table_nolock(task->signal);
80105+ spin_unlock_bh(&gr_conn_table_lock);
80106+#endif
80107+ return;
80108+}
80109+
80110+void
80111+gr_attach_curr_ip(const struct sock *sk)
80112+{
80113+#ifdef CONFIG_GRKERNSEC
80114+ struct signal_struct *p, *set;
80115+ const struct inet_sock *inet = inet_sk(sk);
80116+
80117+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
80118+ return;
80119+
80120+ set = current->signal;
80121+
80122+ spin_lock_bh(&gr_conn_table_lock);
80123+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
80124+ inet->inet_dport, inet->inet_sport);
80125+ if (unlikely(p != NULL)) {
80126+ set->curr_ip = p->curr_ip;
80127+ set->used_accept = 1;
80128+ gr_del_task_from_ip_table_nolock(p);
80129+ spin_unlock_bh(&gr_conn_table_lock);
80130+ return;
80131+ }
80132+ spin_unlock_bh(&gr_conn_table_lock);
80133+
80134+ set->curr_ip = inet->inet_daddr;
80135+ set->used_accept = 1;
80136+#endif
80137+ return;
80138+}
80139+
80140+int
80141+gr_handle_sock_all(const int family, const int type, const int protocol)
80142+{
80143+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
80144+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
80145+ (family != AF_UNIX)) {
80146+ if (family == AF_INET)
80147+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
80148+ else
80149+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
80150+ return -EACCES;
80151+ }
80152+#endif
80153+ return 0;
80154+}
80155+
80156+int
80157+gr_handle_sock_server(const struct sockaddr *sck)
80158+{
80159+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
80160+ if (grsec_enable_socket_server &&
80161+ in_group_p(grsec_socket_server_gid) &&
80162+ sck && (sck->sa_family != AF_UNIX) &&
80163+ (sck->sa_family != AF_LOCAL)) {
80164+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
80165+ return -EACCES;
80166+ }
80167+#endif
80168+ return 0;
80169+}
80170+
80171+int
80172+gr_handle_sock_server_other(const struct sock *sck)
80173+{
80174+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
80175+ if (grsec_enable_socket_server &&
80176+ in_group_p(grsec_socket_server_gid) &&
80177+ sck && (sck->sk_family != AF_UNIX) &&
80178+ (sck->sk_family != AF_LOCAL)) {
80179+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
80180+ return -EACCES;
80181+ }
80182+#endif
80183+ return 0;
80184+}
80185+
80186+int
80187+gr_handle_sock_client(const struct sockaddr *sck)
80188+{
80189+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
80190+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
80191+ sck && (sck->sa_family != AF_UNIX) &&
80192+ (sck->sa_family != AF_LOCAL)) {
80193+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
80194+ return -EACCES;
80195+ }
80196+#endif
80197+ return 0;
80198+}
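The table underneath all of this is a fixed array of 32749 (prime) singly linked chains, keyed by an additive hash over the (saddr, daddr, sport, dport) tuple; note that gr_attach_curr_ip() looks the tuple up with the address and port pairs swapped, matching the accepting side of the connection. A self-contained userspace sketch of the same structure, with the kernel locking and signal_struct bookkeeping left out:

/* Sketch: the chained hash table grsec_sock.c builds over connection
 * four-tuples, same prime size and same hash. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define TABLE_SIZE 32749	/* prime, as gr_conn_table_size */

struct entry {
	struct entry *next;
	uint32_t saddr, daddr;
	uint16_t sport, dport;
};

static struct entry *table[TABLE_SIZE];

static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
			      uint16_t sport, uint16_t dport)
{
	return (daddr + saddr + ((uint32_t)sport << 8) +
		((uint32_t)dport << 16)) % TABLE_SIZE;
}

static void insert(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp)
{
	struct entry *e = malloc(sizeof(*e));
	unsigned int i = conn_hash(s, d, sp, dp);

	if (!e)
		return;
	e->saddr = s; e->daddr = d; e->sport = sp; e->dport = dp;
	e->next = table[i];	/* push onto the chain head, as the kernel does */
	table[i] = e;
}

static struct entry *lookup(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp)
{
	struct entry *e = table[conn_hash(s, d, sp, dp)];

	while (e && !(e->saddr == s && e->daddr == d &&
		      e->sport == sp && e->dport == dp))
		e = e->next;	/* walk the chain, as conn_match() does */
	return e;
}

int main(void)
{
	insert(0x0a000001, 0x0a000002, 40000, 80);
	printf("hit: %s\n", lookup(0x0a000001, 0x0a000002, 40000, 80)
			    ? "yes" : "no");
	return 0;
}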
80199diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
80200new file mode 100644
80201index 0000000..8159888
80202--- /dev/null
80203+++ b/grsecurity/grsec_sysctl.c
80204@@ -0,0 +1,479 @@
80205+#include <linux/kernel.h>
80206+#include <linux/sched.h>
80207+#include <linux/sysctl.h>
80208+#include <linux/grsecurity.h>
80209+#include <linux/grinternal.h>
80210+
80211+int
80212+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
80213+{
80214+#ifdef CONFIG_GRKERNSEC_SYSCTL
80215+ if (dirname == NULL || name == NULL)
80216+ return 0;
80217+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
80218+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
80219+ return -EACCES;
80220+ }
80221+#endif
80222+ return 0;
80223+}
80224+
80225+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
80226+static int __maybe_unused __read_only one = 1;
80227+#endif
80228+
80229+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
80230+ defined(CONFIG_GRKERNSEC_DENYUSB)
80231+struct ctl_table grsecurity_table[] = {
80232+#ifdef CONFIG_GRKERNSEC_SYSCTL
80233+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
80234+#ifdef CONFIG_GRKERNSEC_IO
80235+ {
80236+ .procname = "disable_priv_io",
80237+ .data = &grsec_disable_privio,
80238+ .maxlen = sizeof(int),
80239+ .mode = 0600,
80240+ .proc_handler = &proc_dointvec,
80241+ },
80242+#endif
80243+#endif
80244+#ifdef CONFIG_GRKERNSEC_LINK
80245+ {
80246+ .procname = "linking_restrictions",
80247+ .data = &grsec_enable_link,
80248+ .maxlen = sizeof(int),
80249+ .mode = 0600,
80250+ .proc_handler = &proc_dointvec,
80251+ },
80252+#endif
80253+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
80254+ {
80255+ .procname = "enforce_symlinksifowner",
80256+ .data = &grsec_enable_symlinkown,
80257+ .maxlen = sizeof(int),
80258+ .mode = 0600,
80259+ .proc_handler = &proc_dointvec,
80260+ },
80261+ {
80262+ .procname = "symlinkown_gid",
80263+ .data = &grsec_symlinkown_gid,
80264+ .maxlen = sizeof(int),
80265+ .mode = 0600,
80266+ .proc_handler = &proc_dointvec,
80267+ },
80268+#endif
80269+#ifdef CONFIG_GRKERNSEC_BRUTE
80270+ {
80271+ .procname = "deter_bruteforce",
80272+ .data = &grsec_enable_brute,
80273+ .maxlen = sizeof(int),
80274+ .mode = 0600,
80275+ .proc_handler = &proc_dointvec,
80276+ },
80277+#endif
80278+#ifdef CONFIG_GRKERNSEC_FIFO
80279+ {
80280+ .procname = "fifo_restrictions",
80281+ .data = &grsec_enable_fifo,
80282+ .maxlen = sizeof(int),
80283+ .mode = 0600,
80284+ .proc_handler = &proc_dointvec,
80285+ },
80286+#endif
80287+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
80288+ {
80289+ .procname = "ptrace_readexec",
80290+ .data = &grsec_enable_ptrace_readexec,
80291+ .maxlen = sizeof(int),
80292+ .mode = 0600,
80293+ .proc_handler = &proc_dointvec,
80294+ },
80295+#endif
80296+#ifdef CONFIG_GRKERNSEC_SETXID
80297+ {
80298+ .procname = "consistent_setxid",
80299+ .data = &grsec_enable_setxid,
80300+ .maxlen = sizeof(int),
80301+ .mode = 0600,
80302+ .proc_handler = &proc_dointvec,
80303+ },
80304+#endif
80305+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80306+ {
80307+ .procname = "ip_blackhole",
80308+ .data = &grsec_enable_blackhole,
80309+ .maxlen = sizeof(int),
80310+ .mode = 0600,
80311+ .proc_handler = &proc_dointvec,
80312+ },
80313+ {
80314+ .procname = "lastack_retries",
80315+ .data = &grsec_lastack_retries,
80316+ .maxlen = sizeof(int),
80317+ .mode = 0600,
80318+ .proc_handler = &proc_dointvec,
80319+ },
80320+#endif
80321+#ifdef CONFIG_GRKERNSEC_EXECLOG
80322+ {
80323+ .procname = "exec_logging",
80324+ .data = &grsec_enable_execlog,
80325+ .maxlen = sizeof(int),
80326+ .mode = 0600,
80327+ .proc_handler = &proc_dointvec,
80328+ },
80329+#endif
80330+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
80331+ {
80332+ .procname = "rwxmap_logging",
80333+ .data = &grsec_enable_log_rwxmaps,
80334+ .maxlen = sizeof(int),
80335+ .mode = 0600,
80336+ .proc_handler = &proc_dointvec,
80337+ },
80338+#endif
80339+#ifdef CONFIG_GRKERNSEC_SIGNAL
80340+ {
80341+ .procname = "signal_logging",
80342+ .data = &grsec_enable_signal,
80343+ .maxlen = sizeof(int),
80344+ .mode = 0600,
80345+ .proc_handler = &proc_dointvec,
80346+ },
80347+#endif
80348+#ifdef CONFIG_GRKERNSEC_FORKFAIL
80349+ {
80350+ .procname = "forkfail_logging",
80351+ .data = &grsec_enable_forkfail,
80352+ .maxlen = sizeof(int),
80353+ .mode = 0600,
80354+ .proc_handler = &proc_dointvec,
80355+ },
80356+#endif
80357+#ifdef CONFIG_GRKERNSEC_TIME
80358+ {
80359+ .procname = "timechange_logging",
80360+ .data = &grsec_enable_time,
80361+ .maxlen = sizeof(int),
80362+ .mode = 0600,
80363+ .proc_handler = &proc_dointvec,
80364+ },
80365+#endif
80366+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
80367+ {
80368+ .procname = "chroot_deny_shmat",
80369+ .data = &grsec_enable_chroot_shmat,
80370+ .maxlen = sizeof(int),
80371+ .mode = 0600,
80372+ .proc_handler = &proc_dointvec,
80373+ },
80374+#endif
80375+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
80376+ {
80377+ .procname = "chroot_deny_unix",
80378+ .data = &grsec_enable_chroot_unix,
80379+ .maxlen = sizeof(int),
80380+ .mode = 0600,
80381+ .proc_handler = &proc_dointvec,
80382+ },
80383+#endif
80384+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
80385+ {
80386+ .procname = "chroot_deny_mount",
80387+ .data = &grsec_enable_chroot_mount,
80388+ .maxlen = sizeof(int),
80389+ .mode = 0600,
80390+ .proc_handler = &proc_dointvec,
80391+ },
80392+#endif
80393+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
80394+ {
80395+ .procname = "chroot_deny_fchdir",
80396+ .data = &grsec_enable_chroot_fchdir,
80397+ .maxlen = sizeof(int),
80398+ .mode = 0600,
80399+ .proc_handler = &proc_dointvec,
80400+ },
80401+#endif
80402+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
80403+ {
80404+ .procname = "chroot_deny_chroot",
80405+ .data = &grsec_enable_chroot_double,
80406+ .maxlen = sizeof(int),
80407+ .mode = 0600,
80408+ .proc_handler = &proc_dointvec,
80409+ },
80410+#endif
80411+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
80412+ {
80413+ .procname = "chroot_deny_pivot",
80414+ .data = &grsec_enable_chroot_pivot,
80415+ .maxlen = sizeof(int),
80416+ .mode = 0600,
80417+ .proc_handler = &proc_dointvec,
80418+ },
80419+#endif
80420+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
80421+ {
80422+ .procname = "chroot_enforce_chdir",
80423+ .data = &grsec_enable_chroot_chdir,
80424+ .maxlen = sizeof(int),
80425+ .mode = 0600,
80426+ .proc_handler = &proc_dointvec,
80427+ },
80428+#endif
80429+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
80430+ {
80431+ .procname = "chroot_deny_chmod",
80432+ .data = &grsec_enable_chroot_chmod,
80433+ .maxlen = sizeof(int),
80434+ .mode = 0600,
80435+ .proc_handler = &proc_dointvec,
80436+ },
80437+#endif
80438+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
80439+ {
80440+ .procname = "chroot_deny_mknod",
80441+ .data = &grsec_enable_chroot_mknod,
80442+ .maxlen = sizeof(int),
80443+ .mode = 0600,
80444+ .proc_handler = &proc_dointvec,
80445+ },
80446+#endif
80447+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
80448+ {
80449+ .procname = "chroot_restrict_nice",
80450+ .data = &grsec_enable_chroot_nice,
80451+ .maxlen = sizeof(int),
80452+ .mode = 0600,
80453+ .proc_handler = &proc_dointvec,
80454+ },
80455+#endif
80456+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
80457+ {
80458+ .procname = "chroot_execlog",
80459+ .data = &grsec_enable_chroot_execlog,
80460+ .maxlen = sizeof(int),
80461+ .mode = 0600,
80462+ .proc_handler = &proc_dointvec,
80463+ },
80464+#endif
80465+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
80466+ {
80467+ .procname = "chroot_caps",
80468+ .data = &grsec_enable_chroot_caps,
80469+ .maxlen = sizeof(int),
80470+ .mode = 0600,
80471+ .proc_handler = &proc_dointvec,
80472+ },
80473+#endif
80474+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
80475+ {
80476+ .procname = "chroot_deny_sysctl",
80477+ .data = &grsec_enable_chroot_sysctl,
80478+ .maxlen = sizeof(int),
80479+ .mode = 0600,
80480+ .proc_handler = &proc_dointvec,
80481+ },
80482+#endif
80483+#ifdef CONFIG_GRKERNSEC_TPE
80484+ {
80485+ .procname = "tpe",
80486+ .data = &grsec_enable_tpe,
80487+ .maxlen = sizeof(int),
80488+ .mode = 0600,
80489+ .proc_handler = &proc_dointvec,
80490+ },
80491+ {
80492+ .procname = "tpe_gid",
80493+ .data = &grsec_tpe_gid,
80494+ .maxlen = sizeof(int),
80495+ .mode = 0600,
80496+ .proc_handler = &proc_dointvec,
80497+ },
80498+#endif
80499+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
80500+ {
80501+ .procname = "tpe_invert",
80502+ .data = &grsec_enable_tpe_invert,
80503+ .maxlen = sizeof(int),
80504+ .mode = 0600,
80505+ .proc_handler = &proc_dointvec,
80506+ },
80507+#endif
80508+#ifdef CONFIG_GRKERNSEC_TPE_ALL
80509+ {
80510+ .procname = "tpe_restrict_all",
80511+ .data = &grsec_enable_tpe_all,
80512+ .maxlen = sizeof(int),
80513+ .mode = 0600,
80514+ .proc_handler = &proc_dointvec,
80515+ },
80516+#endif
80517+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
80518+ {
80519+ .procname = "socket_all",
80520+ .data = &grsec_enable_socket_all,
80521+ .maxlen = sizeof(int),
80522+ .mode = 0600,
80523+ .proc_handler = &proc_dointvec,
80524+ },
80525+ {
80526+ .procname = "socket_all_gid",
80527+ .data = &grsec_socket_all_gid,
80528+ .maxlen = sizeof(int),
80529+ .mode = 0600,
80530+ .proc_handler = &proc_dointvec,
80531+ },
80532+#endif
80533+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
80534+ {
80535+ .procname = "socket_client",
80536+ .data = &grsec_enable_socket_client,
80537+ .maxlen = sizeof(int),
80538+ .mode = 0600,
80539+ .proc_handler = &proc_dointvec,
80540+ },
80541+ {
80542+ .procname = "socket_client_gid",
80543+ .data = &grsec_socket_client_gid,
80544+ .maxlen = sizeof(int),
80545+ .mode = 0600,
80546+ .proc_handler = &proc_dointvec,
80547+ },
80548+#endif
80549+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
80550+ {
80551+ .procname = "socket_server",
80552+ .data = &grsec_enable_socket_server,
80553+ .maxlen = sizeof(int),
80554+ .mode = 0600,
80555+ .proc_handler = &proc_dointvec,
80556+ },
80557+ {
80558+ .procname = "socket_server_gid",
80559+ .data = &grsec_socket_server_gid,
80560+ .maxlen = sizeof(int),
80561+ .mode = 0600,
80562+ .proc_handler = &proc_dointvec,
80563+ },
80564+#endif
80565+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
80566+ {
80567+ .procname = "audit_group",
80568+ .data = &grsec_enable_group,
80569+ .maxlen = sizeof(int),
80570+ .mode = 0600,
80571+ .proc_handler = &proc_dointvec,
80572+ },
80573+ {
80574+ .procname = "audit_gid",
80575+ .data = &grsec_audit_gid,
80576+ .maxlen = sizeof(int),
80577+ .mode = 0600,
80578+ .proc_handler = &proc_dointvec,
80579+ },
80580+#endif
80581+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
80582+ {
80583+ .procname = "audit_chdir",
80584+ .data = &grsec_enable_chdir,
80585+ .maxlen = sizeof(int),
80586+ .mode = 0600,
80587+ .proc_handler = &proc_dointvec,
80588+ },
80589+#endif
80590+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
80591+ {
80592+ .procname = "audit_mount",
80593+ .data = &grsec_enable_mount,
80594+ .maxlen = sizeof(int),
80595+ .mode = 0600,
80596+ .proc_handler = &proc_dointvec,
80597+ },
80598+#endif
80599+#ifdef CONFIG_GRKERNSEC_DMESG
80600+ {
80601+ .procname = "dmesg",
80602+ .data = &grsec_enable_dmesg,
80603+ .maxlen = sizeof(int),
80604+ .mode = 0600,
80605+ .proc_handler = &proc_dointvec,
80606+ },
80607+#endif
80608+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
80609+ {
80610+ .procname = "chroot_findtask",
80611+ .data = &grsec_enable_chroot_findtask,
80612+ .maxlen = sizeof(int),
80613+ .mode = 0600,
80614+ .proc_handler = &proc_dointvec,
80615+ },
80616+#endif
80617+#ifdef CONFIG_GRKERNSEC_RESLOG
80618+ {
80619+ .procname = "resource_logging",
80620+ .data = &grsec_resource_logging,
80621+ .maxlen = sizeof(int),
80622+ .mode = 0600,
80623+ .proc_handler = &proc_dointvec,
80624+ },
80625+#endif
80626+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
80627+ {
80628+ .procname = "audit_ptrace",
80629+ .data = &grsec_enable_audit_ptrace,
80630+ .maxlen = sizeof(int),
80631+ .mode = 0600,
80632+ .proc_handler = &proc_dointvec,
80633+ },
80634+#endif
80635+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
80636+ {
80637+ .procname = "harden_ptrace",
80638+ .data = &grsec_enable_harden_ptrace,
80639+ .maxlen = sizeof(int),
80640+ .mode = 0600,
80641+ .proc_handler = &proc_dointvec,
80642+ },
80643+#endif
80644+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
80645+ {
80646+ .procname = "harden_ipc",
80647+ .data = &grsec_enable_harden_ipc,
80648+ .maxlen = sizeof(int),
80649+ .mode = 0600,
80650+ .proc_handler = &proc_dointvec,
80651+ },
80652+#endif
80653+ {
80654+ .procname = "grsec_lock",
80655+ .data = &grsec_lock,
80656+ .maxlen = sizeof(int),
80657+ .mode = 0600,
80658+ .proc_handler = &proc_dointvec,
80659+ },
80660+#endif
80661+#ifdef CONFIG_GRKERNSEC_ROFS
80662+ {
80663+ .procname = "romount_protect",
80664+ .data = &grsec_enable_rofs,
80665+ .maxlen = sizeof(int),
80666+ .mode = 0600,
80667+ .proc_handler = &proc_dointvec_minmax,
80668+ .extra1 = &one,
80669+ .extra2 = &one,
80670+ },
80671+#endif
80672+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
80673+ {
80674+ .procname = "deny_new_usb",
80675+ .data = &grsec_deny_new_usb,
80676+ .maxlen = sizeof(int),
80677+ .mode = 0600,
80678+ .proc_handler = &proc_dointvec,
80679+ },
80680+#endif
80681+ { }
80682+};
80683+#endif
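Assuming the standard grsecurity layout, where this table is registered under kernel/grsecurity, each knob above is flipped by writing to /proc/sys/kernel/grsecurity/&lt;procname&gt; (mode 0600, root only); once grsec_lock is set, gr_handle_sysctl_mod() rejects any further writes with -EACCES. A small sketch of that sequence:

/* Sketch: toggling the sysctl knobs at runtime; run as root. */
#include <stdio.h>
#include <string.h>
#include <errno.h>

static int set_knob(const char *name, int value)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
	f = fopen(path, "w");
	if (!f) {
		fprintf(stderr, "%s: %s\n", path, strerror(errno));
		return -1;
	}
	fprintf(f, "%d\n", value);
	return fclose(f);	/* the EACCES from a locked write surfaces here */
}

int main(void)
{
	set_knob("deter_bruteforce", 1);
	set_knob("grsec_lock", 1);		/* writes are refused from here on */
	return set_knob("deter_bruteforce", 0);	/* expected to fail now */
}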
80684diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
80685new file mode 100644
80686index 0000000..61b514e
80687--- /dev/null
80688+++ b/grsecurity/grsec_time.c
80689@@ -0,0 +1,16 @@
80690+#include <linux/kernel.h>
80691+#include <linux/sched.h>
80692+#include <linux/grinternal.h>
80693+#include <linux/module.h>
80694+
80695+void
80696+gr_log_timechange(void)
80697+{
80698+#ifdef CONFIG_GRKERNSEC_TIME
80699+ if (grsec_enable_time)
80700+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
80701+#endif
80702+ return;
80703+}
80704+
80705+EXPORT_SYMBOL_GPL(gr_log_timechange);
80706diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
80707new file mode 100644
80708index 0000000..d1953de
80709--- /dev/null
80710+++ b/grsecurity/grsec_tpe.c
80711@@ -0,0 +1,78 @@
80712+#include <linux/kernel.h>
80713+#include <linux/sched.h>
80714+#include <linux/file.h>
80715+#include <linux/fs.h>
80716+#include <linux/grinternal.h>
80717+
80718+extern int gr_acl_tpe_check(void);
80719+
80720+int
80721+gr_tpe_allow(const struct file *file)
80722+{
80723+#ifdef CONFIG_GRKERNSEC
80724+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
80725+ struct inode *file_inode = file->f_path.dentry->d_inode;
80726+ const struct cred *cred = current_cred();
80727+ char *msg = NULL;
80728+ char *msg2 = NULL;
80729+
80730+ // never restrict root
80731+ if (gr_is_global_root(cred->uid))
80732+ return 1;
80733+
80734+ if (grsec_enable_tpe) {
80735+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
80736+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
80737+ msg = "not being in trusted group";
80738+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
80739+ msg = "being in untrusted group";
80740+#else
80741+ if (in_group_p(grsec_tpe_gid))
80742+ msg = "being in untrusted group";
80743+#endif
80744+ }
80745+ if (!msg && gr_acl_tpe_check())
80746+ msg = "being in untrusted role";
80747+
80748+ // not in any affected group/role
80749+ if (!msg)
80750+ goto next_check;
80751+
80752+ if (gr_is_global_nonroot(inode->i_uid))
80753+ msg2 = "file in non-root-owned directory";
80754+ else if (inode->i_mode & S_IWOTH)
80755+ msg2 = "file in world-writable directory";
80756+ else if (inode->i_mode & S_IWGRP)
80757+ msg2 = "file in group-writable directory";
80758+ else if (file_inode->i_mode & S_IWOTH)
80759+ msg2 = "file is world-writable";
80760+
80761+ if (msg && msg2) {
80762+ char fullmsg[70] = {0};
80763+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
80764+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
80765+ return 0;
80766+ }
80767+ msg = NULL;
80768+next_check:
80769+#ifdef CONFIG_GRKERNSEC_TPE_ALL
80770+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
80771+ return 1;
80772+
80773+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
80774+ msg = "directory not owned by user";
80775+ else if (inode->i_mode & S_IWOTH)
80776+ msg = "file in world-writable directory";
80777+ else if (inode->i_mode & S_IWGRP)
80778+ msg = "file in group-writable directory";
80779+ else if (file_inode->i_mode & S_IWOTH)
80780+ msg = "file is world-writable";
80781+
80782+ if (msg) {
80783+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
80784+ return 0;
80785+ }
80786+#endif
80787+#endif
80788+ return 1;
80789+}
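Stripped of the group and role policy, the trust test is four stat checks: the parent directory must be root-owned and neither group- nor world-writable, and the file itself must not be world-writable. A userspace approximation of just those checks:

/* Sketch: the directory tests gr_tpe_allow() applies, via stat(2). */
#include <stdio.h>
#include <string.h>
#include <libgen.h>
#include <sys/stat.h>

static const char *tpe_reason(const char *path)
{
	char buf[4096];
	struct stat dir, file;

	strncpy(buf, path, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';
	if (stat(path, &file) || stat(dirname(buf), &dir))
		return "cannot stat";
	if (dir.st_uid != 0)
		return "file in non-root-owned directory";
	if (dir.st_mode & S_IWOTH)
		return "file in world-writable directory";
	if (dir.st_mode & S_IWGRP)
		return "file in group-writable directory";
	if (file.st_mode & S_IWOTH)
		return "file is world-writable";
	return NULL;
}

int main(int argc, char **argv)
{
	const char *r = argc > 1 ? tpe_reason(argv[1]) : "usage: tpe <path>";

	printf("%s\n", r ? r : "trusted");
	return 0;
}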
80790diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
80791new file mode 100644
80792index 0000000..ae02d8e
80793--- /dev/null
80794+++ b/grsecurity/grsec_usb.c
80795@@ -0,0 +1,15 @@
80796+#include <linux/kernel.h>
80797+#include <linux/grinternal.h>
80798+#include <linux/module.h>
80799+
80800+int gr_handle_new_usb(void)
80801+{
80802+#ifdef CONFIG_GRKERNSEC_DENYUSB
80803+ if (grsec_deny_new_usb) {
80804+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
80805+ return 1;
80806+ }
80807+#endif
80808+ return 0;
80809+}
80810+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
80811diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
80812new file mode 100644
80813index 0000000..158b330
80814--- /dev/null
80815+++ b/grsecurity/grsum.c
80816@@ -0,0 +1,64 @@
80817+#include <linux/err.h>
80818+#include <linux/kernel.h>
80819+#include <linux/sched.h>
80820+#include <linux/mm.h>
80821+#include <linux/scatterlist.h>
80822+#include <linux/crypto.h>
80823+#include <linux/gracl.h>
80824+
80825+
80826+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
80827+#error "crypto and sha256 must be built into the kernel"
80828+#endif
80829+
80830+int
80831+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
80832+{
80833+ struct crypto_hash *tfm;
80834+ struct hash_desc desc;
80835+ struct scatterlist sg[2];
80836+ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
80837+ unsigned long *tmpsumptr = (unsigned long *)temp_sum;
80838+ unsigned long *sumptr = (unsigned long *)sum;
80839+ int cryptres;
80840+ int retval = 1;
80841+ volatile int mismatched = 0;
80842+ volatile int dummy = 0;
80843+ unsigned int i;
80844+
80845+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
80846+ if (IS_ERR(tfm)) {
80847+ /* should never happen, since sha256 should be built in */
80848+ memset(entry->pw, 0, GR_PW_LEN);
80849+ return 1;
80850+ }
80851+
80852+ sg_init_table(sg, 2);
80853+ sg_set_buf(&sg[0], salt, GR_SALT_LEN);
80854+ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));
80855+
80856+ desc.tfm = tfm;
80857+ desc.flags = 0;
80858+
80859+ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
80860+ temp_sum);
80861+
80862+ memset(entry->pw, 0, GR_PW_LEN);
80863+
80864+ if (cryptres)
80865+ goto out;
80866+
80867+ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
80868+ if (sumptr[i] != tmpsumptr[i])
80869+ mismatched = 1;
80870+ else
80871+ dummy = 1; // waste a cycle
80872+
80873+ if (!mismatched)
80874+ retval = dummy - 1;
80875+
80876+out:
80877+ crypto_free_hash(tfm);
80878+
80879+ return retval;
80880+}
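chkpw() deliberately visits every word of both digests, flagging mismatches in `mismatched` and spending an equivalent store on the match path, so response timing does not reveal where the first differing byte sits. A minimal sketch of the same idea in the more common XOR-accumulate form, rather than the volatile flags used above:

/* Sketch: constant-time comparison; never exit the loop early. */
#include <stdio.h>
#include <stddef.h>

static int ct_compare(const unsigned char *a, const unsigned char *b, size_t n)
{
	unsigned char diff = 0;

	for (size_t i = 0; i < n; i++)
		diff |= a[i] ^ b[i];	/* accumulate mismatches branch-free */
	return diff != 0;		/* 1 on mismatch, 0 on match */
}

int main(void)
{
	unsigned char x[4] = {1, 2, 3, 4}, y[4] = {1, 2, 3, 5};

	printf("mismatch: %d\n", ct_compare(x, y, sizeof(x)));
	return 0;
}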
80881diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
80882index 77ff547..181834f 100644
80883--- a/include/asm-generic/4level-fixup.h
80884+++ b/include/asm-generic/4level-fixup.h
80885@@ -13,8 +13,10 @@
80886 #define pmd_alloc(mm, pud, address) \
80887 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
80888 NULL: pmd_offset(pud, address))
80889+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
80890
80891 #define pud_alloc(mm, pgd, address) (pgd)
80892+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
80893 #define pud_offset(pgd, start) (pgd)
80894 #define pud_none(pud) 0
80895 #define pud_bad(pud) 0
80896diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
80897index b7babf0..97f4c4f 100644
80898--- a/include/asm-generic/atomic-long.h
80899+++ b/include/asm-generic/atomic-long.h
80900@@ -22,6 +22,12 @@
80901
80902 typedef atomic64_t atomic_long_t;
80903
80904+#ifdef CONFIG_PAX_REFCOUNT
80905+typedef atomic64_unchecked_t atomic_long_unchecked_t;
80906+#else
80907+typedef atomic64_t atomic_long_unchecked_t;
80908+#endif
80909+
80910 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
80911
80912 static inline long atomic_long_read(atomic_long_t *l)
80913@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
80914 return (long)atomic64_read(v);
80915 }
80916
80917+#ifdef CONFIG_PAX_REFCOUNT
80918+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
80919+{
80920+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80921+
80922+ return (long)atomic64_read_unchecked(v);
80923+}
80924+#endif
80925+
80926 static inline void atomic_long_set(atomic_long_t *l, long i)
80927 {
80928 atomic64_t *v = (atomic64_t *)l;
80929@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
80930 atomic64_set(v, i);
80931 }
80932
80933+#ifdef CONFIG_PAX_REFCOUNT
80934+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
80935+{
80936+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80937+
80938+ atomic64_set_unchecked(v, i);
80939+}
80940+#endif
80941+
80942 static inline void atomic_long_inc(atomic_long_t *l)
80943 {
80944 atomic64_t *v = (atomic64_t *)l;
80945@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
80946 atomic64_inc(v);
80947 }
80948
80949+#ifdef CONFIG_PAX_REFCOUNT
80950+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
80951+{
80952+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80953+
80954+ atomic64_inc_unchecked(v);
80955+}
80956+#endif
80957+
80958 static inline void atomic_long_dec(atomic_long_t *l)
80959 {
80960 atomic64_t *v = (atomic64_t *)l;
80961@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
80962 atomic64_dec(v);
80963 }
80964
80965+#ifdef CONFIG_PAX_REFCOUNT
80966+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
80967+{
80968+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80969+
80970+ atomic64_dec_unchecked(v);
80971+}
80972+#endif
80973+
80974 static inline void atomic_long_add(long i, atomic_long_t *l)
80975 {
80976 atomic64_t *v = (atomic64_t *)l;
80977@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
80978 atomic64_add(i, v);
80979 }
80980
80981+#ifdef CONFIG_PAX_REFCOUNT
80982+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
80983+{
80984+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80985+
80986+ atomic64_add_unchecked(i, v);
80987+}
80988+#endif
80989+
80990 static inline void atomic_long_sub(long i, atomic_long_t *l)
80991 {
80992 atomic64_t *v = (atomic64_t *)l;
80993@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
80994 atomic64_sub(i, v);
80995 }
80996
80997+#ifdef CONFIG_PAX_REFCOUNT
80998+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
80999+{
81000+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
81001+
81002+ atomic64_sub_unchecked(i, v);
81003+}
81004+#endif
81005+
81006 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
81007 {
81008 atomic64_t *v = (atomic64_t *)l;
81009@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
81010 return atomic64_add_negative(i, v);
81011 }
81012
81013-static inline long atomic_long_add_return(long i, atomic_long_t *l)
81014+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
81015 {
81016 atomic64_t *v = (atomic64_t *)l;
81017
81018 return (long)atomic64_add_return(i, v);
81019 }
81020
81021+#ifdef CONFIG_PAX_REFCOUNT
81022+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
81023+{
81024+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
81025+
81026+ return (long)atomic64_add_return_unchecked(i, v);
81027+}
81028+#endif
81029+
81030 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
81031 {
81032 atomic64_t *v = (atomic64_t *)l;
81033@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
81034 return (long)atomic64_inc_return(v);
81035 }
81036
81037+#ifdef CONFIG_PAX_REFCOUNT
81038+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
81039+{
81040+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
81041+
81042+ return (long)atomic64_inc_return_unchecked(v);
81043+}
81044+#endif
81045+
81046 static inline long atomic_long_dec_return(atomic_long_t *l)
81047 {
81048 atomic64_t *v = (atomic64_t *)l;
81049@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
81050
81051 typedef atomic_t atomic_long_t;
81052
81053+#ifdef CONFIG_PAX_REFCOUNT
81054+typedef atomic_unchecked_t atomic_long_unchecked_t;
81055+#else
81056+typedef atomic_t atomic_long_unchecked_t;
81057+#endif
81058+
81059 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
81060 static inline long atomic_long_read(atomic_long_t *l)
81061 {
81062@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
81063 return (long)atomic_read(v);
81064 }
81065
81066+#ifdef CONFIG_PAX_REFCOUNT
81067+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
81068+{
81069+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
81070+
81071+ return (long)atomic_read_unchecked(v);
81072+}
81073+#endif
81074+
81075 static inline void atomic_long_set(atomic_long_t *l, long i)
81076 {
81077 atomic_t *v = (atomic_t *)l;
81078@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
81079 atomic_set(v, i);
81080 }
81081
81082+#ifdef CONFIG_PAX_REFCOUNT
81083+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
81084+{
81085+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
81086+
81087+ atomic_set_unchecked(v, i);
81088+}
81089+#endif
81090+
81091 static inline void atomic_long_inc(atomic_long_t *l)
81092 {
81093 atomic_t *v = (atomic_t *)l;
81094@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
81095 atomic_inc(v);
81096 }
81097
81098+#ifdef CONFIG_PAX_REFCOUNT
81099+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
81100+{
81101+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
81102+
81103+ atomic_inc_unchecked(v);
81104+}
81105+#endif
81106+
81107 static inline void atomic_long_dec(atomic_long_t *l)
81108 {
81109 atomic_t *v = (atomic_t *)l;
81110@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
81111 atomic_dec(v);
81112 }
81113
81114+#ifdef CONFIG_PAX_REFCOUNT
81115+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
81116+{
81117+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
81118+
81119+ atomic_dec_unchecked(v);
81120+}
81121+#endif
81122+
81123 static inline void atomic_long_add(long i, atomic_long_t *l)
81124 {
81125 atomic_t *v = (atomic_t *)l;
81126@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
81127 atomic_add(i, v);
81128 }
81129
81130+#ifdef CONFIG_PAX_REFCOUNT
81131+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
81132+{
81133+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
81134+
81135+ atomic_add_unchecked(i, v);
81136+}
81137+#endif
81138+
81139 static inline void atomic_long_sub(long i, atomic_long_t *l)
81140 {
81141 atomic_t *v = (atomic_t *)l;
81142@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
81143 atomic_sub(i, v);
81144 }
81145
81146+#ifdef CONFIG_PAX_REFCOUNT
81147+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
81148+{
81149+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
81150+
81151+ atomic_sub_unchecked(i, v);
81152+}
81153+#endif
81154+
81155 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
81156 {
81157 atomic_t *v = (atomic_t *)l;
81158@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
81159 return (long)atomic_add_return(i, v);
81160 }
81161
81162+#ifdef CONFIG_PAX_REFCOUNT
81163+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
81164+{
81165+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
81166+
81167+ return (long)atomic_add_return_unchecked(i, v);
81168+}
81169+
81170+#endif
81171+
81172 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
81173 {
81174 atomic_t *v = (atomic_t *)l;
81175@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
81176 return (long)atomic_inc_return(v);
81177 }
81178
81179+#ifdef CONFIG_PAX_REFCOUNT
81180+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
81181+{
81182+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
81183+
81184+ return (long)atomic_inc_return_unchecked(v);
81185+}
81186+#endif
81187+
81188 static inline long atomic_long_dec_return(atomic_long_t *l)
81189 {
81190 atomic_t *v = (atomic_t *)l;
81191@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
81192
81193 #endif /* BITS_PER_LONG == 64 */
81194
81195+#ifdef CONFIG_PAX_REFCOUNT
81196+static inline void pax_refcount_needs_these_functions(void)
81197+{
81198+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
81199+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
81200+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
81201+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
81202+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
81203+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
81204+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
81205+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
81206+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
81207+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
81208+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
81209+#ifdef CONFIG_X86
81210+ atomic_clear_mask_unchecked(0, NULL);
81211+ atomic_set_mask_unchecked(0, NULL);
81212+#endif
81213+
81214+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
81215+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
81216+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
81217+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
81218+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
81219+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
81220+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
81221+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
81222+}
81223+#else
81224+#define atomic_read_unchecked(v) atomic_read(v)
81225+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
81226+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
81227+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
81228+#define atomic_inc_unchecked(v) atomic_inc(v)
81229+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
81230+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
81231+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
81232+#define atomic_dec_unchecked(v) atomic_dec(v)
81233+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
81234+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
81235+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
81236+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
81237+
81238+#define atomic_long_read_unchecked(v) atomic_long_read(v)
81239+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
81240+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
81241+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
81242+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
81243+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
81244+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
81245+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
81246+#endif
81247+
81248 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
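The _unchecked family exists because, under PAX_REFCOUNT, the ordinary atomic operations trap on overflow to stop reference-count wraps from becoming use-after-free bugs; counters that may legitimately wrap (statistics, sequence numbers) opt out through these types. A userspace model of that split, using the GCC/Clang overflow builtins purely as an illustration of the policy, not the kernel mechanism:

/* Sketch: checked vs. unchecked increment, the policy behind the
 * atomic_long_unchecked_t typedefs above. */
#include <stdio.h>
#include <limits.h>

static int checked_inc(int *v)
{
	int out;

	if (__builtin_add_overflow(*v, 1, &out))
		return -1;	/* PAX_REFCOUNT path: refuse to wrap */
	*v = out;
	return 0;
}

static void unchecked_inc(unsigned int *v)
{
	++*v;			/* _unchecked path: wraparound is intended */
}

int main(void)
{
	int a = INT_MAX;
	unsigned int b = UINT_MAX;

	printf("checked:   %s\n", checked_inc(&a) ? "overflow refused" : "ok");
	unchecked_inc(&b);
	printf("unchecked: wrapped to %u\n", b);
	return 0;
}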
81249diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
81250index 9c79e76..9f7827d 100644
81251--- a/include/asm-generic/atomic.h
81252+++ b/include/asm-generic/atomic.h
81253@@ -154,7 +154,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
81254 * Atomically clears the bits set in @mask from @v
81255 */
81256 #ifndef atomic_clear_mask
81257-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
81258+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
81259 {
81260 unsigned long flags;
81261
81262diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
81263index b18ce4f..2ee2843 100644
81264--- a/include/asm-generic/atomic64.h
81265+++ b/include/asm-generic/atomic64.h
81266@@ -16,6 +16,8 @@ typedef struct {
81267 long long counter;
81268 } atomic64_t;
81269
81270+typedef atomic64_t atomic64_unchecked_t;
81271+
81272 #define ATOMIC64_INIT(i) { (i) }
81273
81274 extern long long atomic64_read(const atomic64_t *v);
81275@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
81276 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
81277 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
81278
81279+#define atomic64_read_unchecked(v) atomic64_read(v)
81280+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
81281+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
81282+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
81283+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
81284+#define atomic64_inc_unchecked(v) atomic64_inc(v)
81285+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
81286+#define atomic64_dec_unchecked(v) atomic64_dec(v)
81287+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
81288+
81289 #endif /* _ASM_GENERIC_ATOMIC64_H */
81290diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
81291index 1402fa8..025a736 100644
81292--- a/include/asm-generic/barrier.h
81293+++ b/include/asm-generic/barrier.h
81294@@ -74,7 +74,7 @@
81295 do { \
81296 compiletime_assert_atomic_type(*p); \
81297 smp_mb(); \
81298- ACCESS_ONCE(*p) = (v); \
81299+ ACCESS_ONCE_RW(*p) = (v); \
81300 } while (0)
81301
81302 #define smp_load_acquire(p) \
81303diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
81304index a60a7cc..0fe12f2 100644
81305--- a/include/asm-generic/bitops/__fls.h
81306+++ b/include/asm-generic/bitops/__fls.h
81307@@ -9,7 +9,7 @@
81308 *
81309 * Undefined if no set bit exists, so code should check against 0 first.
81310 */
81311-static __always_inline unsigned long __fls(unsigned long word)
81312+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
81313 {
81314 int num = BITS_PER_LONG - 1;
81315
81316diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
81317index 0576d1f..dad6c71 100644
81318--- a/include/asm-generic/bitops/fls.h
81319+++ b/include/asm-generic/bitops/fls.h
81320@@ -9,7 +9,7 @@
81321 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
81322 */
81323
81324-static __always_inline int fls(int x)
81325+static __always_inline int __intentional_overflow(-1) fls(int x)
81326 {
81327 int r = 32;
81328
81329diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
81330index b097cf8..3d40e14 100644
81331--- a/include/asm-generic/bitops/fls64.h
81332+++ b/include/asm-generic/bitops/fls64.h
81333@@ -15,7 +15,7 @@
81334 * at position 64.
81335 */
81336 #if BITS_PER_LONG == 32
81337-static __always_inline int fls64(__u64 x)
81338+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
81339 {
81340 __u32 h = x >> 32;
81341 if (h)
81342@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
81343 return fls(x);
81344 }
81345 #elif BITS_PER_LONG == 64
81346-static __always_inline int fls64(__u64 x)
81347+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
81348 {
81349 if (x == 0)
81350 return 0;
81351diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
81352index 1bfcfe5..e04c5c9 100644
81353--- a/include/asm-generic/cache.h
81354+++ b/include/asm-generic/cache.h
81355@@ -6,7 +6,7 @@
81356 * cache lines need to provide their own cache.h.
81357 */
81358
81359-#define L1_CACHE_SHIFT 5
81360-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
81361+#define L1_CACHE_SHIFT 5UL
81362+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
81363
81364 #endif /* __ASM_GENERIC_CACHE_H */
81365diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
81366index 0d68a1e..b74a761 100644
81367--- a/include/asm-generic/emergency-restart.h
81368+++ b/include/asm-generic/emergency-restart.h
81369@@ -1,7 +1,7 @@
81370 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
81371 #define _ASM_GENERIC_EMERGENCY_RESTART_H
81372
81373-static inline void machine_emergency_restart(void)
81374+static inline __noreturn void machine_emergency_restart(void)
81375 {
81376 machine_restart(NULL);
81377 }
81378diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
81379index 975e1cc..0b8a083 100644
81380--- a/include/asm-generic/io.h
81381+++ b/include/asm-generic/io.h
81382@@ -289,7 +289,7 @@ static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
81383 * These are pretty trivial
81384 */
81385 #ifndef virt_to_phys
81386-static inline unsigned long virt_to_phys(volatile void *address)
81387+static inline unsigned long __intentional_overflow(-1) virt_to_phys(volatile void *address)
81388 {
81389 return __pa((unsigned long)address);
81390 }
81391diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
81392index 90f99c7..00ce236 100644
81393--- a/include/asm-generic/kmap_types.h
81394+++ b/include/asm-generic/kmap_types.h
81395@@ -2,9 +2,9 @@
81396 #define _ASM_GENERIC_KMAP_TYPES_H
81397
81398 #ifdef __WITH_KM_FENCE
81399-# define KM_TYPE_NR 41
81400+# define KM_TYPE_NR 42
81401 #else
81402-# define KM_TYPE_NR 20
81403+# define KM_TYPE_NR 21
81404 #endif
81405
81406 #endif
81407diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
81408index 9ceb03b..62b0b8f 100644
81409--- a/include/asm-generic/local.h
81410+++ b/include/asm-generic/local.h
81411@@ -23,24 +23,37 @@ typedef struct
81412 atomic_long_t a;
81413 } local_t;
81414
81415+typedef struct {
81416+ atomic_long_unchecked_t a;
81417+} local_unchecked_t;
81418+
81419 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
81420
81421 #define local_read(l) atomic_long_read(&(l)->a)
81422+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
81423 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
81424+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
81425 #define local_inc(l) atomic_long_inc(&(l)->a)
81426+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
81427 #define local_dec(l) atomic_long_dec(&(l)->a)
81428+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
81429 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
81430+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
81431 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
81432+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
81433
81434 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
81435 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
81436 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
81437 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
81438 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
81439+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
81440 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
81441 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
81442+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
81443
81444 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
81445+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
81446 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
81447 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
81448 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
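
PAX_REFCOUNT instruments the regular local_t operations to trap overflows; the *_unchecked variants added here exist for counters where wraparound is harmless (statistics, event counts) and must not trip the detector. Note that local_cmpxchg_unchecked simply aliases the checked atomic_long_cmpxchg, since a compare-and-swap cannot overflow. A usage sketch with hypothetical names:

#include <asm/local.h>

static local_unchecked_t tx_events;     /* pure statistic: wrap is fine */

static void note_tx(void)
{
        local_inc_unchecked(&tx_events);        /* no overflow trap */
}

static long read_tx(void)
{
        return local_read_unchecked(&tx_events);
}
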
81449diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
81450index 725612b..9cc513a 100644
81451--- a/include/asm-generic/pgtable-nopmd.h
81452+++ b/include/asm-generic/pgtable-nopmd.h
81453@@ -1,14 +1,19 @@
81454 #ifndef _PGTABLE_NOPMD_H
81455 #define _PGTABLE_NOPMD_H
81456
81457-#ifndef __ASSEMBLY__
81458-
81459 #include <asm-generic/pgtable-nopud.h>
81460
81461-struct mm_struct;
81462-
81463 #define __PAGETABLE_PMD_FOLDED
81464
81465+#define PMD_SHIFT PUD_SHIFT
81466+#define PTRS_PER_PMD 1
81467+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
81468+#define PMD_MASK (~(PMD_SIZE-1))
81469+
81470+#ifndef __ASSEMBLY__
81471+
81472+struct mm_struct;
81473+
81474 /*
81475 * Having the pmd type consist of a pud gets the size right, and allows
81476 * us to conceptually access the pud entry that this pmd is folded into
81477@@ -16,11 +21,6 @@ struct mm_struct;
81478 */
81479 typedef struct { pud_t pud; } pmd_t;
81480
81481-#define PMD_SHIFT PUD_SHIFT
81482-#define PTRS_PER_PMD 1
81483-#define PMD_SIZE (1UL << PMD_SHIFT)
81484-#define PMD_MASK (~(PMD_SIZE-1))
81485-
81486 /*
81487 * The "pud_xxx()" functions here are trivial for a folded two-level
81488 * setup: the pmd is never bad, and a pmd always exists (as it's folded
81489diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
81490index 810431d..0ec4804f 100644
81491--- a/include/asm-generic/pgtable-nopud.h
81492+++ b/include/asm-generic/pgtable-nopud.h
81493@@ -1,10 +1,15 @@
81494 #ifndef _PGTABLE_NOPUD_H
81495 #define _PGTABLE_NOPUD_H
81496
81497-#ifndef __ASSEMBLY__
81498-
81499 #define __PAGETABLE_PUD_FOLDED
81500
81501+#define PUD_SHIFT PGDIR_SHIFT
81502+#define PTRS_PER_PUD 1
81503+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
81504+#define PUD_MASK (~(PUD_SIZE-1))
81505+
81506+#ifndef __ASSEMBLY__
81507+
81508 /*
81509 * Having the pud type consist of a pgd gets the size right, and allows
81510 * us to conceptually access the pgd entry that this pud is folded into
81511@@ -12,11 +17,6 @@
81512 */
81513 typedef struct { pgd_t pgd; } pud_t;
81514
81515-#define PUD_SHIFT PGDIR_SHIFT
81516-#define PTRS_PER_PUD 1
81517-#define PUD_SIZE (1UL << PUD_SHIFT)
81518-#define PUD_MASK (~(PUD_SIZE-1))
81519-
81520 /*
81521 * The "pgd_xxx()" functions here are trivial for a folded two-level
81522 * setup: the pud is never bad, and a pud always exists (as it's folded
81523@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
81524 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
81525
81526 #define pgd_populate(mm, pgd, pud) do { } while (0)
81527+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
81528 /*
81529 * (puds are folded into pgds so this doesn't get actually called,
81530 * but the define is needed for a generic inline function.)
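
In both the nopmd and nopud headers the PMD_*/PUD_* macros are hoisted above the #ifndef __ASSEMBLY__ guard and switched from 1UL to _AC(1,UL), so assembly files can use PMD_SIZE and PUD_SIZE too: assemblers do not understand the UL suffix. _AC (from uapi/linux/const.h, paraphrased) drops the suffix in assembly and pastes it in C:

#ifdef __ASSEMBLY__
#define _AC(X,Y)        X               /* asm sees a plain "1" */
#else
#define __AC(X,Y)       (X##Y)
#define _AC(X,Y)        __AC(X,Y)       /* C sees "(1UL)" */
#endif
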
81531diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
81532index 53b2acc..f4568e7 100644
81533--- a/include/asm-generic/pgtable.h
81534+++ b/include/asm-generic/pgtable.h
81535@@ -819,6 +819,22 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
81536 }
81537 #endif /* CONFIG_NUMA_BALANCING */
81538
81539+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
81540+#ifdef CONFIG_PAX_KERNEXEC
81541+#error KERNEXEC requires pax_open_kernel
81542+#else
81543+static inline unsigned long pax_open_kernel(void) { return 0; }
81544+#endif
81545+#endif
81546+
81547+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
81548+#ifdef CONFIG_PAX_KERNEXEC
81549+#error KERNEXEC requires pax_close_kernel
81550+#else
81551+static inline unsigned long pax_close_kernel(void) { return 0; }
81552+#endif
81553+#endif
81554+
81555 #endif /* CONFIG_MMU */
81556
81557 #endif /* !__ASSEMBLY__ */
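
Under KERNEXEC, data kept read-only at runtime may only be written between pax_open_kernel() and pax_close_kernel(); each architecture supplies the real implementations (toggling CR0.WP on x86, for instance), and the stubs above keep non-PaX configurations building. A hedged usage sketch, with hypothetical names:

/* Sketch: updating a variable placed in .data..read_only.
 * On non-PaX kernels both calls are the no-op stubs defined above. */
extern int hardened_setting;

static void update_setting(int v)
{
        pax_open_kernel();      /* make the read-only region writable */
        hardened_setting = v;
        pax_close_kernel();     /* seal it again */
}
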
81558diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
81559index 72d8803..cb9749c 100644
81560--- a/include/asm-generic/uaccess.h
81561+++ b/include/asm-generic/uaccess.h
81562@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
81563 return __clear_user(to, n);
81564 }
81565
81566+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
81567+#ifdef CONFIG_PAX_MEMORY_UDEREF
81568+#error UDEREF requires pax_open_userland
81569+#else
81570+static inline unsigned long pax_open_userland(void) { return 0; }
81571+#endif
81572+#endif
81573+
81574+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
81575+#ifdef CONFIG_PAX_MEMORY_UDEREF
81576+#error UDEREF requires pax_close_userland
81577+#else
81578+static inline unsigned long pax_close_userland(void) { return 0; }
81579+#endif
81580+#endif
81581+
81582 #endif /* __ASM_GENERIC_UACCESS_H */
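
pax_open_userland()/pax_close_userland() are the UDEREF analogue: on architectures that separate kernel and user address spaces, accessors must bracket user-memory access with them, and these stubs keep generic code building. An illustrative sketch (real implementations live in per-arch uaccess code):

static inline long copy_byte_from_user(u8 *dst, const u8 __user *src)
{
        long ret;

        pax_open_userland();    /* temporarily permit userland dereference */
        ret = __get_user(*dst, src);
        pax_close_userland();
        return ret;
}
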
81583diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
81584index c1c0b0c..05c9588 100644
81585--- a/include/asm-generic/vmlinux.lds.h
81586+++ b/include/asm-generic/vmlinux.lds.h
81587@@ -231,6 +231,7 @@
81588 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
81589 VMLINUX_SYMBOL(__start_rodata) = .; \
81590 *(.rodata) *(.rodata.*) \
81591+ *(.data..read_only) \
81592 *(__vermagic) /* Kernel version magic */ \
81593 . = ALIGN(8); \
81594 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
81595@@ -719,17 +720,18 @@
81596 * section in the linker script will go there too. @phdr should have
81597 * a leading colon.
81598 *
81599- * Note that this macros defines __per_cpu_load as an absolute symbol.
81600+ * Note that this macro defines per_cpu_load as an absolute symbol.
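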
81601 * If there is no need to put the percpu section at a predetermined
81602 * address, use PERCPU_SECTION.
81603 */
81604 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
81605- VMLINUX_SYMBOL(__per_cpu_load) = .; \
81606- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
81607+ per_cpu_load = .; \
81608+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
81609 - LOAD_OFFSET) { \
81610+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
81611 PERCPU_INPUT(cacheline) \
81612 } phdr \
81613- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
81614+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
81615
81616 /**
81617 * PERCPU_SECTION - define output section for percpu area, simple version
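
The rewrite turns __per_cpu_load from an absolute symbol into a section-relative one (defined inside .data..percpu as an offset from the absolute per_cpu_load), which matters under KERNEXEC percpu handling: section-relative symbols move with their section at relocation time, absolute ones do not. A toy linker-script sketch of the distinction:

/* toy.lds — illustrative only */
abs_base = .;                   /* top level: absolute, never relocated */
.data..example : {
        rel_sym = .;            /* inside a section: relocated with it */
        *(.data..example)
}
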
81618diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
81619index 016c2f1..c4baa98 100644
81620--- a/include/crypto/algapi.h
81621+++ b/include/crypto/algapi.h
81622@@ -34,7 +34,7 @@ struct crypto_type {
81623 unsigned int maskclear;
81624 unsigned int maskset;
81625 unsigned int tfmsize;
81626-};
81627+} __do_const;
81628
81629 struct crypto_instance {
81630 struct crypto_alg alg;
81631diff --git a/include/drm/drmP.h b/include/drm/drmP.h
81632index 8af71a8..7fe6c19 100644
81633--- a/include/drm/drmP.h
81634+++ b/include/drm/drmP.h
81635@@ -68,6 +68,7 @@
81636 #include <linux/workqueue.h>
81637 #include <linux/poll.h>
81638 #include <asm/pgalloc.h>
81639+#include <asm/local.h>
81640 #include <drm/drm.h>
81641 #include <drm/drm_sarea.h>
81642 #include <drm/drm_vma_manager.h>
81643@@ -261,10 +262,12 @@ do { \
81644 * \param cmd command.
81645 * \param arg argument.
81646 */
81647-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
81648+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
81649+ struct drm_file *file_priv);
81650+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
81651 struct drm_file *file_priv);
81652
81653-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
81654+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
81655 unsigned long arg);
81656
81657 #define DRM_IOCTL_NR(n) _IOC_NR(n)
81658@@ -280,10 +283,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
81659 struct drm_ioctl_desc {
81660 unsigned int cmd;
81661 int flags;
81662- drm_ioctl_t *func;
81663+ drm_ioctl_t func;
81664 unsigned int cmd_drv;
81665 const char *name;
81666-};
81667+} __do_const;
81668
81669 /**
81670 * Creates a driver or general drm_ioctl_desc array entry for the given
81671@@ -983,7 +986,8 @@ struct drm_info_list {
81672 int (*show)(struct seq_file*, void*); /** show callback */
81673 u32 driver_features; /**< Required driver features for this entry */
81674 void *data;
81675-};
81676+} __do_const;
81677+typedef struct drm_info_list __no_const drm_info_list_no_const;
81678
81679 /**
81680 * debugfs node structure. This structure represents a debugfs file.
81681@@ -1067,7 +1071,7 @@ struct drm_device {
81682
81683 /** \name Usage Counters */
81684 /*@{ */
81685- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
81686+ local_t open_count; /**< Outstanding files open, protected by drm_global_mutex. */
81687 spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
81688 int buf_use; /**< Buffers in use -- cannot alloc */
81689 atomic_t buf_alloc; /**< Buffer allocation in progress */
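
With drm_ioctl_t now a const function-pointer typedef and drm_ioctl_desc marked __do_const, the constify plugin can place driver ioctl tables in read-only memory; drm_ioctl_no_const_t remains as an escape hatch for the rare pointer that must be assigned at runtime, and open_count becomes a local_t to use the hardened counter ops from the local.h hunk above. A sketch of how a driver table looks to the plugin (MYDRV_PING and mydrv_ping_ioctl are hypothetical):

/* The initializer is compile-time constant, so the plugin treats the
 * __do_const struct as if it were declared const and the table lands
 * in .rodata: */
static struct drm_ioctl_desc mydrv_ioctls[] = {
        DRM_IOCTL_DEF_DRV(MYDRV_PING, mydrv_ping_ioctl, DRM_AUTH),
};
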
81690diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
81691index a3d75fe..6802f9c 100644
81692--- a/include/drm/drm_crtc_helper.h
81693+++ b/include/drm/drm_crtc_helper.h
81694@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
81695 struct drm_connector *connector);
81696 /* disable encoder when not in use - more explicit than dpms off */
81697 void (*disable)(struct drm_encoder *encoder);
81698-};
81699+} __no_const;
81700
81701 /**
81702 * drm_connector_helper_funcs - helper operations for connectors
81703diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
81704index a70d456..6ea07cd 100644
81705--- a/include/drm/i915_pciids.h
81706+++ b/include/drm/i915_pciids.h
81707@@ -37,7 +37,7 @@
81708 */
81709 #define INTEL_VGA_DEVICE(id, info) { \
81710 0x8086, id, \
81711- ~0, ~0, \
81712+ PCI_ANY_ID, PCI_ANY_ID, \
81713 0x030000, 0xff0000, \
81714 (unsigned long) info }
81715
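struct pci_device_id's subvendor/subdevice fields are __u32; writing ~0 there relies on implicit conversion of the signed int -1, which the size_overflow plugin flags. PCI_ANY_ID states the same intent in the expected type. What INTEL_VGA_DEVICE builds, spelled out (the device id is hypothetical):

static const struct pci_device_id example[] = {
        { 0x8086, 0x0042,               /* vendor, device */
          PCI_ANY_ID, PCI_ANY_ID,       /* match any subvendor/subdevice */
          0x030000, 0xff0000,           /* display-controller class/mask */
          0 },                          /* driver_data */
        { }
};
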
81716diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
81717index 72dcbe8..8db58d7 100644
81718--- a/include/drm/ttm/ttm_memory.h
81719+++ b/include/drm/ttm/ttm_memory.h
81720@@ -48,7 +48,7 @@
81721
81722 struct ttm_mem_shrink {
81723 int (*do_shrink) (struct ttm_mem_shrink *);
81724-};
81725+} __no_const;
81726
81727 /**
81728 * struct ttm_mem_global - Global memory accounting structure.
81729diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
81730index 49a8284..9643967 100644
81731--- a/include/drm/ttm/ttm_page_alloc.h
81732+++ b/include/drm/ttm/ttm_page_alloc.h
81733@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
81734 */
81735 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
81736
81737+struct device;
81738 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
81739 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
81740
81741diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
81742index 4b840e8..155d235 100644
81743--- a/include/keys/asymmetric-subtype.h
81744+++ b/include/keys/asymmetric-subtype.h
81745@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
81746 /* Verify the signature on a key of this subtype (optional) */
81747 int (*verify_signature)(const struct key *key,
81748 const struct public_key_signature *sig);
81749-};
81750+} __do_const;
81751
81752 /**
81753 * asymmetric_key_subtype - Get the subtype from an asymmetric key
81754diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
81755index c1da539..1dcec55 100644
81756--- a/include/linux/atmdev.h
81757+++ b/include/linux/atmdev.h
81758@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
81759 #endif
81760
81761 struct k_atm_aal_stats {
81762-#define __HANDLE_ITEM(i) atomic_t i
81763+#define __HANDLE_ITEM(i) atomic_unchecked_t i
81764 __AAL_STAT_ITEMS
81765 #undef __HANDLE_ITEM
81766 };
81767@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
81768 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
81769 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
81770 struct module *owner;
81771-};
81772+} __do_const;
81773
81774 struct atmphy_ops {
81775 int (*start)(struct atm_dev *dev);
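Same pattern as local_unchecked_t earlier: the per-AAL statistics are pure counters, so __HANDLE_ITEM now declares them atomic_unchecked_t and callers update them with the _unchecked ops, keeping PAX_REFCOUNT's overflow detector focused on real reference counts. A one-line sketch (field names illustrative):

/* Wrap is harmless for a dropped-packet statistic: */
atomic_inc_unchecked(&vcc->stats->rx_drop);
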
81776diff --git a/include/linux/audit.h b/include/linux/audit.h
81777index 22cfddb..ab759e8 100644
81778--- a/include/linux/audit.h
81779+++ b/include/linux/audit.h
81780@@ -210,7 +210,7 @@ static inline void audit_ptrace(struct task_struct *t)
81781 extern unsigned int audit_serial(void);
81782 extern int auditsc_get_stamp(struct audit_context *ctx,
81783 struct timespec *t, unsigned int *serial);
81784-extern int audit_set_loginuid(kuid_t loginuid);
81785+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
81786
81787 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
81788 {
81789diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
81790index 61f29e5..e67c658 100644
81791--- a/include/linux/binfmts.h
81792+++ b/include/linux/binfmts.h
81793@@ -44,7 +44,7 @@ struct linux_binprm {
81794 unsigned interp_flags;
81795 unsigned interp_data;
81796 unsigned long loader, exec;
81797-};
81798+} __randomize_layout;
81799
81800 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
81801 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
81802@@ -73,8 +73,10 @@ struct linux_binfmt {
81803 int (*load_binary)(struct linux_binprm *);
81804 int (*load_shlib)(struct file *);
81805 int (*core_dump)(struct coredump_params *cprm);
81806+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
81807+ void (*handle_mmap)(struct file *);
81808 unsigned long min_coredump; /* minimal dump size */
81809-};
81810+} __do_const __randomize_layout;
81811
81812 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
81813
81814diff --git a/include/linux/bitops.h b/include/linux/bitops.h
81815index cbc5833..8123ebc 100644
81816--- a/include/linux/bitops.h
81817+++ b/include/linux/bitops.h
81818@@ -122,7 +122,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
81819 * @word: value to rotate
81820 * @shift: bits to roll
81821 */
81822-static inline __u32 rol32(__u32 word, unsigned int shift)
81823+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
81824 {
81825 return (word << shift) | (word >> (32 - shift));
81826 }
81827@@ -132,7 +132,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
81828 * @word: value to rotate
81829 * @shift: bits to roll
81830 */
81831-static inline __u32 ror32(__u32 word, unsigned int shift)
81832+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
81833 {
81834 return (word >> shift) | (word << (32 - shift));
81835 }
81836@@ -188,7 +188,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
81837 return (__s32)(value << shift) >> shift;
81838 }
81839
81840-static inline unsigned fls_long(unsigned long l)
81841+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
81842 {
81843 if (sizeof(l) == 4)
81844 return fls(l);
81845diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
81846index 8699bcf..279485d 100644
81847--- a/include/linux/blkdev.h
81848+++ b/include/linux/blkdev.h
81849@@ -1625,7 +1625,7 @@ struct block_device_operations {
81850 /* this callback is with swap_lock and sometimes page table lock held */
81851 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
81852 struct module *owner;
81853-};
81854+} __do_const;
81855
81856 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
81857 unsigned long);
81858diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
81859index afc1343..9735539 100644
81860--- a/include/linux/blktrace_api.h
81861+++ b/include/linux/blktrace_api.h
81862@@ -25,7 +25,7 @@ struct blk_trace {
81863 struct dentry *dropped_file;
81864 struct dentry *msg_file;
81865 struct list_head running_list;
81866- atomic_t dropped;
81867+ atomic_unchecked_t dropped;
81868 };
81869
81870 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
81871diff --git a/include/linux/cache.h b/include/linux/cache.h
81872index 17e7e82..1d7da26 100644
81873--- a/include/linux/cache.h
81874+++ b/include/linux/cache.h
81875@@ -16,6 +16,14 @@
81876 #define __read_mostly
81877 #endif
81878
81879+#ifndef __read_only
81880+#ifdef CONFIG_PAX_KERNEXEC
81881+#error KERNEXEC requires __read_only
81882+#else
81883+#define __read_only __read_mostly
81884+#endif
81885+#endif
81886+
81887 #ifndef ____cacheline_aligned
81888 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
81889 #endif
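
__read_only degrades to __read_mostly when KERNEXEC is off; with KERNEXEC each architecture must define it to place the object in .data..read_only, which the vmlinux.lds.h hunk above folds into the .rodata output section. A plausible arch-side sketch (illustrative, not the patch's exact definition):

#define __read_only __attribute__((__section__(".data..read_only")))

static unsigned long sysctl_hardening __read_only = 1;
/* writes now require the pax_open_kernel()/pax_close_kernel() bracket */
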
81890diff --git a/include/linux/capability.h b/include/linux/capability.h
81891index aa93e5e..985a1b0 100644
81892--- a/include/linux/capability.h
81893+++ b/include/linux/capability.h
81894@@ -214,9 +214,14 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
81895 extern bool capable(int cap);
81896 extern bool ns_capable(struct user_namespace *ns, int cap);
81897 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
81898+extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
81899 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
81900+extern bool capable_nolog(int cap);
81901+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
81902
81903 /* audit system wants to get cap info from files as well */
81904 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
81905
81906+extern int is_privileged_binary(const struct dentry *dentry);
81907+
81908 #endif /* !_LINUX_CAPABILITY_H */
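The _nolog variants perform the same capability check but skip grsecurity's audit logging, for probes where failure is expected and would otherwise spam the log; is_privileged_binary() lets callers ask whether a dentry is setuid/setgid or carries file capabilities. A usage sketch:

/* Sketch: a speculative probe that shouldn't generate log noise */
static bool may_tune(void)
{
        return capable_nolog(CAP_SYS_ADMIN);
}
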
81909diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
81910index 8609d57..86e4d79 100644
81911--- a/include/linux/cdrom.h
81912+++ b/include/linux/cdrom.h
81913@@ -87,7 +87,6 @@ struct cdrom_device_ops {
81914
81915 /* driver specifications */
81916 const int capability; /* capability flags */
81917- int n_minors; /* number of active minor devices */
81918 /* handle uniform packets for scsi type devices (scsi,atapi) */
81919 int (*generic_packet) (struct cdrom_device_info *,
81920 struct packet_command *);
81921diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
81922index 4ce9056..86caac6 100644
81923--- a/include/linux/cleancache.h
81924+++ b/include/linux/cleancache.h
81925@@ -31,7 +31,7 @@ struct cleancache_ops {
81926 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
81927 void (*invalidate_inode)(int, struct cleancache_filekey);
81928 void (*invalidate_fs)(int);
81929-};
81930+} __no_const;
81931
81932 extern struct cleancache_ops *
81933 cleancache_register_ops(struct cleancache_ops *ops);
81934diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
81935index 0c287db..5efa775 100644
81936--- a/include/linux/clk-provider.h
81937+++ b/include/linux/clk-provider.h
81938@@ -180,6 +180,7 @@ struct clk_ops {
81939 void (*init)(struct clk_hw *hw);
81940 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
81941 };
81942+typedef struct clk_ops __no_const clk_ops_no_const;
81943
81944 /**
81945 * struct clk_init_data - holds init data that's common to all clocks and is
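The typedef-with-__no_const pattern recurs throughout this patch: the struct itself stays eligible for constification, while the *_no_const typedef opts out individual instances that genuinely must be written at runtime, such as ops assembled during probe. A sketch with hypothetical names:

static int my_enable(struct clk_hw *hw);

static clk_ops_no_const my_clk_ops;     /* __no_const: left writable */

static void my_probe_setup(void)
{
        my_clk_ops.enable = my_enable;  /* legal: this instance is not constified */
}
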
81946diff --git a/include/linux/compat.h b/include/linux/compat.h
81947index e649426..a74047b 100644
81948--- a/include/linux/compat.h
81949+++ b/include/linux/compat.h
81950@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
81951 compat_size_t __user *len_ptr);
81952
81953 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
81954-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
81955+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
81956 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
81957 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
81958 compat_ssize_t msgsz, int msgflg);
81959@@ -436,7 +436,7 @@ extern int compat_ptrace_request(struct task_struct *child,
81960 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
81961 compat_ulong_t addr, compat_ulong_t data);
81962 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
81963- compat_long_t addr, compat_long_t data);
81964+ compat_ulong_t addr, compat_ulong_t data);
81965
81966 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
81967 /*
81968diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
81969index 2507fd2..55203f8 100644
81970--- a/include/linux/compiler-gcc4.h
81971+++ b/include/linux/compiler-gcc4.h
81972@@ -39,9 +39,34 @@
81973 # define __compiletime_warning(message) __attribute__((warning(message)))
81974 # define __compiletime_error(message) __attribute__((error(message)))
81975 #endif /* __CHECKER__ */
81976+
81977+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
81978+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
81979+#define __bos0(ptr) __bos((ptr), 0)
81980+#define __bos1(ptr) __bos((ptr), 1)
81981 #endif /* GCC_VERSION >= 40300 */
81982
81983 #if GCC_VERSION >= 40500
81984+
81985+#ifdef RANDSTRUCT_PLUGIN
81986+#define __randomize_layout __attribute__((randomize_layout))
81987+#define __no_randomize_layout __attribute__((no_randomize_layout))
81988+#endif
81989+
81990+#ifdef CONSTIFY_PLUGIN
81991+#define __no_const __attribute__((no_const))
81992+#define __do_const __attribute__((do_const))
81993+#endif
81994+
81995+#ifdef SIZE_OVERFLOW_PLUGIN
81996+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
81997+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
81998+#endif
81999+
82000+#ifdef LATENT_ENTROPY_PLUGIN
82001+#define __latent_entropy __attribute__((latent_entropy))
82002+#endif
82003+
82004 /*
82005 * Mark a position in code as unreachable. This can be used to
82006 * suppress control flow warnings after asm blocks that transfer
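Besides the plugin attribute hooks, the gcc-4.3+ block adds __alloc_size and the __builtin_object_size wrappers, which let fortified string and allocation helpers learn object sizes at compile time. A sketch of what that buys (my_alloc is hypothetical):

void *my_alloc(size_t n) __alloc_size(1);       /* arg 1 is the object size */

/* With optimization on, the compiler can now fold
 *     p = my_alloc(64);
 *     __bos0(p)  ->  64
 * and fortified copies use that to reject overruns at compile time. */
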
82007diff --git a/include/linux/compiler.h b/include/linux/compiler.h
82008index d5ad7b1..3b74638 100644
82009--- a/include/linux/compiler.h
82010+++ b/include/linux/compiler.h
82011@@ -5,11 +5,14 @@
82012
82013 #ifdef __CHECKER__
82014 # define __user __attribute__((noderef, address_space(1)))
82015+# define __force_user __force __user
82016 # define __kernel __attribute__((address_space(0)))
82017+# define __force_kernel __force __kernel
82018 # define __safe __attribute__((safe))
82019 # define __force __attribute__((force))
82020 # define __nocast __attribute__((nocast))
82021 # define __iomem __attribute__((noderef, address_space(2)))
82022+# define __force_iomem __force __iomem
82023 # define __must_hold(x) __attribute__((context(x,1,1)))
82024 # define __acquires(x) __attribute__((context(x,0,1)))
82025 # define __releases(x) __attribute__((context(x,1,0)))
82026@@ -17,20 +20,37 @@
82027 # define __release(x) __context__(x,-1)
82028 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
82029 # define __percpu __attribute__((noderef, address_space(3)))
82030+# define __force_percpu __force __percpu
82031 #ifdef CONFIG_SPARSE_RCU_POINTER
82032 # define __rcu __attribute__((noderef, address_space(4)))
82033+# define __force_rcu __force __rcu
82034 #else
82035 # define __rcu
82036+# define __force_rcu
82037 #endif
82038 extern void __chk_user_ptr(const volatile void __user *);
82039 extern void __chk_io_ptr(const volatile void __iomem *);
82040 #else
82041-# define __user
82042-# define __kernel
82043+# ifdef CHECKER_PLUGIN
82044+//# define __user
82045+//# define __force_user
82046+//# define __kernel
82047+//# define __force_kernel
82048+# else
82049+# ifdef STRUCTLEAK_PLUGIN
82050+# define __user __attribute__((user))
82051+# else
82052+# define __user
82053+# endif
82054+# define __force_user
82055+# define __kernel
82056+# define __force_kernel
82057+# endif
82058 # define __safe
82059 # define __force
82060 # define __nocast
82061 # define __iomem
82062+# define __force_iomem
82063 # define __chk_user_ptr(x) (void)0
82064 # define __chk_io_ptr(x) (void)0
82065 # define __builtin_warning(x, y...) (1)
82066@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
82067 # define __release(x) (void)0
82068 # define __cond_lock(x,c) (c)
82069 # define __percpu
82070+# define __force_percpu
82071 # define __rcu
82072+# define __force_rcu
82073 #endif
82074
82075 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
82076@@ -286,6 +308,34 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
82077 # define __attribute_const__ /* unimplemented */
82078 #endif
82079
82080+#ifndef __randomize_layout
82081+# define __randomize_layout
82082+#endif
82083+
82084+#ifndef __no_randomize_layout
82085+# define __no_randomize_layout
82086+#endif
82087+
82088+#ifndef __no_const
82089+# define __no_const
82090+#endif
82091+
82092+#ifndef __do_const
82093+# define __do_const
82094+#endif
82095+
82096+#ifndef __size_overflow
82097+# define __size_overflow(...)
82098+#endif
82099+
82100+#ifndef __intentional_overflow
82101+# define __intentional_overflow(...)
82102+#endif
82103+
82104+#ifndef __latent_entropy
82105+# define __latent_entropy
82106+#endif
82107+
82108 /*
82109 * Tell gcc if a function is cold. The compiler will assume any path
82110 * directly leading to the call is unlikely.
82111@@ -295,6 +345,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
82112 #define __cold
82113 #endif
82114
82115+#ifndef __alloc_size
82116+#define __alloc_size(...)
82117+#endif
82118+
82119+#ifndef __bos
82120+#define __bos(ptr, arg)
82121+#endif
82122+
82123+#ifndef __bos0
82124+#define __bos0(ptr)
82125+#endif
82126+
82127+#ifndef __bos1
82128+#define __bos1(ptr)
82129+#endif
82130+
82131 /* Simple shorthand for a section definition */
82132 #ifndef __section
82133 # define __section(S) __attribute__ ((__section__(#S)))
82134@@ -378,7 +444,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
82135 * use is to mediate communication between process-level code and irq/NMI
82136 * handlers, all running on the same CPU.
82137 */
82138-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
82139+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
82140+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
82141
82142 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
82143 #ifdef CONFIG_KPROBES
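ACCESS_ONCE gains a const qualifier so it can only read; writes must go through the new ACCESS_ONCE_RW. That turns accidental lockless writes into compile errors and documents intent at every use site:

static int shared_flag;

int peek(void)
{
        return ACCESS_ONCE(shared_flag);        /* read via volatile const: OK */
}

void poke(void)
{
        ACCESS_ONCE_RW(shared_flag) = 1;        /* writes need the _RW variant */
        /* ACCESS_ONCE(shared_flag) = 1;   now fails: assignment to const */
}
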
82144diff --git a/include/linux/completion.h b/include/linux/completion.h
82145index 5d5aaae..0ea9b84 100644
82146--- a/include/linux/completion.h
82147+++ b/include/linux/completion.h
82148@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
82149
82150 extern void wait_for_completion(struct completion *);
82151 extern void wait_for_completion_io(struct completion *);
82152-extern int wait_for_completion_interruptible(struct completion *x);
82153-extern int wait_for_completion_killable(struct completion *x);
82154+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
82155+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
82156 extern unsigned long wait_for_completion_timeout(struct completion *x,
82157- unsigned long timeout);
82158+ unsigned long timeout) __intentional_overflow(-1);
82159 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
82160- unsigned long timeout);
82161+ unsigned long timeout) __intentional_overflow(-1);
82162 extern long wait_for_completion_interruptible_timeout(
82163- struct completion *x, unsigned long timeout);
82164+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
82165 extern long wait_for_completion_killable_timeout(
82166- struct completion *x, unsigned long timeout);
82167+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
82168 extern bool try_wait_for_completion(struct completion *x);
82169 extern bool completion_done(struct completion *x);
82170
82171diff --git a/include/linux/configfs.h b/include/linux/configfs.h
82172index 34025df..d94bbbc 100644
82173--- a/include/linux/configfs.h
82174+++ b/include/linux/configfs.h
82175@@ -125,7 +125,7 @@ struct configfs_attribute {
82176 const char *ca_name;
82177 struct module *ca_owner;
82178 umode_t ca_mode;
82179-};
82180+} __do_const;
82181
82182 /*
82183 * Users often need to create attribute structures for their configurable
82184diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
82185index 8f8ae95..b9b0e6d 100644
82186--- a/include/linux/cpufreq.h
82187+++ b/include/linux/cpufreq.h
82188@@ -202,6 +202,7 @@ struct global_attr {
82189 ssize_t (*store)(struct kobject *a, struct attribute *b,
82190 const char *c, size_t count);
82191 };
82192+typedef struct global_attr __no_const global_attr_no_const;
82193
82194 #define define_one_global_ro(_name) \
82195 static struct global_attr _name = \
82196@@ -268,7 +269,7 @@ struct cpufreq_driver {
82197 bool boost_supported;
82198 bool boost_enabled;
82199 int (*set_boost) (int state);
82200-};
82201+} __do_const;
82202
82203 /* flags */
82204 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
82205diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
82206index 25e0df6..952dffd 100644
82207--- a/include/linux/cpuidle.h
82208+++ b/include/linux/cpuidle.h
82209@@ -50,7 +50,8 @@ struct cpuidle_state {
82210 int index);
82211
82212 int (*enter_dead) (struct cpuidle_device *dev, int index);
82213-};
82214+} __do_const;
82215+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
82216
82217 /* Idle State Flags */
82218 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
82219@@ -209,7 +210,7 @@ struct cpuidle_governor {
82220 void (*reflect) (struct cpuidle_device *dev, int index);
82221
82222 struct module *owner;
82223-};
82224+} __do_const;
82225
82226 #ifdef CONFIG_CPU_IDLE
82227 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
82228diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
82229index 2997af6..424ddc1 100644
82230--- a/include/linux/cpumask.h
82231+++ b/include/linux/cpumask.h
82232@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
82233 }
82234
82235 /* Valid inputs for n are -1 and 0. */
82236-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
82237+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
82238 {
82239 return n+1;
82240 }
82241
82242-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
82243+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
82244 {
82245 return n+1;
82246 }
82247
82248-static inline unsigned int cpumask_next_and(int n,
82249+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
82250 const struct cpumask *srcp,
82251 const struct cpumask *andp)
82252 {
82253@@ -174,7 +174,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
82254 *
82255 * Returns >= nr_cpu_ids if no further cpus set.
82256 */
82257-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
82258+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
82259 {
82260 /* -1 is a legal arg here. */
82261 if (n != -1)
82262@@ -189,7 +189,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
82263 *
82264 * Returns >= nr_cpu_ids if no further cpus unset.
82265 */
82266-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
82267+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
82268 {
82269 /* -1 is a legal arg here. */
82270 if (n != -1)
82271@@ -197,7 +197,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
82272 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
82273 }
82274
82275-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
82276+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
82277 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
82278 int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
82279
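These iterators accept n == -1 ("before the first cpu") and compute n+1 internally, signalling exhaustion by returning a value >= nr_cpu_ids rather than an error code; the -1 annotation tells the plugin that the arithmetic is intentional. The canonical loop these annotations keep quiet:

unsigned int cpu;

for (cpu = cpumask_next(-1, cpu_online_mask);   /* first online cpu */
     cpu < nr_cpu_ids;
     cpu = cpumask_next(cpu, cpu_online_mask)) {
        /* per-cpu work */
}
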
82280diff --git a/include/linux/cred.h b/include/linux/cred.h
82281index f61d6c8..d372d95 100644
82282--- a/include/linux/cred.h
82283+++ b/include/linux/cred.h
82284@@ -35,7 +35,7 @@ struct group_info {
82285 int nblocks;
82286 kgid_t small_block[NGROUPS_SMALL];
82287 kgid_t *blocks[0];
82288-};
82289+} __randomize_layout;
82290
82291 /**
82292 * get_group_info - Get a reference to a group info structure
82293@@ -136,7 +136,7 @@ struct cred {
82294 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
82295 struct group_info *group_info; /* supplementary groups for euid/fsgid */
82296 struct rcu_head rcu; /* RCU deletion hook */
82297-};
82298+} __randomize_layout;
82299
82300 extern void __put_cred(struct cred *);
82301 extern void exit_creds(struct task_struct *);
82302@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
82303 static inline void validate_process_creds(void)
82304 {
82305 }
82306+static inline void validate_task_creds(struct task_struct *task)
82307+{
82308+}
82309 #endif
82310
82311 /**
82312@@ -322,6 +325,7 @@ static inline void put_cred(const struct cred *_cred)
82313
82314 #define task_uid(task) (task_cred_xxx((task), uid))
82315 #define task_euid(task) (task_cred_xxx((task), euid))
82316+#define task_securebits(task) (task_cred_xxx((task), securebits))
82317
82318 #define current_cred_xxx(xxx) \
82319 ({ \
82320diff --git a/include/linux/crypto.h b/include/linux/crypto.h
82321index b92eadf..b4ecdc1 100644
82322--- a/include/linux/crypto.h
82323+++ b/include/linux/crypto.h
82324@@ -373,7 +373,7 @@ struct cipher_tfm {
82325 const u8 *key, unsigned int keylen);
82326 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
82327 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
82328-};
82329+} __no_const;
82330
82331 struct hash_tfm {
82332 int (*init)(struct hash_desc *desc);
82333@@ -394,13 +394,13 @@ struct compress_tfm {
82334 int (*cot_decompress)(struct crypto_tfm *tfm,
82335 const u8 *src, unsigned int slen,
82336 u8 *dst, unsigned int *dlen);
82337-};
82338+} __no_const;
82339
82340 struct rng_tfm {
82341 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
82342 unsigned int dlen);
82343 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
82344-};
82345+} __no_const;
82346
82347 #define crt_ablkcipher crt_u.ablkcipher
82348 #define crt_aead crt_u.aead
82349diff --git a/include/linux/ctype.h b/include/linux/ctype.h
82350index 653589e..4ef254a 100644
82351--- a/include/linux/ctype.h
82352+++ b/include/linux/ctype.h
82353@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
82354 * Fast implementation of tolower() for internal usage. Do not use in your
82355 * code.
82356 */
82357-static inline char _tolower(const char c)
82358+static inline unsigned char _tolower(const unsigned char c)
82359 {
82360 return c | 0x20;
82361 }
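With a plain (usually signed) char argument, bytes at or above 0x80 arrive as negative values and the result is returned as a possibly-negative char; the unsigned signature keeps the value in 0..255 end to end. A worked illustration:

/* Sketch: a high-bit input through both signatures */
char c = (char)0xC7;
/* old: _tolower(c) took signed char (-57 on most ABIs) and returned
 *      char, so a test like (_tolower(c) == 0xe7) compared -25 with
 *      231 and failed;
 * new: the byte is converted to unsigned char (0xC7) and the result
 *      (0xE7 == 231) is returned as unsigned char, comparing as
 *      expected. */
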
82362diff --git a/include/linux/dcache.h b/include/linux/dcache.h
82363index 3c7ec32..4ca97cc 100644
82364--- a/include/linux/dcache.h
82365+++ b/include/linux/dcache.h
82366@@ -133,7 +133,7 @@ struct dentry {
82367 } d_u;
82368 struct list_head d_subdirs; /* our children */
82369 struct hlist_node d_alias; /* inode alias list */
82370-};
82371+} __randomize_layout;
82372
82373 /*
82374 * dentry->d_lock spinlock nesting subclasses:
82375diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
82376index 7925bf0..d5143d2 100644
82377--- a/include/linux/decompress/mm.h
82378+++ b/include/linux/decompress/mm.h
82379@@ -77,7 +77,7 @@ static void free(void *where)
82380 * warnings when not needed (indeed large_malloc / large_free are not
82381 * needed by inflate */
82382
82383-#define malloc(a) kmalloc(a, GFP_KERNEL)
82384+#define malloc(a) kmalloc((a), GFP_KERNEL)
82385 #define free(a) kfree(a)
82386
82387 #define large_malloc(a) vmalloc(a)
82388diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
82389index f1863dc..5c26074 100644
82390--- a/include/linux/devfreq.h
82391+++ b/include/linux/devfreq.h
82392@@ -114,7 +114,7 @@ struct devfreq_governor {
82393 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
82394 int (*event_handler)(struct devfreq *devfreq,
82395 unsigned int event, void *data);
82396-};
82397+} __do_const;
82398
82399 /**
82400 * struct devfreq - Device devfreq structure
82401diff --git a/include/linux/device.h b/include/linux/device.h
82402index af424ac..fd46ddf 100644
82403--- a/include/linux/device.h
82404+++ b/include/linux/device.h
82405@@ -310,7 +310,7 @@ struct subsys_interface {
82406 struct list_head node;
82407 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
82408 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
82409-};
82410+} __do_const;
82411
82412 int subsys_interface_register(struct subsys_interface *sif);
82413 void subsys_interface_unregister(struct subsys_interface *sif);
82414@@ -506,7 +506,7 @@ struct device_type {
82415 void (*release)(struct device *dev);
82416
82417 const struct dev_pm_ops *pm;
82418-};
82419+} __do_const;
82420
82421 /* interface for exporting device attributes */
82422 struct device_attribute {
82423@@ -516,11 +516,12 @@ struct device_attribute {
82424 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
82425 const char *buf, size_t count);
82426 };
82427+typedef struct device_attribute __no_const device_attribute_no_const;
82428
82429 struct dev_ext_attribute {
82430 struct device_attribute attr;
82431 void *var;
82432-};
82433+} __do_const;
82434
82435 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
82436 char *buf);
82437diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
82438index 931b709..89b2d89 100644
82439--- a/include/linux/dma-mapping.h
82440+++ b/include/linux/dma-mapping.h
82441@@ -60,7 +60,7 @@ struct dma_map_ops {
82442 u64 (*get_required_mask)(struct device *dev);
82443 #endif
82444 int is_phys;
82445-};
82446+} __do_const;
82447
82448 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
82449
82450diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
82451index d2c5cc7..d193394 100644
82452--- a/include/linux/dmaengine.h
82453+++ b/include/linux/dmaengine.h
82454@@ -1147,9 +1147,9 @@ struct dma_pinned_list {
82455 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
82456 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
82457
82458-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
82459+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
82460 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
82461-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
82462+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
82463 struct dma_pinned_list *pinned_list, struct page *page,
82464 unsigned int offset, size_t len);
82465
82466diff --git a/include/linux/efi.h b/include/linux/efi.h
82467index 41bbf8b..bd3a718 100644
82468--- a/include/linux/efi.h
82469+++ b/include/linux/efi.h
82470@@ -1027,6 +1027,7 @@ struct efivar_operations {
82471 efi_set_variable_t *set_variable;
82472 efi_query_variable_store_t *query_variable_store;
82473 };
82474+typedef struct efivar_operations __no_const efivar_operations_no_const;
82475
82476 struct efivars {
82477 /*
82478diff --git a/include/linux/elf.h b/include/linux/elf.h
82479index 67a5fa7..b817372 100644
82480--- a/include/linux/elf.h
82481+++ b/include/linux/elf.h
82482@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
82483 #define elf_note elf32_note
82484 #define elf_addr_t Elf32_Off
82485 #define Elf_Half Elf32_Half
82486+#define elf_dyn Elf32_Dyn
82487
82488 #else
82489
82490@@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
82491 #define elf_note elf64_note
82492 #define elf_addr_t Elf64_Off
82493 #define Elf_Half Elf64_Half
82494+#define elf_dyn Elf64_Dyn
82495
82496 #endif
82497
82498diff --git a/include/linux/err.h b/include/linux/err.h
82499index a729120..6ede2c9 100644
82500--- a/include/linux/err.h
82501+++ b/include/linux/err.h
82502@@ -20,12 +20,12 @@
82503
82504 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
82505
82506-static inline void * __must_check ERR_PTR(long error)
82507+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
82508 {
82509 return (void *) error;
82510 }
82511
82512-static inline long __must_check PTR_ERR(__force const void *ptr)
82513+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
82514 {
82515 return (long) ptr;
82516 }
82517diff --git a/include/linux/extcon.h b/include/linux/extcon.h
82518index 36f49c4..a2a1f4c 100644
82519--- a/include/linux/extcon.h
82520+++ b/include/linux/extcon.h
82521@@ -135,7 +135,7 @@ struct extcon_dev {
82522 /* /sys/class/extcon/.../mutually_exclusive/... */
82523 struct attribute_group attr_g_muex;
82524 struct attribute **attrs_muex;
82525- struct device_attribute *d_attrs_muex;
82526+ device_attribute_no_const *d_attrs_muex;
82527 };
82528
82529 /**
82530diff --git a/include/linux/fb.h b/include/linux/fb.h
82531index b6bfda9..1f13487 100644
82532--- a/include/linux/fb.h
82533+++ b/include/linux/fb.h
82534@@ -305,7 +305,7 @@ struct fb_ops {
82535 /* called at KDB enter and leave time to prepare the console */
82536 int (*fb_debug_enter)(struct fb_info *info);
82537 int (*fb_debug_leave)(struct fb_info *info);
82538-};
82539+} __do_const;
82540
82541 #ifdef CONFIG_FB_TILEBLITTING
82542 #define FB_TILE_CURSOR_NONE 0
82543diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
82544index 230f87b..1fd0485 100644
82545--- a/include/linux/fdtable.h
82546+++ b/include/linux/fdtable.h
82547@@ -100,7 +100,7 @@ struct files_struct *get_files_struct(struct task_struct *);
82548 void put_files_struct(struct files_struct *fs);
82549 void reset_files_struct(struct files_struct *);
82550 int unshare_files(struct files_struct **);
82551-struct files_struct *dup_fd(struct files_struct *, int *);
82552+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
82553 void do_close_on_exec(struct files_struct *);
82554 int iterate_fd(struct files_struct *, unsigned,
82555 int (*)(const void *, struct file *, unsigned),
82556diff --git a/include/linux/filter.h b/include/linux/filter.h
82557index a7e3c48..e568c8e 100644
82558--- a/include/linux/filter.h
82559+++ b/include/linux/filter.h
82560@@ -9,330 +9,28 @@
82561 #include <linux/workqueue.h>
82562 #include <uapi/linux/filter.h>
82563
82564-/* Internally used and optimized filter representation with extended
82565- * instruction set based on top of classic BPF.
82566- */
82567-
82568-/* instruction classes */
82569-#define BPF_ALU64 0x07 /* alu mode in double word width */
82570-
82571-/* ld/ldx fields */
82572-#define BPF_DW 0x18 /* double word */
82573-#define BPF_XADD 0xc0 /* exclusive add */
82574-
82575-/* alu/jmp fields */
82576-#define BPF_MOV 0xb0 /* mov reg to reg */
82577-#define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */
82578-
82579-/* change endianness of a register */
82580-#define BPF_END 0xd0 /* flags for endianness conversion: */
82581-#define BPF_TO_LE 0x00 /* convert to little-endian */
82582-#define BPF_TO_BE 0x08 /* convert to big-endian */
82583-#define BPF_FROM_LE BPF_TO_LE
82584-#define BPF_FROM_BE BPF_TO_BE
82585-
82586-#define BPF_JNE 0x50 /* jump != */
82587-#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
82588-#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
82589-#define BPF_CALL 0x80 /* function call */
82590-#define BPF_EXIT 0x90 /* function return */
82591-
82592-/* Register numbers */
82593-enum {
82594- BPF_REG_0 = 0,
82595- BPF_REG_1,
82596- BPF_REG_2,
82597- BPF_REG_3,
82598- BPF_REG_4,
82599- BPF_REG_5,
82600- BPF_REG_6,
82601- BPF_REG_7,
82602- BPF_REG_8,
82603- BPF_REG_9,
82604- BPF_REG_10,
82605- __MAX_BPF_REG,
82606-};
82607-
82608-/* BPF has 10 general purpose 64-bit registers and stack frame. */
82609-#define MAX_BPF_REG __MAX_BPF_REG
82610-
82611-/* ArgX, context and stack frame pointer register positions. Note,
82612- * Arg1, Arg2, Arg3, etc are used as argument mappings of function
82613- * calls in BPF_CALL instruction.
82614- */
82615-#define BPF_REG_ARG1 BPF_REG_1
82616-#define BPF_REG_ARG2 BPF_REG_2
82617-#define BPF_REG_ARG3 BPF_REG_3
82618-#define BPF_REG_ARG4 BPF_REG_4
82619-#define BPF_REG_ARG5 BPF_REG_5
82620-#define BPF_REG_CTX BPF_REG_6
82621-#define BPF_REG_FP BPF_REG_10
82622-
82623-/* Additional register mappings for converted user programs. */
82624-#define BPF_REG_A BPF_REG_0
82625-#define BPF_REG_X BPF_REG_7
82626-#define BPF_REG_TMP BPF_REG_8
82627-
82628-/* BPF program can access up to 512 bytes of stack space. */
82629-#define MAX_BPF_STACK 512
82630-
82631-/* Helper macros for filter block array initializers. */
82632-
82633-/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
82634-
82635-#define BPF_ALU64_REG(OP, DST, SRC) \
82636- ((struct sock_filter_int) { \
82637- .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
82638- .dst_reg = DST, \
82639- .src_reg = SRC, \
82640- .off = 0, \
82641- .imm = 0 })
82642-
82643-#define BPF_ALU32_REG(OP, DST, SRC) \
82644- ((struct sock_filter_int) { \
82645- .code = BPF_ALU | BPF_OP(OP) | BPF_X, \
82646- .dst_reg = DST, \
82647- .src_reg = SRC, \
82648- .off = 0, \
82649- .imm = 0 })
82650-
82651-/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
82652-
82653-#define BPF_ALU64_IMM(OP, DST, IMM) \
82654- ((struct sock_filter_int) { \
82655- .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
82656- .dst_reg = DST, \
82657- .src_reg = 0, \
82658- .off = 0, \
82659- .imm = IMM })
82660-
82661-#define BPF_ALU32_IMM(OP, DST, IMM) \
82662- ((struct sock_filter_int) { \
82663- .code = BPF_ALU | BPF_OP(OP) | BPF_K, \
82664- .dst_reg = DST, \
82665- .src_reg = 0, \
82666- .off = 0, \
82667- .imm = IMM })
82668-
82669-/* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
82670-
82671-#define BPF_ENDIAN(TYPE, DST, LEN) \
82672- ((struct sock_filter_int) { \
82673- .code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \
82674- .dst_reg = DST, \
82675- .src_reg = 0, \
82676- .off = 0, \
82677- .imm = LEN })
82678-
82679-/* Short form of mov, dst_reg = src_reg */
82680-
82681-#define BPF_MOV64_REG(DST, SRC) \
82682- ((struct sock_filter_int) { \
82683- .code = BPF_ALU64 | BPF_MOV | BPF_X, \
82684- .dst_reg = DST, \
82685- .src_reg = SRC, \
82686- .off = 0, \
82687- .imm = 0 })
82688-
82689-#define BPF_MOV32_REG(DST, SRC) \
82690- ((struct sock_filter_int) { \
82691- .code = BPF_ALU | BPF_MOV | BPF_X, \
82692- .dst_reg = DST, \
82693- .src_reg = SRC, \
82694- .off = 0, \
82695- .imm = 0 })
82696-
82697-/* Short form of mov, dst_reg = imm32 */
82698-
82699-#define BPF_MOV64_IMM(DST, IMM) \
82700- ((struct sock_filter_int) { \
82701- .code = BPF_ALU64 | BPF_MOV | BPF_K, \
82702- .dst_reg = DST, \
82703- .src_reg = 0, \
82704- .off = 0, \
82705- .imm = IMM })
82706-
82707-#define BPF_MOV32_IMM(DST, IMM) \
82708- ((struct sock_filter_int) { \
82709- .code = BPF_ALU | BPF_MOV | BPF_K, \
82710- .dst_reg = DST, \
82711- .src_reg = 0, \
82712- .off = 0, \
82713- .imm = IMM })
82714-
82715-/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
82716-
82717-#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \
82718- ((struct sock_filter_int) { \
82719- .code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \
82720- .dst_reg = DST, \
82721- .src_reg = SRC, \
82722- .off = 0, \
82723- .imm = IMM })
82724-
82725-#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \
82726- ((struct sock_filter_int) { \
82727- .code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \
82728- .dst_reg = DST, \
82729- .src_reg = SRC, \
82730- .off = 0, \
82731- .imm = IMM })
82732-
82733-/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */
82734-
82735-#define BPF_LD_ABS(SIZE, IMM) \
82736- ((struct sock_filter_int) { \
82737- .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
82738- .dst_reg = 0, \
82739- .src_reg = 0, \
82740- .off = 0, \
82741- .imm = IMM })
82742-
82743-/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */
82744-
82745-#define BPF_LD_IND(SIZE, SRC, IMM) \
82746- ((struct sock_filter_int) { \
82747- .code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \
82748- .dst_reg = 0, \
82749- .src_reg = SRC, \
82750- .off = 0, \
82751- .imm = IMM })
82752-
82753-/* Memory load, dst_reg = *(uint *) (src_reg + off16) */
82754-
82755-#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
82756- ((struct sock_filter_int) { \
82757- .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
82758- .dst_reg = DST, \
82759- .src_reg = SRC, \
82760- .off = OFF, \
82761- .imm = 0 })
82762-
82763-/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
82764-
82765-#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
82766- ((struct sock_filter_int) { \
82767- .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
82768- .dst_reg = DST, \
82769- .src_reg = SRC, \
82770- .off = OFF, \
82771- .imm = 0 })
82772-
82773-/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
82774-
82775-#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
82776- ((struct sock_filter_int) { \
82777- .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
82778- .dst_reg = DST, \
82779- .src_reg = 0, \
82780- .off = OFF, \
82781- .imm = IMM })
82782-
82783-/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */
82784-
82785-#define BPF_JMP_REG(OP, DST, SRC, OFF) \
82786- ((struct sock_filter_int) { \
82787- .code = BPF_JMP | BPF_OP(OP) | BPF_X, \
82788- .dst_reg = DST, \
82789- .src_reg = SRC, \
82790- .off = OFF, \
82791- .imm = 0 })
82792-
82793-/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
82794-
82795-#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
82796- ((struct sock_filter_int) { \
82797- .code = BPF_JMP | BPF_OP(OP) | BPF_K, \
82798- .dst_reg = DST, \
82799- .src_reg = 0, \
82800- .off = OFF, \
82801- .imm = IMM })
82802-
82803-/* Function call */
82804-
82805-#define BPF_EMIT_CALL(FUNC) \
82806- ((struct sock_filter_int) { \
82807- .code = BPF_JMP | BPF_CALL, \
82808- .dst_reg = 0, \
82809- .src_reg = 0, \
82810- .off = 0, \
82811- .imm = ((FUNC) - __bpf_call_base) })
82812-
82813-/* Raw code statement block */
82814-
82815-#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
82816- ((struct sock_filter_int) { \
82817- .code = CODE, \
82818- .dst_reg = DST, \
82819- .src_reg = SRC, \
82820- .off = OFF, \
82821- .imm = IMM })
82822-
82823-/* Program exit */
82824-
82825-#define BPF_EXIT_INSN() \
82826- ((struct sock_filter_int) { \
82827- .code = BPF_JMP | BPF_EXIT, \
82828- .dst_reg = 0, \
82829- .src_reg = 0, \
82830- .off = 0, \
82831- .imm = 0 })
82832-
82833-#define bytes_to_bpf_size(bytes) \
82834-({ \
82835- int bpf_size = -EINVAL; \
82836- \
82837- if (bytes == sizeof(u8)) \
82838- bpf_size = BPF_B; \
82839- else if (bytes == sizeof(u16)) \
82840- bpf_size = BPF_H; \
82841- else if (bytes == sizeof(u32)) \
82842- bpf_size = BPF_W; \
82843- else if (bytes == sizeof(u64)) \
82844- bpf_size = BPF_DW; \
82845- \
82846- bpf_size; \
82847-})
82848-
82849-/* Macro to invoke filter function. */
82850-#define SK_RUN_FILTER(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)
82851-
82852-struct sock_filter_int {
82853- __u8 code; /* opcode */
82854- __u8 dst_reg:4; /* dest register */
82855- __u8 src_reg:4; /* source register */
82856- __s16 off; /* signed offset */
82857- __s32 imm; /* signed immediate constant */
82858-};
82859-
82860 #ifdef CONFIG_COMPAT
82861-/* A struct sock_filter is architecture independent. */
82862+/*
82863+ * A struct sock_filter is architecture independent.
82864+ */
82865 struct compat_sock_fprog {
82866 u16 len;
82867- compat_uptr_t filter; /* struct sock_filter * */
82868+ compat_uptr_t filter; /* struct sock_filter * */
82869 };
82870 #endif
82871
82872-struct sock_fprog_kern {
82873- u16 len;
82874- struct sock_filter *filter;
82875-};
82876-
82877 struct sk_buff;
82878 struct sock;
82879-struct seccomp_data;
82880
82881-struct sk_filter {
82882+struct sk_filter
82883+{
82884 atomic_t refcnt;
82885- u32 jited:1, /* Is our filter JIT'ed? */
82886- len:31; /* Number of filter blocks */
82887- struct sock_fprog_kern *orig_prog; /* Original BPF program */
82888+ unsigned int len; /* Number of filter blocks */
82889 struct rcu_head rcu;
82890 unsigned int (*bpf_func)(const struct sk_buff *skb,
82891- const struct sock_filter_int *filter);
82892+ const struct sock_filter *filter);
82893 union {
82894- struct sock_filter insns[0];
82895- struct sock_filter_int insnsi[0];
82896+ struct sock_filter insns[0];
82897 struct work_struct work;
82898 };
82899 };
82900@@ -343,76 +41,25 @@ static inline unsigned int sk_filter_size(unsigned int proglen)
82901 offsetof(struct sk_filter, insns[proglen]));
82902 }
82903
82904-#define sk_filter_proglen(fprog) \
82905- (fprog->len * sizeof(fprog->filter[0]))
82906-
82907-int sk_filter(struct sock *sk, struct sk_buff *skb);
82908-
82909-void sk_filter_select_runtime(struct sk_filter *fp);
82910-void sk_filter_free(struct sk_filter *fp);
82911-
82912-int sk_convert_filter(struct sock_filter *prog, int len,
82913- struct sock_filter_int *new_prog, int *new_len);
82914-
82915-int sk_unattached_filter_create(struct sk_filter **pfp,
82916- struct sock_fprog_kern *fprog);
82917-void sk_unattached_filter_destroy(struct sk_filter *fp);
82918-
82919-int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
82920-int sk_detach_filter(struct sock *sk);
82921-
82922-int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
82923-int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
82924- unsigned int len);
82925-
82926-void sk_filter_charge(struct sock *sk, struct sk_filter *fp);
82927-void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
82928-
82929-u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
82930-void bpf_int_jit_compile(struct sk_filter *fp);
82931-
82932-#define BPF_ANC BIT(15)
82933-
82934-static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
82935-{
82936- BUG_ON(ftest->code & BPF_ANC);
82937-
82938- switch (ftest->code) {
82939- case BPF_LD | BPF_W | BPF_ABS:
82940- case BPF_LD | BPF_H | BPF_ABS:
82941- case BPF_LD | BPF_B | BPF_ABS:
82942-#define BPF_ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \
82943- return BPF_ANC | SKF_AD_##CODE
82944- switch (ftest->k) {
82945- BPF_ANCILLARY(PROTOCOL);
82946- BPF_ANCILLARY(PKTTYPE);
82947- BPF_ANCILLARY(IFINDEX);
82948- BPF_ANCILLARY(NLATTR);
82949- BPF_ANCILLARY(NLATTR_NEST);
82950- BPF_ANCILLARY(MARK);
82951- BPF_ANCILLARY(QUEUE);
82952- BPF_ANCILLARY(HATYPE);
82953- BPF_ANCILLARY(RXHASH);
82954- BPF_ANCILLARY(CPU);
82955- BPF_ANCILLARY(ALU_XOR_X);
82956- BPF_ANCILLARY(VLAN_TAG);
82957- BPF_ANCILLARY(VLAN_TAG_PRESENT);
82958- BPF_ANCILLARY(PAY_OFFSET);
82959- BPF_ANCILLARY(RANDOM);
82960- }
82961- /* Fallthrough. */
82962- default:
82963- return ftest->code;
82964- }
82965-}
82966+extern int sk_filter(struct sock *sk, struct sk_buff *skb);
82967+extern unsigned int sk_run_filter(const struct sk_buff *skb,
82968+ const struct sock_filter *filter);
82969+extern int sk_unattached_filter_create(struct sk_filter **pfp,
82970+ struct sock_fprog *fprog);
82971+extern void sk_unattached_filter_destroy(struct sk_filter *fp);
82972+extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
82973+extern int sk_detach_filter(struct sock *sk);
82974+extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
82975+extern int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned len);
82976+extern void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to);
82977
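The externs restored above form the classic-BPF lifecycle: create (or attach), run, destroy. A minimal sketch of an unattached filter using only the API from this header (hypothetical caller; BPF_STMT and the BPF_RET/BPF_K opcodes come from the UAPI filter definitions):

static int run_accept_all(struct sk_buff *skb)
{
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* accept every packet */
	};
	struct sock_fprog fprog = { .len = 1, .filter = insns };
	struct sk_filter *fp;
	int err;

	err = sk_unattached_filter_create(&fp, &fprog);
	if (err)
		return err;
	err = SK_RUN_FILTER(fp, skb);		/* nonzero: packet accepted */
	sk_unattached_filter_destroy(fp);
	return err;
}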
82978 #ifdef CONFIG_BPF_JIT
82979 #include <stdarg.h>
82980 #include <linux/linkage.h>
82981 #include <linux/printk.h>
82982
82983-void bpf_jit_compile(struct sk_filter *fp);
82984-void bpf_jit_free(struct sk_filter *fp);
82985+extern void bpf_jit_compile(struct sk_filter *fp);
82986+extern void bpf_jit_free(struct sk_filter *fp);
82987
82988 static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
82989 u32 pass, void *image)
82990@@ -423,22 +70,90 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
82991 print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
82992 16, 1, image, proglen, false);
82993 }
82994+#define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns)
82995 #else
82996 #include <linux/slab.h>
82997-
82998 static inline void bpf_jit_compile(struct sk_filter *fp)
82999 {
83000 }
83001-
83002 static inline void bpf_jit_free(struct sk_filter *fp)
83003 {
83004 kfree(fp);
83005 }
83006-#endif /* CONFIG_BPF_JIT */
83007+#define SK_RUN_FILTER(FILTER, SKB) sk_run_filter(SKB, FILTER->insns)
83008+#endif
83009
83010 static inline int bpf_tell_extensions(void)
83011 {
83012 return SKF_AD_MAX;
83013 }
83014
83015+enum {
83016+ BPF_S_RET_K = 1,
83017+ BPF_S_RET_A,
83018+ BPF_S_ALU_ADD_K,
83019+ BPF_S_ALU_ADD_X,
83020+ BPF_S_ALU_SUB_K,
83021+ BPF_S_ALU_SUB_X,
83022+ BPF_S_ALU_MUL_K,
83023+ BPF_S_ALU_MUL_X,
83024+ BPF_S_ALU_DIV_X,
83025+ BPF_S_ALU_MOD_K,
83026+ BPF_S_ALU_MOD_X,
83027+ BPF_S_ALU_AND_K,
83028+ BPF_S_ALU_AND_X,
83029+ BPF_S_ALU_OR_K,
83030+ BPF_S_ALU_OR_X,
83031+ BPF_S_ALU_XOR_K,
83032+ BPF_S_ALU_XOR_X,
83033+ BPF_S_ALU_LSH_K,
83034+ BPF_S_ALU_LSH_X,
83035+ BPF_S_ALU_RSH_K,
83036+ BPF_S_ALU_RSH_X,
83037+ BPF_S_ALU_NEG,
83038+ BPF_S_LD_W_ABS,
83039+ BPF_S_LD_H_ABS,
83040+ BPF_S_LD_B_ABS,
83041+ BPF_S_LD_W_LEN,
83042+ BPF_S_LD_W_IND,
83043+ BPF_S_LD_H_IND,
83044+ BPF_S_LD_B_IND,
83045+ BPF_S_LD_IMM,
83046+ BPF_S_LDX_W_LEN,
83047+ BPF_S_LDX_B_MSH,
83048+ BPF_S_LDX_IMM,
83049+ BPF_S_MISC_TAX,
83050+ BPF_S_MISC_TXA,
83051+ BPF_S_ALU_DIV_K,
83052+ BPF_S_LD_MEM,
83053+ BPF_S_LDX_MEM,
83054+ BPF_S_ST,
83055+ BPF_S_STX,
83056+ BPF_S_JMP_JA,
83057+ BPF_S_JMP_JEQ_K,
83058+ BPF_S_JMP_JEQ_X,
83059+ BPF_S_JMP_JGE_K,
83060+ BPF_S_JMP_JGE_X,
83061+ BPF_S_JMP_JGT_K,
83062+ BPF_S_JMP_JGT_X,
83063+ BPF_S_JMP_JSET_K,
83064+ BPF_S_JMP_JSET_X,
83065+ /* Ancillary data */
83066+ BPF_S_ANC_PROTOCOL,
83067+ BPF_S_ANC_PKTTYPE,
83068+ BPF_S_ANC_IFINDEX,
83069+ BPF_S_ANC_NLATTR,
83070+ BPF_S_ANC_NLATTR_NEST,
83071+ BPF_S_ANC_MARK,
83072+ BPF_S_ANC_QUEUE,
83073+ BPF_S_ANC_HATYPE,
83074+ BPF_S_ANC_RXHASH,
83075+ BPF_S_ANC_CPU,
83076+ BPF_S_ANC_ALU_XOR_X,
83077+ BPF_S_ANC_SECCOMP_LD_W,
83078+ BPF_S_ANC_VLAN_TAG,
83079+ BPF_S_ANC_VLAN_TAG_PRESENT,
83080+ BPF_S_ANC_PAY_OFFSET,
83081+};
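These BPF_S_* values are the kernel-internal, densely numbered forms of the user-visible opcodes; sk_chk_filter() rewrites each instruction into one of them so the interpreter can switch on a compact code. A hypothetical fragment of that translation (illustrative only, not the actual table):

static u16 decode_opcode(const struct sock_filter *ftest)
{
	switch (ftest->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
		return BPF_S_ALU_ADD_K;
	case BPF_LD | BPF_W | BPF_ABS:
		return BPF_S_LD_W_ABS;
	case BPF_RET | BPF_K:
		return BPF_S_RET_K;
	default:
		return 0;	/* unknown opcode: filter is rejected */
	}
}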
83082+
83083 #endif /* __LINUX_FILTER_H__ */
83084diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
83085index 8293262..2b3b8bd 100644
83086--- a/include/linux/frontswap.h
83087+++ b/include/linux/frontswap.h
83088@@ -11,7 +11,7 @@ struct frontswap_ops {
83089 int (*load)(unsigned, pgoff_t, struct page *);
83090 void (*invalidate_page)(unsigned, pgoff_t);
83091 void (*invalidate_area)(unsigned);
83092-};
83093+} __no_const;
83094
83095 extern bool frontswap_enabled;
83096 extern struct frontswap_ops *
83097diff --git a/include/linux/fs.h b/include/linux/fs.h
83098index e11d60c..901317a 100644
83099--- a/include/linux/fs.h
83100+++ b/include/linux/fs.h
83101@@ -401,7 +401,7 @@ struct address_space {
83102 spinlock_t private_lock; /* for use by the address_space */
83103 struct list_head private_list; /* ditto */
83104 void *private_data; /* ditto */
83105-} __attribute__((aligned(sizeof(long))));
83106+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
83107 /*
83108 * On most architectures that alignment is already the case; but
83109 * must be enforced here for CRIS, to let the least significant bit
83110@@ -444,7 +444,7 @@ struct block_device {
83111 int bd_fsfreeze_count;
83112 /* Mutex for freeze */
83113 struct mutex bd_fsfreeze_mutex;
83114-};
83115+} __randomize_layout;
83116
83117 /*
83118 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
83119@@ -588,7 +588,7 @@ struct inode {
83120 #endif
83121
83122 void *i_private; /* fs or device private pointer */
83123-};
83124+} __randomize_layout;
83125
83126 static inline int inode_unhashed(struct inode *inode)
83127 {
83128@@ -781,7 +781,7 @@ struct file {
83129 struct list_head f_tfile_llink;
83130 #endif /* #ifdef CONFIG_EPOLL */
83131 struct address_space *f_mapping;
83132-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
83133+} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */
83134
83135 struct file_handle {
83136 __u32 handle_bytes;
83137@@ -909,7 +909,7 @@ struct file_lock {
83138 int state; /* state of grant or error if -ve */
83139 } afs;
83140 } fl_u;
83141-};
83142+} __randomize_layout;
83143
83144 /* The following constant reflects the upper bound of the file/locking space */
83145 #ifndef OFFSET_MAX
83146@@ -1258,7 +1258,7 @@ struct super_block {
83147 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
83148 struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
83149 struct rcu_head rcu;
83150-};
83151+} __randomize_layout;
83152
83153 extern struct timespec current_fs_time(struct super_block *sb);
83154
83155@@ -1484,7 +1484,8 @@ struct file_operations {
83156 long (*fallocate)(struct file *file, int mode, loff_t offset,
83157 loff_t len);
83158 int (*show_fdinfo)(struct seq_file *m, struct file *f);
83159-};
83160+} __do_const __randomize_layout;
83161+typedef struct file_operations __no_const file_operations_no_const;
83162
83163 struct inode_operations {
83164 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
83165@@ -2769,4 +2770,14 @@ static inline bool dir_relax(struct inode *inode)
83166 return !IS_DEADDIR(inode);
83167 }
83168
83169+static inline bool is_sidechannel_device(const struct inode *inode)
83170+{
83171+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
83172+ umode_t mode = inode->i_mode;
83173+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
83174+#else
83175+ return false;
83176+#endif
83177+}
83178+
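is_sidechannel_device() flags device nodes whose "other" permission bits make them observable by unrelated users; the fsnotify hunks later in this patch use it to suppress access and modify events on such nodes. Illustrative cases (hypothetical, assuming CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL=y):

/*
 *   crw-rw-rw-  character device  -> true  (S_ISCHR and o+rw)
 *   brw-rw----  block device      -> false (no "other" bits set)
 *   -rw-rw-rw-  regular file      -> false (neither S_ISCHR nor S_ISBLK)
 */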
83179 #endif /* _LINUX_FS_H */
83180diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
83181index 0efc3e6..fd23610 100644
83182--- a/include/linux/fs_struct.h
83183+++ b/include/linux/fs_struct.h
83184@@ -6,13 +6,13 @@
83185 #include <linux/seqlock.h>
83186
83187 struct fs_struct {
83188- int users;
83189+ atomic_t users;
83190 spinlock_t lock;
83191 seqcount_t seq;
83192 int umask;
83193 int in_exec;
83194 struct path root, pwd;
83195-};
83196+} __randomize_layout;
83197
83198 extern struct kmem_cache *fs_cachep;
83199
83200diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
83201index 7714849..a4a5c7a 100644
83202--- a/include/linux/fscache-cache.h
83203+++ b/include/linux/fscache-cache.h
83204@@ -113,7 +113,7 @@ struct fscache_operation {
83205 fscache_operation_release_t release;
83206 };
83207
83208-extern atomic_t fscache_op_debug_id;
83209+extern atomic_unchecked_t fscache_op_debug_id;
83210 extern void fscache_op_work_func(struct work_struct *work);
83211
83212 extern void fscache_enqueue_operation(struct fscache_operation *);
83213@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
83214 INIT_WORK(&op->work, fscache_op_work_func);
83215 atomic_set(&op->usage, 1);
83216 op->state = FSCACHE_OP_ST_INITIALISED;
83217- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
83218+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
83219 op->processor = processor;
83220 op->release = release;
83221 INIT_LIST_HEAD(&op->pend_link);
83222diff --git a/include/linux/fscache.h b/include/linux/fscache.h
83223index 115bb81..e7b812b 100644
83224--- a/include/linux/fscache.h
83225+++ b/include/linux/fscache.h
83226@@ -152,7 +152,7 @@ struct fscache_cookie_def {
83227 * - this is mandatory for any object that may have data
83228 */
83229 void (*now_uncached)(void *cookie_netfs_data);
83230-};
83231+} __do_const;
83232
83233 /*
83234 * fscache cached network filesystem type
83235diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
83236index 1c804b0..1432c2b 100644
83237--- a/include/linux/fsnotify.h
83238+++ b/include/linux/fsnotify.h
83239@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
83240 struct inode *inode = file_inode(file);
83241 __u32 mask = FS_ACCESS;
83242
83243+ if (is_sidechannel_device(inode))
83244+ return;
83245+
83246 if (S_ISDIR(inode->i_mode))
83247 mask |= FS_ISDIR;
83248
83249@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
83250 struct inode *inode = file_inode(file);
83251 __u32 mask = FS_MODIFY;
83252
83253+ if (is_sidechannel_device(inode))
83254+ return;
83255+
83256 if (S_ISDIR(inode->i_mode))
83257 mask |= FS_ISDIR;
83258
83259@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
83260 */
83261 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
83262 {
83263- return kstrdup(name, GFP_KERNEL);
83264+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
83265 }
83266
83267 /*
83268diff --git a/include/linux/genhd.h b/include/linux/genhd.h
83269index ec274e0..e678159 100644
83270--- a/include/linux/genhd.h
83271+++ b/include/linux/genhd.h
83272@@ -194,7 +194,7 @@ struct gendisk {
83273 struct kobject *slave_dir;
83274
83275 struct timer_rand_state *random;
83276- atomic_t sync_io; /* RAID */
83277+ atomic_unchecked_t sync_io; /* RAID */
83278 struct disk_events *ev;
83279 #ifdef CONFIG_BLK_DEV_INTEGRITY
83280 struct blk_integrity *integrity;
83281@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
83282 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
83283
83284 /* drivers/char/random.c */
83285-extern void add_disk_randomness(struct gendisk *disk);
83286+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
83287 extern void rand_initialize_disk(struct gendisk *disk);
83288
83289 static inline sector_t get_start_sect(struct block_device *bdev)
83290diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
83291index c0894dd..2fbf10c 100644
83292--- a/include/linux/genl_magic_func.h
83293+++ b/include/linux/genl_magic_func.h
83294@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
83295 },
83296
83297 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
83298-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
83299+static struct genl_ops ZZZ_genl_ops[] = {
83300 #include GENL_MAGIC_INCLUDE_FILE
83301 };
83302
83303diff --git a/include/linux/gfp.h b/include/linux/gfp.h
83304index 6eb1fb3..30fe7e4 100644
83305--- a/include/linux/gfp.h
83306+++ b/include/linux/gfp.h
83307@@ -34,6 +34,13 @@ struct vm_area_struct;
83308 #define ___GFP_NO_KSWAPD 0x400000u
83309 #define ___GFP_OTHER_NODE 0x800000u
83310 #define ___GFP_WRITE 0x1000000u
83311+
83312+#ifdef CONFIG_PAX_USERCOPY_SLABS
83313+#define ___GFP_USERCOPY 0x2000000u
83314+#else
83315+#define ___GFP_USERCOPY 0
83316+#endif
83317+
83318 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
83319
83320 /*
83321@@ -90,6 +97,7 @@ struct vm_area_struct;
83322 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
83323 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
83324 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
83325+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
83326
83327 /*
83328 * This may seem redundant, but it's a way of annotating false positives vs.
83329@@ -97,7 +105,7 @@ struct vm_area_struct;
83330 */
83331 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
83332
83333-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
83334+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
83335 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
83336
83337 /* This equals 0, but use constants in case they ever change */
83338@@ -155,6 +163,8 @@ struct vm_area_struct;
83339 /* 4GB DMA on some platforms */
83340 #define GFP_DMA32 __GFP_DMA32
83341
83342+#define GFP_USERCOPY __GFP_USERCOPY
83343+
83344 /* Convert GFP flags to their corresponding migrate type */
83345 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
83346 {
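__GFP_USERCOPY tags allocations whose contents will cross the user/kernel boundary so PAX_USERCOPY_SLABS can segregate them into whitelisted slabs. A minimal sketch of a tagged allocation (hypothetical helper, not from the patch; assumes <linux/slab.h> and <linux/err.h>):

static void *alloc_for_user_io(const void __user *uptr, size_t len)
{
	/* the GFP_USERCOPY tag routes this to a usercopy-whitelisted slab */
	void *buf = kmalloc(len, GFP_KERNEL | GFP_USERCOPY);

	if (!buf)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(buf, uptr, len)) {
		kfree(buf);
		return ERR_PTR(-EFAULT);
	}
	return buf;
}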
83347diff --git a/include/linux/gracl.h b/include/linux/gracl.h
83348new file mode 100644
83349index 0000000..edb2cb6
83350--- /dev/null
83351+++ b/include/linux/gracl.h
83352@@ -0,0 +1,340 @@
83353+#ifndef GR_ACL_H
83354+#define GR_ACL_H
83355+
83356+#include <linux/grdefs.h>
83357+#include <linux/resource.h>
83358+#include <linux/capability.h>
83359+#include <linux/dcache.h>
83360+#include <asm/resource.h>
83361+
83362+/* Major status information */
83363+
83364+#define GR_VERSION "grsecurity 3.0"
83365+#define GRSECURITY_VERSION 0x3000
83366+
83367+enum {
83368+ GR_SHUTDOWN = 0,
83369+ GR_ENABLE = 1,
83370+ GR_SPROLE = 2,
83371+ GR_OLDRELOAD = 3,
83372+ GR_SEGVMOD = 4,
83373+ GR_STATUS = 5,
83374+ GR_UNSPROLE = 6,
83375+ GR_PASSSET = 7,
83376+ GR_SPROLEPAM = 8,
83377+ GR_RELOAD = 9,
83378+};
83379+
83380+/* Password setup definitions
83381+ * kernel/grhash.c */
83382+enum {
83383+ GR_PW_LEN = 128,
83384+ GR_SALT_LEN = 16,
83385+ GR_SHA_LEN = 32,
83386+};
83387+
83388+enum {
83389+ GR_SPROLE_LEN = 64,
83390+};
83391+
83392+enum {
83393+ GR_NO_GLOB = 0,
83394+ GR_REG_GLOB,
83395+ GR_CREATE_GLOB
83396+};
83397+
83398+#define GR_NLIMITS 32
83399+
83400+/* Begin Data Structures */
83401+
83402+struct sprole_pw {
83403+ unsigned char *rolename;
83404+ unsigned char salt[GR_SALT_LEN];
83405+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
83406+};
83407+
83408+struct name_entry {
83409+ __u32 key;
83410+ ino_t inode;
83411+ dev_t device;
83412+ char *name;
83413+ __u16 len;
83414+ __u8 deleted;
83415+ struct name_entry *prev;
83416+ struct name_entry *next;
83417+};
83418+
83419+struct inodev_entry {
83420+ struct name_entry *nentry;
83421+ struct inodev_entry *prev;
83422+ struct inodev_entry *next;
83423+};
83424+
83425+struct acl_role_db {
83426+ struct acl_role_label **r_hash;
83427+ __u32 r_size;
83428+};
83429+
83430+struct inodev_db {
83431+ struct inodev_entry **i_hash;
83432+ __u32 i_size;
83433+};
83434+
83435+struct name_db {
83436+ struct name_entry **n_hash;
83437+ __u32 n_size;
83438+};
83439+
83440+struct crash_uid {
83441+ uid_t uid;
83442+ unsigned long expires;
83443+};
83444+
83445+struct gr_hash_struct {
83446+ void **table;
83447+ void **nametable;
83448+ void *first;
83449+ __u32 table_size;
83450+ __u32 used_size;
83451+ int type;
83452+};
83453+
83454+/* Userspace Grsecurity ACL data structures */
83455+
83456+struct acl_subject_label {
83457+ char *filename;
83458+ ino_t inode;
83459+ dev_t device;
83460+ __u32 mode;
83461+ kernel_cap_t cap_mask;
83462+ kernel_cap_t cap_lower;
83463+ kernel_cap_t cap_invert_audit;
83464+
83465+ struct rlimit res[GR_NLIMITS];
83466+ __u32 resmask;
83467+
83468+ __u8 user_trans_type;
83469+ __u8 group_trans_type;
83470+ uid_t *user_transitions;
83471+ gid_t *group_transitions;
83472+ __u16 user_trans_num;
83473+ __u16 group_trans_num;
83474+
83475+ __u32 sock_families[2];
83476+ __u32 ip_proto[8];
83477+ __u32 ip_type;
83478+ struct acl_ip_label **ips;
83479+ __u32 ip_num;
83480+ __u32 inaddr_any_override;
83481+
83482+ __u32 crashes;
83483+ unsigned long expires;
83484+
83485+ struct acl_subject_label *parent_subject;
83486+ struct gr_hash_struct *hash;
83487+ struct acl_subject_label *prev;
83488+ struct acl_subject_label *next;
83489+
83490+ struct acl_object_label **obj_hash;
83491+ __u32 obj_hash_size;
83492+ __u16 pax_flags;
83493+};
83494+
83495+struct role_allowed_ip {
83496+ __u32 addr;
83497+ __u32 netmask;
83498+
83499+ struct role_allowed_ip *prev;
83500+ struct role_allowed_ip *next;
83501+};
83502+
83503+struct role_transition {
83504+ char *rolename;
83505+
83506+ struct role_transition *prev;
83507+ struct role_transition *next;
83508+};
83509+
83510+struct acl_role_label {
83511+ char *rolename;
83512+ uid_t uidgid;
83513+ __u16 roletype;
83514+
83515+ __u16 auth_attempts;
83516+ unsigned long expires;
83517+
83518+ struct acl_subject_label *root_label;
83519+ struct gr_hash_struct *hash;
83520+
83521+ struct acl_role_label *prev;
83522+ struct acl_role_label *next;
83523+
83524+ struct role_transition *transitions;
83525+ struct role_allowed_ip *allowed_ips;
83526+ uid_t *domain_children;
83527+ __u16 domain_child_num;
83528+
83529+ umode_t umask;
83530+
83531+ struct acl_subject_label **subj_hash;
83532+ __u32 subj_hash_size;
83533+};
83534+
83535+struct user_acl_role_db {
83536+ struct acl_role_label **r_table;
83537+ __u32 num_pointers; /* Number of allocations to track */
83538+ __u32 num_roles; /* Number of roles */
83539+ __u32 num_domain_children; /* Number of domain children */
83540+ __u32 num_subjects; /* Number of subjects */
83541+ __u32 num_objects; /* Number of objects */
83542+};
83543+
83544+struct acl_object_label {
83545+ char *filename;
83546+ ino_t inode;
83547+ dev_t device;
83548+ __u32 mode;
83549+
83550+ struct acl_subject_label *nested;
83551+ struct acl_object_label *globbed;
83552+
83553+ /* next two structures not used */
83554+
83555+ struct acl_object_label *prev;
83556+ struct acl_object_label *next;
83557+};
83558+
83559+struct acl_ip_label {
83560+ char *iface;
83561+ __u32 addr;
83562+ __u32 netmask;
83563+ __u16 low, high;
83564+ __u8 mode;
83565+ __u32 type;
83566+ __u32 proto[8];
83567+
83568+ /* next two structures not used */
83569+
83570+ struct acl_ip_label *prev;
83571+ struct acl_ip_label *next;
83572+};
83573+
83574+struct gr_arg {
83575+ struct user_acl_role_db role_db;
83576+ unsigned char pw[GR_PW_LEN];
83577+ unsigned char salt[GR_SALT_LEN];
83578+ unsigned char sum[GR_SHA_LEN];
83579+ unsigned char sp_role[GR_SPROLE_LEN];
83580+ struct sprole_pw *sprole_pws;
83581+ dev_t segv_device;
83582+ ino_t segv_inode;
83583+ uid_t segv_uid;
83584+ __u16 num_sprole_pws;
83585+ __u16 mode;
83586+};
83587+
83588+struct gr_arg_wrapper {
83589+ struct gr_arg *arg;
83590+ __u32 version;
83591+ __u32 size;
83592+};
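gr_arg_wrapper is the header userspace hands to /dev/grsec when uploading a policy, so the loader can reject a mismatched gradm before touching the payload. A sketch of the plausible validation (hypothetical; the real check lives in grsecurity's gracl.c):

static int check_policy_header(const struct gr_arg_wrapper *uwrap)
{
	if (uwrap->version != GRSECURITY_VERSION)
		return -EINVAL;		/* gradm built for another release */
	if (uwrap->size != sizeof(struct gr_arg))
		return -EINVAL;		/* layout mismatch */
	return 0;
}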
83593+
83594+struct subject_map {
83595+ struct acl_subject_label *user;
83596+ struct acl_subject_label *kernel;
83597+ struct subject_map *prev;
83598+ struct subject_map *next;
83599+};
83600+
83601+struct acl_subj_map_db {
83602+ struct subject_map **s_hash;
83603+ __u32 s_size;
83604+};
83605+
83606+struct gr_policy_state {
83607+ struct sprole_pw **acl_special_roles;
83608+ __u16 num_sprole_pws;
83609+ struct acl_role_label *kernel_role;
83610+ struct acl_role_label *role_list;
83611+ struct acl_role_label *default_role;
83612+ struct acl_role_db acl_role_set;
83613+ struct acl_subj_map_db subj_map_set;
83614+ struct name_db name_set;
83615+ struct inodev_db inodev_set;
83616+};
83617+
83618+struct gr_alloc_state {
83619+ unsigned long alloc_stack_next;
83620+ unsigned long alloc_stack_size;
83621+ void **alloc_stack;
83622+};
83623+
83624+struct gr_reload_state {
83625+ struct gr_policy_state oldpolicy;
83626+ struct gr_alloc_state oldalloc;
83627+ struct gr_policy_state newpolicy;
83628+ struct gr_alloc_state newalloc;
83629+ struct gr_policy_state *oldpolicy_ptr;
83630+ struct gr_alloc_state *oldalloc_ptr;
83631+ unsigned char oldmode;
83632+};
83633+
83634+/* End Data Structures Section */
83635+
83636+/* Hash functions generated by empirical testing by Brad Spengler.
83637+   Makes good use of the low bits of the inode. Generally 0-1 loop
83638+   iterations for a successful match, 0-3 for an unsuccessful match.
83639+   Shift/add algorithm with modulus of table size and an XOR. */
83640+
83641+static __inline__ unsigned int
83642+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
83643+{
83644+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
83645+}
83646+
83647+static __inline__ unsigned int
83648+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
83649+{
83650+ return ((const unsigned long)userp % sz);
83651+}
83652+
83653+static __inline__ unsigned int
83654+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
83655+{
83656+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
83657+}
83658+
83659+static __inline__ unsigned int
83660+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
83661+{
83662+ return full_name_hash((const unsigned char *)name, len) % sz;
83663+}
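All four helpers reduce a key to a bucket index, and each hash table chains collisions through the per-entry next pointers. A sketch of the resulting lookup pattern, using gr_fhash() over an inodev_db (hypothetical; the real walk lives in gracl.c):

static struct inodev_entry *find_inodev(struct inodev_db *set,
					ino_t ino, dev_t dev)
{
	unsigned int idx = gr_fhash(ino, dev, set->i_size);
	struct inodev_entry *e;

	for (e = set->i_hash[idx]; e != NULL; e = e->next)
		if (e->nentry->inode == ino && e->nentry->device == dev)
			return e;
	return NULL;
}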
83664+
83665+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
83666+ subj = NULL; \
83667+ iter = 0; \
83668+ while (iter < role->subj_hash_size) { \
83669+ if (subj == NULL) \
83670+ subj = role->subj_hash[iter]; \
83671+ if (subj == NULL) { \
83672+ iter++; \
83673+ continue; \
83674+ }
83675+
83676+#define FOR_EACH_SUBJECT_END(subj,iter) \
83677+ subj = subj->next; \
83678+ if (subj == NULL) \
83679+ iter++; \
83680+ }
83681+
83682+
83683+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
83684+ subj = role->hash->first; \
83685+ while (subj != NULL) {
83686+
83687+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
83688+ subj = subj->next; \
83689+ }
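The FOR_EACH_SUBJECT_* pair expands to an open-coded walk over a role's subject hash, draining each collision chain before moving to the next bucket. A hypothetical usage sketch:

static unsigned int count_subjects(struct acl_role_label *role)
{
	struct acl_subject_label *subj;
	unsigned int iter, n = 0;

	FOR_EACH_SUBJECT_START(role, subj, iter)
		n++;
	FOR_EACH_SUBJECT_END(subj, iter)

	return n;
}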
83690+
83691+#endif
83692+
83693diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
83694new file mode 100644
83695index 0000000..33ebd1f
83696--- /dev/null
83697+++ b/include/linux/gracl_compat.h
83698@@ -0,0 +1,156 @@
83699+#ifndef GR_ACL_COMPAT_H
83700+#define GR_ACL_COMPAT_H
83701+
83702+#include <linux/resource.h>
83703+#include <asm/resource.h>
83704+
83705+struct sprole_pw_compat {
83706+ compat_uptr_t rolename;
83707+ unsigned char salt[GR_SALT_LEN];
83708+ unsigned char sum[GR_SHA_LEN];
83709+};
83710+
83711+struct gr_hash_struct_compat {
83712+ compat_uptr_t table;
83713+ compat_uptr_t nametable;
83714+ compat_uptr_t first;
83715+ __u32 table_size;
83716+ __u32 used_size;
83717+ int type;
83718+};
83719+
83720+struct acl_subject_label_compat {
83721+ compat_uptr_t filename;
83722+ compat_ino_t inode;
83723+ __u32 device;
83724+ __u32 mode;
83725+ kernel_cap_t cap_mask;
83726+ kernel_cap_t cap_lower;
83727+ kernel_cap_t cap_invert_audit;
83728+
83729+ struct compat_rlimit res[GR_NLIMITS];
83730+ __u32 resmask;
83731+
83732+ __u8 user_trans_type;
83733+ __u8 group_trans_type;
83734+ compat_uptr_t user_transitions;
83735+ compat_uptr_t group_transitions;
83736+ __u16 user_trans_num;
83737+ __u16 group_trans_num;
83738+
83739+ __u32 sock_families[2];
83740+ __u32 ip_proto[8];
83741+ __u32 ip_type;
83742+ compat_uptr_t ips;
83743+ __u32 ip_num;
83744+ __u32 inaddr_any_override;
83745+
83746+ __u32 crashes;
83747+ compat_ulong_t expires;
83748+
83749+ compat_uptr_t parent_subject;
83750+ compat_uptr_t hash;
83751+ compat_uptr_t prev;
83752+ compat_uptr_t next;
83753+
83754+ compat_uptr_t obj_hash;
83755+ __u32 obj_hash_size;
83756+ __u16 pax_flags;
83757+};
83758+
83759+struct role_allowed_ip_compat {
83760+ __u32 addr;
83761+ __u32 netmask;
83762+
83763+ compat_uptr_t prev;
83764+ compat_uptr_t next;
83765+};
83766+
83767+struct role_transition_compat {
83768+ compat_uptr_t rolename;
83769+
83770+ compat_uptr_t prev;
83771+ compat_uptr_t next;
83772+};
83773+
83774+struct acl_role_label_compat {
83775+ compat_uptr_t rolename;
83776+ uid_t uidgid;
83777+ __u16 roletype;
83778+
83779+ __u16 auth_attempts;
83780+ compat_ulong_t expires;
83781+
83782+ compat_uptr_t root_label;
83783+ compat_uptr_t hash;
83784+
83785+ compat_uptr_t prev;
83786+ compat_uptr_t next;
83787+
83788+ compat_uptr_t transitions;
83789+ compat_uptr_t allowed_ips;
83790+ compat_uptr_t domain_children;
83791+ __u16 domain_child_num;
83792+
83793+ umode_t umask;
83794+
83795+ compat_uptr_t subj_hash;
83796+ __u32 subj_hash_size;
83797+};
83798+
83799+struct user_acl_role_db_compat {
83800+ compat_uptr_t r_table;
83801+ __u32 num_pointers;
83802+ __u32 num_roles;
83803+ __u32 num_domain_children;
83804+ __u32 num_subjects;
83805+ __u32 num_objects;
83806+};
83807+
83808+struct acl_object_label_compat {
83809+ compat_uptr_t filename;
83810+ compat_ino_t inode;
83811+ __u32 device;
83812+ __u32 mode;
83813+
83814+ compat_uptr_t nested;
83815+ compat_uptr_t globbed;
83816+
83817+ compat_uptr_t prev;
83818+ compat_uptr_t next;
83819+};
83820+
83821+struct acl_ip_label_compat {
83822+ compat_uptr_t iface;
83823+ __u32 addr;
83824+ __u32 netmask;
83825+ __u16 low, high;
83826+ __u8 mode;
83827+ __u32 type;
83828+ __u32 proto[8];
83829+
83830+ compat_uptr_t prev;
83831+ compat_uptr_t next;
83832+};
83833+
83834+struct gr_arg_compat {
83835+ struct user_acl_role_db_compat role_db;
83836+ unsigned char pw[GR_PW_LEN];
83837+ unsigned char salt[GR_SALT_LEN];
83838+ unsigned char sum[GR_SHA_LEN];
83839+ unsigned char sp_role[GR_SPROLE_LEN];
83840+ compat_uptr_t sprole_pws;
83841+ __u32 segv_device;
83842+ compat_ino_t segv_inode;
83843+ uid_t segv_uid;
83844+ __u16 num_sprole_pws;
83845+ __u16 mode;
83846+};
83847+
83848+struct gr_arg_wrapper_compat {
83849+ compat_uptr_t arg;
83850+ __u32 version;
83851+ __u32 size;
83852+};
83853+
83854+#endif
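Each _compat struct mirrors its native counterpart with pointers narrowed to compat_uptr_t and longs to compat_ulong_t, so a 32-bit gradm can drive a 64-bit kernel. A sketch of the widening a compat loader would perform on the wrapper (hypothetical helper modeled on that conversion):

static int copy_wrapper_compat(struct gr_arg_wrapper *uwrap,
			       const void __user *buf)
{
	struct gr_arg_wrapper_compat wrap;

	if (copy_from_user(&wrap, buf, sizeof(wrap)))
		return -EFAULT;

	uwrap->arg = compat_ptr(wrap.arg);	/* widen 32-bit user pointer */
	uwrap->version = wrap.version;
	uwrap->size = wrap.size;
	return 0;
}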
83855diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
83856new file mode 100644
83857index 0000000..323ecf2
83858--- /dev/null
83859+++ b/include/linux/gralloc.h
83860@@ -0,0 +1,9 @@
83861+#ifndef __GRALLOC_H
83862+#define __GRALLOC_H
83863+
83864+void acl_free_all(void);
83865+int acl_alloc_stack_init(unsigned long size);
83866+void *acl_alloc(unsigned long len);
83867+void *acl_alloc_num(unsigned long num, unsigned long len);
83868+
83869+#endif
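The gralloc API is an arena-style allocator: every acl_alloc() made while a policy loads is tracked on one stack and released in a single acl_free_all() sweep. A hypothetical usage sketch (assuming acl_alloc_stack_init() returns nonzero on success):

static int demo_policy_alloc(void)
{
	if (!acl_alloc_stack_init(1024))
		return -ENOMEM;

	if (acl_alloc(GR_SPROLE_LEN) == NULL) {
		acl_free_all();
		return -ENOMEM;
	}

	acl_free_all();		/* frees everything acl_alloc() handed out */
	return 0;
}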
83870diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
83871new file mode 100644
83872index 0000000..be66033
83873--- /dev/null
83874+++ b/include/linux/grdefs.h
83875@@ -0,0 +1,140 @@
83876+#ifndef GRDEFS_H
83877+#define GRDEFS_H
83878+
83879+/* Begin grsecurity status declarations */
83880+
83881+enum {
83882+ GR_READY = 0x01,
83883+ GR_STATUS_INIT = 0x00 // disabled state
83884+};
83885+
83886+/* Begin ACL declarations */
83887+
83888+/* Role flags */
83889+
83890+enum {
83891+ GR_ROLE_USER = 0x0001,
83892+ GR_ROLE_GROUP = 0x0002,
83893+ GR_ROLE_DEFAULT = 0x0004,
83894+ GR_ROLE_SPECIAL = 0x0008,
83895+ GR_ROLE_AUTH = 0x0010,
83896+ GR_ROLE_NOPW = 0x0020,
83897+ GR_ROLE_GOD = 0x0040,
83898+ GR_ROLE_LEARN = 0x0080,
83899+ GR_ROLE_TPE = 0x0100,
83900+ GR_ROLE_DOMAIN = 0x0200,
83901+ GR_ROLE_PAM = 0x0400,
83902+ GR_ROLE_PERSIST = 0x0800
83903+};
83904+
83905+/* ACL Subject and Object mode flags */
83906+enum {
83907+ GR_DELETED = 0x80000000
83908+};
83909+
83910+/* ACL Object-only mode flags */
83911+enum {
83912+ GR_READ = 0x00000001,
83913+ GR_APPEND = 0x00000002,
83914+ GR_WRITE = 0x00000004,
83915+ GR_EXEC = 0x00000008,
83916+ GR_FIND = 0x00000010,
83917+ GR_INHERIT = 0x00000020,
83918+ GR_SETID = 0x00000040,
83919+ GR_CREATE = 0x00000080,
83920+ GR_DELETE = 0x00000100,
83921+ GR_LINK = 0x00000200,
83922+ GR_AUDIT_READ = 0x00000400,
83923+ GR_AUDIT_APPEND = 0x00000800,
83924+ GR_AUDIT_WRITE = 0x00001000,
83925+ GR_AUDIT_EXEC = 0x00002000,
83926+ GR_AUDIT_FIND = 0x00004000,
83927+ GR_AUDIT_INHERIT= 0x00008000,
83928+ GR_AUDIT_SETID = 0x00010000,
83929+ GR_AUDIT_CREATE = 0x00020000,
83930+ GR_AUDIT_DELETE = 0x00040000,
83931+ GR_AUDIT_LINK = 0x00080000,
83932+ GR_PTRACERD = 0x00100000,
83933+ GR_NOPTRACE = 0x00200000,
83934+ GR_SUPPRESS = 0x00400000,
83935+ GR_NOLEARN = 0x00800000,
83936+ GR_INIT_TRANSFER= 0x01000000
83937+};
83938+
83939+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
83940+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
83941+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
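Note the encoding: each GR_AUDIT_* flag is its access flag shifted left by ten bits (GR_READ 0x1 -> GR_AUDIT_READ 0x400, GR_WRITE 0x4 -> GR_AUDIT_WRITE 0x1000), so converting a requested mode into its audit form is a shift and mask. A sketch of what to_gr_audit(), declared in grinternal.h below, plausibly computes (hypothetical, not the confirmed implementation):

static __u32 to_gr_audit_sketch(__u32 reqmode)
{
	return (reqmode << 10) & GR_AUDITS;
}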
83942+
83943+/* ACL subject-only mode flags */
83944+enum {
83945+ GR_KILL = 0x00000001,
83946+ GR_VIEW = 0x00000002,
83947+ GR_PROTECTED = 0x00000004,
83948+ GR_LEARN = 0x00000008,
83949+ GR_OVERRIDE = 0x00000010,
83950+ /* just a placeholder, this mode is only used in userspace */
83951+ GR_DUMMY = 0x00000020,
83952+ GR_PROTSHM = 0x00000040,
83953+ GR_KILLPROC = 0x00000080,
83954+ GR_KILLIPPROC = 0x00000100,
83955+ /* just a placeholder, this mode is only used in userspace */
83956+ GR_NOTROJAN = 0x00000200,
83957+ GR_PROTPROCFD = 0x00000400,
83958+ GR_PROCACCT = 0x00000800,
83959+ GR_RELAXPTRACE = 0x00001000,
83960+ //GR_NESTED = 0x00002000,
83961+ GR_INHERITLEARN = 0x00004000,
83962+ GR_PROCFIND = 0x00008000,
83963+ GR_POVERRIDE = 0x00010000,
83964+ GR_KERNELAUTH = 0x00020000,
83965+ GR_ATSECURE = 0x00040000,
83966+ GR_SHMEXEC = 0x00080000
83967+};
83968+
83969+enum {
83970+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
83971+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
83972+ GR_PAX_ENABLE_MPROTECT = 0x0004,
83973+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
83974+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
83975+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
83976+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
83977+ GR_PAX_DISABLE_MPROTECT = 0x0400,
83978+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
83979+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
83980+};
83981+
83982+enum {
83983+ GR_ID_USER = 0x01,
83984+ GR_ID_GROUP = 0x02,
83985+};
83986+
83987+enum {
83988+ GR_ID_ALLOW = 0x01,
83989+ GR_ID_DENY = 0x02,
83990+};
83991+
83992+#define GR_CRASH_RES 31
83993+#define GR_UIDTABLE_MAX 500
83994+
83995+/* begin resource learning section */
83996+enum {
83997+ GR_RLIM_CPU_BUMP = 60,
83998+ GR_RLIM_FSIZE_BUMP = 50000,
83999+ GR_RLIM_DATA_BUMP = 10000,
84000+ GR_RLIM_STACK_BUMP = 1000,
84001+ GR_RLIM_CORE_BUMP = 10000,
84002+ GR_RLIM_RSS_BUMP = 500000,
84003+ GR_RLIM_NPROC_BUMP = 1,
84004+ GR_RLIM_NOFILE_BUMP = 5,
84005+ GR_RLIM_MEMLOCK_BUMP = 50000,
84006+ GR_RLIM_AS_BUMP = 500000,
84007+ GR_RLIM_LOCKS_BUMP = 2,
84008+ GR_RLIM_SIGPENDING_BUMP = 5,
84009+ GR_RLIM_MSGQUEUE_BUMP = 10000,
84010+ GR_RLIM_NICE_BUMP = 1,
84011+ GR_RLIM_RTPRIO_BUMP = 1,
84012+ GR_RLIM_RTTIME_BUMP = 1000000
84013+};
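In learning mode these bumps pad the high-water mark observed for each resource, so the generated policy leaves headroom instead of pinning a process at exactly what it used. A hypothetical sketch of that rounding (illustrative mapping only):

static unsigned long learned_limit(int resource, unsigned long observed)
{
	switch (resource) {
	case RLIMIT_CPU:	return observed + GR_RLIM_CPU_BUMP;
	case RLIMIT_NOFILE:	return observed + GR_RLIM_NOFILE_BUMP;
	case RLIMIT_NPROC:	return observed + GR_RLIM_NPROC_BUMP;
	default:		return observed;
	}
}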
84014+
84015+#endif
84016diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
84017new file mode 100644
84018index 0000000..d25522e
84019--- /dev/null
84020+++ b/include/linux/grinternal.h
84021@@ -0,0 +1,229 @@
84022+#ifndef __GRINTERNAL_H
84023+#define __GRINTERNAL_H
84024+
84025+#ifdef CONFIG_GRKERNSEC
84026+
84027+#include <linux/fs.h>
84028+#include <linux/mnt_namespace.h>
84029+#include <linux/nsproxy.h>
84030+#include <linux/gracl.h>
84031+#include <linux/grdefs.h>
84032+#include <linux/grmsg.h>
84033+
84034+void gr_add_learn_entry(const char *fmt, ...)
84035+ __attribute__ ((format (printf, 1, 2)));
84036+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
84037+ const struct vfsmount *mnt);
84038+__u32 gr_check_create(const struct dentry *new_dentry,
84039+ const struct dentry *parent,
84040+ const struct vfsmount *mnt, const __u32 mode);
84041+int gr_check_protected_task(const struct task_struct *task);
84042+__u32 to_gr_audit(const __u32 reqmode);
84043+int gr_set_acls(const int type);
84044+int gr_acl_is_enabled(void);
84045+char gr_roletype_to_char(void);
84046+
84047+void gr_handle_alertkill(struct task_struct *task);
84048+char *gr_to_filename(const struct dentry *dentry,
84049+ const struct vfsmount *mnt);
84050+char *gr_to_filename1(const struct dentry *dentry,
84051+ const struct vfsmount *mnt);
84052+char *gr_to_filename2(const struct dentry *dentry,
84053+ const struct vfsmount *mnt);
84054+char *gr_to_filename3(const struct dentry *dentry,
84055+ const struct vfsmount *mnt);
84056+
84057+extern int grsec_enable_ptrace_readexec;
84058+extern int grsec_enable_harden_ptrace;
84059+extern int grsec_enable_link;
84060+extern int grsec_enable_fifo;
84061+extern int grsec_enable_execve;
84062+extern int grsec_enable_shm;
84063+extern int grsec_enable_execlog;
84064+extern int grsec_enable_signal;
84065+extern int grsec_enable_audit_ptrace;
84066+extern int grsec_enable_forkfail;
84067+extern int grsec_enable_time;
84068+extern int grsec_enable_rofs;
84069+extern int grsec_deny_new_usb;
84070+extern int grsec_enable_chroot_shmat;
84071+extern int grsec_enable_chroot_mount;
84072+extern int grsec_enable_chroot_double;
84073+extern int grsec_enable_chroot_pivot;
84074+extern int grsec_enable_chroot_chdir;
84075+extern int grsec_enable_chroot_chmod;
84076+extern int grsec_enable_chroot_mknod;
84077+extern int grsec_enable_chroot_fchdir;
84078+extern int grsec_enable_chroot_nice;
84079+extern int grsec_enable_chroot_execlog;
84080+extern int grsec_enable_chroot_caps;
84081+extern int grsec_enable_chroot_sysctl;
84082+extern int grsec_enable_chroot_unix;
84083+extern int grsec_enable_symlinkown;
84084+extern kgid_t grsec_symlinkown_gid;
84085+extern int grsec_enable_tpe;
84086+extern kgid_t grsec_tpe_gid;
84087+extern int grsec_enable_tpe_all;
84088+extern int grsec_enable_tpe_invert;
84089+extern int grsec_enable_socket_all;
84090+extern kgid_t grsec_socket_all_gid;
84091+extern int grsec_enable_socket_client;
84092+extern kgid_t grsec_socket_client_gid;
84093+extern int grsec_enable_socket_server;
84094+extern kgid_t grsec_socket_server_gid;
84095+extern kgid_t grsec_audit_gid;
84096+extern int grsec_enable_group;
84097+extern int grsec_enable_log_rwxmaps;
84098+extern int grsec_enable_mount;
84099+extern int grsec_enable_chdir;
84100+extern int grsec_resource_logging;
84101+extern int grsec_enable_blackhole;
84102+extern int grsec_lastack_retries;
84103+extern int grsec_enable_brute;
84104+extern int grsec_enable_harden_ipc;
84105+extern int grsec_lock;
84106+
84107+extern spinlock_t grsec_alert_lock;
84108+extern unsigned long grsec_alert_wtime;
84109+extern unsigned long grsec_alert_fyet;
84110+
84111+extern spinlock_t grsec_audit_lock;
84112+
84113+extern rwlock_t grsec_exec_file_lock;
84114+
84115+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
84116+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
84117+ (tsk)->exec_file->f_path.mnt) : "/")
84118+
84119+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
84120+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
84121+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
84122+
84123+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
84124+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
84125+ (tsk)->exec_file->f_path.mnt) : "/")
84126+
84127+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
84128+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
84129+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
84130+
84131+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
84132+
84133+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
84134+
84135+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
84136+{
84137+ if (file1 && file2) {
84138+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
84139+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
84140+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
84141+ return true;
84142+ }
84143+
84144+ return false;
84145+}
84146+
84147+#define GR_CHROOT_CAPS {{ \
84148+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
84149+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
84150+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
84151+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
84152+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
84153+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
84154+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
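GR_CHROOT_CAPS is a kernel_cap_t initializer: the first 32-bit word covers CAP_LINUX_IMMUTABLE through CAP_SETFCAP, the second covers capabilities above bit 31 (CAP_SYSLOG, CAP_MAC_ADMIN). A sketch of dropping these from a task entering a chroot (hypothetical; cap_drop() is the stock helper from <linux/capability.h>):

static void drop_chroot_caps(struct cred *cred)
{
	kernel_cap_t denied = GR_CHROOT_CAPS;

	cred->cap_permitted = cap_drop(cred->cap_permitted, denied);
	cred->cap_effective = cap_drop(cred->cap_effective, denied);
}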
84155+
84156+#define security_learn(normal_msg,args...) \
84157+({ \
84158+ read_lock(&grsec_exec_file_lock); \
84159+ gr_add_learn_entry(normal_msg "\n", ## args); \
84160+ read_unlock(&grsec_exec_file_lock); \
84161+})
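security_learn() takes the exec-file lock so the task's binary path stays stable while the entry is formatted, and gr_add_learn_entry() appends the newline itself. A hypothetical usage (assuming current's exec_file is populated):

static void learn_open(const char *path)
{
	security_learn("%.4095s\topen\t%.4095s",
		       gr_task_fullpath(current), path);
}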
84162+
84163+enum {
84164+ GR_DO_AUDIT,
84165+ GR_DONT_AUDIT,
84166+ /* used for non-audit messages that we shouldn't kill the task on */
84167+ GR_DONT_AUDIT_GOOD
84168+};
84169+
84170+enum {
84171+ GR_TTYSNIFF,
84172+ GR_RBAC,
84173+ GR_RBAC_STR,
84174+ GR_STR_RBAC,
84175+ GR_RBAC_MODE2,
84176+ GR_RBAC_MODE3,
84177+ GR_FILENAME,
84178+ GR_SYSCTL_HIDDEN,
84179+ GR_NOARGS,
84180+ GR_ONE_INT,
84181+ GR_ONE_INT_TWO_STR,
84182+ GR_ONE_STR,
84183+ GR_STR_INT,
84184+ GR_TWO_STR_INT,
84185+ GR_TWO_INT,
84186+ GR_TWO_U64,
84187+ GR_THREE_INT,
84188+ GR_FIVE_INT_TWO_STR,
84189+ GR_TWO_STR,
84190+ GR_THREE_STR,
84191+ GR_FOUR_STR,
84192+ GR_STR_FILENAME,
84193+ GR_FILENAME_STR,
84194+ GR_FILENAME_TWO_INT,
84195+ GR_FILENAME_TWO_INT_STR,
84196+ GR_TEXTREL,
84197+ GR_PTRACE,
84198+ GR_RESOURCE,
84199+ GR_CAP,
84200+ GR_SIG,
84201+ GR_SIG2,
84202+ GR_CRASH1,
84203+ GR_CRASH2,
84204+ GR_PSACCT,
84205+ GR_RWXMAP,
84206+ GR_RWXMAPVMA
84207+};
84208+
84209+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
84210+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
84211+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
84212+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
84213+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
84214+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
84215+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
84216+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
84217+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
84218+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
84219+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
84220+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
84221+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
84222+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
84223+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
84224+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
84225+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
84226+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
84227+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
84228+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
84229+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
84230+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
84231+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
84232+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
84233+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
84234+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
84235+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
84236+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
84237+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
84238+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
84239+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
84240+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
84241+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
84242+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
84243+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
84244+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
84245+
84246+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
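Each wrapper pairs a format string from grmsg.h with an argtypes tag describing the vararg shape, which gr_log_varargs() uses to pull the right arguments back off the list; the formats end in "by " because the logger appends the acting task's credentials. Hypothetical calls:

static void demo_logging(const struct dentry *dentry,
			 const struct vfsmount *mnt)
{
	/* GR_FILENAME shape: a (dentry, mnt) pair */
	gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);

	/* GR_ONE_INT shape: a single integer */
	gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, 42);
}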
84247+
84248+#endif
84249+
84250+#endif
84251diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
84252new file mode 100644
84253index 0000000..b02ba9d
84254--- /dev/null
84255+++ b/include/linux/grmsg.h
84256@@ -0,0 +1,117 @@
84257+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
84258+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
84259+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
84260+#define GR_STOPMOD_MSG "denied modification of module state by "
84261+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
84262+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
84263+#define GR_IOPERM_MSG "denied use of ioperm() by "
84264+#define GR_IOPL_MSG "denied use of iopl() by "
84265+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
84266+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
84267+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
84268+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
84269+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
84270+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
84271+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
84272+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
84273+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
84274+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
84275+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
84276+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
84277+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
84278+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
84279+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
84280+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
84281+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
84282+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
84283+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
84284+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
84285+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
84286+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
84287+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
84288+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
84289+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
84290+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
84291+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
84292+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
84293+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
84294+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
84295+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
84296+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
84297+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
84298+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
84299+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
84300+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
84301+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
84302+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
84303+#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
84304+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
84305+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
84306+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
84307+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
84308+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
84309+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
84310+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
84311+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
84312+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
84313+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
84314+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
84315+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
84316+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
84317+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
84318+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
84319+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
84320+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
84321+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
84322+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
84323+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
84324+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
84325+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
84326+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
84327+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
84328+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
84329+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
84330+#define GR_FAILFORK_MSG "failed fork with errno %s by "
84331+#define GR_NICE_CHROOT_MSG "denied priority change by "
84332+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
84333+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
84334+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
84335+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
84336+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
84337+#define GR_TIME_MSG "time set by "
84338+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
84339+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
84340+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
84341+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
84342+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
84343+#define GR_BIND_MSG "denied bind() by "
84344+#define GR_CONNECT_MSG "denied connect() by "
84345+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
84346+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
84347+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
84348+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
84349+#define GR_CAP_ACL_MSG "use of %s denied for "
84350+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
84351+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
84352+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
84353+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
84354+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
84355+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
84356+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
84357+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
84358+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
84359+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
84360+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
84361+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
84362+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
84363+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
84364+#define GR_VM86_MSG "denied use of vm86 by "
84365+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
84366+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
84367+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
84368+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
84369+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
84370+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
84371+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
84372+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
84373+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
84374diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
84375new file mode 100644
84376index 0000000..10b9635
84377--- /dev/null
84378+++ b/include/linux/grsecurity.h
84379@@ -0,0 +1,254 @@
84380+#ifndef GR_SECURITY_H
84381+#define GR_SECURITY_H
84382+#include <linux/fs.h>
84383+#include <linux/fs_struct.h>
84384+#include <linux/binfmts.h>
84385+#include <linux/gracl.h>
84386+
84387+/* notify of brain-dead configs */
84388+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
84389+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
84390+#endif
84391+#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
84392+#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
84393+#endif
84394+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
84395+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
84396+#endif
84397+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
84398+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
84399+#endif
84400+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
84401+#error "CONFIG_PAX enabled, but no PaX options are enabled."
84402+#endif
84403+
84404+int gr_handle_new_usb(void);
84405+
84406+void gr_handle_brute_attach(int dumpable);
84407+void gr_handle_brute_check(void);
84408+void gr_handle_kernel_exploit(void);
84409+
84410+char gr_roletype_to_char(void);
84411+
84412+int gr_proc_is_restricted(void);
84413+
84414+int gr_acl_enable_at_secure(void);
84415+
84416+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
84417+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
84418+
84419+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap);
84420+
84421+void gr_del_task_from_ip_table(struct task_struct *p);
84422+
84423+int gr_pid_is_chrooted(struct task_struct *p);
84424+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
84425+int gr_handle_chroot_nice(void);
84426+int gr_handle_chroot_sysctl(const int op);
84427+int gr_handle_chroot_setpriority(struct task_struct *p,
84428+ const int niceval);
84429+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
84430+int gr_chroot_fhandle(void);
84431+int gr_handle_chroot_chroot(const struct dentry *dentry,
84432+ const struct vfsmount *mnt);
84433+void gr_handle_chroot_chdir(const struct path *path);
84434+int gr_handle_chroot_chmod(const struct dentry *dentry,
84435+ const struct vfsmount *mnt, const int mode);
84436+int gr_handle_chroot_mknod(const struct dentry *dentry,
84437+ const struct vfsmount *mnt, const int mode);
84438+int gr_handle_chroot_mount(const struct dentry *dentry,
84439+ const struct vfsmount *mnt,
84440+ const char *dev_name);
84441+int gr_handle_chroot_pivot(void);
84442+int gr_handle_chroot_unix(const pid_t pid);
84443+
84444+int gr_handle_rawio(const struct inode *inode);
84445+
84446+void gr_handle_ioperm(void);
84447+void gr_handle_iopl(void);
84448+void gr_handle_msr_write(void);
84449+
84450+umode_t gr_acl_umask(void);
84451+
84452+int gr_tpe_allow(const struct file *file);
84453+
84454+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
84455+void gr_clear_chroot_entries(struct task_struct *task);
84456+
84457+void gr_log_forkfail(const int retval);
84458+void gr_log_timechange(void);
84459+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
84460+void gr_log_chdir(const struct dentry *dentry,
84461+ const struct vfsmount *mnt);
84462+void gr_log_chroot_exec(const struct dentry *dentry,
84463+ const struct vfsmount *mnt);
84464+void gr_log_remount(const char *devname, const int retval);
84465+void gr_log_unmount(const char *devname, const int retval);
84466+void gr_log_mount(const char *from, const char *to, const int retval);
84467+void gr_log_textrel(struct vm_area_struct *vma);
84468+void gr_log_ptgnustack(struct file *file);
84469+void gr_log_rwxmmap(struct file *file);
84470+void gr_log_rwxmprotect(struct vm_area_struct *vma);
84471+
84472+int gr_handle_follow_link(const struct inode *parent,
84473+ const struct inode *inode,
84474+ const struct dentry *dentry,
84475+ const struct vfsmount *mnt);
84476+int gr_handle_fifo(const struct dentry *dentry,
84477+ const struct vfsmount *mnt,
84478+ const struct dentry *dir, const int flag,
84479+ const int acc_mode);
84480+int gr_handle_hardlink(const struct dentry *dentry,
84481+ const struct vfsmount *mnt,
84482+ struct inode *inode,
84483+ const int mode, const struct filename *to);
84484+
84485+int gr_is_capable(const int cap);
84486+int gr_is_capable_nolog(const int cap);
84487+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
84488+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
84489+
84490+void gr_copy_label(struct task_struct *tsk);
84491+void gr_handle_crash(struct task_struct *task, const int sig);
84492+int gr_handle_signal(const struct task_struct *p, const int sig);
84493+int gr_check_crash_uid(const kuid_t uid);
84494+int gr_check_protected_task(const struct task_struct *task);
84495+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
84496+int gr_acl_handle_mmap(const struct file *file,
84497+ const unsigned long prot);
84498+int gr_acl_handle_mprotect(const struct file *file,
84499+ const unsigned long prot);
84500+int gr_check_hidden_task(const struct task_struct *tsk);
84501+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
84502+ const struct vfsmount *mnt);
84503+__u32 gr_acl_handle_utime(const struct dentry *dentry,
84504+ const struct vfsmount *mnt);
84505+__u32 gr_acl_handle_access(const struct dentry *dentry,
84506+ const struct vfsmount *mnt, const int fmode);
84507+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
84508+ const struct vfsmount *mnt, umode_t *mode);
84509+__u32 gr_acl_handle_chown(const struct dentry *dentry,
84510+ const struct vfsmount *mnt);
84511+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
84512+ const struct vfsmount *mnt);
84513+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
84514+ const struct vfsmount *mnt);
84515+int gr_handle_ptrace(struct task_struct *task, const long request);
84516+int gr_handle_proc_ptrace(struct task_struct *task);
84517+__u32 gr_acl_handle_execve(const struct dentry *dentry,
84518+ const struct vfsmount *mnt);
84519+int gr_check_crash_exec(const struct file *filp);
84520+int gr_acl_is_enabled(void);
84521+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
84522+ const kgid_t gid);
84523+int gr_set_proc_label(const struct dentry *dentry,
84524+ const struct vfsmount *mnt,
84525+ const int unsafe_flags);
84526+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
84527+ const struct vfsmount *mnt);
84528+__u32 gr_acl_handle_open(const struct dentry *dentry,
84529+ const struct vfsmount *mnt, int acc_mode);
84530+__u32 gr_acl_handle_creat(const struct dentry *dentry,
84531+ const struct dentry *p_dentry,
84532+ const struct vfsmount *p_mnt,
84533+ int open_flags, int acc_mode, const int imode);
84534+void gr_handle_create(const struct dentry *dentry,
84535+ const struct vfsmount *mnt);
84536+void gr_handle_proc_create(const struct dentry *dentry,
84537+ const struct inode *inode);
84538+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
84539+ const struct dentry *parent_dentry,
84540+ const struct vfsmount *parent_mnt,
84541+ const int mode);
84542+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
84543+ const struct dentry *parent_dentry,
84544+ const struct vfsmount *parent_mnt);
84545+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
84546+ const struct vfsmount *mnt);
84547+void gr_handle_delete(const ino_t ino, const dev_t dev);
84548+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
84549+ const struct vfsmount *mnt);
84550+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
84551+ const struct dentry *parent_dentry,
84552+ const struct vfsmount *parent_mnt,
84553+ const struct filename *from);
84554+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
84555+ const struct dentry *parent_dentry,
84556+ const struct vfsmount *parent_mnt,
84557+ const struct dentry *old_dentry,
84558+ const struct vfsmount *old_mnt, const struct filename *to);
84559+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
84560+int gr_acl_handle_rename(struct dentry *new_dentry,
84561+ struct dentry *parent_dentry,
84562+ const struct vfsmount *parent_mnt,
84563+ struct dentry *old_dentry,
84564+ struct inode *old_parent_inode,
84565+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags);
84566+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
84567+ struct dentry *old_dentry,
84568+ struct dentry *new_dentry,
84569+ struct vfsmount *mnt, const __u8 replace, unsigned int flags);
84570+__u32 gr_check_link(const struct dentry *new_dentry,
84571+ const struct dentry *parent_dentry,
84572+ const struct vfsmount *parent_mnt,
84573+ const struct dentry *old_dentry,
84574+ const struct vfsmount *old_mnt);
84575+int gr_acl_handle_filldir(const struct file *file, const char *name,
84576+ const unsigned int namelen, const ino_t ino);
84577+
84578+__u32 gr_acl_handle_unix(const struct dentry *dentry,
84579+ const struct vfsmount *mnt);
84580+void gr_acl_handle_exit(void);
84581+void gr_acl_handle_psacct(struct task_struct *task, const long code);
84582+int gr_acl_handle_procpidmem(const struct task_struct *task);
84583+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
84584+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
84585+void gr_audit_ptrace(struct task_struct *task);
84586+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
84587+void gr_put_exec_file(struct task_struct *task);
84588+
84589+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
84590+
84591+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
84592+extern void gr_learn_resource(const struct task_struct *task, const int res,
84593+ const unsigned long wanted, const int gt);
84594+#else
84595+static inline void gr_learn_resource(const struct task_struct *task, const int res,
84596+ const unsigned long wanted, const int gt)
84597+{
84598+}
84599+#endif
84600+
84601+#ifdef CONFIG_GRKERNSEC_RESLOG
84602+extern void gr_log_resource(const struct task_struct *task, const int res,
84603+ const unsigned long wanted, const int gt);
84604+#else
84605+static inline void gr_log_resource(const struct task_struct *task, const int res,
84606+ const unsigned long wanted, const int gt)
84607+{
84608+}
84609+#endif
84610+
84611+#ifdef CONFIG_GRKERNSEC
84612+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
84613+void gr_handle_vm86(void);
84614+void gr_handle_mem_readwrite(u64 from, u64 to);
84615+
84616+void gr_log_badprocpid(const char *entry);
84617+
84618+extern int grsec_enable_dmesg;
84619+extern int grsec_disable_privio;
84620+
84621+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
84622+extern kgid_t grsec_proc_gid;
84623+#endif
84624+
84625+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
84626+extern int grsec_enable_chroot_findtask;
84627+#endif
84628+#ifdef CONFIG_GRKERNSEC_SETXID
84629+extern int grsec_enable_setxid;
84630+#endif
84631+#endif
84632+
84633+#endif
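
The block above is pure interface: the gr_* hooks are declared once here and defined under grsecurity/, with call sites threaded through the VFS, signal, and mm code later in the patch. Two return conventions recur, inferred from those call sites: the gr_acl_handle_*() helpers return the granted access mask (0 means deny), while the gr_handle_*() helpers return nonzero when a violation was detected. A minimal sketch of a consumer; the caller below is hypothetical, not code from the patch:

/* Hypothetical call site showing both return conventions. */
static int example_open_checks(struct dentry *dentry, struct vfsmount *mnt,
			       int acc_mode)
{
	/* gr_acl_handle_*(): granted access mask, 0 means deny */
	if (!gr_acl_handle_open(dentry, mnt, acc_mode))
		return -EACCES;

	/* gr_handle_*(): nonzero means a violation was detected */
	if (gr_handle_rawio(dentry->d_inode))
		return -EPERM;

	return 0;
}
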
84634diff --git a/include/linux/grsock.h b/include/linux/grsock.h
84635new file mode 100644
84636index 0000000..e7ffaaf
84637--- /dev/null
84638+++ b/include/linux/grsock.h
84639@@ -0,0 +1,19 @@
84640+#ifndef __GRSOCK_H
84641+#define __GRSOCK_H
84642+
84643+extern void gr_attach_curr_ip(const struct sock *sk);
84644+extern int gr_handle_sock_all(const int family, const int type,
84645+ const int protocol);
84646+extern int gr_handle_sock_server(const struct sockaddr *sck);
84647+extern int gr_handle_sock_server_other(const struct sock *sck);
84648+extern int gr_handle_sock_client(const struct sockaddr *sck);
84649+extern int gr_search_connect(struct socket * sock,
84650+ struct sockaddr_in * addr);
84651+extern int gr_search_bind(struct socket * sock,
84652+ struct sockaddr_in * addr);
84653+extern int gr_search_listen(struct socket * sock);
84654+extern int gr_search_accept(struct socket * sock);
84655+extern int gr_search_socket(const int domain, const int type,
84656+ const int protocol);
84657+
84658+#endif
84659diff --git a/include/linux/hash.h b/include/linux/hash.h
84660index bd1754c..69b7715 100644
84661--- a/include/linux/hash.h
84662+++ b/include/linux/hash.h
84663@@ -37,6 +37,9 @@ static __always_inline u64 hash_64(u64 val, unsigned int bits)
84664 {
84665 u64 hash = val;
84666
84667+#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
84668+ hash = hash * GOLDEN_RATIO_PRIME_64;
84669+#else
84670 /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
84671 u64 n = hash;
84672 n <<= 18;
84673@@ -51,6 +54,7 @@ static __always_inline u64 hash_64(u64 val, unsigned int bits)
84674 hash += n;
84675 n <<= 2;
84676 hash += n;
84677+#endif
84678
84679 /* High bits are more random, so use them. */
84680 return hash >> (64 - bits);
84681@@ -83,7 +87,7 @@ static inline u32 hash32_ptr(const void *ptr)
84682 struct fast_hash_ops {
84683 u32 (*hash)(const void *data, u32 len, u32 seed);
84684 u32 (*hash2)(const u32 *data, u32 len, u32 seed);
84685-};
84686+} __no_const;
84687
84688 /**
84689 * arch_fast_hash - Calculates a hash over a given buffer that can have
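
The hash.h hunk lets 64-bit arches with CONFIG_ARCH_HAS_FAST_MULTIPLIER compute hash_64() as a single multiply by GOLDEN_RATIO_PRIME_64 (0x9e37fffffffc0001) instead of the shift/add chain kept in the #else branch (the hunk shows only the chain's tail). The two forms are arithmetically identical: the chain builds the multiplier 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 term by term. A standalone check, with the full sequence reproduced from the unpatched function:

#include <assert.h>
#include <stdint.h>

#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL

/* Shift/add fallback: equivalent to hash * GOLDEN_RATIO_PRIME_64 (mod 2^64). */
static uint64_t hash_64_shift_add(uint64_t hash)
{
	uint64_t n = hash;

	n <<= 18; hash -= n;	/* - 2^18 */
	n <<= 33; hash -= n;	/* - 2^51 */
	n <<= 3;  hash += n;	/* + 2^54 */
	n <<= 3;  hash -= n;	/* - 2^57 */
	n <<= 4;  hash += n;	/* + 2^61 */
	n <<= 2;  hash += n;	/* + 2^63 */
	return hash;
}

int main(void)
{
	uint64_t v;

	for (v = 0; v < 1000000; v++)
		assert(hash_64_shift_add(v) == v * GOLDEN_RATIO_PRIME_64);
	return 0;
}
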
84690diff --git a/include/linux/highmem.h b/include/linux/highmem.h
84691index 7fb31da..08b5114 100644
84692--- a/include/linux/highmem.h
84693+++ b/include/linux/highmem.h
84694@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
84695 kunmap_atomic(kaddr);
84696 }
84697
84698+static inline void sanitize_highpage(struct page *page)
84699+{
84700+ void *kaddr;
84701+ unsigned long flags;
84702+
84703+ local_irq_save(flags);
84704+ kaddr = kmap_atomic(page);
84705+ clear_page(kaddr);
84706+ kunmap_atomic(kaddr);
84707+ local_irq_restore(flags);
84708+}
84709+
84710 static inline void zero_user_segments(struct page *page,
84711 unsigned start1, unsigned end1,
84712 unsigned start2, unsigned end2)
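
sanitize_highpage() is the PAX_MEMORY_SANITIZE worker: the same kmap_atomic()/clear_page() pair as clear_highpage(), bracketed by local_irq_save()/restore() so a page scrubbed in the free path cannot race with an interrupt handler over the atomic kmap slot. Its intended consumer is the page allocator's free path (patched in mm/page_alloc.c later); a sketch of that shape, with a hypothetical caller:

/* Hypothetical free-path caller: scrub every page of the block at
 * free time so stale contents never survive in the allocator. */
static void example_free_sanitize(struct page *page, unsigned int order)
{
	unsigned int i;

	for (i = 0; i < (1U << order); i++)
		sanitize_highpage(page + i);
}
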
84713diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
84714index 1c7b89a..7dda400 100644
84715--- a/include/linux/hwmon-sysfs.h
84716+++ b/include/linux/hwmon-sysfs.h
84717@@ -25,7 +25,8 @@
84718 struct sensor_device_attribute{
84719 struct device_attribute dev_attr;
84720 int index;
84721-};
84722+} __do_const;
84723+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
84724 #define to_sensor_dev_attr(_dev_attr) \
84725 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
84726
84727@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
84728 struct device_attribute dev_attr;
84729 u8 index;
84730 u8 nr;
84731-};
84732+} __do_const;
84733+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
84734 #define to_sensor_dev_attr_2(_dev_attr) \
84735 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
84736
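
This pairing recurs throughout the patch: __do_const tells the constify gcc plugin to treat every instance of the struct as read-only after initialization, and the *_no_const typedef opts individual, legitimately writable instances back out. Modeled with plain const (the plugin enforces this at compile time; names below are hypothetical):

/* Model of the constify pattern using ordinary const. */
struct example_attr {
	const char *name;
	int index;
};

/* __do_const default: the instance lands in a read-only section. */
static const struct example_attr fixed_attr = {
	.name  = "temp1_input",
	.index = 1,
};

/* The _no_const escape hatch: still writable at runtime. */
static struct example_attr runtime_attr;

static void example_probe(int probed_index)
{
	runtime_attr.name  = "temp2_input";
	runtime_attr.index = probed_index;
}
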
84737diff --git a/include/linux/i2c.h b/include/linux/i2c.h
84738index b556e0a..c10a515 100644
84739--- a/include/linux/i2c.h
84740+++ b/include/linux/i2c.h
84741@@ -378,6 +378,7 @@ struct i2c_algorithm {
84742 /* To determine what the adapter supports */
84743 u32 (*functionality) (struct i2c_adapter *);
84744 };
84745+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
84746
84747 /**
84748 * struct i2c_bus_recovery_info - I2C bus recovery information
84749diff --git a/include/linux/i2o.h b/include/linux/i2o.h
84750index d23c3c2..eb63c81 100644
84751--- a/include/linux/i2o.h
84752+++ b/include/linux/i2o.h
84753@@ -565,7 +565,7 @@ struct i2o_controller {
84754 struct i2o_device *exec; /* Executive */
84755 #if BITS_PER_LONG == 64
84756 spinlock_t context_list_lock; /* lock for context_list */
84757- atomic_t context_list_counter; /* needed for unique contexts */
84758+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
84759 struct list_head context_list; /* list of context id's
84760 and pointers */
84761 #endif
84762diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
84763index aff7ad8..3942bbd 100644
84764--- a/include/linux/if_pppox.h
84765+++ b/include/linux/if_pppox.h
84766@@ -76,7 +76,7 @@ struct pppox_proto {
84767 int (*ioctl)(struct socket *sock, unsigned int cmd,
84768 unsigned long arg);
84769 struct module *owner;
84770-};
84771+} __do_const;
84772
84773 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
84774 extern void unregister_pppox_proto(int proto_num);
84775diff --git a/include/linux/init.h b/include/linux/init.h
84776index 2df8e8d..3e1280d 100644
84777--- a/include/linux/init.h
84778+++ b/include/linux/init.h
84779@@ -37,9 +37,17 @@
84780 * section.
84781 */
84782
84783+#define add_init_latent_entropy __latent_entropy
84784+
84785+#ifdef CONFIG_MEMORY_HOTPLUG
84786+#define add_meminit_latent_entropy
84787+#else
84788+#define add_meminit_latent_entropy __latent_entropy
84789+#endif
84790+
84791 /* These are for everybody (although not all archs will actually
84792 discard it in modules) */
84793-#define __init __section(.init.text) __cold notrace
84794+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
84795 #define __initdata __section(.init.data)
84796 #define __initconst __constsection(.init.rodata)
84797 #define __exitdata __section(.exit.data)
84798@@ -100,7 +108,7 @@
84799 #define __cpuexitconst
84800
84801 /* Used for MEMORY_HOTPLUG */
84802-#define __meminit __section(.meminit.text) __cold notrace
84803+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
84804 #define __meminitdata __section(.meminit.data)
84805 #define __meminitconst __constsection(.meminit.rodata)
84806 #define __memexit __section(.memexit.text) __exitused __cold notrace
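
The init.h hunk feeds the latent_entropy gcc plugin: every __init function (and __meminit, except when MEMORY_HOTPLUG means those can run long after boot) is instrumented to mix a per-function compile-time random constant into a global pool, harvesting entropy from whichever code paths a given boot happens to execute. A rough model of what the plugin injects; the mechanism and names here are approximations, not the plugin's actual output:

static unsigned long latent_entropy_pool;

/* Each instrumented function gets a distinct compile-time constant. */
#define MIX_LATENT_ENTROPY(fn_const) \
	(latent_entropy_pool ^= (fn_const))

static void example_init_fn(void)
{
	MIX_LATENT_ENTROPY(0x8f3a21c7UL);	/* per-function constant */
	/* ... ordinary init work ... */
}
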
84807diff --git a/include/linux/init_task.h b/include/linux/init_task.h
84808index 6df7f9f..d0bf699 100644
84809--- a/include/linux/init_task.h
84810+++ b/include/linux/init_task.h
84811@@ -156,6 +156,12 @@ extern struct task_group root_task_group;
84812
84813 #define INIT_TASK_COMM "swapper"
84814
84815+#ifdef CONFIG_X86
84816+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
84817+#else
84818+#define INIT_TASK_THREAD_INFO
84819+#endif
84820+
84821 #ifdef CONFIG_RT_MUTEXES
84822 # define INIT_RT_MUTEXES(tsk) \
84823 .pi_waiters = RB_ROOT, \
84824@@ -203,6 +209,7 @@ extern struct task_group root_task_group;
84825 RCU_POINTER_INITIALIZER(cred, &init_cred), \
84826 .comm = INIT_TASK_COMM, \
84827 .thread = INIT_THREAD, \
84828+ INIT_TASK_THREAD_INFO \
84829 .fs = &init_fs, \
84830 .files = &init_files, \
84831 .signal = &init_signals, \
84832diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
84833index 698ad05..8601bb7 100644
84834--- a/include/linux/interrupt.h
84835+++ b/include/linux/interrupt.h
84836@@ -418,8 +418,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
84837
84838 struct softirq_action
84839 {
84840- void (*action)(struct softirq_action *);
84841-};
84842+ void (*action)(void);
84843+} __no_const;
84844
84845 asmlinkage void do_softirq(void);
84846 asmlinkage void __do_softirq(void);
84847@@ -433,7 +433,7 @@ static inline void do_softirq_own_stack(void)
84848 }
84849 #endif
84850
84851-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
84852+extern void open_softirq(int nr, void (*action)(void));
84853 extern void softirq_init(void);
84854 extern void __raise_softirq_irqoff(unsigned int nr);
84855
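
Every softirq handler ignored its struct softirq_action * argument (the core always passed a pointer into its own static array), so dropping it costs nothing and narrows the function-pointer type that the now __no_const structure stores, leaving less room to forge a useful callback. The handler conversions elsewhere in the patch are mechanical, roughly:

/* Before: static void tasklet_action(struct softirq_action *a); the
 * argument was never used.  After (illustrative conversion): */
static void tasklet_action(void)
{
	/* ... run the pending tasklet list ... */
}

static void example_register(void)
{
	open_softirq(6 /* TASKLET_SOFTIRQ */, tasklet_action);
}
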
84856diff --git a/include/linux/iommu.h b/include/linux/iommu.h
84857index b96a5b2..2732d1c 100644
84858--- a/include/linux/iommu.h
84859+++ b/include/linux/iommu.h
84860@@ -131,7 +131,7 @@ struct iommu_ops {
84861 u32 (*domain_get_windows)(struct iommu_domain *domain);
84862
84863 unsigned long pgsize_bitmap;
84864-};
84865+} __do_const;
84866
84867 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
84868 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
84869diff --git a/include/linux/ioport.h b/include/linux/ioport.h
84870index 5e3a906..3131d0f 100644
84871--- a/include/linux/ioport.h
84872+++ b/include/linux/ioport.h
84873@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
84874 int adjust_resource(struct resource *res, resource_size_t start,
84875 resource_size_t size);
84876 resource_size_t resource_alignment(struct resource *res);
84877-static inline resource_size_t resource_size(const struct resource *res)
84878+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
84879 {
84880 return res->end - res->start + 1;
84881 }
84882diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
84883index 35e7eca..6afb7ad 100644
84884--- a/include/linux/ipc_namespace.h
84885+++ b/include/linux/ipc_namespace.h
84886@@ -69,7 +69,7 @@ struct ipc_namespace {
84887 struct user_namespace *user_ns;
84888
84889 unsigned int proc_inum;
84890-};
84891+} __randomize_layout;
84892
84893 extern struct ipc_namespace init_ipc_ns;
84894 extern atomic_t nr_ipc_ns;
84895diff --git a/include/linux/irq.h b/include/linux/irq.h
84896index 0d998d8..3a1c782 100644
84897--- a/include/linux/irq.h
84898+++ b/include/linux/irq.h
84899@@ -344,7 +344,8 @@ struct irq_chip {
84900 void (*irq_release_resources)(struct irq_data *data);
84901
84902 unsigned long flags;
84903-};
84904+} __do_const;
84905+typedef struct irq_chip __no_const irq_chip_no_const;
84906
84907 /*
84908 * irq_chip specific flags
84909diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
84910index 45e2d8c..26d85da 100644
84911--- a/include/linux/irqchip/arm-gic.h
84912+++ b/include/linux/irqchip/arm-gic.h
84913@@ -75,9 +75,11 @@
84914
84915 #ifndef __ASSEMBLY__
84916
84917+#include <linux/irq.h>
84918+
84919 struct device_node;
84920
84921-extern struct irq_chip gic_arch_extn;
84922+extern irq_chip_no_const gic_arch_extn;
84923
84924 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
84925 u32 offset, struct device_node *);
84926diff --git a/include/linux/isdn_ppp.h b/include/linux/isdn_ppp.h
84927index 8e10f57..d5f62bc 100644
84928--- a/include/linux/isdn_ppp.h
84929+++ b/include/linux/isdn_ppp.h
84930@@ -180,8 +180,9 @@ struct ippp_struct {
84931 struct slcompress *slcomp;
84932 #endif
84933 #ifdef CONFIG_IPPP_FILTER
84934- struct sk_filter *pass_filter; /* filter for packets to pass */
84935- struct sk_filter *active_filter; /* filter for pkts to reset idle */
84936+ struct sock_filter *pass_filter; /* filter for packets to pass */
84937+ struct sock_filter *active_filter; /* filter for pkts to reset idle */
84938+ unsigned pass_len, active_len;
84939 #endif
84940 unsigned long debug;
84941 struct isdn_ppp_compressor *compressor,*decompressor;
84942diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
84943index 1f44466..b481806 100644
84944--- a/include/linux/jiffies.h
84945+++ b/include/linux/jiffies.h
84946@@ -292,20 +292,20 @@ extern unsigned long preset_lpj;
84947 /*
84948 * Convert various time units to each other:
84949 */
84950-extern unsigned int jiffies_to_msecs(const unsigned long j);
84951-extern unsigned int jiffies_to_usecs(const unsigned long j);
84952+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
84953+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
84954
84955-static inline u64 jiffies_to_nsecs(const unsigned long j)
84956+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
84957 {
84958 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
84959 }
84960
84961-extern unsigned long msecs_to_jiffies(const unsigned int m);
84962-extern unsigned long usecs_to_jiffies(const unsigned int u);
84963+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
84964+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
84965 extern unsigned long timespec_to_jiffies(const struct timespec *value);
84966 extern void jiffies_to_timespec(const unsigned long jiffies,
84967- struct timespec *value);
84968-extern unsigned long timeval_to_jiffies(const struct timeval *value);
84969+ struct timespec *value) __intentional_overflow(-1);
84970+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
84971 extern void jiffies_to_timeval(const unsigned long jiffies,
84972 struct timeval *value);
84973
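
__intentional_overflow(-1) is the opt-out annotation for the size_overflow gcc plugin, which instruments integer arithmetic feeding allocation sizes and similar sinks and traps when it wraps. The time-conversion and division helpers marked here may legitimately wrap or saturate, so the plugin is told to leave them alone. Conceptually, with illustrative functions:

/* Unmarked: with the plugin active, a wrapping result here would be
 * reported and trapped at runtime. */
unsigned long checked_bytes(unsigned long nmemb, unsigned long size)
{
	return nmemb * size;
}

/* Marked: wrapping is expected and benign, instrumentation is skipped. */
unsigned long __intentional_overflow(-1) wrapping_ok(unsigned long j)
{
	return j * 1000;
}
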
84974diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
84975index 6883e19..e854fcb 100644
84976--- a/include/linux/kallsyms.h
84977+++ b/include/linux/kallsyms.h
84978@@ -15,7 +15,8 @@
84979
84980 struct module;
84981
84982-#ifdef CONFIG_KALLSYMS
84983+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
84984+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
84985 /* Lookup the address for a symbol. Returns 0 if not found. */
84986 unsigned long kallsyms_lookup_name(const char *name);
84987
84988@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
84989 /* Stupid that this does nothing, but I didn't create this mess. */
84990 #define __print_symbol(fmt, addr)
84991 #endif /*CONFIG_KALLSYMS*/
84992+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
84993+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
84994+extern unsigned long kallsyms_lookup_name(const char *name);
84995+extern void __print_symbol(const char *fmt, unsigned long address);
84996+extern int sprint_backtrace(char *buffer, unsigned long address);
84997+extern int sprint_symbol(char *buffer, unsigned long address);
84998+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
84999+const char *kallsyms_lookup(unsigned long addr,
85000+ unsigned long *symbolsize,
85001+ unsigned long *offset,
85002+ char **modname, char *namebuf);
85003+extern int kallsyms_lookup_size_offset(unsigned long addr,
85004+ unsigned long *symbolsize,
85005+ unsigned long *offset);
85006+#endif
85007
85008 /* This macro allows us to keep printk typechecking */
85009 static __printf(1, 2)
85010diff --git a/include/linux/key-type.h b/include/linux/key-type.h
85011index a74c3a8..28d3f21 100644
85012--- a/include/linux/key-type.h
85013+++ b/include/linux/key-type.h
85014@@ -131,7 +131,7 @@ struct key_type {
85015 /* internal fields */
85016 struct list_head link; /* link in types list */
85017 struct lock_class_key lock_class; /* key->sem lock class */
85018-};
85019+} __do_const;
85020
85021 extern struct key_type key_type_keyring;
85022
85023diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
85024index 6b06d37..c134867 100644
85025--- a/include/linux/kgdb.h
85026+++ b/include/linux/kgdb.h
85027@@ -52,7 +52,7 @@ extern int kgdb_connected;
85028 extern int kgdb_io_module_registered;
85029
85030 extern atomic_t kgdb_setting_breakpoint;
85031-extern atomic_t kgdb_cpu_doing_single_step;
85032+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
85033
85034 extern struct task_struct *kgdb_usethread;
85035 extern struct task_struct *kgdb_contthread;
85036@@ -254,7 +254,7 @@ struct kgdb_arch {
85037 void (*correct_hw_break)(void);
85038
85039 void (*enable_nmi)(bool on);
85040-};
85041+} __do_const;
85042
85043 /**
85044 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
85045@@ -279,7 +279,7 @@ struct kgdb_io {
85046 void (*pre_exception) (void);
85047 void (*post_exception) (void);
85048 int is_console;
85049-};
85050+} __do_const;
85051
85052 extern struct kgdb_arch arch_kgdb_ops;
85053
85054diff --git a/include/linux/kmod.h b/include/linux/kmod.h
85055index 0555cc6..40116ce 100644
85056--- a/include/linux/kmod.h
85057+++ b/include/linux/kmod.h
85058@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
85059 * usually useless though. */
85060 extern __printf(2, 3)
85061 int __request_module(bool wait, const char *name, ...);
85062+extern __printf(3, 4)
85063+int ___request_module(bool wait, char *param_name, const char *name, ...);
85064 #define request_module(mod...) __request_module(true, mod)
85065 #define request_module_nowait(mod...) __request_module(false, mod)
85066 #define try_then_request_module(x, mod...) \
85067@@ -57,6 +59,9 @@ struct subprocess_info {
85068 struct work_struct work;
85069 struct completion *complete;
85070 char *path;
85071+#ifdef CONFIG_GRKERNSEC
85072+ char *origpath;
85073+#endif
85074 char **argv;
85075 char **envp;
85076 int wait;
85077diff --git a/include/linux/kobject.h b/include/linux/kobject.h
85078index 2d61b90..a1d0a13 100644
85079--- a/include/linux/kobject.h
85080+++ b/include/linux/kobject.h
85081@@ -118,7 +118,7 @@ struct kobj_type {
85082 struct attribute **default_attrs;
85083 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
85084 const void *(*namespace)(struct kobject *kobj);
85085-};
85086+} __do_const;
85087
85088 struct kobj_uevent_env {
85089 char *argv[3];
85090@@ -142,6 +142,7 @@ struct kobj_attribute {
85091 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
85092 const char *buf, size_t count);
85093 };
85094+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
85095
85096 extern const struct sysfs_ops kobj_sysfs_ops;
85097
85098@@ -169,7 +170,7 @@ struct kset {
85099 spinlock_t list_lock;
85100 struct kobject kobj;
85101 const struct kset_uevent_ops *uevent_ops;
85102-};
85103+} __randomize_layout;
85104
85105 extern void kset_init(struct kset *kset);
85106 extern int __must_check kset_register(struct kset *kset);
85107diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
85108index df32d25..fb52e27 100644
85109--- a/include/linux/kobject_ns.h
85110+++ b/include/linux/kobject_ns.h
85111@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
85112 const void *(*netlink_ns)(struct sock *sk);
85113 const void *(*initial_ns)(void);
85114 void (*drop_ns)(void *);
85115-};
85116+} __do_const;
85117
85118 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
85119 int kobj_ns_type_registered(enum kobj_ns_type type);
85120diff --git a/include/linux/kref.h b/include/linux/kref.h
85121index 484604d..0f6c5b6 100644
85122--- a/include/linux/kref.h
85123+++ b/include/linux/kref.h
85124@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
85125 static inline int kref_sub(struct kref *kref, unsigned int count,
85126 void (*release)(struct kref *kref))
85127 {
85128- WARN_ON(release == NULL);
85129+ BUG_ON(release == NULL);
85130
85131 if (atomic_sub_and_test((int) count, &kref->refcount)) {
85132 release(kref);
85133diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
85134index ec4e3bd..14db03a 100644
85135--- a/include/linux/kvm_host.h
85136+++ b/include/linux/kvm_host.h
85137@@ -468,7 +468,7 @@ static inline void kvm_irqfd_exit(void)
85138 {
85139 }
85140 #endif
85141-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
85142+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
85143 struct module *module);
85144 void kvm_exit(void);
85145
85146@@ -634,7 +634,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
85147 struct kvm_guest_debug *dbg);
85148 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
85149
85150-int kvm_arch_init(void *opaque);
85151+int kvm_arch_init(const void *opaque);
85152 void kvm_arch_exit(void);
85153
85154 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
85155diff --git a/include/linux/libata.h b/include/linux/libata.h
85156index 92abb49..e7fff2a 100644
85157--- a/include/linux/libata.h
85158+++ b/include/linux/libata.h
85159@@ -976,7 +976,7 @@ struct ata_port_operations {
85160 * fields must be pointers.
85161 */
85162 const struct ata_port_operations *inherits;
85163-};
85164+} __do_const;
85165
85166 struct ata_port_info {
85167 unsigned long flags;
85168diff --git a/include/linux/linkage.h b/include/linux/linkage.h
85169index a6a42dd..6c5ebce 100644
85170--- a/include/linux/linkage.h
85171+++ b/include/linux/linkage.h
85172@@ -36,6 +36,7 @@
85173 #endif
85174
85175 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
85176+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
85177 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
85178
85179 /*
85180diff --git a/include/linux/list.h b/include/linux/list.h
85181index ef95941..82db65a 100644
85182--- a/include/linux/list.h
85183+++ b/include/linux/list.h
85184@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
85185 extern void list_del(struct list_head *entry);
85186 #endif
85187
85188+extern void __pax_list_add(struct list_head *new,
85189+ struct list_head *prev,
85190+ struct list_head *next);
85191+static inline void pax_list_add(struct list_head *new, struct list_head *head)
85192+{
85193+ __pax_list_add(new, head, head->next);
85194+}
85195+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
85196+{
85197+ __pax_list_add(new, head->prev, head);
85198+}
85199+extern void pax_list_del(struct list_head *entry);
85200+
85201 /**
85202 * list_replace - replace old entry by new one
85203 * @old : the element to be replaced
85204@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
85205 INIT_LIST_HEAD(entry);
85206 }
85207
85208+extern void pax_list_del_init(struct list_head *entry);
85209+
85210 /**
85211 * list_move - delete from one list and add as another's head
85212 * @list: the entry to move
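
pax_list_add(), pax_list_add_tail(), and pax_list_del() are the KERNEXEC/constify-aware list primitives: their out-of-line bodies, added elsewhere in this patch, open a brief writable window (pax_open_kernel()/pax_close_kernel()) around the pointer updates so list nodes embedded in otherwise read-only objects can still be linked and unlinked. Call sites substitute them one for one; a sketch with hypothetical types:

/* Sketch: a node embedded in a constified (read-only after init)
 * object; plain list_add() would fault on the write. */
struct example_driver {
	const char *name;
	struct list_head node;
};

static LIST_HEAD(example_drivers);

static void example_register(struct example_driver *drv)
{
	pax_list_add_tail(&drv->node, &example_drivers);
}

static void example_unregister(struct example_driver *drv)
{
	pax_list_del(&drv->node);
}
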
85213diff --git a/include/linux/lockref.h b/include/linux/lockref.h
85214index 4bfde0e..d6e2e09 100644
85215--- a/include/linux/lockref.h
85216+++ b/include/linux/lockref.h
85217@@ -47,4 +47,36 @@ static inline int __lockref_is_dead(const struct lockref *l)
85218 return ((int)l->count < 0);
85219 }
85220
85221+static inline unsigned int __lockref_read(struct lockref *lockref)
85222+{
85223+ return lockref->count;
85224+}
85225+
85226+static inline void __lockref_set(struct lockref *lockref, unsigned int count)
85227+{
85228+ lockref->count = count;
85229+}
85230+
85231+static inline void __lockref_inc(struct lockref *lockref)
85232+{
85233+
85234+#ifdef CONFIG_PAX_REFCOUNT
85235+ atomic_inc((atomic_t *)&lockref->count);
85236+#else
85237+ lockref->count++;
85238+#endif
85239+
85240+}
85241+
85242+static inline void __lockref_dec(struct lockref *lockref)
85243+{
85244+
85245+#ifdef CONFIG_PAX_REFCOUNT
85246+ atomic_dec((atomic_t *)&lockref->count);
85247+#else
85248+ lockref->count--;
85249+#endif
85250+
85251+}
85252+
85253 #endif /* __LINUX_LOCKREF_H */
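
The __lockref_* accessors exist because PAX_REFCOUNT hardens atomic_t against overflow while lockref's count is a plain unsigned int updated under its spinlock; routing the increment and decrement through an atomic_t cast extends the same overflow protection to lockref users (dentry counts being the big one) without changing the structure layout. A converted call site then looks like:

/* Illustrative caller: the ++/-- become protected atomic ops under
 * PAX_REFCOUNT and plain integer updates otherwise. */
static void example_get(struct lockref *ref)
{
	spin_lock(&ref->lock);
	__lockref_inc(ref);
	spin_unlock(&ref->lock);
}
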
85254diff --git a/include/linux/math64.h b/include/linux/math64.h
85255index c45c089..298841c 100644
85256--- a/include/linux/math64.h
85257+++ b/include/linux/math64.h
85258@@ -15,7 +15,7 @@
85259 * This is commonly provided by 32bit archs to provide an optimized 64bit
85260 * divide.
85261 */
85262-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
85263+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
85264 {
85265 *remainder = dividend % divisor;
85266 return dividend / divisor;
85267@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
85268 /**
85269 * div64_u64 - unsigned 64bit divide with 64bit divisor
85270 */
85271-static inline u64 div64_u64(u64 dividend, u64 divisor)
85272+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
85273 {
85274 return dividend / divisor;
85275 }
85276@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
85277 #define div64_ul(x, y) div_u64((x), (y))
85278
85279 #ifndef div_u64_rem
85280-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
85281+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
85282 {
85283 *remainder = do_div(dividend, divisor);
85284 return dividend;
85285@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
85286 #endif
85287
85288 #ifndef div64_u64
85289-extern u64 div64_u64(u64 dividend, u64 divisor);
85290+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
85291 #endif
85292
85293 #ifndef div64_s64
85294@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
85295 * divide.
85296 */
85297 #ifndef div_u64
85298-static inline u64 div_u64(u64 dividend, u32 divisor)
85299+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
85300 {
85301 u32 remainder;
85302 return div_u64_rem(dividend, divisor, &remainder);
85303diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
85304index f230a97..714c006 100644
85305--- a/include/linux/mempolicy.h
85306+++ b/include/linux/mempolicy.h
85307@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
85308 }
85309
85310 #define vma_policy(vma) ((vma)->vm_policy)
85311+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
85312+{
85313+ vma->vm_policy = pol;
85314+}
85315
85316 static inline void mpol_get(struct mempolicy *pol)
85317 {
85318@@ -228,6 +232,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
85319 }
85320
85321 #define vma_policy(vma) NULL
85322+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
85323+{
85324+}
85325
85326 static inline int
85327 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
85328diff --git a/include/linux/mm.h b/include/linux/mm.h
85329index e03dd29..eaf923c 100644
85330--- a/include/linux/mm.h
85331+++ b/include/linux/mm.h
85332@@ -127,6 +127,11 @@ extern unsigned int kobjsize(const void *objp);
85333 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
85334 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
85335 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
85336+
85337+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
85338+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
85339+#endif
85340+
85341 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
85342
85343 #ifdef CONFIG_MEM_SOFT_DIRTY
85344@@ -237,8 +242,8 @@ struct vm_operations_struct {
85345 /* called by access_process_vm when get_user_pages() fails, typically
85346 * for use by special VMAs that can switch between memory and hardware
85347 */
85348- int (*access)(struct vm_area_struct *vma, unsigned long addr,
85349- void *buf, int len, int write);
85350+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
85351+ void *buf, size_t len, int write);
85352
85353 /* Called by the /proc/PID/maps code to ask the vma whether it
85354 * has a special name. Returning non-NULL will also cause this
85355@@ -274,6 +279,7 @@ struct vm_operations_struct {
85356 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
85357 unsigned long size, pgoff_t pgoff);
85358 };
85359+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
85360
85361 struct mmu_gather;
85362 struct inode;
85363@@ -1144,8 +1150,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
85364 unsigned long *pfn);
85365 int follow_phys(struct vm_area_struct *vma, unsigned long address,
85366 unsigned int flags, unsigned long *prot, resource_size_t *phys);
85367-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
85368- void *buf, int len, int write);
85369+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
85370+ void *buf, size_t len, int write);
85371
85372 static inline void unmap_shared_mapping_range(struct address_space *mapping,
85373 loff_t const holebegin, loff_t const holelen)
85374@@ -1184,9 +1190,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
85375 }
85376 #endif
85377
85378-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
85379-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
85380- void *buf, int len, int write);
85381+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
85382+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
85383+ void *buf, size_t len, int write);
85384
85385 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
85386 unsigned long start, unsigned long nr_pages,
85387@@ -1219,34 +1225,6 @@ int set_page_dirty_lock(struct page *page);
85388 int clear_page_dirty_for_io(struct page *page);
85389 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
85390
85391-/* Is the vma a continuation of the stack vma above it? */
85392-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
85393-{
85394- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
85395-}
85396-
85397-static inline int stack_guard_page_start(struct vm_area_struct *vma,
85398- unsigned long addr)
85399-{
85400- return (vma->vm_flags & VM_GROWSDOWN) &&
85401- (vma->vm_start == addr) &&
85402- !vma_growsdown(vma->vm_prev, addr);
85403-}
85404-
85405-/* Is the vma a continuation of the stack vma below it? */
85406-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
85407-{
85408- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
85409-}
85410-
85411-static inline int stack_guard_page_end(struct vm_area_struct *vma,
85412- unsigned long addr)
85413-{
85414- return (vma->vm_flags & VM_GROWSUP) &&
85415- (vma->vm_end == addr) &&
85416- !vma_growsup(vma->vm_next, addr);
85417-}
85418-
85419 extern pid_t
85420 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
85421
85422@@ -1346,6 +1324,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
85423 }
85424 #endif
85425
85426+#ifdef CONFIG_MMU
85427+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
85428+#else
85429+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
85430+{
85431+ return __pgprot(0);
85432+}
85433+#endif
85434+
85435 int vma_wants_writenotify(struct vm_area_struct *vma);
85436
85437 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
85438@@ -1364,8 +1351,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
85439 {
85440 return 0;
85441 }
85442+
85443+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
85444+ unsigned long address)
85445+{
85446+ return 0;
85447+}
85448 #else
85449 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
85450+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
85451 #endif
85452
85453 #ifdef __PAGETABLE_PMD_FOLDED
85454@@ -1374,8 +1368,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
85455 {
85456 return 0;
85457 }
85458+
85459+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
85460+ unsigned long address)
85461+{
85462+ return 0;
85463+}
85464 #else
85465 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
85466+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
85467 #endif
85468
85469 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
85470@@ -1393,11 +1394,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
85471 NULL: pud_offset(pgd, address);
85472 }
85473
85474+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
85475+{
85476+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
85477+ NULL: pud_offset(pgd, address);
85478+}
85479+
85480 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
85481 {
85482 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
85483 NULL: pmd_offset(pud, address);
85484 }
85485+
85486+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
85487+{
85488+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
85489+ NULL: pmd_offset(pud, address);
85490+}
85491 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
85492
85493 #if USE_SPLIT_PTE_PTLOCKS
85494@@ -1796,7 +1809,7 @@ extern int install_special_mapping(struct mm_struct *mm,
85495 unsigned long addr, unsigned long len,
85496 unsigned long flags, struct page **pages);
85497
85498-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
85499+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
85500
85501 extern unsigned long mmap_region(struct file *file, unsigned long addr,
85502 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
85503@@ -1804,6 +1817,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
85504 unsigned long len, unsigned long prot, unsigned long flags,
85505 unsigned long pgoff, unsigned long *populate);
85506 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
85507+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
85508
85509 #ifdef CONFIG_MMU
85510 extern int __mm_populate(unsigned long addr, unsigned long len,
85511@@ -1832,10 +1846,11 @@ struct vm_unmapped_area_info {
85512 unsigned long high_limit;
85513 unsigned long align_mask;
85514 unsigned long align_offset;
85515+ unsigned long threadstack_offset;
85516 };
85517
85518-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
85519-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
85520+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
85521+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
85522
85523 /*
85524 * Search for an unmapped address range.
85525@@ -1847,7 +1862,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
85526 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
85527 */
85528 static inline unsigned long
85529-vm_unmapped_area(struct vm_unmapped_area_info *info)
85530+vm_unmapped_area(const struct vm_unmapped_area_info *info)
85531 {
85532 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
85533 return unmapped_area(info);
85534@@ -1909,6 +1924,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
85535 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
85536 struct vm_area_struct **pprev);
85537
85538+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
85539+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
85540+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
85541+
85542 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
85543 NULL if none. Assume start_addr < end_addr. */
85544 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
85545@@ -1937,15 +1956,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
85546 return vma;
85547 }
85548
85549-#ifdef CONFIG_MMU
85550-pgprot_t vm_get_page_prot(unsigned long vm_flags);
85551-#else
85552-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
85553-{
85554- return __pgprot(0);
85555-}
85556-#endif
85557-
85558 #ifdef CONFIG_NUMA_BALANCING
85559 unsigned long change_prot_numa(struct vm_area_struct *vma,
85560 unsigned long start, unsigned long end);
85561@@ -1997,6 +2007,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
85562 static inline void vm_stat_account(struct mm_struct *mm,
85563 unsigned long flags, struct file *file, long pages)
85564 {
85565+
85566+#ifdef CONFIG_PAX_RANDMMAP
85567+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
85568+#endif
85569+
85570 mm->total_vm += pages;
85571 }
85572 #endif /* CONFIG_PROC_FS */
85573@@ -2078,7 +2093,7 @@ extern int unpoison_memory(unsigned long pfn);
85574 extern int sysctl_memory_failure_early_kill;
85575 extern int sysctl_memory_failure_recovery;
85576 extern void shake_page(struct page *p, int access);
85577-extern atomic_long_t num_poisoned_pages;
85578+extern atomic_long_unchecked_t num_poisoned_pages;
85579 extern int soft_offline_page(struct page *page, int flags);
85580
85581 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
85582@@ -2113,5 +2128,11 @@ void __init setup_nr_node_ids(void);
85583 static inline void setup_nr_node_ids(void) {}
85584 #endif
85585
85586+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
85587+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
85588+#else
85589+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
85590+#endif
85591+
85592 #endif /* __KERNEL__ */
85593 #endif /* _LINUX_MM_H */
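
The mm.h changes bundle several related moves: access_process_vm() and the vm_operations access hook widen their length arguments from int to size_t/ssize_t; vm_get_page_prot() relocates (and takes vm_flags_t) so PAGEEXEC's special vma->vm_page_prot handling has a single hook point; the stack guard-page helpers go away in favor of PaX's own gap handling; and struct vm_unmapped_area_info gains threadstack_offset, letting arch code keep a randomized gap between new thread stacks and neighboring mappings. A caller of the extended API, with hypothetical values:

/* Illustrative arch helper using the new field. */
static unsigned long example_get_unmapped_area(unsigned long len,
					       unsigned long low,
					       unsigned long high,
					       unsigned long gap)
{
	struct vm_unmapped_area_info info = {
		.flags			= 0,
		.length			= len,
		.low_limit		= low,
		.high_limit		= high,
		.align_mask		= 0,
		.align_offset		= 0,
		.threadstack_offset	= gap,
	};

	return vm_unmapped_area(&info);
}
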
85594diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
85595index 96c5750..15668ba 100644
85596--- a/include/linux/mm_types.h
85597+++ b/include/linux/mm_types.h
85598@@ -308,7 +308,9 @@ struct vm_area_struct {
85599 #ifdef CONFIG_NUMA
85600 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
85601 #endif
85602-};
85603+
85604+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
85605+} __randomize_layout;
85606
85607 struct core_thread {
85608 struct task_struct *task;
85609@@ -454,7 +456,25 @@ struct mm_struct {
85610 bool tlb_flush_pending;
85611 #endif
85612 struct uprobes_state uprobes_state;
85613-};
85614+
85615+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
85616+ unsigned long pax_flags;
85617+#endif
85618+
85619+#ifdef CONFIG_PAX_DLRESOLVE
85620+ unsigned long call_dl_resolve;
85621+#endif
85622+
85623+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
85624+ unsigned long call_syscall;
85625+#endif
85626+
85627+#ifdef CONFIG_PAX_ASLR
85628+ unsigned long delta_mmap; /* randomized offset */
85629+ unsigned long delta_stack; /* randomized offset */
85630+#endif
85631+
85632+} __randomize_layout;
85633
85634 static inline void mm_init_cpumask(struct mm_struct *mm)
85635 {
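
mm_struct picks up the per-process PaX state: pax_flags carries the MF_PAX_* bits derived at exec time from the ELF header or extended attributes, and delta_mmap/delta_stack hold that exec's ASLR offsets for the arch mmap code; both structures are also opted into struct layout randomization. The vm_stat_account() hunk in mm.h above shows how the flags are consumed; reduced to a predicate:

/* Mirrors the vm_stat_account() logic above: with RANDMMAP active,
 * pure randomization gaps (no VM_MAY* rights) are not accounted. */
static int example_counts_toward_total_vm(struct mm_struct *mm,
					  unsigned long flags)
{
#ifdef CONFIG_PAX_RANDMMAP
	if ((mm->pax_flags & MF_PAX_RANDMMAP) &&
	    !(flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
		return 0;
#endif
	return 1;
}
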
85636diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
85637index c5d5278..f0b68c8 100644
85638--- a/include/linux/mmiotrace.h
85639+++ b/include/linux/mmiotrace.h
85640@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
85641 /* Called from ioremap.c */
85642 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
85643 void __iomem *addr);
85644-extern void mmiotrace_iounmap(volatile void __iomem *addr);
85645+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
85646
85647 /* For anyone to insert markers. Remember trailing newline. */
85648 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
85649@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
85650 {
85651 }
85652
85653-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
85654+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
85655 {
85656 }
85657
85658diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
85659index 6cbd1b6..b1d2f99 100644
85660--- a/include/linux/mmzone.h
85661+++ b/include/linux/mmzone.h
85662@@ -412,7 +412,7 @@ struct zone {
85663 unsigned long flags; /* zone flags, see below */
85664
85665 /* Zone statistics */
85666- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
85667+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
85668
85669 /*
85670 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
85671diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
85672index 44eeef0..a92d3f9 100644
85673--- a/include/linux/mod_devicetable.h
85674+++ b/include/linux/mod_devicetable.h
85675@@ -139,7 +139,7 @@ struct usb_device_id {
85676 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
85677 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
85678
85679-#define HID_ANY_ID (~0)
85680+#define HID_ANY_ID (~0U)
85681 #define HID_BUS_ANY 0xffff
85682 #define HID_GROUP_ANY 0x0000
85683
85684@@ -475,7 +475,7 @@ struct dmi_system_id {
85685 const char *ident;
85686 struct dmi_strmatch matches[4];
85687 void *driver_data;
85688-};
85689+} __do_const;
85690 /*
85691 * struct dmi_device_id appears during expansion of
85692 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
85693diff --git a/include/linux/module.h b/include/linux/module.h
85694index f520a76..5f898ef 100644
85695--- a/include/linux/module.h
85696+++ b/include/linux/module.h
85697@@ -17,9 +17,11 @@
85698 #include <linux/moduleparam.h>
85699 #include <linux/jump_label.h>
85700 #include <linux/export.h>
85701+#include <linux/fs.h>
85702
85703 #include <linux/percpu.h>
85704 #include <asm/module.h>
85705+#include <asm/pgtable.h>
85706
85707 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
85708 #define MODULE_SIG_STRING "~Module signature appended~\n"
85709@@ -42,7 +44,7 @@ struct module_kobject {
85710 struct kobject *drivers_dir;
85711 struct module_param_attrs *mp;
85712 struct completion *kobj_completion;
85713-};
85714+} __randomize_layout;
85715
85716 struct module_attribute {
85717 struct attribute attr;
85718@@ -54,12 +56,13 @@ struct module_attribute {
85719 int (*test)(struct module *);
85720 void (*free)(struct module *);
85721 };
85722+typedef struct module_attribute __no_const module_attribute_no_const;
85723
85724 struct module_version_attribute {
85725 struct module_attribute mattr;
85726 const char *module_name;
85727 const char *version;
85728-} __attribute__ ((__aligned__(sizeof(void *))));
85729+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
85730
85731 extern ssize_t __modver_version_show(struct module_attribute *,
85732 struct module_kobject *, char *);
85733@@ -235,7 +238,7 @@ struct module {
85734
85735 /* Sysfs stuff. */
85736 struct module_kobject mkobj;
85737- struct module_attribute *modinfo_attrs;
85738+ module_attribute_no_const *modinfo_attrs;
85739 const char *version;
85740 const char *srcversion;
85741 struct kobject *holders_dir;
85742@@ -284,19 +287,16 @@ struct module {
85743 int (*init)(void);
85744
85745 /* If this is non-NULL, vfree after init() returns */
85746- void *module_init;
85747+ void *module_init_rx, *module_init_rw;
85748
85749 /* Here is the actual code + data, vfree'd on unload. */
85750- void *module_core;
85751+ void *module_core_rx, *module_core_rw;
85752
85753 /* Here are the sizes of the init and core sections */
85754- unsigned int init_size, core_size;
85755+ unsigned int init_size_rw, core_size_rw;
85756
85757 /* The size of the executable code in each section. */
85758- unsigned int init_text_size, core_text_size;
85759-
85760- /* Size of RO sections of the module (text+rodata) */
85761- unsigned int init_ro_size, core_ro_size;
85762+ unsigned int init_size_rx, core_size_rx;
85763
85764 /* Arch-specific module values */
85765 struct mod_arch_specific arch;
85766@@ -352,6 +352,10 @@ struct module {
85767 #ifdef CONFIG_EVENT_TRACING
85768 struct ftrace_event_call **trace_events;
85769 unsigned int num_trace_events;
85770+ struct file_operations trace_id;
85771+ struct file_operations trace_enable;
85772+ struct file_operations trace_format;
85773+ struct file_operations trace_filter;
85774 #endif
85775 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
85776 unsigned int num_ftrace_callsites;
85777@@ -375,7 +379,7 @@ struct module {
85778 ctor_fn_t *ctors;
85779 unsigned int num_ctors;
85780 #endif
85781-};
85782+} __randomize_layout;
85783 #ifndef MODULE_ARCH_INIT
85784 #define MODULE_ARCH_INIT {}
85785 #endif
85786@@ -396,16 +400,46 @@ bool is_module_address(unsigned long addr);
85787 bool is_module_percpu_address(unsigned long addr);
85788 bool is_module_text_address(unsigned long addr);
85789
85790+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
85791+{
85792+
85793+#ifdef CONFIG_PAX_KERNEXEC
85794+ if (ktla_ktva(addr) >= (unsigned long)start &&
85795+ ktla_ktva(addr) < (unsigned long)start + size)
85796+ return 1;
85797+#endif
85798+
85799+ return ((void *)addr >= start && (void *)addr < start + size);
85800+}
85801+
85802+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
85803+{
85804+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
85805+}
85806+
85807+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
85808+{
85809+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
85810+}
85811+
85812+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
85813+{
85814+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
85815+}
85816+
85817+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
85818+{
85819+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
85820+}
85821+
85822 static inline int within_module_core(unsigned long addr, const struct module *mod)
85823 {
85824- return (unsigned long)mod->module_core <= addr &&
85825- addr < (unsigned long)mod->module_core + mod->core_size;
85826+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
85827 }
85828
85829 static inline int within_module_init(unsigned long addr, const struct module *mod)
85830 {
85831- return (unsigned long)mod->module_init <= addr &&
85832- addr < (unsigned long)mod->module_init + mod->init_size;
85833+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
85834 }
85835
85836 /* Search for module by name: must hold module_mutex. */
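
Under KERNEXEC the single module_init/module_core allocations split into rx (text, mapped read-only and executable) and rw (data, mapped non-executable) halves, and the old size fields are reshaped to match. The within_module_*() helpers are rebuilt on the new fields, with ktla_ktva() folding in the kernel-text alias KERNEXEC introduces on i386, while existing consumers keep the combined predicates:

/* Illustrative consumer: symbol resolution only needs the combined
 * check, which now ORs the rx and rw ranges. */
static int example_addr_in_module(unsigned long addr,
				  const struct module *mod)
{
	return within_module_core(addr, mod) ||
	       within_module_init(addr, mod);
}
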
85837diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
85838index 560ca53..ef621ef 100644
85839--- a/include/linux/moduleloader.h
85840+++ b/include/linux/moduleloader.h
85841@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
85842 sections. Returns NULL on failure. */
85843 void *module_alloc(unsigned long size);
85844
85845+#ifdef CONFIG_PAX_KERNEXEC
85846+void *module_alloc_exec(unsigned long size);
85847+#else
85848+#define module_alloc_exec(x) module_alloc(x)
85849+#endif
85850+
85851 /* Free memory returned from module_alloc. */
85852 void module_free(struct module *mod, void *module_region);
85853
85854+#ifdef CONFIG_PAX_KERNEXEC
85855+void module_free_exec(struct module *mod, void *module_region);
85856+#else
85857+#define module_free_exec(x, y) module_free((x), (y))
85858+#endif
85859+
85860 /*
85861 * Apply the given relocation to the (simplified) ELF. Return -error
85862 * or 0.
85863@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
85864 unsigned int relsec,
85865 struct module *me)
85866 {
85867+#ifdef CONFIG_MODULES
85868 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
85869+#endif
85870 return -ENOEXEC;
85871 }
85872 #endif
85873@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
85874 unsigned int relsec,
85875 struct module *me)
85876 {
85877+#ifdef CONFIG_MODULES
85878 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
85879+#endif
85880 return -ENOEXEC;
85881 }
85882 #endif
85883diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
85884index b1990c5..2a6e611 100644
85885--- a/include/linux/moduleparam.h
85886+++ b/include/linux/moduleparam.h
85887@@ -293,7 +293,7 @@ static inline void __kernel_param_unlock(void)
85888 * @len is usually just sizeof(string).
85889 */
85890 #define module_param_string(name, string, len, perm) \
85891- static const struct kparam_string __param_string_##name \
85892+ static const struct kparam_string __param_string_##name __used \
85893 = { len, string }; \
85894 __module_param_call(MODULE_PARAM_PREFIX, name, \
85895 &param_ops_string, \
85896@@ -432,7 +432,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
85897 */
85898 #define module_param_array_named(name, array, type, nump, perm) \
85899 param_check_##type(name, &(array)[0]); \
85900- static const struct kparam_array __param_arr_##name \
85901+ static const struct kparam_array __param_arr_##name __used \
85902 = { .max = ARRAY_SIZE(array), .num = nump, \
85903 .ops = &param_ops_##type, \
85904 .elemsize = sizeof(array[0]), .elem = array }; \
85905diff --git a/include/linux/mount.h b/include/linux/mount.h
85906index b0c1e65..fd6baf1 100644
85907--- a/include/linux/mount.h
85908+++ b/include/linux/mount.h
85909@@ -66,7 +66,7 @@ struct vfsmount {
85910 struct dentry *mnt_root; /* root of the mounted tree */
85911 struct super_block *mnt_sb; /* pointer to superblock */
85912 int mnt_flags;
85913-};
85914+} __randomize_layout;
85915
85916 struct file; /* forward dec */
85917
85918diff --git a/include/linux/namei.h b/include/linux/namei.h
85919index 492de72..1bddcd4 100644
85920--- a/include/linux/namei.h
85921+++ b/include/linux/namei.h
85922@@ -19,7 +19,7 @@ struct nameidata {
85923 unsigned seq, m_seq;
85924 int last_type;
85925 unsigned depth;
85926- char *saved_names[MAX_NESTED_LINKS + 1];
85927+ const char *saved_names[MAX_NESTED_LINKS + 1];
85928 };
85929
85930 /*
85931@@ -83,12 +83,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
85932
85933 extern void nd_jump_link(struct nameidata *nd, struct path *path);
85934
85935-static inline void nd_set_link(struct nameidata *nd, char *path)
85936+static inline void nd_set_link(struct nameidata *nd, const char *path)
85937 {
85938 nd->saved_names[nd->depth] = path;
85939 }
85940
85941-static inline char *nd_get_link(struct nameidata *nd)
85942+static inline const char *nd_get_link(const struct nameidata *nd)
85943 {
85944 return nd->saved_names[nd->depth];
85945 }
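Constifying the saved-link slots lets filesystems hand read-only strings to the VFS without casting away const. A hedged sketch of a 3.16-era ->follow_link() written against the new signatures (foo_get_target() is a hypothetical helper):

static void *foo_follow_link(struct dentry *dentry, struct nameidata *nd)
{
        const char *target = foo_get_target(dentry);  /* hypothetical helper */

        nd_set_link(nd, target);  /* accepts const char * after this hunk */
        return (void *)target;    /* cookie handed back to ->put_link() */
}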
85946diff --git a/include/linux/net.h b/include/linux/net.h
85947index 17d8339..81656c0 100644
85948--- a/include/linux/net.h
85949+++ b/include/linux/net.h
85950@@ -192,7 +192,7 @@ struct net_proto_family {
85951 int (*create)(struct net *net, struct socket *sock,
85952 int protocol, int kern);
85953 struct module *owner;
85954-};
85955+} __do_const;
85956
85957 struct iovec;
85958 struct kvec;
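__do_const and the *_no_const typedefs recur through the remaining header hunks: ops tables that are only ever statically initialized get forced into read-only memory by the constify gcc plugin, while the typedef leaves an escape hatch for the few instances built at runtime. A sketch of what the annotations are assumed to expand to (modeled on grsecurity's compiler.h conventions, which are outside this excerpt):

#ifdef CONSTIFY_PLUGIN
#define __do_const __attribute__((do_const))  /* plugin: force instances const */
#define __no_const __attribute__((no_const))  /* opt-out for runtime-filled copies */
#else
#define __do_const
#define __no_const
#endif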
85959diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
85960index 66f9a04..056078d 100644
85961--- a/include/linux/netdevice.h
85962+++ b/include/linux/netdevice.h
85963@@ -1145,6 +1145,7 @@ struct net_device_ops {
85964 void *priv);
85965 int (*ndo_get_lock_subclass)(struct net_device *dev);
85966 };
85967+typedef struct net_device_ops __no_const net_device_ops_no_const;
85968
85969 /**
85970 * enum net_device_priv_flags - &struct net_device priv_flags
85971@@ -1312,11 +1313,11 @@ struct net_device {
85972 struct net_device_stats stats;
85973
85974 /* dropped packets by core network, Do not use this in drivers */
85975- atomic_long_t rx_dropped;
85976- atomic_long_t tx_dropped;
85977+ atomic_long_unchecked_t rx_dropped;
85978+ atomic_long_unchecked_t tx_dropped;
85979
85980 /* Stats to monitor carrier on<->off transitions */
85981- atomic_t carrier_changes;
85982+ atomic_unchecked_t carrier_changes;
85983
85984 #ifdef CONFIG_WIRELESS_EXT
85985 /* List of functions to handle Wireless Extensions (instead of ioctl).
85986diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
85987index 2077489..a15e561 100644
85988--- a/include/linux/netfilter.h
85989+++ b/include/linux/netfilter.h
85990@@ -84,7 +84,7 @@ struct nf_sockopt_ops {
85991 #endif
85992 /* Use the module struct to lock set/get code in place */
85993 struct module *owner;
85994-};
85995+} __do_const;
85996
85997 /* Function to register/unregister hook points. */
85998 int nf_register_hook(struct nf_hook_ops *reg);
85999diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
86000index e955d47..04a5338 100644
86001--- a/include/linux/netfilter/nfnetlink.h
86002+++ b/include/linux/netfilter/nfnetlink.h
86003@@ -19,7 +19,7 @@ struct nfnl_callback {
86004 const struct nlattr * const cda[]);
86005 const struct nla_policy *policy; /* netlink attribute policy */
86006 const u_int16_t attr_count; /* number of nlattr's */
86007-};
86008+} __do_const;
86009
86010 struct nfnetlink_subsystem {
86011 const char *name;
86012diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
86013new file mode 100644
86014index 0000000..33f4af8
86015--- /dev/null
86016+++ b/include/linux/netfilter/xt_gradm.h
86017@@ -0,0 +1,9 @@
86018+#ifndef _LINUX_NETFILTER_XT_GRADM_H
86019+#define _LINUX_NETFILTER_XT_GRADM_H 1
86020+
86021+struct xt_gradm_mtinfo {
86022+ __u16 flags;
86023+ __u16 invflags;
86024+};
86025+
86026+#endif
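The new header only carries the user/kernel ABI of the gradm match. Judging from the flags/invflags pair, the kernel side presumably reduces to testing whether the grsecurity RBAC system is active, with invflags inverting the result; a sketch under that assumption (gradm_mt() and gr_acl_is_enabled() are assumptions, not shown in this excerpt):

static bool gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
        const struct xt_gradm_mtinfo *info = par->matchinfo;
        bool enabled = gr_acl_is_enabled();  /* assumed RBAC predicate */

        return enabled ^ !!info->invflags;   /* invflags flips the match */
}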
86027diff --git a/include/linux/nls.h b/include/linux/nls.h
86028index 520681b..2b7fabb 100644
86029--- a/include/linux/nls.h
86030+++ b/include/linux/nls.h
86031@@ -31,7 +31,7 @@ struct nls_table {
86032 const unsigned char *charset2upper;
86033 struct module *owner;
86034 struct nls_table *next;
86035-};
86036+} __do_const;
86037
86038 /* this value hold the maximum octet of charset */
86039 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
86040@@ -46,7 +46,7 @@ enum utf16_endian {
86041 /* nls_base.c */
86042 extern int __register_nls(struct nls_table *, struct module *);
86043 extern int unregister_nls(struct nls_table *);
86044-extern struct nls_table *load_nls(char *);
86045+extern struct nls_table *load_nls(const char *);
86046 extern void unload_nls(struct nls_table *);
86047 extern struct nls_table *load_nls_default(void);
86048 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
86049diff --git a/include/linux/notifier.h b/include/linux/notifier.h
86050index d14a4c3..a078786 100644
86051--- a/include/linux/notifier.h
86052+++ b/include/linux/notifier.h
86053@@ -54,7 +54,8 @@ struct notifier_block {
86054 notifier_fn_t notifier_call;
86055 struct notifier_block __rcu *next;
86056 int priority;
86057-};
86058+} __do_const;
86059+typedef struct notifier_block __no_const notifier_block_no_const;
86060
86061 struct atomic_notifier_head {
86062 spinlock_t lock;
86063diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
86064index b2a0f15..4d7da32 100644
86065--- a/include/linux/oprofile.h
86066+++ b/include/linux/oprofile.h
86067@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
86068 int oprofilefs_create_ro_ulong(struct dentry * root,
86069 char const * name, ulong * val);
86070
86071-/** Create a file for read-only access to an atomic_t. */
86072+/** Create a file for read-only access to an atomic_unchecked_t. */
86073 int oprofilefs_create_ro_atomic(struct dentry * root,
86074- char const * name, atomic_t * val);
86075+ char const * name, atomic_unchecked_t * val);
86076
86077 /** create a directory */
86078 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
86079diff --git a/include/linux/padata.h b/include/linux/padata.h
86080index 4386946..f50c615 100644
86081--- a/include/linux/padata.h
86082+++ b/include/linux/padata.h
86083@@ -129,7 +129,7 @@ struct parallel_data {
86084 struct padata_serial_queue __percpu *squeue;
86085 atomic_t reorder_objects;
86086 atomic_t refcnt;
86087- atomic_t seq_nr;
86088+ atomic_unchecked_t seq_nr;
86089 struct padata_cpumask cpumask;
86090 spinlock_t lock ____cacheline_aligned;
86091 unsigned int processed;
86092diff --git a/include/linux/path.h b/include/linux/path.h
86093index d137218..be0c176 100644
86094--- a/include/linux/path.h
86095+++ b/include/linux/path.h
86096@@ -1,13 +1,15 @@
86097 #ifndef _LINUX_PATH_H
86098 #define _LINUX_PATH_H
86099
86100+#include <linux/compiler.h>
86101+
86102 struct dentry;
86103 struct vfsmount;
86104
86105 struct path {
86106 struct vfsmount *mnt;
86107 struct dentry *dentry;
86108-};
86109+} __randomize_layout;
86110
86111 extern void path_get(const struct path *);
86112 extern void path_put(const struct path *);
86113diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
86114index 5f2e559..7d59314 100644
86115--- a/include/linux/pci_hotplug.h
86116+++ b/include/linux/pci_hotplug.h
86117@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
86118 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
86119 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
86120 int (*reset_slot) (struct hotplug_slot *slot, int probe);
86121-};
86122+} __do_const;
86123+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
86124
86125 /**
86126 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
86127diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
86128index 707617a..28a2e7e 100644
86129--- a/include/linux/perf_event.h
86130+++ b/include/linux/perf_event.h
86131@@ -339,8 +339,8 @@ struct perf_event {
86132
86133 enum perf_event_active_state state;
86134 unsigned int attach_state;
86135- local64_t count;
86136- atomic64_t child_count;
86137+ local64_t count; /* PaX: fix it one day */
86138+ atomic64_unchecked_t child_count;
86139
86140 /*
86141 * These are the total time in nanoseconds that the event
86142@@ -391,8 +391,8 @@ struct perf_event {
86143 * These accumulate total time (in nanoseconds) that children
86144 * events have been enabled and running, respectively.
86145 */
86146- atomic64_t child_total_time_enabled;
86147- atomic64_t child_total_time_running;
86148+ atomic64_unchecked_t child_total_time_enabled;
86149+ atomic64_unchecked_t child_total_time_running;
86150
86151 /*
86152 * Protect attach/detach and child_list:
86153@@ -722,7 +722,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
86154 entry->ip[entry->nr++] = ip;
86155 }
86156
86157-extern int sysctl_perf_event_paranoid;
86158+extern int sysctl_perf_event_legitimately_concerned;
86159 extern int sysctl_perf_event_mlock;
86160 extern int sysctl_perf_event_sample_rate;
86161 extern int sysctl_perf_cpu_time_max_percent;
86162@@ -737,19 +737,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
86163 loff_t *ppos);
86164
86165
86166+static inline bool perf_paranoid_any(void)
86167+{
86168+ return sysctl_perf_event_legitimately_concerned > 2;
86169+}
86170+
86171 static inline bool perf_paranoid_tracepoint_raw(void)
86172 {
86173- return sysctl_perf_event_paranoid > -1;
86174+ return sysctl_perf_event_legitimately_concerned > -1;
86175 }
86176
86177 static inline bool perf_paranoid_cpu(void)
86178 {
86179- return sysctl_perf_event_paranoid > 0;
86180+ return sysctl_perf_event_legitimately_concerned > 0;
86181 }
86182
86183 static inline bool perf_paranoid_kernel(void)
86184 {
86185- return sysctl_perf_event_paranoid > 1;
86186+ return sysctl_perf_event_legitimately_concerned > 1;
86187 }
86188
86189 extern void perf_event_init(void);
86190@@ -880,7 +885,7 @@ struct perf_pmu_events_attr {
86191 struct device_attribute attr;
86192 u64 id;
86193 const char *event_str;
86194-};
86195+} __do_const;
86196
86197 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
86198 static struct perf_pmu_events_attr _var = { \
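Beyond the sysctl rename, the functional addition is perf_paranoid_any() for a new top restriction level. The resulting ladder, read off the inline helpers above (level 3 is the grsecurity addition):

/* kernel.perf_event_paranoid after this hunk:
 *   <= -1  unprivileged raw tracepoint access allowed
 *   <=  0  unprivileged CPU-wide event monitoring allowed
 *   <=  1  unprivileged kernel-side profiling allowed
 *   ==  2  unprivileged users limited to user-space profiling
 *    >  2  perf_paranoid_any(): unprivileged perf denied outright
 */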
86199diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
86200index 7246ef3..1539ea4 100644
86201--- a/include/linux/pid_namespace.h
86202+++ b/include/linux/pid_namespace.h
86203@@ -43,7 +43,7 @@ struct pid_namespace {
86204 int hide_pid;
86205 int reboot; /* group exit code if this pidns was rebooted */
86206 unsigned int proc_inum;
86207-};
86208+} __randomize_layout;
86209
86210 extern struct pid_namespace init_pid_ns;
86211
86212diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
86213index eb8b8ac..62649e1 100644
86214--- a/include/linux/pipe_fs_i.h
86215+++ b/include/linux/pipe_fs_i.h
86216@@ -47,10 +47,10 @@ struct pipe_inode_info {
86217 struct mutex mutex;
86218 wait_queue_head_t wait;
86219 unsigned int nrbufs, curbuf, buffers;
86220- unsigned int readers;
86221- unsigned int writers;
86222- unsigned int files;
86223- unsigned int waiting_writers;
86224+ atomic_t readers;
86225+ atomic_t writers;
86226+ atomic_t files;
86227+ atomic_t waiting_writers;
86228 unsigned int r_counter;
86229 unsigned int w_counter;
86230 struct page *tmp_page;
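Converting the reader/writer/file counters to atomic_t implies every manipulation site changes with them; the fs/pipe.c hunks appear elsewhere in this patch, and the shape of the conversion is:

atomic_inc(&pipe->readers);         /* was: pipe->readers++;    */
if (!atomic_read(&pipe->writers))   /* was: if (!pipe->writers) */
        wake_up_interruptible(&pipe->wait);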
86231diff --git a/include/linux/pm.h b/include/linux/pm.h
86232index 72c0fe0..26918ed 100644
86233--- a/include/linux/pm.h
86234+++ b/include/linux/pm.h
86235@@ -620,6 +620,7 @@ extern int dev_pm_put_subsys_data(struct device *dev);
86236 struct dev_pm_domain {
86237 struct dev_pm_ops ops;
86238 };
86239+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
86240
86241 /*
86242 * The PM_EVENT_ messages are also used by drivers implementing the legacy
86243diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
86244index 7c1d252..0e7061d 100644
86245--- a/include/linux/pm_domain.h
86246+++ b/include/linux/pm_domain.h
86247@@ -44,11 +44,11 @@ struct gpd_dev_ops {
86248 int (*thaw_early)(struct device *dev);
86249 int (*thaw)(struct device *dev);
86250 bool (*active_wakeup)(struct device *dev);
86251-};
86252+} __no_const;
86253
86254 struct gpd_cpu_data {
86255 unsigned int saved_exit_latency;
86256- struct cpuidle_state *idle_state;
86257+ cpuidle_state_no_const *idle_state;
86258 };
86259
86260 struct generic_pm_domain {
86261diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
86262index 43fd671..08c96ee 100644
86263--- a/include/linux/pm_runtime.h
86264+++ b/include/linux/pm_runtime.h
86265@@ -118,7 +118,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
86266
86267 static inline void pm_runtime_mark_last_busy(struct device *dev)
86268 {
86269- ACCESS_ONCE(dev->power.last_busy) = jiffies;
86270+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
86271 }
86272
86273 #else /* !CONFIG_PM_RUNTIME */
86274diff --git a/include/linux/pnp.h b/include/linux/pnp.h
86275index 195aafc..49a7bc2 100644
86276--- a/include/linux/pnp.h
86277+++ b/include/linux/pnp.h
86278@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
86279 struct pnp_fixup {
86280 char id[7];
86281 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
86282-};
86283+} __do_const;
86284
86285 /* config parameters */
86286 #define PNP_CONFIG_NORMAL 0x0001
86287diff --git a/include/linux/poison.h b/include/linux/poison.h
86288index 2110a81..13a11bb 100644
86289--- a/include/linux/poison.h
86290+++ b/include/linux/poison.h
86291@@ -19,8 +19,8 @@
86292 * under normal circumstances, used to verify that nobody uses
86293 * non-initialized list entries.
86294 */
86295-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
86296-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
86297+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
86298+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
86299
86300 /********** include/linux/timer.h **********/
86301 /*
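The replaced constants drop the POISON_POINTER_DELTA offset in favor of fixed values near the top of the 32-bit range; the role of the poison is unchanged, as the rculist.h hunk further down shows:

/* list_del() leaves the poison behind, so any later traversal through the
 * stale node dereferences 0xFFFFFF01/0xFFFFFF02 and faults immediately: */
entry->next = LIST_POISON1;
entry->prev = LIST_POISON2;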
86302diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
86303index d8b187c3..9a9257a 100644
86304--- a/include/linux/power/smartreflex.h
86305+++ b/include/linux/power/smartreflex.h
86306@@ -238,7 +238,7 @@ struct omap_sr_class_data {
86307 int (*notify)(struct omap_sr *sr, u32 status);
86308 u8 notify_flags;
86309 u8 class_type;
86310-};
86311+} __do_const;
86312
86313 /**
86314 * struct omap_sr_nvalue_table - Smartreflex n-target value info
86315diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
86316index 4ea1d37..80f4b33 100644
86317--- a/include/linux/ppp-comp.h
86318+++ b/include/linux/ppp-comp.h
86319@@ -84,7 +84,7 @@ struct compressor {
86320 struct module *owner;
86321 /* Extra skb space needed by the compressor algorithm */
86322 unsigned int comp_extra;
86323-};
86324+} __do_const;
86325
86326 /*
86327 * The return value from decompress routine is the length of the
86328diff --git a/include/linux/preempt.h b/include/linux/preempt.h
86329index de83b4e..c4b997d 100644
86330--- a/include/linux/preempt.h
86331+++ b/include/linux/preempt.h
86332@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
86333 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
86334 #endif
86335
86336+#define raw_preempt_count_add(val) __preempt_count_add(val)
86337+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
86338+
86339 #define __preempt_count_inc() __preempt_count_add(1)
86340 #define __preempt_count_dec() __preempt_count_sub(1)
86341
86342 #define preempt_count_inc() preempt_count_add(1)
86343+#define raw_preempt_count_inc() raw_preempt_count_add(1)
86344 #define preempt_count_dec() preempt_count_sub(1)
86345+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
86346
86347 #ifdef CONFIG_PREEMPT_COUNT
86348
86349@@ -41,6 +46,12 @@ do { \
86350 barrier(); \
86351 } while (0)
86352
86353+#define raw_preempt_disable() \
86354+do { \
86355+ raw_preempt_count_inc(); \
86356+ barrier(); \
86357+} while (0)
86358+
86359 #define sched_preempt_enable_no_resched() \
86360 do { \
86361 barrier(); \
86362@@ -49,6 +60,12 @@ do { \
86363
86364 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
86365
86366+#define raw_preempt_enable_no_resched() \
86367+do { \
86368+ barrier(); \
86369+ raw_preempt_count_dec(); \
86370+} while (0)
86371+
86372 #ifdef CONFIG_PREEMPT
86373 #define preempt_enable() \
86374 do { \
86375@@ -113,8 +130,10 @@ do { \
86376 * region.
86377 */
86378 #define preempt_disable() barrier()
86379+#define raw_preempt_disable() barrier()
86380 #define sched_preempt_enable_no_resched() barrier()
86381 #define preempt_enable_no_resched() barrier()
86382+#define raw_preempt_enable_no_resched() barrier()
86383 #define preempt_enable() barrier()
86384 #define preempt_check_resched() do { } while (0)
86385
86386@@ -128,11 +147,13 @@ do { \
86387 /*
86388 * Modules have no business playing preemption tricks.
86389 */
86390+#ifndef CONFIG_PAX_KERNEXEC
86391 #undef sched_preempt_enable_no_resched
86392 #undef preempt_enable_no_resched
86393 #undef preempt_enable_no_resched_notrace
86394 #undef preempt_check_resched
86395 #endif
86396+#endif
86397
86398 #define preempt_set_need_resched() \
86399 do { \
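The raw_ variants map straight to __preempt_count_add()/__preempt_count_sub(), bypassing the traced preempt_count_add() path, and the new KERNEXEC guard keeps the no_resched family visible to modules, presumably because PaX primitives inlined into module code need them. A hedged sketch of a short, non-traced critical section:

raw_preempt_disable();
/* ... touch per-CPU state with no tracing/instrumentation hooks ... */
raw_preempt_enable_no_resched();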
86400diff --git a/include/linux/printk.h b/include/linux/printk.h
86401index 319ff7e..608849a 100644
86402--- a/include/linux/printk.h
86403+++ b/include/linux/printk.h
86404@@ -121,6 +121,8 @@ static inline __printf(1, 2) __cold
86405 void early_printk(const char *s, ...) { }
86406 #endif
86407
86408+extern int kptr_restrict;
86409+
86410 #ifdef CONFIG_PRINTK
86411 asmlinkage __printf(5, 0)
86412 int vprintk_emit(int facility, int level,
86413@@ -155,7 +157,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
86414
86415 extern int printk_delay_msec;
86416 extern int dmesg_restrict;
86417-extern int kptr_restrict;
86418
86419 extern void wake_up_klogd(void);
86420
86421diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
86422index 9d117f6..d832b31 100644
86423--- a/include/linux/proc_fs.h
86424+++ b/include/linux/proc_fs.h
86425@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
86426 extern struct proc_dir_entry *proc_symlink(const char *,
86427 struct proc_dir_entry *, const char *);
86428 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
86429+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
86430 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
86431 struct proc_dir_entry *, void *);
86432+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
86433+ struct proc_dir_entry *, void *);
86434 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
86435 struct proc_dir_entry *);
86436
86437@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
86438 return proc_create_data(name, mode, parent, proc_fops, NULL);
86439 }
86440
86441+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
86442+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
86443+{
86444+#ifdef CONFIG_GRKERNSEC_PROC_USER
86445+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
86446+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
86447+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
86448+#else
86449+ return proc_create_data(name, mode, parent, proc_fops, NULL);
86450+#endif
86451+}
86452+
86453+
86454 extern void proc_set_size(struct proc_dir_entry *, loff_t);
86455 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
86456 extern void *PDE_DATA(const struct inode *);
86457@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
86458 struct proc_dir_entry *parent,const char *dest) { return NULL;}
86459 static inline struct proc_dir_entry *proc_mkdir(const char *name,
86460 struct proc_dir_entry *parent) {return NULL;}
86461+static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
86462+ struct proc_dir_entry *parent) { return NULL; }
86463 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
86464 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
86465+static inline struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
86466+ umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
86467 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
86468 umode_t mode, struct proc_dir_entry *parent) { return NULL; }
86469 #define proc_create(name, mode, parent, proc_fops) ({NULL;})
86470@@ -77,7 +97,7 @@ static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *p
86471 static inline struct proc_dir_entry *proc_net_mkdir(
86472 struct net *net, const char *name, struct proc_dir_entry *parent)
86473 {
86474- return proc_mkdir_data(name, 0, parent, net);
86475+ return proc_mkdir_data_restrict(name, 0, parent, net);
86476 }
86477
86478 #endif /* _LINUX_PROC_FS_H */
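proc_create_grsec() lets call sites keep their usual mode argument while the effective permissions tighten with whichever GRKERNSEC_PROC_* policy is configured. A usage sketch (entry name and fops are illustrative):

/* 0444 normally; 0400 under PROC_USER; 0440 under PROC_USERGROUP */
pde = proc_create_grsec("foo_stats", 0444, NULL, &foo_proc_fops);
if (!pde)
        return -ENOMEM;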
86479diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
86480index 34a1e10..70f6bde 100644
86481--- a/include/linux/proc_ns.h
86482+++ b/include/linux/proc_ns.h
86483@@ -14,7 +14,7 @@ struct proc_ns_operations {
86484 void (*put)(void *ns);
86485 int (*install)(struct nsproxy *nsproxy, void *ns);
86486 unsigned int (*inum)(void *ns);
86487-};
86488+} __do_const __randomize_layout;
86489
86490 struct proc_ns {
86491 void *ns;
86492diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
86493index 7dfed71..1dc420b 100644
86494--- a/include/linux/ptp_classify.h
86495+++ b/include/linux/ptp_classify.h
86496@@ -23,8 +23,15 @@
86497 #ifndef _PTP_CLASSIFY_H_
86498 #define _PTP_CLASSIFY_H_
86499
86500+#include <linux/if_ether.h>
86501+#include <linux/if_vlan.h>
86502 #include <linux/ip.h>
86503-#include <linux/skbuff.h>
86504+#include <linux/filter.h>
86505+#ifdef __KERNEL__
86506+#include <linux/in.h>
86507+#else
86508+#include <netinet/in.h>
86509+#endif
86510
86511 #define PTP_CLASS_NONE 0x00 /* not a PTP event message */
86512 #define PTP_CLASS_V1 0x01 /* protocol version 1 */
86513@@ -37,7 +44,7 @@
86514 #define PTP_CLASS_PMASK 0xf0 /* mask for the packet type field */
86515
86516 #define PTP_CLASS_V1_IPV4 (PTP_CLASS_V1 | PTP_CLASS_IPV4)
86517-#define PTP_CLASS_V1_IPV6 (PTP_CLASS_V1 | PTP_CLASS_IPV6) /* probably DNE */
86518+#define PTP_CLASS_V1_IPV6 (PTP_CLASS_V1 | PTP_CLASS_IPV6) /*probably DNE*/
86519 #define PTP_CLASS_V2_IPV4 (PTP_CLASS_V2 | PTP_CLASS_IPV4)
86520 #define PTP_CLASS_V2_IPV6 (PTP_CLASS_V2 | PTP_CLASS_IPV6)
86521 #define PTP_CLASS_V2_L2 (PTP_CLASS_V2 | PTP_CLASS_L2)
86522@@ -46,34 +53,88 @@
86523 #define PTP_EV_PORT 319
86524 #define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */
86525
86526+#define OFF_ETYPE 12
86527+#define OFF_IHL 14
86528+#define OFF_FRAG 20
86529+#define OFF_PROTO4 23
86530+#define OFF_NEXT 6
86531+#define OFF_UDP_DST 2
86532+
86533 #define OFF_PTP_SOURCE_UUID 22 /* PTPv1 only */
86534 #define OFF_PTP_SEQUENCE_ID 30
86535 #define OFF_PTP_CONTROL 32 /* PTPv1 only */
86536
86537-/* Below defines should actually be removed at some point in time. */
86538+#define IPV4_HLEN(data) (((struct iphdr *)(data + OFF_IHL))->ihl << 2)
86539+
86540 #define IP6_HLEN 40
86541 #define UDP_HLEN 8
86542-#define OFF_IHL 14
86543+
86544+#define RELOFF_DST4 (ETH_HLEN + OFF_UDP_DST)
86545+#define OFF_DST6 (ETH_HLEN + IP6_HLEN + OFF_UDP_DST)
86546 #define OFF_PTP6 (ETH_HLEN + IP6_HLEN + UDP_HLEN)
86547-#define IPV4_HLEN(data) (((struct iphdr *)(data + OFF_IHL))->ihl << 2)
86548
86549-#if defined(CONFIG_NET_PTP_CLASSIFY)
86550-/**
86551- * ptp_classify_raw - classify a PTP packet
86552- * @skb: buffer
86553- *
86554- * Runs a minimal BPF dissector to classify a network packet to
86555- * determine the PTP class. In case the skb does not contain any
86556- * PTP protocol data, PTP_CLASS_NONE will be returned, otherwise
86557- * PTP_CLASS_V1_IPV{4,6}, PTP_CLASS_V2_IPV{4,6} or
86558- * PTP_CLASS_V2_{L2,VLAN}, depending on the packet content.
86559- */
86560-unsigned int ptp_classify_raw(const struct sk_buff *skb);
86561+#define OP_AND (BPF_ALU | BPF_AND | BPF_K)
86562+#define OP_JEQ (BPF_JMP | BPF_JEQ | BPF_K)
86563+#define OP_JSET (BPF_JMP | BPF_JSET | BPF_K)
86564+#define OP_LDB (BPF_LD | BPF_B | BPF_ABS)
86565+#define OP_LDH (BPF_LD | BPF_H | BPF_ABS)
86566+#define OP_LDHI (BPF_LD | BPF_H | BPF_IND)
86567+#define OP_LDX (BPF_LDX | BPF_B | BPF_MSH)
86568+#define OP_OR (BPF_ALU | BPF_OR | BPF_K)
86569+#define OP_RETA (BPF_RET | BPF_A)
86570+#define OP_RETK (BPF_RET | BPF_K)
86571
86572-void __init ptp_classifier_init(void);
86573-#else
86574-static inline void ptp_classifier_init(void)
86575+static inline int ptp_filter_init(struct sock_filter *f, int len)
86576 {
86577+ if (OP_LDH == f[0].code)
86578+ return sk_chk_filter(f, len);
86579+ else
86580+ return 0;
86581 }
86582+
86583+#define PTP_FILTER \
86584+ {OP_LDH, 0, 0, OFF_ETYPE }, /* */ \
86585+ {OP_JEQ, 0, 12, ETH_P_IP }, /* f goto L20 */ \
86586+ {OP_LDB, 0, 0, OFF_PROTO4 }, /* */ \
86587+ {OP_JEQ, 0, 9, IPPROTO_UDP }, /* f goto L10 */ \
86588+ {OP_LDH, 0, 0, OFF_FRAG }, /* */ \
86589+ {OP_JSET, 7, 0, 0x1fff }, /* t goto L11 */ \
86590+ {OP_LDX, 0, 0, OFF_IHL }, /* */ \
86591+ {OP_LDHI, 0, 0, RELOFF_DST4 }, /* */ \
86592+ {OP_JEQ, 0, 4, PTP_EV_PORT }, /* f goto L12 */ \
86593+ {OP_LDHI, 0, 0, ETH_HLEN + UDP_HLEN }, /* */ \
86594+ {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \
86595+ {OP_OR, 0, 0, PTP_CLASS_IPV4 }, /* */ \
86596+ {OP_RETA, 0, 0, 0 }, /* */ \
86597+/*L1x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, /* */ \
86598+/*L20*/ {OP_JEQ, 0, 9, ETH_P_IPV6 }, /* f goto L40 */ \
86599+ {OP_LDB, 0, 0, ETH_HLEN + OFF_NEXT }, /* */ \
86600+ {OP_JEQ, 0, 6, IPPROTO_UDP }, /* f goto L30 */ \
86601+ {OP_LDH, 0, 0, OFF_DST6 }, /* */ \
86602+ {OP_JEQ, 0, 4, PTP_EV_PORT }, /* f goto L31 */ \
86603+ {OP_LDH, 0, 0, OFF_PTP6 }, /* */ \
86604+ {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \
86605+ {OP_OR, 0, 0, PTP_CLASS_IPV6 }, /* */ \
86606+ {OP_RETA, 0, 0, 0 }, /* */ \
86607+/*L3x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, /* */ \
86608+/*L40*/ {OP_JEQ, 0, 9, ETH_P_8021Q }, /* f goto L50 */ \
86609+ {OP_LDH, 0, 0, OFF_ETYPE + 4 }, /* */ \
86610+ {OP_JEQ, 0, 15, ETH_P_1588 }, /* f goto L60 */ \
86611+ {OP_LDB, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \
86612+ {OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \
86613+ {OP_JEQ, 0, 12, 0 }, /* f goto L6x */ \
86614+ {OP_LDH, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \
86615+ {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \
86616+ {OP_OR, 0, 0, PTP_CLASS_VLAN }, /* */ \
86617+ {OP_RETA, 0, 0, 0 }, /* */ \
86618+/*L50*/ {OP_JEQ, 0, 7, ETH_P_1588 }, /* f goto L61 */ \
86619+ {OP_LDB, 0, 0, ETH_HLEN }, /* */ \
86620+ {OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \
86621+ {OP_JEQ, 0, 4, 0 }, /* f goto L6x */ \
86622+ {OP_LDH, 0, 0, ETH_HLEN }, /* */ \
86623+ {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \
86624+ {OP_OR, 0, 0, PTP_CLASS_L2 }, /* */ \
86625+ {OP_RETA, 0, 0, 0 }, /* */ \
86626+/*L6x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE },
86627+
86628 #endif
86629-#endif /* _PTP_CLASSIFY_H_ */
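This hunk reverts ptp_classify.h to the older open-coded BPF model, dropping the centralized ptp_classify_raw(). Under that model each consumer instantiated and ran the classifier itself, roughly as below (sk_run_filter() was the classic-BPF interpreter entry point of this era; treat the sketch as a reconstruction, not code from this patch):

static struct sock_filter ptp_filter[] = { PTP_FILTER };

/* once, at init: sanity-check the program */
if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter)))
        pr_err("ptp: invalid classifier program\n");

/* per packet: yields a PTP_CLASS_* value or PTP_CLASS_NONE */
unsigned int type = sk_run_filter(skb, ptp_filter);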
86630diff --git a/include/linux/quota.h b/include/linux/quota.h
86631index 0f3c5d3..bc559e3 100644
86632--- a/include/linux/quota.h
86633+++ b/include/linux/quota.h
86634@@ -70,7 +70,7 @@ struct kqid { /* Type in which we store the quota identifier */
86635
86636 extern bool qid_eq(struct kqid left, struct kqid right);
86637 extern bool qid_lt(struct kqid left, struct kqid right);
86638-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
86639+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
86640 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
86641 extern bool qid_valid(struct kqid qid);
86642
86643diff --git a/include/linux/random.h b/include/linux/random.h
86644index 57fbbff..2170304 100644
86645--- a/include/linux/random.h
86646+++ b/include/linux/random.h
86647@@ -9,9 +9,19 @@
86648 #include <uapi/linux/random.h>
86649
86650 extern void add_device_randomness(const void *, unsigned int);
86651+
86652+static inline void add_latent_entropy(void)
86653+{
86654+
86655+#ifdef LATENT_ENTROPY_PLUGIN
86656+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
86657+#endif
86658+
86659+}
86660+
86661 extern void add_input_randomness(unsigned int type, unsigned int code,
86662- unsigned int value);
86663-extern void add_interrupt_randomness(int irq, int irq_flags);
86664+ unsigned int value) __latent_entropy;
86665+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
86666
86667 extern void get_random_bytes(void *buf, int nbytes);
86668 extern void get_random_bytes_arch(void *buf, int nbytes);
86669@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
86670 extern const struct file_operations random_fops, urandom_fops;
86671 #endif
86672
86673-unsigned int get_random_int(void);
86674+unsigned int __intentional_overflow(-1) get_random_int(void);
86675 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
86676
86677-u32 prandom_u32(void);
86678+u32 prandom_u32(void) __intentional_overflow(-1);
86679 void prandom_bytes(void *buf, int nbytes);
86680 void prandom_seed(u32 seed);
86681 void prandom_reseed_late(void);
86682@@ -37,6 +47,11 @@ struct rnd_state {
86683 u32 prandom_u32_state(struct rnd_state *state);
86684 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
86685
86686+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
86687+{
86688+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
86689+}
86690+
86691 /**
86692 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
86693 * @ep_ro: right open interval endpoint
86694@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
86695 *
86696 * Returns: pseudo-random number in interval [0, ep_ro)
86697 */
86698-static inline u32 prandom_u32_max(u32 ep_ro)
86699+static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
86700 {
86701 return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
86702 }
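pax_get_random_long() stitches two 32-bit draws into one long on 64-bit targets and degenerates to a single draw on 32-bit. The multiply-shift mapping in prandom_u32_max() above also deserves a worked value:

/* ep_ro = 100, draw = 0x80000000 (= 2^31):
 *   ((u64)2^31 * 100) >> 32  ==  (2^31 * 100) / 2^32  ==  50
 * i.e. the full 32-bit range is scaled onto [0, 100) with one multiply,
 * no division or modulo. */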
86703diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
86704index fea49b5..2ac22bb 100644
86705--- a/include/linux/rbtree_augmented.h
86706+++ b/include/linux/rbtree_augmented.h
86707@@ -80,7 +80,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
86708 old->rbaugmented = rbcompute(old); \
86709 } \
86710 rbstatic const struct rb_augment_callbacks rbname = { \
86711- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
86712+ .propagate = rbname ## _propagate, \
86713+ .copy = rbname ## _copy, \
86714+ .rotate = rbname ## _rotate \
86715 };
86716
86717
86718diff --git a/include/linux/rculist.h b/include/linux/rculist.h
86719index 8183b46..a388711 100644
86720--- a/include/linux/rculist.h
86721+++ b/include/linux/rculist.h
86722@@ -29,8 +29,8 @@
86723 */
86724 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
86725 {
86726- ACCESS_ONCE(list->next) = list;
86727- ACCESS_ONCE(list->prev) = list;
86728+ ACCESS_ONCE_RW(list->next) = list;
86729+ ACCESS_ONCE_RW(list->prev) = list;
86730 }
86731
86732 /*
86733@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
86734 struct list_head *prev, struct list_head *next);
86735 #endif
86736
86737+void __pax_list_add_rcu(struct list_head *new,
86738+ struct list_head *prev, struct list_head *next);
86739+
86740 /**
86741 * list_add_rcu - add a new entry to rcu-protected list
86742 * @new: new entry to be added
86743@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
86744 __list_add_rcu(new, head, head->next);
86745 }
86746
86747+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
86748+{
86749+ __pax_list_add_rcu(new, head, head->next);
86750+}
86751+
86752 /**
86753 * list_add_tail_rcu - add a new entry to rcu-protected list
86754 * @new: new entry to be added
86755@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
86756 __list_add_rcu(new, head->prev, head);
86757 }
86758
86759+static inline void pax_list_add_tail_rcu(struct list_head *new,
86760+ struct list_head *head)
86761+{
86762+ __pax_list_add_rcu(new, head->prev, head);
86763+}
86764+
86765 /**
86766 * list_del_rcu - deletes entry from list without re-initialization
86767 * @entry: the element to delete from the list.
86768@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
86769 entry->prev = LIST_POISON2;
86770 }
86771
86772+extern void pax_list_del_rcu(struct list_head *entry);
86773+
86774 /**
86775 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
86776 * @n: the element to delete from the hash list.
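The pax_list_* entry points are only declared here. By analogy with the KERNEXEC machinery they presumably perform the same pointer surgery as the plain helpers inside a pax_open_kernel()/pax_close_kernel() window, so that list nodes living in read-only memory can still be linked and unlinked; a sketch under that assumption:

void __pax_list_add_rcu(struct list_head *new,
                        struct list_head *prev, struct list_head *next)
{
        pax_open_kernel();               /* assumed: lift write protection */
        __list_add_rcu(new, prev, next);
        pax_close_kernel();
}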
86777diff --git a/include/linux/reboot.h b/include/linux/reboot.h
86778index 48bf152..d38b785 100644
86779--- a/include/linux/reboot.h
86780+++ b/include/linux/reboot.h
86781@@ -44,9 +44,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
86782 */
86783
86784 extern void migrate_to_reboot_cpu(void);
86785-extern void machine_restart(char *cmd);
86786-extern void machine_halt(void);
86787-extern void machine_power_off(void);
86788+extern void machine_restart(char *cmd) __noreturn;
86789+extern void machine_halt(void) __noreturn;
86790+extern void machine_power_off(void) __noreturn;
86791
86792 extern void machine_shutdown(void);
86793 struct pt_regs;
86794@@ -57,9 +57,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
86795 */
86796
86797 extern void kernel_restart_prepare(char *cmd);
86798-extern void kernel_restart(char *cmd);
86799-extern void kernel_halt(void);
86800-extern void kernel_power_off(void);
86801+extern void kernel_restart(char *cmd) __noreturn;
86802+extern void kernel_halt(void) __noreturn;
86803+extern void kernel_power_off(void) __noreturn;
86804
86805 extern int C_A_D; /* for sysctl */
86806 void ctrl_alt_del(void);
86807@@ -73,7 +73,7 @@ extern int orderly_poweroff(bool force);
86808 * Emergency restart, callable from an interrupt handler.
86809 */
86810
86811-extern void emergency_restart(void);
86812+extern void emergency_restart(void) __noreturn;
86813 #include <asm/emergency-restart.h>
86814
86815 #endif /* _LINUX_REBOOT_H */
86816diff --git a/include/linux/regset.h b/include/linux/regset.h
86817index 8e0c9fe..ac4d221 100644
86818--- a/include/linux/regset.h
86819+++ b/include/linux/regset.h
86820@@ -161,7 +161,8 @@ struct user_regset {
86821 unsigned int align;
86822 unsigned int bias;
86823 unsigned int core_note_type;
86824-};
86825+} __do_const;
86826+typedef struct user_regset __no_const user_regset_no_const;
86827
86828 /**
86829 * struct user_regset_view - available regsets
86830diff --git a/include/linux/relay.h b/include/linux/relay.h
86831index d7c8359..818daf5 100644
86832--- a/include/linux/relay.h
86833+++ b/include/linux/relay.h
86834@@ -157,7 +157,7 @@ struct rchan_callbacks
86835 * The callback should return 0 if successful, negative if not.
86836 */
86837 int (*remove_buf_file)(struct dentry *dentry);
86838-};
86839+} __no_const;
86840
86841 /*
86842 * CONFIG_RELAY kernel API, kernel/relay.c
86843diff --git a/include/linux/rio.h b/include/linux/rio.h
86844index 6bda06f..bf39a9b 100644
86845--- a/include/linux/rio.h
86846+++ b/include/linux/rio.h
86847@@ -358,7 +358,7 @@ struct rio_ops {
86848 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
86849 u64 rstart, u32 size, u32 flags);
86850 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
86851-};
86852+} __no_const;
86853
86854 #define RIO_RESOURCE_MEM 0x00000100
86855 #define RIO_RESOURCE_DOORBELL 0x00000200
86856diff --git a/include/linux/rmap.h b/include/linux/rmap.h
86857index be57450..31cf65e 100644
86858--- a/include/linux/rmap.h
86859+++ b/include/linux/rmap.h
86860@@ -144,8 +144,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
86861 void anon_vma_init(void); /* create anon_vma_cachep */
86862 int anon_vma_prepare(struct vm_area_struct *);
86863 void unlink_anon_vmas(struct vm_area_struct *);
86864-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
86865-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
86866+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
86867+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
86868
86869 static inline void anon_vma_merge(struct vm_area_struct *vma,
86870 struct vm_area_struct *next)
86871diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
86872index a964f72..b475afb 100644
86873--- a/include/linux/scatterlist.h
86874+++ b/include/linux/scatterlist.h
86875@@ -1,6 +1,7 @@
86876 #ifndef _LINUX_SCATTERLIST_H
86877 #define _LINUX_SCATTERLIST_H
86878
86879+#include <linux/sched.h>
86880 #include <linux/string.h>
86881 #include <linux/bug.h>
86882 #include <linux/mm.h>
86883@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
86884 #ifdef CONFIG_DEBUG_SG
86885 BUG_ON(!virt_addr_valid(buf));
86886 #endif
86887+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86888+ if (object_starts_on_stack(buf)) {
86889+ void *adjbuf = buf - current->stack + current->lowmem_stack;
86890+ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
86891+ } else
86892+#endif
86893 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
86894 }
86895
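Under GRKERNSEC_KSTACKOVERFLOW the task stack is not part of the linear mapping, so virt_to_page() on a stack buffer would resolve to the wrong page; lowmem_stack (added to task_struct in the sched.h hunk below) evidently aliases the same pages through the linear map. The rebase in the hunk is plain pointer arithmetic:

/* two views of the same bytes (sketch of the hunk's adjbuf computation) */
size_t off   = buf - (const void *)current->stack;  /* offset into the stack */
void *adjbuf = current->lowmem_stack + off;         /* lowmem alias          */
/* virt_to_page(adjbuf) is now meaningful; virt_to_page(buf) was not */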
86896diff --git a/include/linux/sched.h b/include/linux/sched.h
86897index 0376b05..82054c2 100644
86898--- a/include/linux/sched.h
86899+++ b/include/linux/sched.h
86900@@ -131,6 +131,7 @@ struct fs_struct;
86901 struct perf_event_context;
86902 struct blk_plug;
86903 struct filename;
86904+struct linux_binprm;
86905
86906 #define VMACACHE_BITS 2
86907 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
86908@@ -374,7 +375,7 @@ extern char __sched_text_start[], __sched_text_end[];
86909 extern int in_sched_functions(unsigned long addr);
86910
86911 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
86912-extern signed long schedule_timeout(signed long timeout);
86913+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
86914 extern signed long schedule_timeout_interruptible(signed long timeout);
86915 extern signed long schedule_timeout_killable(signed long timeout);
86916 extern signed long schedule_timeout_uninterruptible(signed long timeout);
86917@@ -385,6 +386,19 @@ struct nsproxy;
86918 struct user_namespace;
86919
86920 #ifdef CONFIG_MMU
86921+
86922+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
86923+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
86924+#else
86925+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
86926+{
86927+ return 0;
86928+}
86929+#endif
86930+
86931+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
86932+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
86933+
86934 extern void arch_pick_mmap_layout(struct mm_struct *mm);
86935 extern unsigned long
86936 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
86937@@ -682,6 +696,17 @@ struct signal_struct {
86938 #ifdef CONFIG_TASKSTATS
86939 struct taskstats *stats;
86940 #endif
86941+
86942+#ifdef CONFIG_GRKERNSEC
86943+ u32 curr_ip;
86944+ u32 saved_ip;
86945+ u32 gr_saddr;
86946+ u32 gr_daddr;
86947+ u16 gr_sport;
86948+ u16 gr_dport;
86949+ u8 used_accept:1;
86950+#endif
86951+
86952 #ifdef CONFIG_AUDIT
86953 unsigned audit_tty;
86954 unsigned audit_tty_log_passwd;
86955@@ -708,7 +733,7 @@ struct signal_struct {
86956 struct mutex cred_guard_mutex; /* guard against foreign influences on
86957 * credential calculations
86958 * (notably. ptrace) */
86959-};
86960+} __randomize_layout;
86961
86962 /*
86963 * Bits in flags field of signal_struct.
86964@@ -761,6 +786,14 @@ struct user_struct {
86965 struct key *session_keyring; /* UID's default session keyring */
86966 #endif
86967
86968+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
86969+ unsigned char kernel_banned;
86970+#endif
86971+#ifdef CONFIG_GRKERNSEC_BRUTE
86972+ unsigned char suid_banned;
86973+ unsigned long suid_ban_expires;
86974+#endif
86975+
86976 /* Hash table maintenance information */
86977 struct hlist_node uidhash_node;
86978 kuid_t uid;
86979@@ -768,7 +801,7 @@ struct user_struct {
86980 #ifdef CONFIG_PERF_EVENTS
86981 atomic_long_t locked_vm;
86982 #endif
86983-};
86984+} __randomize_layout;
86985
86986 extern int uids_sysfs_init(void);
86987
86988@@ -1224,6 +1257,9 @@ enum perf_event_task_context {
86989 struct task_struct {
86990 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
86991 void *stack;
86992+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86993+ void *lowmem_stack;
86994+#endif
86995 atomic_t usage;
86996 unsigned int flags; /* per process flags, defined below */
86997 unsigned int ptrace;
86998@@ -1349,8 +1385,8 @@ struct task_struct {
86999 struct list_head thread_node;
87000
87001 struct completion *vfork_done; /* for vfork() */
87002- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
87003- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
87004+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
87005+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
87006
87007 cputime_t utime, stime, utimescaled, stimescaled;
87008 cputime_t gtime;
87009@@ -1375,11 +1411,6 @@ struct task_struct {
87010 struct task_cputime cputime_expires;
87011 struct list_head cpu_timers[3];
87012
87013-/* process credentials */
87014- const struct cred __rcu *real_cred; /* objective and real subjective task
87015- * credentials (COW) */
87016- const struct cred __rcu *cred; /* effective (overridable) subjective task
87017- * credentials (COW) */
87018 char comm[TASK_COMM_LEN]; /* executable name excluding path
87019 - access with [gs]et_task_comm (which lock
87020 it with task_lock())
87021@@ -1396,6 +1427,10 @@ struct task_struct {
87022 #endif
87023 /* CPU-specific state of this task */
87024 struct thread_struct thread;
87025+/* thread_info moved to task_struct */
87026+#ifdef CONFIG_X86
87027+ struct thread_info tinfo;
87028+#endif
87029 /* filesystem information */
87030 struct fs_struct *fs;
87031 /* open file information */
87032@@ -1472,6 +1507,10 @@ struct task_struct {
87033 gfp_t lockdep_reclaim_gfp;
87034 #endif
87035
87036+/* process credentials */
87037+ const struct cred __rcu *real_cred; /* objective and real subjective task
87038+ * credentials (COW) */
87039+
87040 /* journalling filesystem info */
87041 void *journal_info;
87042
87043@@ -1510,6 +1549,10 @@ struct task_struct {
87044 /* cg_list protected by css_set_lock and tsk->alloc_lock */
87045 struct list_head cg_list;
87046 #endif
87047+
87048+ const struct cred __rcu *cred; /* effective (overridable) subjective task
87049+ * credentials (COW) */
87050+
87051 #ifdef CONFIG_FUTEX
87052 struct robust_list_head __user *robust_list;
87053 #ifdef CONFIG_COMPAT
87054@@ -1655,7 +1698,78 @@ struct task_struct {
87055 unsigned int sequential_io;
87056 unsigned int sequential_io_avg;
87057 #endif
87058-};
87059+
87060+#ifdef CONFIG_GRKERNSEC
87061+ /* grsecurity */
87062+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
87063+ u64 exec_id;
87064+#endif
87065+#ifdef CONFIG_GRKERNSEC_SETXID
87066+ const struct cred *delayed_cred;
87067+#endif
87068+ struct dentry *gr_chroot_dentry;
87069+ struct acl_subject_label *acl;
87070+ struct acl_subject_label *tmpacl;
87071+ struct acl_role_label *role;
87072+ struct file *exec_file;
87073+ unsigned long brute_expires;
87074+ u16 acl_role_id;
87075+ u8 inherited;
87076+ /* is this the task that authenticated to the special role */
87077+ u8 acl_sp_role;
87078+ u8 is_writable;
87079+ u8 brute;
87080+ u8 gr_is_chrooted;
87081+#endif
87082+
87083+} __randomize_layout;
87084+
87085+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
87086+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
87087+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
87088+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
87089+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
87090+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
87091+
87092+#ifdef CONFIG_PAX_SOFTMODE
87093+extern int pax_softmode;
87094+#endif
87095+
87096+extern int pax_check_flags(unsigned long *);
87097+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
87098+
87099+/* if tsk != current then task_lock must be held on it */
87100+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
87101+static inline unsigned long pax_get_flags(struct task_struct *tsk)
87102+{
87103+ if (likely(tsk->mm))
87104+ return tsk->mm->pax_flags;
87105+ else
87106+ return 0UL;
87107+}
87108+
87109+/* if tsk != current then task_lock must be held on it */
87110+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
87111+{
87112+ if (likely(tsk->mm)) {
87113+ tsk->mm->pax_flags = flags;
87114+ return 0;
87115+ }
87116+ return -EINVAL;
87117+}
87118+#endif
87119+
87120+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
87121+extern void pax_set_initial_flags(struct linux_binprm *bprm);
87122+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
87123+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
87124+#endif
87125+
87126+struct path;
87127+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
87128+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
87129+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
87130+extern void pax_report_refcount_overflow(struct pt_regs *regs);
87131
87132 /* Future-safe accessor for struct task_struct's cpus_allowed. */
87133 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
87134@@ -1737,7 +1851,7 @@ struct pid_namespace;
87135 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
87136 struct pid_namespace *ns);
87137
87138-static inline pid_t task_pid_nr(struct task_struct *tsk)
87139+static inline pid_t task_pid_nr(const struct task_struct *tsk)
87140 {
87141 return tsk->pid;
87142 }
87143@@ -2084,6 +2198,25 @@ extern u64 sched_clock_cpu(int cpu);
87144
87145 extern void sched_clock_init(void);
87146
87147+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
87148+static inline void populate_stack(void)
87149+{
87150+ struct task_struct *curtask = current;
87151+ int c;
87152+ int *ptr = curtask->stack;
87153+ int *end = curtask->stack + THREAD_SIZE;
87154+
87155+ while (ptr < end) {
87156+ c = *(volatile int *)ptr;
87157+ ptr += PAGE_SIZE/sizeof(int);
87158+ }
87159+}
87160+#else
87161+static inline void populate_stack(void)
87162+{
87163+}
87164+#endif
87165+
87166 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
87167 static inline void sched_clock_tick(void)
87168 {
87169@@ -2217,7 +2350,9 @@ void yield(void);
87170 extern struct exec_domain default_exec_domain;
87171
87172 union thread_union {
87173+#ifndef CONFIG_X86
87174 struct thread_info thread_info;
87175+#endif
87176 unsigned long stack[THREAD_SIZE/sizeof(long)];
87177 };
87178
87179@@ -2250,6 +2385,7 @@ extern struct pid_namespace init_pid_ns;
87180 */
87181
87182 extern struct task_struct *find_task_by_vpid(pid_t nr);
87183+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
87184 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
87185 struct pid_namespace *ns);
87186
87187@@ -2412,7 +2548,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
87188 extern void exit_itimers(struct signal_struct *);
87189 extern void flush_itimer_signals(void);
87190
87191-extern void do_group_exit(int);
87192+extern __noreturn void do_group_exit(int);
87193
87194 extern int do_execve(struct filename *,
87195 const char __user * const __user *,
87196@@ -2614,9 +2750,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
87197
87198 #endif
87199
87200-static inline int object_is_on_stack(void *obj)
87201+static inline int object_starts_on_stack(const void *obj)
87202 {
87203- void *stack = task_stack_page(current);
87204+ const void *stack = task_stack_page(current);
87205
87206 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
87207 }
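A note on the populate_stack() helper added above: ptr advances PAGE_SIZE/sizeof(int) ints, i.e. exactly one page, per iteration, and the volatile load keeps the compiler from discarding the otherwise-dead reads, so every one of the THREAD_SIZE/PAGE_SIZE stack pages gets touched. The assumed call pattern:

populate_stack();  /* pre-fault the whole stack before entering code
                    * that must not take a fault on its own stack */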
87208diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
87209index 596a0e0..bea77ec 100644
87210--- a/include/linux/sched/sysctl.h
87211+++ b/include/linux/sched/sysctl.h
87212@@ -34,6 +34,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
87213 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
87214
87215 extern int sysctl_max_map_count;
87216+extern unsigned long sysctl_heap_stack_gap;
87217
87218 extern unsigned int sysctl_sched_latency;
87219 extern unsigned int sysctl_sched_min_granularity;
87220diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
87221index 4054b09..6f19cfd 100644
87222--- a/include/linux/seccomp.h
87223+++ b/include/linux/seccomp.h
87224@@ -76,6 +76,7 @@ static inline int seccomp_mode(struct seccomp *s)
87225 #ifdef CONFIG_SECCOMP_FILTER
87226 extern void put_seccomp_filter(struct task_struct *tsk);
87227 extern void get_seccomp_filter(struct task_struct *tsk);
87228+extern u32 seccomp_bpf_load(int off);
87229 #else /* CONFIG_SECCOMP_FILTER */
87230 static inline void put_seccomp_filter(struct task_struct *tsk)
87231 {
87232diff --git a/include/linux/security.h b/include/linux/security.h
87233index 9c6b972..7e7c704 100644
87234--- a/include/linux/security.h
87235+++ b/include/linux/security.h
87236@@ -27,6 +27,7 @@
87237 #include <linux/slab.h>
87238 #include <linux/err.h>
87239 #include <linux/string.h>
87240+#include <linux/grsecurity.h>
87241
87242 struct linux_binprm;
87243 struct cred;
87244@@ -116,8 +117,6 @@ struct seq_file;
87245
87246 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
87247
87248-void reset_security_ops(void);
87249-
87250 #ifdef CONFIG_MMU
87251 extern unsigned long mmap_min_addr;
87252 extern unsigned long dac_mmap_min_addr;
87253@@ -1719,7 +1718,7 @@ struct security_operations {
87254 struct audit_context *actx);
87255 void (*audit_rule_free) (void *lsmrule);
87256 #endif /* CONFIG_AUDIT */
87257-};
87258+} __randomize_layout;
87259
87260 /* prototypes */
87261 extern int security_init(void);
87262diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
87263index dc368b8..e895209 100644
87264--- a/include/linux/semaphore.h
87265+++ b/include/linux/semaphore.h
87266@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
87267 }
87268
87269 extern void down(struct semaphore *sem);
87270-extern int __must_check down_interruptible(struct semaphore *sem);
87271+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
87272 extern int __must_check down_killable(struct semaphore *sem);
87273 extern int __must_check down_trylock(struct semaphore *sem);
87274 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
87275diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
87276index 52e0097..383f21d 100644
87277--- a/include/linux/seq_file.h
87278+++ b/include/linux/seq_file.h
87279@@ -27,6 +27,9 @@ struct seq_file {
87280 struct mutex lock;
87281 const struct seq_operations *op;
87282 int poll_event;
87283+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
87284+ u64 exec_id;
87285+#endif
87286 #ifdef CONFIG_USER_NS
87287 struct user_namespace *user_ns;
87288 #endif
87289@@ -39,6 +42,7 @@ struct seq_operations {
87290 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
87291 int (*show) (struct seq_file *m, void *v);
87292 };
87293+typedef struct seq_operations __no_const seq_operations_no_const;
87294
87295 #define SEQ_SKIP 1
87296
87297@@ -96,6 +100,7 @@ void seq_pad(struct seq_file *m, char c);
87298
87299 char *mangle_path(char *s, const char *p, const char *esc);
87300 int seq_open(struct file *, const struct seq_operations *);
87301+int seq_open_restrict(struct file *, const struct seq_operations *);
87302 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
87303 loff_t seq_lseek(struct file *, loff_t, int);
87304 int seq_release(struct inode *, struct file *);
87305@@ -138,6 +143,7 @@ static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
87306 }
87307
87308 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
87309+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
87310 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
87311 int single_release(struct inode *, struct file *);
87312 void *__seq_open_private(struct file *, const struct seq_operations *, int);
87313diff --git a/include/linux/shm.h b/include/linux/shm.h
87314index 57d7770..0936af6 100644
87315--- a/include/linux/shm.h
87316+++ b/include/linux/shm.h
87317@@ -20,6 +20,10 @@ struct shmid_kernel /* private to the kernel */
87318
87319 /* The task created the shm object. NULL if the task is dead. */
87320 struct task_struct *shm_creator;
87321+#ifdef CONFIG_GRKERNSEC
87322+ time_t shm_createtime;
87323+ pid_t shm_lapid;
87324+#endif
87325 };
87326
87327 /* shm_mode upper byte flags */
87328diff --git a/include/linux/signal.h b/include/linux/signal.h
87329index c9e6536..923b302 100644
87330--- a/include/linux/signal.h
87331+++ b/include/linux/signal.h
87332@@ -293,7 +293,7 @@ static inline void allow_signal(int sig)
87333 * know it'll be handled, so that they don't get converted to
87334 * SIGKILL or just silently dropped.
87335 */
87336- kernel_sigaction(sig, (__force __sighandler_t)2);
87337+ kernel_sigaction(sig, (__force_user __sighandler_t)2);
87338 }
87339
87340 static inline void disallow_signal(int sig)
87341diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
87342index ec89301..4fd29a6 100644
87343--- a/include/linux/skbuff.h
87344+++ b/include/linux/skbuff.h
87345@@ -725,7 +725,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
87346 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
87347 int node);
87348 struct sk_buff *build_skb(void *data, unsigned int frag_size);
87349-static inline struct sk_buff *alloc_skb(unsigned int size,
87350+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
87351 gfp_t priority)
87352 {
87353 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
87354@@ -1839,7 +1839,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
87355 return skb->inner_transport_header - skb->inner_network_header;
87356 }
87357
87358-static inline int skb_network_offset(const struct sk_buff *skb)
87359+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
87360 {
87361 return skb_network_header(skb) - skb->data;
87362 }
87363@@ -1911,7 +1911,7 @@ static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb)
87364 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
87365 */
87366 #ifndef NET_SKB_PAD
87367-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
87368+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
87369 #endif
87370
87371 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
87372@@ -2518,7 +2518,7 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
87373 int *err);
87374 unsigned int datagram_poll(struct file *file, struct socket *sock,
87375 struct poll_table_struct *wait);
87376-int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
87377+int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
87378 struct iovec *to, int size);
87379 int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
87380 struct iovec *iov);
87381@@ -2664,6 +2664,8 @@ static inline ktime_t net_invalid_timestamp(void)
87382 return ktime_set(0, 0);
87383 }
87384
87385+void skb_timestamping_init(void);
87386+
87387 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
87388
87389 void skb_clone_tx_timestamp(struct sk_buff *skb);
87390@@ -2907,6 +2909,9 @@ static inline void nf_reset(struct sk_buff *skb)
87391 nf_bridge_put(skb->nf_bridge);
87392 skb->nf_bridge = NULL;
87393 #endif
87394+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
87395+ skb->nf_trace = 0;
87396+#endif
87397 }
87398
87399 static inline void nf_reset_trace(struct sk_buff *skb)
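The skbuff.h hunk shows two recurring patterns. The __intentional_overflow() annotations feed PaX's size_overflow GCC plugin; by the convention visible throughout this patch, -1 appears to exempt the function's arithmetic entirely, 0 to cover the return value, and positive numbers the corresponding parameters. Separately, NET_SKB_PAD switches from a bare 32 to _AC(32,UL) because the kernel's max() rejects mixed operand types and L1_CACHE_BYTES can expand to an unsigned long on some configurations; _AC(32,UL) reads as 32UL in C yet stays a plain 32 when the header reaches assembly. A simplified version of the kernel's max() shows the type check:

    #define max(x, y) ({                                    \
            typeof(x) _max1 = (x);                          \
            typeof(y) _max2 = (y);                          \
            (void) (&_max1 == &_max2);  /* warns when the types differ */ \
            _max1 > _max2 ? _max1 : _max2; })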
87400diff --git a/include/linux/slab.h b/include/linux/slab.h
87401index 1d9abb7..b1e8b10 100644
87402--- a/include/linux/slab.h
87403+++ b/include/linux/slab.h
87404@@ -14,15 +14,29 @@
87405 #include <linux/gfp.h>
87406 #include <linux/types.h>
87407 #include <linux/workqueue.h>
87408-
87409+#include <linux/err.h>
87410
87411 /*
87412 * Flags to pass to kmem_cache_create().
87413 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
87414 */
87415 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
87416+
87417+#ifdef CONFIG_PAX_USERCOPY_SLABS
87418+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
87419+#else
87420+#define SLAB_USERCOPY 0x00000000UL
87421+#endif
87422+
87423 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
87424 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
87425+
87426+#ifdef CONFIG_PAX_MEMORY_SANITIZE
87427+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
87428+#else
87429+#define SLAB_NO_SANITIZE 0x00000000UL
87430+#endif
87431+
87432 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
87433 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
87434 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
87435@@ -98,10 +112,13 @@
87436 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
87437 * Both make kfree a no-op.
87438 */
87439-#define ZERO_SIZE_PTR ((void *)16)
87440+#define ZERO_SIZE_PTR \
87441+({ \
87442+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
87443+ (void *)(-MAX_ERRNO-1L); \
87444+})
87445
87446-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
87447- (unsigned long)ZERO_SIZE_PTR)
87448+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
87449
87450 #include <linux/kmemleak.h>
87451
87452@@ -144,6 +161,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
87453 void kfree(const void *);
87454 void kzfree(const void *);
87455 size_t ksize(const void *);
87456+const char *check_heap_object(const void *ptr, unsigned long n);
87457+bool is_usercopy_object(const void *ptr);
87458
87459 /*
87460 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
87461@@ -176,7 +195,7 @@ struct kmem_cache {
87462 unsigned int align; /* Alignment as calculated */
87463 unsigned long flags; /* Active flags on the slab */
87464 const char *name; /* Slab name for sysfs */
87465- int refcount; /* Use counter */
87466+ atomic_t refcount; /* Use counter */
87467 void (*ctor)(void *); /* Called on object slot creation */
87468 struct list_head list; /* List of all slab caches on the system */
87469 };
87470@@ -261,6 +280,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
87471 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
87472 #endif
87473
87474+#ifdef CONFIG_PAX_USERCOPY_SLABS
87475+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
87476+#endif
87477+
87478 /*
87479 * Figure out which kmalloc slab an allocation of a certain size
87480 * belongs to.
87481@@ -269,7 +292,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
87482 * 2 = 120 .. 192 bytes
87483 * n = 2^(n-1) .. 2^n -1
87484 */
87485-static __always_inline int kmalloc_index(size_t size)
87486+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
87487 {
87488 if (!size)
87489 return 0;
87490@@ -312,11 +335,11 @@ static __always_inline int kmalloc_index(size_t size)
87491 }
87492 #endif /* !CONFIG_SLOB */
87493
87494-void *__kmalloc(size_t size, gfp_t flags);
87495+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
87496 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
87497
87498 #ifdef CONFIG_NUMA
87499-void *__kmalloc_node(size_t size, gfp_t flags, int node);
87500+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
87501 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
87502 #else
87503 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
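The ZERO_SIZE_PTR relocation in the slab.h hunk moves kmalloc(0)'s sentinel from address 16, inside mappable low memory, up to just below the errno range at the top of the address space, presumably so that a zero-size allocation plus an attacker-controlled offset can no longer land in a valid mapping; the BUILD_BUG_ON asserts that -MAX_ERRNO-1 really sits within the last page. The rewritten ZERO_OR_NULL_PTR folds the NULL test and the range test into a single unsigned comparison: subtracting 1 wraps NULL around to ULONG_MAX. A standalone userland demo of the comparison (simplified, without the BUILD_BUG_ON):

    #include <stdio.h>

    #define MAX_ERRNO       4095L
    #define ZERO_SIZE_PTR   ((void *)(-MAX_ERRNO - 1L))
    #define ZERO_OR_NULL_PTR(x) \
            ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

    int main(void)
    {
            printf("%d\n", ZERO_OR_NULL_PTR((void *)0));      /* 1: NULL wraps   */
            printf("%d\n", ZERO_OR_NULL_PTR(ZERO_SIZE_PTR));  /* 1: the sentinel */
            printf("%d\n", ZERO_OR_NULL_PTR((void *)0x1000)); /* 0: a real ptr   */
            return 0;
    }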
87504diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
87505index 8235dfb..47ce586 100644
87506--- a/include/linux/slab_def.h
87507+++ b/include/linux/slab_def.h
87508@@ -38,7 +38,7 @@ struct kmem_cache {
87509 /* 4) cache creation/removal */
87510 const char *name;
87511 struct list_head list;
87512- int refcount;
87513+ atomic_t refcount;
87514 int object_size;
87515 int align;
87516
87517@@ -54,10 +54,14 @@ struct kmem_cache {
87518 unsigned long node_allocs;
87519 unsigned long node_frees;
87520 unsigned long node_overflow;
87521- atomic_t allochit;
87522- atomic_t allocmiss;
87523- atomic_t freehit;
87524- atomic_t freemiss;
87525+ atomic_unchecked_t allochit;
87526+ atomic_unchecked_t allocmiss;
87527+ atomic_unchecked_t freehit;
87528+ atomic_unchecked_t freemiss;
87529+#ifdef CONFIG_PAX_MEMORY_SANITIZE
87530+ atomic_unchecked_t sanitized;
87531+ atomic_unchecked_t not_sanitized;
87532+#endif
87533
87534 /*
87535 * If debugging is enabled, then the allocator can add additional
87536diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
87537index d82abd4..408c3a0 100644
87538--- a/include/linux/slub_def.h
87539+++ b/include/linux/slub_def.h
87540@@ -74,7 +74,7 @@ struct kmem_cache {
87541 struct kmem_cache_order_objects max;
87542 struct kmem_cache_order_objects min;
87543 gfp_t allocflags; /* gfp flags to use on each alloc */
87544- int refcount; /* Refcount for slab cache destroy */
87545+ atomic_t refcount; /* Refcount for slab cache destroy */
87546 void (*ctor)(void *);
87547 int inuse; /* Offset to metadata */
87548 int align; /* Alignment */
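slab.h, slab_def.h and slub_def.h all flip the cache refcount to atomic_t in lockstep, since SLAB and SLUB share users of the field; an atomic type presumably keeps the count coherent even for updates made outside slab_mutex. Call sites change shape roughly like this (a sketch, not lines from the patch; do_cache_destroy is a hypothetical helper):

    atomic_inc(&s->refcount);               /* was: s->refcount++;      */
    if (atomic_dec_and_test(&s->refcount))  /* was: if (!--s->refcount) */
            do_cache_destroy(s);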
87549diff --git a/include/linux/smp.h b/include/linux/smp.h
87550index 34347f2..8739978 100644
87551--- a/include/linux/smp.h
87552+++ b/include/linux/smp.h
87553@@ -174,7 +174,9 @@ static inline void kick_all_cpus_sync(void) { }
87554 #endif
87555
87556 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
87557+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
87558 #define put_cpu() preempt_enable()
87559+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
87560
87561 /*
87562 * Callback to arch code if there's nosmp or maxcpus=0 on the
87563diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
87564index 46cca4c..3323536 100644
87565--- a/include/linux/sock_diag.h
87566+++ b/include/linux/sock_diag.h
87567@@ -11,7 +11,7 @@ struct sock;
87568 struct sock_diag_handler {
87569 __u8 family;
87570 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
87571-};
87572+} __do_const;
87573
87574 int sock_diag_register(const struct sock_diag_handler *h);
87575 void sock_diag_unregister(const struct sock_diag_handler *h);
87576diff --git a/include/linux/sonet.h b/include/linux/sonet.h
87577index 680f9a3..f13aeb0 100644
87578--- a/include/linux/sonet.h
87579+++ b/include/linux/sonet.h
87580@@ -7,7 +7,7 @@
87581 #include <uapi/linux/sonet.h>
87582
87583 struct k_sonet_stats {
87584-#define __HANDLE_ITEM(i) atomic_t i
87585+#define __HANDLE_ITEM(i) atomic_unchecked_t i
87586 __SONET_ITEMS
87587 #undef __HANDLE_ITEM
87588 };
87589diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
87590index 07d8e53..dc934c9 100644
87591--- a/include/linux/sunrpc/addr.h
87592+++ b/include/linux/sunrpc/addr.h
87593@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
87594 {
87595 switch (sap->sa_family) {
87596 case AF_INET:
87597- return ntohs(((struct sockaddr_in *)sap)->sin_port);
87598+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
87599 case AF_INET6:
87600- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
87601+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
87602 }
87603 return 0;
87604 }
87605@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
87606 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
87607 const struct sockaddr *src)
87608 {
87609- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
87610+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
87611 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
87612
87613 dsin->sin_family = ssin->sin_family;
87614@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
87615 if (sa->sa_family != AF_INET6)
87616 return 0;
87617
87618- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
87619+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
87620 }
87621
87622 #endif /* _LINUX_SUNRPC_ADDR_H */
87623diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
87624index 70736b9..37f33db 100644
87625--- a/include/linux/sunrpc/clnt.h
87626+++ b/include/linux/sunrpc/clnt.h
87627@@ -97,7 +97,7 @@ struct rpc_procinfo {
87628 unsigned int p_timer; /* Which RTT timer to use */
87629 u32 p_statidx; /* Which procedure to account */
87630 const char * p_name; /* name of procedure */
87631-};
87632+} __do_const;
87633
87634 #ifdef __KERNEL__
87635
87636diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
87637index 1bc7cd0..7912dc2 100644
87638--- a/include/linux/sunrpc/svc.h
87639+++ b/include/linux/sunrpc/svc.h
87640@@ -417,7 +417,7 @@ struct svc_procedure {
87641 unsigned int pc_count; /* call count */
87642 unsigned int pc_cachetype; /* cache info (NFS) */
87643 unsigned int pc_xdrressize; /* maximum size of XDR reply */
87644-};
87645+} __do_const;
87646
87647 /*
87648 * Function prototypes.
87649diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
87650index 5cf99a0..c0a1b98 100644
87651--- a/include/linux/sunrpc/svc_rdma.h
87652+++ b/include/linux/sunrpc/svc_rdma.h
87653@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
87654 extern unsigned int svcrdma_max_requests;
87655 extern unsigned int svcrdma_max_req_size;
87656
87657-extern atomic_t rdma_stat_recv;
87658-extern atomic_t rdma_stat_read;
87659-extern atomic_t rdma_stat_write;
87660-extern atomic_t rdma_stat_sq_starve;
87661-extern atomic_t rdma_stat_rq_starve;
87662-extern atomic_t rdma_stat_rq_poll;
87663-extern atomic_t rdma_stat_rq_prod;
87664-extern atomic_t rdma_stat_sq_poll;
87665-extern atomic_t rdma_stat_sq_prod;
87666+extern atomic_unchecked_t rdma_stat_recv;
87667+extern atomic_unchecked_t rdma_stat_read;
87668+extern atomic_unchecked_t rdma_stat_write;
87669+extern atomic_unchecked_t rdma_stat_sq_starve;
87670+extern atomic_unchecked_t rdma_stat_rq_starve;
87671+extern atomic_unchecked_t rdma_stat_rq_poll;
87672+extern atomic_unchecked_t rdma_stat_rq_prod;
87673+extern atomic_unchecked_t rdma_stat_sq_poll;
87674+extern atomic_unchecked_t rdma_stat_sq_prod;
87675
87676 #define RPCRDMA_VERSION 1
87677
87678diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
87679index 8d71d65..f79586e 100644
87680--- a/include/linux/sunrpc/svcauth.h
87681+++ b/include/linux/sunrpc/svcauth.h
87682@@ -120,7 +120,7 @@ struct auth_ops {
87683 int (*release)(struct svc_rqst *rq);
87684 void (*domain_release)(struct auth_domain *);
87685 int (*set_client)(struct svc_rqst *rq);
87686-};
87687+} __do_const;
87688
87689 #define SVC_GARBAGE 1
87690 #define SVC_SYSERR 2
87691diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
87692index e7a018e..49f8b17 100644
87693--- a/include/linux/swiotlb.h
87694+++ b/include/linux/swiotlb.h
87695@@ -60,7 +60,8 @@ extern void
87696
87697 extern void
87698 swiotlb_free_coherent(struct device *hwdev, size_t size,
87699- void *vaddr, dma_addr_t dma_handle);
87700+ void *vaddr, dma_addr_t dma_handle,
87701+ struct dma_attrs *attrs);
87702
87703 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
87704 unsigned long offset, size_t size,
87705diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
87706index b0881a0..559a440 100644
87707--- a/include/linux/syscalls.h
87708+++ b/include/linux/syscalls.h
87709@@ -98,10 +98,16 @@ struct sigaltstack;
87710 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
87711
87712 #define __SC_DECL(t, a) t a
87713+#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
87714 #define __TYPE_IS_L(t) (__same_type((t)0, 0L))
87715 #define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
87716 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
87717-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
87718+#define __SC_LONG(t, a) __typeof( \
87719+ __builtin_choose_expr( \
87720+ sizeof(t) > sizeof(int), \
87721+ (t) 0, \
87722+ __builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L) \
87723+ )) a
87724 #define __SC_CAST(t, a) (t) a
87725 #define __SC_ARGS(t, a) a
87726 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
87727@@ -379,11 +385,11 @@ asmlinkage long sys_sync(void);
87728 asmlinkage long sys_fsync(unsigned int fd);
87729 asmlinkage long sys_fdatasync(unsigned int fd);
87730 asmlinkage long sys_bdflush(int func, long data);
87731-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
87732- char __user *type, unsigned long flags,
87733+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
87734+ const char __user *type, unsigned long flags,
87735 void __user *data);
87736-asmlinkage long sys_umount(char __user *name, int flags);
87737-asmlinkage long sys_oldumount(char __user *name);
87738+asmlinkage long sys_umount(const char __user *name, int flags);
87739+asmlinkage long sys_oldumount(const char __user *name);
87740 asmlinkage long sys_truncate(const char __user *path, long length);
87741 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
87742 asmlinkage long sys_stat(const char __user *filename,
87743@@ -595,7 +601,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
87744 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
87745 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
87746 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
87747- struct sockaddr __user *, int);
87748+ struct sockaddr __user *, int) __intentional_overflow(0);
87749 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
87750 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
87751 unsigned int vlen, unsigned flags);
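The __SC_LONG rework changes how SYSCALL_DEFINEn widens arguments to register width: the stock macro funnels everything at most long-sized through a signed long, which sign-extends unsigned sub-long types, while the patched version selects 0UL for types matched by the new __TYPE_IS_U, preserving unsignedness. __SC_CAST restores the declared type either way, so this appears aimed at sign-sensitive analysis such as the size_overflow plugin rather than at runtime values. The difference in the widened view:

    #include <stdio.h>

    int main(void)
    {
            unsigned int len = 0xffffffffu;  /* e.g. a length from userland */
            long as_signed = (int)len;       /* old widening: -1            */
            unsigned long as_unsigned = len; /* new widening: 4294967295    */

            printf("%ld vs %lu\n", as_signed, as_unsigned);
            return 0;
    }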
87752diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
87753index 27b3b0b..e093dd9 100644
87754--- a/include/linux/syscore_ops.h
87755+++ b/include/linux/syscore_ops.h
87756@@ -16,7 +16,7 @@ struct syscore_ops {
87757 int (*suspend)(void);
87758 void (*resume)(void);
87759 void (*shutdown)(void);
87760-};
87761+} __do_const;
87762
87763 extern void register_syscore_ops(struct syscore_ops *ops);
87764 extern void unregister_syscore_ops(struct syscore_ops *ops);
87765diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
87766index 14a8ff2..fa95f3a 100644
87767--- a/include/linux/sysctl.h
87768+++ b/include/linux/sysctl.h
87769@@ -34,13 +34,13 @@ struct ctl_table_root;
87770 struct ctl_table_header;
87771 struct ctl_dir;
87772
87773-typedef struct ctl_table ctl_table;
87774-
87775 typedef int proc_handler (struct ctl_table *ctl, int write,
87776 void __user *buffer, size_t *lenp, loff_t *ppos);
87777
87778 extern int proc_dostring(struct ctl_table *, int,
87779 void __user *, size_t *, loff_t *);
87780+extern int proc_dostring_modpriv(struct ctl_table *, int,
87781+ void __user *, size_t *, loff_t *);
87782 extern int proc_dointvec(struct ctl_table *, int,
87783 void __user *, size_t *, loff_t *);
87784 extern int proc_dointvec_minmax(struct ctl_table *, int,
87785@@ -115,7 +115,9 @@ struct ctl_table
87786 struct ctl_table_poll *poll;
87787 void *extra1;
87788 void *extra2;
87789-};
87790+} __do_const __randomize_layout;
87791+typedef struct ctl_table __no_const ctl_table_no_const;
87792+typedef struct ctl_table ctl_table;
87793
87794 struct ctl_node {
87795 struct rb_node node;
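ctl_table picks up the __do_const/__no_const pair that recurs throughout this patch: the constify GCC plugin turns every instance of a __do_const struct into read-only data, and the *_no_const typedef is the escape hatch for the minority of tables genuinely built or edited at runtime (the ip_vs and conntrack hunks later in this section switch their dynamically allocated tables to ctl_table_no_const for exactly this reason). The idiom, sketched:

    static struct ctl_table fixed_table[] = {   /* constified into rodata */
            { .procname = "example", .mode = 0644 },
            { }
    };

    static ctl_table_no_const *make_runtime_table(void)
    {
            ctl_table_no_const *t;              /* stays writable */

            t = kmemdup(fixed_table, sizeof(fixed_table), GFP_KERNEL);
            if (t)
                    t->mode = 0444;             /* runtime edit is legal */
            return t;
    }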
87796diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
87797index f97d0db..c1187dc 100644
87798--- a/include/linux/sysfs.h
87799+++ b/include/linux/sysfs.h
87800@@ -34,7 +34,8 @@ struct attribute {
87801 struct lock_class_key *key;
87802 struct lock_class_key skey;
87803 #endif
87804-};
87805+} __do_const;
87806+typedef struct attribute __no_const attribute_no_const;
87807
87808 /**
87809 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
87810@@ -63,7 +64,8 @@ struct attribute_group {
87811 struct attribute *, int);
87812 struct attribute **attrs;
87813 struct bin_attribute **bin_attrs;
87814-};
87815+} __do_const;
87816+typedef struct attribute_group __no_const attribute_group_no_const;
87817
87818 /**
87819 * Use these macros to make defining attributes easier. See include/linux/device.h
87820@@ -128,7 +130,8 @@ struct bin_attribute {
87821 char *, loff_t, size_t);
87822 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
87823 struct vm_area_struct *vma);
87824-};
87825+} __do_const;
87826+typedef struct bin_attribute __no_const bin_attribute_no_const;
87827
87828 /**
87829 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
87830diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
87831index 387fa7d..3fcde6b 100644
87832--- a/include/linux/sysrq.h
87833+++ b/include/linux/sysrq.h
87834@@ -16,6 +16,7 @@
87835
87836 #include <linux/errno.h>
87837 #include <linux/types.h>
87838+#include <linux/compiler.h>
87839
87840 /* Possible values of bitmask for enabling sysrq functions */
87841 /* 0x0001 is reserved for enable everything */
87842@@ -33,7 +34,7 @@ struct sysrq_key_op {
87843 char *help_msg;
87844 char *action_msg;
87845 int enable_mask;
87846-};
87847+} __do_const;
87848
87849 #ifdef CONFIG_MAGIC_SYSRQ
87850
87851diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
87852index ff307b5..f1a4468 100644
87853--- a/include/linux/thread_info.h
87854+++ b/include/linux/thread_info.h
87855@@ -145,6 +145,13 @@ static inline bool test_and_clear_restore_sigmask(void)
87856 #error "no set_restore_sigmask() provided and default one won't work"
87857 #endif
87858
87859+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
87860+
87861+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
87862+{
87863+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
87864+}
87865+
87866 #endif /* __KERNEL__ */
87867
87868 #endif /* _LINUX_THREAD_INFO_H */
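check_object_size() is the PaX USERCOPY entry point: architectures wrap their user-copy primitives so that each copy is validated against the bounds of the kernel object involved (slab object, stack frame) before data moves, with __builtin_constant_p(n) forwarded so the checker can treat compile-time-fixed sizes differently. Roughly how a wrapper consumes it (a sketch, not any architecture's actual implementation):

    static inline unsigned long
    copy_to_user_hardened(void __user *to, const void *from, unsigned long n)
    {
            check_object_size(from, n, true);   /* true: copying toward userland */
            return __copy_to_user(to, from, n);
    }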
87869diff --git a/include/linux/tty.h b/include/linux/tty.h
87870index 1c3316a..ae83b9f 100644
87871--- a/include/linux/tty.h
87872+++ b/include/linux/tty.h
87873@@ -202,7 +202,7 @@ struct tty_port {
87874 const struct tty_port_operations *ops; /* Port operations */
87875 spinlock_t lock; /* Lock protecting tty field */
87876 int blocked_open; /* Waiting to open */
87877- int count; /* Usage count */
87878+ atomic_t count; /* Usage count */
87879 wait_queue_head_t open_wait; /* Open waiters */
87880 wait_queue_head_t close_wait; /* Close waiters */
87881 wait_queue_head_t delta_msr_wait; /* Modem status change */
87882@@ -284,7 +284,7 @@ struct tty_struct {
87883 /* If the tty has a pending do_SAK, queue it here - akpm */
87884 struct work_struct SAK_work;
87885 struct tty_port *port;
87886-};
87887+} __randomize_layout;
87888
87889 /* Each of a tty's open files has private_data pointing to tty_file_private */
87890 struct tty_file_private {
87891@@ -550,7 +550,7 @@ extern int tty_port_open(struct tty_port *port,
87892 struct tty_struct *tty, struct file *filp);
87893 static inline int tty_port_users(struct tty_port *port)
87894 {
87895- return port->count + port->blocked_open;
87896+ return atomic_read(&port->count) + port->blocked_open;
87897 }
87898
87899 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
87900diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
87901index 756a609..89db85e 100644
87902--- a/include/linux/tty_driver.h
87903+++ b/include/linux/tty_driver.h
87904@@ -285,7 +285,7 @@ struct tty_operations {
87905 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
87906 #endif
87907 const struct file_operations *proc_fops;
87908-};
87909+} __do_const __randomize_layout;
87910
87911 struct tty_driver {
87912 int magic; /* magic number for this structure */
87913@@ -319,7 +319,7 @@ struct tty_driver {
87914
87915 const struct tty_operations *ops;
87916 struct list_head tty_drivers;
87917-};
87918+} __randomize_layout;
87919
87920 extern struct list_head tty_drivers;
87921
87922diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
87923index 00c9d68..bc0188b 100644
87924--- a/include/linux/tty_ldisc.h
87925+++ b/include/linux/tty_ldisc.h
87926@@ -215,7 +215,7 @@ struct tty_ldisc_ops {
87927
87928 struct module *owner;
87929
87930- int refcount;
87931+ atomic_t refcount;
87932 };
87933
87934 struct tty_ldisc {
87935diff --git a/include/linux/types.h b/include/linux/types.h
87936index a0bb704..f511c77 100644
87937--- a/include/linux/types.h
87938+++ b/include/linux/types.h
87939@@ -177,10 +177,26 @@ typedef struct {
87940 int counter;
87941 } atomic_t;
87942
87943+#ifdef CONFIG_PAX_REFCOUNT
87944+typedef struct {
87945+ int counter;
87946+} atomic_unchecked_t;
87947+#else
87948+typedef atomic_t atomic_unchecked_t;
87949+#endif
87950+
87951 #ifdef CONFIG_64BIT
87952 typedef struct {
87953 long counter;
87954 } atomic64_t;
87955+
87956+#ifdef CONFIG_PAX_REFCOUNT
87957+typedef struct {
87958+ long counter;
87959+} atomic64_unchecked_t;
87960+#else
87961+typedef atomic64_t atomic64_unchecked_t;
87962+#endif
87963 #endif
87964
87965 struct list_head {
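atomic_unchecked_t is the other half of PAX_REFCOUNT: under that option the ordinary atomic_t operations gain an arch-level overflow trap so reference counts cannot wrap into use-after-free territory, and the _unchecked variants exist for the many counters converted in this patch (statistics, generation numbers, IDs) where wrapping is harmless by design. A userland analogue of the two behaviors (the real kernel check is an arch-specific trap, not these builtins):

    static inline void atomic_inc_checked(int *v)
    {
            int old = *v;

            if (__builtin_add_overflow(old, 1, v))
                    __builtin_trap();       /* a refcount just wrapped */
    }

    static inline void atomic_inc_unchecked_sketch(int *v)
    {
            (*v)++;                         /* wrap tolerated on purpose */
    }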
87966diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
87967index ecd3319..8a36ded 100644
87968--- a/include/linux/uaccess.h
87969+++ b/include/linux/uaccess.h
87970@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
87971 long ret; \
87972 mm_segment_t old_fs = get_fs(); \
87973 \
87974- set_fs(KERNEL_DS); \
87975 pagefault_disable(); \
87976- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
87977- pagefault_enable(); \
87978+ set_fs(KERNEL_DS); \
87979+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
87980 set_fs(old_fs); \
87981+ pagefault_enable(); \
87982 ret; \
87983 })
87984
87985diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
87986index 2d1f9b6..d7a9fce 100644
87987--- a/include/linux/uidgid.h
87988+++ b/include/linux/uidgid.h
87989@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
87990
87991 #endif /* CONFIG_USER_NS */
87992
87993+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
87994+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
87995+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
87996+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
87997+
87998 #endif /* _LINUX_UIDGID_H */
87999diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
88000index 99c1b4d..562e6f3 100644
88001--- a/include/linux/unaligned/access_ok.h
88002+++ b/include/linux/unaligned/access_ok.h
88003@@ -4,34 +4,34 @@
88004 #include <linux/kernel.h>
88005 #include <asm/byteorder.h>
88006
88007-static inline u16 get_unaligned_le16(const void *p)
88008+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
88009 {
88010- return le16_to_cpup((__le16 *)p);
88011+ return le16_to_cpup((const __le16 *)p);
88012 }
88013
88014-static inline u32 get_unaligned_le32(const void *p)
88015+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
88016 {
88017- return le32_to_cpup((__le32 *)p);
88018+ return le32_to_cpup((const __le32 *)p);
88019 }
88020
88021-static inline u64 get_unaligned_le64(const void *p)
88022+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
88023 {
88024- return le64_to_cpup((__le64 *)p);
88025+ return le64_to_cpup((const __le64 *)p);
88026 }
88027
88028-static inline u16 get_unaligned_be16(const void *p)
88029+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
88030 {
88031- return be16_to_cpup((__be16 *)p);
88032+ return be16_to_cpup((const __be16 *)p);
88033 }
88034
88035-static inline u32 get_unaligned_be32(const void *p)
88036+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
88037 {
88038- return be32_to_cpup((__be32 *)p);
88039+ return be32_to_cpup((const __be32 *)p);
88040 }
88041
88042-static inline u64 get_unaligned_be64(const void *p)
88043+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
88044 {
88045- return be64_to_cpup((__be64 *)p);
88046+ return be64_to_cpup((const __be64 *)p);
88047 }
88048
88049 static inline void put_unaligned_le16(u16 val, void *p)
88050diff --git a/include/linux/usb.h b/include/linux/usb.h
88051index d2465bc..5256de4 100644
88052--- a/include/linux/usb.h
88053+++ b/include/linux/usb.h
88054@@ -571,7 +571,7 @@ struct usb_device {
88055 int maxchild;
88056
88057 u32 quirks;
88058- atomic_t urbnum;
88059+ atomic_unchecked_t urbnum;
88060
88061 unsigned long active_duration;
88062
88063@@ -1655,7 +1655,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
88064
88065 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
88066 __u8 request, __u8 requesttype, __u16 value, __u16 index,
88067- void *data, __u16 size, int timeout);
88068+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
88069 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
88070 void *data, int len, int *actual_length, int timeout);
88071 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
88072diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
88073index e452ba6..78f8e80 100644
88074--- a/include/linux/usb/renesas_usbhs.h
88075+++ b/include/linux/usb/renesas_usbhs.h
88076@@ -39,7 +39,7 @@ enum {
88077 */
88078 struct renesas_usbhs_driver_callback {
88079 int (*notify_hotplug)(struct platform_device *pdev);
88080-};
88081+} __no_const;
88082
88083 /*
88084 * callback functions for platform
88085diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
88086index 4836ba3..603f6ee 100644
88087--- a/include/linux/user_namespace.h
88088+++ b/include/linux/user_namespace.h
88089@@ -33,7 +33,7 @@ struct user_namespace {
88090 struct key *persistent_keyring_register;
88091 struct rw_semaphore persistent_keyring_register_sem;
88092 #endif
88093-};
88094+} __randomize_layout;
88095
88096 extern struct user_namespace init_user_ns;
88097
88098diff --git a/include/linux/utsname.h b/include/linux/utsname.h
88099index 239e277..22a5cf5 100644
88100--- a/include/linux/utsname.h
88101+++ b/include/linux/utsname.h
88102@@ -24,7 +24,7 @@ struct uts_namespace {
88103 struct new_utsname name;
88104 struct user_namespace *user_ns;
88105 unsigned int proc_inum;
88106-};
88107+} __randomize_layout;
88108 extern struct uts_namespace init_uts_ns;
88109
88110 #ifdef CONFIG_UTS_NS
88111diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
88112index 6f8fbcf..4efc177 100644
88113--- a/include/linux/vermagic.h
88114+++ b/include/linux/vermagic.h
88115@@ -25,9 +25,42 @@
88116 #define MODULE_ARCH_VERMAGIC ""
88117 #endif
88118
88119+#ifdef CONFIG_PAX_REFCOUNT
88120+#define MODULE_PAX_REFCOUNT "REFCOUNT "
88121+#else
88122+#define MODULE_PAX_REFCOUNT ""
88123+#endif
88124+
88125+#ifdef CONSTIFY_PLUGIN
88126+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
88127+#else
88128+#define MODULE_CONSTIFY_PLUGIN ""
88129+#endif
88130+
88131+#ifdef STACKLEAK_PLUGIN
88132+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
88133+#else
88134+#define MODULE_STACKLEAK_PLUGIN ""
88135+#endif
88136+
88137+#ifdef RANDSTRUCT_PLUGIN
88138+#include <generated/randomize_layout_hash.h>
88139+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
88140+#else
88141+#define MODULE_RANDSTRUCT_PLUGIN
88142+#endif
88143+
88144+#ifdef CONFIG_GRKERNSEC
88145+#define MODULE_GRSEC "GRSEC "
88146+#else
88147+#define MODULE_GRSEC ""
88148+#endif
88149+
88150 #define VERMAGIC_STRING \
88151 UTS_RELEASE " " \
88152 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
88153 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
88154- MODULE_ARCH_VERMAGIC
88155+ MODULE_ARCH_VERMAGIC \
88156+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
88157+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
88158
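Extending VERMAGIC_STRING makes the hardening configuration part of module ABI matching: a module built without the same REFCOUNT/CONSTIFY/STACKLEAK/GRSEC/RANDSTRUCT options produces a different magic string and is rejected at load time rather than silently violating the hardened kernel's invariants (RANDSTRUCT additionally bakes in the layout seed hash, since structure offsets differ per seed). The mechanism is plain string-literal concatenation, which is why each disabled feature must expand to an empty string:

    #define MODULE_PAX_REFCOUNT "REFCOUNT "
    #define MODULE_GRSEC        ""           /* feature disabled */

    static const char vermagic[] =
            "3.16.3 SMP " MODULE_PAX_REFCOUNT MODULE_GRSEC;
    /* => "3.16.3 SMP REFCOUNT " (version string illustrative only) */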
88159diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
88160index 502073a..a7de024 100644
88161--- a/include/linux/vga_switcheroo.h
88162+++ b/include/linux/vga_switcheroo.h
88163@@ -63,8 +63,8 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
88164
88165 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
88166
88167-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
88168-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
88169+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
88170+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
88171 #else
88172
88173 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
88174@@ -81,8 +81,8 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
88175
88176 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
88177
88178-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
88179-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
88180+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
88181+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
88182
88183 #endif
88184 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
88185diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
88186index 4b8a891..e9a2863 100644
88187--- a/include/linux/vmalloc.h
88188+++ b/include/linux/vmalloc.h
88189@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
88190 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
88191 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
88192 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
88193+
88194+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
88195+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
88196+#endif
88197+
88198 /* bits [20..32] reserved for arch specific ioremap internals */
88199
88200 /*
88201@@ -82,6 +87,10 @@ extern void *vmap(struct page **pages, unsigned int count,
88202 unsigned long flags, pgprot_t prot);
88203 extern void vunmap(const void *addr);
88204
88205+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
88206+extern void unmap_process_stacks(struct task_struct *task);
88207+#endif
88208+
88209 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
88210 unsigned long uaddr, void *kaddr,
88211 unsigned long size);
88212@@ -142,7 +151,7 @@ extern void free_vm_area(struct vm_struct *area);
88213
88214 /* for /dev/kmem */
88215 extern long vread(char *buf, char *addr, unsigned long count);
88216-extern long vwrite(char *buf, char *addr, unsigned long count);
88217+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
88218
88219 /*
88220 * Internals. Dont't use..
88221diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
88222index 82e7db7..f8ce3d0 100644
88223--- a/include/linux/vmstat.h
88224+++ b/include/linux/vmstat.h
88225@@ -108,18 +108,18 @@ static inline void vm_events_fold_cpu(int cpu)
88226 /*
88227 * Zone based page accounting with per cpu differentials.
88228 */
88229-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
88230+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
88231
88232 static inline void zone_page_state_add(long x, struct zone *zone,
88233 enum zone_stat_item item)
88234 {
88235- atomic_long_add(x, &zone->vm_stat[item]);
88236- atomic_long_add(x, &vm_stat[item]);
88237+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
88238+ atomic_long_add_unchecked(x, &vm_stat[item]);
88239 }
88240
88241-static inline unsigned long global_page_state(enum zone_stat_item item)
88242+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
88243 {
88244- long x = atomic_long_read(&vm_stat[item]);
88245+ long x = atomic_long_read_unchecked(&vm_stat[item]);
88246 #ifdef CONFIG_SMP
88247 if (x < 0)
88248 x = 0;
88249@@ -127,10 +127,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
88250 return x;
88251 }
88252
88253-static inline unsigned long zone_page_state(struct zone *zone,
88254+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
88255 enum zone_stat_item item)
88256 {
88257- long x = atomic_long_read(&zone->vm_stat[item]);
88258+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
88259 #ifdef CONFIG_SMP
88260 if (x < 0)
88261 x = 0;
88262@@ -147,7 +147,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
88263 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
88264 enum zone_stat_item item)
88265 {
88266- long x = atomic_long_read(&zone->vm_stat[item]);
88267+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
88268
88269 #ifdef CONFIG_SMP
88270 int cpu;
88271@@ -234,14 +234,14 @@ static inline void __mod_zone_page_state(struct zone *zone,
88272
88273 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
88274 {
88275- atomic_long_inc(&zone->vm_stat[item]);
88276- atomic_long_inc(&vm_stat[item]);
88277+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
88278+ atomic_long_inc_unchecked(&vm_stat[item]);
88279 }
88280
88281 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
88282 {
88283- atomic_long_dec(&zone->vm_stat[item]);
88284- atomic_long_dec(&vm_stat[item]);
88285+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
88286+ atomic_long_dec_unchecked(&vm_stat[item]);
88287 }
88288
88289 static inline void __inc_zone_page_state(struct page *page,
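The vm_stat conversion is the statistics case for atomic_unchecked_t: these zone counters may legitimately wrap, and because per-cpu deltas are folded in lazily a reader can even observe a transiently negative sum, which the existing clamp absorbs. The __intentional_overflow(-1) on the readers likewise tells the size_overflow plugin not to flag that arithmetic. The reader pattern, condensed:

    static unsigned long read_zone_stat(atomic_long_unchecked_t *counter)
    {
            long x = atomic_long_read_unchecked(counter);

            return x < 0 ? 0 : x;   /* unfolded per-cpu diffs can dip below 0 */
    }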
88290diff --git a/include/linux/xattr.h b/include/linux/xattr.h
88291index 91b0a68..0e9adf6 100644
88292--- a/include/linux/xattr.h
88293+++ b/include/linux/xattr.h
88294@@ -28,7 +28,7 @@ struct xattr_handler {
88295 size_t size, int handler_flags);
88296 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
88297 size_t size, int flags, int handler_flags);
88298-};
88299+} __do_const;
88300
88301 struct xattr {
88302 const char *name;
88303@@ -37,6 +37,9 @@ struct xattr {
88304 };
88305
88306 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
88307+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
88308+ssize_t pax_getxattr(struct dentry *, void *, size_t);
88309+#endif
88310 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
88311 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
88312 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
88313diff --git a/include/linux/zlib.h b/include/linux/zlib.h
88314index 9c5a6b4..09c9438 100644
88315--- a/include/linux/zlib.h
88316+++ b/include/linux/zlib.h
88317@@ -31,6 +31,7 @@
88318 #define _ZLIB_H
88319
88320 #include <linux/zconf.h>
88321+#include <linux/compiler.h>
88322
88323 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
88324 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
88325@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
88326
88327 /* basic functions */
88328
88329-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
88330+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
88331 /*
88332 Returns the number of bytes that needs to be allocated for a per-
88333 stream workspace with the specified parameters. A pointer to this
88334diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
88335index eec6e46..82d5641 100644
88336--- a/include/media/v4l2-dev.h
88337+++ b/include/media/v4l2-dev.h
88338@@ -77,7 +77,7 @@ struct v4l2_file_operations {
88339 int (*mmap) (struct file *, struct vm_area_struct *);
88340 int (*open) (struct file *);
88341 int (*release) (struct file *);
88342-};
88343+} __do_const;
88344
88345 /*
88346 * Newer version of video_device, handled by videodev2.c
88347diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
88348index ffb69da..040393e 100644
88349--- a/include/media/v4l2-device.h
88350+++ b/include/media/v4l2-device.h
88351@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
88352 this function returns 0. If the name ends with a digit (e.g. cx18),
88353 then the name will be set to cx18-0 since cx180 looks really odd. */
88354 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
88355- atomic_t *instance);
88356+ atomic_unchecked_t *instance);
88357
88358 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
88359 Since the parent disappears this ensures that v4l2_dev doesn't have an
88360diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
88361index d9fa68f..45c88d1 100644
88362--- a/include/net/9p/transport.h
88363+++ b/include/net/9p/transport.h
88364@@ -63,7 +63,7 @@ struct p9_trans_module {
88365 int (*cancelled)(struct p9_client *, struct p9_req_t *req);
88366 int (*zc_request)(struct p9_client *, struct p9_req_t *,
88367 char *, char *, int , int, int, int);
88368-};
88369+} __do_const;
88370
88371 void v9fs_register_trans(struct p9_trans_module *m);
88372 void v9fs_unregister_trans(struct p9_trans_module *m);
88373diff --git a/include/net/af_unix.h b/include/net/af_unix.h
88374index a175ba4..196eb82 100644
88375--- a/include/net/af_unix.h
88376+++ b/include/net/af_unix.h
88377@@ -36,7 +36,7 @@ struct unix_skb_parms {
88378 u32 secid; /* Security ID */
88379 #endif
88380 u32 consumed;
88381-};
88382+} __randomize_layout;
88383
88384 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
88385 #define UNIXSID(skb) (&UNIXCB((skb)).secid)
88386diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
88387index 4abdcb2..945c5cc 100644
88388--- a/include/net/bluetooth/l2cap.h
88389+++ b/include/net/bluetooth/l2cap.h
88390@@ -601,7 +601,7 @@ struct l2cap_ops {
88391 long (*get_sndtimeo) (struct l2cap_chan *chan);
88392 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
88393 unsigned long len, int nb);
88394-};
88395+} __do_const;
88396
88397 struct l2cap_conn {
88398 struct hci_conn *hcon;
88399diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
88400index f2ae33d..c457cf0 100644
88401--- a/include/net/caif/cfctrl.h
88402+++ b/include/net/caif/cfctrl.h
88403@@ -52,7 +52,7 @@ struct cfctrl_rsp {
88404 void (*radioset_rsp)(void);
88405 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
88406 struct cflayer *client_layer);
88407-};
88408+} __no_const;
88409
88410 /* Link Setup Parameters for CAIF-Links. */
88411 struct cfctrl_link_param {
88412@@ -101,8 +101,8 @@ struct cfctrl_request_info {
88413 struct cfctrl {
88414 struct cfsrvl serv;
88415 struct cfctrl_rsp res;
88416- atomic_t req_seq_no;
88417- atomic_t rsp_seq_no;
88418+ atomic_unchecked_t req_seq_no;
88419+ atomic_unchecked_t rsp_seq_no;
88420 struct list_head list;
88421 /* Protects from simultaneous access to first_req list */
88422 spinlock_t info_list_lock;
88423diff --git a/include/net/flow.h b/include/net/flow.h
88424index 8109a15..504466d 100644
88425--- a/include/net/flow.h
88426+++ b/include/net/flow.h
88427@@ -231,6 +231,6 @@ void flow_cache_fini(struct net *net);
88428
88429 void flow_cache_flush(struct net *net);
88430 void flow_cache_flush_deferred(struct net *net);
88431-extern atomic_t flow_cache_genid;
88432+extern atomic_unchecked_t flow_cache_genid;
88433
88434 #endif
88435diff --git a/include/net/genetlink.h b/include/net/genetlink.h
88436index 93695f0..766d71c 100644
88437--- a/include/net/genetlink.h
88438+++ b/include/net/genetlink.h
88439@@ -120,7 +120,7 @@ struct genl_ops {
88440 u8 cmd;
88441 u8 internal_flags;
88442 u8 flags;
88443-};
88444+} __do_const;
88445
88446 int __genl_register_family(struct genl_family *family);
88447
88448diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
88449index 734d9b5..48a9a4b 100644
88450--- a/include/net/gro_cells.h
88451+++ b/include/net/gro_cells.h
88452@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
88453 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
88454
88455 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
88456- atomic_long_inc(&dev->rx_dropped);
88457+ atomic_long_inc_unchecked(&dev->rx_dropped);
88458 kfree_skb(skb);
88459 return;
88460 }
88461diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
88462index 7a43138..bc76865 100644
88463--- a/include/net/inet_connection_sock.h
88464+++ b/include/net/inet_connection_sock.h
88465@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
88466 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
88467 int (*bind_conflict)(const struct sock *sk,
88468 const struct inet_bind_bucket *tb, bool relax);
88469-};
88470+} __do_const;
88471
88472 /** inet_connection_sock - INET connection oriented sock
88473 *
88474diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
88475index 01d590e..f69c61d 100644
88476--- a/include/net/inetpeer.h
88477+++ b/include/net/inetpeer.h
88478@@ -47,7 +47,7 @@ struct inet_peer {
88479 */
88480 union {
88481 struct {
88482- atomic_t rid; /* Frag reception counter */
88483+ atomic_unchecked_t rid; /* Frag reception counter */
88484 };
88485 struct rcu_head rcu;
88486 struct inet_peer *gc_next;
88487diff --git a/include/net/ip.h b/include/net/ip.h
88488index 7596eb2..f7f5fad 100644
88489--- a/include/net/ip.h
88490+++ b/include/net/ip.h
88491@@ -309,7 +309,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
88492 }
88493 }
88494
88495-u32 ip_idents_reserve(u32 hash, int segs);
88496+u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
88497 void __ip_select_ident(struct iphdr *iph, int segs);
88498
88499 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
88500diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
88501index 9922093..a1755d6 100644
88502--- a/include/net/ip_fib.h
88503+++ b/include/net/ip_fib.h
88504@@ -169,7 +169,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
88505
88506 #define FIB_RES_SADDR(net, res) \
88507 ((FIB_RES_NH(res).nh_saddr_genid == \
88508- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
88509+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
88510 FIB_RES_NH(res).nh_saddr : \
88511 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
88512 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
88513diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
88514index 624a8a5..b1e2a24 100644
88515--- a/include/net/ip_vs.h
88516+++ b/include/net/ip_vs.h
88517@@ -558,7 +558,7 @@ struct ip_vs_conn {
88518 struct ip_vs_conn *control; /* Master control connection */
88519 atomic_t n_control; /* Number of controlled ones */
88520 struct ip_vs_dest *dest; /* real server */
88521- atomic_t in_pkts; /* incoming packet counter */
88522+ atomic_unchecked_t in_pkts; /* incoming packet counter */
88523
88524 /* packet transmitter for different forwarding methods. If it
88525 mangles the packet, it must return NF_DROP or better NF_STOLEN,
88526@@ -705,7 +705,7 @@ struct ip_vs_dest {
88527 __be16 port; /* port number of the server */
88528 union nf_inet_addr addr; /* IP address of the server */
88529 volatile unsigned int flags; /* dest status flags */
88530- atomic_t conn_flags; /* flags to copy to conn */
88531+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
88532 atomic_t weight; /* server weight */
88533
88534 atomic_t refcnt; /* reference counter */
88535@@ -960,11 +960,11 @@ struct netns_ipvs {
88536 /* ip_vs_lblc */
88537 int sysctl_lblc_expiration;
88538 struct ctl_table_header *lblc_ctl_header;
88539- struct ctl_table *lblc_ctl_table;
88540+ ctl_table_no_const *lblc_ctl_table;
88541 /* ip_vs_lblcr */
88542 int sysctl_lblcr_expiration;
88543 struct ctl_table_header *lblcr_ctl_header;
88544- struct ctl_table *lblcr_ctl_table;
88545+ ctl_table_no_const *lblcr_ctl_table;
88546 /* ip_vs_est */
88547 struct list_head est_list; /* estimator list */
88548 spinlock_t est_lock;
88549diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
88550index 8d4f588..2e37ad2 100644
88551--- a/include/net/irda/ircomm_tty.h
88552+++ b/include/net/irda/ircomm_tty.h
88553@@ -33,6 +33,7 @@
88554 #include <linux/termios.h>
88555 #include <linux/timer.h>
88556 #include <linux/tty.h> /* struct tty_struct */
88557+#include <asm/local.h>
88558
88559 #include <net/irda/irias_object.h>
88560 #include <net/irda/ircomm_core.h>
88561diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
88562index 714cc9a..ea05f3e 100644
88563--- a/include/net/iucv/af_iucv.h
88564+++ b/include/net/iucv/af_iucv.h
88565@@ -149,7 +149,7 @@ struct iucv_skb_cb {
88566 struct iucv_sock_list {
88567 struct hlist_head head;
88568 rwlock_t lock;
88569- atomic_t autobind_name;
88570+ atomic_unchecked_t autobind_name;
88571 };
88572
88573 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
88574diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
88575index f3be818..bf46196 100644
88576--- a/include/net/llc_c_ac.h
88577+++ b/include/net/llc_c_ac.h
88578@@ -87,7 +87,7 @@
88579 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
88580 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
88581
88582-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
88583+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
88584
88585 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
88586 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
88587diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
88588index 3948cf1..83b28c4 100644
88589--- a/include/net/llc_c_ev.h
88590+++ b/include/net/llc_c_ev.h
88591@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
88592 return (struct llc_conn_state_ev *)skb->cb;
88593 }
88594
88595-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
88596-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
88597+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
88598+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
88599
88600 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
88601 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
88602diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
88603index 0e79cfb..f46db31 100644
88604--- a/include/net/llc_c_st.h
88605+++ b/include/net/llc_c_st.h
88606@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
88607 u8 next_state;
88608 llc_conn_ev_qfyr_t *ev_qualifiers;
88609 llc_conn_action_t *ev_actions;
88610-};
88611+} __do_const;
88612
88613 struct llc_conn_state {
88614 u8 current_state;
88615diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
88616index a61b98c..aade1eb 100644
88617--- a/include/net/llc_s_ac.h
88618+++ b/include/net/llc_s_ac.h
88619@@ -23,7 +23,7 @@
88620 #define SAP_ACT_TEST_IND 9
88621
88622 /* All action functions must look like this */
88623-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
88624+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
88625
88626 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
88627 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
88628diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
88629index 567c681..cd73ac02 100644
88630--- a/include/net/llc_s_st.h
88631+++ b/include/net/llc_s_st.h
88632@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
88633 llc_sap_ev_t ev;
88634 u8 next_state;
88635 llc_sap_action_t *ev_actions;
88636-};
88637+} __do_const;
88638
88639 struct llc_sap_state {
88640 u8 curr_state;
88641diff --git a/include/net/mac80211.h b/include/net/mac80211.h
88642index 421b6ec..5a03729 100644
88643--- a/include/net/mac80211.h
88644+++ b/include/net/mac80211.h
88645@@ -4588,7 +4588,7 @@ struct rate_control_ops {
88646 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
88647
88648 u32 (*get_expected_throughput)(void *priv_sta);
88649-};
88650+} __do_const;
88651
88652 static inline int rate_supported(struct ieee80211_sta *sta,
88653 enum ieee80211_band band,
88654diff --git a/include/net/neighbour.h b/include/net/neighbour.h
88655index 47f4254..fd095bc 100644
88656--- a/include/net/neighbour.h
88657+++ b/include/net/neighbour.h
88658@@ -163,7 +163,7 @@ struct neigh_ops {
88659 void (*error_report)(struct neighbour *, struct sk_buff *);
88660 int (*output)(struct neighbour *, struct sk_buff *);
88661 int (*connected_output)(struct neighbour *, struct sk_buff *);
88662-};
88663+} __do_const;
88664
88665 struct pneigh_entry {
88666 struct pneigh_entry *next;
88667@@ -217,7 +217,7 @@ struct neigh_table {
88668 struct neigh_statistics __percpu *stats;
88669 struct neigh_hash_table __rcu *nht;
88670 struct pneigh_entry **phash_buckets;
88671-};
88672+} __randomize_layout;
88673
88674 static inline int neigh_parms_family(struct neigh_parms *p)
88675 {
88676diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
88677index 361d260..903d15f 100644
88678--- a/include/net/net_namespace.h
88679+++ b/include/net/net_namespace.h
88680@@ -129,8 +129,8 @@ struct net {
88681 struct netns_ipvs *ipvs;
88682 #endif
88683 struct sock *diag_nlsk;
88684- atomic_t fnhe_genid;
88685-};
88686+ atomic_unchecked_t fnhe_genid;
88687+} __randomize_layout;
88688
88689 #include <linux/seq_file_net.h>
88690
88691@@ -286,7 +286,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
88692 #define __net_init __init
88693 #define __net_exit __exit_refok
88694 #define __net_initdata __initdata
88695+#ifdef CONSTIFY_PLUGIN
88696 #define __net_initconst __initconst
88697+#else
88698+#define __net_initconst __initdata
88699+#endif
88700 #endif
88701
88702 struct pernet_operations {
88703@@ -296,7 +300,7 @@ struct pernet_operations {
88704 void (*exit_batch)(struct list_head *net_exit_list);
88705 int *id;
88706 size_t size;
88707-};
88708+} __do_const;
88709
88710 /*
88711 * Use these carefully. If you implement a network device and it
88712@@ -344,23 +348,23 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
88713
88714 static inline int rt_genid_ipv4(struct net *net)
88715 {
88716- return atomic_read(&net->ipv4.rt_genid);
88717+ return atomic_read_unchecked(&net->ipv4.rt_genid);
88718 }
88719
88720 static inline void rt_genid_bump_ipv4(struct net *net)
88721 {
88722- atomic_inc(&net->ipv4.rt_genid);
88723+ atomic_inc_unchecked(&net->ipv4.rt_genid);
88724 }
88725
88726 #if IS_ENABLED(CONFIG_IPV6)
88727 static inline int rt_genid_ipv6(struct net *net)
88728 {
88729- return atomic_read(&net->ipv6.rt_genid);
88730+ return atomic_read_unchecked(&net->ipv6.rt_genid);
88731 }
88732
88733 static inline void rt_genid_bump_ipv6(struct net *net)
88734 {
88735- atomic_inc(&net->ipv6.rt_genid);
88736+ atomic_inc_unchecked(&net->ipv6.rt_genid);
88737 }
88738 #else
88739 static inline int rt_genid_ipv6(struct net *net)
88740@@ -390,12 +394,12 @@ static inline void rt_genid_bump_all(struct net *net)
88741
88742 static inline int fnhe_genid(struct net *net)
88743 {
88744- return atomic_read(&net->fnhe_genid);
88745+ return atomic_read_unchecked(&net->fnhe_genid);
88746 }
88747
88748 static inline void fnhe_genid_bump(struct net *net)
88749 {
88750- atomic_inc(&net->fnhe_genid);
88751+ atomic_inc_unchecked(&net->fnhe_genid);
88752 }
88753
88754 #endif /* __NET_NET_NAMESPACE_H */
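rt_genid and fnhe_genid are generation counters: they exist only to be bumped on invalidation and compared against a cached copy, so wraparound is semantically harmless and they move to the unchecked atomics rather than trap under PAX_REFCOUNT. Typical consumer shape (a sketch):

    static bool cached_route_is_stale(struct net *net, int cached_genid)
    {
            return cached_genid != rt_genid_ipv4(net);
    }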
88755diff --git a/include/net/netdma.h b/include/net/netdma.h
88756index 8ba8ce2..99b7fff 100644
88757--- a/include/net/netdma.h
88758+++ b/include/net/netdma.h
88759@@ -24,7 +24,7 @@
88760 #include <linux/dmaengine.h>
88761 #include <linux/skbuff.h>
88762
88763-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
88764+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
88765 struct sk_buff *skb, int offset, struct iovec *to,
88766 size_t len, struct dma_pinned_list *pinned_list);
88767
88768diff --git a/include/net/netlink.h b/include/net/netlink.h
88769index 2b47eaa..6d5bcc2 100644
88770--- a/include/net/netlink.h
88771+++ b/include/net/netlink.h
88772@@ -521,7 +521,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
88773 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
88774 {
88775 if (mark)
88776- skb_trim(skb, (unsigned char *) mark - skb->data);
88777+ skb_trim(skb, (const unsigned char *) mark - skb->data);
88778 }
88779
88780 /**
88781diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
88782index 773cce3..6a11852 100644
88783--- a/include/net/netns/conntrack.h
88784+++ b/include/net/netns/conntrack.h
88785@@ -13,10 +13,10 @@ struct nf_conntrack_ecache;
88786 struct nf_proto_net {
88787 #ifdef CONFIG_SYSCTL
88788 struct ctl_table_header *ctl_table_header;
88789- struct ctl_table *ctl_table;
88790+ ctl_table_no_const *ctl_table;
88791 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
88792 struct ctl_table_header *ctl_compat_header;
88793- struct ctl_table *ctl_compat_table;
88794+ ctl_table_no_const *ctl_compat_table;
88795 #endif
88796 #endif
88797 unsigned int users;
88798@@ -59,7 +59,7 @@ struct nf_ip_net {
88799 struct nf_icmp_net icmpv6;
88800 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
88801 struct ctl_table_header *ctl_table_header;
88802- struct ctl_table *ctl_table;
88803+ ctl_table_no_const *ctl_table;
88804 #endif
88805 };
88806
88807diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
88808index aec5e12..807233f 100644
88809--- a/include/net/netns/ipv4.h
88810+++ b/include/net/netns/ipv4.h
88811@@ -82,7 +82,7 @@ struct netns_ipv4 {
88812
88813 struct ping_group_range ping_group_range;
88814
88815- atomic_t dev_addr_genid;
88816+ atomic_unchecked_t dev_addr_genid;
88817
88818 #ifdef CONFIG_SYSCTL
88819 unsigned long *sysctl_local_reserved_ports;
88820@@ -96,6 +96,6 @@ struct netns_ipv4 {
88821 struct fib_rules_ops *mr_rules_ops;
88822 #endif
88823 #endif
88824- atomic_t rt_genid;
88825+ atomic_unchecked_t rt_genid;
88826 };
88827 #endif
88828diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
88829index 19d3446..3c87195 100644
88830--- a/include/net/netns/ipv6.h
88831+++ b/include/net/netns/ipv6.h
88832@@ -74,8 +74,8 @@ struct netns_ipv6 {
88833 struct fib_rules_ops *mr6_rules_ops;
88834 #endif
88835 #endif
88836- atomic_t dev_addr_genid;
88837- atomic_t rt_genid;
88838+ atomic_unchecked_t dev_addr_genid;
88839+ atomic_unchecked_t rt_genid;
88840 };
88841
88842 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
88843diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
88844index 3492434..209f58c 100644
88845--- a/include/net/netns/xfrm.h
88846+++ b/include/net/netns/xfrm.h
88847@@ -64,7 +64,7 @@ struct netns_xfrm {
88848
88849 /* flow cache part */
88850 struct flow_cache flow_cache_global;
88851- atomic_t flow_cache_genid;
88852+ atomic_unchecked_t flow_cache_genid;
88853 struct list_head flow_cache_gc_list;
88854 spinlock_t flow_cache_gc_lock;
88855 struct work_struct flow_cache_gc_work;
88856diff --git a/include/net/ping.h b/include/net/ping.h
88857index 026479b..d9b2829 100644
88858--- a/include/net/ping.h
88859+++ b/include/net/ping.h
88860@@ -54,7 +54,7 @@ struct ping_iter_state {
88861
88862 extern struct proto ping_prot;
88863 #if IS_ENABLED(CONFIG_IPV6)
88864-extern struct pingv6_ops pingv6_ops;
88865+extern struct pingv6_ops *pingv6_ops;
88866 #endif
88867
88868 struct pingfakehdr {
88869diff --git a/include/net/protocol.h b/include/net/protocol.h
88870index d6fcc1f..ca277058 100644
88871--- a/include/net/protocol.h
88872+++ b/include/net/protocol.h
88873@@ -49,7 +49,7 @@ struct net_protocol {
88874 * socket lookup?
88875 */
88876 icmp_strict_tag_validation:1;
88877-};
88878+} __do_const;
88879
88880 #if IS_ENABLED(CONFIG_IPV6)
88881 struct inet6_protocol {
88882@@ -62,7 +62,7 @@ struct inet6_protocol {
88883 u8 type, u8 code, int offset,
88884 __be32 info);
88885 unsigned int flags; /* INET6_PROTO_xxx */
88886-};
88887+} __do_const;
88888
88889 #define INET6_PROTO_NOPOLICY 0x1
88890 #define INET6_PROTO_FINAL 0x2
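
__do_const is the inverse knob: ops structures holding only function pointers and flags, like the protocol handlers above, are forced into read-only memory so a kernel write primitive cannot redirect them. The effect, written out by hand for illustration (struct and instance names are invented):

#include <stddef.h>

struct proto_ops_demo {
        int  (*handler)(void *skb);
        unsigned int flags;
};

/* With the constify plugin, tagging the struct __do_const makes every
 * instance behave like this explicitly const one: it is emitted into
 * .rodata and any runtime write to it faults. */
static const struct proto_ops_demo demo_ops = {
        .handler = NULL,
        .flags   = 0,
};
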
88891diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
88892index 72240e5..8c14bef 100644
88893--- a/include/net/rtnetlink.h
88894+++ b/include/net/rtnetlink.h
88895@@ -93,7 +93,7 @@ struct rtnl_link_ops {
88896 int (*fill_slave_info)(struct sk_buff *skb,
88897 const struct net_device *dev,
88898 const struct net_device *slave_dev);
88899-};
88900+} __do_const;
88901
88902 int __rtnl_link_register(struct rtnl_link_ops *ops);
88903 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
88904diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
88905index 4a5b9a3..ca27d73 100644
88906--- a/include/net/sctp/checksum.h
88907+++ b/include/net/sctp/checksum.h
88908@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
88909 unsigned int offset)
88910 {
88911 struct sctphdr *sh = sctp_hdr(skb);
88912- __le32 ret, old = sh->checksum;
88913- const struct skb_checksum_ops ops = {
88914+ __le32 ret, old = sh->checksum;
88915+ static const struct skb_checksum_ops ops = {
88916 .update = sctp_csum_update,
88917 .combine = sctp_csum_combine,
88918 };
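
The one-word change above matters: a plain const local is still materialized on the stack at every call, while static const yields a single read-only copy in .rodata. A standalone demonstration of the same idiom (demo names assumed):

#include <stdio.h>

struct skb_checksum_ops_demo { int (*update)(int); };

static int update_demo(int x) { return x + 1; }

static int compute(void)
{
        /* one shared read-only instance, not rebuilt on each call */
        static const struct skb_checksum_ops_demo ops = {
                .update = update_demo,
        };
        return ops.update(41);
}

int main(void) { printf("%d\n", compute()); return 0; }	/* prints 42 */
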
88919diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
88920index 7f4eeb3..37e8fe1 100644
88921--- a/include/net/sctp/sm.h
88922+++ b/include/net/sctp/sm.h
88923@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
88924 typedef struct {
88925 sctp_state_fn_t *fn;
88926 const char *name;
88927-} sctp_sm_table_entry_t;
88928+} __do_const sctp_sm_table_entry_t;
88929
88930 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
88931 * currently in use.
88932@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
88933 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
88934
88935 /* Extern declarations for major data structures. */
88936-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
88937+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
88938
88939
88940 /* Get the size of a DATA chunk payload. */
88941diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
88942index f38588bf..94c1795 100644
88943--- a/include/net/sctp/structs.h
88944+++ b/include/net/sctp/structs.h
88945@@ -507,7 +507,7 @@ struct sctp_pf {
88946 struct sctp_association *asoc);
88947 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
88948 struct sctp_af *af;
88949-};
88950+} __do_const;
88951
88952
88953 /* Structure to track chunk fragments that have been acked, but peer
88954diff --git a/include/net/sock.h b/include/net/sock.h
88955index 1563507..20d5d0e 100644
88956--- a/include/net/sock.h
88957+++ b/include/net/sock.h
88958@@ -349,7 +349,7 @@ struct sock {
88959 unsigned int sk_napi_id;
88960 unsigned int sk_ll_usec;
88961 #endif
88962- atomic_t sk_drops;
88963+ atomic_unchecked_t sk_drops;
88964 int sk_rcvbuf;
88965
88966 struct sk_filter __rcu *sk_filter;
88967@@ -1038,7 +1038,7 @@ struct proto {
88968 void (*destroy_cgroup)(struct mem_cgroup *memcg);
88969 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
88970 #endif
88971-};
88972+} __randomize_layout;
88973
88974 /*
88975 * Bits in struct cg_proto.flags
88976@@ -1225,7 +1225,7 @@ static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
88977 return ret >> PAGE_SHIFT;
88978 }
88979
88980-static inline long
88981+static inline long __intentional_overflow(-1)
88982 sk_memory_allocated(const struct sock *sk)
88983 {
88984 struct proto *prot = sk->sk_prot;
88985@@ -1370,7 +1370,7 @@ struct sock_iocb {
88986 struct scm_cookie *scm;
88987 struct msghdr *msg, async_msg;
88988 struct kiocb *kiocb;
88989-};
88990+} __randomize_layout;
88991
88992 static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
88993 {
88994@@ -1623,6 +1623,33 @@ void sk_common_release(struct sock *sk);
88995 /* Initialise core socket variables */
88996 void sock_init_data(struct socket *sock, struct sock *sk);
88997
88998+void sk_filter_release_rcu(struct rcu_head *rcu);
88999+
89000+/**
89001+ * sk_filter_release - release a socket filter
89002+ * @fp: filter to remove
89003+ *
89004+ * Remove a filter from a socket and release its resources.
89005+ */
89006+
89007+static inline void sk_filter_release(struct sk_filter *fp)
89008+{
89009+ if (atomic_dec_and_test(&fp->refcnt))
89010+ call_rcu(&fp->rcu, sk_filter_release_rcu);
89011+}
89012+
89013+static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
89014+{
89015+ atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
89016+ sk_filter_release(fp);
89017+}
89018+
89019+static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
89020+{
89021+ atomic_inc(&fp->refcnt);
89022+ atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
89023+}
89024+
89025 /*
89026 * Socket reference counting postulates.
89027 *
89028@@ -1805,7 +1832,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
89029 }
89030
89031 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
89032- char __user *from, char *to,
89033+ char __user *from, unsigned char *to,
89034 int copy, int offset)
89035 {
89036 if (skb->ip_summed == CHECKSUM_NONE) {
89037@@ -2067,7 +2094,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
89038 }
89039 }
89040
89041-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
89042+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
89043
89044 /**
89045 * sk_page_frag - return an appropriate page_frag
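
The sk_filter helpers reinstated above follow the standard put/charge pattern: only the caller that drops the last reference schedules the deferred free, and socket memory accounting (sk_omem_alloc) moves in lockstep with the refcount. A minimal user-space analogue — call_rcu() collapsed to an immediate free, which is only valid outside the kernel:

#include <stdlib.h>

struct filter_demo { int refcnt; };

static void filter_get(struct filter_demo *f)
{
        __sync_fetch_and_add(&f->refcnt, 1);
}

static void filter_put(struct filter_demo *f)
{
        /* last reference out frees; the kernel defers this via call_rcu() */
        if (__sync_sub_and_fetch(&f->refcnt, 1) == 0)
                free(f);
}
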
89046diff --git a/include/net/tcp.h b/include/net/tcp.h
89047index 7286db8..f1aa7dc 100644
89048--- a/include/net/tcp.h
89049+++ b/include/net/tcp.h
89050@@ -535,7 +535,7 @@ void tcp_retransmit_timer(struct sock *sk);
89051 void tcp_xmit_retransmit_queue(struct sock *);
89052 void tcp_simple_retransmit(struct sock *);
89053 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
89054-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
89055+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
89056
89057 void tcp_send_probe0(struct sock *);
89058 void tcp_send_partial(struct sock *);
89059@@ -708,8 +708,8 @@ struct tcp_skb_cb {
89060 struct inet6_skb_parm h6;
89061 #endif
89062 } header; /* For incoming frames */
89063- __u32 seq; /* Starting sequence number */
89064- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
89065+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
89066+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
89067 __u32 when; /* used to compute rtt's */
89068 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
89069
89070@@ -723,7 +723,7 @@ struct tcp_skb_cb {
89071
89072 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
89073 /* 1 byte hole */
89074- __u32 ack_seq; /* Sequence number ACK'd */
89075+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
89076 };
89077
89078 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
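
seq, end_seq and ack_seq live in a modulo-2^32 sequence space, so their arithmetic wraps constantly and legitimately; the annotations above exempt them from overflow instrumentation. The classic wrap-safe comparison the TCP stack builds on, shown standalone:

#include <stdint.h>
#include <stdio.h>

static int before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;	/* wrap-safe ordering */
}

int main(void)
{
        uint32_t a = 0xfffffff0u;		/* just before the wrap */
        uint32_t b = 0x00000010u;		/* just after the wrap  */
        printf("%d\n", before(a, b));		/* 1: a precedes b      */
        return 0;
}
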
89079diff --git a/include/net/xfrm.h b/include/net/xfrm.h
89080index 721e9c3b..3c81bbf 100644
89081--- a/include/net/xfrm.h
89082+++ b/include/net/xfrm.h
89083@@ -285,7 +285,6 @@ struct xfrm_dst;
89084 struct xfrm_policy_afinfo {
89085 unsigned short family;
89086 struct dst_ops *dst_ops;
89087- void (*garbage_collect)(struct net *net);
89088 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
89089 const xfrm_address_t *saddr,
89090 const xfrm_address_t *daddr);
89091@@ -303,7 +302,7 @@ struct xfrm_policy_afinfo {
89092 struct net_device *dev,
89093 const struct flowi *fl);
89094 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
89095-};
89096+} __do_const;
89097
89098 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
89099 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
89100@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
89101 int (*transport_finish)(struct sk_buff *skb,
89102 int async);
89103 void (*local_error)(struct sk_buff *skb, u32 mtu);
89104-};
89105+} __do_const;
89106
89107 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
89108 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
89109@@ -437,7 +436,7 @@ struct xfrm_mode {
89110 struct module *owner;
89111 unsigned int encap;
89112 int flags;
89113-};
89114+} __do_const;
89115
89116 /* Flags for xfrm_mode. */
89117 enum {
89118@@ -534,7 +533,7 @@ struct xfrm_policy {
89119 struct timer_list timer;
89120
89121 struct flow_cache_object flo;
89122- atomic_t genid;
89123+ atomic_unchecked_t genid;
89124 u32 priority;
89125 u32 index;
89126 struct xfrm_mark mark;
89127@@ -1167,6 +1166,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
89128 }
89129
89130 void xfrm_garbage_collect(struct net *net);
89131+void xfrm_garbage_collect_deferred(struct net *net);
89132
89133 #else
89134
89135@@ -1205,6 +1205,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
89136 static inline void xfrm_garbage_collect(struct net *net)
89137 {
89138 }
89139+static inline void xfrm_garbage_collect_deferred(struct net *net)
89140+{
89141+}
89142 #endif
89143
89144 static __inline__
89145diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
89146index 1017e0b..227aa4d 100644
89147--- a/include/rdma/iw_cm.h
89148+++ b/include/rdma/iw_cm.h
89149@@ -122,7 +122,7 @@ struct iw_cm_verbs {
89150 int backlog);
89151
89152 int (*destroy_listen)(struct iw_cm_id *cm_id);
89153-};
89154+} __no_const;
89155
89156 /**
89157 * iw_create_cm_id - Create an IW CM identifier.
89158diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
89159index 52beadf..598734c 100644
89160--- a/include/scsi/libfc.h
89161+++ b/include/scsi/libfc.h
89162@@ -771,6 +771,7 @@ struct libfc_function_template {
89163 */
89164 void (*disc_stop_final) (struct fc_lport *);
89165 };
89166+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
89167
89168 /**
89169 * struct fc_disc - Discovery context
89170@@ -875,7 +876,7 @@ struct fc_lport {
89171 struct fc_vport *vport;
89172
89173 /* Operational Information */
89174- struct libfc_function_template tt;
89175+ libfc_function_template_no_const tt;
89176 u8 link_up;
89177 u8 qfull;
89178 enum fc_lport_state state;
89179diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
89180index 758bc9f..9b14934 100644
89181--- a/include/scsi/scsi_device.h
89182+++ b/include/scsi/scsi_device.h
89183@@ -188,9 +188,9 @@ struct scsi_device {
89184 unsigned int max_device_blocked; /* what device_blocked counts down from */
89185 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
89186
89187- atomic_t iorequest_cnt;
89188- atomic_t iodone_cnt;
89189- atomic_t ioerr_cnt;
89190+ atomic_unchecked_t iorequest_cnt;
89191+ atomic_unchecked_t iodone_cnt;
89192+ atomic_unchecked_t ioerr_cnt;
89193
89194 struct device sdev_gendev,
89195 sdev_dev;
89196diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
89197index 8c79980..723f6f9 100644
89198--- a/include/scsi/scsi_transport_fc.h
89199+++ b/include/scsi/scsi_transport_fc.h
89200@@ -752,7 +752,8 @@ struct fc_function_template {
89201 unsigned long show_host_system_hostname:1;
89202
89203 unsigned long disable_target_scan:1;
89204-};
89205+} __do_const;
89206+typedef struct fc_function_template __no_const fc_function_template_no_const;
89207
89208
89209 /**
89210diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
89211index ae6c3b8..fd748ac 100644
89212--- a/include/sound/compress_driver.h
89213+++ b/include/sound/compress_driver.h
89214@@ -128,7 +128,7 @@ struct snd_compr_ops {
89215 struct snd_compr_caps *caps);
89216 int (*get_codec_caps) (struct snd_compr_stream *stream,
89217 struct snd_compr_codec_caps *codec);
89218-};
89219+} __no_const;
89220
89221 /**
89222 * struct snd_compr: Compressed device
89223diff --git a/include/sound/soc.h b/include/sound/soc.h
89224index ed9e2d7..aad0887 100644
89225--- a/include/sound/soc.h
89226+++ b/include/sound/soc.h
89227@@ -798,7 +798,7 @@ struct snd_soc_codec_driver {
89228 /* probe ordering - for components with runtime dependencies */
89229 int probe_order;
89230 int remove_order;
89231-};
89232+} __do_const;
89233
89234 /* SoC platform interface */
89235 struct snd_soc_platform_driver {
89236@@ -845,7 +845,7 @@ struct snd_soc_platform_driver {
89237 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
89238 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
89239 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
89240-};
89241+} __do_const;
89242
89243 struct snd_soc_platform {
89244 const char *name;
89245diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
89246index 9ec9864..e2ee1ee 100644
89247--- a/include/target/target_core_base.h
89248+++ b/include/target/target_core_base.h
89249@@ -761,7 +761,7 @@ struct se_device {
89250 atomic_long_t write_bytes;
89251 /* Active commands on this virtual SE device */
89252 atomic_t simple_cmds;
89253- atomic_t dev_ordered_id;
89254+ atomic_unchecked_t dev_ordered_id;
89255 atomic_t dev_ordered_sync;
89256 atomic_t dev_qf_count;
89257 int export_count;
89258diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
89259new file mode 100644
89260index 0000000..fb634b7
89261--- /dev/null
89262+++ b/include/trace/events/fs.h
89263@@ -0,0 +1,53 @@
89264+#undef TRACE_SYSTEM
89265+#define TRACE_SYSTEM fs
89266+
89267+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
89268+#define _TRACE_FS_H
89269+
89270+#include <linux/fs.h>
89271+#include <linux/tracepoint.h>
89272+
89273+TRACE_EVENT(do_sys_open,
89274+
89275+ TP_PROTO(const char *filename, int flags, int mode),
89276+
89277+ TP_ARGS(filename, flags, mode),
89278+
89279+ TP_STRUCT__entry(
89280+ __string( filename, filename )
89281+ __field( int, flags )
89282+ __field( int, mode )
89283+ ),
89284+
89285+ TP_fast_assign(
89286+ __assign_str(filename, filename);
89287+ __entry->flags = flags;
89288+ __entry->mode = mode;
89289+ ),
89290+
89291+ TP_printk("\"%s\" %x %o",
89292+ __get_str(filename), __entry->flags, __entry->mode)
89293+);
89294+
89295+TRACE_EVENT(open_exec,
89296+
89297+ TP_PROTO(const char *filename),
89298+
89299+ TP_ARGS(filename),
89300+
89301+ TP_STRUCT__entry(
89302+ __string( filename, filename )
89303+ ),
89304+
89305+ TP_fast_assign(
89306+ __assign_str(filename, filename);
89307+ ),
89308+
89309+ TP_printk("\"%s\"",
89310+ __get_str(filename))
89311+);
89312+
89313+#endif /* _TRACE_FS_H */
89314+
89315+/* This part must be outside protection */
89316+#include <trace/define_trace.h>
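
A TRACE_EVENT(name, ...) block expands into a trace_<name>() inline that call sites fire, so the new header above implies matching hooks elsewhere in the patch — presumably in fs/open.c and fs/exec.c, though that is an assumption. Sketch of the open-side call, with the real logic elided:

#include <trace/events/fs.h>	/* the header defined above */

static long do_sys_open_demo(const char *filename, int flags, int mode)
{
        trace_do_sys_open(filename, flags, mode);	/* generated hook */
        /* ... actual open path continues here ... */
        return 0;
}
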
89317diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
89318index 1c09820..7f5ec79 100644
89319--- a/include/trace/events/irq.h
89320+++ b/include/trace/events/irq.h
89321@@ -36,7 +36,7 @@ struct softirq_action;
89322 */
89323 TRACE_EVENT(irq_handler_entry,
89324
89325- TP_PROTO(int irq, struct irqaction *action),
89326+ TP_PROTO(int irq, const struct irqaction *action),
89327
89328 TP_ARGS(irq, action),
89329
89330@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
89331 */
89332 TRACE_EVENT(irq_handler_exit,
89333
89334- TP_PROTO(int irq, struct irqaction *action, int ret),
89335+ TP_PROTO(int irq, const struct irqaction *action, int ret),
89336
89337 TP_ARGS(irq, action, ret),
89338
89339diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
89340index 7caf44c..23c6f27 100644
89341--- a/include/uapi/linux/a.out.h
89342+++ b/include/uapi/linux/a.out.h
89343@@ -39,6 +39,14 @@ enum machine_type {
89344 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
89345 };
89346
89347+/* Constants for the N_FLAGS field */
89348+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
89349+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
89350+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
89351+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
89352+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
89353+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
89354+
89355 #if !defined (N_MAGIC)
89356 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
89357 #endif
89358diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
89359index 22b6ad3..aeba37e 100644
89360--- a/include/uapi/linux/bcache.h
89361+++ b/include/uapi/linux/bcache.h
89362@@ -5,6 +5,7 @@
89363 * Bcache on disk data structures
89364 */
89365
89366+#include <linux/compiler.h>
89367 #include <asm/types.h>
89368
89369 #define BITMASK(name, type, field, offset, size) \
89370@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \
89371 /* Btree keys - all units are in sectors */
89372
89373 struct bkey {
89374- __u64 high;
89375- __u64 low;
89376+ __u64 high __intentional_overflow(-1);
89377+ __u64 low __intentional_overflow(-1);
89378 __u64 ptr[];
89379 };
89380
89381diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
89382index d876736..ccce5c0 100644
89383--- a/include/uapi/linux/byteorder/little_endian.h
89384+++ b/include/uapi/linux/byteorder/little_endian.h
89385@@ -42,51 +42,51 @@
89386
89387 static inline __le64 __cpu_to_le64p(const __u64 *p)
89388 {
89389- return (__force __le64)*p;
89390+ return (__force const __le64)*p;
89391 }
89392-static inline __u64 __le64_to_cpup(const __le64 *p)
89393+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
89394 {
89395- return (__force __u64)*p;
89396+ return (__force const __u64)*p;
89397 }
89398 static inline __le32 __cpu_to_le32p(const __u32 *p)
89399 {
89400- return (__force __le32)*p;
89401+ return (__force const __le32)*p;
89402 }
89403 static inline __u32 __le32_to_cpup(const __le32 *p)
89404 {
89405- return (__force __u32)*p;
89406+ return (__force const __u32)*p;
89407 }
89408 static inline __le16 __cpu_to_le16p(const __u16 *p)
89409 {
89410- return (__force __le16)*p;
89411+ return (__force const __le16)*p;
89412 }
89413 static inline __u16 __le16_to_cpup(const __le16 *p)
89414 {
89415- return (__force __u16)*p;
89416+ return (__force const __u16)*p;
89417 }
89418 static inline __be64 __cpu_to_be64p(const __u64 *p)
89419 {
89420- return (__force __be64)__swab64p(p);
89421+ return (__force const __be64)__swab64p(p);
89422 }
89423 static inline __u64 __be64_to_cpup(const __be64 *p)
89424 {
89425- return __swab64p((__u64 *)p);
89426+ return __swab64p((const __u64 *)p);
89427 }
89428 static inline __be32 __cpu_to_be32p(const __u32 *p)
89429 {
89430- return (__force __be32)__swab32p(p);
89431+ return (__force const __be32)__swab32p(p);
89432 }
89433-static inline __u32 __be32_to_cpup(const __be32 *p)
89434+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
89435 {
89436- return __swab32p((__u32 *)p);
89437+ return __swab32p((const __u32 *)p);
89438 }
89439 static inline __be16 __cpu_to_be16p(const __u16 *p)
89440 {
89441- return (__force __be16)__swab16p(p);
89442+ return (__force const __be16)__swab16p(p);
89443 }
89444 static inline __u16 __be16_to_cpup(const __be16 *p)
89445 {
89446- return __swab16p((__u16 *)p);
89447+ return __swab16p((const __u16 *)p);
89448 }
89449 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
89450 #define __le64_to_cpus(x) do { (void)(x); } while (0)
89451diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
89452index ef6103b..d4e65dd 100644
89453--- a/include/uapi/linux/elf.h
89454+++ b/include/uapi/linux/elf.h
89455@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
89456 #define PT_GNU_EH_FRAME 0x6474e550
89457
89458 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
89459+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
89460+
89461+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
89462+
89463+/* Constants for the e_flags field */
89464+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
89465+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
89466+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
89467+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
89468+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
89469+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
89470
89471 /*
89472 * Extended Numbering
89473@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
89474 #define DT_DEBUG 21
89475 #define DT_TEXTREL 22
89476 #define DT_JMPREL 23
89477+#define DT_FLAGS 30
89478+ #define DF_TEXTREL 0x00000004
89479 #define DT_ENCODING 32
89480 #define OLD_DT_LOOS 0x60000000
89481 #define DT_LOOS 0x6000000d
89482@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
89483 #define PF_W 0x2
89484 #define PF_X 0x1
89485
89486+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
89487+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
89488+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
89489+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
89490+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
89491+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
89492+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
89493+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
89494+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
89495+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
89496+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
89497+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
89498+
89499 typedef struct elf32_phdr{
89500 Elf32_Word p_type;
89501 Elf32_Off p_offset;
89502@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
89503 #define EI_OSABI 7
89504 #define EI_PAD 8
89505
89506+#define EI_PAX 14
89507+
89508 #define ELFMAG0 0x7f /* EI_MAG */
89509 #define ELFMAG1 'E'
89510 #define ELFMAG2 'L'
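
Each PaX feature gets a paired enable/disable bit in the PT_PAX_FLAGS program header, with neither bit set meaning "use the system default". A small decoder for one pair; the precedence when both bits are set is my assumption, as marking tools normally set exactly one:

#include <stdio.h>

#define PF_PAGEEXEC   (1U << 4)
#define PF_NOPAGEEXEC (1U << 5)

static const char *pageexec_state(unsigned int p_flags)
{
        if (p_flags & PF_NOPAGEEXEC) return "disabled";
        if (p_flags & PF_PAGEEXEC)   return "enabled";
        return "system default";
}

int main(void)
{
        printf("%s\n", pageexec_state(PF_PAGEEXEC));	/* enabled        */
        printf("%s\n", pageexec_state(0));		/* system default */
        return 0;
}
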
89511diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
89512index aa169c4..6a2771d 100644
89513--- a/include/uapi/linux/personality.h
89514+++ b/include/uapi/linux/personality.h
89515@@ -30,6 +30,7 @@ enum {
89516 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
89517 ADDR_NO_RANDOMIZE | \
89518 ADDR_COMPAT_LAYOUT | \
89519+ ADDR_LIMIT_3GB | \
89520 MMAP_PAGE_ZERO)
89521
89522 /*
89523diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
89524index 7530e74..e714828 100644
89525--- a/include/uapi/linux/screen_info.h
89526+++ b/include/uapi/linux/screen_info.h
89527@@ -43,7 +43,8 @@ struct screen_info {
89528 __u16 pages; /* 0x32 */
89529 __u16 vesa_attributes; /* 0x34 */
89530 __u32 capabilities; /* 0x36 */
89531- __u8 _reserved[6]; /* 0x3a */
89532+ __u16 vesapm_size; /* 0x3a */
89533+ __u8 _reserved[4]; /* 0x3c */
89534 } __attribute__((packed));
89535
89536 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
89537diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
89538index 0e011eb..82681b1 100644
89539--- a/include/uapi/linux/swab.h
89540+++ b/include/uapi/linux/swab.h
89541@@ -43,7 +43,7 @@
89542 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
89543 */
89544
89545-static inline __attribute_const__ __u16 __fswab16(__u16 val)
89546+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
89547 {
89548 #ifdef __HAVE_BUILTIN_BSWAP16__
89549 return __builtin_bswap16(val);
89550@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
89551 #endif
89552 }
89553
89554-static inline __attribute_const__ __u32 __fswab32(__u32 val)
89555+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
89556 {
89557 #ifdef __HAVE_BUILTIN_BSWAP32__
89558 return __builtin_bswap32(val);
89559@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
89560 #endif
89561 }
89562
89563-static inline __attribute_const__ __u64 __fswab64(__u64 val)
89564+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
89565 {
89566 #ifdef __HAVE_BUILTIN_BSWAP64__
89567 return __builtin_bswap64(val);
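
Byte swapping is pure bit shuffling whose shifts the overflow checker would otherwise flag, hence the blanket __intentional_overflow(-1) on these helpers. The fallback used when __builtin_bswap16 is unavailable looks essentially like this:

#include <stdint.h>
#include <stdio.h>

static uint16_t fswab16(uint16_t val)
{
        return (uint16_t)((val << 8) | (val >> 8));
}

int main(void) { printf("%04x\n", fswab16(0x1234)); return 0; }	/* 3412 */
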
89568diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
89569index 6d67213..552fdd9 100644
89570--- a/include/uapi/linux/sysctl.h
89571+++ b/include/uapi/linux/sysctl.h
89572@@ -155,8 +155,6 @@ enum
89573 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
89574 };
89575
89576-
89577-
89578 /* CTL_VM names: */
89579 enum
89580 {
89581diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
89582index 168ff50..a921df2 100644
89583--- a/include/uapi/linux/videodev2.h
89584+++ b/include/uapi/linux/videodev2.h
89585@@ -1253,7 +1253,7 @@ struct v4l2_ext_control {
89586 union {
89587 __s32 value;
89588 __s64 value64;
89589- char *string;
89590+ char __user *string;
89591 };
89592 } __attribute__ ((packed));
89593
89594diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
89595index c38355c..17a57bc 100644
89596--- a/include/uapi/linux/xattr.h
89597+++ b/include/uapi/linux/xattr.h
89598@@ -73,5 +73,9 @@
89599 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
89600 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
89601
89602+/* User namespace */
89603+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
89604+#define XATTR_PAX_FLAGS_SUFFIX "flags"
89605+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
89606
89607 #endif /* _UAPI_LINUX_XATTR_H */
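
String pasting makes the new macros concrete: with mainline's XATTR_USER_PREFIX of "user.", XATTR_NAME_PAX_FLAGS expands to "user.pax.flags", the attribute user-space PaX tooling reads and writes:

#define XATTR_USER_PREFIX	"user."		/* as in mainline xattr.h */
#define XATTR_PAX_PREFIX	XATTR_USER_PREFIX "pax."
#define XATTR_PAX_FLAGS_SUFFIX	"flags"
#define XATTR_NAME_PAX_FLAGS	XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
/* XATTR_NAME_PAX_FLAGS == "user.pax.flags"; settable from user space
 * with e.g. `setfattr -n user.pax.flags -v me ./some-binary`, where
 * the flag letters shown are illustrative. */
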
89608diff --git a/include/video/udlfb.h b/include/video/udlfb.h
89609index f9466fa..f4e2b81 100644
89610--- a/include/video/udlfb.h
89611+++ b/include/video/udlfb.h
89612@@ -53,10 +53,10 @@ struct dlfb_data {
89613 u32 pseudo_palette[256];
89614 int blank_mode; /*one of FB_BLANK_ */
89615 /* blit-only rendering path metrics, exposed through sysfs */
89616- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
89617- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
89618- atomic_t bytes_sent; /* to usb, after compression including overhead */
89619- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
89620+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
89621+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
89622+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
89623+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
89624 };
89625
89626 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
89627diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
89628index 30f5362..8ed8ac9 100644
89629--- a/include/video/uvesafb.h
89630+++ b/include/video/uvesafb.h
89631@@ -122,6 +122,7 @@ struct uvesafb_par {
89632 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
89633 u8 pmi_setpal; /* PMI for palette changes */
89634 u16 *pmi_base; /* protected mode interface location */
89635+ u8 *pmi_code; /* protected mode code location */
89636 void *pmi_start;
89637 void *pmi_pal;
89638 u8 *vbe_state_orig; /*
89639diff --git a/init/Kconfig b/init/Kconfig
89640index 9d76b99..d378b1e 100644
89641--- a/init/Kconfig
89642+++ b/init/Kconfig
89643@@ -1105,6 +1105,7 @@ endif # CGROUPS
89644
89645 config CHECKPOINT_RESTORE
89646 bool "Checkpoint/restore support" if EXPERT
89647+ depends on !GRKERNSEC
89648 default n
89649 help
89650 Enables additional kernel features in a sake of checkpoint/restore.
89651@@ -1589,7 +1590,7 @@ config SLUB_DEBUG
89652
89653 config COMPAT_BRK
89654 bool "Disable heap randomization"
89655- default y
89656+ default n
89657 help
89658 Randomizing heap placement makes heap exploits harder, but it
89659 also breaks ancient binaries (including anything libc5 based).
89660@@ -1877,7 +1878,7 @@ config INIT_ALL_POSSIBLE
89661 config STOP_MACHINE
89662 bool
89663 default y
89664- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
89665+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
89666 help
89667 Need stop_machine() primitive.
89668
89669diff --git a/init/Makefile b/init/Makefile
89670index 7bc47ee..6da2dc7 100644
89671--- a/init/Makefile
89672+++ b/init/Makefile
89673@@ -2,6 +2,9 @@
89674 # Makefile for the linux kernel.
89675 #
89676
89677+ccflags-y := $(GCC_PLUGINS_CFLAGS)
89678+asflags-y := $(GCC_PLUGINS_AFLAGS)
89679+
89680 obj-y := main.o version.o mounts.o
89681 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
89682 obj-y += noinitramfs.o
89683diff --git a/init/do_mounts.c b/init/do_mounts.c
89684index 82f2288..ea1430a 100644
89685--- a/init/do_mounts.c
89686+++ b/init/do_mounts.c
89687@@ -359,11 +359,11 @@ static void __init get_fs_names(char *page)
89688 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
89689 {
89690 struct super_block *s;
89691- int err = sys_mount(name, "/root", fs, flags, data);
89692+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
89693 if (err)
89694 return err;
89695
89696- sys_chdir("/root");
89697+ sys_chdir((const char __force_user *)"/root");
89698 s = current->fs->pwd.dentry->d_sb;
89699 ROOT_DEV = s->s_dev;
89700 printk(KERN_INFO
89701@@ -484,18 +484,18 @@ void __init change_floppy(char *fmt, ...)
89702 va_start(args, fmt);
89703 vsprintf(buf, fmt, args);
89704 va_end(args);
89705- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
89706+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
89707 if (fd >= 0) {
89708 sys_ioctl(fd, FDEJECT, 0);
89709 sys_close(fd);
89710 }
89711 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
89712- fd = sys_open("/dev/console", O_RDWR, 0);
89713+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
89714 if (fd >= 0) {
89715 sys_ioctl(fd, TCGETS, (long)&termios);
89716 termios.c_lflag &= ~ICANON;
89717 sys_ioctl(fd, TCSETSF, (long)&termios);
89718- sys_read(fd, &c, 1);
89719+ sys_read(fd, (char __user *)&c, 1);
89720 termios.c_lflag |= ICANON;
89721 sys_ioctl(fd, TCSETSF, (long)&termios);
89722 sys_close(fd);
89723@@ -589,8 +589,8 @@ void __init prepare_namespace(void)
89724 mount_root();
89725 out:
89726 devtmpfs_mount("dev");
89727- sys_mount(".", "/", NULL, MS_MOVE, NULL);
89728- sys_chroot(".");
89729+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
89730+ sys_chroot((const char __force_user *)".");
89731 }
89732
89733 static bool is_tmpfs;
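
Boot-time code calls the sys_* entry points with kernel pointers, which the user/kernel address-space checking grsecurity layers onto the Sparse annotations would reject; __force_user marks each call as a deliberate, audited crossing. Assumed shape of the annotations — the real definitions sit in compiler.h and only take effect under __CHECKER__:

#ifdef __CHECKER__
# define __user       __attribute__((noderef, address_space(1)))
# define __force      __attribute__((force))
# define __force_user __force __user	/* audited kernel->user cast */
#else
# define __user
# define __force
# define __force_user
#endif
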
89734diff --git a/init/do_mounts.h b/init/do_mounts.h
89735index f5b978a..69dbfe8 100644
89736--- a/init/do_mounts.h
89737+++ b/init/do_mounts.h
89738@@ -15,15 +15,15 @@ extern int root_mountflags;
89739
89740 static inline int create_dev(char *name, dev_t dev)
89741 {
89742- sys_unlink(name);
89743- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
89744+ sys_unlink((char __force_user *)name);
89745+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
89746 }
89747
89748 #if BITS_PER_LONG == 32
89749 static inline u32 bstat(char *name)
89750 {
89751 struct stat64 stat;
89752- if (sys_stat64(name, &stat) != 0)
89753+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
89754 return 0;
89755 if (!S_ISBLK(stat.st_mode))
89756 return 0;
89757@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
89758 static inline u32 bstat(char *name)
89759 {
89760 struct stat stat;
89761- if (sys_newstat(name, &stat) != 0)
89762+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
89763 return 0;
89764 if (!S_ISBLK(stat.st_mode))
89765 return 0;
89766diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
89767index 3e0878e..8a9d7a0 100644
89768--- a/init/do_mounts_initrd.c
89769+++ b/init/do_mounts_initrd.c
89770@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
89771 {
89772 sys_unshare(CLONE_FS | CLONE_FILES);
89773 /* stdin/stdout/stderr for /linuxrc */
89774- sys_open("/dev/console", O_RDWR, 0);
89775+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
89776 sys_dup(0);
89777 sys_dup(0);
89778 /* move initrd over / and chdir/chroot in initrd root */
89779- sys_chdir("/root");
89780- sys_mount(".", "/", NULL, MS_MOVE, NULL);
89781- sys_chroot(".");
89782+ sys_chdir((const char __force_user *)"/root");
89783+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
89784+ sys_chroot((const char __force_user *)".");
89785 sys_setsid();
89786 return 0;
89787 }
89788@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
89789 create_dev("/dev/root.old", Root_RAM0);
89790 /* mount initrd on rootfs' /root */
89791 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
89792- sys_mkdir("/old", 0700);
89793- sys_chdir("/old");
89794+ sys_mkdir((const char __force_user *)"/old", 0700);
89795+ sys_chdir((const char __force_user *)"/old");
89796
89797 /* try loading default modules from initrd */
89798 load_default_modules();
89799@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
89800 current->flags &= ~PF_FREEZER_SKIP;
89801
89802 /* move initrd to rootfs' /old */
89803- sys_mount("..", ".", NULL, MS_MOVE, NULL);
89804+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
89805 /* switch root and cwd back to / of rootfs */
89806- sys_chroot("..");
89807+ sys_chroot((const char __force_user *)"..");
89808
89809 if (new_decode_dev(real_root_dev) == Root_RAM0) {
89810- sys_chdir("/old");
89811+ sys_chdir((const char __force_user *)"/old");
89812 return;
89813 }
89814
89815- sys_chdir("/");
89816+ sys_chdir((const char __force_user *)"/");
89817 ROOT_DEV = new_decode_dev(real_root_dev);
89818 mount_root();
89819
89820 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
89821- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
89822+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
89823 if (!error)
89824 printk("okay\n");
89825 else {
89826- int fd = sys_open("/dev/root.old", O_RDWR, 0);
89827+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
89828 if (error == -ENOENT)
89829 printk("/initrd does not exist. Ignored.\n");
89830 else
89831 printk("failed\n");
89832 printk(KERN_NOTICE "Unmounting old root\n");
89833- sys_umount("/old", MNT_DETACH);
89834+ sys_umount((char __force_user *)"/old", MNT_DETACH);
89835 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
89836 if (fd < 0) {
89837 error = fd;
89838@@ -127,11 +127,11 @@ int __init initrd_load(void)
89839 * mounted in the normal path.
89840 */
89841 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
89842- sys_unlink("/initrd.image");
89843+ sys_unlink((const char __force_user *)"/initrd.image");
89844 handle_initrd();
89845 return 1;
89846 }
89847 }
89848- sys_unlink("/initrd.image");
89849+ sys_unlink((const char __force_user *)"/initrd.image");
89850 return 0;
89851 }
89852diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
89853index 8cb6db5..d729f50 100644
89854--- a/init/do_mounts_md.c
89855+++ b/init/do_mounts_md.c
89856@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
89857 partitioned ? "_d" : "", minor,
89858 md_setup_args[ent].device_names);
89859
89860- fd = sys_open(name, 0, 0);
89861+ fd = sys_open((char __force_user *)name, 0, 0);
89862 if (fd < 0) {
89863 printk(KERN_ERR "md: open failed - cannot start "
89864 "array %s\n", name);
89865@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
89866 * array without it
89867 */
89868 sys_close(fd);
89869- fd = sys_open(name, 0, 0);
89870+ fd = sys_open((char __force_user *)name, 0, 0);
89871 sys_ioctl(fd, BLKRRPART, 0);
89872 }
89873 sys_close(fd);
89874@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
89875
89876 wait_for_device_probe();
89877
89878- fd = sys_open("/dev/md0", 0, 0);
89879+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
89880 if (fd >= 0) {
89881 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
89882 sys_close(fd);
89883diff --git a/init/init_task.c b/init/init_task.c
89884index ba0a7f36..2bcf1d5 100644
89885--- a/init/init_task.c
89886+++ b/init/init_task.c
89887@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
89888 * Initial thread structure. Alignment of this is handled by a special
89889 * linker map entry.
89890 */
89891+#ifdef CONFIG_X86
89892+union thread_union init_thread_union __init_task_data;
89893+#else
89894 union thread_union init_thread_union __init_task_data =
89895 { INIT_THREAD_INFO(init_task) };
89896+#endif
89897diff --git a/init/initramfs.c b/init/initramfs.c
89898index a8497fa..35b3c90 100644
89899--- a/init/initramfs.c
89900+++ b/init/initramfs.c
89901@@ -84,7 +84,7 @@ static void __init free_hash(void)
89902 }
89903 }
89904
89905-static long __init do_utime(char *filename, time_t mtime)
89906+static long __init do_utime(char __force_user *filename, time_t mtime)
89907 {
89908 struct timespec t[2];
89909
89910@@ -119,7 +119,7 @@ static void __init dir_utime(void)
89911 struct dir_entry *de, *tmp;
89912 list_for_each_entry_safe(de, tmp, &dir_list, list) {
89913 list_del(&de->list);
89914- do_utime(de->name, de->mtime);
89915+ do_utime((char __force_user *)de->name, de->mtime);
89916 kfree(de->name);
89917 kfree(de);
89918 }
89919@@ -281,7 +281,7 @@ static int __init maybe_link(void)
89920 if (nlink >= 2) {
89921 char *old = find_link(major, minor, ino, mode, collected);
89922 if (old)
89923- return (sys_link(old, collected) < 0) ? -1 : 1;
89924+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
89925 }
89926 return 0;
89927 }
89928@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
89929 {
89930 struct stat st;
89931
89932- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
89933+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
89934 if (S_ISDIR(st.st_mode))
89935- sys_rmdir(path);
89936+ sys_rmdir((char __force_user *)path);
89937 else
89938- sys_unlink(path);
89939+ sys_unlink((char __force_user *)path);
89940 }
89941 }
89942
89943@@ -315,7 +315,7 @@ static int __init do_name(void)
89944 int openflags = O_WRONLY|O_CREAT;
89945 if (ml != 1)
89946 openflags |= O_TRUNC;
89947- wfd = sys_open(collected, openflags, mode);
89948+ wfd = sys_open((char __force_user *)collected, openflags, mode);
89949
89950 if (wfd >= 0) {
89951 sys_fchown(wfd, uid, gid);
89952@@ -327,17 +327,17 @@ static int __init do_name(void)
89953 }
89954 }
89955 } else if (S_ISDIR(mode)) {
89956- sys_mkdir(collected, mode);
89957- sys_chown(collected, uid, gid);
89958- sys_chmod(collected, mode);
89959+ sys_mkdir((char __force_user *)collected, mode);
89960+ sys_chown((char __force_user *)collected, uid, gid);
89961+ sys_chmod((char __force_user *)collected, mode);
89962 dir_add(collected, mtime);
89963 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
89964 S_ISFIFO(mode) || S_ISSOCK(mode)) {
89965 if (maybe_link() == 0) {
89966- sys_mknod(collected, mode, rdev);
89967- sys_chown(collected, uid, gid);
89968- sys_chmod(collected, mode);
89969- do_utime(collected, mtime);
89970+ sys_mknod((char __force_user *)collected, mode, rdev);
89971+ sys_chown((char __force_user *)collected, uid, gid);
89972+ sys_chmod((char __force_user *)collected, mode);
89973+ do_utime((char __force_user *)collected, mtime);
89974 }
89975 }
89976 return 0;
89977@@ -346,15 +346,15 @@ static int __init do_name(void)
89978 static int __init do_copy(void)
89979 {
89980 if (count >= body_len) {
89981- sys_write(wfd, victim, body_len);
89982+ sys_write(wfd, (char __force_user *)victim, body_len);
89983 sys_close(wfd);
89984- do_utime(vcollected, mtime);
89985+ do_utime((char __force_user *)vcollected, mtime);
89986 kfree(vcollected);
89987 eat(body_len);
89988 state = SkipIt;
89989 return 0;
89990 } else {
89991- sys_write(wfd, victim, count);
89992+ sys_write(wfd, (char __force_user *)victim, count);
89993 body_len -= count;
89994 eat(count);
89995 return 1;
89996@@ -365,9 +365,9 @@ static int __init do_symlink(void)
89997 {
89998 collected[N_ALIGN(name_len) + body_len] = '\0';
89999 clean_path(collected, 0);
90000- sys_symlink(collected + N_ALIGN(name_len), collected);
90001- sys_lchown(collected, uid, gid);
90002- do_utime(collected, mtime);
90003+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
90004+ sys_lchown((char __force_user *)collected, uid, gid);
90005+ do_utime((char __force_user *)collected, mtime);
90006 state = SkipIt;
90007 next_state = Reset;
90008 return 0;
90009diff --git a/init/main.c b/init/main.c
90010index e8ae1fe..f60f98c 100644
90011--- a/init/main.c
90012+++ b/init/main.c
90013@@ -98,6 +98,8 @@ extern void radix_tree_init(void);
90014 static inline void mark_rodata_ro(void) { }
90015 #endif
90016
90017+extern void grsecurity_init(void);
90018+
90019 /*
90020 * Debug helper: via this flag we know that we are in 'early bootup code'
90021 * where only the boot processor is running with IRQ disabled. This means
90022@@ -159,6 +161,75 @@ static int __init set_reset_devices(char *str)
90023
90024 __setup("reset_devices", set_reset_devices);
90025
90026+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
90027+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
90028+static int __init setup_grsec_proc_gid(char *str)
90029+{
90030+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
90031+ return 1;
90032+}
90033+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
90034+#endif
90035+
90036+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
90037+unsigned long pax_user_shadow_base __read_only;
90038+EXPORT_SYMBOL(pax_user_shadow_base);
90039+extern char pax_enter_kernel_user[];
90040+extern char pax_exit_kernel_user[];
90041+#endif
90042+
90043+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
90044+static int __init setup_pax_nouderef(char *str)
90045+{
90046+#ifdef CONFIG_X86_32
90047+ unsigned int cpu;
90048+ struct desc_struct *gdt;
90049+
90050+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
90051+ gdt = get_cpu_gdt_table(cpu);
90052+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
90053+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
90054+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
90055+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
90056+ }
90057+ loadsegment(ds, __KERNEL_DS);
90058+ loadsegment(es, __KERNEL_DS);
90059+ loadsegment(ss, __KERNEL_DS);
90060+#else
90061+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
90062+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
90063+ clone_pgd_mask = ~(pgdval_t)0UL;
90064+ pax_user_shadow_base = 0UL;
90065+ setup_clear_cpu_cap(X86_FEATURE_PCID);
90066+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
90067+#endif
90068+
90069+ return 0;
90070+}
90071+early_param("pax_nouderef", setup_pax_nouderef);
90072+
90073+#ifdef CONFIG_X86_64
90074+static int __init setup_pax_weakuderef(char *str)
90075+{
90076+ if (clone_pgd_mask != ~(pgdval_t)0UL)
90077+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
90078+ return 1;
90079+}
90080+__setup("pax_weakuderef", setup_pax_weakuderef);
90081+#endif
90082+#endif
90083+
90084+#ifdef CONFIG_PAX_SOFTMODE
90085+int pax_softmode;
90086+
90087+static int __init setup_pax_softmode(char *str)
90088+{
90089+ get_option(&str, &pax_softmode);
90090+ return 1;
90091+}
90092+__setup("pax_softmode=", setup_pax_softmode);
90093+#endif
90094+
90095 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
90096 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
90097 static const char *panic_later, *panic_param;
90098@@ -727,7 +798,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
90099 struct blacklist_entry *entry;
90100 char *fn_name;
90101
90102- fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
90103+ fn_name = kasprintf(GFP_KERNEL, "%pX", fn);
90104 if (!fn_name)
90105 return false;
90106
90107@@ -779,7 +850,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
90108 {
90109 int count = preempt_count();
90110 int ret;
90111- char msgbuf[64];
90112+ const char *msg1 = "", *msg2 = "";
90113
90114 if (initcall_blacklisted(fn))
90115 return -EPERM;
90116@@ -789,18 +860,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
90117 else
90118 ret = fn();
90119
90120- msgbuf[0] = 0;
90121-
90122 if (preempt_count() != count) {
90123- sprintf(msgbuf, "preemption imbalance ");
90124+ msg1 = " preemption imbalance";
90125 preempt_count_set(count);
90126 }
90127 if (irqs_disabled()) {
90128- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
90129+ msg2 = " disabled interrupts";
90130 local_irq_enable();
90131 }
90132- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
90133+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
90134
90135+ add_latent_entropy();
90136 return ret;
90137 }
90138
90139@@ -907,8 +977,8 @@ static int run_init_process(const char *init_filename)
90140 {
90141 argv_init[0] = init_filename;
90142 return do_execve(getname_kernel(init_filename),
90143- (const char __user *const __user *)argv_init,
90144- (const char __user *const __user *)envp_init);
90145+ (const char __user *const __force_user *)argv_init,
90146+ (const char __user *const __force_user *)envp_init);
90147 }
90148
90149 static int try_to_run_init_process(const char *init_filename)
90150@@ -925,6 +995,10 @@ static int try_to_run_init_process(const char *init_filename)
90151 return ret;
90152 }
90153
90154+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
90155+extern int gr_init_ran;
90156+#endif
90157+
90158 static noinline void __init kernel_init_freeable(void);
90159
90160 static int __ref kernel_init(void *unused)
90161@@ -949,6 +1023,11 @@ static int __ref kernel_init(void *unused)
90162 ramdisk_execute_command, ret);
90163 }
90164
90165+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
90166+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
90167+ gr_init_ran = 1;
90168+#endif
90169+
90170 /*
90171 * We try each of these until one succeeds.
90172 *
90173@@ -1004,7 +1083,7 @@ static noinline void __init kernel_init_freeable(void)
90174 do_basic_setup();
90175
90176 /* Open the /dev/console on the rootfs, this should never fail */
90177- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
90178+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
90179 pr_err("Warning: unable to open an initial console.\n");
90180
90181 (void) sys_dup(0);
90182@@ -1017,11 +1096,13 @@ static noinline void __init kernel_init_freeable(void)
90183 if (!ramdisk_execute_command)
90184 ramdisk_execute_command = "/init";
90185
90186- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
90187+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
90188 ramdisk_execute_command = NULL;
90189 prepare_namespace();
90190 }
90191
90192+ grsecurity_init();
90193+
90194 /*
90195 * Ok, we have completed the initial bootup, and
90196 * we're essentially up and running. Get rid of the
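
The main.c hunk above wires several grsecurity/PaX switches to the kernel command line via __setup() and early_param(); booting with, say, `pax_softmode=1 pax_weakuderef grsec_proc_gid=1001` routes each token to its handler. The registration pattern, reduced to its skeleton with an invented flag name:

#include <linux/init.h>

static int demo_flag __initdata;

/* str points just past "demo_flag="; returning 1 marks the token consumed */
static int __init setup_demo_flag(char *str)
{
        demo_flag = (*str == '1');
        return 1;
}
__setup("demo_flag=", setup_demo_flag);
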
90197diff --git a/ipc/compat.c b/ipc/compat.c
90198index b5ef4f7..ff31d87 100644
90199--- a/ipc/compat.c
90200+++ b/ipc/compat.c
90201@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
90202 COMPAT_SHMLBA);
90203 if (err < 0)
90204 return err;
90205- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
90206+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
90207 }
90208 case SHMDT:
90209 return sys_shmdt(compat_ptr(ptr));
90210diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
90211index c3f0326..d4e0579 100644
90212--- a/ipc/ipc_sysctl.c
90213+++ b/ipc/ipc_sysctl.c
90214@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
90215 static int proc_ipc_dointvec(struct ctl_table *table, int write,
90216 void __user *buffer, size_t *lenp, loff_t *ppos)
90217 {
90218- struct ctl_table ipc_table;
90219+ ctl_table_no_const ipc_table;
90220
90221 memcpy(&ipc_table, table, sizeof(ipc_table));
90222 ipc_table.data = get_ipc(table);
90223@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
90224 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
90225 void __user *buffer, size_t *lenp, loff_t *ppos)
90226 {
90227- struct ctl_table ipc_table;
90228+ ctl_table_no_const ipc_table;
90229
90230 memcpy(&ipc_table, table, sizeof(ipc_table));
90231 ipc_table.data = get_ipc(table);
90232@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
90233 static int proc_ipc_callback_dointvec_minmax(struct ctl_table *table, int write,
90234 void __user *buffer, size_t *lenp, loff_t *ppos)
90235 {
90236- struct ctl_table ipc_table;
90237+ ctl_table_no_const ipc_table;
90238 size_t lenp_bef = *lenp;
90239 int rc;
90240
90241@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec_minmax(struct ctl_table *table, int write,
90242 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
90243 void __user *buffer, size_t *lenp, loff_t *ppos)
90244 {
90245- struct ctl_table ipc_table;
90246+ ctl_table_no_const ipc_table;
90247 memcpy(&ipc_table, table, sizeof(ipc_table));
90248 ipc_table.data = get_ipc(table);
90249
90250@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
90251 static int proc_ipcauto_dointvec_minmax(struct ctl_table *table, int write,
90252 void __user *buffer, size_t *lenp, loff_t *ppos)
90253 {
90254- struct ctl_table ipc_table;
90255+ ctl_table_no_const ipc_table;
90256 size_t lenp_bef = *lenp;
90257 int oldval;
90258 int rc;
90259diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
90260index 68d4e95..1477ded 100644
90261--- a/ipc/mq_sysctl.c
90262+++ b/ipc/mq_sysctl.c
90263@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
90264 static int proc_mq_dointvec(struct ctl_table *table, int write,
90265 void __user *buffer, size_t *lenp, loff_t *ppos)
90266 {
90267- struct ctl_table mq_table;
90268+ ctl_table_no_const mq_table;
90269 memcpy(&mq_table, table, sizeof(mq_table));
90270 mq_table.data = get_mq(table);
90271
90272@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
90273 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
90274 void __user *buffer, size_t *lenp, loff_t *ppos)
90275 {
90276- struct ctl_table mq_table;
90277+ ctl_table_no_const mq_table;
90278 memcpy(&mq_table, table, sizeof(mq_table));
90279 mq_table.data = get_mq(table);
90280
90281diff --git a/ipc/mqueue.c b/ipc/mqueue.c
90282index 4fcf39a..d3cc2ec 100644
90283--- a/ipc/mqueue.c
90284+++ b/ipc/mqueue.c
90285@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
90286 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
90287 info->attr.mq_msgsize);
90288
90289+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
90290 spin_lock(&mq_lock);
90291 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
90292 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
90293diff --git a/ipc/shm.c b/ipc/shm.c
90294index 89fc354..cf56786 100644
90295--- a/ipc/shm.c
90296+++ b/ipc/shm.c
90297@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
90298 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
90299 #endif
90300
90301+#ifdef CONFIG_GRKERNSEC
90302+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
90303+ const time_t shm_createtime, const kuid_t cuid,
90304+ const int shmid);
90305+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
90306+ const time_t shm_createtime);
90307+#endif
90308+
90309 void shm_init_ns(struct ipc_namespace *ns)
90310 {
90311 ns->shm_ctlmax = SHMMAX;
90312@@ -557,6 +565,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
90313 shp->shm_lprid = 0;
90314 shp->shm_atim = shp->shm_dtim = 0;
90315 shp->shm_ctim = get_seconds();
90316+#ifdef CONFIG_GRKERNSEC
90317+ {
90318+ struct timespec timeval;
90319+ do_posix_clock_monotonic_gettime(&timeval);
90320+
90321+ shp->shm_createtime = timeval.tv_sec;
90322+ }
90323+#endif
90324 shp->shm_segsz = size;
90325 shp->shm_nattch = 0;
90326 shp->shm_file = file;
90327@@ -1092,6 +1108,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
90328 f_mode = FMODE_READ | FMODE_WRITE;
90329 }
90330 if (shmflg & SHM_EXEC) {
90331+
90332+#ifdef CONFIG_PAX_MPROTECT
90333+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
90334+ goto out;
90335+#endif
90336+
90337 prot |= PROT_EXEC;
90338 acc_mode |= S_IXUGO;
90339 }
90340@@ -1116,6 +1138,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
90341 if (err)
90342 goto out_unlock;
90343
90344+#ifdef CONFIG_GRKERNSEC
90345+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
90346+ shp->shm_perm.cuid, shmid) ||
90347+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
90348+ err = -EACCES;
90349+ goto out_unlock;
90350+ }
90351+#endif
90352+
90353 ipc_lock_object(&shp->shm_perm);
90354
90355 /* check if shm_destroy() is tearing down shp */
90356@@ -1128,6 +1159,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
90357 path = shp->shm_file->f_path;
90358 path_get(&path);
90359 shp->shm_nattch++;
90360+#ifdef CONFIG_GRKERNSEC
90361+ shp->shm_lapid = current->pid;
90362+#endif
90363 size = i_size_read(path.dentry->d_inode);
90364 ipc_unlock_object(&shp->shm_perm);
90365 rcu_read_unlock();
90366diff --git a/ipc/util.c b/ipc/util.c
90367index 27d74e6..8be0be2 100644
90368--- a/ipc/util.c
90369+++ b/ipc/util.c
90370@@ -71,6 +71,8 @@ struct ipc_proc_iface {
90371 int (*show)(struct seq_file *, void *);
90372 };
90373
90374+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
90375+
90376 static void ipc_memory_notifier(struct work_struct *work)
90377 {
90378 ipcns_notify(IPCNS_MEMCHANGED);
90379@@ -537,6 +539,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
90380 granted_mode >>= 6;
90381 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
90382 granted_mode >>= 3;
90383+
90384+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
90385+ return -1;
90386+
90387 /* is there some bit set in requested_mode but not in granted_mode? */
90388 if ((requested_mode & ~granted_mode & 0007) &&
90389 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
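
The context around the new gr_ipc_permitted() hook shows how ipcperms() selects a permission slice: the nine mode bits are rwxrwxrwx for owner/group/other, so an owner match shifts by 6 and a group match by 3 before the request is compared. Worked standalone example for mode 0640 and a group-matched caller:

#include <stdio.h>

int main(void)
{
        unsigned int mode = 0640;	/* rw- r-- --- */
        unsigned int requested = 04;	/* read */
        unsigned int granted = mode >> 3;	/* caller matched by group */

        /* deny if some requested bit is absent from the granted slice */
        printf("%s\n", (requested & ~granted & 0007) ? "deny" : "allow");
        return 0;	/* prints "allow" */
}
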
90390diff --git a/kernel/acct.c b/kernel/acct.c
90391index 808a86f..da69695 100644
90392--- a/kernel/acct.c
90393+++ b/kernel/acct.c
90394@@ -556,7 +556,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
90395 */
90396 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
90397 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
90398- file->f_op->write(file, (char *)&ac,
90399+ file->f_op->write(file, (char __force_user *)&ac,
90400 sizeof(acct_t), &file->f_pos);
90401 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
90402 set_fs(fs);
90403diff --git a/kernel/audit.c b/kernel/audit.c
90404index ba2ff5a..c6c0deb 100644
90405--- a/kernel/audit.c
90406+++ b/kernel/audit.c
90407@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
90408 3) suppressed due to audit_rate_limit
90409 4) suppressed due to audit_backlog_limit
90410 */
90411-static atomic_t audit_lost = ATOMIC_INIT(0);
90412+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
90413
90414 /* The netlink socket. */
90415 static struct sock *audit_sock;
90416@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
90417 unsigned long now;
90418 int print;
90419
90420- atomic_inc(&audit_lost);
90421+ atomic_inc_unchecked(&audit_lost);
90422
90423 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
90424
90425@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
90426 if (print) {
90427 if (printk_ratelimit())
90428 pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
90429- atomic_read(&audit_lost),
90430+ atomic_read_unchecked(&audit_lost),
90431 audit_rate_limit,
90432 audit_backlog_limit);
90433 audit_panic(message);
90434@@ -840,7 +840,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
90435 s.pid = audit_pid;
90436 s.rate_limit = audit_rate_limit;
90437 s.backlog_limit = audit_backlog_limit;
90438- s.lost = atomic_read(&audit_lost);
90439+ s.lost = atomic_read_unchecked(&audit_lost);
90440 s.backlog = skb_queue_len(&audit_skb_queue);
90441 s.version = AUDIT_VERSION_LATEST;
90442 s.backlog_wait_time = audit_backlog_wait_time;
90443diff --git a/kernel/auditsc.c b/kernel/auditsc.c
90444index 21eae3c..66db239 100644
90445--- a/kernel/auditsc.c
90446+++ b/kernel/auditsc.c
90447@@ -2023,7 +2023,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
90448 }
90449
90450 /* global counter which is incremented every time something logs in */
90451-static atomic_t session_id = ATOMIC_INIT(0);
90452+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
90453
90454 static int audit_set_loginuid_perm(kuid_t loginuid)
90455 {
90456@@ -2090,7 +2090,7 @@ int audit_set_loginuid(kuid_t loginuid)
90457
90458 /* are we setting or clearing? */
90459 if (uid_valid(loginuid))
90460- sessionid = (unsigned int)atomic_inc_return(&session_id);
90461+ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
90462
90463 task->sessionid = sessionid;
90464 task->loginuid = loginuid;
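The audit counters patched above (audit_lost in kernel/audit.c, session_id in kernel/auditsc.c) are statistics whose wraparound is harmless, so they move to the PaX atomic_unchecked_t type, which is exempt from the REFCOUNT overflow trap applied to ordinary atomic_t. A condensed sketch of the distinction, assuming the shape the patch gives these definitions elsewhere (not quoted verbatim):

    /* With CONFIG_PAX_REFCOUNT, atomic_t increments trap on signed
     * overflow to stop refcount-overflow exploits; the *_unchecked
     * variants opt a counter out when wrapping is benign. */
    #ifdef CONFIG_PAX_REFCOUNT
    typedef struct { int counter; } atomic_unchecked_t;    /* distinct type */
    /* the ..._unchecked ops use non-trapping instruction sequences */
    #else
    typedef atomic_t atomic_unchecked_t;                   /* plain alias   */
    #define atomic_inc_unchecked(v)   atomic_inc(v)
    #define atomic_read_unchecked(v)  atomic_read(v)
    #endif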
90465diff --git a/kernel/capability.c b/kernel/capability.c
90466index 989f5bf..d317ca0 100644
90467--- a/kernel/capability.c
90468+++ b/kernel/capability.c
90469@@ -192,6 +192,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
90470 * before modification is attempted and the application
90471 * fails.
90472 */
90473+ if (tocopy > ARRAY_SIZE(kdata))
90474+ return -EFAULT;
90475+
90476 if (copy_to_user(dataptr, kdata, tocopy
90477 * sizeof(struct __user_cap_data_struct))) {
90478 return -EFAULT;
90479@@ -297,10 +300,11 @@ bool has_ns_capability(struct task_struct *t,
90480 int ret;
90481
90482 rcu_read_lock();
90483- ret = security_capable(__task_cred(t), ns, cap);
90484+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
90485+ gr_task_is_capable(t, __task_cred(t), cap);
90486 rcu_read_unlock();
90487
90488- return (ret == 0);
90489+ return ret;
90490 }
90491
90492 /**
90493@@ -337,10 +341,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
90494 int ret;
90495
90496 rcu_read_lock();
90497- ret = security_capable_noaudit(__task_cred(t), ns, cap);
90498+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
90499 rcu_read_unlock();
90500
90501- return (ret == 0);
90502+ return ret;
90503 }
90504
90505 /**
90506@@ -378,7 +382,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
90507 BUG();
90508 }
90509
90510- if (security_capable(current_cred(), ns, cap) == 0) {
90511+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
90512 current->flags |= PF_SUPERPRIV;
90513 return true;
90514 }
90515@@ -386,6 +390,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
90516 }
90517 EXPORT_SYMBOL(ns_capable);
90518
90519+bool ns_capable_nolog(struct user_namespace *ns, int cap)
90520+{
90521+ if (unlikely(!cap_valid(cap))) {
90522+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
90523+ BUG();
90524+ }
90525+
90526+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
90527+ current->flags |= PF_SUPERPRIV;
90528+ return true;
90529+ }
90530+ return false;
90531+}
90532+EXPORT_SYMBOL(ns_capable_nolog);
90533+
90534 /**
90535 * file_ns_capable - Determine if the file's opener had a capability in effect
90536 * @file: The file we want to check
90537@@ -427,6 +446,12 @@ bool capable(int cap)
90538 }
90539 EXPORT_SYMBOL(capable);
90540
90541+bool capable_nolog(int cap)
90542+{
90543+ return ns_capable_nolog(&init_user_ns, cap);
90544+}
90545+EXPORT_SYMBOL(capable_nolog);
90546+
90547 /**
90548 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
90549 * @inode: The inode in question
90550@@ -444,3 +469,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
90551 kgid_has_mapping(ns, inode->i_gid);
90552 }
90553 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
90554+
90555+bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
90556+{
90557+ struct user_namespace *ns = current_user_ns();
90558+
90559+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
90560+ kgid_has_mapping(ns, inode->i_gid);
90561+}
90562+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
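Every predicate in kernel/capability.c now follows one pattern: the stock LSM verdict is AND-ed with a grsecurity RBAC verdict, and each check gains a _nolog twin that skips audit logging, for callers where a denial is expected and would only spam the log. Reduced to its shape, with stub verdicts standing in for the real hooks:

    #include <stdbool.h>

    static bool security_ok(int cap)  { return true; }    /* LSM verdict  */
    static bool gr_policy_ok(int cap) { return true; }    /* RBAC verdict */

    /* both gates must agree; either one can deny independently */
    static bool capable_sketch(int cap)
    {
            return security_ok(cap) && gr_policy_ok(cap);
    }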
90563diff --git a/kernel/cgroup.c b/kernel/cgroup.c
90564index 70776ae..09c4988 100644
90565--- a/kernel/cgroup.c
90566+++ b/kernel/cgroup.c
90567@@ -5146,6 +5146,14 @@ static void cgroup_release_agent(struct work_struct *work)
90568 release_list);
90569 list_del_init(&cgrp->release_list);
90570 raw_spin_unlock(&release_list_lock);
90571+
90572+ /*
90573+ * don't bother calling call_usermodehelper if we haven't
90574+ * configured a binary to execute
90575+ */
90576+ if (cgrp->root->release_agent_path[0] == '\0')
90577+ goto continue_free;
90578+
90579 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
90580 if (!pathbuf)
90581 goto continue_free;
90582@@ -5336,7 +5344,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
90583 struct task_struct *task;
90584 int count = 0;
90585
90586- seq_printf(seq, "css_set %p\n", cset);
90587+ seq_printf(seq, "css_set %pK\n", cset);
90588
90589 list_for_each_entry(task, &cset->tasks, cg_list) {
90590 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
90591diff --git a/kernel/compat.c b/kernel/compat.c
90592index 633394f..bdfa969 100644
90593--- a/kernel/compat.c
90594+++ b/kernel/compat.c
90595@@ -13,6 +13,7 @@
90596
90597 #include <linux/linkage.h>
90598 #include <linux/compat.h>
90599+#include <linux/module.h>
90600 #include <linux/errno.h>
90601 #include <linux/time.h>
90602 #include <linux/signal.h>
90603@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
90604 mm_segment_t oldfs;
90605 long ret;
90606
90607- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
90608+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
90609 oldfs = get_fs();
90610 set_fs(KERNEL_DS);
90611 ret = hrtimer_nanosleep_restart(restart);
90612@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
90613 oldfs = get_fs();
90614 set_fs(KERNEL_DS);
90615 ret = hrtimer_nanosleep(&tu,
90616- rmtp ? (struct timespec __user *)&rmt : NULL,
90617+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
90618 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
90619 set_fs(oldfs);
90620
90621@@ -361,7 +362,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
90622 mm_segment_t old_fs = get_fs();
90623
90624 set_fs(KERNEL_DS);
90625- ret = sys_sigpending((old_sigset_t __user *) &s);
90626+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
90627 set_fs(old_fs);
90628 if (ret == 0)
90629 ret = put_user(s, set);
90630@@ -451,7 +452,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
90631 mm_segment_t old_fs = get_fs();
90632
90633 set_fs(KERNEL_DS);
90634- ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
90635+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
90636 set_fs(old_fs);
90637
90638 if (!ret) {
90639@@ -533,8 +534,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
90640 set_fs (KERNEL_DS);
90641 ret = sys_wait4(pid,
90642 (stat_addr ?
90643- (unsigned int __user *) &status : NULL),
90644- options, (struct rusage __user *) &r);
90645+ (unsigned int __force_user *) &status : NULL),
90646+ options, (struct rusage __force_user *) &r);
90647 set_fs (old_fs);
90648
90649 if (ret > 0) {
90650@@ -560,8 +561,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
90651 memset(&info, 0, sizeof(info));
90652
90653 set_fs(KERNEL_DS);
90654- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
90655- uru ? (struct rusage __user *)&ru : NULL);
90656+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
90657+ uru ? (struct rusage __force_user *)&ru : NULL);
90658 set_fs(old_fs);
90659
90660 if ((ret < 0) || (info.si_signo == 0))
90661@@ -695,8 +696,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
90662 oldfs = get_fs();
90663 set_fs(KERNEL_DS);
90664 err = sys_timer_settime(timer_id, flags,
90665- (struct itimerspec __user *) &newts,
90666- (struct itimerspec __user *) &oldts);
90667+ (struct itimerspec __force_user *) &newts,
90668+ (struct itimerspec __force_user *) &oldts);
90669 set_fs(oldfs);
90670 if (!err && old && put_compat_itimerspec(old, &oldts))
90671 return -EFAULT;
90672@@ -713,7 +714,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
90673 oldfs = get_fs();
90674 set_fs(KERNEL_DS);
90675 err = sys_timer_gettime(timer_id,
90676- (struct itimerspec __user *) &ts);
90677+ (struct itimerspec __force_user *) &ts);
90678 set_fs(oldfs);
90679 if (!err && put_compat_itimerspec(setting, &ts))
90680 return -EFAULT;
90681@@ -732,7 +733,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
90682 oldfs = get_fs();
90683 set_fs(KERNEL_DS);
90684 err = sys_clock_settime(which_clock,
90685- (struct timespec __user *) &ts);
90686+ (struct timespec __force_user *) &ts);
90687 set_fs(oldfs);
90688 return err;
90689 }
90690@@ -747,7 +748,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
90691 oldfs = get_fs();
90692 set_fs(KERNEL_DS);
90693 err = sys_clock_gettime(which_clock,
90694- (struct timespec __user *) &ts);
90695+ (struct timespec __force_user *) &ts);
90696 set_fs(oldfs);
90697 if (!err && compat_put_timespec(&ts, tp))
90698 return -EFAULT;
90699@@ -767,7 +768,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
90700
90701 oldfs = get_fs();
90702 set_fs(KERNEL_DS);
90703- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
90704+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
90705 set_fs(oldfs);
90706
90707 err = compat_put_timex(utp, &txc);
90708@@ -787,7 +788,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
90709 oldfs = get_fs();
90710 set_fs(KERNEL_DS);
90711 err = sys_clock_getres(which_clock,
90712- (struct timespec __user *) &ts);
90713+ (struct timespec __force_user *) &ts);
90714 set_fs(oldfs);
90715 if (!err && tp && compat_put_timespec(&ts, tp))
90716 return -EFAULT;
90717@@ -801,7 +802,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
90718 struct timespec tu;
90719 struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
90720
90721- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
90722+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
90723 oldfs = get_fs();
90724 set_fs(KERNEL_DS);
90725 err = clock_nanosleep_restart(restart);
90726@@ -833,8 +834,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
90727 oldfs = get_fs();
90728 set_fs(KERNEL_DS);
90729 err = sys_clock_nanosleep(which_clock, flags,
90730- (struct timespec __user *) &in,
90731- (struct timespec __user *) &out);
90732+ (struct timespec __force_user *) &in,
90733+ (struct timespec __force_user *) &out);
90734 set_fs(oldfs);
90735
90736 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
90737@@ -1128,7 +1129,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
90738 mm_segment_t old_fs = get_fs();
90739
90740 set_fs(KERNEL_DS);
90741- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
90742+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
90743 set_fs(old_fs);
90744 if (compat_put_timespec(&t, interval))
90745 return -EFAULT;
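All of the kernel/compat.c changes are one mechanical substitution: each shim already brackets its nested syscall in set_fs(KERNEL_DS)/set_fs(oldfs) so a kernel-side struct can stand in for the user buffer, and __force_user marks those casts as intentional for sparse once user and kernel address spaces are typed as disjoint. A schematic of the bracket (illustrative, not a complete kernel function):

    long compat_shim_example(struct timespec __user *utp)
    {
            struct timespec ts;
            mm_segment_t oldfs = get_fs();
            long err;

            set_fs(KERNEL_DS);      /* let uaccess accept kernel pointers */
            err = sys_clock_gettime(CLOCK_MONOTONIC,
                                    (struct timespec __force_user *)&ts);
            set_fs(oldfs);          /* always restore the old limit */

            if (!err && copy_to_user(utp, &ts, sizeof(ts)))
                    err = -EFAULT;
            return err;
    }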
90746diff --git a/kernel/configs.c b/kernel/configs.c
90747index c18b1f1..b9a0132 100644
90748--- a/kernel/configs.c
90749+++ b/kernel/configs.c
90750@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
90751 struct proc_dir_entry *entry;
90752
90753 /* create the current config file */
90754+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
90755+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
90756+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
90757+ &ikconfig_file_ops);
90758+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
90759+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
90760+ &ikconfig_file_ops);
90761+#endif
90762+#else
90763 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
90764 &ikconfig_file_ops);
90765+#endif
90766+
90767 if (!entry)
90768 return -ENOMEM;
90769
90770diff --git a/kernel/cred.c b/kernel/cred.c
90771index e0573a4..26c0fd3 100644
90772--- a/kernel/cred.c
90773+++ b/kernel/cred.c
90774@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
90775 validate_creds(cred);
90776 alter_cred_subscribers(cred, -1);
90777 put_cred(cred);
90778+
90779+#ifdef CONFIG_GRKERNSEC_SETXID
90780+ cred = (struct cred *) tsk->delayed_cred;
90781+ if (cred != NULL) {
90782+ tsk->delayed_cred = NULL;
90783+ validate_creds(cred);
90784+ alter_cred_subscribers(cred, -1);
90785+ put_cred(cred);
90786+ }
90787+#endif
90788 }
90789
90790 /**
90791@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
90792 * Always returns 0 thus allowing this function to be tail-called at the end
90793 * of, say, sys_setgid().
90794 */
90795-int commit_creds(struct cred *new)
90796+static int __commit_creds(struct cred *new)
90797 {
90798 struct task_struct *task = current;
90799 const struct cred *old = task->real_cred;
90800@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
90801
90802 get_cred(new); /* we will require a ref for the subj creds too */
90803
90804+ gr_set_role_label(task, new->uid, new->gid);
90805+
90806 /* dumpability changes */
90807 if (!uid_eq(old->euid, new->euid) ||
90808 !gid_eq(old->egid, new->egid) ||
90809@@ -479,6 +491,105 @@ int commit_creds(struct cred *new)
90810 put_cred(old);
90811 return 0;
90812 }
90813+#ifdef CONFIG_GRKERNSEC_SETXID
90814+extern int set_user(struct cred *new);
90815+
90816+void gr_delayed_cred_worker(void)
90817+{
90818+ const struct cred *new = current->delayed_cred;
90819+ struct cred *ncred;
90820+
90821+ current->delayed_cred = NULL;
90822+
90823+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
90824+ // from doing get_cred on it when queueing this
90825+ put_cred(new);
90826+ return;
90827+ } else if (new == NULL)
90828+ return;
90829+
90830+ ncred = prepare_creds();
90831+ if (!ncred)
90832+ goto die;
90833+ // uids
90834+ ncred->uid = new->uid;
90835+ ncred->euid = new->euid;
90836+ ncred->suid = new->suid;
90837+ ncred->fsuid = new->fsuid;
90838+ // gids
90839+ ncred->gid = new->gid;
90840+ ncred->egid = new->egid;
90841+ ncred->sgid = new->sgid;
90842+ ncred->fsgid = new->fsgid;
90843+ // groups
90844+ set_groups(ncred, new->group_info);
90845+ // caps
90846+ ncred->securebits = new->securebits;
90847+ ncred->cap_inheritable = new->cap_inheritable;
90848+ ncred->cap_permitted = new->cap_permitted;
90849+ ncred->cap_effective = new->cap_effective;
90850+ ncred->cap_bset = new->cap_bset;
90851+
90852+ if (set_user(ncred)) {
90853+ abort_creds(ncred);
90854+ goto die;
90855+ }
90856+
90857+ // from doing get_cred on it when queueing this
90858+ put_cred(new);
90859+
90860+ __commit_creds(ncred);
90861+ return;
90862+die:
90863+ // from doing get_cred on it when queueing this
90864+ put_cred(new);
90865+ do_group_exit(SIGKILL);
90866+}
90867+#endif
90868+
90869+int commit_creds(struct cred *new)
90870+{
90871+#ifdef CONFIG_GRKERNSEC_SETXID
90872+ int ret;
90873+ int schedule_it = 0;
90874+ struct task_struct *t;
90875+ unsigned oldsecurebits = current_cred()->securebits;
90876+
90877+ /* we won't get called with tasklist_lock held for writing
90878+ and interrupts disabled as the cred struct in that case is
90879+ init_cred
90880+ */
90881+ if (grsec_enable_setxid && !current_is_single_threaded() &&
90882+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
90883+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
90884+ schedule_it = 1;
90885+ }
90886+ ret = __commit_creds(new);
90887+ if (schedule_it) {
90888+ rcu_read_lock();
90889+ read_lock(&tasklist_lock);
90890+ for (t = next_thread(current); t != current;
90891+ t = next_thread(t)) {
90892+ /* we'll check if the thread has uid 0 in
90893+ * the delayed worker routine
90894+ */
90895+ if (task_securebits(t) == oldsecurebits &&
90896+ t->delayed_cred == NULL) {
90897+ t->delayed_cred = get_cred(new);
90898+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
90899+ set_tsk_need_resched(t);
90900+ }
90901+ }
90902+ read_unlock(&tasklist_lock);
90903+ rcu_read_unlock();
90904+ }
90905+
90906+ return ret;
90907+#else
90908+ return __commit_creds(new);
90909+#endif
90910+}
90911+
90912 EXPORT_SYMBOL(commit_creds);
90913
90914 /**
90915diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
90916index 1adf62b..7736e06 100644
90917--- a/kernel/debug/debug_core.c
90918+++ b/kernel/debug/debug_core.c
90919@@ -124,7 +124,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
90920 */
90921 static atomic_t masters_in_kgdb;
90922 static atomic_t slaves_in_kgdb;
90923-static atomic_t kgdb_break_tasklet_var;
90924+static atomic_unchecked_t kgdb_break_tasklet_var;
90925 atomic_t kgdb_setting_breakpoint;
90926
90927 struct task_struct *kgdb_usethread;
90928@@ -134,7 +134,7 @@ int kgdb_single_step;
90929 static pid_t kgdb_sstep_pid;
90930
90931 /* to keep track of the CPU which is doing the single stepping*/
90932-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
90933+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
90934
90935 /*
90936 * If you are debugging a problem where roundup (the collection of
90937@@ -549,7 +549,7 @@ return_normal:
90938 * kernel will only try for the value of sstep_tries before
90939 * giving up and continuing on.
90940 */
90941- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
90942+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
90943 (kgdb_info[cpu].task &&
90944 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
90945 atomic_set(&kgdb_active, -1);
90946@@ -647,8 +647,8 @@ cpu_master_loop:
90947 }
90948
90949 kgdb_restore:
90950- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
90951- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
90952+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
90953+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
90954 if (kgdb_info[sstep_cpu].task)
90955 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
90956 else
90957@@ -925,18 +925,18 @@ static void kgdb_unregister_callbacks(void)
90958 static void kgdb_tasklet_bpt(unsigned long ing)
90959 {
90960 kgdb_breakpoint();
90961- atomic_set(&kgdb_break_tasklet_var, 0);
90962+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
90963 }
90964
90965 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
90966
90967 void kgdb_schedule_breakpoint(void)
90968 {
90969- if (atomic_read(&kgdb_break_tasklet_var) ||
90970+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
90971 atomic_read(&kgdb_active) != -1 ||
90972 atomic_read(&kgdb_setting_breakpoint))
90973 return;
90974- atomic_inc(&kgdb_break_tasklet_var);
90975+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
90976 tasklet_schedule(&kgdb_tasklet_breakpoint);
90977 }
90978 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
90979diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
90980index 2f7c760..95b6a66 100644
90981--- a/kernel/debug/kdb/kdb_main.c
90982+++ b/kernel/debug/kdb/kdb_main.c
90983@@ -1977,7 +1977,7 @@ static int kdb_lsmod(int argc, const char **argv)
90984 continue;
90985
90986 kdb_printf("%-20s%8u 0x%p ", mod->name,
90987- mod->core_size, (void *)mod);
90988+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
90989 #ifdef CONFIG_MODULE_UNLOAD
90990 kdb_printf("%4ld ", module_refcount(mod));
90991 #endif
90992@@ -1987,7 +1987,7 @@ static int kdb_lsmod(int argc, const char **argv)
90993 kdb_printf(" (Loading)");
90994 else
90995 kdb_printf(" (Live)");
90996- kdb_printf(" 0x%p", mod->module_core);
90997+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
90998
90999 #ifdef CONFIG_MODULE_UNLOAD
91000 {
91001diff --git a/kernel/events/core.c b/kernel/events/core.c
91002index 6b17ac1..00fd505 100644
91003--- a/kernel/events/core.c
91004+++ b/kernel/events/core.c
91005@@ -160,8 +160,15 @@ static struct srcu_struct pmus_srcu;
91006 * 0 - disallow raw tracepoint access for unpriv
91007 * 1 - disallow cpu events for unpriv
91008 * 2 - disallow kernel profiling for unpriv
91009+ * 3 - disallow all unpriv perf event use
91010 */
91011-int sysctl_perf_event_paranoid __read_mostly = 1;
91012+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
91013+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
91014+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
91015+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
91016+#else
91017+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
91018+#endif
91019
91020 /* Minimum for 512 kiB + 1 user control page */
91021 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
91022@@ -187,7 +194,7 @@ void update_perf_cpu_limits(void)
91023
91024 tmp *= sysctl_perf_cpu_time_max_percent;
91025 do_div(tmp, 100);
91026- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
91027+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
91028 }
91029
91030 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
91031@@ -293,7 +300,7 @@ void perf_sample_event_took(u64 sample_len_ns)
91032 }
91033 }
91034
91035-static atomic64_t perf_event_id;
91036+static atomic64_unchecked_t perf_event_id;
91037
91038 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
91039 enum event_type_t event_type);
91040@@ -3023,7 +3030,7 @@ static void __perf_event_read(void *info)
91041
91042 static inline u64 perf_event_count(struct perf_event *event)
91043 {
91044- return local64_read(&event->count) + atomic64_read(&event->child_count);
91045+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
91046 }
91047
91048 static u64 perf_event_read(struct perf_event *event)
91049@@ -3399,9 +3406,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
91050 mutex_lock(&event->child_mutex);
91051 total += perf_event_read(event);
91052 *enabled += event->total_time_enabled +
91053- atomic64_read(&event->child_total_time_enabled);
91054+ atomic64_read_unchecked(&event->child_total_time_enabled);
91055 *running += event->total_time_running +
91056- atomic64_read(&event->child_total_time_running);
91057+ atomic64_read_unchecked(&event->child_total_time_running);
91058
91059 list_for_each_entry(child, &event->child_list, child_list) {
91060 total += perf_event_read(child);
91061@@ -3830,10 +3837,10 @@ void perf_event_update_userpage(struct perf_event *event)
91062 userpg->offset -= local64_read(&event->hw.prev_count);
91063
91064 userpg->time_enabled = enabled +
91065- atomic64_read(&event->child_total_time_enabled);
91066+ atomic64_read_unchecked(&event->child_total_time_enabled);
91067
91068 userpg->time_running = running +
91069- atomic64_read(&event->child_total_time_running);
91070+ atomic64_read_unchecked(&event->child_total_time_running);
91071
91072 arch_perf_update_userpage(userpg, now);
91073
91074@@ -4397,7 +4404,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
91075
91076 /* Data. */
91077 sp = perf_user_stack_pointer(regs);
91078- rem = __output_copy_user(handle, (void *) sp, dump_size);
91079+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
91080 dyn_size = dump_size - rem;
91081
91082 perf_output_skip(handle, rem);
91083@@ -4488,11 +4495,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
91084 values[n++] = perf_event_count(event);
91085 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
91086 values[n++] = enabled +
91087- atomic64_read(&event->child_total_time_enabled);
91088+ atomic64_read_unchecked(&event->child_total_time_enabled);
91089 }
91090 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
91091 values[n++] = running +
91092- atomic64_read(&event->child_total_time_running);
91093+ atomic64_read_unchecked(&event->child_total_time_running);
91094 }
91095 if (read_format & PERF_FORMAT_ID)
91096 values[n++] = primary_event_id(event);
91097@@ -6801,7 +6808,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
91098 event->parent = parent_event;
91099
91100 event->ns = get_pid_ns(task_active_pid_ns(current));
91101- event->id = atomic64_inc_return(&perf_event_id);
91102+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
91103
91104 event->state = PERF_EVENT_STATE_INACTIVE;
91105
91106@@ -7080,6 +7087,11 @@ SYSCALL_DEFINE5(perf_event_open,
91107 if (flags & ~PERF_FLAG_ALL)
91108 return -EINVAL;
91109
91110+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
91111+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
91112+ return -EACCES;
91113+#endif
91114+
91115 err = perf_copy_attr(attr_uptr, &attr);
91116 if (err)
91117 return err;
91118@@ -7432,10 +7444,10 @@ static void sync_child_event(struct perf_event *child_event,
91119 /*
91120 * Add back the child's count to the parent's count:
91121 */
91122- atomic64_add(child_val, &parent_event->child_count);
91123- atomic64_add(child_event->total_time_enabled,
91124+ atomic64_add_unchecked(child_val, &parent_event->child_count);
91125+ atomic64_add_unchecked(child_event->total_time_enabled,
91126 &parent_event->child_total_time_enabled);
91127- atomic64_add(child_event->total_time_running,
91128+ atomic64_add_unchecked(child_event->total_time_running,
91129 &parent_event->child_total_time_running);
91130
91131 /*
91132diff --git a/kernel/events/internal.h b/kernel/events/internal.h
91133index 569b2187..19940d9 100644
91134--- a/kernel/events/internal.h
91135+++ b/kernel/events/internal.h
91136@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
91137 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
91138 }
91139
91140-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
91141+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
91142 static inline unsigned long \
91143 func_name(struct perf_output_handle *handle, \
91144- const void *buf, unsigned long len) \
91145+ const void user *buf, unsigned long len) \
91146 { \
91147 unsigned long size, written; \
91148 \
91149@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
91150 return 0;
91151 }
91152
91153-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
91154+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
91155
91156 static inline unsigned long
91157 memcpy_skip(void *dst, const void *src, unsigned long n)
91158@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
91159 return 0;
91160 }
91161
91162-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
91163+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
91164
91165 #ifndef arch_perf_out_copy_user
91166 #define arch_perf_out_copy_user arch_perf_out_copy_user
91167@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
91168 }
91169 #endif
91170
91171-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
91172+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
91173
91174 /* Callchain handling */
91175 extern struct perf_callchain_entry *
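The internal.h hunks thread an extra macro parameter through DEFINE_OUTPUT_COPY so one template can stamp out both plain and __user-qualified copy routines; passing an empty argument is legal C99 and leaves the kernel-side instantiations unannotated. The trick in isolation (names and stubs are illustrative):

    #include <string.h>

    #define __user   /* expands to nothing outside a sparse run */

    static unsigned long memcpy_like(void *dst, const void *src,
                                     unsigned long n)
    {
            memcpy(dst, src, n);
            return 0;               /* bytes NOT copied, kernel-style */
    }

    #define DEFINE_COPY(name, copy_fn, space)                       \
    static unsigned long name(void *dst, const void space *src,     \
                              unsigned long n)                      \
    {                                                               \
            return copy_fn(dst, (const void *)src, n);              \
    }

    DEFINE_COPY(copy_plain, memcpy_like, )        /* empty argument */
    DEFINE_COPY(copy_user,  memcpy_like, __user)  /* tagged pointer */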
91176diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
91177index 6f3254e..e4c1fe4 100644
91178--- a/kernel/events/uprobes.c
91179+++ b/kernel/events/uprobes.c
91180@@ -1670,7 +1670,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
91181 {
91182 struct page *page;
91183 uprobe_opcode_t opcode;
91184- int result;
91185+ long result;
91186
91187 pagefault_disable();
91188 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
91189diff --git a/kernel/exit.c b/kernel/exit.c
91190index e5c4668..592d2e5 100644
91191--- a/kernel/exit.c
91192+++ b/kernel/exit.c
91193@@ -173,6 +173,10 @@ void release_task(struct task_struct * p)
91194 struct task_struct *leader;
91195 int zap_leader;
91196 repeat:
91197+#ifdef CONFIG_NET
91198+ gr_del_task_from_ip_table(p);
91199+#endif
91200+
91201 /* don't need to get the RCU readlock here - the process is dead and
91202 * can't be modifying its own credentials. But shut RCU-lockdep up */
91203 rcu_read_lock();
91204@@ -664,6 +668,8 @@ void do_exit(long code)
91205 struct task_struct *tsk = current;
91206 int group_dead;
91207
91208+ set_fs(USER_DS);
91209+
91210 profile_task_exit(tsk);
91211
91212 WARN_ON(blk_needs_flush_plug(tsk));
91213@@ -680,7 +686,6 @@ void do_exit(long code)
91214 * mm_release()->clear_child_tid() from writing to a user-controlled
91215 * kernel address.
91216 */
91217- set_fs(USER_DS);
91218
91219 ptrace_event(PTRACE_EVENT_EXIT, code);
91220
91221@@ -739,6 +744,9 @@ void do_exit(long code)
91222 tsk->exit_code = code;
91223 taskstats_exit(tsk, group_dead);
91224
91225+ gr_acl_handle_psacct(tsk, code);
91226+ gr_acl_handle_exit();
91227+
91228 exit_mm(tsk);
91229
91230 if (group_dead)
91231@@ -858,7 +866,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
91232 * Take down every thread in the group. This is called by fatal signals
91233 * as well as by sys_exit_group (below).
91234 */
91235-void
91236+__noreturn void
91237 do_group_exit(int exit_code)
91238 {
91239 struct signal_struct *sig = current->signal;
91240diff --git a/kernel/fork.c b/kernel/fork.c
91241index 6a13c46..a623c8e 100644
91242--- a/kernel/fork.c
91243+++ b/kernel/fork.c
91244@@ -183,6 +183,48 @@ void thread_info_cache_init(void)
91245 # endif
91246 #endif
91247
91248+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
91249+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
91250+ int node, void **lowmem_stack)
91251+{
91252+ struct page *pages[THREAD_SIZE / PAGE_SIZE];
91253+ void *ret = NULL;
91254+ unsigned int i;
91255+
91256+ *lowmem_stack = alloc_thread_info_node(tsk, node);
91257+ if (*lowmem_stack == NULL)
91258+ goto out;
91259+
91260+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
91261+ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
91262+
91263+ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
91264+ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
91265+ if (ret == NULL) {
91266+ free_thread_info(*lowmem_stack);
91267+ *lowmem_stack = NULL;
91268+ }
91269+
91270+out:
91271+ return ret;
91272+}
91273+
91274+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
91275+{
91276+ unmap_process_stacks(tsk);
91277+}
91278+#else
91279+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
91280+ int node, void **lowmem_stack)
91281+{
91282+ return alloc_thread_info_node(tsk, node);
91283+}
91284+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
91285+{
91286+ free_thread_info(ti);
91287+}
91288+#endif
91289+
91290 /* SLAB cache for signal_struct structures (tsk->signal) */
91291 static struct kmem_cache *signal_cachep;
91292
91293@@ -201,18 +243,22 @@ struct kmem_cache *vm_area_cachep;
91294 /* SLAB cache for mm_struct structures (tsk->mm) */
91295 static struct kmem_cache *mm_cachep;
91296
91297-static void account_kernel_stack(struct thread_info *ti, int account)
91298+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
91299 {
91300+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
91301+ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
91302+#else
91303 struct zone *zone = page_zone(virt_to_page(ti));
91304+#endif
91305
91306 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
91307 }
91308
91309 void free_task(struct task_struct *tsk)
91310 {
91311- account_kernel_stack(tsk->stack, -1);
91312+ account_kernel_stack(tsk, tsk->stack, -1);
91313 arch_release_thread_info(tsk->stack);
91314- free_thread_info(tsk->stack);
91315+ gr_free_thread_info(tsk, tsk->stack);
91316 rt_mutex_debug_task_free(tsk);
91317 ftrace_graph_exit_task(tsk);
91318 put_seccomp_filter(tsk);
91319@@ -299,6 +345,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
91320 struct task_struct *tsk;
91321 struct thread_info *ti;
91322 unsigned long *stackend;
91323+ void *lowmem_stack;
91324 int node = tsk_fork_get_node(orig);
91325 int err;
91326
91327@@ -306,7 +353,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
91328 if (!tsk)
91329 return NULL;
91330
91331- ti = alloc_thread_info_node(tsk, node);
91332+ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
91333 if (!ti)
91334 goto free_tsk;
91335
91336@@ -315,6 +362,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
91337 goto free_ti;
91338
91339 tsk->stack = ti;
91340+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
91341+ tsk->lowmem_stack = lowmem_stack;
91342+#endif
91343
91344 setup_thread_stack(tsk, orig);
91345 clear_user_return_notifier(tsk);
91346@@ -323,7 +373,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
91347 *stackend = STACK_END_MAGIC; /* for overflow detection */
91348
91349 #ifdef CONFIG_CC_STACKPROTECTOR
91350- tsk->stack_canary = get_random_int();
91351+ tsk->stack_canary = pax_get_random_long();
91352 #endif
91353
91354 /*
91355@@ -337,24 +387,92 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
91356 tsk->splice_pipe = NULL;
91357 tsk->task_frag.page = NULL;
91358
91359- account_kernel_stack(ti, 1);
91360+ account_kernel_stack(tsk, ti, 1);
91361
91362 return tsk;
91363
91364 free_ti:
91365- free_thread_info(ti);
91366+ gr_free_thread_info(tsk, ti);
91367 free_tsk:
91368 free_task_struct(tsk);
91369 return NULL;
91370 }
91371
91372 #ifdef CONFIG_MMU
91373-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
91374+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
91375+{
91376+ struct vm_area_struct *tmp;
91377+ unsigned long charge;
91378+ struct file *file;
91379+ int retval;
91380+
91381+ charge = 0;
91382+ if (mpnt->vm_flags & VM_ACCOUNT) {
91383+ unsigned long len = vma_pages(mpnt);
91384+
91385+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
91386+ goto fail_nomem;
91387+ charge = len;
91388+ }
91389+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
91390+ if (!tmp)
91391+ goto fail_nomem;
91392+ *tmp = *mpnt;
91393+ tmp->vm_mm = mm;
91394+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
91395+ retval = vma_dup_policy(mpnt, tmp);
91396+ if (retval)
91397+ goto fail_nomem_policy;
91398+ if (anon_vma_fork(tmp, mpnt))
91399+ goto fail_nomem_anon_vma_fork;
91400+ tmp->vm_flags &= ~VM_LOCKED;
91401+ tmp->vm_next = tmp->vm_prev = NULL;
91402+ tmp->vm_mirror = NULL;
91403+ file = tmp->vm_file;
91404+ if (file) {
91405+ struct inode *inode = file_inode(file);
91406+ struct address_space *mapping = file->f_mapping;
91407+
91408+ get_file(file);
91409+ if (tmp->vm_flags & VM_DENYWRITE)
91410+ atomic_dec(&inode->i_writecount);
91411+ mutex_lock(&mapping->i_mmap_mutex);
91412+ if (tmp->vm_flags & VM_SHARED)
91413+ mapping->i_mmap_writable++;
91414+ flush_dcache_mmap_lock(mapping);
91415+ /* insert tmp into the share list, just after mpnt */
91416+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
91417+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
91418+ else
91419+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
91420+ flush_dcache_mmap_unlock(mapping);
91421+ mutex_unlock(&mapping->i_mmap_mutex);
91422+ }
91423+
91424+ /*
91425+ * Clear hugetlb-related page reserves for children. This only
91426+ * affects MAP_PRIVATE mappings. Faults generated by the child
91427+ * are not guaranteed to succeed, even if read-only
91428+ */
91429+ if (is_vm_hugetlb_page(tmp))
91430+ reset_vma_resv_huge_pages(tmp);
91431+
91432+ return tmp;
91433+
91434+fail_nomem_anon_vma_fork:
91435+ mpol_put(vma_policy(tmp));
91436+fail_nomem_policy:
91437+ kmem_cache_free(vm_area_cachep, tmp);
91438+fail_nomem:
91439+ vm_unacct_memory(charge);
91440+ return NULL;
91441+}
91442+
91443+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
91444 {
91445 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
91446 struct rb_node **rb_link, *rb_parent;
91447 int retval;
91448- unsigned long charge;
91449
91450 uprobe_start_dup_mmap();
91451 down_write(&oldmm->mmap_sem);
91452@@ -383,55 +501,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
91453
91454 prev = NULL;
91455 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
91456- struct file *file;
91457-
91458 if (mpnt->vm_flags & VM_DONTCOPY) {
91459 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
91460 -vma_pages(mpnt));
91461 continue;
91462 }
91463- charge = 0;
91464- if (mpnt->vm_flags & VM_ACCOUNT) {
91465- unsigned long len = vma_pages(mpnt);
91466-
91467- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
91468- goto fail_nomem;
91469- charge = len;
91470- }
91471- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
91472- if (!tmp)
91473- goto fail_nomem;
91474- *tmp = *mpnt;
91475- INIT_LIST_HEAD(&tmp->anon_vma_chain);
91476- retval = vma_dup_policy(mpnt, tmp);
91477- if (retval)
91478- goto fail_nomem_policy;
91479- tmp->vm_mm = mm;
91480- if (anon_vma_fork(tmp, mpnt))
91481- goto fail_nomem_anon_vma_fork;
91482- tmp->vm_flags &= ~VM_LOCKED;
91483- tmp->vm_next = tmp->vm_prev = NULL;
91484- file = tmp->vm_file;
91485- if (file) {
91486- struct inode *inode = file_inode(file);
91487- struct address_space *mapping = file->f_mapping;
91488-
91489- get_file(file);
91490- if (tmp->vm_flags & VM_DENYWRITE)
91491- atomic_dec(&inode->i_writecount);
91492- mutex_lock(&mapping->i_mmap_mutex);
91493- if (tmp->vm_flags & VM_SHARED)
91494- mapping->i_mmap_writable++;
91495- flush_dcache_mmap_lock(mapping);
91496- /* insert tmp into the share list, just after mpnt */
91497- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
91498- vma_nonlinear_insert(tmp,
91499- &mapping->i_mmap_nonlinear);
91500- else
91501- vma_interval_tree_insert_after(tmp, mpnt,
91502- &mapping->i_mmap);
91503- flush_dcache_mmap_unlock(mapping);
91504- mutex_unlock(&mapping->i_mmap_mutex);
91505+ tmp = dup_vma(mm, oldmm, mpnt);
91506+ if (!tmp) {
91507+ retval = -ENOMEM;
91508+ goto out;
91509 }
91510
91511 /*
91512@@ -463,6 +541,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
91513 if (retval)
91514 goto out;
91515 }
91516+
91517+#ifdef CONFIG_PAX_SEGMEXEC
91518+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
91519+ struct vm_area_struct *mpnt_m;
91520+
91521+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
91522+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
91523+
91524+ if (!mpnt->vm_mirror)
91525+ continue;
91526+
91527+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
91528+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
91529+ mpnt->vm_mirror = mpnt_m;
91530+ } else {
91531+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
91532+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
91533+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
91534+ mpnt->vm_mirror->vm_mirror = mpnt;
91535+ }
91536+ }
91537+ BUG_ON(mpnt_m);
91538+ }
91539+#endif
91540+
91541 /* a new mm has just been created */
91542 arch_dup_mmap(oldmm, mm);
91543 retval = 0;
91544@@ -472,14 +575,6 @@ out:
91545 up_write(&oldmm->mmap_sem);
91546 uprobe_end_dup_mmap();
91547 return retval;
91548-fail_nomem_anon_vma_fork:
91549- mpol_put(vma_policy(tmp));
91550-fail_nomem_policy:
91551- kmem_cache_free(vm_area_cachep, tmp);
91552-fail_nomem:
91553- retval = -ENOMEM;
91554- vm_unacct_memory(charge);
91555- goto out;
91556 }
91557
91558 static inline int mm_alloc_pgd(struct mm_struct *mm)
91559@@ -698,8 +793,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
91560 return ERR_PTR(err);
91561
91562 mm = get_task_mm(task);
91563- if (mm && mm != current->mm &&
91564- !ptrace_may_access(task, mode)) {
91565+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
91566+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
91567 mmput(mm);
91568 mm = ERR_PTR(-EACCES);
91569 }
91570@@ -918,13 +1013,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
91571 spin_unlock(&fs->lock);
91572 return -EAGAIN;
91573 }
91574- fs->users++;
91575+ atomic_inc(&fs->users);
91576 spin_unlock(&fs->lock);
91577 return 0;
91578 }
91579 tsk->fs = copy_fs_struct(fs);
91580 if (!tsk->fs)
91581 return -ENOMEM;
91582+ /* Carry through gr_chroot_dentry and is_chrooted instead
91583+ of recomputing it here. Already copied when the task struct
91584+ is duplicated. This allows pivot_root to not be treated as
91585+ a chroot
91586+ */
91587+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
91588+
91589 return 0;
91590 }
91591
91592@@ -1133,7 +1235,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
91593 * parts of the process environment (as per the clone
91594 * flags). The actual kick-off is left to the caller.
91595 */
91596-static struct task_struct *copy_process(unsigned long clone_flags,
91597+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
91598 unsigned long stack_start,
91599 unsigned long stack_size,
91600 int __user *child_tidptr,
91601@@ -1205,6 +1307,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
91602 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
91603 #endif
91604 retval = -EAGAIN;
91605+
91606+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
91607+
91608 if (atomic_read(&p->real_cred->user->processes) >=
91609 task_rlimit(p, RLIMIT_NPROC)) {
91610 if (p->real_cred->user != INIT_USER &&
91611@@ -1452,6 +1557,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
91612 goto bad_fork_free_pid;
91613 }
91614
91615+ /* synchronizes with gr_set_acls()
91616+ we need to call this past the point of no return for fork()
91617+ */
91618+ gr_copy_label(p);
91619+
91620 if (likely(p->pid)) {
91621 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
91622
91623@@ -1541,6 +1651,8 @@ bad_fork_cleanup_count:
91624 bad_fork_free:
91625 free_task(p);
91626 fork_out:
91627+ gr_log_forkfail(retval);
91628+
91629 return ERR_PTR(retval);
91630 }
91631
91632@@ -1602,6 +1714,7 @@ long do_fork(unsigned long clone_flags,
91633
91634 p = copy_process(clone_flags, stack_start, stack_size,
91635 child_tidptr, NULL, trace);
91636+ add_latent_entropy();
91637 /*
91638 * Do this prior waking up the new thread - the thread pointer
91639 * might get invalid after that point, if the thread exits quickly.
91640@@ -1618,6 +1731,8 @@ long do_fork(unsigned long clone_flags,
91641 if (clone_flags & CLONE_PARENT_SETTID)
91642 put_user(nr, parent_tidptr);
91643
91644+ gr_handle_brute_check();
91645+
91646 if (clone_flags & CLONE_VFORK) {
91647 p->vfork_done = &vfork;
91648 init_completion(&vfork);
91649@@ -1736,7 +1851,7 @@ void __init proc_caches_init(void)
91650 mm_cachep = kmem_cache_create("mm_struct",
91651 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
91652 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
91653- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
91654+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
91655 mmap_init();
91656 nsproxy_cache_init();
91657 }
91658@@ -1776,7 +1891,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
91659 return 0;
91660
91661 /* don't need lock here; in the worst case we'll do useless copy */
91662- if (fs->users == 1)
91663+ if (atomic_read(&fs->users) == 1)
91664 return 0;
91665
91666 *new_fsp = copy_fs_struct(fs);
91667@@ -1883,7 +1998,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
91668 fs = current->fs;
91669 spin_lock(&fs->lock);
91670 current->fs = new_fs;
91671- if (--fs->users)
91672+ gr_set_chroot_entries(current, &current->fs->root);
91673+ if (atomic_dec_return(&fs->users))
91674 new_fs = NULL;
91675 else
91676 new_fs = fs;
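CONFIG_GRKERNSEC_KSTACKOVERFLOW, wired in above, keeps the conventional lowmem stack allocation but gives the task a second, vmalloc-space view of the same pages: vmap() is passed VM_IOREMAP purely to obtain THREAD_SIZE alignment (as the in-line comment notes), and the guard ranges between vmalloc areas turn a stack overrun into an immediate fault rather than silent corruption of a neighboring allocation. The remapping step in isolation (alloc_pages_stack() is a hypothetical stand-in for the real allocator):

    struct page *pages[THREAD_SIZE / PAGE_SIZE];
    void *lowmem, *stack;
    unsigned int i;

    lowmem = alloc_pages_stack();           /* hypothetical allocator */
    for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
            pages[i] = virt_to_page(lowmem + i * PAGE_SIZE);

    /* second mapping of the same physical pages, guarded in vmalloc space */
    stack = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);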
91677diff --git a/kernel/futex.c b/kernel/futex.c
91678index b632b5f..0aa434d 100644
91679--- a/kernel/futex.c
91680+++ b/kernel/futex.c
91681@@ -202,7 +202,7 @@ struct futex_pi_state {
91682 atomic_t refcount;
91683
91684 union futex_key key;
91685-};
91686+} __randomize_layout;
91687
91688 /**
91689 * struct futex_q - The hashed futex queue entry, one per waiting task
91690@@ -236,7 +236,7 @@ struct futex_q {
91691 struct rt_mutex_waiter *rt_waiter;
91692 union futex_key *requeue_pi_key;
91693 u32 bitset;
91694-};
91695+} __randomize_layout;
91696
91697 static const struct futex_q futex_q_init = {
91698 /* list gets initialized in queue_me()*/
91699@@ -394,6 +394,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
91700 struct page *page, *page_head;
91701 int err, ro = 0;
91702
91703+#ifdef CONFIG_PAX_SEGMEXEC
91704+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
91705+ return -EFAULT;
91706+#endif
91707+
91708 /*
91709 * The futex address must be "naturally" aligned.
91710 */
91711@@ -593,7 +598,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
91712
91713 static int get_futex_value_locked(u32 *dest, u32 __user *from)
91714 {
91715- int ret;
91716+ unsigned long ret;
91717
91718 pagefault_disable();
91719 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
91720@@ -2628,6 +2633,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
91721 * shared futexes. We need to compare the keys:
91722 */
91723 if (match_futex(&q.key, &key2)) {
91724+ queue_unlock(hb);
91725 ret = -EINVAL;
91726 goto out_put_keys;
91727 }
91728@@ -3033,6 +3039,7 @@ static void __init futex_detect_cmpxchg(void)
91729 {
91730 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
91731 u32 curval;
91732+ mm_segment_t oldfs;
91733
91734 /*
91735 * This will fail and we want it. Some arch implementations do
91736@@ -3044,8 +3051,11 @@ static void __init futex_detect_cmpxchg(void)
91737 * implementation, the non-functional ones will return
91738 * -ENOSYS.
91739 */
91740+ oldfs = get_fs();
91741+ set_fs(USER_DS);
91742 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
91743 futex_cmpxchg_enabled = 1;
91744+ set_fs(oldfs);
91745 #endif
91746 }
91747
91748diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
91749index 55c8c93..9ba7ad6 100644
91750--- a/kernel/futex_compat.c
91751+++ b/kernel/futex_compat.c
91752@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
91753 return 0;
91754 }
91755
91756-static void __user *futex_uaddr(struct robust_list __user *entry,
91757+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
91758 compat_long_t futex_offset)
91759 {
91760 compat_uptr_t base = ptr_to_compat(entry);
91761diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
91762index b358a80..fc25240 100644
91763--- a/kernel/gcov/base.c
91764+++ b/kernel/gcov/base.c
91765@@ -114,11 +114,6 @@ void gcov_enable_events(void)
91766 }
91767
91768 #ifdef CONFIG_MODULES
91769-static inline int within(void *addr, void *start, unsigned long size)
91770-{
91771- return ((addr >= start) && (addr < start + size));
91772-}
91773-
91774 /* Update list and generate events when modules are unloaded. */
91775 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
91776 void *data)
91777@@ -133,7 +128,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
91778
91779 /* Remove entries located in module from linked list. */
91780 while ((info = gcov_info_next(info))) {
91781- if (within(info, mod->module_core, mod->core_size)) {
91782+ if (within_module_core_rw((unsigned long)info, mod)) {
91783 gcov_info_unlink(prev, info);
91784 if (gcov_events_enabled)
91785 gcov_event(GCOV_REMOVE, info);
91786diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
91787index 3ab2899..c6ad010 100644
91788--- a/kernel/hrtimer.c
91789+++ b/kernel/hrtimer.c
91790@@ -1449,7 +1449,7 @@ void hrtimer_peek_ahead_timers(void)
91791 local_irq_restore(flags);
91792 }
91793
91794-static void run_hrtimer_softirq(struct softirq_action *h)
91795+static __latent_entropy void run_hrtimer_softirq(void)
91796 {
91797 hrtimer_peek_ahead_timers();
91798 }
91799diff --git a/kernel/irq_work.c b/kernel/irq_work.c
91800index a82170e..5b01e7f 100644
91801--- a/kernel/irq_work.c
91802+++ b/kernel/irq_work.c
91803@@ -191,12 +191,13 @@ static int irq_work_cpu_notify(struct notifier_block *self,
91804 return NOTIFY_OK;
91805 }
91806
91807-static struct notifier_block cpu_notify;
91808+static struct notifier_block cpu_notify = {
91809+ .notifier_call = irq_work_cpu_notify,
91810+ .priority = 0,
91811+};
91812
91813 static __init int irq_work_init_cpu_notifier(void)
91814 {
91815- cpu_notify.notifier_call = irq_work_cpu_notify;
91816- cpu_notify.priority = 0;
91817 register_cpu_notifier(&cpu_notify);
91818 return 0;
91819 }
91820diff --git a/kernel/jump_label.c b/kernel/jump_label.c
91821index 9019f15..9a3c42e 100644
91822--- a/kernel/jump_label.c
91823+++ b/kernel/jump_label.c
91824@@ -14,6 +14,7 @@
91825 #include <linux/err.h>
91826 #include <linux/static_key.h>
91827 #include <linux/jump_label_ratelimit.h>
91828+#include <linux/mm.h>
91829
91830 #ifdef HAVE_JUMP_LABEL
91831
91832@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
91833
91834 size = (((unsigned long)stop - (unsigned long)start)
91835 / sizeof(struct jump_entry));
91836+ pax_open_kernel();
91837 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
91838+ pax_close_kernel();
91839 }
91840
91841 static void jump_label_update(struct static_key *key, int enable);
91842@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
91843 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
91844 struct jump_entry *iter;
91845
91846+ pax_open_kernel();
91847 for (iter = iter_start; iter < iter_stop; iter++) {
91848 if (within_module_init(iter->code, mod))
91849 iter->code = 0;
91850 }
91851+ pax_close_kernel();
91852 }
91853
91854 static int
91855diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
91856index cb0cf37..b69e161 100644
91857--- a/kernel/kallsyms.c
91858+++ b/kernel/kallsyms.c
91859@@ -11,6 +11,9 @@
91860 * Changed the compression method from stem compression to "table lookup"
91861 * compression (see scripts/kallsyms.c for a more complete description)
91862 */
91863+#ifdef CONFIG_GRKERNSEC_HIDESYM
91864+#define __INCLUDED_BY_HIDESYM 1
91865+#endif
91866 #include <linux/kallsyms.h>
91867 #include <linux/module.h>
91868 #include <linux/init.h>
91869@@ -54,12 +57,33 @@ extern const unsigned long kallsyms_markers[] __weak;
91870
91871 static inline int is_kernel_inittext(unsigned long addr)
91872 {
91873+ if (system_state != SYSTEM_BOOTING)
91874+ return 0;
91875+
91876 if (addr >= (unsigned long)_sinittext
91877 && addr <= (unsigned long)_einittext)
91878 return 1;
91879 return 0;
91880 }
91881
91882+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91883+#ifdef CONFIG_MODULES
91884+static inline int is_module_text(unsigned long addr)
91885+{
91886+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
91887+ return 1;
91888+
91889+ addr = ktla_ktva(addr);
91890+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
91891+}
91892+#else
91893+static inline int is_module_text(unsigned long addr)
91894+{
91895+ return 0;
91896+}
91897+#endif
91898+#endif
91899+
91900 static inline int is_kernel_text(unsigned long addr)
91901 {
91902 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
91903@@ -70,13 +94,28 @@ static inline int is_kernel_text(unsigned long addr)
91904
91905 static inline int is_kernel(unsigned long addr)
91906 {
91907+
91908+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91909+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
91910+ return 1;
91911+
91912+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
91913+#else
91914 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
91915+#endif
91916+
91917 return 1;
91918 return in_gate_area_no_mm(addr);
91919 }
91920
91921 static int is_ksym_addr(unsigned long addr)
91922 {
91923+
91924+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91925+ if (is_module_text(addr))
91926+ return 0;
91927+#endif
91928+
91929 if (all_var)
91930 return is_kernel(addr);
91931
91932@@ -481,7 +520,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
91933
91934 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
91935 {
91936- iter->name[0] = '\0';
91937 iter->nameoff = get_symbol_offset(new_pos);
91938 iter->pos = new_pos;
91939 }
91940@@ -529,6 +567,11 @@ static int s_show(struct seq_file *m, void *p)
91941 {
91942 struct kallsym_iter *iter = m->private;
91943
91944+#ifdef CONFIG_GRKERNSEC_HIDESYM
91945+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
91946+ return 0;
91947+#endif
91948+
91949 /* Some debugging symbols have no name. Ignore them. */
91950 if (!iter->name[0])
91951 return 0;
91952@@ -542,6 +585,7 @@ static int s_show(struct seq_file *m, void *p)
91953 */
91954 type = iter->exported ? toupper(iter->type) :
91955 tolower(iter->type);
91956+
91957 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
91958 type, iter->name, iter->module_name);
91959 } else
91960@@ -567,7 +611,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
91961 struct kallsym_iter *iter;
91962 int ret;
91963
91964- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
91965+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
91966 if (!iter)
91967 return -ENOMEM;
91968 reset_iter(iter, 0);
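Under CONFIG_GRKERNSEC_HIDESYM the /proc/kallsyms hunks close three leaks at once: s_show() returns 0 so records are silently skipped for non-root readers (an empty file instead of -EPERM), the iterator is kzalloc'd so a skipped record cannot expose a stale name left over from the previous entry, and %pK censors the printed addresses. The per-record gating pattern, schematically:

    /* returning 0 from a seq_file show handler emits nothing for this
     * record and moves on, so unprivileged readers simply see no rows */
    static int show_one(struct seq_file *m, void *p)
    {
            if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
                    return 0;
            seq_printf(m, "%pK\n", p);   /* %pK also hides the pointer */
            return 0;
    }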
91969diff --git a/kernel/kcmp.c b/kernel/kcmp.c
91970index e30ac0f..a7fcafb 100644
91971--- a/kernel/kcmp.c
91972+++ b/kernel/kcmp.c
91973@@ -44,11 +44,12 @@ static long kptr_obfuscate(long v, int type)
91974 */
91975 static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
91976 {
91977- long ret;
91978+ long t1, t2;
91979
91980- ret = kptr_obfuscate((long)v1, type) - kptr_obfuscate((long)v2, type);
91981+ t1 = kptr_obfuscate((long)v1, type);
91982+ t2 = kptr_obfuscate((long)v2, type);
91983
91984- return (ret < 0) | ((ret > 0) << 1);
91985+ return (t1 < t2) | ((t1 > t2) << 1);
91986 }
91987
91988 /* The caller must have pinned the task */
91989@@ -99,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
91990 struct task_struct *task1, *task2;
91991 int ret;
91992
91993+#ifdef CONFIG_GRKERNSEC
91994+ return -ENOSYS;
91995+#endif
91996+
91997 rcu_read_lock();
91998
91999 /*
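The kcmp_ptr() change above is a genuine bug fix independent of grsecurity: deriving an ordering from the sign of t1 - t2 is wrong whenever the subtraction overflows a long, so the obfuscated pointers are now compared directly. A runnable demonstration of the failure mode (values chosen to force the overflow; the 1 = less, 2 = greater encoding matches kcmp's):

    #include <limits.h>
    #include <stdio.h>

    static int cmp_by_sub(long a, long b)   /* old, buggy */
    {
            long d = a - b;                 /* signed overflow: UB, wraps */
            return (d < 0) | ((d > 0) << 1);
    }

    static int cmp_direct(long a, long b)   /* new, correct */
    {
            return (a < b) | ((a > b) << 1);
    }

    int main(void)
    {
            long a = LONG_MAX, b = -1;
            /* old path: 1 (claims a < b); new path: 2 (a > b, correct) */
            printf("sub: %d  direct: %d\n", cmp_by_sub(a, b), cmp_direct(a, b));
            return 0;
    }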
92000diff --git a/kernel/kexec.c b/kernel/kexec.c
92001index 4b8f0c9..fffd0df 100644
92002--- a/kernel/kexec.c
92003+++ b/kernel/kexec.c
92004@@ -1045,7 +1045,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
92005 compat_ulong_t, flags)
92006 {
92007 struct compat_kexec_segment in;
92008- struct kexec_segment out, __user *ksegments;
92009+ struct kexec_segment out;
92010+ struct kexec_segment __user *ksegments;
92011 unsigned long i, result;
92012
92013 /* Don't allow clients that don't understand the native
92014diff --git a/kernel/kmod.c b/kernel/kmod.c
92015index 8637e04..8b1d0d8 100644
92016--- a/kernel/kmod.c
92017+++ b/kernel/kmod.c
92018@@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
92019 kfree(info->argv);
92020 }
92021
92022-static int call_modprobe(char *module_name, int wait)
92023+static int call_modprobe(char *module_name, char *module_param, int wait)
92024 {
92025 struct subprocess_info *info;
92026 static char *envp[] = {
92027@@ -85,7 +85,7 @@ static int call_modprobe(char *module_name, int wait)
92028 NULL
92029 };
92030
92031- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
92032+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
92033 if (!argv)
92034 goto out;
92035
92036@@ -97,7 +97,8 @@ static int call_modprobe(char *module_name, int wait)
92037 argv[1] = "-q";
92038 argv[2] = "--";
92039 argv[3] = module_name; /* check free_modprobe_argv() */
92040- argv[4] = NULL;
92041+ argv[4] = module_param;
92042+ argv[5] = NULL;
92043
92044 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
92045 NULL, free_modprobe_argv, NULL);
92046@@ -129,9 +130,8 @@ out:
92047 * If module auto-loading support is disabled then this function
92048 * becomes a no-operation.
92049 */
92050-int __request_module(bool wait, const char *fmt, ...)
92051+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
92052 {
92053- va_list args;
92054 char module_name[MODULE_NAME_LEN];
92055 unsigned int max_modprobes;
92056 int ret;
92057@@ -150,9 +150,7 @@ int __request_module(bool wait, const char *fmt, ...)
92058 if (!modprobe_path[0])
92059 return 0;
92060
92061- va_start(args, fmt);
92062- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
92063- va_end(args);
92064+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
92065 if (ret >= MODULE_NAME_LEN)
92066 return -ENAMETOOLONG;
92067
92068@@ -160,6 +158,20 @@ int __request_module(bool wait, const char *fmt, ...)
92069 if (ret)
92070 return ret;
92071
92072+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92073+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
92074+	 /* hack to work around consolekit/udisks stupidity */
92075+ read_lock(&tasklist_lock);
92076+ if (!strcmp(current->comm, "mount") &&
92077+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
92078+ read_unlock(&tasklist_lock);
92079+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
92080+ return -EPERM;
92081+ }
92082+ read_unlock(&tasklist_lock);
92083+ }
92084+#endif
92085+
92086 /* If modprobe needs a service that is in a module, we get a recursive
92087 * loop. Limit the number of running kmod threads to max_threads/2 or
92088 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
92089@@ -188,11 +200,52 @@ int __request_module(bool wait, const char *fmt, ...)
92090
92091 trace_module_request(module_name, wait, _RET_IP_);
92092
92093- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
92094+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
92095
92096 atomic_dec(&kmod_concurrent);
92097 return ret;
92098 }
92099+
92100+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
92101+{
92102+ va_list args;
92103+ int ret;
92104+
92105+ va_start(args, fmt);
92106+ ret = ____request_module(wait, module_param, fmt, args);
92107+ va_end(args);
92108+
92109+ return ret;
92110+}
92111+
92112+int __request_module(bool wait, const char *fmt, ...)
92113+{
92114+ va_list args;
92115+ int ret;
92116+
92117+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92118+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
92119+ char module_param[MODULE_NAME_LEN];
92120+
92121+ memset(module_param, 0, sizeof(module_param));
92122+
92123+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
92124+
92125+ va_start(args, fmt);
92126+ ret = ____request_module(wait, module_param, fmt, args);
92127+ va_end(args);
92128+
92129+ return ret;
92130+ }
92131+#endif
92132+
92133+ va_start(args, fmt);
92134+ ret = ____request_module(wait, NULL, fmt, args);
92135+ va_end(args);
92136+
92137+ return ret;
92138+}
92139+
92140 EXPORT_SYMBOL(__request_module);
92141 #endif /* CONFIG_MODULES */
92142
92143@@ -218,6 +271,20 @@ static int ____call_usermodehelper(void *data)
92144 */
92145 set_user_nice(current, 0);
92146
92147+#ifdef CONFIG_GRKERNSEC
92148+ /* this is race-free as far as userland is concerned as we copied
92149+ out the path to be used prior to this point and are now operating
92150+ on that copy
92151+ */
92152+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
92153+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
92154+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
92155+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path);
92156+ retval = -EPERM;
92157+ goto fail;
92158+ }
92159+#endif
92160+
92161 retval = -ENOMEM;
92162 new = prepare_kernel_cred(current);
92163 if (!new)
92164@@ -240,8 +307,8 @@ static int ____call_usermodehelper(void *data)
92165 commit_creds(new);
92166
92167 retval = do_execve(getname_kernel(sub_info->path),
92168- (const char __user *const __user *)sub_info->argv,
92169- (const char __user *const __user *)sub_info->envp);
92170+ (const char __user *const __force_user *)sub_info->argv,
92171+ (const char __user *const __force_user *)sub_info->envp);
92172 if (!retval)
92173 return 0;
92174
92175@@ -260,6 +327,10 @@ static int call_helper(void *data)
92176
92177 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
92178 {
92179+#ifdef CONFIG_GRKERNSEC
92180+ kfree(info->path);
92181+ info->path = info->origpath;
92182+#endif
92183 if (info->cleanup)
92184 (*info->cleanup)(info);
92185 kfree(info);
92186@@ -300,7 +371,7 @@ static int wait_for_helper(void *data)
92187 *
92188 * Thus the __user pointer cast is valid here.
92189 */
92190- sys_wait4(pid, (int __user *)&ret, 0, NULL);
92191+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
92192
92193 /*
92194 * If ret is 0, either ____call_usermodehelper failed and the
92195@@ -539,7 +610,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
92196 goto out;
92197
92198 INIT_WORK(&sub_info->work, __call_usermodehelper);
92199+#ifdef CONFIG_GRKERNSEC
92200+ sub_info->origpath = path;
92201+ sub_info->path = kstrdup(path, gfp_mask);
92202+#else
92203 sub_info->path = path;
92204+#endif
92205 sub_info->argv = argv;
92206 sub_info->envp = envp;
92207
92208@@ -647,7 +723,7 @@ EXPORT_SYMBOL(call_usermodehelper);
92209 static int proc_cap_handler(struct ctl_table *table, int write,
92210 void __user *buffer, size_t *lenp, loff_t *ppos)
92211 {
92212- struct ctl_table t;
92213+ ctl_table_no_const t;
92214 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
92215 kernel_cap_t new_cap;
92216 int err, i;
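
The kmod.c changes are the heart of MODHARDEN: __request_module() from a non-root caller appends a marker argument of the form grsec_modharden_normal<uid>_ to the modprobe command line, and load_module() later recognises that marker and rejects the load with an alert. call_modprobe() therefore grows a sixth argv slot. A userspace sketch of the argv construction; the uid value below is an example and error handling is trimmed:

#include <stdio.h>
#include <stdlib.h>

static char **build_modprobe_argv(const char *module_name, char *module_param)
{
	char **argv = malloc(sizeof(char *[6]));	/* was sizeof(char *[5]) */
	if (!argv)
		return NULL;
	argv[0] = "/sbin/modprobe";
	argv[1] = "-q";
	argv[2] = "--";
	argv[3] = (char *)module_name;
	argv[4] = module_param;	/* NULL for root callers, marker otherwise */
	argv[5] = NULL;
	return argv;
}

int main(void)
{
	char marker[64];
	snprintf(marker, sizeof(marker), "grsec_modharden_normal%u_", 1000u);
	char **argv = build_modprobe_argv("fuse", marker);
	for (int i = 0; argv && argv[i]; i++)
		printf("argv[%d] = %s\n", i, argv[i]);
	free(argv);
	return 0;
}

The same file also whitelists usermode-helper binaries to /sbin, /lib, /lib64, /usr/lib and one apport path, and rejects anything containing "..", closing off helper redirection into attacker-writable locations.
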
92217diff --git a/kernel/kprobes.c b/kernel/kprobes.c
92218index 734e9a7..0a313b8 100644
92219--- a/kernel/kprobes.c
92220+++ b/kernel/kprobes.c
92221@@ -31,6 +31,9 @@
92222 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
92223 * <prasanna@in.ibm.com> added function-return probes.
92224 */
92225+#ifdef CONFIG_GRKERNSEC_HIDESYM
92226+#define __INCLUDED_BY_HIDESYM 1
92227+#endif
92228 #include <linux/kprobes.h>
92229 #include <linux/hash.h>
92230 #include <linux/init.h>
92231@@ -122,12 +125,12 @@ enum kprobe_slot_state {
92232
92233 static void *alloc_insn_page(void)
92234 {
92235- return module_alloc(PAGE_SIZE);
92236+ return module_alloc_exec(PAGE_SIZE);
92237 }
92238
92239 static void free_insn_page(void *page)
92240 {
92241- module_free(NULL, page);
92242+ module_free_exec(NULL, page);
92243 }
92244
92245 struct kprobe_insn_cache kprobe_insn_slots = {
92246@@ -2176,11 +2179,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
92247 kprobe_type = "k";
92248
92249 if (sym)
92250- seq_printf(pi, "%p %s %s+0x%x %s ",
92251+ seq_printf(pi, "%pK %s %s+0x%x %s ",
92252 p->addr, kprobe_type, sym, offset,
92253 (modname ? modname : " "));
92254 else
92255- seq_printf(pi, "%p %s %p ",
92256+ seq_printf(pi, "%pK %s %pK ",
92257 p->addr, kprobe_type, p->addr);
92258
92259 if (!pp)
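
In kprobes.c the instruction slots move to module_alloc_exec(), the RX allocator introduced for the module split further down, and the debugfs listing switches from %p to %pK, which prints a censored pointer to readers restricted by kptr_restrict. A toy model of the %pK behaviour, with an euid test standing in for the real policy check:

#include <stdio.h>
#include <unistd.h>

static void print_probe_addr(const void *addr)
{
	if (geteuid() == 0)
		printf("%p\n", addr);		/* privileged: real address */
	else
		printf("%p\n", (void *)0);	/* what %pK shows everyone else */
}

int main(void)
{
	int probe;
	print_probe_addr(&probe);
	return 0;
}
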
92260diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
92261index 6683cce..daf8999 100644
92262--- a/kernel/ksysfs.c
92263+++ b/kernel/ksysfs.c
92264@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
92265 {
92266 if (count+1 > UEVENT_HELPER_PATH_LEN)
92267 return -ENOENT;
92268+ if (!capable(CAP_SYS_ADMIN))
92269+ return -EPERM;
92270 memcpy(uevent_helper, buf, count);
92271 uevent_helper[count] = '\0';
92272 if (count && uevent_helper[count-1] == '\n')
92273@@ -176,7 +178,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
92274 return count;
92275 }
92276
92277-static struct bin_attribute notes_attr = {
92278+static bin_attribute_no_const notes_attr __read_only = {
92279 .attr = {
92280 .name = "notes",
92281 .mode = S_IRUGO,
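
uevent_helper_store() now demands CAP_SYS_ADMIN before letting anyone rewrite the helper path, since the kernel executes that path on every uevent. The handler's shape, modelled in plain C; capable_sys_admin() is a stub for the kernel's capable(CAP_SYS_ADMIN):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define UEVENT_HELPER_PATH_LEN 256
static char uevent_helper[UEVENT_HELPER_PATH_LEN];

static int capable_sys_admin(void) { return 0; }	/* stub: deny */

static long uevent_helper_store(const char *buf, size_t count)
{
	if (count + 1 > UEVENT_HELPER_PATH_LEN)
		return -ENOENT;		/* pre-existing oversize return */
	if (!capable_sys_admin())
		return -EPERM;		/* the added privilege gate */
	memcpy(uevent_helper, buf, count);
	uevent_helper[count] = '\0';
	if (count && uevent_helper[count - 1] == '\n')
		uevent_helper[count - 1] = '\0';
	return (long)count;
}

int main(void)
{
	printf("store -> %ld (expect -EPERM = %d)\n",
	       uevent_helper_store("/bin/hotplug\n", 13), -EPERM);
	return 0;
}
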
92282diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
92283index d24e433..fa04fb8 100644
92284--- a/kernel/locking/lockdep.c
92285+++ b/kernel/locking/lockdep.c
92286@@ -597,6 +597,10 @@ static int static_obj(void *obj)
92287 end = (unsigned long) &_end,
92288 addr = (unsigned long) obj;
92289
92290+#ifdef CONFIG_PAX_KERNEXEC
92291+ start = ktla_ktva(start);
92292+#endif
92293+
92294 /*
92295 * static variable?
92296 */
92297@@ -738,6 +742,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
92298 if (!static_obj(lock->key)) {
92299 debug_locks_off();
92300 printk("INFO: trying to register non-static key.\n");
92301+ printk("lock:%pS key:%pS.\n", lock, lock->key);
92302 printk("the code is fine but needs lockdep annotation.\n");
92303 printk("turning off the locking correctness validator.\n");
92304 dump_stack();
92305@@ -3079,7 +3084,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
92306 if (!class)
92307 return 0;
92308 }
92309- atomic_inc((atomic_t *)&class->ops);
92310+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
92311 if (very_verbose(class)) {
92312 printk("\nacquire class [%p] %s", class->key, class->name);
92313 if (class->name_version > 1)
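
static_obj() decides whether a lock key sits in static storage by range-checking its address against the kernel image; KERNEXEC relocates that image, so the patch runs the range start through ktla_ktva() first. The extra printk simply names the offending lock and key when the check fails. The range-check idea in userspace, leaning on GNU ld's conventional symbols (an assumption: __executable_start and end exist under the default linker script):

#include <stdio.h>

extern char __executable_start[];	/* start of the program image */
extern char end[];			/* end of initialised data + bss */

static int static_obj(const void *obj)
{
	const char *addr = obj;
	return addr >= __executable_start && addr < end;
}

static int file_scope_key;

int main(void)
{
	int stack_key;
	printf("file-scope key static: %d\n", static_obj(&file_scope_key));
	printf("stack key static:      %d\n", static_obj(&stack_key));
	return 0;
}
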
92314diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
92315index ef43ac4..2720dfa 100644
92316--- a/kernel/locking/lockdep_proc.c
92317+++ b/kernel/locking/lockdep_proc.c
92318@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
92319 return 0;
92320 }
92321
92322- seq_printf(m, "%p", class->key);
92323+ seq_printf(m, "%pK", class->key);
92324 #ifdef CONFIG_DEBUG_LOCKDEP
92325 seq_printf(m, " OPS:%8ld", class->ops);
92326 #endif
92327@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
92328
92329 list_for_each_entry(entry, &class->locks_after, entry) {
92330 if (entry->distance == 1) {
92331- seq_printf(m, " -> [%p] ", entry->class->key);
92332+ seq_printf(m, " -> [%pK] ", entry->class->key);
92333 print_name(m, entry->class);
92334 seq_puts(m, "\n");
92335 }
92336@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
92337 if (!class->key)
92338 continue;
92339
92340- seq_printf(m, "[%p] ", class->key);
92341+ seq_printf(m, "[%pK] ", class->key);
92342 print_name(m, class);
92343 seq_puts(m, "\n");
92344 }
92345@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
92346 if (!i)
92347 seq_line(m, '-', 40-namelen, namelen);
92348
92349- snprintf(ip, sizeof(ip), "[<%p>]",
92350+ snprintf(ip, sizeof(ip), "[<%pK>]",
92351 (void *)class->contention_point[i]);
92352 seq_printf(m, "%40s %14lu %29s %pS\n",
92353 name, stats->contention_point[i],
92354@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
92355 if (!i)
92356 seq_line(m, '-', 40-namelen, namelen);
92357
92358- snprintf(ip, sizeof(ip), "[<%p>]",
92359+ snprintf(ip, sizeof(ip), "[<%pK>]",
92360 (void *)class->contending_point[i]);
92361 seq_printf(m, "%40s %14lu %29s %pS\n",
92362 name, stats->contending_point[i],
92363diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
92364index be9ee15..39d6233 100644
92365--- a/kernel/locking/mcs_spinlock.c
92366+++ b/kernel/locking/mcs_spinlock.c
92367@@ -102,7 +102,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
92368
92369 prev = decode_cpu(old);
92370 node->prev = prev;
92371- ACCESS_ONCE(prev->next) = node;
92372+ ACCESS_ONCE_RW(prev->next) = node;
92373
92374 /*
92375 * Normally @prev is untouchable after the above store; because at that
92376@@ -174,8 +174,8 @@ unqueue:
92377 * it will wait in Step-A.
92378 */
92379
92380- ACCESS_ONCE(next->prev) = prev;
92381- ACCESS_ONCE(prev->next) = next;
92382+ ACCESS_ONCE_RW(next->prev) = prev;
92383+ ACCESS_ONCE_RW(prev->next) = next;
92384
92385 return false;
92386 }
92387@@ -197,13 +197,13 @@ void osq_unlock(struct optimistic_spin_queue *lock)
92388 node = this_cpu_ptr(&osq_node);
92389 next = xchg(&node->next, NULL);
92390 if (next) {
92391- ACCESS_ONCE(next->locked) = 1;
92392+ ACCESS_ONCE_RW(next->locked) = 1;
92393 return;
92394 }
92395
92396 next = osq_wait_next(lock, node, NULL);
92397 if (next)
92398- ACCESS_ONCE(next->locked) = 1;
92399+ ACCESS_ONCE_RW(next->locked) = 1;
92400 }
92401
92402 #endif
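
The MCS/OSQ hunks swap ACCESS_ONCE() for ACCESS_ONCE_RW() on every store. Under PaX constification the plain macro reads through a const-qualified volatile pointer, so writes need the RW variant; both still compile down to a single untorn access. A sketch of the pair; the kernel's real definitions differ only in plumbing:

#include <stdio.h>

#define ACCESS_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x)	(*(volatile __typeof__(x) *)&(x))

int main(void)
{
	int locked = 0;
	ACCESS_ONCE_RW(locked) = 1;			/* volatile store */
	printf("locked = %d\n", ACCESS_ONCE(locked));	/* volatile load */
	/* ACCESS_ONCE(locked) = 2; would fail: assignment to const lvalue */
	return 0;
}
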
92403diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
92404index 74356dc..48dd5e1 100644
92405--- a/kernel/locking/mcs_spinlock.h
92406+++ b/kernel/locking/mcs_spinlock.h
92407@@ -81,7 +81,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
92408 */
92409 return;
92410 }
92411- ACCESS_ONCE(prev->next) = node;
92412+ ACCESS_ONCE_RW(prev->next) = node;
92413
92414 /* Wait until the lock holder passes the lock down. */
92415 arch_mcs_spin_lock_contended(&node->locked);
92416diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
92417index 5cf6731..ce3bc5a 100644
92418--- a/kernel/locking/mutex-debug.c
92419+++ b/kernel/locking/mutex-debug.c
92420@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
92421 }
92422
92423 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
92424- struct thread_info *ti)
92425+ struct task_struct *task)
92426 {
92427 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
92428
92429 /* Mark the current thread as blocked on the lock: */
92430- ti->task->blocked_on = waiter;
92431+ task->blocked_on = waiter;
92432 }
92433
92434 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
92435- struct thread_info *ti)
92436+ struct task_struct *task)
92437 {
92438 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
92439- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
92440- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
92441- ti->task->blocked_on = NULL;
92442+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
92443+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
92444+ task->blocked_on = NULL;
92445
92446 list_del_init(&waiter->list);
92447 waiter->task = NULL;
92448diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
92449index 0799fd3..d06ae3b 100644
92450--- a/kernel/locking/mutex-debug.h
92451+++ b/kernel/locking/mutex-debug.h
92452@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
92453 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
92454 extern void debug_mutex_add_waiter(struct mutex *lock,
92455 struct mutex_waiter *waiter,
92456- struct thread_info *ti);
92457+ struct task_struct *task);
92458 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
92459- struct thread_info *ti);
92460+ struct task_struct *task);
92461 extern void debug_mutex_unlock(struct mutex *lock);
92462 extern void debug_mutex_init(struct mutex *lock, const char *name,
92463 struct lock_class_key *key);
92464diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
92465index acca2c1..ddeaea8 100644
92466--- a/kernel/locking/mutex.c
92467+++ b/kernel/locking/mutex.c
92468@@ -490,7 +490,7 @@ slowpath:
92469 goto skip_wait;
92470
92471 debug_mutex_lock_common(lock, &waiter);
92472- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
92473+ debug_mutex_add_waiter(lock, &waiter, task);
92474
92475 /* add waiting tasks to the end of the waitqueue (FIFO): */
92476 list_add_tail(&waiter.list, &lock->wait_list);
92477@@ -534,7 +534,7 @@ slowpath:
92478 schedule_preempt_disabled();
92479 spin_lock_mutex(&lock->wait_lock, flags);
92480 }
92481- mutex_remove_waiter(lock, &waiter, current_thread_info());
92482+ mutex_remove_waiter(lock, &waiter, task);
92483 /* set it to 0 if there are no waiters left: */
92484 if (likely(list_empty(&lock->wait_list)))
92485 atomic_set(&lock->count, 0);
92486@@ -571,7 +571,7 @@ skip_wait:
92487 return 0;
92488
92489 err:
92490- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
92491+ mutex_remove_waiter(lock, &waiter, task);
92492 spin_unlock_mutex(&lock->wait_lock, flags);
92493 debug_mutex_free_waiter(&waiter);
92494 mutex_release(&lock->dep_map, 1, ip);
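
The three mutex files make one coordinated interface change: the debug helpers take the task_struct directly instead of a thread_info, because every caller already holds the task and the callees only ever dereferenced ti->task. Reduced to essentials:

#include <stdio.h>

struct task { const char *comm; void *blocked_on; };
struct thread_info { struct task *task; };

static void add_waiter_old(struct thread_info *ti, void *waiter)
{
	ti->task->blocked_on = waiter;	/* indirection for nothing */
}

static void add_waiter_new(struct task *task, void *waiter)
{
	task->blocked_on = waiter;	/* patched form: direct */
}

int main(void)
{
	struct task t = { "demo", NULL };
	struct thread_info ti = { &t };
	int waiter;
	add_waiter_old(&ti, &waiter);
	add_waiter_new(&t, &waiter);
	printf("%s blocked_on=%p\n", t.comm, t.blocked_on);
	return 0;
}
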
92495diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
92496index 1d96dd0..994ff19 100644
92497--- a/kernel/locking/rtmutex-tester.c
92498+++ b/kernel/locking/rtmutex-tester.c
92499@@ -22,7 +22,7 @@
92500 #define MAX_RT_TEST_MUTEXES 8
92501
92502 static spinlock_t rttest_lock;
92503-static atomic_t rttest_event;
92504+static atomic_unchecked_t rttest_event;
92505
92506 struct test_thread_data {
92507 int opcode;
92508@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
92509
92510 case RTTEST_LOCKCONT:
92511 td->mutexes[td->opdata] = 1;
92512- td->event = atomic_add_return(1, &rttest_event);
92513+ td->event = atomic_add_return_unchecked(1, &rttest_event);
92514 return 0;
92515
92516 case RTTEST_RESET:
92517@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
92518 return 0;
92519
92520 case RTTEST_RESETEVENT:
92521- atomic_set(&rttest_event, 0);
92522+ atomic_set_unchecked(&rttest_event, 0);
92523 return 0;
92524
92525 default:
92526@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
92527 return ret;
92528
92529 td->mutexes[id] = 1;
92530- td->event = atomic_add_return(1, &rttest_event);
92531+ td->event = atomic_add_return_unchecked(1, &rttest_event);
92532 rt_mutex_lock(&mutexes[id]);
92533- td->event = atomic_add_return(1, &rttest_event);
92534+ td->event = atomic_add_return_unchecked(1, &rttest_event);
92535 td->mutexes[id] = 4;
92536 return 0;
92537
92538@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
92539 return ret;
92540
92541 td->mutexes[id] = 1;
92542- td->event = atomic_add_return(1, &rttest_event);
92543+ td->event = atomic_add_return_unchecked(1, &rttest_event);
92544 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
92545- td->event = atomic_add_return(1, &rttest_event);
92546+ td->event = atomic_add_return_unchecked(1, &rttest_event);
92547 td->mutexes[id] = ret ? 0 : 4;
92548 return ret ? -EINTR : 0;
92549
92550@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
92551 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
92552 return ret;
92553
92554- td->event = atomic_add_return(1, &rttest_event);
92555+ td->event = atomic_add_return_unchecked(1, &rttest_event);
92556 rt_mutex_unlock(&mutexes[id]);
92557- td->event = atomic_add_return(1, &rttest_event);
92558+ td->event = atomic_add_return_unchecked(1, &rttest_event);
92559 td->mutexes[id] = 0;
92560 return 0;
92561
92562@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
92563 break;
92564
92565 td->mutexes[dat] = 2;
92566- td->event = atomic_add_return(1, &rttest_event);
92567+ td->event = atomic_add_return_unchecked(1, &rttest_event);
92568 break;
92569
92570 default:
92571@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
92572 return;
92573
92574 td->mutexes[dat] = 3;
92575- td->event = atomic_add_return(1, &rttest_event);
92576+ td->event = atomic_add_return_unchecked(1, &rttest_event);
92577 break;
92578
92579 case RTTEST_LOCKNOWAIT:
92580@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
92581 return;
92582
92583 td->mutexes[dat] = 1;
92584- td->event = atomic_add_return(1, &rttest_event);
92585+ td->event = atomic_add_return_unchecked(1, &rttest_event);
92586 return;
92587
92588 default:
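
rttest_event is a pure event counter whose wraparound is harmless, so the patch converts it to atomic_unchecked_t, opting it out of PaX REFCOUNT's overflow trap and keeping the trap meaningful for real reference counts. The distinction, modelled single-threaded with a GCC/Clang builtin (assumed available, GCC 5 or newer):

#include <stdio.h>
#include <limits.h>

static int checked_add_return(int delta, int *v)
{
	int res;
	if (__builtin_add_overflow(*v, delta, &res)) {
		fprintf(stderr, "refcount overflow caught\n");
		return *v;	/* REFCOUNT would saturate/trap here */
	}
	return *v = res;
}

static unsigned int unchecked_add_return(unsigned int delta, unsigned int *v)
{
	return *v += delta;	/* unsigned wrap is defined and tolerated */
}

int main(void)
{
	int refcount = INT_MAX;
	unsigned int event = UINT_MAX;
	printf("checked:   %d\n", checked_add_return(1, &refcount));
	printf("unchecked: %u\n", unchecked_add_return(1, &event));
	return 0;
}
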
92589diff --git a/kernel/module.c b/kernel/module.c
92590index 81e727c..a8ea6f9 100644
92591--- a/kernel/module.c
92592+++ b/kernel/module.c
92593@@ -61,6 +61,7 @@
92594 #include <linux/pfn.h>
92595 #include <linux/bsearch.h>
92596 #include <linux/fips.h>
92597+#include <linux/grsecurity.h>
92598 #include <uapi/linux/module.h>
92599 #include "module-internal.h"
92600
92601@@ -157,7 +158,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
92602
92603 /* Bounds of module allocation, for speeding __module_address.
92604 * Protected by module_mutex. */
92605-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
92606+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
92607+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
92608
92609 int register_module_notifier(struct notifier_block * nb)
92610 {
92611@@ -324,7 +326,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
92612 return true;
92613
92614 list_for_each_entry_rcu(mod, &modules, list) {
92615- struct symsearch arr[] = {
92616+ struct symsearch modarr[] = {
92617 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
92618 NOT_GPL_ONLY, false },
92619 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
92620@@ -349,7 +351,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
92621 if (mod->state == MODULE_STATE_UNFORMED)
92622 continue;
92623
92624- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
92625+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
92626 return true;
92627 }
92628 return false;
92629@@ -489,7 +491,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
92630 if (!pcpusec->sh_size)
92631 return 0;
92632
92633- if (align > PAGE_SIZE) {
92634+ if (align-1 >= PAGE_SIZE) {
92635 pr_warn("%s: per-cpu alignment %li > %li\n",
92636 mod->name, align, PAGE_SIZE);
92637 align = PAGE_SIZE;
92638@@ -1061,7 +1063,7 @@ struct module_attribute module_uevent =
92639 static ssize_t show_coresize(struct module_attribute *mattr,
92640 struct module_kobject *mk, char *buffer)
92641 {
92642- return sprintf(buffer, "%u\n", mk->mod->core_size);
92643+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
92644 }
92645
92646 static struct module_attribute modinfo_coresize =
92647@@ -1070,7 +1072,7 @@ static struct module_attribute modinfo_coresize =
92648 static ssize_t show_initsize(struct module_attribute *mattr,
92649 struct module_kobject *mk, char *buffer)
92650 {
92651- return sprintf(buffer, "%u\n", mk->mod->init_size);
92652+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
92653 }
92654
92655 static struct module_attribute modinfo_initsize =
92656@@ -1162,12 +1164,29 @@ static int check_version(Elf_Shdr *sechdrs,
92657 goto bad_version;
92658 }
92659
92660+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
92661+ /*
92662+ * avoid potentially printing jibberish on attempted load
92663+ * of a module randomized with a different seed
92664+ */
92665+ pr_warn("no symbol version for %s\n", symname);
92666+#else
92667 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
92668+#endif
92669 return 0;
92670
92671 bad_version:
92672+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
92673+ /*
92674+ * avoid potentially printing jibberish on attempted load
92675+ * of a module randomized with a different seed
92676+ */
92677+ printk("attempted module disagrees about version of symbol %s\n",
92678+ symname);
92679+#else
92680 printk("%s: disagrees about version of symbol %s\n",
92681 mod->name, symname);
92682+#endif
92683 return 0;
92684 }
92685
92686@@ -1283,7 +1302,7 @@ resolve_symbol_wait(struct module *mod,
92687 */
92688 #ifdef CONFIG_SYSFS
92689
92690-#ifdef CONFIG_KALLSYMS
92691+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
92692 static inline bool sect_empty(const Elf_Shdr *sect)
92693 {
92694 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
92695@@ -1423,7 +1442,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
92696 {
92697 unsigned int notes, loaded, i;
92698 struct module_notes_attrs *notes_attrs;
92699- struct bin_attribute *nattr;
92700+ bin_attribute_no_const *nattr;
92701
92702 /* failed to create section attributes, so can't create notes */
92703 if (!mod->sect_attrs)
92704@@ -1535,7 +1554,7 @@ static void del_usage_links(struct module *mod)
92705 static int module_add_modinfo_attrs(struct module *mod)
92706 {
92707 struct module_attribute *attr;
92708- struct module_attribute *temp_attr;
92709+ module_attribute_no_const *temp_attr;
92710 int error = 0;
92711 int i;
92712
92713@@ -1756,21 +1775,21 @@ static void set_section_ro_nx(void *base,
92714
92715 static void unset_module_core_ro_nx(struct module *mod)
92716 {
92717- set_page_attributes(mod->module_core + mod->core_text_size,
92718- mod->module_core + mod->core_size,
92719+ set_page_attributes(mod->module_core_rw,
92720+ mod->module_core_rw + mod->core_size_rw,
92721 set_memory_x);
92722- set_page_attributes(mod->module_core,
92723- mod->module_core + mod->core_ro_size,
92724+ set_page_attributes(mod->module_core_rx,
92725+ mod->module_core_rx + mod->core_size_rx,
92726 set_memory_rw);
92727 }
92728
92729 static void unset_module_init_ro_nx(struct module *mod)
92730 {
92731- set_page_attributes(mod->module_init + mod->init_text_size,
92732- mod->module_init + mod->init_size,
92733+ set_page_attributes(mod->module_init_rw,
92734+ mod->module_init_rw + mod->init_size_rw,
92735 set_memory_x);
92736- set_page_attributes(mod->module_init,
92737- mod->module_init + mod->init_ro_size,
92738+ set_page_attributes(mod->module_init_rx,
92739+ mod->module_init_rx + mod->init_size_rx,
92740 set_memory_rw);
92741 }
92742
92743@@ -1783,14 +1802,14 @@ void set_all_modules_text_rw(void)
92744 list_for_each_entry_rcu(mod, &modules, list) {
92745 if (mod->state == MODULE_STATE_UNFORMED)
92746 continue;
92747- if ((mod->module_core) && (mod->core_text_size)) {
92748- set_page_attributes(mod->module_core,
92749- mod->module_core + mod->core_text_size,
92750+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
92751+ set_page_attributes(mod->module_core_rx,
92752+ mod->module_core_rx + mod->core_size_rx,
92753 set_memory_rw);
92754 }
92755- if ((mod->module_init) && (mod->init_text_size)) {
92756- set_page_attributes(mod->module_init,
92757- mod->module_init + mod->init_text_size,
92758+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
92759+ set_page_attributes(mod->module_init_rx,
92760+ mod->module_init_rx + mod->init_size_rx,
92761 set_memory_rw);
92762 }
92763 }
92764@@ -1806,14 +1825,14 @@ void set_all_modules_text_ro(void)
92765 list_for_each_entry_rcu(mod, &modules, list) {
92766 if (mod->state == MODULE_STATE_UNFORMED)
92767 continue;
92768- if ((mod->module_core) && (mod->core_text_size)) {
92769- set_page_attributes(mod->module_core,
92770- mod->module_core + mod->core_text_size,
92771+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
92772+ set_page_attributes(mod->module_core_rx,
92773+ mod->module_core_rx + mod->core_size_rx,
92774 set_memory_ro);
92775 }
92776- if ((mod->module_init) && (mod->init_text_size)) {
92777- set_page_attributes(mod->module_init,
92778- mod->module_init + mod->init_text_size,
92779+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
92780+ set_page_attributes(mod->module_init_rx,
92781+ mod->module_init_rx + mod->init_size_rx,
92782 set_memory_ro);
92783 }
92784 }
92785@@ -1864,16 +1883,19 @@ static void free_module(struct module *mod)
92786
92787 /* This may be NULL, but that's OK */
92788 unset_module_init_ro_nx(mod);
92789- module_free(mod, mod->module_init);
92790+ module_free(mod, mod->module_init_rw);
92791+ module_free_exec(mod, mod->module_init_rx);
92792 kfree(mod->args);
92793 percpu_modfree(mod);
92794
92795 /* Free lock-classes: */
92796- lockdep_free_key_range(mod->module_core, mod->core_size);
92797+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
92798+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
92799
92800 /* Finally, free the core (containing the module structure) */
92801 unset_module_core_ro_nx(mod);
92802- module_free(mod, mod->module_core);
92803+ module_free_exec(mod, mod->module_core_rx);
92804+ module_free(mod, mod->module_core_rw);
92805
92806 #ifdef CONFIG_MPU
92807 update_protections(current->mm);
92808@@ -1942,9 +1964,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
92809 int ret = 0;
92810 const struct kernel_symbol *ksym;
92811
92812+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92813+ int is_fs_load = 0;
92814+ int register_filesystem_found = 0;
92815+ char *p;
92816+
92817+ p = strstr(mod->args, "grsec_modharden_fs");
92818+ if (p) {
92819+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
92820+ /* copy \0 as well */
92821+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
92822+ is_fs_load = 1;
92823+ }
92824+#endif
92825+
92826 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
92827 const char *name = info->strtab + sym[i].st_name;
92828
92829+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92830+ /* it's a real shame this will never get ripped and copied
92831+ upstream! ;(
92832+ */
92833+ if (is_fs_load && !strcmp(name, "register_filesystem"))
92834+ register_filesystem_found = 1;
92835+#endif
92836+
92837 switch (sym[i].st_shndx) {
92838 case SHN_COMMON:
92839 /* Ignore common symbols */
92840@@ -1969,7 +2013,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
92841 ksym = resolve_symbol_wait(mod, info, name);
92842 /* Ok if resolved. */
92843 if (ksym && !IS_ERR(ksym)) {
92844+ pax_open_kernel();
92845 sym[i].st_value = ksym->value;
92846+ pax_close_kernel();
92847 break;
92848 }
92849
92850@@ -1988,11 +2034,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
92851 secbase = (unsigned long)mod_percpu(mod);
92852 else
92853 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
92854+ pax_open_kernel();
92855 sym[i].st_value += secbase;
92856+ pax_close_kernel();
92857 break;
92858 }
92859 }
92860
92861+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92862+ if (is_fs_load && !register_filesystem_found) {
92863+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
92864+ ret = -EPERM;
92865+ }
92866+#endif
92867+
92868 return ret;
92869 }
92870
92871@@ -2076,22 +2131,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
92872 || s->sh_entsize != ~0UL
92873 || strstarts(sname, ".init"))
92874 continue;
92875- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
92876+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
92877+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
92878+ else
92879+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
92880 pr_debug("\t%s\n", sname);
92881 }
92882- switch (m) {
92883- case 0: /* executable */
92884- mod->core_size = debug_align(mod->core_size);
92885- mod->core_text_size = mod->core_size;
92886- break;
92887- case 1: /* RO: text and ro-data */
92888- mod->core_size = debug_align(mod->core_size);
92889- mod->core_ro_size = mod->core_size;
92890- break;
92891- case 3: /* whole core */
92892- mod->core_size = debug_align(mod->core_size);
92893- break;
92894- }
92895 }
92896
92897 pr_debug("Init section allocation order:\n");
92898@@ -2105,23 +2150,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
92899 || s->sh_entsize != ~0UL
92900 || !strstarts(sname, ".init"))
92901 continue;
92902- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
92903- | INIT_OFFSET_MASK);
92904+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
92905+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
92906+ else
92907+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
92908+ s->sh_entsize |= INIT_OFFSET_MASK;
92909 pr_debug("\t%s\n", sname);
92910 }
92911- switch (m) {
92912- case 0: /* executable */
92913- mod->init_size = debug_align(mod->init_size);
92914- mod->init_text_size = mod->init_size;
92915- break;
92916- case 1: /* RO: text and ro-data */
92917- mod->init_size = debug_align(mod->init_size);
92918- mod->init_ro_size = mod->init_size;
92919- break;
92920- case 3: /* whole init */
92921- mod->init_size = debug_align(mod->init_size);
92922- break;
92923- }
92924 }
92925 }
92926
92927@@ -2294,7 +2329,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
92928
92929 /* Put symbol section at end of init part of module. */
92930 symsect->sh_flags |= SHF_ALLOC;
92931- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
92932+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
92933 info->index.sym) | INIT_OFFSET_MASK;
92934 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
92935
92936@@ -2311,13 +2346,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
92937 }
92938
92939 /* Append room for core symbols at end of core part. */
92940- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
92941- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
92942- mod->core_size += strtab_size;
92943+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
92944+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
92945+ mod->core_size_rx += strtab_size;
92946
92947 /* Put string table section at end of init part of module. */
92948 strsect->sh_flags |= SHF_ALLOC;
92949- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
92950+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
92951 info->index.str) | INIT_OFFSET_MASK;
92952 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
92953 }
92954@@ -2335,12 +2370,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
92955 /* Make sure we get permanent strtab: don't use info->strtab. */
92956 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
92957
92958+ pax_open_kernel();
92959+
92960 /* Set types up while we still have access to sections. */
92961 for (i = 0; i < mod->num_symtab; i++)
92962 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
92963
92964- mod->core_symtab = dst = mod->module_core + info->symoffs;
92965- mod->core_strtab = s = mod->module_core + info->stroffs;
92966+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
92967+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
92968 src = mod->symtab;
92969 for (ndst = i = 0; i < mod->num_symtab; i++) {
92970 if (i == 0 ||
92971@@ -2352,6 +2389,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
92972 }
92973 }
92974 mod->core_num_syms = ndst;
92975+
92976+ pax_close_kernel();
92977 }
92978 #else
92979 static inline void layout_symtab(struct module *mod, struct load_info *info)
92980@@ -2385,17 +2424,33 @@ void * __weak module_alloc(unsigned long size)
92981 return vmalloc_exec(size);
92982 }
92983
92984-static void *module_alloc_update_bounds(unsigned long size)
92985+static void *module_alloc_update_bounds_rw(unsigned long size)
92986 {
92987 void *ret = module_alloc(size);
92988
92989 if (ret) {
92990 mutex_lock(&module_mutex);
92991 /* Update module bounds. */
92992- if ((unsigned long)ret < module_addr_min)
92993- module_addr_min = (unsigned long)ret;
92994- if ((unsigned long)ret + size > module_addr_max)
92995- module_addr_max = (unsigned long)ret + size;
92996+ if ((unsigned long)ret < module_addr_min_rw)
92997+ module_addr_min_rw = (unsigned long)ret;
92998+ if ((unsigned long)ret + size > module_addr_max_rw)
92999+ module_addr_max_rw = (unsigned long)ret + size;
93000+ mutex_unlock(&module_mutex);
93001+ }
93002+ return ret;
93003+}
93004+
93005+static void *module_alloc_update_bounds_rx(unsigned long size)
93006+{
93007+ void *ret = module_alloc_exec(size);
93008+
93009+ if (ret) {
93010+ mutex_lock(&module_mutex);
93011+ /* Update module bounds. */
93012+ if ((unsigned long)ret < module_addr_min_rx)
93013+ module_addr_min_rx = (unsigned long)ret;
93014+ if ((unsigned long)ret + size > module_addr_max_rx)
93015+ module_addr_max_rx = (unsigned long)ret + size;
93016 mutex_unlock(&module_mutex);
93017 }
93018 return ret;
93019@@ -2652,7 +2707,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
93020 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
93021
93022 if (info->index.sym == 0) {
93023+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
93024+ /*
93025+	 * avoid potentially printing gibberish on attempted load
93026+ * of a module randomized with a different seed
93027+ */
93028+ pr_warn("module has no symbols (stripped?)\n");
93029+#else
93030 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
93031+#endif
93032 return ERR_PTR(-ENOEXEC);
93033 }
93034
93035@@ -2668,8 +2731,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
93036 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
93037 {
93038 const char *modmagic = get_modinfo(info, "vermagic");
93039+ const char *license = get_modinfo(info, "license");
93040 int err;
93041
93042+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
93043+ if (!license || !license_is_gpl_compatible(license))
93044+ return -ENOEXEC;
93045+#endif
93046+
93047 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
93048 modmagic = NULL;
93049
93050@@ -2694,7 +2763,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
93051 }
93052
93053 /* Set up license info based on the info section */
93054- set_license(mod, get_modinfo(info, "license"));
93055+ set_license(mod, license);
93056
93057 return 0;
93058 }
93059@@ -2788,7 +2857,7 @@ static int move_module(struct module *mod, struct load_info *info)
93060 void *ptr;
93061
93062 /* Do the allocs. */
93063- ptr = module_alloc_update_bounds(mod->core_size);
93064+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
93065 /*
93066 * The pointer to this block is stored in the module structure
93067 * which is inside the block. Just mark it as not being a
93068@@ -2798,11 +2867,11 @@ static int move_module(struct module *mod, struct load_info *info)
93069 if (!ptr)
93070 return -ENOMEM;
93071
93072- memset(ptr, 0, mod->core_size);
93073- mod->module_core = ptr;
93074+ memset(ptr, 0, mod->core_size_rw);
93075+ mod->module_core_rw = ptr;
93076
93077- if (mod->init_size) {
93078- ptr = module_alloc_update_bounds(mod->init_size);
93079+ if (mod->init_size_rw) {
93080+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
93081 /*
93082 * The pointer to this block is stored in the module structure
93083 * which is inside the block. This block doesn't need to be
93084@@ -2811,13 +2880,45 @@ static int move_module(struct module *mod, struct load_info *info)
93085 */
93086 kmemleak_ignore(ptr);
93087 if (!ptr) {
93088- module_free(mod, mod->module_core);
93089+ module_free(mod, mod->module_core_rw);
93090 return -ENOMEM;
93091 }
93092- memset(ptr, 0, mod->init_size);
93093- mod->module_init = ptr;
93094+ memset(ptr, 0, mod->init_size_rw);
93095+ mod->module_init_rw = ptr;
93096 } else
93097- mod->module_init = NULL;
93098+ mod->module_init_rw = NULL;
93099+
93100+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
93101+ kmemleak_not_leak(ptr);
93102+ if (!ptr) {
93103+ if (mod->module_init_rw)
93104+ module_free(mod, mod->module_init_rw);
93105+ module_free(mod, mod->module_core_rw);
93106+ return -ENOMEM;
93107+ }
93108+
93109+ pax_open_kernel();
93110+ memset(ptr, 0, mod->core_size_rx);
93111+ pax_close_kernel();
93112+ mod->module_core_rx = ptr;
93113+
93114+ if (mod->init_size_rx) {
93115+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
93116+ kmemleak_ignore(ptr);
93117+ if (!ptr && mod->init_size_rx) {
93118+ module_free_exec(mod, mod->module_core_rx);
93119+ if (mod->module_init_rw)
93120+ module_free(mod, mod->module_init_rw);
93121+ module_free(mod, mod->module_core_rw);
93122+ return -ENOMEM;
93123+ }
93124+
93125+ pax_open_kernel();
93126+ memset(ptr, 0, mod->init_size_rx);
93127+ pax_close_kernel();
93128+ mod->module_init_rx = ptr;
93129+ } else
93130+ mod->module_init_rx = NULL;
93131
93132 /* Transfer each section which specifies SHF_ALLOC */
93133 pr_debug("final section addresses:\n");
93134@@ -2828,16 +2929,45 @@ static int move_module(struct module *mod, struct load_info *info)
93135 if (!(shdr->sh_flags & SHF_ALLOC))
93136 continue;
93137
93138- if (shdr->sh_entsize & INIT_OFFSET_MASK)
93139- dest = mod->module_init
93140- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
93141- else
93142- dest = mod->module_core + shdr->sh_entsize;
93143+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
93144+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
93145+ dest = mod->module_init_rw
93146+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
93147+ else
93148+ dest = mod->module_init_rx
93149+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
93150+ } else {
93151+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
93152+ dest = mod->module_core_rw + shdr->sh_entsize;
93153+ else
93154+ dest = mod->module_core_rx + shdr->sh_entsize;
93155+ }
93156+
93157+ if (shdr->sh_type != SHT_NOBITS) {
93158+
93159+#ifdef CONFIG_PAX_KERNEXEC
93160+#ifdef CONFIG_X86_64
93161+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
93162+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
93163+#endif
93164+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
93165+ pax_open_kernel();
93166+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
93167+ pax_close_kernel();
93168+ } else
93169+#endif
93170
93171- if (shdr->sh_type != SHT_NOBITS)
93172 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
93173+ }
93174 /* Update sh_addr to point to copy in image. */
93175- shdr->sh_addr = (unsigned long)dest;
93176+
93177+#ifdef CONFIG_PAX_KERNEXEC
93178+ if (shdr->sh_flags & SHF_EXECINSTR)
93179+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
93180+ else
93181+#endif
93182+
93183+ shdr->sh_addr = (unsigned long)dest;
93184 pr_debug("\t0x%lx %s\n",
93185 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
93186 }
93187@@ -2894,12 +3024,12 @@ static void flush_module_icache(const struct module *mod)
93188 * Do it before processing of module parameters, so the module
93189 * can provide parameter accessor functions of its own.
93190 */
93191- if (mod->module_init)
93192- flush_icache_range((unsigned long)mod->module_init,
93193- (unsigned long)mod->module_init
93194- + mod->init_size);
93195- flush_icache_range((unsigned long)mod->module_core,
93196- (unsigned long)mod->module_core + mod->core_size);
93197+ if (mod->module_init_rx)
93198+ flush_icache_range((unsigned long)mod->module_init_rx,
93199+ (unsigned long)mod->module_init_rx
93200+ + mod->init_size_rx);
93201+ flush_icache_range((unsigned long)mod->module_core_rx,
93202+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
93203
93204 set_fs(old_fs);
93205 }
93206@@ -2956,8 +3086,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
93207 static void module_deallocate(struct module *mod, struct load_info *info)
93208 {
93209 percpu_modfree(mod);
93210- module_free(mod, mod->module_init);
93211- module_free(mod, mod->module_core);
93212+ module_free_exec(mod, mod->module_init_rx);
93213+ module_free_exec(mod, mod->module_core_rx);
93214+ module_free(mod, mod->module_init_rw);
93215+ module_free(mod, mod->module_core_rw);
93216 }
93217
93218 int __weak module_finalize(const Elf_Ehdr *hdr,
93219@@ -2970,7 +3102,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
93220 static int post_relocation(struct module *mod, const struct load_info *info)
93221 {
93222 /* Sort exception table now relocations are done. */
93223+ pax_open_kernel();
93224 sort_extable(mod->extable, mod->extable + mod->num_exentries);
93225+ pax_close_kernel();
93226
93227 /* Copy relocated percpu area over. */
93228 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
93229@@ -3079,11 +3213,12 @@ static int do_init_module(struct module *mod)
93230 mod->strtab = mod->core_strtab;
93231 #endif
93232 unset_module_init_ro_nx(mod);
93233- module_free(mod, mod->module_init);
93234- mod->module_init = NULL;
93235- mod->init_size = 0;
93236- mod->init_ro_size = 0;
93237- mod->init_text_size = 0;
93238+ module_free(mod, mod->module_init_rw);
93239+ module_free_exec(mod, mod->module_init_rx);
93240+ mod->module_init_rw = NULL;
93241+ mod->module_init_rx = NULL;
93242+ mod->init_size_rw = 0;
93243+ mod->init_size_rx = 0;
93244 mutex_unlock(&module_mutex);
93245 wake_up_all(&module_wq);
93246
93247@@ -3151,16 +3286,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
93248 module_bug_finalize(info->hdr, info->sechdrs, mod);
93249
93250 /* Set RO and NX regions for core */
93251- set_section_ro_nx(mod->module_core,
93252- mod->core_text_size,
93253- mod->core_ro_size,
93254- mod->core_size);
93255+ set_section_ro_nx(mod->module_core_rx,
93256+ mod->core_size_rx,
93257+ mod->core_size_rx,
93258+ mod->core_size_rx);
93259
93260 /* Set RO and NX regions for init */
93261- set_section_ro_nx(mod->module_init,
93262- mod->init_text_size,
93263- mod->init_ro_size,
93264- mod->init_size);
93265+ set_section_ro_nx(mod->module_init_rx,
93266+ mod->init_size_rx,
93267+ mod->init_size_rx,
93268+ mod->init_size_rx);
93269
93270 /* Mark state as coming so strong_try_module_get() ignores us,
93271 * but kallsyms etc. can see us. */
93272@@ -3244,9 +3379,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
93273 if (err)
93274 goto free_unload;
93275
93276+ /* Now copy in args */
93277+ mod->args = strndup_user(uargs, ~0UL >> 1);
93278+ if (IS_ERR(mod->args)) {
93279+ err = PTR_ERR(mod->args);
93280+ goto free_unload;
93281+ }
93282+
93283 /* Set up MODINFO_ATTR fields */
93284 setup_modinfo(mod, info);
93285
93286+#ifdef CONFIG_GRKERNSEC_MODHARDEN
93287+ {
93288+ char *p, *p2;
93289+
93290+ if (strstr(mod->args, "grsec_modharden_netdev")) {
93291+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
93292+ err = -EPERM;
93293+ goto free_modinfo;
93294+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
93295+ p += sizeof("grsec_modharden_normal") - 1;
93296+ p2 = strstr(p, "_");
93297+ if (p2) {
93298+ *p2 = '\0';
93299+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
93300+ *p2 = '_';
93301+ }
93302+ err = -EPERM;
93303+ goto free_modinfo;
93304+ }
93305+ }
93306+#endif
93307+
93308 /* Fix up syms, so that st_value is a pointer to location. */
93309 err = simplify_symbols(mod, info);
93310 if (err < 0)
93311@@ -3262,13 +3426,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
93312
93313 flush_module_icache(mod);
93314
93315- /* Now copy in args */
93316- mod->args = strndup_user(uargs, ~0UL >> 1);
93317- if (IS_ERR(mod->args)) {
93318- err = PTR_ERR(mod->args);
93319- goto free_arch_cleanup;
93320- }
93321-
93322 dynamic_debug_setup(info->debug, info->num_debug);
93323
93324 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
93325@@ -3311,11 +3468,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
93326 ddebug_cleanup:
93327 dynamic_debug_remove(info->debug);
93328 synchronize_sched();
93329- kfree(mod->args);
93330- free_arch_cleanup:
93331 module_arch_cleanup(mod);
93332 free_modinfo:
93333 free_modinfo(mod);
93334+ kfree(mod->args);
93335 free_unload:
93336 module_unload_free(mod);
93337 unlink_mod:
93338@@ -3398,10 +3554,16 @@ static const char *get_ksymbol(struct module *mod,
93339 unsigned long nextval;
93340
93341 /* At worse, next value is at end of module */
93342- if (within_module_init(addr, mod))
93343- nextval = (unsigned long)mod->module_init+mod->init_text_size;
93344+ if (within_module_init_rx(addr, mod))
93345+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
93346+ else if (within_module_init_rw(addr, mod))
93347+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
93348+ else if (within_module_core_rx(addr, mod))
93349+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
93350+ else if (within_module_core_rw(addr, mod))
93351+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
93352 else
93353- nextval = (unsigned long)mod->module_core+mod->core_text_size;
93354+ return NULL;
93355
93356 /* Scan for closest preceding symbol, and next symbol. (ELF
93357 starts real symbols at 1). */
93358@@ -3652,7 +3814,7 @@ static int m_show(struct seq_file *m, void *p)
93359 return 0;
93360
93361 seq_printf(m, "%s %u",
93362- mod->name, mod->init_size + mod->core_size);
93363+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
93364 print_unload_info(m, mod);
93365
93366 /* Informative for users. */
93367@@ -3661,7 +3823,7 @@ static int m_show(struct seq_file *m, void *p)
93368 mod->state == MODULE_STATE_COMING ? "Loading":
93369 "Live");
93370 /* Used by oprofile and other similar tools. */
93371- seq_printf(m, " 0x%pK", mod->module_core);
93372+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
93373
93374 /* Taints info */
93375 if (mod->taints)
93376@@ -3697,7 +3859,17 @@ static const struct file_operations proc_modules_operations = {
93377
93378 static int __init proc_modules_init(void)
93379 {
93380+#ifndef CONFIG_GRKERNSEC_HIDESYM
93381+#ifdef CONFIG_GRKERNSEC_PROC_USER
93382+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
93383+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
93384+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
93385+#else
93386 proc_create("modules", 0, NULL, &proc_modules_operations);
93387+#endif
93388+#else
93389+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
93390+#endif
93391 return 0;
93392 }
93393 module_init(proc_modules_init);
93394@@ -3758,14 +3930,14 @@ struct module *__module_address(unsigned long addr)
93395 {
93396 struct module *mod;
93397
93398- if (addr < module_addr_min || addr > module_addr_max)
93399+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
93400+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
93401 return NULL;
93402
93403 list_for_each_entry_rcu(mod, &modules, list) {
93404 if (mod->state == MODULE_STATE_UNFORMED)
93405 continue;
93406- if (within_module_core(addr, mod)
93407- || within_module_init(addr, mod))
93408+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
93409 return mod;
93410 }
93411 return NULL;
93412@@ -3800,11 +3972,20 @@ bool is_module_text_address(unsigned long addr)
93413 */
93414 struct module *__module_text_address(unsigned long addr)
93415 {
93416- struct module *mod = __module_address(addr);
93417+ struct module *mod;
93418+
93419+#ifdef CONFIG_X86_32
93420+ addr = ktla_ktva(addr);
93421+#endif
93422+
93423+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
93424+ return NULL;
93425+
93426+ mod = __module_address(addr);
93427+
93428 if (mod) {
93429 /* Make sure it's within the text section. */
93430- if (!within(addr, mod->module_init, mod->init_text_size)
93431- && !within(addr, mod->module_core, mod->core_text_size))
93432+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
93433 mod = NULL;
93434 }
93435 return mod;
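
Everything in the module.c diff serves one idea: a module no longer occupies a single blob with RO/NX carve-outs, but two blobs with fixed permissions, module_core_rw/module_init_rw for data and module_core_rx/module_init_rx for text and ro-data (the latter allocated with module_alloc_exec() and written only between pax_open_kernel()/pax_close_kernel()). Sizes, bounds tracking, kallsyms placement, /proc/modules output and __module_text_address() are all split accordingly. The sorting predicate that assigns each SHF_ALLOC section to a blob, lifted into a runnable sketch using glibc's elf.h constants:

#include <stdio.h>
#include <elf.h>

static const char *classify(Elf64_Xword sh_flags)
{
	/* same test as the patched layout_sections()/move_module() */
	if ((sh_flags & SHF_WRITE) || !(sh_flags & SHF_ALLOC))
		return "rw blob";
	return "rx blob";
}

int main(void)
{
	printf(".text   -> %s\n", classify(SHF_ALLOC | SHF_EXECINSTR));
	printf(".rodata -> %s\n", classify(SHF_ALLOC));
	printf(".data   -> %s\n", classify(SHF_ALLOC | SHF_WRITE));
	printf(".bss    -> %s\n", classify(SHF_ALLOC | SHF_WRITE));
	return 0;
}

With that split, set_section_ro_nx() can mark the whole rx blob read-only and executable and the whole rw blob non-executable, leaving no window where a module page is both writable and executable.
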
93436diff --git a/kernel/notifier.c b/kernel/notifier.c
93437index 4803da6..1c5eea6 100644
93438--- a/kernel/notifier.c
93439+++ b/kernel/notifier.c
93440@@ -5,6 +5,7 @@
93441 #include <linux/rcupdate.h>
93442 #include <linux/vmalloc.h>
93443 #include <linux/reboot.h>
93444+#include <linux/mm.h>
93445
93446 /*
93447 * Notifier list for kernel code which wants to be called
93448@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
93449 while ((*nl) != NULL) {
93450 if (n->priority > (*nl)->priority)
93451 break;
93452- nl = &((*nl)->next);
93453+ nl = (struct notifier_block **)&((*nl)->next);
93454 }
93455- n->next = *nl;
93456+ pax_open_kernel();
93457+ *(const void **)&n->next = *nl;
93458 rcu_assign_pointer(*nl, n);
93459+ pax_close_kernel();
93460 return 0;
93461 }
93462
93463@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
93464 return 0;
93465 if (n->priority > (*nl)->priority)
93466 break;
93467- nl = &((*nl)->next);
93468+ nl = (struct notifier_block **)&((*nl)->next);
93469 }
93470- n->next = *nl;
93471+ pax_open_kernel();
93472+ *(const void **)&n->next = *nl;
93473 rcu_assign_pointer(*nl, n);
93474+ pax_close_kernel();
93475 return 0;
93476 }
93477
93478@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
93479 {
93480 while ((*nl) != NULL) {
93481 if ((*nl) == n) {
93482+ pax_open_kernel();
93483 rcu_assign_pointer(*nl, n->next);
93484+ pax_close_kernel();
93485 return 0;
93486 }
93487- nl = &((*nl)->next);
93488+ nl = (struct notifier_block **)&((*nl)->next);
93489 }
93490 return -ENOENT;
93491 }
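
The notifier.c hunks leave the priority-ordered insertion untouched and only bracket the pointer stores with pax_open_kernel()/pax_close_kernel(), because constified notifier chains live in read-only memory. The walk itself, stripped of the RCU and PaX plumbing:

#include <stdio.h>

struct notifier_block {
	int priority;
	struct notifier_block *next;
};

static int chain_register(struct notifier_block **nl, struct notifier_block *n)
{
	while (*nl != NULL) {
		if (n->priority > (*nl)->priority)
			break;
		nl = &(*nl)->next;
	}
	n->next = *nl;	/* the store the patch wraps in pax_open_kernel() */
	*nl = n;	/* rcu_assign_pointer() in the kernel */
	return 0;
}

int main(void)
{
	struct notifier_block *head = NULL;
	struct notifier_block a = { 10, NULL }, b = { 30, NULL }, c = { 20, NULL };
	chain_register(&head, &a);
	chain_register(&head, &b);
	chain_register(&head, &c);
	for (struct notifier_block *p = head; p; p = p->next)
		printf("priority %d\n", p->priority);	/* 30, 20, 10 */
	return 0;
}
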
93492diff --git a/kernel/padata.c b/kernel/padata.c
93493index 161402f..598814c 100644
93494--- a/kernel/padata.c
93495+++ b/kernel/padata.c
93496@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
93497 * seq_nr mod. number of cpus in use.
93498 */
93499
93500- seq_nr = atomic_inc_return(&pd->seq_nr);
93501+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
93502 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
93503
93504 return padata_index_to_cpu(pd, cpu_index);
93505@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
93506 padata_init_pqueues(pd);
93507 padata_init_squeues(pd);
93508 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
93509- atomic_set(&pd->seq_nr, -1);
93510+ atomic_set_unchecked(&pd->seq_nr, -1);
93511 atomic_set(&pd->reorder_objects, 0);
93512 atomic_set(&pd->refcnt, 0);
93513 pd->pinst = pinst;
93514diff --git a/kernel/panic.c b/kernel/panic.c
93515index 62e16ce..9db5047b 100644
93516--- a/kernel/panic.c
93517+++ b/kernel/panic.c
93518@@ -53,7 +53,7 @@ EXPORT_SYMBOL(panic_blink);
93519 /*
93520 * Stop ourself in panic -- architecture code may override this
93521 */
93522-void __weak panic_smp_self_stop(void)
93523+void __weak __noreturn panic_smp_self_stop(void)
93524 {
93525 while (1)
93526 cpu_relax();
93527@@ -420,7 +420,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
93528 disable_trace_on_warning();
93529
93530 pr_warn("------------[ cut here ]------------\n");
93531- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
93532+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
93533 raw_smp_processor_id(), current->pid, file, line, caller);
93534
93535 if (args)
93536@@ -474,7 +474,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
93537 */
93538 __visible void __stack_chk_fail(void)
93539 {
93540- panic("stack-protector: Kernel stack is corrupted in: %p\n",
93541+ dump_stack();
93542+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
93543 __builtin_return_address(0));
93544 }
93545 EXPORT_SYMBOL(__stack_chk_fail);
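
panic.c gains three small hardenings: panic_smp_self_stop() is marked __noreturn since it spins forever, the warning and stack-protector messages switch to the patch's %pA pointer format, and __stack_chk_fail() dumps the stack before panicking. The noreturn contract, in standard C11:

#include <stdio.h>
#include <stdnoreturn.h>

static noreturn void self_stop(void)
{
	fprintf(stderr, "halting\n");
	for (;;)
		;	/* cpu_relax() loop in the kernel */
}

int main(void)
{
	if (0)
		self_stop();	/* the compiler knows nothing follows a real call */
	puts("still running");
	return 0;
}
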
93546diff --git a/kernel/pid.c b/kernel/pid.c
93547index 9b9a266..c20ef80 100644
93548--- a/kernel/pid.c
93549+++ b/kernel/pid.c
93550@@ -33,6 +33,7 @@
93551 #include <linux/rculist.h>
93552 #include <linux/bootmem.h>
93553 #include <linux/hash.h>
93554+#include <linux/security.h>
93555 #include <linux/pid_namespace.h>
93556 #include <linux/init_task.h>
93557 #include <linux/syscalls.h>
93558@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
93559
93560 int pid_max = PID_MAX_DEFAULT;
93561
93562-#define RESERVED_PIDS 300
93563+#define RESERVED_PIDS 500
93564
93565 int pid_max_min = RESERVED_PIDS + 1;
93566 int pid_max_max = PID_MAX_LIMIT;
93567@@ -445,10 +446,18 @@ EXPORT_SYMBOL(pid_task);
93568 */
93569 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
93570 {
93571+ struct task_struct *task;
93572+
93573 rcu_lockdep_assert(rcu_read_lock_held(),
93574 "find_task_by_pid_ns() needs rcu_read_lock()"
93575 " protection");
93576- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
93577+
93578+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
93579+
93580+ if (gr_pid_is_chrooted(task))
93581+ return NULL;
93582+
93583+ return task;
93584 }
93585
93586 struct task_struct *find_task_by_vpid(pid_t vnr)
93587@@ -456,6 +465,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
93588 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
93589 }
93590
93591+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
93592+{
93593+ rcu_lockdep_assert(rcu_read_lock_held(),
93594+ "find_task_by_pid_ns() needs rcu_read_lock()"
93595+ " protection");
93596+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
93597+}
93598+
93599 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
93600 {
93601 struct pid *pid;
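
pid.c raises RESERVED_PIDS from 300 to 500 and makes find_task_by_pid_ns() answer NULL for tasks a chrooted caller should not see; the added find_task_by_vpid_unrestricted() keeps the old behaviour for the few internal callers that need it. The lookup filter, modelled with a flag in place of the real chroot bookkeeping:

#include <stdio.h>
#include <stddef.h>

struct task { int pid; int outside_callers_chroot; };

static struct task table[] = { { 1, 1 }, { 300, 1 }, { 4321, 0 } };

static struct task *find_task_by_pid(int pid)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].pid != pid)
			continue;
		if (table[i].outside_callers_chroot)	/* gr_pid_is_chrooted() */
			return NULL;
		return &table[i];
	}
	return NULL;
}

int main(void)
{
	printf("pid 1    -> %s\n", find_task_by_pid(1) ? "visible" : "hidden");
	printf("pid 4321 -> %s\n", find_task_by_pid(4321) ? "visible" : "hidden");
	return 0;
}
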
93602diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
93603index db95d8e..a0ca23f 100644
93604--- a/kernel/pid_namespace.c
93605+++ b/kernel/pid_namespace.c
93606@@ -253,7 +253,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
93607 void __user *buffer, size_t *lenp, loff_t *ppos)
93608 {
93609 struct pid_namespace *pid_ns = task_active_pid_ns(current);
93610- struct ctl_table tmp = *table;
93611+ ctl_table_no_const tmp = *table;
93612
93613 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
93614 return -EPERM;
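
ctl_table_no_const is the patch's writable twin of struct ctl_table, needed because its constification plugin can move registered tables into read-only memory; a handler that must tweak a field first takes a stack copy, exactly as the hunk does with tmp. A userspace approximation of the copy-then-mutate pattern (the struct layout here is invented for illustration):

#include <stdio.h>

struct ctl { const char *name; int mode; };

/* The registered table stays in (conceptually) read-only storage. */
static const struct ctl master = { "pid_max", 0644 };

static void handle_write(int privileged)
{
	struct ctl tmp = master;	/* mutable stack copy, as in the hunk */

	if (!privileged)
		tmp.mode = 0444;	/* tweak the copy, never the master */
	printf("%s mode=%o\n", tmp.name, (unsigned)tmp.mode);
}

int main(void)
{
	handle_write(0);
	return 0;
}
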
93615diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
93616index 3b89464..5e38379 100644
93617--- a/kernel/posix-cpu-timers.c
93618+++ b/kernel/posix-cpu-timers.c
93619@@ -1464,14 +1464,14 @@ struct k_clock clock_posix_cpu = {
93620
93621 static __init int init_posix_cpu_timers(void)
93622 {
93623- struct k_clock process = {
93624+ static struct k_clock process = {
93625 .clock_getres = process_cpu_clock_getres,
93626 .clock_get = process_cpu_clock_get,
93627 .timer_create = process_cpu_timer_create,
93628 .nsleep = process_cpu_nsleep,
93629 .nsleep_restart = process_cpu_nsleep_restart,
93630 };
93631- struct k_clock thread = {
93632+ static struct k_clock thread = {
93633 .clock_getres = thread_cpu_clock_getres,
93634 .clock_get = thread_cpu_clock_get,
93635 .timer_create = thread_cpu_timer_create,
93636diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
93637index 424c2d4..679242f 100644
93638--- a/kernel/posix-timers.c
93639+++ b/kernel/posix-timers.c
93640@@ -43,6 +43,7 @@
93641 #include <linux/hash.h>
93642 #include <linux/posix-clock.h>
93643 #include <linux/posix-timers.h>
93644+#include <linux/grsecurity.h>
93645 #include <linux/syscalls.h>
93646 #include <linux/wait.h>
93647 #include <linux/workqueue.h>
93648@@ -122,7 +123,7 @@ static DEFINE_SPINLOCK(hash_lock);
93649 * which we beg off on and pass to do_sys_settimeofday().
93650 */
93651
93652-static struct k_clock posix_clocks[MAX_CLOCKS];
93653+static struct k_clock *posix_clocks[MAX_CLOCKS];
93654
93655 /*
93656 * These ones are defined below.
93657@@ -275,7 +276,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
93658 */
93659 static __init int init_posix_timers(void)
93660 {
93661- struct k_clock clock_realtime = {
93662+ static struct k_clock clock_realtime = {
93663 .clock_getres = hrtimer_get_res,
93664 .clock_get = posix_clock_realtime_get,
93665 .clock_set = posix_clock_realtime_set,
93666@@ -287,7 +288,7 @@ static __init int init_posix_timers(void)
93667 .timer_get = common_timer_get,
93668 .timer_del = common_timer_del,
93669 };
93670- struct k_clock clock_monotonic = {
93671+ static struct k_clock clock_monotonic = {
93672 .clock_getres = hrtimer_get_res,
93673 .clock_get = posix_ktime_get_ts,
93674 .nsleep = common_nsleep,
93675@@ -297,19 +298,19 @@ static __init int init_posix_timers(void)
93676 .timer_get = common_timer_get,
93677 .timer_del = common_timer_del,
93678 };
93679- struct k_clock clock_monotonic_raw = {
93680+ static struct k_clock clock_monotonic_raw = {
93681 .clock_getres = hrtimer_get_res,
93682 .clock_get = posix_get_monotonic_raw,
93683 };
93684- struct k_clock clock_realtime_coarse = {
93685+ static struct k_clock clock_realtime_coarse = {
93686 .clock_getres = posix_get_coarse_res,
93687 .clock_get = posix_get_realtime_coarse,
93688 };
93689- struct k_clock clock_monotonic_coarse = {
93690+ static struct k_clock clock_monotonic_coarse = {
93691 .clock_getres = posix_get_coarse_res,
93692 .clock_get = posix_get_monotonic_coarse,
93693 };
93694- struct k_clock clock_tai = {
93695+ static struct k_clock clock_tai = {
93696 .clock_getres = hrtimer_get_res,
93697 .clock_get = posix_get_tai,
93698 .nsleep = common_nsleep,
93699@@ -319,7 +320,7 @@ static __init int init_posix_timers(void)
93700 .timer_get = common_timer_get,
93701 .timer_del = common_timer_del,
93702 };
93703- struct k_clock clock_boottime = {
93704+ static struct k_clock clock_boottime = {
93705 .clock_getres = hrtimer_get_res,
93706 .clock_get = posix_get_boottime,
93707 .nsleep = common_nsleep,
93708@@ -531,7 +532,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
93709 return;
93710 }
93711
93712- posix_clocks[clock_id] = *new_clock;
93713+ posix_clocks[clock_id] = new_clock;
93714 }
93715 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
93716
93717@@ -577,9 +578,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
93718 return (id & CLOCKFD_MASK) == CLOCKFD ?
93719 &clock_posix_dynamic : &clock_posix_cpu;
93720
93721- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
93722+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
93723 return NULL;
93724- return &posix_clocks[id];
93725+ return posix_clocks[id];
93726 }
93727
93728 static int common_timer_create(struct k_itimer *new_timer)
93729@@ -597,7 +598,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
93730 struct k_clock *kc = clockid_to_kclock(which_clock);
93731 struct k_itimer *new_timer;
93732 int error, new_timer_id;
93733- sigevent_t event;
93734+ sigevent_t event = { };
93735 int it_id_set = IT_ID_NOT_SET;
93736
93737 if (!kc)
93738@@ -1011,6 +1012,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
93739 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
93740 return -EFAULT;
93741
93742+ /* only the CLOCK_REALTIME clock can be set; all other clocks
93743+ have their clock_set fptr set to a nosettime dummy function.
93744+ CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
93745+ call common_clock_set, which calls do_sys_settimeofday, which
93746+ we hook.
93747+ */
93748+
93749 return kc->clock_set(which_clock, &new_tp);
93750 }
93751
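
Because posix_clocks[] now stores pointers instead of struct copies (presumably so the clock descriptors can live in protected, long-lived storage), every k_clock handed to posix_timers_register_clock() must outlive the registering function; that is why each on-stack initializer in init_posix_timers() gains `static`. The `sigevent_t event = { }` change is the companion fix: zero the struct so the no-user-sigevent path never reads uninitialized stack. A runnable illustration of the dangling-pointer hazard the `static` keyword removes:

#include <stdio.h>

struct k_clock { int (*getres)(void); };

static struct k_clock *clocks[4];	/* registry of pointers, as in the hunk */

static int res(void) { return 1; }

static void register_clock(int id, struct k_clock *kc)
{
	clocks[id] = kc;	/* stores the pointer, not a copy */
}

static void init(void)
{
	/* Without "static" this object would dangle once init() returns. */
	static struct k_clock realtime = { .getres = res };

	register_clock(0, &realtime);
}

int main(void)
{
	init();
	printf("%d\n", clocks[0]->getres());
	return 0;
}
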
93752diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
93753index 9a83d78..128bfc0 100644
93754--- a/kernel/power/Kconfig
93755+++ b/kernel/power/Kconfig
93756@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
93757 config HIBERNATION
93758 bool "Hibernation (aka 'suspend to disk')"
93759 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
93760+ depends on !GRKERNSEC_KMEM
93761+ depends on !PAX_MEMORY_SANITIZE
93762 select HIBERNATE_CALLBACKS
93763 select LZO_COMPRESS
93764 select LZO_DECOMPRESS
93765diff --git a/kernel/power/process.c b/kernel/power/process.c
93766index 4ee194e..925778f 100644
93767--- a/kernel/power/process.c
93768+++ b/kernel/power/process.c
93769@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
93770 unsigned int elapsed_msecs;
93771 bool wakeup = false;
93772 int sleep_usecs = USEC_PER_MSEC;
93773+ bool timedout = false;
93774
93775 do_gettimeofday(&start);
93776
93777@@ -45,13 +46,20 @@ static int try_to_freeze_tasks(bool user_only)
93778
93779 while (true) {
93780 todo = 0;
93781+ if (time_after(jiffies, end_time))
93782+ timedout = true;
93783 read_lock(&tasklist_lock);
93784 do_each_thread(g, p) {
93785 if (p == current || !freeze_task(p))
93786 continue;
93787
93788- if (!freezer_should_skip(p))
93789+ if (!freezer_should_skip(p)) {
93790 todo++;
93791+ if (timedout) {
93792+ printk(KERN_ERR "Task refusing to freeze:\n");
93793+ sched_show_task(p);
93794+ }
93795+ }
93796 } while_each_thread(g, p);
93797 read_unlock(&tasklist_lock);
93798
93799@@ -60,7 +68,7 @@ static int try_to_freeze_tasks(bool user_only)
93800 todo += wq_busy;
93801 }
93802
93803- if (!todo || time_after(jiffies, end_time))
93804+ if (!todo || timedout)
93805 break;
93806
93807 if (pm_wakeup_pending()) {
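
The freezer change samples the deadline once at the top of each scan and latches it in `timedout`, so the pass that decides to give up is the same pass that prints the offenders via sched_show_task(); evaluating time_after() twice could let the report and the exit disagree. The same latch-once shape in miniature:

#include <stdio.h>
#include <time.h>

int main(void)
{
	time_t end_time = time(NULL) - 1;	/* already expired: forces the report path */
	int todo = 3;				/* pretend 3 tasks resist freezing */
	int timedout = 0;

	while (1) {
		if (time(NULL) > end_time)
			timedout = 1;		/* latched once per pass */
		if (todo && timedout)
			printf("%d task(s) refusing to freeze\n", todo);
		if (!todo || timedout)		/* the same flag decides the exit */
			break;
		todo--;
	}
	return 0;
}
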
93808diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
93809index 13e839d..8a71f12 100644
93810--- a/kernel/printk/printk.c
93811+++ b/kernel/printk/printk.c
93812@@ -480,6 +480,11 @@ static int check_syslog_permissions(int type, bool from_file)
93813 if (from_file && type != SYSLOG_ACTION_OPEN)
93814 return 0;
93815
93816+#ifdef CONFIG_GRKERNSEC_DMESG
93817+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
93818+ return -EPERM;
93819+#endif
93820+
93821 if (syslog_action_restricted(type)) {
93822 if (capable(CAP_SYSLOG))
93823 return 0;
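
The dmesg gate is an extra deny layered in front of the stock checks: with the GRKERNSEC_DMESG knob on, lacking both CAP_SYSLOG and CAP_SYS_ADMIN is fatal before syslog_action_restricted() is even consulted (capable_nolog() is grsecurity's non-logging capable()). A small model of the deny-first layering; has_cap() is a hypothetical stand-in:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool dmesg_restrict = true;	/* models grsec_enable_dmesg */

/* Hypothetical stand-in for capable()/capable_nolog(). */
static bool has_cap(const char *name)
{
	(void)name;
	return false;
}

static int check_syslog(void)
{
	if (dmesg_restrict && !has_cap("CAP_SYSLOG") &&
	    !has_cap("CAP_SYS_ADMIN"))
		return -EPERM;	/* hardened deny wins first */
	return 0;		/* otherwise fall through to stock policy */
}

int main(void)
{
	printf("%d\n", check_syslog());
	return 0;
}
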
93824diff --git a/kernel/profile.c b/kernel/profile.c
93825index 54bf5ba..df6e0a2 100644
93826--- a/kernel/profile.c
93827+++ b/kernel/profile.c
93828@@ -37,7 +37,7 @@ struct profile_hit {
93829 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
93830 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
93831
93832-static atomic_t *prof_buffer;
93833+static atomic_unchecked_t *prof_buffer;
93834 static unsigned long prof_len, prof_shift;
93835
93836 int prof_on __read_mostly;
93837@@ -256,7 +256,7 @@ static void profile_flip_buffers(void)
93838 hits[i].pc = 0;
93839 continue;
93840 }
93841- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
93842+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
93843 hits[i].hits = hits[i].pc = 0;
93844 }
93845 }
93846@@ -317,9 +317,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
93847 * Add the current hit(s) and flush the write-queue out
93848 * to the global buffer:
93849 */
93850- atomic_add(nr_hits, &prof_buffer[pc]);
93851+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
93852 for (i = 0; i < NR_PROFILE_HIT; ++i) {
93853- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
93854+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
93855 hits[i].pc = hits[i].hits = 0;
93856 }
93857 out:
93858@@ -394,7 +394,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
93859 {
93860 unsigned long pc;
93861 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
93862- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
93863+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
93864 }
93865 #endif /* !CONFIG_SMP */
93866
93867@@ -490,7 +490,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
93868 return -EFAULT;
93869 buf++; p++; count--; read++;
93870 }
93871- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
93872+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
93873 if (copy_to_user(buf, (void *)pnt, count))
93874 return -EFAULT;
93875 read += count;
93876@@ -521,7 +521,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
93877 }
93878 #endif
93879 profile_discard_flip_buffers();
93880- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
93881+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
93882 return count;
93883 }
93884
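
prof_buffer is a pure statistics array, so the hunk migrates it to atomic_unchecked_t: under PaX REFCOUNT the plain atomic_t operations trap on overflow, which is the right policy for reference counts but wrong for hit counters that may legitimately wrap. A hedged userspace model of the split (REFCOUNT's real mechanism is arch-specific; a saturation check stands in for the trap):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint hits;	/* "unchecked": wraparound is harmless */
static atomic_uint refcount;	/* "checked": overflow would be a bug  */

static void stat_add(atomic_uint *c, unsigned v)
{
	atomic_fetch_add_explicit(c, v, memory_order_relaxed);
}

static int ref_inc(atomic_uint *c)
{
	unsigned old = atomic_load(c);

	do {
		if (old == UINT_MAX)
			return -1;	/* stand-in for the REFCOUNT trap */
	} while (!atomic_compare_exchange_weak(c, &old, old + 1));
	return 0;
}

int main(void)
{
	stat_add(&hits, 3);
	printf("hits=%u ref=%d\n", atomic_load(&hits), ref_inc(&refcount));
	return 0;
}
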
93885diff --git a/kernel/ptrace.c b/kernel/ptrace.c
93886index adf9862..9d86345 100644
93887--- a/kernel/ptrace.c
93888+++ b/kernel/ptrace.c
93889@@ -327,7 +327,7 @@ static int ptrace_attach(struct task_struct *task, long request,
93890 if (seize)
93891 flags |= PT_SEIZED;
93892 rcu_read_lock();
93893- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
93894+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
93895 flags |= PT_PTRACE_CAP;
93896 rcu_read_unlock();
93897 task->ptrace = flags;
93898@@ -538,7 +538,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
93899 break;
93900 return -EIO;
93901 }
93902- if (copy_to_user(dst, buf, retval))
93903+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
93904 return -EFAULT;
93905 copied += retval;
93906 src += retval;
93907@@ -806,7 +806,7 @@ int ptrace_request(struct task_struct *child, long request,
93908 bool seized = child->ptrace & PT_SEIZED;
93909 int ret = -EIO;
93910 siginfo_t siginfo, *si;
93911- void __user *datavp = (void __user *) data;
93912+ void __user *datavp = (__force void __user *) data;
93913 unsigned long __user *datalp = datavp;
93914 unsigned long flags;
93915
93916@@ -1052,14 +1052,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
93917 goto out;
93918 }
93919
93920+ if (gr_handle_ptrace(child, request)) {
93921+ ret = -EPERM;
93922+ goto out_put_task_struct;
93923+ }
93924+
93925 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
93926 ret = ptrace_attach(child, request, addr, data);
93927 /*
93928 * Some architectures need to do book-keeping after
93929 * a ptrace attach.
93930 */
93931- if (!ret)
93932+ if (!ret) {
93933 arch_ptrace_attach(child);
93934+ gr_audit_ptrace(child);
93935+ }
93936 goto out_put_task_struct;
93937 }
93938
93939@@ -1087,7 +1094,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
93940 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
93941 if (copied != sizeof(tmp))
93942 return -EIO;
93943- return put_user(tmp, (unsigned long __user *)data);
93944+ return put_user(tmp, (__force unsigned long __user *)data);
93945 }
93946
93947 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
93948@@ -1181,7 +1188,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
93949 }
93950
93951 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
93952- compat_long_t, addr, compat_long_t, data)
93953+ compat_ulong_t, addr, compat_ulong_t, data)
93954 {
93955 struct task_struct *child;
93956 long ret;
93957@@ -1197,14 +1204,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
93958 goto out;
93959 }
93960
93961+ if (gr_handle_ptrace(child, request)) {
93962+ ret = -EPERM;
93963+ goto out_put_task_struct;
93964+ }
93965+
93966 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
93967 ret = ptrace_attach(child, request, addr, data);
93968 /*
93969 * Some architectures need to do book-keeping after
93970 * a ptrace attach.
93971 */
93972- if (!ret)
93973+ if (!ret) {
93974 arch_ptrace_attach(child);
93975+ gr_audit_ptrace(child);
93976+ }
93977 goto out_put_task_struct;
93978 }
93979
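
Besides wiring in the gr_handle_ptrace()/gr_audit_ptrace() hooks, the ptrace_readdata() change adds a belt-and-braces bound: even if access_process_vm() ever claimed more than sizeof(buf) bytes, the oversized copy_to_user() is refused rather than leaking adjacent stack. The same clamp in a runnable sketch:

#include <stdio.h>
#include <string.h>

/* produce() models access_process_vm(): fills the buffer and returns
 * the byte count it claims to have written. */
static long produce(char *buf, size_t cap)
{
	memset(buf, 'x', cap);
	return (long)cap;
}

static int copy_out(char *dst, size_t want)
{
	char buf[128];
	long got = produce(buf, want < sizeof(buf) ? want : sizeof(buf));

	if (got < 0 || (size_t)got > sizeof(buf))
		return -1;	/* refuse rather than over-read the stack buffer */
	memcpy(dst, buf, (size_t)got);
	return 0;
}

int main(void)
{
	char out[128];

	printf("%d\n", copy_out(out, 64));
	return 0;
}
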
93980diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
93981index 948a769..5ca842b 100644
93982--- a/kernel/rcu/rcutorture.c
93983+++ b/kernel/rcu/rcutorture.c
93984@@ -124,12 +124,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
93985 rcu_torture_count) = { 0 };
93986 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
93987 rcu_torture_batch) = { 0 };
93988-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
93989-static atomic_t n_rcu_torture_alloc;
93990-static atomic_t n_rcu_torture_alloc_fail;
93991-static atomic_t n_rcu_torture_free;
93992-static atomic_t n_rcu_torture_mberror;
93993-static atomic_t n_rcu_torture_error;
93994+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
93995+static atomic_unchecked_t n_rcu_torture_alloc;
93996+static atomic_unchecked_t n_rcu_torture_alloc_fail;
93997+static atomic_unchecked_t n_rcu_torture_free;
93998+static atomic_unchecked_t n_rcu_torture_mberror;
93999+static atomic_unchecked_t n_rcu_torture_error;
94000 static long n_rcu_torture_barrier_error;
94001 static long n_rcu_torture_boost_ktrerror;
94002 static long n_rcu_torture_boost_rterror;
94003@@ -200,11 +200,11 @@ rcu_torture_alloc(void)
94004
94005 spin_lock_bh(&rcu_torture_lock);
94006 if (list_empty(&rcu_torture_freelist)) {
94007- atomic_inc(&n_rcu_torture_alloc_fail);
94008+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
94009 spin_unlock_bh(&rcu_torture_lock);
94010 return NULL;
94011 }
94012- atomic_inc(&n_rcu_torture_alloc);
94013+ atomic_inc_unchecked(&n_rcu_torture_alloc);
94014 p = rcu_torture_freelist.next;
94015 list_del_init(p);
94016 spin_unlock_bh(&rcu_torture_lock);
94017@@ -217,7 +217,7 @@ rcu_torture_alloc(void)
94018 static void
94019 rcu_torture_free(struct rcu_torture *p)
94020 {
94021- atomic_inc(&n_rcu_torture_free);
94022+ atomic_inc_unchecked(&n_rcu_torture_free);
94023 spin_lock_bh(&rcu_torture_lock);
94024 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
94025 spin_unlock_bh(&rcu_torture_lock);
94026@@ -301,7 +301,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
94027 i = rp->rtort_pipe_count;
94028 if (i > RCU_TORTURE_PIPE_LEN)
94029 i = RCU_TORTURE_PIPE_LEN;
94030- atomic_inc(&rcu_torture_wcount[i]);
94031+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
94032 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
94033 rp->rtort_mbtest = 0;
94034 return true;
94035@@ -808,7 +808,7 @@ rcu_torture_writer(void *arg)
94036 i = old_rp->rtort_pipe_count;
94037 if (i > RCU_TORTURE_PIPE_LEN)
94038 i = RCU_TORTURE_PIPE_LEN;
94039- atomic_inc(&rcu_torture_wcount[i]);
94040+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
94041 old_rp->rtort_pipe_count++;
94042 switch (synctype[torture_random(&rand) % nsynctypes]) {
94043 case RTWS_DEF_FREE:
94044@@ -926,7 +926,7 @@ static void rcu_torture_timer(unsigned long unused)
94045 return;
94046 }
94047 if (p->rtort_mbtest == 0)
94048- atomic_inc(&n_rcu_torture_mberror);
94049+ atomic_inc_unchecked(&n_rcu_torture_mberror);
94050 spin_lock(&rand_lock);
94051 cur_ops->read_delay(&rand);
94052 n_rcu_torture_timers++;
94053@@ -996,7 +996,7 @@ rcu_torture_reader(void *arg)
94054 continue;
94055 }
94056 if (p->rtort_mbtest == 0)
94057- atomic_inc(&n_rcu_torture_mberror);
94058+ atomic_inc_unchecked(&n_rcu_torture_mberror);
94059 cur_ops->read_delay(&rand);
94060 preempt_disable();
94061 pipe_count = p->rtort_pipe_count;
94062@@ -1054,15 +1054,15 @@ rcu_torture_printk(char *page)
94063 }
94064 page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
94065 page += sprintf(page,
94066- "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
94067+ "rtc: %pP ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
94068 rcu_torture_current,
94069 rcu_torture_current_version,
94070 list_empty(&rcu_torture_freelist),
94071- atomic_read(&n_rcu_torture_alloc),
94072- atomic_read(&n_rcu_torture_alloc_fail),
94073- atomic_read(&n_rcu_torture_free));
94074+ atomic_read_unchecked(&n_rcu_torture_alloc),
94075+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
94076+ atomic_read_unchecked(&n_rcu_torture_free));
94077 page += sprintf(page, "rtmbe: %d rtbke: %ld rtbre: %ld ",
94078- atomic_read(&n_rcu_torture_mberror),
94079+ atomic_read_unchecked(&n_rcu_torture_mberror),
94080 n_rcu_torture_boost_ktrerror,
94081 n_rcu_torture_boost_rterror);
94082 page += sprintf(page, "rtbf: %ld rtb: %ld nt: %ld ",
94083@@ -1075,14 +1075,14 @@ rcu_torture_printk(char *page)
94084 n_barrier_attempts,
94085 n_rcu_torture_barrier_error);
94086 page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
94087- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
94088+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
94089 n_rcu_torture_barrier_error != 0 ||
94090 n_rcu_torture_boost_ktrerror != 0 ||
94091 n_rcu_torture_boost_rterror != 0 ||
94092 n_rcu_torture_boost_failure != 0 ||
94093 i > 1) {
94094 page += sprintf(page, "!!! ");
94095- atomic_inc(&n_rcu_torture_error);
94096+ atomic_inc_unchecked(&n_rcu_torture_error);
94097 WARN_ON_ONCE(1);
94098 }
94099 page += sprintf(page, "Reader Pipe: ");
94100@@ -1096,7 +1096,7 @@ rcu_torture_printk(char *page)
94101 page += sprintf(page, "Free-Block Circulation: ");
94102 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
94103 page += sprintf(page, " %d",
94104- atomic_read(&rcu_torture_wcount[i]));
94105+ atomic_read_unchecked(&rcu_torture_wcount[i]));
94106 }
94107 page += sprintf(page, "\n");
94108 if (cur_ops->stats)
94109@@ -1461,7 +1461,7 @@ rcu_torture_cleanup(void)
94110
94111 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
94112
94113- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
94114+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
94115 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
94116 else if (torture_onoff_failures())
94117 rcu_torture_print_module_parms(cur_ops,
94118@@ -1584,18 +1584,18 @@ rcu_torture_init(void)
94119
94120 rcu_torture_current = NULL;
94121 rcu_torture_current_version = 0;
94122- atomic_set(&n_rcu_torture_alloc, 0);
94123- atomic_set(&n_rcu_torture_alloc_fail, 0);
94124- atomic_set(&n_rcu_torture_free, 0);
94125- atomic_set(&n_rcu_torture_mberror, 0);
94126- atomic_set(&n_rcu_torture_error, 0);
94127+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
94128+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
94129+ atomic_set_unchecked(&n_rcu_torture_free, 0);
94130+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
94131+ atomic_set_unchecked(&n_rcu_torture_error, 0);
94132 n_rcu_torture_barrier_error = 0;
94133 n_rcu_torture_boost_ktrerror = 0;
94134 n_rcu_torture_boost_rterror = 0;
94135 n_rcu_torture_boost_failure = 0;
94136 n_rcu_torture_boosts = 0;
94137 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
94138- atomic_set(&rcu_torture_wcount[i], 0);
94139+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
94140 for_each_possible_cpu(cpu) {
94141 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
94142 per_cpu(rcu_torture_count, cpu)[i] = 0;
94143diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
94144index c639556..cf0a0d5 100644
94145--- a/kernel/rcu/srcu.c
94146+++ b/kernel/rcu/srcu.c
94147@@ -298,9 +298,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
94148
94149 idx = ACCESS_ONCE(sp->completed) & 0x1;
94150 preempt_disable();
94151- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
94152+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
94153 smp_mb(); /* B */ /* Avoid leaking the critical section. */
94154- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
94155+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
94156 preempt_enable();
94157 return idx;
94158 }
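
ACCESS_ONCE_RW appears throughout the RCU hunks because the patch, by its usual convention, turns plain ACCESS_ONCE into a const-qualified access: every intentional racy write must announce itself with the _RW spelling, and any leftover write through the plain macro becomes a compile error. A self-contained model of the pair (the kernel's actual definitions live in compiler.h and differ in detail):

#include <stdio.h>

#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

static int counter;

int main(void)
{
	ACCESS_ONCE_RW(counter) += 1;		/* explicit once-write */
	printf("%d\n", ACCESS_ONCE(counter));	/* once-read */
	/* ACCESS_ONCE(counter) = 5;  -- would now fail to compile */
	return 0;
}
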
94159diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
94160index d9efcc1..ea543e9 100644
94161--- a/kernel/rcu/tiny.c
94162+++ b/kernel/rcu/tiny.c
94163@@ -42,7 +42,7 @@
94164 /* Forward declarations for tiny_plugin.h. */
94165 struct rcu_ctrlblk;
94166 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
94167-static void rcu_process_callbacks(struct softirq_action *unused);
94168+static void rcu_process_callbacks(void);
94169 static void __call_rcu(struct rcu_head *head,
94170 void (*func)(struct rcu_head *rcu),
94171 struct rcu_ctrlblk *rcp);
94172@@ -308,7 +308,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
94173 false));
94174 }
94175
94176-static void rcu_process_callbacks(struct softirq_action *unused)
94177+static __latent_entropy void rcu_process_callbacks(void)
94178 {
94179 __rcu_process_callbacks(&rcu_sched_ctrlblk);
94180 __rcu_process_callbacks(&rcu_bh_ctrlblk);
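
Two things happen to rcu_process_callbacks() here: the never-used softirq_action argument is dropped (this patch changes the softirq handler prototype tree-wide) and the body is tagged __latent_entropy so grsecurity's gcc plugin can fold per-call state into the entropy pool. A hedged sketch of keeping such sources buildable when the plugin, which defines the real attribute, is absent:

#ifndef __latent_entropy
#define __latent_entropy	/* expands to nothing without the plugin */
#endif

static __latent_entropy void process_callbacks(void)
{
	/* handler body; note: no unused softirq_action argument */
}

int main(void)
{
	process_callbacks();
	return 0;
}
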
94181diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
94182index 858c565..7efd915 100644
94183--- a/kernel/rcu/tiny_plugin.h
94184+++ b/kernel/rcu/tiny_plugin.h
94185@@ -152,17 +152,17 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
94186 dump_stack();
94187 }
94188 if (*rcp->curtail && ULONG_CMP_GE(j, js))
94189- ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
94190+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies +
94191 3 * rcu_jiffies_till_stall_check() + 3;
94192 else if (ULONG_CMP_GE(j, js))
94193- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
94194+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
94195 }
94196
94197 static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
94198 {
94199 rcp->ticks_this_gp = 0;
94200 rcp->gp_start = jiffies;
94201- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
94202+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
94203 }
94204
94205 static void check_cpu_stalls(void)
94206diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
94207index 625d0b0..0bce4d6 100644
94208--- a/kernel/rcu/tree.c
94209+++ b/kernel/rcu/tree.c
94210@@ -263,7 +263,7 @@ static void rcu_momentary_dyntick_idle(void)
94211 */
94212 rdtp = this_cpu_ptr(&rcu_dynticks);
94213 smp_mb__before_atomic(); /* Earlier stuff before QS. */
94214- atomic_add(2, &rdtp->dynticks); /* QS. */
94215+ atomic_add_unchecked(2, &rdtp->dynticks); /* QS. */
94216 smp_mb__after_atomic(); /* Later stuff after QS. */
94217 break;
94218 }
94219@@ -523,9 +523,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
94220 rcu_prepare_for_idle(smp_processor_id());
94221 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
94222 smp_mb__before_atomic(); /* See above. */
94223- atomic_inc(&rdtp->dynticks);
94224+ atomic_inc_unchecked(&rdtp->dynticks);
94225 smp_mb__after_atomic(); /* Force ordering with next sojourn. */
94226- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
94227+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
94228
94229 /*
94230 * It is illegal to enter an extended quiescent state while
94231@@ -643,10 +643,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
94232 int user)
94233 {
94234 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
94235- atomic_inc(&rdtp->dynticks);
94236+ atomic_inc_unchecked(&rdtp->dynticks);
94237 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
94238 smp_mb__after_atomic(); /* See above. */
94239- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
94240+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
94241 rcu_cleanup_after_idle(smp_processor_id());
94242 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
94243 if (!user && !is_idle_task(current)) {
94244@@ -767,14 +767,14 @@ void rcu_nmi_enter(void)
94245 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
94246
94247 if (rdtp->dynticks_nmi_nesting == 0 &&
94248- (atomic_read(&rdtp->dynticks) & 0x1))
94249+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
94250 return;
94251 rdtp->dynticks_nmi_nesting++;
94252 smp_mb__before_atomic(); /* Force delay from prior write. */
94253- atomic_inc(&rdtp->dynticks);
94254+ atomic_inc_unchecked(&rdtp->dynticks);
94255 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
94256 smp_mb__after_atomic(); /* See above. */
94257- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
94258+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
94259 }
94260
94261 /**
94262@@ -793,9 +793,9 @@ void rcu_nmi_exit(void)
94263 return;
94264 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
94265 smp_mb__before_atomic(); /* See above. */
94266- atomic_inc(&rdtp->dynticks);
94267+ atomic_inc_unchecked(&rdtp->dynticks);
94268 smp_mb__after_atomic(); /* Force delay to next write. */
94269- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
94270+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
94271 }
94272
94273 /**
94274@@ -808,7 +808,7 @@ void rcu_nmi_exit(void)
94275 */
94276 bool notrace __rcu_is_watching(void)
94277 {
94278- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
94279+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
94280 }
94281
94282 /**
94283@@ -891,7 +891,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
94284 static int dyntick_save_progress_counter(struct rcu_data *rdp,
94285 bool *isidle, unsigned long *maxj)
94286 {
94287- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
94288+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
94289 rcu_sysidle_check_cpu(rdp, isidle, maxj);
94290 if ((rdp->dynticks_snap & 0x1) == 0) {
94291 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
94292@@ -920,7 +920,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
94293 int *rcrmp;
94294 unsigned int snap;
94295
94296- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
94297+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
94298 snap = (unsigned int)rdp->dynticks_snap;
94299
94300 /*
94301@@ -983,10 +983,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
94302 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
94303 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
94304 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
94305- ACCESS_ONCE(rdp->cond_resched_completed) =
94306+ ACCESS_ONCE_RW(rdp->cond_resched_completed) =
94307 ACCESS_ONCE(rdp->mynode->completed);
94308 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
94309- ACCESS_ONCE(*rcrmp) =
94310+ ACCESS_ONCE_RW(*rcrmp) =
94311 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
94312 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
94313 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
94314@@ -1008,7 +1008,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
94315 rsp->gp_start = j;
94316 smp_wmb(); /* Record start time before stall time. */
94317 j1 = rcu_jiffies_till_stall_check();
94318- ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
94319+ ACCESS_ONCE_RW(rsp->jiffies_stall) = j + j1;
94320 rsp->jiffies_resched = j + j1 / 2;
94321 }
94322
94323@@ -1052,7 +1052,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
94324 raw_spin_unlock_irqrestore(&rnp->lock, flags);
94325 return;
94326 }
94327- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
94328+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
94329 raw_spin_unlock_irqrestore(&rnp->lock, flags);
94330
94331 /*
94332@@ -1130,7 +1130,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
94333
94334 raw_spin_lock_irqsave(&rnp->lock, flags);
94335 if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
94336- ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
94337+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies +
94338 3 * rcu_jiffies_till_stall_check() + 3;
94339 raw_spin_unlock_irqrestore(&rnp->lock, flags);
94340
94341@@ -1214,7 +1214,7 @@ void rcu_cpu_stall_reset(void)
94342 struct rcu_state *rsp;
94343
94344 for_each_rcu_flavor(rsp)
94345- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
94346+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
94347 }
94348
94349 /*
94350@@ -1594,7 +1594,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
94351 raw_spin_unlock_irq(&rnp->lock);
94352 return 0;
94353 }
94354- ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
94355+ ACCESS_ONCE_RW(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
94356
94357 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
94358 /*
94359@@ -1635,9 +1635,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
94360 rdp = this_cpu_ptr(rsp->rda);
94361 rcu_preempt_check_blocked_tasks(rnp);
94362 rnp->qsmask = rnp->qsmaskinit;
94363- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
94364+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
94365 WARN_ON_ONCE(rnp->completed != rsp->completed);
94366- ACCESS_ONCE(rnp->completed) = rsp->completed;
94367+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
94368 if (rnp == rdp->mynode)
94369 (void)__note_gp_changes(rsp, rnp, rdp);
94370 rcu_preempt_boost_start_gp(rnp);
94371@@ -1687,7 +1687,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
94372 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
94373 raw_spin_lock_irq(&rnp->lock);
94374 smp_mb__after_unlock_lock();
94375- ACCESS_ONCE(rsp->gp_flags) &= ~RCU_GP_FLAG_FQS;
94376+ ACCESS_ONCE_RW(rsp->gp_flags) &= ~RCU_GP_FLAG_FQS;
94377 raw_spin_unlock_irq(&rnp->lock);
94378 }
94379 return fqs_state;
94380@@ -1732,7 +1732,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
94381 rcu_for_each_node_breadth_first(rsp, rnp) {
94382 raw_spin_lock_irq(&rnp->lock);
94383 smp_mb__after_unlock_lock();
94384- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
94385+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
94386 rdp = this_cpu_ptr(rsp->rda);
94387 if (rnp == rdp->mynode)
94388 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
94389@@ -1747,14 +1747,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
94390 rcu_nocb_gp_set(rnp, nocb);
94391
94392 /* Declare grace period done. */
94393- ACCESS_ONCE(rsp->completed) = rsp->gpnum;
94394+ ACCESS_ONCE_RW(rsp->completed) = rsp->gpnum;
94395 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
94396 rsp->fqs_state = RCU_GP_IDLE;
94397 rdp = this_cpu_ptr(rsp->rda);
94398 /* Advance CBs to reduce false positives below. */
94399 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
94400 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
94401- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
94402+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
94403 trace_rcu_grace_period(rsp->name,
94404 ACCESS_ONCE(rsp->gpnum),
94405 TPS("newreq"));
94406@@ -1879,7 +1879,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
94407 */
94408 return false;
94409 }
94410- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
94411+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
94412 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
94413 TPS("newreq"));
94414
94415@@ -2100,7 +2100,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
94416 rsp->qlen += rdp->qlen;
94417 rdp->n_cbs_orphaned += rdp->qlen;
94418 rdp->qlen_lazy = 0;
94419- ACCESS_ONCE(rdp->qlen) = 0;
94420+ ACCESS_ONCE_RW(rdp->qlen) = 0;
94421 }
94422
94423 /*
94424@@ -2347,7 +2347,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
94425 }
94426 smp_mb(); /* List handling before counting for rcu_barrier(). */
94427 rdp->qlen_lazy -= count_lazy;
94428- ACCESS_ONCE(rdp->qlen) -= count;
94429+ ACCESS_ONCE_RW(rdp->qlen) -= count;
94430 rdp->n_cbs_invoked += count;
94431
94432 /* Reinstate batch limit if we have worked down the excess. */
94433@@ -2492,7 +2492,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
94434 if (rnp_old != NULL)
94435 raw_spin_unlock(&rnp_old->fqslock);
94436 if (ret) {
94437- ACCESS_ONCE(rsp->n_force_qs_lh)++;
94438+ ACCESS_ONCE_RW(rsp->n_force_qs_lh)++;
94439 return;
94440 }
94441 rnp_old = rnp;
94442@@ -2504,11 +2504,11 @@ static void force_quiescent_state(struct rcu_state *rsp)
94443 smp_mb__after_unlock_lock();
94444 raw_spin_unlock(&rnp_old->fqslock);
94445 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
94446- ACCESS_ONCE(rsp->n_force_qs_lh)++;
94447+ ACCESS_ONCE_RW(rsp->n_force_qs_lh)++;
94448 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
94449 return; /* Someone beat us to it. */
94450 }
94451- ACCESS_ONCE(rsp->gp_flags) |= RCU_GP_FLAG_FQS;
94452+ ACCESS_ONCE_RW(rsp->gp_flags) |= RCU_GP_FLAG_FQS;
94453 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
94454 wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */
94455 }
94456@@ -2553,7 +2553,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
94457 /*
94458 * Do RCU core processing for the current CPU.
94459 */
94460-static void rcu_process_callbacks(struct softirq_action *unused)
94461+static void rcu_process_callbacks(void)
94462 {
94463 struct rcu_state *rsp;
94464
94465@@ -2665,7 +2665,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
94466 WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
94467 if (debug_rcu_head_queue(head)) {
94468 /* Probable double call_rcu(), so leak the callback. */
94469- ACCESS_ONCE(head->func) = rcu_leak_callback;
94470+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
94471 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
94472 return;
94473 }
94474@@ -2693,7 +2693,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
94475 local_irq_restore(flags);
94476 return;
94477 }
94478- ACCESS_ONCE(rdp->qlen)++;
94479+ ACCESS_ONCE_RW(rdp->qlen)++;
94480 if (lazy)
94481 rdp->qlen_lazy++;
94482 else
94483@@ -2968,11 +2968,11 @@ void synchronize_sched_expedited(void)
94484 * counter wrap on a 32-bit system. Quite a few more CPUs would of
94485 * course be required on a 64-bit system.
94486 */
94487- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
94488+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
94489 (ulong)atomic_long_read(&rsp->expedited_done) +
94490 ULONG_MAX / 8)) {
94491 synchronize_sched();
94492- atomic_long_inc(&rsp->expedited_wrap);
94493+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
94494 return;
94495 }
94496
94497@@ -2980,7 +2980,7 @@ void synchronize_sched_expedited(void)
94498 * Take a ticket. Note that atomic_inc_return() implies a
94499 * full memory barrier.
94500 */
94501- snap = atomic_long_inc_return(&rsp->expedited_start);
94502+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
94503 firstsnap = snap;
94504 get_online_cpus();
94505 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
94506@@ -2993,14 +2993,14 @@ void synchronize_sched_expedited(void)
94507 synchronize_sched_expedited_cpu_stop,
94508 NULL) == -EAGAIN) {
94509 put_online_cpus();
94510- atomic_long_inc(&rsp->expedited_tryfail);
94511+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
94512
94513 /* Check to see if someone else did our work for us. */
94514 s = atomic_long_read(&rsp->expedited_done);
94515 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
94516 /* ensure test happens before caller kfree */
94517 smp_mb__before_atomic(); /* ^^^ */
94518- atomic_long_inc(&rsp->expedited_workdone1);
94519+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
94520 return;
94521 }
94522
94523@@ -3009,7 +3009,7 @@ void synchronize_sched_expedited(void)
94524 udelay(trycount * num_online_cpus());
94525 } else {
94526 wait_rcu_gp(call_rcu_sched);
94527- atomic_long_inc(&rsp->expedited_normal);
94528+ atomic_long_inc_unchecked(&rsp->expedited_normal);
94529 return;
94530 }
94531
94532@@ -3018,7 +3018,7 @@ void synchronize_sched_expedited(void)
94533 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
94534 /* ensure test happens before caller kfree */
94535 smp_mb__before_atomic(); /* ^^^ */
94536- atomic_long_inc(&rsp->expedited_workdone2);
94537+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
94538 return;
94539 }
94540
94541@@ -3030,10 +3030,10 @@ void synchronize_sched_expedited(void)
94542 * period works for us.
94543 */
94544 get_online_cpus();
94545- snap = atomic_long_read(&rsp->expedited_start);
94546+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
94547 smp_mb(); /* ensure read is before try_stop_cpus(). */
94548 }
94549- atomic_long_inc(&rsp->expedited_stoppedcpus);
94550+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
94551
94552 /*
94553 * Everyone up to our most recent fetch is covered by our grace
94554@@ -3042,16 +3042,16 @@ void synchronize_sched_expedited(void)
94555 * than we did already did their update.
94556 */
94557 do {
94558- atomic_long_inc(&rsp->expedited_done_tries);
94559+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
94560 s = atomic_long_read(&rsp->expedited_done);
94561 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
94562 /* ensure test happens before caller kfree */
94563 smp_mb__before_atomic(); /* ^^^ */
94564- atomic_long_inc(&rsp->expedited_done_lost);
94565+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
94566 break;
94567 }
94568 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
94569- atomic_long_inc(&rsp->expedited_done_exit);
94570+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
94571
94572 put_online_cpus();
94573 }
94574@@ -3257,7 +3257,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
94575 * ACCESS_ONCE() to prevent the compiler from speculating
94576 * the increment to precede the early-exit check.
94577 */
94578- ACCESS_ONCE(rsp->n_barrier_done)++;
94579+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
94580 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
94581 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
94582 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
94583@@ -3307,7 +3307,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
94584
94585 /* Increment ->n_barrier_done to prevent duplicate work. */
94586 smp_mb(); /* Keep increment after above mechanism. */
94587- ACCESS_ONCE(rsp->n_barrier_done)++;
94588+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
94589 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
94590 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
94591 smp_mb(); /* Keep increment before caller's subsequent code. */
94592@@ -3352,10 +3352,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
94593 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
94594 init_callback_list(rdp);
94595 rdp->qlen_lazy = 0;
94596- ACCESS_ONCE(rdp->qlen) = 0;
94597+ ACCESS_ONCE_RW(rdp->qlen) = 0;
94598 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
94599 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
94600- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
94601+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
94602 rdp->cpu = cpu;
94603 rdp->rsp = rsp;
94604 rcu_boot_init_nocb_percpu_data(rdp);
94605@@ -3388,8 +3388,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
94606 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
94607 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
94608 rcu_sysidle_init_percpu_data(rdp->dynticks);
94609- atomic_set(&rdp->dynticks->dynticks,
94610- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
94611+ atomic_set_unchecked(&rdp->dynticks->dynticks,
94612+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
94613 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
94614
94615 /* Add CPU to rcu_node bitmasks. */
94616diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
94617index 0f69a79..c85c2dc 100644
94618--- a/kernel/rcu/tree.h
94619+++ b/kernel/rcu/tree.h
94620@@ -87,11 +87,11 @@ struct rcu_dynticks {
94621 long long dynticks_nesting; /* Track irq/process nesting level. */
94622 /* Process level is worth LLONG_MAX/2. */
94623 int dynticks_nmi_nesting; /* Track NMI nesting level. */
94624- atomic_t dynticks; /* Even value for idle, else odd. */
94625+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
94626 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
94627 long long dynticks_idle_nesting;
94628 /* irq/process nesting level from idle. */
94629- atomic_t dynticks_idle; /* Even value for idle, else odd. */
94630+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
94631 /* "Idle" excludes userspace execution. */
94632 unsigned long dynticks_idle_jiffies;
94633 /* End of last non-NMI non-idle period. */
94634@@ -435,17 +435,17 @@ struct rcu_state {
94635 /* _rcu_barrier(). */
94636 /* End of fields guarded by barrier_mutex. */
94637
94638- atomic_long_t expedited_start; /* Starting ticket. */
94639- atomic_long_t expedited_done; /* Done ticket. */
94640- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
94641- atomic_long_t expedited_tryfail; /* # acquisition failures. */
94642- atomic_long_t expedited_workdone1; /* # done by others #1. */
94643- atomic_long_t expedited_workdone2; /* # done by others #2. */
94644- atomic_long_t expedited_normal; /* # fallbacks to normal. */
94645- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
94646- atomic_long_t expedited_done_tries; /* # tries to update _done. */
94647- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
94648- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
94649+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
94650+ atomic_long_t expedited_done; /* Done ticket. */
94651+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
94652+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
94653+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
94654+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
94655+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
94656+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
94657+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
94658+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
94659+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
94660
94661 unsigned long jiffies_force_qs; /* Time at which to invoke */
94662 /* force_quiescent_state(). */
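
Worth noticing in this hunk: every expedited counter flips to atomic_long_unchecked_t except expedited_done, which stays checked. It is the one counter updated through atomic_long_cmpxchg() in synchronize_sched_expedited() rather than blindly incremented, so overflow checking costs nothing there (the motive is inferred, not stated by the patch). A runnable model of that monotonic "done ticket" update:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long done;

/* Advance "done" to our snapshot unless a later waiter got there first;
 * the signed-difference compare tolerates wraparound, like ULONG_CMP_GE. */
static void update_done(unsigned long snap)
{
	unsigned long s = atomic_load(&done);

	while ((long)(snap - s) > 0 &&
	       !atomic_compare_exchange_weak(&done, &s, snap))
		;	/* s was reloaded by the failed CAS; retry */
}

int main(void)
{
	update_done(5);
	update_done(3);				/* no-op: 3 is already covered */
	printf("%lu\n", atomic_load(&done));	/* 5 */
	return 0;
}
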
94663diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
94664index 02ac0fb..4aa4a36 100644
94665--- a/kernel/rcu/tree_plugin.h
94666+++ b/kernel/rcu/tree_plugin.h
94667@@ -735,7 +735,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
94668 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
94669 {
94670 return !rcu_preempted_readers_exp(rnp) &&
94671- ACCESS_ONCE(rnp->expmask) == 0;
94672+ ACCESS_ONCE_RW(rnp->expmask) == 0;
94673 }
94674
94675 /*
94676@@ -897,7 +897,7 @@ void synchronize_rcu_expedited(void)
94677
94678 /* Clean up and exit. */
94679 smp_mb(); /* ensure expedited GP seen before counter increment. */
94680- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
94681+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
94682 unlock_mb_ret:
94683 mutex_unlock(&sync_rcu_preempt_exp_mutex);
94684 mb_ret:
94685@@ -1447,7 +1447,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
94686 free_cpumask_var(cm);
94687 }
94688
94689-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
94690+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
94691 .store = &rcu_cpu_kthread_task,
94692 .thread_should_run = rcu_cpu_kthread_should_run,
94693 .thread_fn = rcu_cpu_kthread,
94694@@ -1926,7 +1926,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
94695 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
94696 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
94697 cpu, ticks_value, ticks_title,
94698- atomic_read(&rdtp->dynticks) & 0xfff,
94699+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
94700 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
94701 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
94702 fast_no_hz);
94703@@ -2079,7 +2079,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
94704
94705 /* Enqueue the callback on the nocb list and update counts. */
94706 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
94707- ACCESS_ONCE(*old_rhpp) = rhp;
94708+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
94709 atomic_long_add(rhcount, &rdp->nocb_q_count);
94710 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
94711
94712@@ -2255,12 +2255,12 @@ static int rcu_nocb_kthread(void *arg)
94713 * Extract queued callbacks, update counts, and wait
94714 * for a grace period to elapse.
94715 */
94716- ACCESS_ONCE(rdp->nocb_head) = NULL;
94717+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
94718 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
94719 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
94720 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
94721- ACCESS_ONCE(rdp->nocb_p_count) += c;
94722- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
94723+ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
94724+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
94725 rcu_nocb_wait_gp(rdp);
94726
94727 /* Each pass through the following loop invokes a callback. */
94728@@ -2286,8 +2286,8 @@ static int rcu_nocb_kthread(void *arg)
94729 list = next;
94730 }
94731 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
94732- ACCESS_ONCE(rdp->nocb_p_count) -= c;
94733- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
94734+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
94735+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
94736 rdp->n_nocbs_invoked += c;
94737 }
94738 return 0;
94739@@ -2304,7 +2304,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
94740 {
94741 if (!rcu_nocb_need_deferred_wakeup(rdp))
94742 return;
94743- ACCESS_ONCE(rdp->nocb_defer_wakeup) = false;
94744+ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = false;
94745 wake_up(&rdp->nocb_wq);
94746 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty"));
94747 }
94748@@ -2330,7 +2330,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
94749 t = kthread_run(rcu_nocb_kthread, rdp,
94750 "rcuo%c/%d", rsp->abbr, cpu);
94751 BUG_ON(IS_ERR(t));
94752- ACCESS_ONCE(rdp->nocb_kthread) = t;
94753+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
94754 }
94755 }
94756
94757@@ -2461,11 +2461,11 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
94758
94759 /* Record start of fully idle period. */
94760 j = jiffies;
94761- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
94762+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
94763 smp_mb__before_atomic();
94764- atomic_inc(&rdtp->dynticks_idle);
94765+ atomic_inc_unchecked(&rdtp->dynticks_idle);
94766 smp_mb__after_atomic();
94767- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
94768+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
94769 }
94770
94771 /*
94772@@ -2530,9 +2530,9 @@ static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
94773
94774 /* Record end of idle period. */
94775 smp_mb__before_atomic();
94776- atomic_inc(&rdtp->dynticks_idle);
94777+ atomic_inc_unchecked(&rdtp->dynticks_idle);
94778 smp_mb__after_atomic();
94779- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
94780+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
94781
94782 /*
94783 * If we are the timekeeping CPU, we are permitted to be non-idle
94784@@ -2573,7 +2573,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
94785 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
94786
94787 /* Pick up current idle and NMI-nesting counter and check. */
94788- cur = atomic_read(&rdtp->dynticks_idle);
94789+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
94790 if (cur & 0x1) {
94791 *isidle = false; /* We are not idle! */
94792 return;
94793@@ -2622,7 +2622,7 @@ static void rcu_sysidle(unsigned long j)
94794 case RCU_SYSIDLE_NOT:
94795
94796 /* First time all are idle, so note a short idle period. */
94797- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
94798+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
94799 break;
94800
94801 case RCU_SYSIDLE_SHORT:
94802@@ -2660,7 +2660,7 @@ static void rcu_sysidle_cancel(void)
94803 {
94804 smp_mb();
94805 if (full_sysidle_state > RCU_SYSIDLE_SHORT)
94806- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
94807+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
94808 }
94809
94810 /*
94811@@ -2708,7 +2708,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
94812 smp_mb(); /* grace period precedes setting inuse. */
94813
94814 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
94815- ACCESS_ONCE(rshp->inuse) = 0;
94816+ ACCESS_ONCE_RW(rshp->inuse) = 0;
94817 }
94818
94819 /*
94820diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
94821index 5cdc62e..cc52e88 100644
94822--- a/kernel/rcu/tree_trace.c
94823+++ b/kernel/rcu/tree_trace.c
94824@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
94825 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
94826 rdp->passed_quiesce, rdp->qs_pending);
94827 seq_printf(m, " dt=%d/%llx/%d df=%lu",
94828- atomic_read(&rdp->dynticks->dynticks),
94829+ atomic_read_unchecked(&rdp->dynticks->dynticks),
94830 rdp->dynticks->dynticks_nesting,
94831 rdp->dynticks->dynticks_nmi_nesting,
94832 rdp->dynticks_fqs);
94833@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
94834 struct rcu_state *rsp = (struct rcu_state *)m->private;
94835
94836 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
94837- atomic_long_read(&rsp->expedited_start),
94838+ atomic_long_read_unchecked(&rsp->expedited_start),
94839 atomic_long_read(&rsp->expedited_done),
94840- atomic_long_read(&rsp->expedited_wrap),
94841- atomic_long_read(&rsp->expedited_tryfail),
94842- atomic_long_read(&rsp->expedited_workdone1),
94843- atomic_long_read(&rsp->expedited_workdone2),
94844- atomic_long_read(&rsp->expedited_normal),
94845- atomic_long_read(&rsp->expedited_stoppedcpus),
94846- atomic_long_read(&rsp->expedited_done_tries),
94847- atomic_long_read(&rsp->expedited_done_lost),
94848- atomic_long_read(&rsp->expedited_done_exit));
94849+ atomic_long_read_unchecked(&rsp->expedited_wrap),
94850+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
94851+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
94852+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
94853+ atomic_long_read_unchecked(&rsp->expedited_normal),
94854+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
94855+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
94856+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
94857+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
94858 return 0;
94859 }
94860
94861diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
94862index bc78835..7691a45 100644
94863--- a/kernel/rcu/update.c
94864+++ b/kernel/rcu/update.c
94865@@ -311,10 +311,10 @@ int rcu_jiffies_till_stall_check(void)
94866 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
94867 */
94868 if (till_stall_check < 3) {
94869- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
94870+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
94871 till_stall_check = 3;
94872 } else if (till_stall_check > 300) {
94873- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
94874+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
94875 till_stall_check = 300;
94876 }
94877 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
94878diff --git a/kernel/resource.c b/kernel/resource.c
94879index 3c2237a..4568d96 100644
94880--- a/kernel/resource.c
94881+++ b/kernel/resource.c
94882@@ -152,8 +152,18 @@ static const struct file_operations proc_iomem_operations = {
94883
94884 static int __init ioresources_init(void)
94885 {
94886+#ifdef CONFIG_GRKERNSEC_PROC_ADD
94887+#ifdef CONFIG_GRKERNSEC_PROC_USER
94888+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
94889+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
94890+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
94891+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
94892+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
94893+#endif
94894+#else
94895 proc_create("ioports", 0, NULL, &proc_ioports_operations);
94896 proc_create("iomem", 0, NULL, &proc_iomem_operations);
94897+#endif
94898 return 0;
94899 }
94900 __initcall(ioresources_init);
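
ioresources_init() now honours the GRKERNSEC_PROC_ADD family: /proc/ioports and /proc/iomem are created with mode S_IRUSR (or S_IRUSR | S_IRGRP) instead of world-readable 0, keeping physical-layout hints away from unprivileged users. A hedged 3.16-era module sketch of the same proc_create() mode restriction; demo_iomem and demo_fops are invented names:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>

static const struct file_operations demo_fops = { .owner = THIS_MODULE };

static int __init demo_init(void)
{
	/* 0 (world-readable) becomes S_IRUSR: root-only reads. */
	if (!proc_create("demo_iomem", S_IRUSR, NULL, &demo_fops))
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("demo_iomem", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
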
94901diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
94902index e73efba..c9bfbd4 100644
94903--- a/kernel/sched/auto_group.c
94904+++ b/kernel/sched/auto_group.c
94905@@ -11,7 +11,7 @@
94906
94907 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
94908 static struct autogroup autogroup_default;
94909-static atomic_t autogroup_seq_nr;
94910+static atomic_unchecked_t autogroup_seq_nr;
94911
94912 void __init autogroup_init(struct task_struct *init_task)
94913 {
94914@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
94915
94916 kref_init(&ag->kref);
94917 init_rwsem(&ag->lock);
94918- ag->id = atomic_inc_return(&autogroup_seq_nr);
94919+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
94920 ag->tg = tg;
94921 #ifdef CONFIG_RT_GROUP_SCHED
94922 /*
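
autogroup_seq_nr only mints human-readable group ids for /proc output, so wrapping after 2^32 groups is harmless and the counter becomes _unchecked. A relaxed fetch-add is the userspace analogue of the atomic_inc_return_unchecked() above:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq_nr;

static unsigned new_id(void)
{
	/* Wraparound is tolerated: uniqueness matters only for reporting. */
	return atomic_fetch_add_explicit(&seq_nr, 1, memory_order_relaxed) + 1;
}

int main(void)
{
	printf("%u %u\n", new_id(), new_id());	/* 1 2 */
	return 0;
}
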
94923diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
94924index a63f4dc..349bbb0 100644
94925--- a/kernel/sched/completion.c
94926+++ b/kernel/sched/completion.c
94927@@ -204,7 +204,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
94928 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
94929 * or number of jiffies left till timeout) if completed.
94930 */
94931-long __sched
94932+long __sched __intentional_overflow(-1)
94933 wait_for_completion_interruptible_timeout(struct completion *x,
94934 unsigned long timeout)
94935 {
94936@@ -221,7 +221,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
94937 *
94938 * Return: -ERESTARTSYS if interrupted, 0 if completed.
94939 */
94940-int __sched wait_for_completion_killable(struct completion *x)
94941+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
94942 {
94943 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
94944 if (t == -ERESTARTSYS)
94945@@ -242,7 +242,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
94946 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
94947 * or number of jiffies left till timeout) if completed.
94948 */
94949-long __sched
94950+long __sched __intentional_overflow(-1)
94951 wait_for_completion_killable_timeout(struct completion *x,
94952 unsigned long timeout)
94953 {
94954diff --git a/kernel/sched/core.c b/kernel/sched/core.c
94955index 0acf96b..80ba955 100644
94956--- a/kernel/sched/core.c
94957+++ b/kernel/sched/core.c
94958@@ -1849,7 +1849,7 @@ void set_numabalancing_state(bool enabled)
94959 int sysctl_numa_balancing(struct ctl_table *table, int write,
94960 void __user *buffer, size_t *lenp, loff_t *ppos)
94961 {
94962- struct ctl_table t;
94963+ ctl_table_no_const t;
94964 int err;
94965 int state = numabalancing_enabled;
94966
94967@@ -2312,8 +2312,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
94968 next->active_mm = oldmm;
94969 atomic_inc(&oldmm->mm_count);
94970 enter_lazy_tlb(oldmm, next);
94971- } else
94972+ } else {
94973 switch_mm(oldmm, mm, next);
94974+ populate_stack();
94975+ }
94976
94977 if (!prev->mm) {
94978 prev->active_mm = NULL;
94979@@ -3081,6 +3083,8 @@ int can_nice(const struct task_struct *p, const int nice)
94980 /* convert nice value [19,-20] to rlimit style value [1,40] */
94981 int nice_rlim = nice_to_rlimit(nice);
94982
94983+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
94984+
94985 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
94986 capable(CAP_SYS_NICE));
94987 }
94988@@ -3107,7 +3111,8 @@ SYSCALL_DEFINE1(nice, int, increment)
94989 nice = task_nice(current) + increment;
94990
94991 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
94992- if (increment < 0 && !can_nice(current, nice))
94993+ if (increment < 0 && (!can_nice(current, nice) ||
94994+ gr_handle_chroot_nice()))
94995 return -EPERM;
94996
94997 retval = security_task_setnice(current, nice);
94998@@ -3380,6 +3385,7 @@ recheck:
94999 if (policy != p->policy && !rlim_rtprio)
95000 return -EPERM;
95001
95002+ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
95003 /* can't increase priority */
95004 if (attr->sched_priority > p->rt_priority &&
95005 attr->sched_priority > rlim_rtprio)
95006@@ -4772,6 +4778,7 @@ void idle_task_exit(void)
95007
95008 if (mm != &init_mm) {
95009 switch_mm(mm, &init_mm, current);
95010+ populate_stack();
95011 finish_arch_post_lock_switch();
95012 }
95013 mmdrop(mm);
95014@@ -4867,7 +4874,7 @@ static void migrate_tasks(unsigned int dead_cpu)
95015
95016 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
95017
95018-static struct ctl_table sd_ctl_dir[] = {
95019+static ctl_table_no_const sd_ctl_dir[] __read_only = {
95020 {
95021 .procname = "sched_domain",
95022 .mode = 0555,
95023@@ -4884,17 +4891,17 @@ static struct ctl_table sd_ctl_root[] = {
95024 {}
95025 };
95026
95027-static struct ctl_table *sd_alloc_ctl_entry(int n)
95028+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
95029 {
95030- struct ctl_table *entry =
95031+ ctl_table_no_const *entry =
95032 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
95033
95034 return entry;
95035 }
95036
95037-static void sd_free_ctl_entry(struct ctl_table **tablep)
95038+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
95039 {
95040- struct ctl_table *entry;
95041+ ctl_table_no_const *entry;
95042
95043 /*
95044 * In the intermediate directories, both the child directory and
95045@@ -4902,22 +4909,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
95046 * will always be set. In the lowest directory the names are
95047 * static strings and all have proc handlers.
95048 */
95049- for (entry = *tablep; entry->mode; entry++) {
95050- if (entry->child)
95051- sd_free_ctl_entry(&entry->child);
95052+ for (entry = tablep; entry->mode; entry++) {
95053+ if (entry->child) {
95054+ sd_free_ctl_entry(entry->child);
95055+ pax_open_kernel();
95056+ entry->child = NULL;
95057+ pax_close_kernel();
95058+ }
95059 if (entry->proc_handler == NULL)
95060 kfree(entry->procname);
95061 }
95062
95063- kfree(*tablep);
95064- *tablep = NULL;
95065+ kfree(tablep);
95066 }
95067
95068 static int min_load_idx = 0;
95069 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
95070
95071 static void
95072-set_table_entry(struct ctl_table *entry,
95073+set_table_entry(ctl_table_no_const *entry,
95074 const char *procname, void *data, int maxlen,
95075 umode_t mode, proc_handler *proc_handler,
95076 bool load_idx)
95077@@ -4937,7 +4947,7 @@ set_table_entry(struct ctl_table *entry,
95078 static struct ctl_table *
95079 sd_alloc_ctl_domain_table(struct sched_domain *sd)
95080 {
95081- struct ctl_table *table = sd_alloc_ctl_entry(14);
95082+ ctl_table_no_const *table = sd_alloc_ctl_entry(14);
95083
95084 if (table == NULL)
95085 return NULL;
95086@@ -4975,9 +4985,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
95087 return table;
95088 }
95089
95090-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
95091+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
95092 {
95093- struct ctl_table *entry, *table;
95094+ ctl_table_no_const *entry, *table;
95095 struct sched_domain *sd;
95096 int domain_num = 0, i;
95097 char buf[32];
95098@@ -5004,11 +5014,13 @@ static struct ctl_table_header *sd_sysctl_header;
95099 static void register_sched_domain_sysctl(void)
95100 {
95101 int i, cpu_num = num_possible_cpus();
95102- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
95103+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
95104 char buf[32];
95105
95106 WARN_ON(sd_ctl_dir[0].child);
95107+ pax_open_kernel();
95108 sd_ctl_dir[0].child = entry;
95109+ pax_close_kernel();
95110
95111 if (entry == NULL)
95112 return;
95113@@ -5031,8 +5043,12 @@ static void unregister_sched_domain_sysctl(void)
95114 if (sd_sysctl_header)
95115 unregister_sysctl_table(sd_sysctl_header);
95116 sd_sysctl_header = NULL;
95117- if (sd_ctl_dir[0].child)
95118- sd_free_ctl_entry(&sd_ctl_dir[0].child);
95119+ if (sd_ctl_dir[0].child) {
95120+ sd_free_ctl_entry(sd_ctl_dir[0].child);
95121+ pax_open_kernel();
95122+ sd_ctl_dir[0].child = NULL;
95123+ pax_close_kernel();
95124+ }
95125 }
95126 #else
95127 static void register_sched_domain_sysctl(void)
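
Throughout this hunk the scheduler's sysctl tables become ctl_table_no_const and live in read-only memory; the only sanctioned writes happen inside pax_open_kernel()/pax_close_kernel() brackets. A runnable userspace analogue of that discipline using mprotect(2):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pg = sysconf(_SC_PAGESIZE);
        char *tbl = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (tbl == MAP_FAILED)
            return 1;
        strcpy(tbl, "sched_domain");
        mprotect(tbl, pg, PROT_READ);                /* steady state: RO */

        mprotect(tbl, pg, PROT_READ | PROT_WRITE);   /* pax_open_kernel() */
        tbl[0] = 'S';                                /* the one legal write */
        mprotect(tbl, pg, PROT_READ);                /* pax_close_kernel() */

        puts(tbl);
        return 0;
    }
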
95128diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
95129index fea7d33..84faa94 100644
95130--- a/kernel/sched/fair.c
95131+++ b/kernel/sched/fair.c
95132@@ -1857,7 +1857,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
95133
95134 static void reset_ptenuma_scan(struct task_struct *p)
95135 {
95136- ACCESS_ONCE(p->mm->numa_scan_seq)++;
95137+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
95138 p->mm->numa_scan_offset = 0;
95139 }
95140
95141@@ -7289,7 +7289,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
95142 * run_rebalance_domains is triggered when needed from the scheduler tick.
95143 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
95144 */
95145-static void run_rebalance_domains(struct softirq_action *h)
95146+static __latent_entropy void run_rebalance_domains(void)
95147 {
95148 struct rq *this_rq = this_rq();
95149 enum cpu_idle_type idle = this_rq->idle_balance ?
95150diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
95151index 31cc02e..734fb85 100644
95152--- a/kernel/sched/sched.h
95153+++ b/kernel/sched/sched.h
95154@@ -1153,7 +1153,7 @@ struct sched_class {
95155 #ifdef CONFIG_FAIR_GROUP_SCHED
95156 void (*task_move_group) (struct task_struct *p, int on_rq);
95157 #endif
95158-};
95159+} __do_const;
95160
95161 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
95162 {
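
Marking struct sched_class __do_const (a PaX constify-plugin attribute) forces every instance of this function-pointer table into read-only memory, removing a classic hijack target; applying the attribute to the type means no individual instance can forget the const. The plain-C shape of the idea:

    struct ops {
        void (*enqueue)(int prio);
        void (*dequeue)(int prio);
    };

    static void enq(int prio) { (void)prio; }
    static void deq(int prio) { (void)prio; }

    /* const instance: placed in .rodata by the linker, so
     * "fair_ops.enqueue = hijack;" is rejected at compile time. */
    static const struct ops fair_ops = {
        .enqueue = enq,
        .dequeue = deq,
    };
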
95163diff --git a/kernel/seccomp.c b/kernel/seccomp.c
95164index 301bbc2..eda2da3 100644
95165--- a/kernel/seccomp.c
95166+++ b/kernel/seccomp.c
95167@@ -39,7 +39,7 @@
95168 * is only needed for handling filters shared across tasks.
95169 * @prev: points to a previously installed, or inherited, filter
95170 * @len: the number of instructions in the program
95171- * @insnsi: the BPF program instructions to evaluate
95172+ * @insns: the BPF program instructions to evaluate
95173 *
95174 * seccomp_filter objects are organized in a tree linked via the @prev
95175 * pointer. For any task, it appears to be a singly-linked list starting
95176@@ -54,32 +54,61 @@
95177 struct seccomp_filter {
95178 atomic_t usage;
95179 struct seccomp_filter *prev;
95180- struct sk_filter *prog;
95181+ unsigned short len; /* Instruction count */
95182+ struct sock_filter insns[];
95183 };
95184
95185 /* Limit any path through the tree to 256KB worth of instructions. */
95186 #define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
95187
95188-/*
95189+/**
95190+ * get_u32 - returns a u32 offset into data
95191+ * @data: an unsigned 64-bit value

95192+ * @index: 0 or 1 to return the first or second 32-bits
95193+ *
95194+ * This inline exists to hide the length of unsigned long. If a 32-bit
95195+ * unsigned long is passed in, it will be extended and the top 32-bits will be
95196+ * 0. If it is a 64-bit unsigned long, then whatever data is resident will be
95197+ * properly returned.
95198+ *
95199 * Endianness is explicitly ignored and left for BPF program authors to manage
95200 * as per the specific architecture.
95201 */
95202-static void populate_seccomp_data(struct seccomp_data *sd)
95203+static inline u32 get_u32(u64 data, int index)
95204 {
95205- struct task_struct *task = current;
95206- struct pt_regs *regs = task_pt_regs(task);
95207- unsigned long args[6];
95208+ return ((u32 *)&data)[index];
95209+}
95210
95211- sd->nr = syscall_get_nr(task, regs);
95212- sd->arch = syscall_get_arch();
95213- syscall_get_arguments(task, regs, 0, 6, args);
95214- sd->args[0] = args[0];
95215- sd->args[1] = args[1];
95216- sd->args[2] = args[2];
95217- sd->args[3] = args[3];
95218- sd->args[4] = args[4];
95219- sd->args[5] = args[5];
95220- sd->instruction_pointer = KSTK_EIP(task);
95221+/* Helper for bpf_load below. */
95222+#define BPF_DATA(_name) offsetof(struct seccomp_data, _name)
95223+/**
95224+ * bpf_load: checks and returns a pointer to the requested offset
95225+ * @off: offset into struct seccomp_data to load from
95226+ *
95227+ * Returns the requested 32-bits of data.
95228+ * seccomp_check_filter() should assure that @off is 32-bit aligned
95229+ * and not out of bounds. Failure to do so is a BUG.
95230+ */
95231+u32 seccomp_bpf_load(int off)
95232+{
95233+ struct pt_regs *regs = task_pt_regs(current);
95234+ if (off == BPF_DATA(nr))
95235+ return syscall_get_nr(current, regs);
95236+ if (off == BPF_DATA(arch))
95237+ return syscall_get_arch();
95238+ if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) {
95239+ unsigned long value;
95240+ int arg = (off - BPF_DATA(args[0])) / sizeof(u64);
95241+ int index = !!(off % sizeof(u64));
95242+ syscall_get_arguments(current, regs, arg, 1, &value);
95243+ return get_u32(value, index);
95244+ }
95245+ if (off == BPF_DATA(instruction_pointer))
95246+ return get_u32(KSTK_EIP(current), 0);
95247+ if (off == BPF_DATA(instruction_pointer) + sizeof(u32))
95248+ return get_u32(KSTK_EIP(current), 1);
95249+ /* seccomp_check_filter should make this impossible. */
95250+ BUG();
95251 }
95252
95253 /**
95254@@ -103,59 +132,59 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
95255 u32 k = ftest->k;
95256
95257 switch (code) {
95258- case BPF_LD | BPF_W | BPF_ABS:
95259- ftest->code = BPF_LDX | BPF_W | BPF_ABS;
95260+ case BPF_S_LD_W_ABS:
95261+ ftest->code = BPF_S_ANC_SECCOMP_LD_W;
95262 /* 32-bit aligned and not out of bounds. */
95263 if (k >= sizeof(struct seccomp_data) || k & 3)
95264 return -EINVAL;
95265 continue;
95266- case BPF_LD | BPF_W | BPF_LEN:
95267- ftest->code = BPF_LD | BPF_IMM;
95268+ case BPF_S_LD_W_LEN:
95269+ ftest->code = BPF_S_LD_IMM;
95270 ftest->k = sizeof(struct seccomp_data);
95271 continue;
95272- case BPF_LDX | BPF_W | BPF_LEN:
95273- ftest->code = BPF_LDX | BPF_IMM;
95274+ case BPF_S_LDX_W_LEN:
95275+ ftest->code = BPF_S_LDX_IMM;
95276 ftest->k = sizeof(struct seccomp_data);
95277 continue;
95278 /* Explicitly include allowed calls. */
95279- case BPF_RET | BPF_K:
95280- case BPF_RET | BPF_A:
95281- case BPF_ALU | BPF_ADD | BPF_K:
95282- case BPF_ALU | BPF_ADD | BPF_X:
95283- case BPF_ALU | BPF_SUB | BPF_K:
95284- case BPF_ALU | BPF_SUB | BPF_X:
95285- case BPF_ALU | BPF_MUL | BPF_K:
95286- case BPF_ALU | BPF_MUL | BPF_X:
95287- case BPF_ALU | BPF_DIV | BPF_K:
95288- case BPF_ALU | BPF_DIV | BPF_X:
95289- case BPF_ALU | BPF_AND | BPF_K:
95290- case BPF_ALU | BPF_AND | BPF_X:
95291- case BPF_ALU | BPF_OR | BPF_K:
95292- case BPF_ALU | BPF_OR | BPF_X:
95293- case BPF_ALU | BPF_XOR | BPF_K:
95294- case BPF_ALU | BPF_XOR | BPF_X:
95295- case BPF_ALU | BPF_LSH | BPF_K:
95296- case BPF_ALU | BPF_LSH | BPF_X:
95297- case BPF_ALU | BPF_RSH | BPF_K:
95298- case BPF_ALU | BPF_RSH | BPF_X:
95299- case BPF_ALU | BPF_NEG:
95300- case BPF_LD | BPF_IMM:
95301- case BPF_LDX | BPF_IMM:
95302- case BPF_MISC | BPF_TAX:
95303- case BPF_MISC | BPF_TXA:
95304- case BPF_LD | BPF_MEM:
95305- case BPF_LDX | BPF_MEM:
95306- case BPF_ST:
95307- case BPF_STX:
95308- case BPF_JMP | BPF_JA:
95309- case BPF_JMP | BPF_JEQ | BPF_K:
95310- case BPF_JMP | BPF_JEQ | BPF_X:
95311- case BPF_JMP | BPF_JGE | BPF_K:
95312- case BPF_JMP | BPF_JGE | BPF_X:
95313- case BPF_JMP | BPF_JGT | BPF_K:
95314- case BPF_JMP | BPF_JGT | BPF_X:
95315- case BPF_JMP | BPF_JSET | BPF_K:
95316- case BPF_JMP | BPF_JSET | BPF_X:
95317+ case BPF_S_RET_K:
95318+ case BPF_S_RET_A:
95319+ case BPF_S_ALU_ADD_K:
95320+ case BPF_S_ALU_ADD_X:
95321+ case BPF_S_ALU_SUB_K:
95322+ case BPF_S_ALU_SUB_X:
95323+ case BPF_S_ALU_MUL_K:
95324+ case BPF_S_ALU_MUL_X:
95325+ case BPF_S_ALU_DIV_X:
95326+ case BPF_S_ALU_AND_K:
95327+ case BPF_S_ALU_AND_X:
95328+ case BPF_S_ALU_OR_K:
95329+ case BPF_S_ALU_OR_X:
95330+ case BPF_S_ALU_XOR_K:
95331+ case BPF_S_ALU_XOR_X:
95332+ case BPF_S_ALU_LSH_K:
95333+ case BPF_S_ALU_LSH_X:
95334+ case BPF_S_ALU_RSH_K:
95335+ case BPF_S_ALU_RSH_X:
95336+ case BPF_S_ALU_NEG:
95337+ case BPF_S_LD_IMM:
95338+ case BPF_S_LDX_IMM:
95339+ case BPF_S_MISC_TAX:
95340+ case BPF_S_MISC_TXA:
95341+ case BPF_S_ALU_DIV_K:
95342+ case BPF_S_LD_MEM:
95343+ case BPF_S_LDX_MEM:
95344+ case BPF_S_ST:
95345+ case BPF_S_STX:
95346+ case BPF_S_JMP_JA:
95347+ case BPF_S_JMP_JEQ_K:
95348+ case BPF_S_JMP_JEQ_X:
95349+ case BPF_S_JMP_JGE_K:
95350+ case BPF_S_JMP_JGE_X:
95351+ case BPF_S_JMP_JGT_K:
95352+ case BPF_S_JMP_JGT_X:
95353+ case BPF_S_JMP_JSET_K:
95354+ case BPF_S_JMP_JSET_X:
95355 continue;
95356 default:
95357 return -EINVAL;
95358@@ -173,22 +202,18 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
95359 static u32 seccomp_run_filters(int syscall)
95360 {
95361 struct seccomp_filter *f;
95362- struct seccomp_data sd;
95363 u32 ret = SECCOMP_RET_ALLOW;
95364
95365 /* Ensure unexpected behavior doesn't result in failing open. */
95366 if (WARN_ON(current->seccomp.filter == NULL))
95367 return SECCOMP_RET_KILL;
95368
95369- populate_seccomp_data(&sd);
95370-
95371 /*
95372 * All filters in the list are evaluated and the lowest BPF return
95373 * value always takes priority (ignoring the DATA).
95374 */
95375 for (f = current->seccomp.filter; f; f = f->prev) {
95376- u32 cur_ret = SK_RUN_FILTER(f->prog, (void *)&sd);
95377-
95378+ u32 cur_ret = sk_run_filter(NULL, f->insns);
95379 if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
95380 ret = cur_ret;
95381 }
95382@@ -206,20 +231,18 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
95383 struct seccomp_filter *filter;
95384 unsigned long fp_size = fprog->len * sizeof(struct sock_filter);
95385 unsigned long total_insns = fprog->len;
95386- struct sock_filter *fp;
95387- int new_len;
95388 long ret;
95389
95390 if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
95391 return -EINVAL;
95392
95393 for (filter = current->seccomp.filter; filter; filter = filter->prev)
95394- total_insns += filter->prog->len + 4; /* include a 4 instr penalty */
95395+ total_insns += filter->len + 4; /* include a 4 instr penalty */
95396 if (total_insns > MAX_INSNS_PER_PATH)
95397 return -ENOMEM;
95398
95399 /*
95400- * Installing a seccomp filter requires that the task has
95401+ * Installing a seccomp filter requires that the task have
95402 * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
95403 * This avoids scenarios where unprivileged tasks can affect the
95404 * behavior of privileged children.
95405@@ -229,51 +252,28 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
95406 CAP_SYS_ADMIN) != 0)
95407 return -EACCES;
95408
95409- fp = kzalloc(fp_size, GFP_KERNEL|__GFP_NOWARN);
95410- if (!fp)
95411- return -ENOMEM;
95412-
95413- /* Copy the instructions from fprog. */
95414- ret = -EFAULT;
95415- if (copy_from_user(fp, fprog->filter, fp_size))
95416- goto free_prog;
95417-
95418- /* Check and rewrite the fprog via the skb checker */
95419- ret = sk_chk_filter(fp, fprog->len);
95420- if (ret)
95421- goto free_prog;
95422-
95423- /* Check and rewrite the fprog for seccomp use */
95424- ret = seccomp_check_filter(fp, fprog->len);
95425- if (ret)
95426- goto free_prog;
95427-
95428- /* Convert 'sock_filter' insns to 'sock_filter_int' insns */
95429- ret = sk_convert_filter(fp, fprog->len, NULL, &new_len);
95430- if (ret)
95431- goto free_prog;
95432-
95433 /* Allocate a new seccomp_filter */
95434- ret = -ENOMEM;
95435- filter = kzalloc(sizeof(struct seccomp_filter),
95436+ filter = kzalloc(sizeof(struct seccomp_filter) + fp_size,
95437 GFP_KERNEL|__GFP_NOWARN);
95438 if (!filter)
95439- goto free_prog;
95440-
95441- filter->prog = kzalloc(sk_filter_size(new_len),
95442- GFP_KERNEL|__GFP_NOWARN);
95443- if (!filter->prog)
95444- goto free_filter;
95445-
95446- ret = sk_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);
95447- if (ret)
95448- goto free_filter_prog;
95449- kfree(fp);
95450-
95451+ return -ENOMEM;
95452 atomic_set(&filter->usage, 1);
95453- filter->prog->len = new_len;
95454+ filter->len = fprog->len;
95455
95456- sk_filter_select_runtime(filter->prog);
95457+ /* Copy the instructions from fprog. */
95458+ ret = -EFAULT;
95459+ if (copy_from_user(filter->insns, fprog->filter, fp_size))
95460+ goto fail;
95461+
95462+ /* Check and rewrite the fprog via the skb checker */
95463+ ret = sk_chk_filter(filter->insns, filter->len);
95464+ if (ret)
95465+ goto fail;
95466+
95467+ /* Check and rewrite the fprog for seccomp use */
95468+ ret = seccomp_check_filter(filter->insns, filter->len);
95469+ if (ret)
95470+ goto fail;
95471
95472 /*
95473 * If there is an existing filter, make it the prev and don't drop its
95474@@ -282,13 +282,8 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
95475 filter->prev = current->seccomp.filter;
95476 current->seccomp.filter = filter;
95477 return 0;
95478-
95479-free_filter_prog:
95480- kfree(filter->prog);
95481-free_filter:
95482+fail:
95483 kfree(filter);
95484-free_prog:
95485- kfree(fp);
95486 return ret;
95487 }
95488
95489@@ -298,7 +293,7 @@ free_prog:
95490 *
95491 * Returns 0 on success and non-zero otherwise.
95492 */
95493-static long seccomp_attach_user_filter(char __user *user_filter)
95494+long seccomp_attach_user_filter(char __user *user_filter)
95495 {
95496 struct sock_fprog fprog;
95497 long ret = -EFAULT;
95498@@ -337,7 +332,6 @@ void put_seccomp_filter(struct task_struct *tsk)
95499 while (orig && atomic_dec_and_test(&orig->usage)) {
95500 struct seccomp_filter *freeme = orig;
95501 orig = orig->prev;
95502- sk_filter_free(freeme->prog);
95503 kfree(freeme);
95504 }
95505 }
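
This section reverts seccomp from the 3.16 BPF-program representation (struct sk_filter plus sk_convert_filter) back to classic sock_filter instructions stored inline in struct seccomp_filter and run by sk_run_filter, with seccomp_bpf_load() servicing reads from struct seccomp_data by offset. The userspace side is the same either way; a minimal install of such a filter (allow-everything, for illustration only):

    #include <linux/filter.h>
    #include <linux/seccomp.h>
    #include <stddef.h>
    #include <sys/prctl.h>

    static struct sock_filter insns[] = {
        /* Load seccomp_data.nr (offset 0), the access that
         * seccomp_bpf_load() above resolves kernel-side. */
        BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                 offsetof(struct seccomp_data, nr)),
        BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
    };

    static int install_filter(void)
    {
        struct sock_fprog prog = {
            .len    = sizeof(insns) / sizeof(insns[0]),
            .filter = insns,
        };

        /* no_new_privs satisfies the permission check in
         * seccomp_attach_filter() for unprivileged callers. */
        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
            return -1;
        return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
    }
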
95506diff --git a/kernel/signal.c b/kernel/signal.c
95507index a4077e9..f0d4e5c 100644
95508--- a/kernel/signal.c
95509+++ b/kernel/signal.c
95510@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
95511
95512 int print_fatal_signals __read_mostly;
95513
95514-static void __user *sig_handler(struct task_struct *t, int sig)
95515+static __sighandler_t sig_handler(struct task_struct *t, int sig)
95516 {
95517 return t->sighand->action[sig - 1].sa.sa_handler;
95518 }
95519
95520-static int sig_handler_ignored(void __user *handler, int sig)
95521+static int sig_handler_ignored(__sighandler_t handler, int sig)
95522 {
95523 /* Is it explicitly or implicitly ignored? */
95524 return handler == SIG_IGN ||
95525@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
95526
95527 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
95528 {
95529- void __user *handler;
95530+ __sighandler_t handler;
95531
95532 handler = sig_handler(t, sig);
95533
95534@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
95535 atomic_inc(&user->sigpending);
95536 rcu_read_unlock();
95537
95538+ if (!override_rlimit)
95539+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
95540+
95541 if (override_rlimit ||
95542 atomic_read(&user->sigpending) <=
95543 task_rlimit(t, RLIMIT_SIGPENDING)) {
95544@@ -499,7 +502,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
95545
95546 int unhandled_signal(struct task_struct *tsk, int sig)
95547 {
95548- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
95549+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
95550 if (is_global_init(tsk))
95551 return 1;
95552 if (handler != SIG_IGN && handler != SIG_DFL)
95553@@ -793,6 +796,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
95554 }
95555 }
95556
95557+ /* allow glibc communication via tgkill to other threads in our
95558+ thread group */
95559+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
95560+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
95561+ && gr_handle_signal(t, sig))
95562+ return -EPERM;
95563+
95564 return security_task_kill(t, info, sig, 0);
95565 }
95566
95567@@ -1176,7 +1186,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
95568 return send_signal(sig, info, p, 1);
95569 }
95570
95571-static int
95572+int
95573 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
95574 {
95575 return send_signal(sig, info, t, 0);
95576@@ -1213,6 +1223,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
95577 unsigned long int flags;
95578 int ret, blocked, ignored;
95579 struct k_sigaction *action;
95580+ int is_unhandled = 0;
95581
95582 spin_lock_irqsave(&t->sighand->siglock, flags);
95583 action = &t->sighand->action[sig-1];
95584@@ -1227,9 +1238,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
95585 }
95586 if (action->sa.sa_handler == SIG_DFL)
95587 t->signal->flags &= ~SIGNAL_UNKILLABLE;
95588+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
95589+ is_unhandled = 1;
95590 ret = specific_send_sig_info(sig, info, t);
95591 spin_unlock_irqrestore(&t->sighand->siglock, flags);
95592
95593+ /* only deal with unhandled signals; Java etc. trigger SIGSEGV during
95594+ normal operation */
95595+ if (is_unhandled) {
95596+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
95597+ gr_handle_crash(t, sig);
95598+ }
95599+
95600 return ret;
95601 }
95602
95603@@ -1296,8 +1316,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
95604 ret = check_kill_permission(sig, info, p);
95605 rcu_read_unlock();
95606
95607- if (!ret && sig)
95608+ if (!ret && sig) {
95609 ret = do_send_sig_info(sig, info, p, true);
95610+ if (!ret)
95611+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
95612+ }
95613
95614 return ret;
95615 }
95616@@ -2903,7 +2926,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
95617 int error = -ESRCH;
95618
95619 rcu_read_lock();
95620- p = find_task_by_vpid(pid);
95621+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
95622+ /* allow glibc communication via tgkill to other threads in our
95623+ thread group */
95624+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
95625+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
95626+ p = find_task_by_vpid_unrestricted(pid);
95627+ else
95628+#endif
95629+ p = find_task_by_vpid(pid);
95630 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
95631 error = check_kill_permission(sig, info, p);
95632 /*
95633@@ -3236,8 +3267,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
95634 }
95635 seg = get_fs();
95636 set_fs(KERNEL_DS);
95637- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
95638- (stack_t __force __user *) &uoss,
95639+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
95640+ (stack_t __force_user *) &uoss,
95641 compat_user_stack_pointer());
95642 set_fs(seg);
95643 if (ret >= 0 && uoss_ptr) {
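
Both signal-path hooks above carve one specific case out of grsecurity's chroot signal restrictions: glibc's setxid broadcast. When one thread calls setuid(), glibc raises its internal SIGSETXID via tgkill() to every sibling thread so that credentials change process-wide. Kernel-side SIGRTMIN is 32, so the patch's SIGRTMIN+1 is signal 33; userspace's SIGRTMIN is shifted past the two glibc-reserved slots. The syscall shape being whitelisted, reduced to a sketch:

    #include <sys/syscall.h>
    #include <unistd.h>

    /* glibc's nptl setxid machinery, reduced to its syscall: signal 33
     * is SIGSETXID from the kernel's point of view (32 + 1). */
    static long setxid_broadcast(pid_t tgid, pid_t tid)
    {
        return syscall(SYS_tgkill, tgid, tid, 33);
    }
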
95644diff --git a/kernel/smpboot.c b/kernel/smpboot.c
95645index eb89e18..a4e6792 100644
95646--- a/kernel/smpboot.c
95647+++ b/kernel/smpboot.c
95648@@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
95649 }
95650 smpboot_unpark_thread(plug_thread, cpu);
95651 }
95652- list_add(&plug_thread->list, &hotplug_threads);
95653+ pax_list_add(&plug_thread->list, &hotplug_threads);
95654 out:
95655 mutex_unlock(&smpboot_threads_lock);
95656 return ret;
95657@@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
95658 {
95659 get_online_cpus();
95660 mutex_lock(&smpboot_threads_lock);
95661- list_del(&plug_thread->list);
95662+ pax_list_del(&plug_thread->list);
95663 smpboot_destroy_threads(plug_thread);
95664 mutex_unlock(&smpboot_threads_lock);
95665 put_online_cpus();
95666diff --git a/kernel/softirq.c b/kernel/softirq.c
95667index 5918d22..e95d1926 100644
95668--- a/kernel/softirq.c
95669+++ b/kernel/softirq.c
95670@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
95671 EXPORT_SYMBOL(irq_stat);
95672 #endif
95673
95674-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
95675+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
95676
95677 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
95678
95679@@ -266,7 +266,7 @@ restart:
95680 kstat_incr_softirqs_this_cpu(vec_nr);
95681
95682 trace_softirq_entry(vec_nr);
95683- h->action(h);
95684+ h->action();
95685 trace_softirq_exit(vec_nr);
95686 if (unlikely(prev_count != preempt_count())) {
95687 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
95688@@ -426,7 +426,7 @@ void __raise_softirq_irqoff(unsigned int nr)
95689 or_softirq_pending(1UL << nr);
95690 }
95691
95692-void open_softirq(int nr, void (*action)(struct softirq_action *))
95693+void __init open_softirq(int nr, void (*action)(void))
95694 {
95695 softirq_vec[nr].action = action;
95696 }
95697@@ -478,7 +478,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
95698 }
95699 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
95700
95701-static void tasklet_action(struct softirq_action *a)
95702+static void tasklet_action(void)
95703 {
95704 struct tasklet_struct *list;
95705
95706@@ -514,7 +514,7 @@ static void tasklet_action(struct softirq_action *a)
95707 }
95708 }
95709
95710-static void tasklet_hi_action(struct softirq_action *a)
95711+static __latent_entropy void tasklet_hi_action(void)
95712 {
95713 struct tasklet_struct *list;
95714
95715@@ -741,7 +741,7 @@ static struct notifier_block cpu_nfb = {
95716 .notifier_call = cpu_callback
95717 };
95718
95719-static struct smp_hotplug_thread softirq_threads = {
95720+static struct smp_hotplug_thread softirq_threads __read_only = {
95721 .store = &ksoftirqd,
95722 .thread_should_run = ksoftirqd_should_run,
95723 .thread_fn = run_ksoftirqd,
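
The softirq changes are one mechanism: since no handler ever used its struct softirq_action * argument, dropping it lets softirq_vec become a page-aligned, read-only dispatch table populated only by __init code, and lets handlers carry __latent_entropy (a PaX plugin that mixes entropy-gathering arithmetic into marked functions). The resulting dispatch shape, in plain C with illustrative names:

    typedef void (*softirq_fn)(void);

    static void timer_softirq(void)   { /* run expired timers */ }
    static void tasklet_softirq(void) { /* run scheduled tasklets */ }

    /* Fixed at build time, placed in read-only memory. */
    static softirq_fn const softirq_vec_sketch[] = {
        timer_softirq,
        tasklet_softirq,
    };

    static void do_softirq_sketch(unsigned int nr)
    {
        softirq_vec_sketch[nr]();     /* was h->action(h) */
    }
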
95724diff --git a/kernel/sys.c b/kernel/sys.c
95725index 66a751e..a42497e 100644
95726--- a/kernel/sys.c
95727+++ b/kernel/sys.c
95728@@ -148,6 +148,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
95729 error = -EACCES;
95730 goto out;
95731 }
95732+
95733+ if (gr_handle_chroot_setpriority(p, niceval)) {
95734+ error = -EACCES;
95735+ goto out;
95736+ }
95737+
95738 no_nice = security_task_setnice(p, niceval);
95739 if (no_nice) {
95740 error = no_nice;
95741@@ -351,6 +357,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
95742 goto error;
95743 }
95744
95745+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
95746+ goto error;
95747+
95748+ if (!gid_eq(new->gid, old->gid)) {
95749+ /* make sure we generate a learn log for what will
95750+ end up being a role transition after a full-learning
95751+ policy is generated
95752+ CAP_SETGID is required to perform a transition
95753+ we may not log a CAP_SETGID check above, e.g.
95754+ in the case where new rgid = old egid
95755+ */
95756+ gr_learn_cap(current, new, CAP_SETGID);
95757+ }
95758+
95759 if (rgid != (gid_t) -1 ||
95760 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
95761 new->sgid = new->egid;
95762@@ -386,6 +406,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
95763 old = current_cred();
95764
95765 retval = -EPERM;
95766+
95767+ if (gr_check_group_change(kgid, kgid, kgid))
95768+ goto error;
95769+
95770 if (ns_capable(old->user_ns, CAP_SETGID))
95771 new->gid = new->egid = new->sgid = new->fsgid = kgid;
95772 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
95773@@ -403,7 +427,7 @@ error:
95774 /*
95775 * change the user struct in a credentials set to match the new UID
95776 */
95777-static int set_user(struct cred *new)
95778+int set_user(struct cred *new)
95779 {
95780 struct user_struct *new_user;
95781
95782@@ -483,7 +507,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
95783 goto error;
95784 }
95785
95786+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
95787+ goto error;
95788+
95789 if (!uid_eq(new->uid, old->uid)) {
95790+ /* make sure we generate a learn log for what will
95791+ end up being a role transition after a full-learning
95792+ policy is generated
95793+ CAP_SETUID is required to perform a transition
95794+ we may not log a CAP_SETUID check above, e.g.
95795+ in the case where new ruid = old euid
95796+ */
95797+ gr_learn_cap(current, new, CAP_SETUID);
95798 retval = set_user(new);
95799 if (retval < 0)
95800 goto error;
95801@@ -533,6 +568,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
95802 old = current_cred();
95803
95804 retval = -EPERM;
95805+
95806+ if (gr_check_crash_uid(kuid))
95807+ goto error;
95808+ if (gr_check_user_change(kuid, kuid, kuid))
95809+ goto error;
95810+
95811 if (ns_capable(old->user_ns, CAP_SETUID)) {
95812 new->suid = new->uid = kuid;
95813 if (!uid_eq(kuid, old->uid)) {
95814@@ -602,6 +643,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
95815 goto error;
95816 }
95817
95818+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
95819+ goto error;
95820+
95821 if (ruid != (uid_t) -1) {
95822 new->uid = kruid;
95823 if (!uid_eq(kruid, old->uid)) {
95824@@ -684,6 +728,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
95825 goto error;
95826 }
95827
95828+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
95829+ goto error;
95830+
95831 if (rgid != (gid_t) -1)
95832 new->gid = krgid;
95833 if (egid != (gid_t) -1)
95834@@ -745,12 +792,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
95835 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
95836 ns_capable(old->user_ns, CAP_SETUID)) {
95837 if (!uid_eq(kuid, old->fsuid)) {
95838+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
95839+ goto error;
95840+
95841 new->fsuid = kuid;
95842 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
95843 goto change_okay;
95844 }
95845 }
95846
95847+error:
95848 abort_creds(new);
95849 return old_fsuid;
95850
95851@@ -783,12 +834,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
95852 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
95853 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
95854 ns_capable(old->user_ns, CAP_SETGID)) {
95855+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
95856+ goto error;
95857+
95858 if (!gid_eq(kgid, old->fsgid)) {
95859 new->fsgid = kgid;
95860 goto change_okay;
95861 }
95862 }
95863
95864+error:
95865 abort_creds(new);
95866 return old_fsgid;
95867
95868@@ -1167,19 +1222,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
95869 return -EFAULT;
95870
95871 down_read(&uts_sem);
95872- error = __copy_to_user(&name->sysname, &utsname()->sysname,
95873+ error = __copy_to_user(name->sysname, &utsname()->sysname,
95874 __OLD_UTS_LEN);
95875 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
95876- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
95877+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
95878 __OLD_UTS_LEN);
95879 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
95880- error |= __copy_to_user(&name->release, &utsname()->release,
95881+ error |= __copy_to_user(name->release, &utsname()->release,
95882 __OLD_UTS_LEN);
95883 error |= __put_user(0, name->release + __OLD_UTS_LEN);
95884- error |= __copy_to_user(&name->version, &utsname()->version,
95885+ error |= __copy_to_user(name->version, &utsname()->version,
95886 __OLD_UTS_LEN);
95887 error |= __put_user(0, name->version + __OLD_UTS_LEN);
95888- error |= __copy_to_user(&name->machine, &utsname()->machine,
95889+ error |= __copy_to_user(name->machine, &utsname()->machine,
95890 __OLD_UTS_LEN);
95891 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
95892 up_read(&uts_sem);
95893@@ -1381,6 +1436,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
95894 */
95895 new_rlim->rlim_cur = 1;
95896 }
95897+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
95898+ is changed to a lower value. Since tasks can be created by the same
95899+ user in between this limit change and an execve by this task, force
95900+ a recheck only for this task by setting PF_NPROC_EXCEEDED
95901+ */
95902+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
95903+ tsk->flags |= PF_NPROC_EXCEEDED;
95904 }
95905 if (!retval) {
95906 if (old_rlim)
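
The PF_NPROC_EXCEEDED flag closes a gap: a task forked while within RLIMIT_NPROC can outlive a later, lower limit, so execve re-applies the check for flagged tasks. The underlying policy is observable from userspace; for an unprivileged user already running other processes, the fork below fails with EAGAIN:

    #include <stdio.h>
    #include <sys/resource.h>
    #include <unistd.h>

    int main(void)
    {
        struct rlimit rl = { .rlim_cur = 1, .rlim_max = 1 };
        pid_t pid;

        if (setrlimit(RLIMIT_NPROC, &rl))
            perror("setrlimit");
        pid = fork();                 /* EAGAIN once over the limit */
        if (pid < 0)
            perror("fork");
        else if (pid == 0)
            _exit(0);
        return 0;
    }
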
95907diff --git a/kernel/sysctl.c b/kernel/sysctl.c
95908index 75b22e2..65c0ac8 100644
95909--- a/kernel/sysctl.c
95910+++ b/kernel/sysctl.c
95911@@ -94,7 +94,6 @@
95912
95913
95914 #if defined(CONFIG_SYSCTL)
95915-
95916 /* External variables not in a header file. */
95917 extern int max_threads;
95918 extern int suid_dumpable;
95919@@ -115,19 +114,20 @@ extern int sysctl_nr_trim_pages;
95920
95921 /* Constants used for minimum and maximum */
95922 #ifdef CONFIG_LOCKUP_DETECTOR
95923-static int sixty = 60;
95924+static int sixty __read_only = 60;
95925 #endif
95926
95927-static int __maybe_unused neg_one = -1;
95928+static int __maybe_unused neg_one __read_only = -1;
95929
95930-static int zero;
95931-static int __maybe_unused one = 1;
95932-static int __maybe_unused two = 2;
95933-static int __maybe_unused four = 4;
95934-static unsigned long one_ul = 1;
95935-static int one_hundred = 100;
95936+static int zero __read_only = 0;
95937+static int __maybe_unused one __read_only = 1;
95938+static int __maybe_unused two __read_only = 2;
95939+static int __maybe_unused three __read_only = 3;
95940+static int __maybe_unused four __read_only = 4;
95941+static unsigned long one_ul __read_only = 1;
95942+static int one_hundred __read_only = 100;
95943 #ifdef CONFIG_PRINTK
95944-static int ten_thousand = 10000;
95945+static int ten_thousand __read_only = 10000;
95946 #endif
95947
95948 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
95949@@ -181,10 +181,8 @@ static int proc_taint(struct ctl_table *table, int write,
95950 void __user *buffer, size_t *lenp, loff_t *ppos);
95951 #endif
95952
95953-#ifdef CONFIG_PRINTK
95954 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
95955 void __user *buffer, size_t *lenp, loff_t *ppos);
95956-#endif
95957
95958 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
95959 void __user *buffer, size_t *lenp, loff_t *ppos);
95960@@ -215,6 +213,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
95961
95962 #endif
95963
95964+extern struct ctl_table grsecurity_table[];
95965+
95966 static struct ctl_table kern_table[];
95967 static struct ctl_table vm_table[];
95968 static struct ctl_table fs_table[];
95969@@ -229,6 +229,20 @@ extern struct ctl_table epoll_table[];
95970 int sysctl_legacy_va_layout;
95971 #endif
95972
95973+#ifdef CONFIG_PAX_SOFTMODE
95974+static ctl_table pax_table[] = {
95975+ {
95976+ .procname = "softmode",
95977+ .data = &pax_softmode,
95978+ .maxlen = sizeof(unsigned int),
95979+ .mode = 0600,
95980+ .proc_handler = &proc_dointvec,
95981+ },
95982+
95983+ { }
95984+};
95985+#endif
95986+
95987 /* The default sysctl tables: */
95988
95989 static struct ctl_table sysctl_base_table[] = {
95990@@ -277,6 +291,22 @@ static int max_extfrag_threshold = 1000;
95991 #endif
95992
95993 static struct ctl_table kern_table[] = {
95994+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
95995+ {
95996+ .procname = "grsecurity",
95997+ .mode = 0500,
95998+ .child = grsecurity_table,
95999+ },
96000+#endif
96001+
96002+#ifdef CONFIG_PAX_SOFTMODE
96003+ {
96004+ .procname = "pax",
96005+ .mode = 0500,
96006+ .child = pax_table,
96007+ },
96008+#endif
96009+
96010 {
96011 .procname = "sched_child_runs_first",
96012 .data = &sysctl_sched_child_runs_first,
96013@@ -641,7 +671,7 @@ static struct ctl_table kern_table[] = {
96014 .data = &modprobe_path,
96015 .maxlen = KMOD_PATH_LEN,
96016 .mode = 0644,
96017- .proc_handler = proc_dostring,
96018+ .proc_handler = proc_dostring_modpriv,
96019 },
96020 {
96021 .procname = "modules_disabled",
96022@@ -808,16 +838,20 @@ static struct ctl_table kern_table[] = {
96023 .extra1 = &zero,
96024 .extra2 = &one,
96025 },
96026+#endif
96027 {
96028 .procname = "kptr_restrict",
96029 .data = &kptr_restrict,
96030 .maxlen = sizeof(int),
96031 .mode = 0644,
96032 .proc_handler = proc_dointvec_minmax_sysadmin,
96033+#ifdef CONFIG_GRKERNSEC_HIDESYM
96034+ .extra1 = &two,
96035+#else
96036 .extra1 = &zero,
96037+#endif
96038 .extra2 = &two,
96039 },
96040-#endif
96041 {
96042 .procname = "ngroups_max",
96043 .data = &ngroups_max,
96044@@ -1073,10 +1107,17 @@ static struct ctl_table kern_table[] = {
96045 */
96046 {
96047 .procname = "perf_event_paranoid",
96048- .data = &sysctl_perf_event_paranoid,
96049- .maxlen = sizeof(sysctl_perf_event_paranoid),
96050+ .data = &sysctl_perf_event_legitimately_concerned,
96051+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
96052 .mode = 0644,
96053- .proc_handler = proc_dointvec,
96054+ /* go ahead, be a hero */
96055+ .proc_handler = proc_dointvec_minmax_sysadmin,
96056+ .extra1 = &neg_one,
96057+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
96058+ .extra2 = &three,
96059+#else
96060+ .extra2 = &two,
96061+#endif
96062 },
96063 {
96064 .procname = "perf_event_mlock_kb",
96065@@ -1338,6 +1379,13 @@ static struct ctl_table vm_table[] = {
96066 .proc_handler = proc_dointvec_minmax,
96067 .extra1 = &zero,
96068 },
96069+ {
96070+ .procname = "heap_stack_gap",
96071+ .data = &sysctl_heap_stack_gap,
96072+ .maxlen = sizeof(sysctl_heap_stack_gap),
96073+ .mode = 0644,
96074+ .proc_handler = proc_doulongvec_minmax,
96075+ },
96076 #else
96077 {
96078 .procname = "nr_trim_pages",
96079@@ -1827,6 +1875,16 @@ int proc_dostring(struct ctl_table *table, int write,
96080 (char __user *)buffer, lenp, ppos);
96081 }
96082
96083+int proc_dostring_modpriv(struct ctl_table *table, int write,
96084+ void __user *buffer, size_t *lenp, loff_t *ppos)
96085+{
96086+ if (write && !capable(CAP_SYS_MODULE))
96087+ return -EPERM;
96088+
96089+ return _proc_do_string(table->data, table->maxlen, write,
96090+ buffer, lenp, ppos);
96091+}
96092+
96093 static size_t proc_skip_spaces(char **buf)
96094 {
96095 size_t ret;
96096@@ -1932,6 +1990,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
96097 len = strlen(tmp);
96098 if (len > *size)
96099 len = *size;
96100+ if (len > sizeof(tmp))
96101+ len = sizeof(tmp);
96102 if (copy_to_user(*buf, tmp, len))
96103 return -EFAULT;
96104 *size -= len;
96105@@ -2109,7 +2169,7 @@ int proc_dointvec(struct ctl_table *table, int write,
96106 static int proc_taint(struct ctl_table *table, int write,
96107 void __user *buffer, size_t *lenp, loff_t *ppos)
96108 {
96109- struct ctl_table t;
96110+ ctl_table_no_const t;
96111 unsigned long tmptaint = get_taint();
96112 int err;
96113
96114@@ -2137,7 +2197,6 @@ static int proc_taint(struct ctl_table *table, int write,
96115 return err;
96116 }
96117
96118-#ifdef CONFIG_PRINTK
96119 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
96120 void __user *buffer, size_t *lenp, loff_t *ppos)
96121 {
96122@@ -2146,7 +2205,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
96123
96124 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
96125 }
96126-#endif
96127
96128 struct do_proc_dointvec_minmax_conv_param {
96129 int *min;
96130@@ -2706,6 +2764,12 @@ int proc_dostring(struct ctl_table *table, int write,
96131 return -ENOSYS;
96132 }
96133
96134+int proc_dostring_modpriv(struct ctl_table *table, int write,
96135+ void __user *buffer, size_t *lenp, loff_t *ppos)
96136+{
96137+ return -ENOSYS;
96138+}
96139+
96140 int proc_dointvec(struct ctl_table *table, int write,
96141 void __user *buffer, size_t *lenp, loff_t *ppos)
96142 {
96143@@ -2762,5 +2826,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
96144 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
96145 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
96146 EXPORT_SYMBOL(proc_dostring);
96147+EXPORT_SYMBOL(proc_dostring_modpriv);
96148 EXPORT_SYMBOL(proc_doulongvec_minmax);
96149 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
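
Three sysctl hardenings land in this file: the clamp variables (zero, one, two, and friends) become __read_only so minmax bounds cannot be retargeted at runtime; writes to kernel.modprobe now go through proc_dostring_modpriv, which additionally demands CAP_SYS_MODULE; and under GRKERNSEC_HIDESYM, kptr_restrict is clamped to the range 2..2, so %pK prints zeroed pointers for everyone. The last one is easy to verify from userspace:

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/sys/kernel/kptr_restrict", "r");
        int val;

        if (f && fscanf(f, "%d", &val) == 1)
            printf("kptr_restrict = %d\n", val);  /* 2 under HIDESYM */
        if (f)
            fclose(f);
        return 0;
    }
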
96150diff --git a/kernel/taskstats.c b/kernel/taskstats.c
96151index 13d2f7c..c93d0b0 100644
96152--- a/kernel/taskstats.c
96153+++ b/kernel/taskstats.c
96154@@ -28,9 +28,12 @@
96155 #include <linux/fs.h>
96156 #include <linux/file.h>
96157 #include <linux/pid_namespace.h>
96158+#include <linux/grsecurity.h>
96159 #include <net/genetlink.h>
96160 #include <linux/atomic.h>
96161
96162+extern int gr_is_taskstats_denied(int pid);
96163+
96164 /*
96165 * Maximum length of a cpumask that can be specified in
96166 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
96167@@ -576,6 +579,9 @@ err:
96168
96169 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
96170 {
96171+ if (gr_is_taskstats_denied(current->pid))
96172+ return -EACCES;
96173+
96174 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
96175 return cmd_attr_register_cpumask(info);
96176 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
96177diff --git a/kernel/time.c b/kernel/time.c
96178index 7c7964c..2a0d412 100644
96179--- a/kernel/time.c
96180+++ b/kernel/time.c
96181@@ -172,6 +172,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
96182 return error;
96183
96184 if (tz) {
96185+ /* we log in do_settimeofday called below, so don't log twice
96186+ */
96187+ if (!tv)
96188+ gr_log_timechange();
96189+
96190 sys_tz = *tz;
96191 update_vsyscall_tz();
96192 if (firsttime) {
96193diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
96194index fe75444..b8a1463 100644
96195--- a/kernel/time/alarmtimer.c
96196+++ b/kernel/time/alarmtimer.c
96197@@ -464,18 +464,26 @@ static enum alarmtimer_type clock2alarm(clockid_t clockid)
96198 static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
96199 ktime_t now)
96200 {
96201+ unsigned long flags;
96202 struct k_itimer *ptr = container_of(alarm, struct k_itimer,
96203 it.alarm.alarmtimer);
96204- if (posix_timer_event(ptr, 0) != 0)
96205- ptr->it_overrun++;
96206+ enum alarmtimer_restart result = ALARMTIMER_NORESTART;
96207+
96208+ spin_lock_irqsave(&ptr->it_lock, flags);
96209+ if ((ptr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) {
96210+ if (posix_timer_event(ptr, 0) != 0)
96211+ ptr->it_overrun++;
96212+ }
96213
96214 /* Re-add periodic timers */
96215 if (ptr->it.alarm.interval.tv64) {
96216 ptr->it_overrun += alarm_forward(alarm, now,
96217 ptr->it.alarm.interval);
96218- return ALARMTIMER_RESTART;
96219+ result = ALARMTIMER_RESTART;
96220 }
96221- return ALARMTIMER_NORESTART;
96222+ spin_unlock_irqrestore(&ptr->it_lock, flags);
96223+
96224+ return result;
96225 }
96226
96227 /**
96228@@ -541,18 +549,22 @@ static int alarm_timer_create(struct k_itimer *new_timer)
96229 * @new_timer: k_itimer pointer
96230 * @cur_setting: itimerspec data to fill
96231 *
96232- * Copies the itimerspec data out from the k_itimer
96233+ * Copies out the current itimerspec data
96234 */
96235 static void alarm_timer_get(struct k_itimer *timr,
96236 struct itimerspec *cur_setting)
96237 {
96238- memset(cur_setting, 0, sizeof(struct itimerspec));
96239+ ktime_t relative_expiry_time =
96240+ alarm_expires_remaining(&(timr->it.alarm.alarmtimer));
96241
96242- cur_setting->it_interval =
96243- ktime_to_timespec(timr->it.alarm.interval);
96244- cur_setting->it_value =
96245- ktime_to_timespec(timr->it.alarm.alarmtimer.node.expires);
96246- return;
96247+ if (ktime_to_ns(relative_expiry_time) > 0) {
96248+ cur_setting->it_value = ktime_to_timespec(relative_expiry_time);
96249+ } else {
96250+ cur_setting->it_value.tv_sec = 0;
96251+ cur_setting->it_value.tv_nsec = 0;
96252+ }
96253+
96254+ cur_setting->it_interval = ktime_to_timespec(timr->it.alarm.interval);
96255 }
96256
96257 /**
96258@@ -811,7 +823,7 @@ static int __init alarmtimer_init(void)
96259 struct platform_device *pdev;
96260 int error = 0;
96261 int i;
96262- struct k_clock alarm_clock = {
96263+ static struct k_clock alarm_clock = {
96264 .clock_getres = alarm_clock_getres,
96265 .clock_get = alarm_clock_get,
96266 .timer_create = alarm_timer_create,
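
The two alarmtimer hunks mirror upstream fixes: alarm_handle_timer must take it_lock and skip posix_timer_event() for SIGEV_NONE timers, and alarm_timer_get must report the remaining time rather than the raw expiry. Both behaviors are visible through the ordinary POSIX timer API; CLOCK_REALTIME_ALARM needs CAP_WAKE_ALARM, so this sketch uses CLOCK_REALTIME, which has the same shape (link with -lrt on older glibc):

    #include <signal.h>
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        struct sigevent   sev = { .sigev_notify = SIGEV_NONE };
        struct itimerspec its = { .it_value.tv_sec = 5 }, cur;
        timer_t t;

        if (timer_create(CLOCK_REALTIME, &sev, &t))
            return 1;
        timer_settime(t, 0, &its, NULL);
        timer_gettime(t, &cur);       /* remaining time, counting down */
        printf("%lds left\n", (long)cur.it_value.tv_sec);
        timer_delete(t);
        return 0;
    }
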
96267diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
96268index 32d8d6a..11486af 100644
96269--- a/kernel/time/timekeeping.c
96270+++ b/kernel/time/timekeeping.c
96271@@ -15,6 +15,7 @@
96272 #include <linux/init.h>
96273 #include <linux/mm.h>
96274 #include <linux/sched.h>
96275+#include <linux/grsecurity.h>
96276 #include <linux/syscore_ops.h>
96277 #include <linux/clocksource.h>
96278 #include <linux/jiffies.h>
96279@@ -502,6 +503,8 @@ int do_settimeofday(const struct timespec *tv)
96280 if (!timespec_valid_strict(tv))
96281 return -EINVAL;
96282
96283+ gr_log_timechange();
96284+
96285 raw_spin_lock_irqsave(&timekeeper_lock, flags);
96286 write_seqcount_begin(&timekeeper_seq);
96287
96288diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
96289index 61ed862..3b52c65 100644
96290--- a/kernel/time/timer_list.c
96291+++ b/kernel/time/timer_list.c
96292@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
96293
96294 static void print_name_offset(struct seq_file *m, void *sym)
96295 {
96296+#ifdef CONFIG_GRKERNSEC_HIDESYM
96297+ SEQ_printf(m, "<%p>", NULL);
96298+#else
96299 char symname[KSYM_NAME_LEN];
96300
96301 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
96302 SEQ_printf(m, "<%pK>", sym);
96303 else
96304 SEQ_printf(m, "%s", symname);
96305+#endif
96306 }
96307
96308 static void
96309@@ -119,7 +123,11 @@ next_one:
96310 static void
96311 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
96312 {
96313+#ifdef CONFIG_GRKERNSEC_HIDESYM
96314+ SEQ_printf(m, " .base: %p\n", NULL);
96315+#else
96316 SEQ_printf(m, " .base: %pK\n", base);
96317+#endif
96318 SEQ_printf(m, " .index: %d\n",
96319 base->index);
96320 SEQ_printf(m, " .resolution: %Lu nsecs\n",
96321@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
96322 {
96323 struct proc_dir_entry *pe;
96324
96325+#ifdef CONFIG_GRKERNSEC_PROC_ADD
96326+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
96327+#else
96328 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
96329+#endif
96330 if (!pe)
96331 return -ENOMEM;
96332 return 0;
96333diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
96334index 1fb08f2..ca4bb1e 100644
96335--- a/kernel/time/timer_stats.c
96336+++ b/kernel/time/timer_stats.c
96337@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
96338 static unsigned long nr_entries;
96339 static struct entry entries[MAX_ENTRIES];
96340
96341-static atomic_t overflow_count;
96342+static atomic_unchecked_t overflow_count;
96343
96344 /*
96345 * The entries are in a hash-table, for fast lookup:
96346@@ -140,7 +140,7 @@ static void reset_entries(void)
96347 nr_entries = 0;
96348 memset(entries, 0, sizeof(entries));
96349 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
96350- atomic_set(&overflow_count, 0);
96351+ atomic_set_unchecked(&overflow_count, 0);
96352 }
96353
96354 static struct entry *alloc_entry(void)
96355@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
96356 if (likely(entry))
96357 entry->count++;
96358 else
96359- atomic_inc(&overflow_count);
96360+ atomic_inc_unchecked(&overflow_count);
96361
96362 out_unlock:
96363 raw_spin_unlock_irqrestore(lock, flags);
96364@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
96365
96366 static void print_name_offset(struct seq_file *m, unsigned long addr)
96367 {
96368+#ifdef CONFIG_GRKERNSEC_HIDESYM
96369+ seq_printf(m, "<%p>", NULL);
96370+#else
96371 char symname[KSYM_NAME_LEN];
96372
96373 if (lookup_symbol_name(addr, symname) < 0)
96374- seq_printf(m, "<%p>", (void *)addr);
96375+ seq_printf(m, "<%pK>", (void *)addr);
96376 else
96377 seq_printf(m, "%s", symname);
96378+#endif
96379 }
96380
96381 static int tstats_show(struct seq_file *m, void *v)
96382@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
96383
96384 seq_puts(m, "Timer Stats Version: v0.3\n");
96385 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
96386- if (atomic_read(&overflow_count))
96387- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
96388+ if (atomic_read_unchecked(&overflow_count))
96389+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
96390 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
96391
96392 for (i = 0; i < nr_entries; i++) {
96393@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
96394 {
96395 struct proc_dir_entry *pe;
96396
96397+#ifdef CONFIG_GRKERNSEC_PROC_ADD
96398+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
96399+#else
96400 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
96401+#endif
96402 if (!pe)
96403 return -ENOMEM;
96404 return 0;
96405diff --git a/kernel/timer.c b/kernel/timer.c
96406index 3bb01a3..0e7760e 100644
96407--- a/kernel/timer.c
96408+++ b/kernel/timer.c
96409@@ -1393,7 +1393,7 @@ void update_process_times(int user_tick)
96410 /*
96411 * This function runs timers and the timer-tq in bottom half context.
96412 */
96413-static void run_timer_softirq(struct softirq_action *h)
96414+static __latent_entropy void run_timer_softirq(void)
96415 {
96416 struct tvec_base *base = __this_cpu_read(tvec_bases);
96417
96418@@ -1456,7 +1456,7 @@ static void process_timeout(unsigned long __data)
96419 *
96420 * In all cases the return value is guaranteed to be non-negative.
96421 */
96422-signed long __sched schedule_timeout(signed long timeout)
96423+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
96424 {
96425 struct timer_list timer;
96426 unsigned long expire;
96427diff --git a/kernel/torture.c b/kernel/torture.c
96428index 40bb511..91190b9 100644
96429--- a/kernel/torture.c
96430+++ b/kernel/torture.c
96431@@ -484,7 +484,7 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
96432 mutex_lock(&fullstop_mutex);
96433 if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
96434 VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
96435- ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
96436+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;
96437 } else {
96438 pr_warn("Concurrent rmmod and shutdown illegal!\n");
96439 }
96440@@ -551,14 +551,14 @@ static int torture_stutter(void *arg)
96441 if (!torture_must_stop()) {
96442 if (stutter > 1) {
96443 schedule_timeout_interruptible(stutter - 1);
96444- ACCESS_ONCE(stutter_pause_test) = 2;
96445+ ACCESS_ONCE_RW(stutter_pause_test) = 2;
96446 }
96447 schedule_timeout_interruptible(1);
96448- ACCESS_ONCE(stutter_pause_test) = 1;
96449+ ACCESS_ONCE_RW(stutter_pause_test) = 1;
96450 }
96451 if (!torture_must_stop())
96452 schedule_timeout_interruptible(stutter);
96453- ACCESS_ONCE(stutter_pause_test) = 0;
96454+ ACCESS_ONCE_RW(stutter_pause_test) = 0;
96455 torture_shutdown_absorb("torture_stutter");
96456 } while (!torture_must_stop());
96457 torture_kthread_stopping("torture_stutter");
96458@@ -645,7 +645,7 @@ bool torture_cleanup(void)
96459 schedule_timeout_uninterruptible(10);
96460 return true;
96461 }
96462- ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
96463+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_RMMOD;
96464 mutex_unlock(&fullstop_mutex);
96465 torture_shutdown_cleanup();
96466 torture_shuffle_cleanup();
96467diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
96468index c1bd4ad..4b861dc 100644
96469--- a/kernel/trace/blktrace.c
96470+++ b/kernel/trace/blktrace.c
96471@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
96472 struct blk_trace *bt = filp->private_data;
96473 char buf[16];
96474
96475- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
96476+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
96477
96478 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
96479 }
96480@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
96481 return 1;
96482
96483 bt = buf->chan->private_data;
96484- atomic_inc(&bt->dropped);
96485+ atomic_inc_unchecked(&bt->dropped);
96486 return 0;
96487 }
96488
96489@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
96490
96491 bt->dir = dir;
96492 bt->dev = dev;
96493- atomic_set(&bt->dropped, 0);
96494+ atomic_set_unchecked(&bt->dropped, 0);
96495 INIT_LIST_HEAD(&bt->running_list);
96496
96497 ret = -EIO;
96498diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
96499index ac9d1da..ce98b35 100644
96500--- a/kernel/trace/ftrace.c
96501+++ b/kernel/trace/ftrace.c
96502@@ -1920,12 +1920,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
96503 if (unlikely(ftrace_disabled))
96504 return 0;
96505
96506+ ret = ftrace_arch_code_modify_prepare();
96507+ FTRACE_WARN_ON(ret);
96508+ if (ret)
96509+ return 0;
96510+
96511 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
96512+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
96513 if (ret) {
96514 ftrace_bug(ret, ip);
96515- return 0;
96516 }
96517- return 1;
96518+ return ret ? 0 : 1;
96519 }
96520
96521 /*
96522@@ -4126,8 +4131,10 @@ static int ftrace_process_locs(struct module *mod,
96523 if (!count)
96524 return 0;
96525
96526+ pax_open_kernel();
96527 sort(start, count, sizeof(*start),
96528 ftrace_cmp_ips, ftrace_swap_ips);
96529+ pax_close_kernel();
96530
96531 start_pg = ftrace_allocate_pages(count);
96532 if (!start_pg)
96533diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
96534index b95381e..af2fddd 100644
96535--- a/kernel/trace/ring_buffer.c
96536+++ b/kernel/trace/ring_buffer.c
96537@@ -352,9 +352,9 @@ struct buffer_data_page {
96538 */
96539 struct buffer_page {
96540 struct list_head list; /* list of buffer pages */
96541- local_t write; /* index for next write */
96542+ local_unchecked_t write; /* index for next write */
96543 unsigned read; /* index for next read */
96544- local_t entries; /* entries on this page */
96545+ local_unchecked_t entries; /* entries on this page */
96546 unsigned long real_end; /* real end of data */
96547 struct buffer_data_page *page; /* Actual data page */
96548 };
96549@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
96550 unsigned long last_overrun;
96551 local_t entries_bytes;
96552 local_t entries;
96553- local_t overrun;
96554- local_t commit_overrun;
96555+ local_unchecked_t overrun;
96556+ local_unchecked_t commit_overrun;
96557 local_t dropped_events;
96558 local_t committing;
96559 local_t commits;
96560@@ -626,8 +626,22 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
96561 work = &cpu_buffer->irq_work;
96562 }
96563
96564- work->waiters_pending = true;
96565 poll_wait(filp, &work->waiters, poll_table);
96566+ work->waiters_pending = true;
96567+ /*
96568+ * There's a tight race between setting the waiters_pending and
96569+ * checking if the ring buffer is empty. Once the waiters_pending bit
96570+ * is set, the next event will wake the task up, but we can get stuck
96571+ * if there's only a single event in.
96572+ *
96573+ * FIXME: Ideally, we need a memory barrier on the writer side as well,
96574+ * but adding a memory barrier to all events will cause too much of a
96575+ * performance hit in the fast path. We only need a memory barrier when
96576+ * the buffer goes from empty to having content. But as this race is
96577+ * extremely small, and it's not a problem if another event comes in, we
96578+ * will fix it later.
96579+ */
96580+ smp_mb();
96581
96582 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
96583 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
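
The reorder plus smp_mb() above is the canonical fix for a lost-wakeup race: the waiter must publish waiters_pending before re-checking for data, and only a full barrier keeps the CPU from hoisting the emptiness check above the store. The same pattern in portable C11:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool waiters_pending;
    static atomic_int  nr_events;

    static bool should_sleep(void)
    {
        atomic_store(&waiters_pending, true);        /* 1: announce   */
        atomic_thread_fence(memory_order_seq_cst);   /* 2: smp_mb()   */
        return atomic_load(&nr_events) == 0;         /* 3: then check */
    }

    /* Writer side: publish the event, then (after its own barrier)
     * test waiters_pending and wake the poller if set. */
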
96584@@ -991,8 +1005,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
96585 *
96586 * We add a counter to the write field to denote this.
96587 */
96588- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
96589- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
96590+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
96591+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
96592
96593 /*
96594 * Just make sure we have seen our old_write and synchronize
96595@@ -1020,8 +1034,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
96596 * cmpxchg to only update if an interrupt did not already
96597 * do it for us. If the cmpxchg fails, we don't care.
96598 */
96599- (void)local_cmpxchg(&next_page->write, old_write, val);
96600- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
96601+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
96602+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
96603
96604 /*
96605 * No need to worry about races with clearing out the commit.
96606@@ -1388,12 +1402,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
96607
96608 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
96609 {
96610- return local_read(&bpage->entries) & RB_WRITE_MASK;
96611+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
96612 }
96613
96614 static inline unsigned long rb_page_write(struct buffer_page *bpage)
96615 {
96616- return local_read(&bpage->write) & RB_WRITE_MASK;
96617+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
96618 }
96619
96620 static int
96621@@ -1488,7 +1502,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
96622 * bytes consumed in ring buffer from here.
96623 * Increment overrun to account for the lost events.
96624 */
96625- local_add(page_entries, &cpu_buffer->overrun);
96626+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
96627 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
96628 }
96629
96630@@ -2066,7 +2080,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
96631 * it is our responsibility to update
96632 * the counters.
96633 */
96634- local_add(entries, &cpu_buffer->overrun);
96635+ local_add_unchecked(entries, &cpu_buffer->overrun);
96636 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
96637
96638 /*
96639@@ -2216,7 +2230,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
96640 if (tail == BUF_PAGE_SIZE)
96641 tail_page->real_end = 0;
96642
96643- local_sub(length, &tail_page->write);
96644+ local_sub_unchecked(length, &tail_page->write);
96645 return;
96646 }
96647
96648@@ -2251,7 +2265,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
96649 rb_event_set_padding(event);
96650
96651 /* Set the write back to the previous setting */
96652- local_sub(length, &tail_page->write);
96653+ local_sub_unchecked(length, &tail_page->write);
96654 return;
96655 }
96656
96657@@ -2263,7 +2277,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
96658
96659 /* Set write to end of buffer */
96660 length = (tail + length) - BUF_PAGE_SIZE;
96661- local_sub(length, &tail_page->write);
96662+ local_sub_unchecked(length, &tail_page->write);
96663 }
96664
96665 /*
96666@@ -2289,7 +2303,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
96667 * about it.
96668 */
96669 if (unlikely(next_page == commit_page)) {
96670- local_inc(&cpu_buffer->commit_overrun);
96671+ local_inc_unchecked(&cpu_buffer->commit_overrun);
96672 goto out_reset;
96673 }
96674
96675@@ -2345,7 +2359,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
96676 cpu_buffer->tail_page) &&
96677 (cpu_buffer->commit_page ==
96678 cpu_buffer->reader_page))) {
96679- local_inc(&cpu_buffer->commit_overrun);
96680+ local_inc_unchecked(&cpu_buffer->commit_overrun);
96681 goto out_reset;
96682 }
96683 }
96684@@ -2393,7 +2407,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
96685 length += RB_LEN_TIME_EXTEND;
96686
96687 tail_page = cpu_buffer->tail_page;
96688- write = local_add_return(length, &tail_page->write);
96689+ write = local_add_return_unchecked(length, &tail_page->write);
96690
96691 /* set write to only the index of the write */
96692 write &= RB_WRITE_MASK;
96693@@ -2417,7 +2431,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
96694 kmemcheck_annotate_bitfield(event, bitfield);
96695 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
96696
96697- local_inc(&tail_page->entries);
96698+ local_inc_unchecked(&tail_page->entries);
96699
96700 /*
96701 * If this is the first commit on the page, then update
96702@@ -2450,7 +2464,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
96703
96704 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
96705 unsigned long write_mask =
96706- local_read(&bpage->write) & ~RB_WRITE_MASK;
96707+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
96708 unsigned long event_length = rb_event_length(event);
96709 /*
96710 * This is on the tail page. It is possible that
96711@@ -2460,7 +2474,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
96712 */
96713 old_index += write_mask;
96714 new_index += write_mask;
96715- index = local_cmpxchg(&bpage->write, old_index, new_index);
96716+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
96717 if (index == old_index) {
96718 /* update counters */
96719 local_sub(event_length, &cpu_buffer->entries_bytes);
96720@@ -2852,7 +2866,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
96721
96722 /* Do the likely case first */
96723 if (likely(bpage->page == (void *)addr)) {
96724- local_dec(&bpage->entries);
96725+ local_dec_unchecked(&bpage->entries);
96726 return;
96727 }
96728
96729@@ -2864,7 +2878,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
96730 start = bpage;
96731 do {
96732 if (bpage->page == (void *)addr) {
96733- local_dec(&bpage->entries);
96734+ local_dec_unchecked(&bpage->entries);
96735 return;
96736 }
96737 rb_inc_page(cpu_buffer, &bpage);
96738@@ -3148,7 +3162,7 @@ static inline unsigned long
96739 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
96740 {
96741 return local_read(&cpu_buffer->entries) -
96742- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
96743+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
96744 }
96745
96746 /**
96747@@ -3237,7 +3251,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
96748 return 0;
96749
96750 cpu_buffer = buffer->buffers[cpu];
96751- ret = local_read(&cpu_buffer->overrun);
96752+ ret = local_read_unchecked(&cpu_buffer->overrun);
96753
96754 return ret;
96755 }
96756@@ -3260,7 +3274,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
96757 return 0;
96758
96759 cpu_buffer = buffer->buffers[cpu];
96760- ret = local_read(&cpu_buffer->commit_overrun);
96761+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
96762
96763 return ret;
96764 }
96765@@ -3345,7 +3359,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
96766 /* if you care about this being correct, lock the buffer */
96767 for_each_buffer_cpu(buffer, cpu) {
96768 cpu_buffer = buffer->buffers[cpu];
96769- overruns += local_read(&cpu_buffer->overrun);
96770+ overruns += local_read_unchecked(&cpu_buffer->overrun);
96771 }
96772
96773 return overruns;
96774@@ -3516,8 +3530,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
96775 /*
96776 * Reset the reader page to size zero.
96777 */
96778- local_set(&cpu_buffer->reader_page->write, 0);
96779- local_set(&cpu_buffer->reader_page->entries, 0);
96780+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
96781+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
96782 local_set(&cpu_buffer->reader_page->page->commit, 0);
96783 cpu_buffer->reader_page->real_end = 0;
96784
96785@@ -3551,7 +3565,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
96786 * want to compare with the last_overrun.
96787 */
96788 smp_mb();
96789- overwrite = local_read(&(cpu_buffer->overrun));
96790+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
96791
96792 /*
96793 * Here's the tricky part.
96794@@ -4123,8 +4137,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
96795
96796 cpu_buffer->head_page
96797 = list_entry(cpu_buffer->pages, struct buffer_page, list);
96798- local_set(&cpu_buffer->head_page->write, 0);
96799- local_set(&cpu_buffer->head_page->entries, 0);
96800+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
96801+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
96802 local_set(&cpu_buffer->head_page->page->commit, 0);
96803
96804 cpu_buffer->head_page->read = 0;
96805@@ -4134,14 +4148,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
96806
96807 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
96808 INIT_LIST_HEAD(&cpu_buffer->new_pages);
96809- local_set(&cpu_buffer->reader_page->write, 0);
96810- local_set(&cpu_buffer->reader_page->entries, 0);
96811+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
96812+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
96813 local_set(&cpu_buffer->reader_page->page->commit, 0);
96814 cpu_buffer->reader_page->read = 0;
96815
96816 local_set(&cpu_buffer->entries_bytes, 0);
96817- local_set(&cpu_buffer->overrun, 0);
96818- local_set(&cpu_buffer->commit_overrun, 0);
96819+ local_set_unchecked(&cpu_buffer->overrun, 0);
96820+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
96821 local_set(&cpu_buffer->dropped_events, 0);
96822 local_set(&cpu_buffer->entries, 0);
96823 local_set(&cpu_buffer->committing, 0);
96824@@ -4546,8 +4560,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
96825 rb_init_page(bpage);
96826 bpage = reader->page;
96827 reader->page = *data_page;
96828- local_set(&reader->write, 0);
96829- local_set(&reader->entries, 0);
96830+ local_set_unchecked(&reader->write, 0);
96831+ local_set_unchecked(&reader->entries, 0);
96832 reader->read = 0;
96833 *data_page = bpage;
96834
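
Nearly all of the ring_buffer.c churn above is one idea applied repeatedly: PaX's REFCOUNT feature instruments local_t arithmetic to detect overflows, but the write and entries fields wrap by design — rb_tail_page_update() folds an interrupt counter into their high bits via RB_WRITE_INTCNT and the real index is recovered with RB_WRITE_MASK — so these counters are switched to the _unchecked variants that keep plain wrapping semantics. A userspace sketch of the two behaviours (the kernel implements the check in arch code, not with __builtin_add_overflow):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

/* Model of a REFCOUNT-checked add: trap instead of wrapping. */
static long checked_add(long *v, long delta)
{
	long res;
	if (__builtin_add_overflow(*v, delta, &res)) {
		fprintf(stderr, "refcount overflow detected, aborting\n");
		abort();
	}
	return *v = res;
}

/* Model of the _unchecked variant: ordinary wrapping arithmetic. */
static unsigned long unchecked_add(unsigned long *v, unsigned long delta)
{
	return *v += delta;	/* unsigned wraparound is well defined */
}

int main(void)
{
	unsigned long write = ULONG_MAX - 1;
	printf("unchecked: %lu\n", unchecked_add(&write, 4));	/* wraps to 2 */

	long refs = LONG_MAX;
	checked_add(&refs, 1);					/* aborts */
	return 0;
}
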
96835diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
96836index 291397e..db3836d 100644
96837--- a/kernel/trace/trace.c
96838+++ b/kernel/trace/trace.c
96839@@ -3510,7 +3510,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
96840 return 0;
96841 }
96842
96843-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
96844+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
96845 {
96846 /* do nothing if flag is already set */
96847 if (!!(trace_flags & mask) == !!enabled)
96848diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
96849index 9258f5a..9b1e41e 100644
96850--- a/kernel/trace/trace.h
96851+++ b/kernel/trace/trace.h
96852@@ -1278,7 +1278,7 @@ extern const char *__stop___tracepoint_str[];
96853 void trace_printk_init_buffers(void);
96854 void trace_printk_start_comm(void);
96855 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
96856-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
96857+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
96858
96859 /*
96860 * Normal trace_printk() and friends allocates special buffers
96861diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
96862index 57b67b1..66082a9 100644
96863--- a/kernel/trace/trace_clock.c
96864+++ b/kernel/trace/trace_clock.c
96865@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void)
96866 return now;
96867 }
96868
96869-static atomic64_t trace_counter;
96870+static atomic64_unchecked_t trace_counter;
96871
96872 /*
96873 * trace_clock_counter(): simply an atomic counter.
96874@@ -133,5 +133,5 @@ static atomic64_t trace_counter;
96875 */
96876 u64 notrace trace_clock_counter(void)
96877 {
96878- return atomic64_add_return(1, &trace_counter);
96879+ return atomic64_inc_return_unchecked(&trace_counter);
96880 }
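
trace_clock_counter() only needs a monotonically increasing, wrap-tolerant value, so its counter likewise opts out of overflow checking, and the add-of-one becomes the dedicated increment helper. The equivalent in portable C11 atomics (atomic64_inc_return_unchecked() is the patch's name; this model is illustrative):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t trace_counter;

/* Returns the post-increment value, like atomic64_inc_return(). */
static uint64_t trace_clock_counter(void)
{
	return atomic_fetch_add(&trace_counter, 1) + 1;
}

int main(void)
{
	printf("%llu %llu %llu\n",
	       (unsigned long long)trace_clock_counter(),
	       (unsigned long long)trace_clock_counter(),
	       (unsigned long long)trace_clock_counter());	/* 1 2 3 */
	return 0;
}
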
96881diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
96882index 2de5362..c4c7003 100644
96883--- a/kernel/trace/trace_events.c
96884+++ b/kernel/trace/trace_events.c
96885@@ -1722,7 +1722,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
96886 return 0;
96887 }
96888
96889-struct ftrace_module_file_ops;
96890 static void __add_event_to_tracers(struct ftrace_event_call *call);
96891
96892 /* Add an additional event_call dynamically */
96893diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
96894index 0abd9b8..6a663a2 100644
96895--- a/kernel/trace/trace_mmiotrace.c
96896+++ b/kernel/trace/trace_mmiotrace.c
96897@@ -24,7 +24,7 @@ struct header_iter {
96898 static struct trace_array *mmio_trace_array;
96899 static bool overrun_detected;
96900 static unsigned long prev_overruns;
96901-static atomic_t dropped_count;
96902+static atomic_unchecked_t dropped_count;
96903
96904 static void mmio_reset_data(struct trace_array *tr)
96905 {
96906@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
96907
96908 static unsigned long count_overruns(struct trace_iterator *iter)
96909 {
96910- unsigned long cnt = atomic_xchg(&dropped_count, 0);
96911+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
96912 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
96913
96914 if (over > prev_overruns)
96915@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
96916 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
96917 sizeof(*entry), 0, pc);
96918 if (!event) {
96919- atomic_inc(&dropped_count);
96920+ atomic_inc_unchecked(&dropped_count);
96921 return;
96922 }
96923 entry = ring_buffer_event_data(event);
96924@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
96925 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
96926 sizeof(*entry), 0, pc);
96927 if (!event) {
96928- atomic_inc(&dropped_count);
96929+ atomic_inc_unchecked(&dropped_count);
96930 return;
96931 }
96932 entry = ring_buffer_event_data(event);
96933diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
96934index f3dad80..d291d61 100644
96935--- a/kernel/trace/trace_output.c
96936+++ b/kernel/trace/trace_output.c
96937@@ -322,7 +322,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
96938
96939 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
96940 if (!IS_ERR(p)) {
96941- p = mangle_path(s->buffer + s->len, p, "\n");
96942+ p = mangle_path(s->buffer + s->len, p, "\n\\");
96943 if (p) {
96944 s->len = p - s->buffer;
96945 return 1;
96946@@ -980,14 +980,16 @@ int register_ftrace_event(struct trace_event *event)
96947 goto out;
96948 }
96949
96950+ pax_open_kernel();
96951 if (event->funcs->trace == NULL)
96952- event->funcs->trace = trace_nop_print;
96953+ *(void **)&event->funcs->trace = trace_nop_print;
96954 if (event->funcs->raw == NULL)
96955- event->funcs->raw = trace_nop_print;
96956+ *(void **)&event->funcs->raw = trace_nop_print;
96957 if (event->funcs->hex == NULL)
96958- event->funcs->hex = trace_nop_print;
96959+ *(void **)&event->funcs->hex = trace_nop_print;
96960 if (event->funcs->binary == NULL)
96961- event->funcs->binary = trace_nop_print;
96962+ *(void **)&event->funcs->binary = trace_nop_print;
96963+ pax_close_kernel();
96964
96965 key = event->type & (EVENT_HASHSIZE - 1);
96966
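
In register_ftrace_event() the default callbacks are now installed through pax_open_kernel() with a *(void **)& cast, because under the constify plugin event->funcs can point at an ops structure the compiler placed in read-only memory, and the cast strips the const qualifier from the member before the now-permitted write. The shape of that cast in plain C — note the underlying object must not itself be const-qualified, or the write would be undefined:

#include <stdio.h>

struct event_funcs {
	int (*trace)(void);
};

static int trace_nop_print(void) { return 0; }

/* Not declared const, but handed around through a pointer-to-const. */
static struct event_funcs my_funcs;

static void install_defaults(const struct event_funcs *funcs)
{
	if (funcs->trace == NULL)
		*(int (**)(void))&funcs->trace = trace_nop_print; /* cast away const */
}

int main(void)
{
	install_defaults(&my_funcs);
	printf("trace() -> %d\n", my_funcs.trace());
	return 0;
}
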
96967diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
96968index 8a4e5cb..64f270d 100644
96969--- a/kernel/trace/trace_stack.c
96970+++ b/kernel/trace/trace_stack.c
96971@@ -91,7 +91,7 @@ check_stack(unsigned long ip, unsigned long *stack)
96972 return;
96973
96974 /* we do not handle interrupt stacks yet */
96975- if (!object_is_on_stack(stack))
96976+ if (!object_starts_on_stack(stack))
96977 return;
96978
96979 local_irq_save(flags);
96980diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
96981index fcc0256..aee880f 100644
96982--- a/kernel/user_namespace.c
96983+++ b/kernel/user_namespace.c
96984@@ -82,6 +82,21 @@ int create_user_ns(struct cred *new)
96985 !kgid_has_mapping(parent_ns, group))
96986 return -EPERM;
96987
96988+#ifdef CONFIG_GRKERNSEC
96989+ /*
96990+ * This doesn't really inspire confidence:
96991+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
96992+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
96993+ * Increases kernel attack surface in areas developers
96994+ * previously cared little about ("low importance due
96995+ * to requiring "root" capability")
96996+ * To be removed when this code receives *proper* review
96997+ */
96998+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
96999+ !capable(CAP_SETGID))
97000+ return -EPERM;
97001+#endif
97002+
97003 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
97004 if (!ns)
97005 return -ENOMEM;
97006@@ -872,7 +887,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
97007 if (atomic_read(&current->mm->mm_users) > 1)
97008 return -EINVAL;
97009
97010- if (current->fs->users != 1)
97011+ if (atomic_read(&current->fs->users) != 1)
97012 return -EINVAL;
97013
97014 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
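
The observable effect of the CONFIG_GRKERNSEC gate above is that create_user_ns() now refuses callers lacking CAP_SYS_ADMIN, CAP_SETUID and CAP_SETGID, so an unprivileged unshare(CLONE_NEWUSER) should fail with EPERM. A small probe (the EPERM behaviour assumes a kernel built with this patch and GRKERNSEC enabled):

#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	if (unshare(CLONE_NEWUSER) == 0)
		puts("user namespace created (caller was sufficiently privileged)");
	else
		printf("unshare(CLONE_NEWUSER) failed: %s\n", strerror(errno));
	return 0;
}
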
97015diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
97016index c8eac43..4b5f08f 100644
97017--- a/kernel/utsname_sysctl.c
97018+++ b/kernel/utsname_sysctl.c
97019@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
97020 static int proc_do_uts_string(struct ctl_table *table, int write,
97021 void __user *buffer, size_t *lenp, loff_t *ppos)
97022 {
97023- struct ctl_table uts_table;
97024+ ctl_table_no_const uts_table;
97025 int r;
97026 memcpy(&uts_table, table, sizeof(uts_table));
97027 uts_table.data = get_uts(table, write);
97028diff --git a/kernel/watchdog.c b/kernel/watchdog.c
97029index c3319bd..67efc3c 100644
97030--- a/kernel/watchdog.c
97031+++ b/kernel/watchdog.c
97032@@ -518,7 +518,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
97033 static void watchdog_nmi_disable(unsigned int cpu) { return; }
97034 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
97035
97036-static struct smp_hotplug_thread watchdog_threads = {
97037+static struct smp_hotplug_thread watchdog_threads __read_only = {
97038 .store = &softlockup_watchdog,
97039 .thread_should_run = watchdog_should_run,
97040 .thread_fn = watchdog,
97041diff --git a/kernel/workqueue.c b/kernel/workqueue.c
97042index 35974ac..43c9e87 100644
97043--- a/kernel/workqueue.c
97044+++ b/kernel/workqueue.c
97045@@ -4576,7 +4576,7 @@ static void rebind_workers(struct worker_pool *pool)
97046 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
97047 worker_flags |= WORKER_REBOUND;
97048 worker_flags &= ~WORKER_UNBOUND;
97049- ACCESS_ONCE(worker->flags) = worker_flags;
97050+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
97051 }
97052
97053 spin_unlock_irq(&pool->lock);
97054diff --git a/lib/Kconfig b/lib/Kconfig
97055index 334f772..74b8ec3 100644
97056--- a/lib/Kconfig
97057+++ b/lib/Kconfig
97058@@ -51,6 +51,9 @@ config PERCPU_RWSEM
97059 config ARCH_USE_CMPXCHG_LOCKREF
97060 bool
97061
97062+config ARCH_HAS_FAST_MULTIPLIER
97063+ bool
97064+
97065 config CRC_CCITT
97066 tristate "CRC-CCITT functions"
97067 help
97068diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
97069index 7a638aa..20db901 100644
97070--- a/lib/Kconfig.debug
97071+++ b/lib/Kconfig.debug
97072@@ -858,7 +858,7 @@ config DEBUG_MUTEXES
97073
97074 config DEBUG_WW_MUTEX_SLOWPATH
97075 bool "Wait/wound mutex debugging: Slowpath testing"
97076- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
97077+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
97078 select DEBUG_LOCK_ALLOC
97079 select DEBUG_SPINLOCK
97080 select DEBUG_MUTEXES
97081@@ -871,7 +871,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
97082
97083 config DEBUG_LOCK_ALLOC
97084 bool "Lock debugging: detect incorrect freeing of live locks"
97085- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
97086+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
97087 select DEBUG_SPINLOCK
97088 select DEBUG_MUTEXES
97089 select LOCKDEP
97090@@ -885,7 +885,7 @@ config DEBUG_LOCK_ALLOC
97091
97092 config PROVE_LOCKING
97093 bool "Lock debugging: prove locking correctness"
97094- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
97095+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
97096 select LOCKDEP
97097 select DEBUG_SPINLOCK
97098 select DEBUG_MUTEXES
97099@@ -936,7 +936,7 @@ config LOCKDEP
97100
97101 config LOCK_STAT
97102 bool "Lock usage statistics"
97103- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
97104+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
97105 select LOCKDEP
97106 select DEBUG_SPINLOCK
97107 select DEBUG_MUTEXES
97108@@ -1418,6 +1418,7 @@ config LATENCYTOP
97109 depends on DEBUG_KERNEL
97110 depends on STACKTRACE_SUPPORT
97111 depends on PROC_FS
97112+ depends on !GRKERNSEC_HIDESYM
97113 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
97114 select KALLSYMS
97115 select KALLSYMS_ALL
97116@@ -1434,7 +1435,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
97117 config DEBUG_STRICT_USER_COPY_CHECKS
97118 bool "Strict user copy size checks"
97119 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
97120- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
97121+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
97122 help
97123 Enabling this option turns a certain set of sanity checks for user
97124 copy operations into compile time failures.
97125@@ -1554,7 +1555,7 @@ endmenu # runtime tests
97126
97127 config PROVIDE_OHCI1394_DMA_INIT
97128 bool "Remote debugging over FireWire early on boot"
97129- depends on PCI && X86
97130+ depends on PCI && X86 && !GRKERNSEC
97131 help
97132 If you want to debug problems which hang or crash the kernel early
97133 on boot and the crashing machine has a FireWire port, you can use
97134diff --git a/lib/Makefile b/lib/Makefile
97135index ba967a1..2cc869a 100644
97136--- a/lib/Makefile
97137+++ b/lib/Makefile
97138@@ -33,7 +33,6 @@ obj-y += kstrtox.o
97139 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
97140 obj-$(CONFIG_TEST_MODULE) += test_module.o
97141 obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
97142-obj-$(CONFIG_TEST_BPF) += test_bpf.o
97143
97144 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
97145 CFLAGS_kobject.o += -DDEBUG
97146@@ -54,7 +53,7 @@ obj-$(CONFIG_BTREE) += btree.o
97147 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
97148 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
97149 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
97150-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
97151+obj-y += list_debug.o
97152 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
97153
97154 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
97155diff --git a/lib/average.c b/lib/average.c
97156index 114d1be..ab0350c 100644
97157--- a/lib/average.c
97158+++ b/lib/average.c
97159@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
97160 {
97161 unsigned long internal = ACCESS_ONCE(avg->internal);
97162
97163- ACCESS_ONCE(avg->internal) = internal ?
97164+ ACCESS_ONCE_RW(avg->internal) = internal ?
97165 (((internal << avg->weight) - internal) +
97166 (val << avg->factor)) >> avg->weight :
97167 (val << avg->factor);
97168diff --git a/lib/bitmap.c b/lib/bitmap.c
97169index 06f7e4f..f3cf2b0 100644
97170--- a/lib/bitmap.c
97171+++ b/lib/bitmap.c
97172@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
97173 {
97174 int c, old_c, totaldigits, ndigits, nchunks, nbits;
97175 u32 chunk;
97176- const char __user __force *ubuf = (const char __user __force *)buf;
97177+ const char __user *ubuf = (const char __force_user *)buf;
97178
97179 bitmap_zero(maskp, nmaskbits);
97180
97181@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
97182 {
97183 if (!access_ok(VERIFY_READ, ubuf, ulen))
97184 return -EFAULT;
97185- return __bitmap_parse((const char __force *)ubuf,
97186+ return __bitmap_parse((const char __force_kernel *)ubuf,
97187 ulen, 1, maskp, nmaskbits);
97188
97189 }
97190@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
97191 {
97192 unsigned a, b;
97193 int c, old_c, totaldigits;
97194- const char __user __force *ubuf = (const char __user __force *)buf;
97195+ const char __user *ubuf = (const char __force_user *)buf;
97196 int exp_digit, in_range;
97197
97198 totaldigits = c = 0;
97199@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
97200 {
97201 if (!access_ok(VERIFY_READ, ubuf, ulen))
97202 return -EFAULT;
97203- return __bitmap_parselist((const char __force *)ubuf,
97204+ return __bitmap_parselist((const char __force_kernel *)ubuf,
97205 ulen, 1, maskp, nmaskbits);
97206 }
97207 EXPORT_SYMBOL(bitmap_parselist_user);
97208diff --git a/lib/bug.c b/lib/bug.c
97209index d1d7c78..b354235 100644
97210--- a/lib/bug.c
97211+++ b/lib/bug.c
97212@@ -137,6 +137,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
97213 return BUG_TRAP_TYPE_NONE;
97214
97215 bug = find_bug(bugaddr);
97216+ if (!bug)
97217+ return BUG_TRAP_TYPE_NONE;
97218
97219 file = NULL;
97220 line = 0;
97221diff --git a/lib/debugobjects.c b/lib/debugobjects.c
97222index 547f7f9..a6d4ba0 100644
97223--- a/lib/debugobjects.c
97224+++ b/lib/debugobjects.c
97225@@ -289,7 +289,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
97226 if (limit > 4)
97227 return;
97228
97229- is_on_stack = object_is_on_stack(addr);
97230+ is_on_stack = object_starts_on_stack(addr);
97231 if (is_on_stack == onstack)
97232 return;
97233
97234diff --git a/lib/div64.c b/lib/div64.c
97235index 4382ad7..08aa558 100644
97236--- a/lib/div64.c
97237+++ b/lib/div64.c
97238@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
97239 EXPORT_SYMBOL(__div64_32);
97240
97241 #ifndef div_s64_rem
97242-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
97243+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
97244 {
97245 u64 quotient;
97246
97247@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
97248 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
97249 */
97250 #ifndef div64_u64
97251-u64 div64_u64(u64 dividend, u64 divisor)
97252+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
97253 {
97254 u32 high = divisor >> 32;
97255 u64 quot;
97256diff --git a/lib/dma-debug.c b/lib/dma-debug.c
97257index 98f2d7e..899da5c 100644
97258--- a/lib/dma-debug.c
97259+++ b/lib/dma-debug.c
97260@@ -971,7 +971,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
97261
97262 void dma_debug_add_bus(struct bus_type *bus)
97263 {
97264- struct notifier_block *nb;
97265+ notifier_block_no_const *nb;
97266
97267 if (global_disable)
97268 return;
97269@@ -1148,7 +1148,7 @@ static void check_unmap(struct dma_debug_entry *ref)
97270
97271 static void check_for_stack(struct device *dev, void *addr)
97272 {
97273- if (object_is_on_stack(addr))
97274+ if (object_starts_on_stack(addr))
97275 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
97276 "stack [addr=%p]\n", addr);
97277 }
97278diff --git a/lib/hash.c b/lib/hash.c
97279index fea973f..386626f 100644
97280--- a/lib/hash.c
97281+++ b/lib/hash.c
97282@@ -14,7 +14,7 @@
97283 #include <linux/hash.h>
97284 #include <linux/cache.h>
97285
97286-static struct fast_hash_ops arch_hash_ops __read_mostly = {
97287+static struct fast_hash_ops arch_hash_ops __read_only = {
97288 .hash = jhash,
97289 .hash2 = jhash2,
97290 };
97291diff --git a/lib/hweight.c b/lib/hweight.c
97292index b7d81ba..9a5c1f2 100644
97293--- a/lib/hweight.c
97294+++ b/lib/hweight.c
97295@@ -11,7 +11,7 @@
97296
97297 unsigned int __sw_hweight32(unsigned int w)
97298 {
97299-#ifdef ARCH_HAS_FAST_MULTIPLIER
97300+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
97301 w -= (w >> 1) & 0x55555555;
97302 w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
97303 w = (w + (w >> 4)) & 0x0f0f0f0f;
97304@@ -49,7 +49,7 @@ unsigned long __sw_hweight64(__u64 w)
97305 return __sw_hweight32((unsigned int)(w >> 32)) +
97306 __sw_hweight32((unsigned int)w);
97307 #elif BITS_PER_LONG == 64
97308-#ifdef ARCH_HAS_FAST_MULTIPLIER
97309+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
97310 w -= (w >> 1) & 0x5555555555555555ul;
97311 w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
97312 w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
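
Both __sw_hweight32() branches compute the same population count: the CONFIG_ARCH_HAS_FAST_MULTIPLIER path — now keyed off the Kconfig symbol declared in the lib/Kconfig hunk above, matching the lib/string.c hunk below — folds the per-byte sums with a single multiply, while the fallback uses shifts and adds. A standalone comparison of the two against the compiler builtin:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Multiplier-based variant (the CONFIG_ARCH_HAS_FAST_MULTIPLIER path). */
static unsigned int hweight32_mul(uint32_t w)
{
	w -= (w >> 1) & 0x55555555;
	w  = (w & 0x33333333) + ((w >> 2) & 0x33333333);
	w  = (w + (w >> 4)) & 0x0f0f0f0f;
	return (w * 0x01010101) >> 24;	/* sum all byte counts in one multiply */
}

/* Shift-and-add fallback. */
static unsigned int hweight32_add(uint32_t w)
{
	w -= (w >> 1) & 0x55555555;
	w  = (w & 0x33333333) + ((w >> 2) & 0x33333333);
	w  = (w + (w >> 4)) & 0x0f0f0f0f;
	w += w >> 8;
	return (w + (w >> 16)) & 0xff;
}

int main(void)
{
	uint32_t samples[] = { 0, 1, 0xff, 0xdeadbeef, 0xffffffff };
	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint32_t w = samples[i];
		assert(hweight32_mul(w) == (unsigned int)__builtin_popcount(w));
		assert(hweight32_add(w) == (unsigned int)__builtin_popcount(w));
		printf("%08x -> %u\n", w, hweight32_mul(w));
	}
	return 0;
}
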
97313diff --git a/lib/inflate.c b/lib/inflate.c
97314index 013a761..c28f3fc 100644
97315--- a/lib/inflate.c
97316+++ b/lib/inflate.c
97317@@ -269,7 +269,7 @@ static void free(void *where)
97318 malloc_ptr = free_mem_ptr;
97319 }
97320 #else
97321-#define malloc(a) kmalloc(a, GFP_KERNEL)
97322+#define malloc(a) kmalloc((a), GFP_KERNEL)
97323 #define free(a) kfree(a)
97324 #endif
97325
97326diff --git a/lib/ioremap.c b/lib/ioremap.c
97327index 0c9216c..863bd89 100644
97328--- a/lib/ioremap.c
97329+++ b/lib/ioremap.c
97330@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
97331 unsigned long next;
97332
97333 phys_addr -= addr;
97334- pmd = pmd_alloc(&init_mm, pud, addr);
97335+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
97336 if (!pmd)
97337 return -ENOMEM;
97338 do {
97339@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
97340 unsigned long next;
97341
97342 phys_addr -= addr;
97343- pud = pud_alloc(&init_mm, pgd, addr);
97344+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
97345 if (!pud)
97346 return -ENOMEM;
97347 do {
97348diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
97349index bd2bea9..6b3c95e 100644
97350--- a/lib/is_single_threaded.c
97351+++ b/lib/is_single_threaded.c
97352@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
97353 struct task_struct *p, *t;
97354 bool ret;
97355
97356+ if (!mm)
97357+ return true;
97358+
97359 if (atomic_read(&task->signal->live) != 1)
97360 return false;
97361
97362diff --git a/lib/kobject.c b/lib/kobject.c
97363index 58751bb..93a1853 100644
97364--- a/lib/kobject.c
97365+++ b/lib/kobject.c
97366@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
97367
97368
97369 static DEFINE_SPINLOCK(kobj_ns_type_lock);
97370-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
97371+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
97372
97373-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
97374+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
97375 {
97376 enum kobj_ns_type type = ops->type;
97377 int error;
97378diff --git a/lib/list_debug.c b/lib/list_debug.c
97379index c24c2f7..f0296f4 100644
97380--- a/lib/list_debug.c
97381+++ b/lib/list_debug.c
97382@@ -11,7 +11,9 @@
97383 #include <linux/bug.h>
97384 #include <linux/kernel.h>
97385 #include <linux/rculist.h>
97386+#include <linux/mm.h>
97387
97388+#ifdef CONFIG_DEBUG_LIST
97389 /*
97390 * Insert a new entry between two known consecutive entries.
97391 *
97392@@ -19,21 +21,40 @@
97393 * the prev/next entries already!
97394 */
97395
97396+static bool __list_add_debug(struct list_head *new,
97397+ struct list_head *prev,
97398+ struct list_head *next)
97399+{
97400+ if (unlikely(next->prev != prev)) {
97401+ printk(KERN_ERR "list_add corruption. next->prev should be "
97402+ "prev (%p), but was %p. (next=%p).\n",
97403+ prev, next->prev, next);
97404+ BUG();
97405+ return false;
97406+ }
97407+ if (unlikely(prev->next != next)) {
97408+ printk(KERN_ERR "list_add corruption. prev->next should be "
97409+ "next (%p), but was %p. (prev=%p).\n",
97410+ next, prev->next, prev);
97411+ BUG();
97412+ return false;
97413+ }
97414+ if (unlikely(new == prev || new == next)) {
97415+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
97416+ new, prev, next);
97417+ BUG();
97418+ return false;
97419+ }
97420+ return true;
97421+}
97422+
97423 void __list_add(struct list_head *new,
97424- struct list_head *prev,
97425- struct list_head *next)
97426+ struct list_head *prev,
97427+ struct list_head *next)
97428 {
97429- WARN(next->prev != prev,
97430- "list_add corruption. next->prev should be "
97431- "prev (%p), but was %p. (next=%p).\n",
97432- prev, next->prev, next);
97433- WARN(prev->next != next,
97434- "list_add corruption. prev->next should be "
97435- "next (%p), but was %p. (prev=%p).\n",
97436- next, prev->next, prev);
97437- WARN(new == prev || new == next,
97438- "list_add double add: new=%p, prev=%p, next=%p.\n",
97439- new, prev, next);
97440+ if (!__list_add_debug(new, prev, next))
97441+ return;
97442+
97443 next->prev = new;
97444 new->next = next;
97445 new->prev = prev;
97446@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
97447 }
97448 EXPORT_SYMBOL(__list_add);
97449
97450-void __list_del_entry(struct list_head *entry)
97451+static bool __list_del_entry_debug(struct list_head *entry)
97452 {
97453 struct list_head *prev, *next;
97454
97455 prev = entry->prev;
97456 next = entry->next;
97457
97458- if (WARN(next == LIST_POISON1,
97459- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
97460- entry, LIST_POISON1) ||
97461- WARN(prev == LIST_POISON2,
97462- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
97463- entry, LIST_POISON2) ||
97464- WARN(prev->next != entry,
97465- "list_del corruption. prev->next should be %p, "
97466- "but was %p\n", entry, prev->next) ||
97467- WARN(next->prev != entry,
97468- "list_del corruption. next->prev should be %p, "
97469- "but was %p\n", entry, next->prev))
97470+ if (unlikely(next == LIST_POISON1)) {
97471+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
97472+ entry, LIST_POISON1);
97473+ BUG();
97474+ return false;
97475+ }
97476+ if (unlikely(prev == LIST_POISON2)) {
97477+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
97478+ entry, LIST_POISON2);
97479+ BUG();
97480+ return false;
97481+ }
97482+ if (unlikely(entry->prev->next != entry)) {
97483+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
97484+ "but was %p\n", entry, prev->next);
97485+ BUG();
97486+ return false;
97487+ }
97488+ if (unlikely(entry->next->prev != entry)) {
97489+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
97490+ "but was %p\n", entry, next->prev);
97491+ BUG();
97492+ return false;
97493+ }
97494+ return true;
97495+}
97496+
97497+void __list_del_entry(struct list_head *entry)
97498+{
97499+ if (!__list_del_entry_debug(entry))
97500 return;
97501
97502- __list_del(prev, next);
97503+ __list_del(entry->prev, entry->next);
97504 }
97505 EXPORT_SYMBOL(__list_del_entry);
97506
97507@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
97508 void __list_add_rcu(struct list_head *new,
97509 struct list_head *prev, struct list_head *next)
97510 {
97511- WARN(next->prev != prev,
97512- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
97513- prev, next->prev, next);
97514- WARN(prev->next != next,
97515- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
97516- next, prev->next, prev);
97517+ if (!__list_add_debug(new, prev, next))
97518+ return;
97519+
97520 new->next = next;
97521 new->prev = prev;
97522 rcu_assign_pointer(list_next_rcu(prev), new);
97523 next->prev = new;
97524 }
97525 EXPORT_SYMBOL(__list_add_rcu);
97526+#endif
97527+
97528+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
97529+{
97530+#ifdef CONFIG_DEBUG_LIST
97531+ if (!__list_add_debug(new, prev, next))
97532+ return;
97533+#endif
97534+
97535+ pax_open_kernel();
97536+ next->prev = new;
97537+ new->next = next;
97538+ new->prev = prev;
97539+ prev->next = new;
97540+ pax_close_kernel();
97541+}
97542+EXPORT_SYMBOL(__pax_list_add);
97543+
97544+void pax_list_del(struct list_head *entry)
97545+{
97546+#ifdef CONFIG_DEBUG_LIST
97547+ if (!__list_del_entry_debug(entry))
97548+ return;
97549+#endif
97550+
97551+ pax_open_kernel();
97552+ __list_del(entry->prev, entry->next);
97553+ entry->next = LIST_POISON1;
97554+ entry->prev = LIST_POISON2;
97555+ pax_close_kernel();
97556+}
97557+EXPORT_SYMBOL(pax_list_del);
97558+
97559+void pax_list_del_init(struct list_head *entry)
97560+{
97561+ pax_open_kernel();
97562+ __list_del(entry->prev, entry->next);
97563+ INIT_LIST_HEAD(entry);
97564+ pax_close_kernel();
97565+}
97566+EXPORT_SYMBOL(pax_list_del_init);
97567+
97568+void __pax_list_add_rcu(struct list_head *new,
97569+ struct list_head *prev, struct list_head *next)
97570+{
97571+#ifdef CONFIG_DEBUG_LIST
97572+ if (!__list_add_debug(new, prev, next))
97573+ return;
97574+#endif
97575+
97576+ pax_open_kernel();
97577+ new->next = next;
97578+ new->prev = prev;
97579+ rcu_assign_pointer(list_next_rcu(prev), new);
97580+ next->prev = new;
97581+ pax_close_kernel();
97582+}
97583+EXPORT_SYMBOL(__pax_list_add_rcu);
97584+
97585+void pax_list_del_rcu(struct list_head *entry)
97586+{
97587+#ifdef CONFIG_DEBUG_LIST
97588+ if (!__list_del_entry_debug(entry))
97589+ return;
97590+#endif
97591+
97592+ pax_open_kernel();
97593+ __list_del(entry->prev, entry->next);
97594+ entry->next = LIST_POISON1;
97595+ entry->prev = LIST_POISON2;
97596+ pax_close_kernel();
97597+}
97598+EXPORT_SYMBOL(pax_list_del_rcu);
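
Two things happen in this list_debug.c rewrite. First, the corruption checks are factored into __list_add_debug()/__list_del_entry_debug() and escalated from WARN() to BUG(). Second, a pax_list_* family is added for lists whose nodes live in read-only memory, doing the link and unlink inside pax_open_kernel()/pax_close_kernel() — which is also why the lib/Makefile hunk above builds list_debug.o unconditionally rather than only under CONFIG_DEBUG_LIST. The checks themselves are ordinary pointer sanity tests; a userspace rendering that abort()s where the kernel would BUG():

#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_POISON1 ((struct list_head *)0x100)
#define LIST_POISON2 ((struct list_head *)0x200)

static void check(int ok, const char *msg)
{
	if (!ok) { fprintf(stderr, "list corruption: %s\n", msg); abort(); }
}

static void list_add_checked(struct list_head *new,
			     struct list_head *prev, struct list_head *next)
{
	check(next->prev == prev, "next->prev != prev");
	check(prev->next == next, "prev->next != next");
	check(new != prev && new != next, "double add");
	next->prev = new; new->next = next;
	new->prev = prev; prev->next = new;
}

static void list_del_checked(struct list_head *entry)
{
	check(entry->next != LIST_POISON1, "already deleted (POISON1)");
	check(entry->prev != LIST_POISON2, "already deleted (POISON2)");
	check(entry->prev->next == entry, "prev->next != entry");
	check(entry->next->prev == entry, "next->prev != entry");
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = LIST_POISON1;
	entry->prev = LIST_POISON2;
}

int main(void)
{
	struct list_head head = { &head, &head }, a, b;

	list_add_checked(&a, &head, head.next);
	list_add_checked(&b, &head, head.next);
	list_del_checked(&a);
	list_del_checked(&a);	/* second delete trips the poison check */
	return 0;
}
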
97599diff --git a/lib/lockref.c b/lib/lockref.c
97600index f07a40d..0a445a7 100644
97601--- a/lib/lockref.c
97602+++ b/lib/lockref.c
97603@@ -49,13 +49,13 @@
97604 void lockref_get(struct lockref *lockref)
97605 {
97606 CMPXCHG_LOOP(
97607- new.count++;
97608+ __lockref_inc(&new);
97609 ,
97610 return;
97611 );
97612
97613 spin_lock(&lockref->lock);
97614- lockref->count++;
97615+ __lockref_inc(lockref);
97616 spin_unlock(&lockref->lock);
97617 }
97618 EXPORT_SYMBOL(lockref_get);
97619@@ -70,7 +70,7 @@ int lockref_get_not_zero(struct lockref *lockref)
97620 int retval;
97621
97622 CMPXCHG_LOOP(
97623- new.count++;
97624+ __lockref_inc(&new);
97625 if (!old.count)
97626 return 0;
97627 ,
97628@@ -80,7 +80,7 @@ int lockref_get_not_zero(struct lockref *lockref)
97629 spin_lock(&lockref->lock);
97630 retval = 0;
97631 if (lockref->count) {
97632- lockref->count++;
97633+ __lockref_inc(lockref);
97634 retval = 1;
97635 }
97636 spin_unlock(&lockref->lock);
97637@@ -97,7 +97,7 @@ EXPORT_SYMBOL(lockref_get_not_zero);
97638 int lockref_get_or_lock(struct lockref *lockref)
97639 {
97640 CMPXCHG_LOOP(
97641- new.count++;
97642+ __lockref_inc(&new);
97643 if (!old.count)
97644 break;
97645 ,
97646@@ -107,7 +107,7 @@ int lockref_get_or_lock(struct lockref *lockref)
97647 spin_lock(&lockref->lock);
97648 if (!lockref->count)
97649 return 0;
97650- lockref->count++;
97651+ __lockref_inc(lockref);
97652 spin_unlock(&lockref->lock);
97653 return 1;
97654 }
97655@@ -121,7 +121,7 @@ EXPORT_SYMBOL(lockref_get_or_lock);
97656 int lockref_put_or_lock(struct lockref *lockref)
97657 {
97658 CMPXCHG_LOOP(
97659- new.count--;
97660+ __lockref_dec(&new);
97661 if (old.count <= 1)
97662 break;
97663 ,
97664@@ -131,7 +131,7 @@ int lockref_put_or_lock(struct lockref *lockref)
97665 spin_lock(&lockref->lock);
97666 if (lockref->count <= 1)
97667 return 0;
97668- lockref->count--;
97669+ __lockref_dec(lockref);
97670 spin_unlock(&lockref->lock);
97671 return 1;
97672 }
97673@@ -158,7 +158,7 @@ int lockref_get_not_dead(struct lockref *lockref)
97674 int retval;
97675
97676 CMPXCHG_LOOP(
97677- new.count++;
97678+ __lockref_inc(&new);
97679 if ((int)old.count < 0)
97680 return 0;
97681 ,
97682@@ -168,7 +168,7 @@ int lockref_get_not_dead(struct lockref *lockref)
97683 spin_lock(&lockref->lock);
97684 retval = 0;
97685 if ((int) lockref->count >= 0) {
97686- lockref->count++;
97687+ __lockref_inc(lockref);
97688 retval = 1;
97689 }
97690 spin_unlock(&lockref->lock);
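
Every lockref.c hunk swaps a raw new.count++/new.count-- for __lockref_inc()/__lockref_dec() wrappers, so the reference count can be routed through overflow-checked arithmetic without disturbing the lockless CMPXCHG_LOOP fast path. A compact C11 model of that fast path — a spinlock flag and a count packed into one word that is swapped atomically (the packing here is simplified; only the wrapper motivation comes from the patch):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct lockref {
	_Atomic uint64_t lock_count;	/* low 32 bits: "lock", high 32: count */
};

#define LOCKED(v)  ((uint32_t)(v))
#define COUNT(v)   ((uint32_t)((v) >> 32))
#define PACK(l, c) (((uint64_t)(c) << 32) | (l))

/* Lockless increment: only succeeds while the lock word is 0. */
static int lockref_get_fast(struct lockref *lr)
{
	uint64_t old = atomic_load(&lr->lock_count);
	while (!LOCKED(old)) {
		uint64_t new = PACK(0, COUNT(old) + 1);
		if (atomic_compare_exchange_weak(&lr->lock_count, &old, new))
			return 1;	/* cmpxchg won the race */
	}
	return 0;			/* fall back to taking the spinlock */
}

int main(void)
{
	struct lockref lr = { PACK(0, 1) };
	lockref_get_fast(&lr);
	lockref_get_fast(&lr);
	printf("count = %u\n", COUNT(atomic_load(&lr.lock_count)));	/* 3 */
	return 0;
}
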
97691diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
97692index 963b703..438bc51 100644
97693--- a/lib/percpu-refcount.c
97694+++ b/lib/percpu-refcount.c
97695@@ -29,7 +29,7 @@
97696 * can't hit 0 before we've added up all the percpu refs.
97697 */
97698
97699-#define PCPU_COUNT_BIAS (1U << 31)
97700+#define PCPU_COUNT_BIAS (1U << 30)
97701
97702 /**
97703 * percpu_ref_init - initialize a percpu refcount
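
Halving PCPU_COUNT_BIAS from 1U << 31 to 1U << 30 looks like REFCOUNT fallout: on a two's-complement machine a bias of 2^31 lands in the sign bit, so every freshly biased count would read as negative to instrumentation that treats negative refcounts as overflow, whereas 2^30 stays safely positive. The arithmetic, for the record (the exact printed values assume the usual two's-complement int):

#include <stdio.h>

int main(void)
{
	/* The bias as seen through a signed 32-bit atomic_t: */
	printf("1U<<31 as int: %d\n", (int)(1U << 31));	/* negative: trips REFCOUNT */
	printf("1U<<30 as int: %d\n", (int)(1U << 30));	/* stays positive */
	return 0;
}
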
97704diff --git a/lib/radix-tree.c b/lib/radix-tree.c
97705index 3291a8e..346a91e 100644
97706--- a/lib/radix-tree.c
97707+++ b/lib/radix-tree.c
97708@@ -67,7 +67,7 @@ struct radix_tree_preload {
97709 int nr;
97710 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
97711 };
97712-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
97713+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
97714
97715 static inline void *ptr_to_indirect(void *ptr)
97716 {
97717diff --git a/lib/random32.c b/lib/random32.c
97718index fa5da61..35fe9af 100644
97719--- a/lib/random32.c
97720+++ b/lib/random32.c
97721@@ -42,7 +42,7 @@
97722 static void __init prandom_state_selftest(void);
97723 #endif
97724
97725-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
97726+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
97727
97728 /**
97729 * prandom_u32_state - seeded pseudo-random number generator.
97730diff --git a/lib/rbtree.c b/lib/rbtree.c
97731index 65f4eff..2cfa167 100644
97732--- a/lib/rbtree.c
97733+++ b/lib/rbtree.c
97734@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
97735 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
97736
97737 static const struct rb_augment_callbacks dummy_callbacks = {
97738- dummy_propagate, dummy_copy, dummy_rotate
97739+ .propagate = dummy_propagate,
97740+ .copy = dummy_copy,
97741+ .rotate = dummy_rotate
97742 };
97743
97744 void rb_insert_color(struct rb_node *node, struct rb_root *root)
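
The dummy_callbacks change is purely initializer style: positional initialization binds by field order and silently misbinds if the fields are ever rearranged, while designated initializers bind by name — a property that matters in a patch that also carries structure-layout randomization. Side by side:

#include <stdio.h>

struct rb_augment_callbacks {
	void (*propagate)(int);
	void (*copy)(int);
	void (*rotate)(int);
};

static void dummy_propagate(int x) { (void)x; }
static void dummy_copy(int x)      { (void)x; }
static void dummy_rotate(int x)    { (void)x; }

/* Positional: meaning depends on field order staying fixed. */
static const struct rb_augment_callbacks positional = {
	dummy_propagate, dummy_copy, dummy_rotate
};

/* Designated: each callback is bound by name. */
static const struct rb_augment_callbacks designated = {
	.propagate = dummy_propagate,
	.copy      = dummy_copy,
	.rotate    = dummy_rotate,
};

int main(void)
{
	printf("%d\n", positional.rotate == designated.rotate);	/* 1 */
	return 0;
}
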
97745diff --git a/lib/show_mem.c b/lib/show_mem.c
97746index 0922579..9d7adb9 100644
97747--- a/lib/show_mem.c
97748+++ b/lib/show_mem.c
97749@@ -44,6 +44,6 @@ void show_mem(unsigned int filter)
97750 quicklist_total_size());
97751 #endif
97752 #ifdef CONFIG_MEMORY_FAILURE
97753- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
97754+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
97755 #endif
97756 }
97757diff --git a/lib/string.c b/lib/string.c
97758index 992bf30..f3c6ff5 100644
97759--- a/lib/string.c
97760+++ b/lib/string.c
97761@@ -807,9 +807,9 @@ void *memchr_inv(const void *start, int c, size_t bytes)
97762 return check_bytes8(start, value, bytes);
97763
97764 value64 = value;
97765-#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
97766+#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
97767 value64 *= 0x0101010101010101;
97768-#elif defined(ARCH_HAS_FAST_MULTIPLIER)
97769+#elif defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER)
97770 value64 *= 0x01010101;
97771 value64 |= value64 << 32;
97772 #else
97773diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
97774index bb2b201..46abaf9 100644
97775--- a/lib/strncpy_from_user.c
97776+++ b/lib/strncpy_from_user.c
97777@@ -21,7 +21,7 @@
97778 */
97779 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
97780 {
97781- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
97782+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
97783 long res = 0;
97784
97785 /*
97786diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
97787index a28df52..3d55877 100644
97788--- a/lib/strnlen_user.c
97789+++ b/lib/strnlen_user.c
97790@@ -26,7 +26,7 @@
97791 */
97792 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
97793 {
97794- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
97795+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
97796 long align, res = 0;
97797 unsigned long c;
97798
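
In both strncpy_from_user.c and strnlen_user.c the WORD_AT_A_TIME_CONSTANTS struct becomes static const, so its bitmasks live once in rodata instead of being rebuilt on the stack on every call. Those constants drive the classic word-at-a-time zero-byte scan, which is easy to demonstrate on its own:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* 64-bit word-at-a-time masks, in the spirit of the kernel's generic version. */
static const struct { uint64_t one_bits, high_bits; } constants = {
	0x0101010101010101ULL, 0x8080808080808080ULL
};

/* Nonzero iff some byte of v is 0x00. */
static uint64_t has_zero(uint64_t v)
{
	return (v - constants.one_bits) & ~v & constants.high_bits;
}

int main(void)
{
	uint64_t w1, w2;
	memcpy(&w1, "abcdefgh", 8);	/* no NUL byte   */
	memcpy(&w2, "abc\0defg", 8);	/* NUL at byte 3 */
	printf("w1: %s, w2: %s\n",
	       has_zero(w1) ? "has zero" : "no zero",
	       has_zero(w2) ? "has zero" : "no zero");
	return 0;
}
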
97799diff --git a/lib/swiotlb.c b/lib/swiotlb.c
97800index 4abda07..b9d3765 100644
97801--- a/lib/swiotlb.c
97802+++ b/lib/swiotlb.c
97803@@ -682,7 +682,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
97804
97805 void
97806 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
97807- dma_addr_t dev_addr)
97808+ dma_addr_t dev_addr, struct dma_attrs *attrs)
97809 {
97810 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
97811
97812diff --git a/lib/test_bpf.c b/lib/test_bpf.c
97813deleted file mode 100644
97814index c579e0f..0000000
97815--- a/lib/test_bpf.c
97816+++ /dev/null
97817@@ -1,1929 +0,0 @@
97818-/*
97819- * Testsuite for BPF interpreter and BPF JIT compiler
97820- *
97821- * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
97822- *
97823- * This program is free software; you can redistribute it and/or
97824- * modify it under the terms of version 2 of the GNU General Public
97825- * License as published by the Free Software Foundation.
97826- *
97827- * This program is distributed in the hope that it will be useful, but
97828- * WITHOUT ANY WARRANTY; without even the implied warranty of
97829- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
97830- * General Public License for more details.
97831- */
97832-
97833-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
97834-
97835-#include <linux/init.h>
97836-#include <linux/module.h>
97837-#include <linux/filter.h>
97838-#include <linux/skbuff.h>
97839-#include <linux/netdevice.h>
97840-#include <linux/if_vlan.h>
97841-
97842-/* General test specific settings */
97843-#define MAX_SUBTESTS 3
97844-#define MAX_TESTRUNS 10000
97845-#define MAX_DATA 128
97846-#define MAX_INSNS 512
97847-#define MAX_K 0xffffFFFF
97848-
97849-/* Few constants used to init test 'skb' */
97850-#define SKB_TYPE 3
97851-#define SKB_MARK 0x1234aaaa
97852-#define SKB_HASH 0x1234aaab
97853-#define SKB_QUEUE_MAP 123
97854-#define SKB_VLAN_TCI 0xffff
97855-#define SKB_DEV_IFINDEX 577
97856-#define SKB_DEV_TYPE 588
97857-
97858-/* Redefine REGs to make tests less verbose */
97859-#define R0 BPF_REG_0
97860-#define R1 BPF_REG_1
97861-#define R2 BPF_REG_2
97862-#define R3 BPF_REG_3
97863-#define R4 BPF_REG_4
97864-#define R5 BPF_REG_5
97865-#define R6 BPF_REG_6
97866-#define R7 BPF_REG_7
97867-#define R8 BPF_REG_8
97868-#define R9 BPF_REG_9
97869-#define R10 BPF_REG_10
97870-
97871-/* Flags that can be passed to test cases */
97872-#define FLAG_NO_DATA BIT(0)
97873-#define FLAG_EXPECTED_FAIL BIT(1)
97874-
97875-enum {
97876- CLASSIC = BIT(6), /* Old BPF instructions only. */
97877- INTERNAL = BIT(7), /* Extended instruction set. */
97878-};
97879-
97880-#define TEST_TYPE_MASK (CLASSIC | INTERNAL)
97881-
97882-struct bpf_test {
97883- const char *descr;
97884- union {
97885- struct sock_filter insns[MAX_INSNS];
97886- struct sock_filter_int insns_int[MAX_INSNS];
97887- } u;
97888- __u8 aux;
97889- __u8 data[MAX_DATA];
97890- struct {
97891- int data_size;
97892- __u32 result;
97893- } test[MAX_SUBTESTS];
97894-};
97895-
97896-static struct bpf_test tests[] = {
97897- {
97898- "TAX",
97899- .u.insns = {
97900- BPF_STMT(BPF_LD | BPF_IMM, 1),
97901- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97902- BPF_STMT(BPF_LD | BPF_IMM, 2),
97903- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
97904- BPF_STMT(BPF_ALU | BPF_NEG, 0), /* A == -3 */
97905- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97906- BPF_STMT(BPF_LD | BPF_LEN, 0),
97907- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
97908- BPF_STMT(BPF_MISC | BPF_TAX, 0), /* X == len - 3 */
97909- BPF_STMT(BPF_LD | BPF_B | BPF_IND, 1),
97910- BPF_STMT(BPF_RET | BPF_A, 0)
97911- },
97912- CLASSIC,
97913- { 10, 20, 30, 40, 50 },
97914- { { 2, 10 }, { 3, 20 }, { 4, 30 } },
97915- },
97916- {
97917- "TXA",
97918- .u.insns = {
97919- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97920- BPF_STMT(BPF_MISC | BPF_TXA, 0),
97921- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
97922- BPF_STMT(BPF_RET | BPF_A, 0) /* A == len * 2 */
97923- },
97924- CLASSIC,
97925- { 10, 20, 30, 40, 50 },
97926- { { 1, 2 }, { 3, 6 }, { 4, 8 } },
97927- },
97928- {
97929- "ADD_SUB_MUL_K",
97930- .u.insns = {
97931- BPF_STMT(BPF_LD | BPF_IMM, 1),
97932- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 2),
97933- BPF_STMT(BPF_LDX | BPF_IMM, 3),
97934- BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
97935- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0xffffffff),
97936- BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 3),
97937- BPF_STMT(BPF_RET | BPF_A, 0)
97938- },
97939- CLASSIC | FLAG_NO_DATA,
97940- { },
97941- { { 0, 0xfffffffd } }
97942- },
97943- {
97944- "DIV_KX",
97945- .u.insns = {
97946- BPF_STMT(BPF_LD | BPF_IMM, 8),
97947- BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 2),
97948- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97949- BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
97950- BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),
97951- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97952- BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
97953- BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x70000000),
97954- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
97955- BPF_STMT(BPF_RET | BPF_A, 0)
97956- },
97957- CLASSIC | FLAG_NO_DATA,
97958- { },
97959- { { 0, 0x40000001 } }
97960- },
97961- {
97962- "AND_OR_LSH_K",
97963- .u.insns = {
97964- BPF_STMT(BPF_LD | BPF_IMM, 0xff),
97965- BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
97966- BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 27),
97967- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97968- BPF_STMT(BPF_LD | BPF_IMM, 0xf),
97969- BPF_STMT(BPF_ALU | BPF_OR | BPF_K, 0xf0),
97970- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
97971- BPF_STMT(BPF_RET | BPF_A, 0)
97972- },
97973- CLASSIC | FLAG_NO_DATA,
97974- { },
97975- { { 0, 0x800000ff }, { 1, 0x800000ff } },
97976- },
97977- {
97978- "LD_IMM_0",
97979- .u.insns = {
97980- BPF_STMT(BPF_LD | BPF_IMM, 0), /* ld #0 */
97981- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 1, 0),
97982- BPF_STMT(BPF_RET | BPF_K, 0),
97983- BPF_STMT(BPF_RET | BPF_K, 1),
97984- },
97985- CLASSIC,
97986- { },
97987- { { 1, 1 } },
97988- },
97989- {
97990- "LD_IND",
97991- .u.insns = {
97992- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97993- BPF_STMT(BPF_LD | BPF_H | BPF_IND, MAX_K),
97994- BPF_STMT(BPF_RET | BPF_K, 1)
97995- },
97996- CLASSIC,
97997- { },
97998- { { 1, 0 }, { 10, 0 }, { 60, 0 } },
97999- },
98000- {
98001- "LD_ABS",
98002- .u.insns = {
98003- BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 1000),
98004- BPF_STMT(BPF_RET | BPF_K, 1)
98005- },
98006- CLASSIC,
98007- { },
98008- { { 1, 0 }, { 10, 0 }, { 60, 0 } },
98009- },
98010- {
98011- "LD_ABS_LL",
98012- .u.insns = {
98013- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF),
98014- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98015- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF + 1),
98016- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98017- BPF_STMT(BPF_RET | BPF_A, 0)
98018- },
98019- CLASSIC,
98020- { 1, 2, 3 },
98021- { { 1, 0 }, { 2, 3 } },
98022- },
98023- {
98024- "LD_IND_LL",
98025- .u.insns = {
98026- BPF_STMT(BPF_LD | BPF_IMM, SKF_LL_OFF - 1),
98027- BPF_STMT(BPF_LDX | BPF_LEN, 0),
98028- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98029- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98030- BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
98031- BPF_STMT(BPF_RET | BPF_A, 0)
98032- },
98033- CLASSIC,
98034- { 1, 2, 3, 0xff },
98035- { { 1, 1 }, { 3, 3 }, { 4, 0xff } },
98036- },
98037- {
98038- "LD_ABS_NET",
98039- .u.insns = {
98040- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF),
98041- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98042- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 1),
98043- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98044- BPF_STMT(BPF_RET | BPF_A, 0)
98045- },
98046- CLASSIC,
98047- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
98048- { { 15, 0 }, { 16, 3 } },
98049- },
98050- {
98051- "LD_IND_NET",
98052- .u.insns = {
98053- BPF_STMT(BPF_LD | BPF_IMM, SKF_NET_OFF - 15),
98054- BPF_STMT(BPF_LDX | BPF_LEN, 0),
98055- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98056- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98057- BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
98058- BPF_STMT(BPF_RET | BPF_A, 0)
98059- },
98060- CLASSIC,
98061- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
98062- { { 14, 0 }, { 15, 1 }, { 17, 3 } },
98063- },
98064- {
98065- "LD_PKTTYPE",
98066- .u.insns = {
98067- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98068- SKF_AD_OFF + SKF_AD_PKTTYPE),
98069- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
98070- BPF_STMT(BPF_RET | BPF_K, 1),
98071- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98072- SKF_AD_OFF + SKF_AD_PKTTYPE),
98073- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
98074- BPF_STMT(BPF_RET | BPF_K, 1),
98075- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98076- SKF_AD_OFF + SKF_AD_PKTTYPE),
98077- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
98078- BPF_STMT(BPF_RET | BPF_K, 1),
98079- BPF_STMT(BPF_RET | BPF_A, 0)
98080- },
98081- CLASSIC,
98082- { },
98083- { { 1, 3 }, { 10, 3 } },
98084- },
98085- {
98086- "LD_MARK",
98087- .u.insns = {
98088- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98089- SKF_AD_OFF + SKF_AD_MARK),
98090- BPF_STMT(BPF_RET | BPF_A, 0)
98091- },
98092- CLASSIC,
98093- { },
98094- { { 1, SKB_MARK}, { 10, SKB_MARK} },
98095- },
98096- {
98097- "LD_RXHASH",
98098- .u.insns = {
98099- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98100- SKF_AD_OFF + SKF_AD_RXHASH),
98101- BPF_STMT(BPF_RET | BPF_A, 0)
98102- },
98103- CLASSIC,
98104- { },
98105- { { 1, SKB_HASH}, { 10, SKB_HASH} },
98106- },
98107- {
98108- "LD_QUEUE",
98109- .u.insns = {
98110- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98111- SKF_AD_OFF + SKF_AD_QUEUE),
98112- BPF_STMT(BPF_RET | BPF_A, 0)
98113- },
98114- CLASSIC,
98115- { },
98116- { { 1, SKB_QUEUE_MAP }, { 10, SKB_QUEUE_MAP } },
98117- },
98118- {
98119- "LD_PROTOCOL",
98120- .u.insns = {
98121- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 1),
98122- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 20, 1, 0),
98123- BPF_STMT(BPF_RET | BPF_K, 0),
98124- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98125- SKF_AD_OFF + SKF_AD_PROTOCOL),
98126- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98127- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
98128- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 30, 1, 0),
98129- BPF_STMT(BPF_RET | BPF_K, 0),
98130- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98131- BPF_STMT(BPF_RET | BPF_A, 0)
98132- },
98133- CLASSIC,
98134- { 10, 20, 30 },
98135- { { 10, ETH_P_IP }, { 100, ETH_P_IP } },
98136- },
98137- {
98138- "LD_VLAN_TAG",
98139- .u.insns = {
98140- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98141- SKF_AD_OFF + SKF_AD_VLAN_TAG),
98142- BPF_STMT(BPF_RET | BPF_A, 0)
98143- },
98144- CLASSIC,
98145- { },
98146- {
98147- { 1, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT },
98148- { 10, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT }
98149- },
98150- },
98151- {
98152- "LD_VLAN_TAG_PRESENT",
98153- .u.insns = {
98154- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98155- SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT),
98156- BPF_STMT(BPF_RET | BPF_A, 0)
98157- },
98158- CLASSIC,
98159- { },
98160- {
98161- { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
98162- { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
98163- },
98164- },
98165- {
98166- "LD_IFINDEX",
98167- .u.insns = {
98168- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98169- SKF_AD_OFF + SKF_AD_IFINDEX),
98170- BPF_STMT(BPF_RET | BPF_A, 0)
98171- },
98172- CLASSIC,
98173- { },
98174- { { 1, SKB_DEV_IFINDEX }, { 10, SKB_DEV_IFINDEX } },
98175- },
98176- {
98177- "LD_HATYPE",
98178- .u.insns = {
98179- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98180- SKF_AD_OFF + SKF_AD_HATYPE),
98181- BPF_STMT(BPF_RET | BPF_A, 0)
98182- },
98183- CLASSIC,
98184- { },
98185- { { 1, SKB_DEV_TYPE }, { 10, SKB_DEV_TYPE } },
98186- },
98187- {
98188- "LD_CPU",
98189- .u.insns = {
98190- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98191- SKF_AD_OFF + SKF_AD_CPU),
98192- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98193- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98194- SKF_AD_OFF + SKF_AD_CPU),
98195- BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
98196- BPF_STMT(BPF_RET | BPF_A, 0)
98197- },
98198- CLASSIC,
98199- { },
98200- { { 1, 0 }, { 10, 0 } },
98201- },
98202- {
98203- "LD_NLATTR",
98204- .u.insns = {
98205- BPF_STMT(BPF_LDX | BPF_IMM, 2),
98206- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98207- BPF_STMT(BPF_LDX | BPF_IMM, 3),
98208- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98209- SKF_AD_OFF + SKF_AD_NLATTR),
98210- BPF_STMT(BPF_RET | BPF_A, 0)
98211- },
98212- CLASSIC,
98213-#ifdef __BIG_ENDIAN
98214- { 0xff, 0xff, 0, 4, 0, 2, 0, 4, 0, 3 },
98215-#else
98216- { 0xff, 0xff, 4, 0, 2, 0, 4, 0, 3, 0 },
98217-#endif
98218- { { 4, 0 }, { 20, 6 } },
98219- },
98220- {
98221- "LD_NLATTR_NEST",
98222- .u.insns = {
98223- BPF_STMT(BPF_LD | BPF_IMM, 2),
98224- BPF_STMT(BPF_LDX | BPF_IMM, 3),
98225- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98226- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
98227- BPF_STMT(BPF_LD | BPF_IMM, 2),
98228- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98229- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
98230- BPF_STMT(BPF_LD | BPF_IMM, 2),
98231- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98232- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
98233- BPF_STMT(BPF_LD | BPF_IMM, 2),
98234- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98235- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
98236- BPF_STMT(BPF_LD | BPF_IMM, 2),
98237- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98238- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
98239- BPF_STMT(BPF_LD | BPF_IMM, 2),
98240- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98241- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
98242- BPF_STMT(BPF_LD | BPF_IMM, 2),
98243- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98244- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
98245- BPF_STMT(BPF_LD | BPF_IMM, 2),
98246- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98247- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
98248- BPF_STMT(BPF_RET | BPF_A, 0)
98249- },
98250- CLASSIC,
98251-#ifdef __BIG_ENDIAN
98252- { 0xff, 0xff, 0, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3 },
98253-#else
98254- { 0xff, 0xff, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3, 0 },
98255-#endif
98256- { { 4, 0 }, { 20, 10 } },
98257- },
98258- {
98259- "LD_PAYLOAD_OFF",
98260- .u.insns = {
98261- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98262- SKF_AD_OFF + SKF_AD_PAY_OFFSET),
98263- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98264- SKF_AD_OFF + SKF_AD_PAY_OFFSET),
98265- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98266- SKF_AD_OFF + SKF_AD_PAY_OFFSET),
98267- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98268- SKF_AD_OFF + SKF_AD_PAY_OFFSET),
98269- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98270- SKF_AD_OFF + SKF_AD_PAY_OFFSET),
98271- BPF_STMT(BPF_RET | BPF_A, 0)
98272- },
98273- CLASSIC,
98274-		/* 00:00:00:00:00:00 > 00:00:00:00:00:00, ethertype IPv4 (0x0800),
98275- * length 98: 127.0.0.1 > 127.0.0.1: ICMP echo request,
98276- * id 9737, seq 1, length 64
98277- */
98278- { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
98279- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
98280- 0x08, 0x00,
98281- 0x45, 0x00, 0x00, 0x54, 0xac, 0x8b, 0x40, 0x00, 0x40,
98282- 0x01, 0x90, 0x1b, 0x7f, 0x00, 0x00, 0x01 },
98283- { { 30, 0 }, { 100, 42 } },
98284- },
98285- {
98286- "LD_ANC_XOR",
98287- .u.insns = {
98288- BPF_STMT(BPF_LD | BPF_IMM, 10),
98289- BPF_STMT(BPF_LDX | BPF_IMM, 300),
98290- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98291- SKF_AD_OFF + SKF_AD_ALU_XOR_X),
98292- BPF_STMT(BPF_RET | BPF_A, 0)
98293- },
98294- CLASSIC,
98295- { },
98296- { { 4, 10 ^ 300 }, { 20, 10 ^ 300 } },
98297- },
98298- {
98299- "SPILL_FILL",
98300- .u.insns = {
98301- BPF_STMT(BPF_LDX | BPF_LEN, 0),
98302- BPF_STMT(BPF_LD | BPF_IMM, 2),
98303- BPF_STMT(BPF_ALU | BPF_RSH, 1),
98304- BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
98305- BPF_STMT(BPF_ST, 1), /* M1 = 1 ^ len */
98306- BPF_STMT(BPF_ALU | BPF_XOR | BPF_K, 0x80000000),
98307- BPF_STMT(BPF_ST, 2), /* M2 = 1 ^ len ^ 0x80000000 */
98308- BPF_STMT(BPF_STX, 15), /* M3 = len */
98309- BPF_STMT(BPF_LDX | BPF_MEM, 1),
98310- BPF_STMT(BPF_LD | BPF_MEM, 2),
98311- BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
98312- BPF_STMT(BPF_LDX | BPF_MEM, 15),
98313- BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
98314- BPF_STMT(BPF_RET | BPF_A, 0)
98315- },
98316- CLASSIC,
98317- { },
98318- { { 1, 0x80000001 }, { 2, 0x80000002 }, { 60, 0x80000000 ^ 60 } }
98319- },
98320- {
98321- "JEQ",
98322- .u.insns = {
98323- BPF_STMT(BPF_LDX | BPF_LEN, 0),
98324- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
98325- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 1),
98326- BPF_STMT(BPF_RET | BPF_K, 1),
98327- BPF_STMT(BPF_RET | BPF_K, MAX_K)
98328- },
98329- CLASSIC,
98330- { 3, 3, 3, 3, 3 },
98331- { { 1, 0 }, { 3, 1 }, { 4, MAX_K } },
98332- },
98333- {
98334- "JGT",
98335- .u.insns = {
98336- BPF_STMT(BPF_LDX | BPF_LEN, 0),
98337- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
98338- BPF_JUMP(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 1),
98339- BPF_STMT(BPF_RET | BPF_K, 1),
98340- BPF_STMT(BPF_RET | BPF_K, MAX_K)
98341- },
98342- CLASSIC,
98343- { 4, 4, 4, 3, 3 },
98344- { { 2, 0 }, { 3, 1 }, { 4, MAX_K } },
98345- },
98346- {
98347- "JGE",
98348- .u.insns = {
98349- BPF_STMT(BPF_LDX | BPF_LEN, 0),
98350- BPF_STMT(BPF_LD | BPF_B | BPF_IND, MAX_K),
98351- BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 1, 1, 0),
98352- BPF_STMT(BPF_RET | BPF_K, 10),
98353- BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 2, 1, 0),
98354- BPF_STMT(BPF_RET | BPF_K, 20),
98355- BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 3, 1, 0),
98356- BPF_STMT(BPF_RET | BPF_K, 30),
98357- BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 4, 1, 0),
98358- BPF_STMT(BPF_RET | BPF_K, 40),
98359- BPF_STMT(BPF_RET | BPF_K, MAX_K)
98360- },
98361- CLASSIC,
98362- { 1, 2, 3, 4, 5 },
98363- { { 1, 20 }, { 3, 40 }, { 5, MAX_K } },
98364- },
98365- {
98366- "JSET",
98367- .u.insns = {
98368- BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
98369- BPF_JUMP(BPF_JMP | BPF_JA, 1, 1, 1),
98370- BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
98371- BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
98372- BPF_STMT(BPF_LDX | BPF_LEN, 0),
98373- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98374- BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, 4),
98375- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98376- BPF_STMT(BPF_LD | BPF_W | BPF_IND, 0),
98377- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 1, 0, 1),
98378- BPF_STMT(BPF_RET | BPF_K, 10),
98379- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x80000000, 0, 1),
98380- BPF_STMT(BPF_RET | BPF_K, 20),
98381- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
98382- BPF_STMT(BPF_RET | BPF_K, 30),
98383- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
98384- BPF_STMT(BPF_RET | BPF_K, 30),
98385- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
98386- BPF_STMT(BPF_RET | BPF_K, 30),
98387- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
98388- BPF_STMT(BPF_RET | BPF_K, 30),
98389- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
98390- BPF_STMT(BPF_RET | BPF_K, 30),
98391- BPF_STMT(BPF_RET | BPF_K, MAX_K)
98392- },
98393- CLASSIC,
98394- { 0, 0xAA, 0x55, 1 },
98395- { { 4, 10 }, { 5, 20 }, { 6, MAX_K } },
98396- },
98397- {
98398- "tcpdump port 22",
98399- .u.insns = {
98400- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
98401- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 0, 8), /* IPv6 */
98402- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 20),
98403- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
98404- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
98405- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 17),
98406- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 54),
98407- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 14, 0),
98408- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 56),
98409- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 12, 13),
98410- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 12), /* IPv4 */
98411- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
98412- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
98413- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
98414- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 8),
98415- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
98416- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 6, 0),
98417- BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
98418- BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
98419- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
98420- BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
98421- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 1),
98422- BPF_STMT(BPF_RET | BPF_K, 0xffff),
98423- BPF_STMT(BPF_RET | BPF_K, 0),
98424- },
98425- CLASSIC,
98426- /* 3c:07:54:43:e5:76 > 10:bf:48:d6:43:d6, ethertype IPv4(0x0800)
98427- * length 114: 10.1.1.149.49700 > 10.1.2.10.22: Flags [P.],
98428- * seq 1305692979:1305693027, ack 3650467037, win 65535,
98429- * options [nop,nop,TS val 2502645400 ecr 3971138], length 48
98430- */
98431- { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
98432- 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
98433- 0x08, 0x00,
98434- 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
98435- 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
98436- 0x0a, 0x01, 0x01, 0x95, /* ip src */
98437- 0x0a, 0x01, 0x02, 0x0a, /* ip dst */
98438- 0xc2, 0x24,
98439- 0x00, 0x16 /* dst port */ },
98440- { { 10, 0 }, { 30, 0 }, { 100, 65535 } },
98441- },
98442- {
98443- "tcpdump complex",
98444- .u.insns = {
98445- /* tcpdump -nei eth0 'tcp port 22 and (((ip[2:2] -
98446- * ((ip[0]&0xf)<<2)) - ((tcp[12]&0xf0)>>2)) != 0) and
98447- * (len > 115 or len < 30000000000)' -d
98448- */
98449- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
98450- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 30, 0),
98451- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x800, 0, 29),
98452- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
98453- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 0, 27),
98454- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
98455- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 25, 0),
98456- BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
98457- BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
98458- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
98459- BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
98460- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 20),
98461- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 16),
98462- BPF_STMT(BPF_ST, 1),
98463- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 14),
98464- BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf),
98465- BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 2),
98466- BPF_STMT(BPF_MISC | BPF_TAX, 0x5), /* libpcap emits K on TAX */
98467- BPF_STMT(BPF_LD | BPF_MEM, 1),
98468- BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
98469- BPF_STMT(BPF_ST, 5),
98470- BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
98471- BPF_STMT(BPF_LD | BPF_B | BPF_IND, 26),
98472- BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
98473- BPF_STMT(BPF_ALU | BPF_RSH | BPF_K, 2),
98474- BPF_STMT(BPF_MISC | BPF_TAX, 0x9), /* libpcap emits K on TAX */
98475- BPF_STMT(BPF_LD | BPF_MEM, 5),
98476- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 4, 0),
98477- BPF_STMT(BPF_LD | BPF_LEN, 0),
98478- BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, 0x73, 1, 0),
98479- BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 0xfc23ac00, 1, 0),
98480- BPF_STMT(BPF_RET | BPF_K, 0xffff),
98481- BPF_STMT(BPF_RET | BPF_K, 0),
98482- },
98483- CLASSIC,
98484- { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
98485- 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
98486- 0x08, 0x00,
98487- 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
98488- 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
98489- 0x0a, 0x01, 0x01, 0x95, /* ip src */
98490- 0x0a, 0x01, 0x02, 0x0a, /* ip dst */
98491- 0xc2, 0x24,
98492- 0x00, 0x16 /* dst port */ },
98493- { { 10, 0 }, { 30, 0 }, { 100, 65535 } },
98494- },
98495- {
98496- "RET_A",
98497- .u.insns = {
98498- /* check that unitialized X and A contain zeros */
98499- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98500- BPF_STMT(BPF_RET | BPF_A, 0)
98501- },
98502- CLASSIC,
98503- { },
98504- { {1, 0}, {2, 0} },
98505- },
98506- {
98507- "INT: ADD trivial",
98508- .u.insns_int = {
98509- BPF_ALU64_IMM(BPF_MOV, R1, 1),
98510- BPF_ALU64_IMM(BPF_ADD, R1, 2),
98511- BPF_ALU64_IMM(BPF_MOV, R2, 3),
98512- BPF_ALU64_REG(BPF_SUB, R1, R2),
98513- BPF_ALU64_IMM(BPF_ADD, R1, -1),
98514- BPF_ALU64_IMM(BPF_MUL, R1, 3),
98515- BPF_ALU64_REG(BPF_MOV, R0, R1),
98516- BPF_EXIT_INSN(),
98517- },
98518- INTERNAL,
98519- { },
98520- { { 0, 0xfffffffd } }
98521- },
98522- {
98523- "INT: MUL_X",
98524- .u.insns_int = {
98525- BPF_ALU64_IMM(BPF_MOV, R0, -1),
98526- BPF_ALU64_IMM(BPF_MOV, R1, -1),
98527- BPF_ALU64_IMM(BPF_MOV, R2, 3),
98528- BPF_ALU64_REG(BPF_MUL, R1, R2),
98529- BPF_JMP_IMM(BPF_JEQ, R1, 0xfffffffd, 1),
98530- BPF_EXIT_INSN(),
98531- BPF_ALU64_IMM(BPF_MOV, R0, 1),
98532- BPF_EXIT_INSN(),
98533- },
98534- INTERNAL,
98535- { },
98536- { { 0, 1 } }
98537- },
98538- {
98539- "INT: MUL_X2",
98540- .u.insns_int = {
98541- BPF_ALU32_IMM(BPF_MOV, R0, -1),
98542- BPF_ALU32_IMM(BPF_MOV, R1, -1),
98543- BPF_ALU32_IMM(BPF_MOV, R2, 3),
98544- BPF_ALU64_REG(BPF_MUL, R1, R2),
98545- BPF_ALU64_IMM(BPF_RSH, R1, 8),
98546- BPF_JMP_IMM(BPF_JEQ, R1, 0x2ffffff, 1),
98547- BPF_EXIT_INSN(),
98548- BPF_ALU32_IMM(BPF_MOV, R0, 1),
98549- BPF_EXIT_INSN(),
98550- },
98551- INTERNAL,
98552- { },
98553- { { 0, 1 } }
98554- },
98555- {
98556- "INT: MUL32_X",
98557- .u.insns_int = {
98558- BPF_ALU32_IMM(BPF_MOV, R0, -1),
98559- BPF_ALU64_IMM(BPF_MOV, R1, -1),
98560- BPF_ALU32_IMM(BPF_MOV, R2, 3),
98561- BPF_ALU32_REG(BPF_MUL, R1, R2),
98562- BPF_ALU64_IMM(BPF_RSH, R1, 8),
98563- BPF_JMP_IMM(BPF_JEQ, R1, 0xffffff, 1),
98564- BPF_EXIT_INSN(),
98565- BPF_ALU32_IMM(BPF_MOV, R0, 1),
98566- BPF_EXIT_INSN(),
98567- },
98568- INTERNAL,
98569- { },
98570- { { 0, 1 } }
98571- },
98572- {
98573- /* Have to test all register combinations, since
98574- * JITing of different registers will produce
98575- * different asm code.
98576- */
98577- "INT: ADD 64-bit",
98578- .u.insns_int = {
98579- BPF_ALU64_IMM(BPF_MOV, R0, 0),
98580- BPF_ALU64_IMM(BPF_MOV, R1, 1),
98581- BPF_ALU64_IMM(BPF_MOV, R2, 2),
98582- BPF_ALU64_IMM(BPF_MOV, R3, 3),
98583- BPF_ALU64_IMM(BPF_MOV, R4, 4),
98584- BPF_ALU64_IMM(BPF_MOV, R5, 5),
98585- BPF_ALU64_IMM(BPF_MOV, R6, 6),
98586- BPF_ALU64_IMM(BPF_MOV, R7, 7),
98587- BPF_ALU64_IMM(BPF_MOV, R8, 8),
98588- BPF_ALU64_IMM(BPF_MOV, R9, 9),
98589- BPF_ALU64_IMM(BPF_ADD, R0, 20),
98590- BPF_ALU64_IMM(BPF_ADD, R1, 20),
98591- BPF_ALU64_IMM(BPF_ADD, R2, 20),
98592- BPF_ALU64_IMM(BPF_ADD, R3, 20),
98593- BPF_ALU64_IMM(BPF_ADD, R4, 20),
98594- BPF_ALU64_IMM(BPF_ADD, R5, 20),
98595- BPF_ALU64_IMM(BPF_ADD, R6, 20),
98596- BPF_ALU64_IMM(BPF_ADD, R7, 20),
98597- BPF_ALU64_IMM(BPF_ADD, R8, 20),
98598- BPF_ALU64_IMM(BPF_ADD, R9, 20),
98599- BPF_ALU64_IMM(BPF_SUB, R0, 10),
98600- BPF_ALU64_IMM(BPF_SUB, R1, 10),
98601- BPF_ALU64_IMM(BPF_SUB, R2, 10),
98602- BPF_ALU64_IMM(BPF_SUB, R3, 10),
98603- BPF_ALU64_IMM(BPF_SUB, R4, 10),
98604- BPF_ALU64_IMM(BPF_SUB, R5, 10),
98605- BPF_ALU64_IMM(BPF_SUB, R6, 10),
98606- BPF_ALU64_IMM(BPF_SUB, R7, 10),
98607- BPF_ALU64_IMM(BPF_SUB, R8, 10),
98608- BPF_ALU64_IMM(BPF_SUB, R9, 10),
98609- BPF_ALU64_REG(BPF_ADD, R0, R0),
98610- BPF_ALU64_REG(BPF_ADD, R0, R1),
98611- BPF_ALU64_REG(BPF_ADD, R0, R2),
98612- BPF_ALU64_REG(BPF_ADD, R0, R3),
98613- BPF_ALU64_REG(BPF_ADD, R0, R4),
98614- BPF_ALU64_REG(BPF_ADD, R0, R5),
98615- BPF_ALU64_REG(BPF_ADD, R0, R6),
98616- BPF_ALU64_REG(BPF_ADD, R0, R7),
98617- BPF_ALU64_REG(BPF_ADD, R0, R8),
98618- BPF_ALU64_REG(BPF_ADD, R0, R9), /* R0 == 155 */
98619- BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
98620- BPF_EXIT_INSN(),
98621- BPF_ALU64_REG(BPF_ADD, R1, R0),
98622- BPF_ALU64_REG(BPF_ADD, R1, R1),
98623- BPF_ALU64_REG(BPF_ADD, R1, R2),
98624- BPF_ALU64_REG(BPF_ADD, R1, R3),
98625- BPF_ALU64_REG(BPF_ADD, R1, R4),
98626- BPF_ALU64_REG(BPF_ADD, R1, R5),
98627- BPF_ALU64_REG(BPF_ADD, R1, R6),
98628- BPF_ALU64_REG(BPF_ADD, R1, R7),
98629- BPF_ALU64_REG(BPF_ADD, R1, R8),
98630- BPF_ALU64_REG(BPF_ADD, R1, R9), /* R1 == 456 */
98631- BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
98632- BPF_EXIT_INSN(),
98633- BPF_ALU64_REG(BPF_ADD, R2, R0),
98634- BPF_ALU64_REG(BPF_ADD, R2, R1),
98635- BPF_ALU64_REG(BPF_ADD, R2, R2),
98636- BPF_ALU64_REG(BPF_ADD, R2, R3),
98637- BPF_ALU64_REG(BPF_ADD, R2, R4),
98638- BPF_ALU64_REG(BPF_ADD, R2, R5),
98639- BPF_ALU64_REG(BPF_ADD, R2, R6),
98640- BPF_ALU64_REG(BPF_ADD, R2, R7),
98641- BPF_ALU64_REG(BPF_ADD, R2, R8),
98642- BPF_ALU64_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
98643- BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
98644- BPF_EXIT_INSN(),
98645- BPF_ALU64_REG(BPF_ADD, R3, R0),
98646- BPF_ALU64_REG(BPF_ADD, R3, R1),
98647- BPF_ALU64_REG(BPF_ADD, R3, R2),
98648- BPF_ALU64_REG(BPF_ADD, R3, R3),
98649- BPF_ALU64_REG(BPF_ADD, R3, R4),
98650- BPF_ALU64_REG(BPF_ADD, R3, R5),
98651- BPF_ALU64_REG(BPF_ADD, R3, R6),
98652- BPF_ALU64_REG(BPF_ADD, R3, R7),
98653- BPF_ALU64_REG(BPF_ADD, R3, R8),
98654- BPF_ALU64_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
98655- BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
98656- BPF_EXIT_INSN(),
98657- BPF_ALU64_REG(BPF_ADD, R4, R0),
98658- BPF_ALU64_REG(BPF_ADD, R4, R1),
98659- BPF_ALU64_REG(BPF_ADD, R4, R2),
98660- BPF_ALU64_REG(BPF_ADD, R4, R3),
98661- BPF_ALU64_REG(BPF_ADD, R4, R4),
98662- BPF_ALU64_REG(BPF_ADD, R4, R5),
98663- BPF_ALU64_REG(BPF_ADD, R4, R6),
98664- BPF_ALU64_REG(BPF_ADD, R4, R7),
98665- BPF_ALU64_REG(BPF_ADD, R4, R8),
98666- BPF_ALU64_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
98667- BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
98668- BPF_EXIT_INSN(),
98669- BPF_ALU64_REG(BPF_ADD, R5, R0),
98670- BPF_ALU64_REG(BPF_ADD, R5, R1),
98671- BPF_ALU64_REG(BPF_ADD, R5, R2),
98672- BPF_ALU64_REG(BPF_ADD, R5, R3),
98673- BPF_ALU64_REG(BPF_ADD, R5, R4),
98674- BPF_ALU64_REG(BPF_ADD, R5, R5),
98675- BPF_ALU64_REG(BPF_ADD, R5, R6),
98676- BPF_ALU64_REG(BPF_ADD, R5, R7),
98677- BPF_ALU64_REG(BPF_ADD, R5, R8),
98678- BPF_ALU64_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
98679- BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
98680- BPF_EXIT_INSN(),
98681- BPF_ALU64_REG(BPF_ADD, R6, R0),
98682- BPF_ALU64_REG(BPF_ADD, R6, R1),
98683- BPF_ALU64_REG(BPF_ADD, R6, R2),
98684- BPF_ALU64_REG(BPF_ADD, R6, R3),
98685- BPF_ALU64_REG(BPF_ADD, R6, R4),
98686- BPF_ALU64_REG(BPF_ADD, R6, R5),
98687- BPF_ALU64_REG(BPF_ADD, R6, R6),
98688- BPF_ALU64_REG(BPF_ADD, R6, R7),
98689- BPF_ALU64_REG(BPF_ADD, R6, R8),
98690- BPF_ALU64_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
98691- BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
98692- BPF_EXIT_INSN(),
98693- BPF_ALU64_REG(BPF_ADD, R7, R0),
98694- BPF_ALU64_REG(BPF_ADD, R7, R1),
98695- BPF_ALU64_REG(BPF_ADD, R7, R2),
98696- BPF_ALU64_REG(BPF_ADD, R7, R3),
98697- BPF_ALU64_REG(BPF_ADD, R7, R4),
98698- BPF_ALU64_REG(BPF_ADD, R7, R5),
98699- BPF_ALU64_REG(BPF_ADD, R7, R6),
98700- BPF_ALU64_REG(BPF_ADD, R7, R7),
98701- BPF_ALU64_REG(BPF_ADD, R7, R8),
98702- BPF_ALU64_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
98703- BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
98704- BPF_EXIT_INSN(),
98705- BPF_ALU64_REG(BPF_ADD, R8, R0),
98706- BPF_ALU64_REG(BPF_ADD, R8, R1),
98707- BPF_ALU64_REG(BPF_ADD, R8, R2),
98708- BPF_ALU64_REG(BPF_ADD, R8, R3),
98709- BPF_ALU64_REG(BPF_ADD, R8, R4),
98710- BPF_ALU64_REG(BPF_ADD, R8, R5),
98711- BPF_ALU64_REG(BPF_ADD, R8, R6),
98712- BPF_ALU64_REG(BPF_ADD, R8, R7),
98713- BPF_ALU64_REG(BPF_ADD, R8, R8),
98714- BPF_ALU64_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
98715- BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
98716- BPF_EXIT_INSN(),
98717- BPF_ALU64_REG(BPF_ADD, R9, R0),
98718- BPF_ALU64_REG(BPF_ADD, R9, R1),
98719- BPF_ALU64_REG(BPF_ADD, R9, R2),
98720- BPF_ALU64_REG(BPF_ADD, R9, R3),
98721- BPF_ALU64_REG(BPF_ADD, R9, R4),
98722- BPF_ALU64_REG(BPF_ADD, R9, R5),
98723- BPF_ALU64_REG(BPF_ADD, R9, R6),
98724- BPF_ALU64_REG(BPF_ADD, R9, R7),
98725- BPF_ALU64_REG(BPF_ADD, R9, R8),
98726- BPF_ALU64_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
98727- BPF_ALU64_REG(BPF_MOV, R0, R9),
98728- BPF_EXIT_INSN(),
98729- },
98730- INTERNAL,
98731- { },
98732- { { 0, 2957380 } }
98733- },
98734- {
98735- "INT: ADD 32-bit",
98736- .u.insns_int = {
98737- BPF_ALU32_IMM(BPF_MOV, R0, 20),
98738- BPF_ALU32_IMM(BPF_MOV, R1, 1),
98739- BPF_ALU32_IMM(BPF_MOV, R2, 2),
98740- BPF_ALU32_IMM(BPF_MOV, R3, 3),
98741- BPF_ALU32_IMM(BPF_MOV, R4, 4),
98742- BPF_ALU32_IMM(BPF_MOV, R5, 5),
98743- BPF_ALU32_IMM(BPF_MOV, R6, 6),
98744- BPF_ALU32_IMM(BPF_MOV, R7, 7),
98745- BPF_ALU32_IMM(BPF_MOV, R8, 8),
98746- BPF_ALU32_IMM(BPF_MOV, R9, 9),
98747- BPF_ALU64_IMM(BPF_ADD, R1, 10),
98748- BPF_ALU64_IMM(BPF_ADD, R2, 10),
98749- BPF_ALU64_IMM(BPF_ADD, R3, 10),
98750- BPF_ALU64_IMM(BPF_ADD, R4, 10),
98751- BPF_ALU64_IMM(BPF_ADD, R5, 10),
98752- BPF_ALU64_IMM(BPF_ADD, R6, 10),
98753- BPF_ALU64_IMM(BPF_ADD, R7, 10),
98754- BPF_ALU64_IMM(BPF_ADD, R8, 10),
98755- BPF_ALU64_IMM(BPF_ADD, R9, 10),
98756- BPF_ALU32_REG(BPF_ADD, R0, R1),
98757- BPF_ALU32_REG(BPF_ADD, R0, R2),
98758- BPF_ALU32_REG(BPF_ADD, R0, R3),
98759- BPF_ALU32_REG(BPF_ADD, R0, R4),
98760- BPF_ALU32_REG(BPF_ADD, R0, R5),
98761- BPF_ALU32_REG(BPF_ADD, R0, R6),
98762- BPF_ALU32_REG(BPF_ADD, R0, R7),
98763- BPF_ALU32_REG(BPF_ADD, R0, R8),
98764- BPF_ALU32_REG(BPF_ADD, R0, R9), /* R0 == 155 */
98765- BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
98766- BPF_EXIT_INSN(),
98767- BPF_ALU32_REG(BPF_ADD, R1, R0),
98768- BPF_ALU32_REG(BPF_ADD, R1, R1),
98769- BPF_ALU32_REG(BPF_ADD, R1, R2),
98770- BPF_ALU32_REG(BPF_ADD, R1, R3),
98771- BPF_ALU32_REG(BPF_ADD, R1, R4),
98772- BPF_ALU32_REG(BPF_ADD, R1, R5),
98773- BPF_ALU32_REG(BPF_ADD, R1, R6),
98774- BPF_ALU32_REG(BPF_ADD, R1, R7),
98775- BPF_ALU32_REG(BPF_ADD, R1, R8),
98776- BPF_ALU32_REG(BPF_ADD, R1, R9), /* R1 == 456 */
98777- BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
98778- BPF_EXIT_INSN(),
98779- BPF_ALU32_REG(BPF_ADD, R2, R0),
98780- BPF_ALU32_REG(BPF_ADD, R2, R1),
98781- BPF_ALU32_REG(BPF_ADD, R2, R2),
98782- BPF_ALU32_REG(BPF_ADD, R2, R3),
98783- BPF_ALU32_REG(BPF_ADD, R2, R4),
98784- BPF_ALU32_REG(BPF_ADD, R2, R5),
98785- BPF_ALU32_REG(BPF_ADD, R2, R6),
98786- BPF_ALU32_REG(BPF_ADD, R2, R7),
98787- BPF_ALU32_REG(BPF_ADD, R2, R8),
98788- BPF_ALU32_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
98789- BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
98790- BPF_EXIT_INSN(),
98791- BPF_ALU32_REG(BPF_ADD, R3, R0),
98792- BPF_ALU32_REG(BPF_ADD, R3, R1),
98793- BPF_ALU32_REG(BPF_ADD, R3, R2),
98794- BPF_ALU32_REG(BPF_ADD, R3, R3),
98795- BPF_ALU32_REG(BPF_ADD, R3, R4),
98796- BPF_ALU32_REG(BPF_ADD, R3, R5),
98797- BPF_ALU32_REG(BPF_ADD, R3, R6),
98798- BPF_ALU32_REG(BPF_ADD, R3, R7),
98799- BPF_ALU32_REG(BPF_ADD, R3, R8),
98800- BPF_ALU32_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
98801- BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
98802- BPF_EXIT_INSN(),
98803- BPF_ALU32_REG(BPF_ADD, R4, R0),
98804- BPF_ALU32_REG(BPF_ADD, R4, R1),
98805- BPF_ALU32_REG(BPF_ADD, R4, R2),
98806- BPF_ALU32_REG(BPF_ADD, R4, R3),
98807- BPF_ALU32_REG(BPF_ADD, R4, R4),
98808- BPF_ALU32_REG(BPF_ADD, R4, R5),
98809- BPF_ALU32_REG(BPF_ADD, R4, R6),
98810- BPF_ALU32_REG(BPF_ADD, R4, R7),
98811- BPF_ALU32_REG(BPF_ADD, R4, R8),
98812- BPF_ALU32_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
98813- BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
98814- BPF_EXIT_INSN(),
98815- BPF_ALU32_REG(BPF_ADD, R5, R0),
98816- BPF_ALU32_REG(BPF_ADD, R5, R1),
98817- BPF_ALU32_REG(BPF_ADD, R5, R2),
98818- BPF_ALU32_REG(BPF_ADD, R5, R3),
98819- BPF_ALU32_REG(BPF_ADD, R5, R4),
98820- BPF_ALU32_REG(BPF_ADD, R5, R5),
98821- BPF_ALU32_REG(BPF_ADD, R5, R6),
98822- BPF_ALU32_REG(BPF_ADD, R5, R7),
98823- BPF_ALU32_REG(BPF_ADD, R5, R8),
98824- BPF_ALU32_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
98825- BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
98826- BPF_EXIT_INSN(),
98827- BPF_ALU32_REG(BPF_ADD, R6, R0),
98828- BPF_ALU32_REG(BPF_ADD, R6, R1),
98829- BPF_ALU32_REG(BPF_ADD, R6, R2),
98830- BPF_ALU32_REG(BPF_ADD, R6, R3),
98831- BPF_ALU32_REG(BPF_ADD, R6, R4),
98832- BPF_ALU32_REG(BPF_ADD, R6, R5),
98833- BPF_ALU32_REG(BPF_ADD, R6, R6),
98834- BPF_ALU32_REG(BPF_ADD, R6, R7),
98835- BPF_ALU32_REG(BPF_ADD, R6, R8),
98836- BPF_ALU32_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
98837- BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
98838- BPF_EXIT_INSN(),
98839- BPF_ALU32_REG(BPF_ADD, R7, R0),
98840- BPF_ALU32_REG(BPF_ADD, R7, R1),
98841- BPF_ALU32_REG(BPF_ADD, R7, R2),
98842- BPF_ALU32_REG(BPF_ADD, R7, R3),
98843- BPF_ALU32_REG(BPF_ADD, R7, R4),
98844- BPF_ALU32_REG(BPF_ADD, R7, R5),
98845- BPF_ALU32_REG(BPF_ADD, R7, R6),
98846- BPF_ALU32_REG(BPF_ADD, R7, R7),
98847- BPF_ALU32_REG(BPF_ADD, R7, R8),
98848- BPF_ALU32_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
98849- BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
98850- BPF_EXIT_INSN(),
98851- BPF_ALU32_REG(BPF_ADD, R8, R0),
98852- BPF_ALU32_REG(BPF_ADD, R8, R1),
98853- BPF_ALU32_REG(BPF_ADD, R8, R2),
98854- BPF_ALU32_REG(BPF_ADD, R8, R3),
98855- BPF_ALU32_REG(BPF_ADD, R8, R4),
98856- BPF_ALU32_REG(BPF_ADD, R8, R5),
98857- BPF_ALU32_REG(BPF_ADD, R8, R6),
98858- BPF_ALU32_REG(BPF_ADD, R8, R7),
98859- BPF_ALU32_REG(BPF_ADD, R8, R8),
98860- BPF_ALU32_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
98861- BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
98862- BPF_EXIT_INSN(),
98863- BPF_ALU32_REG(BPF_ADD, R9, R0),
98864- BPF_ALU32_REG(BPF_ADD, R9, R1),
98865- BPF_ALU32_REG(BPF_ADD, R9, R2),
98866- BPF_ALU32_REG(BPF_ADD, R9, R3),
98867- BPF_ALU32_REG(BPF_ADD, R9, R4),
98868- BPF_ALU32_REG(BPF_ADD, R9, R5),
98869- BPF_ALU32_REG(BPF_ADD, R9, R6),
98870- BPF_ALU32_REG(BPF_ADD, R9, R7),
98871- BPF_ALU32_REG(BPF_ADD, R9, R8),
98872- BPF_ALU32_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
98873- BPF_ALU32_REG(BPF_MOV, R0, R9),
98874- BPF_EXIT_INSN(),
98875- },
98876- INTERNAL,
98877- { },
98878- { { 0, 2957380 } }
98879- },
98880- { /* Mainly checking JIT here. */
98881- "INT: SUB",
98882- .u.insns_int = {
98883- BPF_ALU64_IMM(BPF_MOV, R0, 0),
98884- BPF_ALU64_IMM(BPF_MOV, R1, 1),
98885- BPF_ALU64_IMM(BPF_MOV, R2, 2),
98886- BPF_ALU64_IMM(BPF_MOV, R3, 3),
98887- BPF_ALU64_IMM(BPF_MOV, R4, 4),
98888- BPF_ALU64_IMM(BPF_MOV, R5, 5),
98889- BPF_ALU64_IMM(BPF_MOV, R6, 6),
98890- BPF_ALU64_IMM(BPF_MOV, R7, 7),
98891- BPF_ALU64_IMM(BPF_MOV, R8, 8),
98892- BPF_ALU64_IMM(BPF_MOV, R9, 9),
98893- BPF_ALU64_REG(BPF_SUB, R0, R0),
98894- BPF_ALU64_REG(BPF_SUB, R0, R1),
98895- BPF_ALU64_REG(BPF_SUB, R0, R2),
98896- BPF_ALU64_REG(BPF_SUB, R0, R3),
98897- BPF_ALU64_REG(BPF_SUB, R0, R4),
98898- BPF_ALU64_REG(BPF_SUB, R0, R5),
98899- BPF_ALU64_REG(BPF_SUB, R0, R6),
98900- BPF_ALU64_REG(BPF_SUB, R0, R7),
98901- BPF_ALU64_REG(BPF_SUB, R0, R8),
98902- BPF_ALU64_REG(BPF_SUB, R0, R9),
98903- BPF_ALU64_IMM(BPF_SUB, R0, 10),
98904- BPF_JMP_IMM(BPF_JEQ, R0, -55, 1),
98905- BPF_EXIT_INSN(),
98906- BPF_ALU64_REG(BPF_SUB, R1, R0),
98907- BPF_ALU64_REG(BPF_SUB, R1, R2),
98908- BPF_ALU64_REG(BPF_SUB, R1, R3),
98909- BPF_ALU64_REG(BPF_SUB, R1, R4),
98910- BPF_ALU64_REG(BPF_SUB, R1, R5),
98911- BPF_ALU64_REG(BPF_SUB, R1, R6),
98912- BPF_ALU64_REG(BPF_SUB, R1, R7),
98913- BPF_ALU64_REG(BPF_SUB, R1, R8),
98914- BPF_ALU64_REG(BPF_SUB, R1, R9),
98915- BPF_ALU64_IMM(BPF_SUB, R1, 10),
98916- BPF_ALU64_REG(BPF_SUB, R2, R0),
98917- BPF_ALU64_REG(BPF_SUB, R2, R1),
98918- BPF_ALU64_REG(BPF_SUB, R2, R3),
98919- BPF_ALU64_REG(BPF_SUB, R2, R4),
98920- BPF_ALU64_REG(BPF_SUB, R2, R5),
98921- BPF_ALU64_REG(BPF_SUB, R2, R6),
98922- BPF_ALU64_REG(BPF_SUB, R2, R7),
98923- BPF_ALU64_REG(BPF_SUB, R2, R8),
98924- BPF_ALU64_REG(BPF_SUB, R2, R9),
98925- BPF_ALU64_IMM(BPF_SUB, R2, 10),
98926- BPF_ALU64_REG(BPF_SUB, R3, R0),
98927- BPF_ALU64_REG(BPF_SUB, R3, R1),
98928- BPF_ALU64_REG(BPF_SUB, R3, R2),
98929- BPF_ALU64_REG(BPF_SUB, R3, R4),
98930- BPF_ALU64_REG(BPF_SUB, R3, R5),
98931- BPF_ALU64_REG(BPF_SUB, R3, R6),
98932- BPF_ALU64_REG(BPF_SUB, R3, R7),
98933- BPF_ALU64_REG(BPF_SUB, R3, R8),
98934- BPF_ALU64_REG(BPF_SUB, R3, R9),
98935- BPF_ALU64_IMM(BPF_SUB, R3, 10),
98936- BPF_ALU64_REG(BPF_SUB, R4, R0),
98937- BPF_ALU64_REG(BPF_SUB, R4, R1),
98938- BPF_ALU64_REG(BPF_SUB, R4, R2),
98939- BPF_ALU64_REG(BPF_SUB, R4, R3),
98940- BPF_ALU64_REG(BPF_SUB, R4, R5),
98941- BPF_ALU64_REG(BPF_SUB, R4, R6),
98942- BPF_ALU64_REG(BPF_SUB, R4, R7),
98943- BPF_ALU64_REG(BPF_SUB, R4, R8),
98944- BPF_ALU64_REG(BPF_SUB, R4, R9),
98945- BPF_ALU64_IMM(BPF_SUB, R4, 10),
98946- BPF_ALU64_REG(BPF_SUB, R5, R0),
98947- BPF_ALU64_REG(BPF_SUB, R5, R1),
98948- BPF_ALU64_REG(BPF_SUB, R5, R2),
98949- BPF_ALU64_REG(BPF_SUB, R5, R3),
98950- BPF_ALU64_REG(BPF_SUB, R5, R4),
98951- BPF_ALU64_REG(BPF_SUB, R5, R6),
98952- BPF_ALU64_REG(BPF_SUB, R5, R7),
98953- BPF_ALU64_REG(BPF_SUB, R5, R8),
98954- BPF_ALU64_REG(BPF_SUB, R5, R9),
98955- BPF_ALU64_IMM(BPF_SUB, R5, 10),
98956- BPF_ALU64_REG(BPF_SUB, R6, R0),
98957- BPF_ALU64_REG(BPF_SUB, R6, R1),
98958- BPF_ALU64_REG(BPF_SUB, R6, R2),
98959- BPF_ALU64_REG(BPF_SUB, R6, R3),
98960- BPF_ALU64_REG(BPF_SUB, R6, R4),
98961- BPF_ALU64_REG(BPF_SUB, R6, R5),
98962- BPF_ALU64_REG(BPF_SUB, R6, R7),
98963- BPF_ALU64_REG(BPF_SUB, R6, R8),
98964- BPF_ALU64_REG(BPF_SUB, R6, R9),
98965- BPF_ALU64_IMM(BPF_SUB, R6, 10),
98966- BPF_ALU64_REG(BPF_SUB, R7, R0),
98967- BPF_ALU64_REG(BPF_SUB, R7, R1),
98968- BPF_ALU64_REG(BPF_SUB, R7, R2),
98969- BPF_ALU64_REG(BPF_SUB, R7, R3),
98970- BPF_ALU64_REG(BPF_SUB, R7, R4),
98971- BPF_ALU64_REG(BPF_SUB, R7, R5),
98972- BPF_ALU64_REG(BPF_SUB, R7, R6),
98973- BPF_ALU64_REG(BPF_SUB, R7, R8),
98974- BPF_ALU64_REG(BPF_SUB, R7, R9),
98975- BPF_ALU64_IMM(BPF_SUB, R7, 10),
98976- BPF_ALU64_REG(BPF_SUB, R8, R0),
98977- BPF_ALU64_REG(BPF_SUB, R8, R1),
98978- BPF_ALU64_REG(BPF_SUB, R8, R2),
98979- BPF_ALU64_REG(BPF_SUB, R8, R3),
98980- BPF_ALU64_REG(BPF_SUB, R8, R4),
98981- BPF_ALU64_REG(BPF_SUB, R8, R5),
98982- BPF_ALU64_REG(BPF_SUB, R8, R6),
98983- BPF_ALU64_REG(BPF_SUB, R8, R7),
98984- BPF_ALU64_REG(BPF_SUB, R8, R9),
98985- BPF_ALU64_IMM(BPF_SUB, R8, 10),
98986- BPF_ALU64_REG(BPF_SUB, R9, R0),
98987- BPF_ALU64_REG(BPF_SUB, R9, R1),
98988- BPF_ALU64_REG(BPF_SUB, R9, R2),
98989- BPF_ALU64_REG(BPF_SUB, R9, R3),
98990- BPF_ALU64_REG(BPF_SUB, R9, R4),
98991- BPF_ALU64_REG(BPF_SUB, R9, R5),
98992- BPF_ALU64_REG(BPF_SUB, R9, R6),
98993- BPF_ALU64_REG(BPF_SUB, R9, R7),
98994- BPF_ALU64_REG(BPF_SUB, R9, R8),
98995- BPF_ALU64_IMM(BPF_SUB, R9, 10),
98996- BPF_ALU64_IMM(BPF_SUB, R0, 10),
98997- BPF_ALU64_IMM(BPF_NEG, R0, 0),
98998- BPF_ALU64_REG(BPF_SUB, R0, R1),
98999- BPF_ALU64_REG(BPF_SUB, R0, R2),
99000- BPF_ALU64_REG(BPF_SUB, R0, R3),
99001- BPF_ALU64_REG(BPF_SUB, R0, R4),
99002- BPF_ALU64_REG(BPF_SUB, R0, R5),
99003- BPF_ALU64_REG(BPF_SUB, R0, R6),
99004- BPF_ALU64_REG(BPF_SUB, R0, R7),
99005- BPF_ALU64_REG(BPF_SUB, R0, R8),
99006- BPF_ALU64_REG(BPF_SUB, R0, R9),
99007- BPF_EXIT_INSN(),
99008- },
99009- INTERNAL,
99010- { },
99011- { { 0, 11 } }
99012- },
99013- { /* Mainly checking JIT here. */
99014- "INT: XOR",
99015- .u.insns_int = {
99016- BPF_ALU64_REG(BPF_SUB, R0, R0),
99017- BPF_ALU64_REG(BPF_XOR, R1, R1),
99018- BPF_JMP_REG(BPF_JEQ, R0, R1, 1),
99019- BPF_EXIT_INSN(),
99020- BPF_ALU64_IMM(BPF_MOV, R0, 10),
99021- BPF_ALU64_IMM(BPF_MOV, R1, -1),
99022- BPF_ALU64_REG(BPF_SUB, R1, R1),
99023- BPF_ALU64_REG(BPF_XOR, R2, R2),
99024- BPF_JMP_REG(BPF_JEQ, R1, R2, 1),
99025- BPF_EXIT_INSN(),
99026- BPF_ALU64_REG(BPF_SUB, R2, R2),
99027- BPF_ALU64_REG(BPF_XOR, R3, R3),
99028- BPF_ALU64_IMM(BPF_MOV, R0, 10),
99029- BPF_ALU64_IMM(BPF_MOV, R1, -1),
99030- BPF_JMP_REG(BPF_JEQ, R2, R3, 1),
99031- BPF_EXIT_INSN(),
99032- BPF_ALU64_REG(BPF_SUB, R3, R3),
99033- BPF_ALU64_REG(BPF_XOR, R4, R4),
99034- BPF_ALU64_IMM(BPF_MOV, R2, 1),
99035- BPF_ALU64_IMM(BPF_MOV, R5, -1),
99036- BPF_JMP_REG(BPF_JEQ, R3, R4, 1),
99037- BPF_EXIT_INSN(),
99038- BPF_ALU64_REG(BPF_SUB, R4, R4),
99039- BPF_ALU64_REG(BPF_XOR, R5, R5),
99040- BPF_ALU64_IMM(BPF_MOV, R3, 1),
99041- BPF_ALU64_IMM(BPF_MOV, R7, -1),
99042- BPF_JMP_REG(BPF_JEQ, R5, R4, 1),
99043- BPF_EXIT_INSN(),
99044- BPF_ALU64_IMM(BPF_MOV, R5, 1),
99045- BPF_ALU64_REG(BPF_SUB, R5, R5),
99046- BPF_ALU64_REG(BPF_XOR, R6, R6),
99047- BPF_ALU64_IMM(BPF_MOV, R1, 1),
99048- BPF_ALU64_IMM(BPF_MOV, R8, -1),
99049- BPF_JMP_REG(BPF_JEQ, R5, R6, 1),
99050- BPF_EXIT_INSN(),
99051- BPF_ALU64_REG(BPF_SUB, R6, R6),
99052- BPF_ALU64_REG(BPF_XOR, R7, R7),
99053- BPF_JMP_REG(BPF_JEQ, R7, R6, 1),
99054- BPF_EXIT_INSN(),
99055- BPF_ALU64_REG(BPF_SUB, R7, R7),
99056- BPF_ALU64_REG(BPF_XOR, R8, R8),
99057- BPF_JMP_REG(BPF_JEQ, R7, R8, 1),
99058- BPF_EXIT_INSN(),
99059- BPF_ALU64_REG(BPF_SUB, R8, R8),
99060- BPF_ALU64_REG(BPF_XOR, R9, R9),
99061- BPF_JMP_REG(BPF_JEQ, R9, R8, 1),
99062- BPF_EXIT_INSN(),
99063- BPF_ALU64_REG(BPF_SUB, R9, R9),
99064- BPF_ALU64_REG(BPF_XOR, R0, R0),
99065- BPF_JMP_REG(BPF_JEQ, R9, R0, 1),
99066- BPF_EXIT_INSN(),
99067- BPF_ALU64_REG(BPF_SUB, R1, R1),
99068- BPF_ALU64_REG(BPF_XOR, R0, R0),
99069- BPF_JMP_REG(BPF_JEQ, R9, R0, 2),
99070- BPF_ALU64_IMM(BPF_MOV, R0, 0),
99071- BPF_EXIT_INSN(),
99072- BPF_ALU64_IMM(BPF_MOV, R0, 1),
99073- BPF_EXIT_INSN(),
99074- },
99075- INTERNAL,
99076- { },
99077- { { 0, 1 } }
99078- },
99079- { /* Mainly checking JIT here. */
99080- "INT: MUL",
99081- .u.insns_int = {
99082- BPF_ALU64_IMM(BPF_MOV, R0, 11),
99083- BPF_ALU64_IMM(BPF_MOV, R1, 1),
99084- BPF_ALU64_IMM(BPF_MOV, R2, 2),
99085- BPF_ALU64_IMM(BPF_MOV, R3, 3),
99086- BPF_ALU64_IMM(BPF_MOV, R4, 4),
99087- BPF_ALU64_IMM(BPF_MOV, R5, 5),
99088- BPF_ALU64_IMM(BPF_MOV, R6, 6),
99089- BPF_ALU64_IMM(BPF_MOV, R7, 7),
99090- BPF_ALU64_IMM(BPF_MOV, R8, 8),
99091- BPF_ALU64_IMM(BPF_MOV, R9, 9),
99092- BPF_ALU64_REG(BPF_MUL, R0, R0),
99093- BPF_ALU64_REG(BPF_MUL, R0, R1),
99094- BPF_ALU64_REG(BPF_MUL, R0, R2),
99095- BPF_ALU64_REG(BPF_MUL, R0, R3),
99096- BPF_ALU64_REG(BPF_MUL, R0, R4),
99097- BPF_ALU64_REG(BPF_MUL, R0, R5),
99098- BPF_ALU64_REG(BPF_MUL, R0, R6),
99099- BPF_ALU64_REG(BPF_MUL, R0, R7),
99100- BPF_ALU64_REG(BPF_MUL, R0, R8),
99101- BPF_ALU64_REG(BPF_MUL, R0, R9),
99102- BPF_ALU64_IMM(BPF_MUL, R0, 10),
99103- BPF_JMP_IMM(BPF_JEQ, R0, 439084800, 1),
99104- BPF_EXIT_INSN(),
99105- BPF_ALU64_REG(BPF_MUL, R1, R0),
99106- BPF_ALU64_REG(BPF_MUL, R1, R2),
99107- BPF_ALU64_REG(BPF_MUL, R1, R3),
99108- BPF_ALU64_REG(BPF_MUL, R1, R4),
99109- BPF_ALU64_REG(BPF_MUL, R1, R5),
99110- BPF_ALU64_REG(BPF_MUL, R1, R6),
99111- BPF_ALU64_REG(BPF_MUL, R1, R7),
99112- BPF_ALU64_REG(BPF_MUL, R1, R8),
99113- BPF_ALU64_REG(BPF_MUL, R1, R9),
99114- BPF_ALU64_IMM(BPF_MUL, R1, 10),
99115- BPF_ALU64_REG(BPF_MOV, R2, R1),
99116- BPF_ALU64_IMM(BPF_RSH, R2, 32),
99117- BPF_JMP_IMM(BPF_JEQ, R2, 0x5a924, 1),
99118- BPF_EXIT_INSN(),
99119- BPF_ALU64_IMM(BPF_LSH, R1, 32),
99120- BPF_ALU64_IMM(BPF_ARSH, R1, 32),
99121- BPF_JMP_IMM(BPF_JEQ, R1, 0xebb90000, 1),
99122- BPF_EXIT_INSN(),
99123- BPF_ALU64_REG(BPF_MUL, R2, R0),
99124- BPF_ALU64_REG(BPF_MUL, R2, R1),
99125- BPF_ALU64_REG(BPF_MUL, R2, R3),
99126- BPF_ALU64_REG(BPF_MUL, R2, R4),
99127- BPF_ALU64_REG(BPF_MUL, R2, R5),
99128- BPF_ALU64_REG(BPF_MUL, R2, R6),
99129- BPF_ALU64_REG(BPF_MUL, R2, R7),
99130- BPF_ALU64_REG(BPF_MUL, R2, R8),
99131- BPF_ALU64_REG(BPF_MUL, R2, R9),
99132- BPF_ALU64_IMM(BPF_MUL, R2, 10),
99133- BPF_ALU64_IMM(BPF_RSH, R2, 32),
99134- BPF_ALU64_REG(BPF_MOV, R0, R2),
99135- BPF_EXIT_INSN(),
99136- },
99137- INTERNAL,
99138- { },
99139- { { 0, 0x35d97ef2 } }
99140- },
99141- {
99142- "INT: ALU MIX",
99143- .u.insns_int = {
99144- BPF_ALU64_IMM(BPF_MOV, R0, 11),
99145- BPF_ALU64_IMM(BPF_ADD, R0, -1),
99146- BPF_ALU64_IMM(BPF_MOV, R2, 2),
99147- BPF_ALU64_IMM(BPF_XOR, R2, 3),
99148- BPF_ALU64_REG(BPF_DIV, R0, R2),
99149- BPF_JMP_IMM(BPF_JEQ, R0, 10, 1),
99150- BPF_EXIT_INSN(),
99151- BPF_ALU64_IMM(BPF_MOD, R0, 3),
99152- BPF_JMP_IMM(BPF_JEQ, R0, 1, 1),
99153- BPF_EXIT_INSN(),
99154- BPF_ALU64_IMM(BPF_MOV, R0, -1),
99155- BPF_EXIT_INSN(),
99156- },
99157- INTERNAL,
99158- { },
99159- { { 0, -1 } }
99160- },
99161- {
99162- "INT: DIV + ABS",
99163- .u.insns_int = {
99164- BPF_ALU64_REG(BPF_MOV, R6, R1),
99165- BPF_LD_ABS(BPF_B, 3),
99166- BPF_ALU64_IMM(BPF_MOV, R2, 2),
99167- BPF_ALU32_REG(BPF_DIV, R0, R2),
99168- BPF_ALU64_REG(BPF_MOV, R8, R0),
99169- BPF_LD_ABS(BPF_B, 4),
99170- BPF_ALU64_REG(BPF_ADD, R8, R0),
99171- BPF_LD_IND(BPF_B, R8, -70),
99172- BPF_EXIT_INSN(),
99173- },
99174- INTERNAL,
99175- { 10, 20, 30, 40, 50 },
99176- { { 4, 0 }, { 5, 10 } }
99177- },
99178- {
99179- "INT: DIV by zero",
99180- .u.insns_int = {
99181- BPF_ALU64_REG(BPF_MOV, R6, R1),
99182- BPF_ALU64_IMM(BPF_MOV, R7, 0),
99183- BPF_LD_ABS(BPF_B, 3),
99184- BPF_ALU32_REG(BPF_DIV, R0, R7),
99185- BPF_EXIT_INSN(),
99186- },
99187- INTERNAL,
99188- { 10, 20, 30, 40, 50 },
99189- { { 3, 0 }, { 4, 0 } }
99190- },
99191- {
99192- "check: missing ret",
99193- .u.insns = {
99194- BPF_STMT(BPF_LD | BPF_IMM, 1),
99195- },
99196- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
99197- { },
99198- { }
99199- },
99200- {
99201- "check: div_k_0",
99202- .u.insns = {
99203- BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0),
99204- BPF_STMT(BPF_RET | BPF_K, 0)
99205- },
99206- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
99207- { },
99208- { }
99209- },
99210- {
99211- "check: unknown insn",
99212- .u.insns = {
99213- /* seccomp insn, rejected in socket filter */
99214- BPF_STMT(BPF_LDX | BPF_W | BPF_ABS, 0),
99215- BPF_STMT(BPF_RET | BPF_K, 0)
99216- },
99217- CLASSIC | FLAG_EXPECTED_FAIL,
99218- { },
99219- { }
99220- },
99221- {
99222- "check: out of range spill/fill",
99223- .u.insns = {
99224- BPF_STMT(BPF_STX, 16),
99225- BPF_STMT(BPF_RET | BPF_K, 0)
99226- },
99227- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
99228- { },
99229- { }
99230- },
99231- {
99232- "JUMPS + HOLES",
99233- .u.insns = {
99234- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99235- BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 15),
99236- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99237- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99238- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99239- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99240- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99241- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99242- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99243- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99244- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99245- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99246- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99247- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99248- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99249- BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 3, 4),
99250- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99251- BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 1, 2),
99252- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99253- BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
99254- BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
99255- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99256- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99257- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99258- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99259- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99260- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99261- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99262- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99263- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99264- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99265- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99266- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99267- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99268- BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 2, 3),
99269- BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 1, 2),
99270- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99271- BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
99272- BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
99273- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99274- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99275- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99276- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99277- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99278- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99279- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99280- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99281- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99282- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99283- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99284- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99285- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99286- BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 2, 3),
99287- BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 1, 2),
99288- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
99289- BPF_STMT(BPF_RET | BPF_A, 0),
99290- BPF_STMT(BPF_RET | BPF_A, 0),
99291- },
99292- CLASSIC,
99293- { 0x00, 0x1b, 0x21, 0x3c, 0x9d, 0xf8,
99294- 0x90, 0xe2, 0xba, 0x0a, 0x56, 0xb4,
99295- 0x08, 0x00,
99296- 0x45, 0x00, 0x00, 0x28, 0x00, 0x00,
99297- 0x20, 0x00, 0x40, 0x11, 0x00, 0x00, /* IP header */
99298- 0xc0, 0xa8, 0x33, 0x01,
99299- 0xc0, 0xa8, 0x33, 0x02,
99300- 0xbb, 0xb6,
99301- 0xa9, 0xfa,
99302- 0x00, 0x14, 0x00, 0x00,
99303- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
99304- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
99305- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
99306- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
99307- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
99308- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
99309- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
99310- 0xcc, 0xcc, 0xcc, 0xcc },
99311- { { 88, 0x001b } }
99312- },
99313- {
99314- "check: RET X",
99315- .u.insns = {
99316- BPF_STMT(BPF_RET | BPF_X, 0),
99317- },
99318- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
99319- { },
99320- { },
99321- },
99322- {
99323- "check: LDX + RET X",
99324- .u.insns = {
99325- BPF_STMT(BPF_LDX | BPF_IMM, 42),
99326- BPF_STMT(BPF_RET | BPF_X, 0),
99327- },
99328- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
99329- { },
99330- { },
99331- },
99332- { /* Mainly checking JIT here. */
99333- "M[]: alt STX + LDX",
99334- .u.insns = {
99335- BPF_STMT(BPF_LDX | BPF_IMM, 100),
99336- BPF_STMT(BPF_STX, 0),
99337- BPF_STMT(BPF_LDX | BPF_MEM, 0),
99338- BPF_STMT(BPF_MISC | BPF_TXA, 0),
99339- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
99340- BPF_STMT(BPF_MISC | BPF_TAX, 0),
99341- BPF_STMT(BPF_STX, 1),
99342- BPF_STMT(BPF_LDX | BPF_MEM, 1),
99343- BPF_STMT(BPF_MISC | BPF_TXA, 0),
99344- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
99345- BPF_STMT(BPF_MISC | BPF_TAX, 0),
99346- BPF_STMT(BPF_STX, 2),
99347- BPF_STMT(BPF_LDX | BPF_MEM, 2),
99348- BPF_STMT(BPF_MISC | BPF_TXA, 0),
99349- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
99350- BPF_STMT(BPF_MISC | BPF_TAX, 0),
99351- BPF_STMT(BPF_STX, 3),
99352- BPF_STMT(BPF_LDX | BPF_MEM, 3),
99353- BPF_STMT(BPF_MISC | BPF_TXA, 0),
99354- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
99355- BPF_STMT(BPF_MISC | BPF_TAX, 0),
99356- BPF_STMT(BPF_STX, 4),
99357- BPF_STMT(BPF_LDX | BPF_MEM, 4),
99358- BPF_STMT(BPF_MISC | BPF_TXA, 0),
99359- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
99360- BPF_STMT(BPF_MISC | BPF_TAX, 0),
99361- BPF_STMT(BPF_STX, 5),
99362- BPF_STMT(BPF_LDX | BPF_MEM, 5),
99363- BPF_STMT(BPF_MISC | BPF_TXA, 0),
99364- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
99365- BPF_STMT(BPF_MISC | BPF_TAX, 0),
99366- BPF_STMT(BPF_STX, 6),
99367- BPF_STMT(BPF_LDX | BPF_MEM, 6),
99368- BPF_STMT(BPF_MISC | BPF_TXA, 0),
99369- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
99370- BPF_STMT(BPF_MISC | BPF_TAX, 0),
99371- BPF_STMT(BPF_STX, 7),
99372- BPF_STMT(BPF_LDX | BPF_MEM, 7),
99373- BPF_STMT(BPF_MISC | BPF_TXA, 0),
99374- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
99375- BPF_STMT(BPF_MISC | BPF_TAX, 0),
99376- BPF_STMT(BPF_STX, 8),
99377- BPF_STMT(BPF_LDX | BPF_MEM, 8),
99378- BPF_STMT(BPF_MISC | BPF_TXA, 0),
99379- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
99380- BPF_STMT(BPF_MISC | BPF_TAX, 0),
99381- BPF_STMT(BPF_STX, 9),
99382- BPF_STMT(BPF_LDX | BPF_MEM, 9),
99383- BPF_STMT(BPF_MISC | BPF_TXA, 0),
99384- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
99385- BPF_STMT(BPF_MISC | BPF_TAX, 0),
99386- BPF_STMT(BPF_STX, 10),
99387- BPF_STMT(BPF_LDX | BPF_MEM, 10),
99388- BPF_STMT(BPF_MISC | BPF_TXA, 0),
99389- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
99390- BPF_STMT(BPF_MISC | BPF_TAX, 0),
99391- BPF_STMT(BPF_STX, 11),
99392- BPF_STMT(BPF_LDX | BPF_MEM, 11),
99393- BPF_STMT(BPF_MISC | BPF_TXA, 0),
99394- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
99395- BPF_STMT(BPF_MISC | BPF_TAX, 0),
99396- BPF_STMT(BPF_STX, 12),
99397- BPF_STMT(BPF_LDX | BPF_MEM, 12),
99398- BPF_STMT(BPF_MISC | BPF_TXA, 0),
99399- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
99400- BPF_STMT(BPF_MISC | BPF_TAX, 0),
99401- BPF_STMT(BPF_STX, 13),
99402- BPF_STMT(BPF_LDX | BPF_MEM, 13),
99403- BPF_STMT(BPF_MISC | BPF_TXA, 0),
99404- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
99405- BPF_STMT(BPF_MISC | BPF_TAX, 0),
99406- BPF_STMT(BPF_STX, 14),
99407- BPF_STMT(BPF_LDX | BPF_MEM, 14),
99408- BPF_STMT(BPF_MISC | BPF_TXA, 0),
99409- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
99410- BPF_STMT(BPF_MISC | BPF_TAX, 0),
99411- BPF_STMT(BPF_STX, 15),
99412- BPF_STMT(BPF_LDX | BPF_MEM, 15),
99413- BPF_STMT(BPF_MISC | BPF_TXA, 0),
99414- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
99415- BPF_STMT(BPF_MISC | BPF_TAX, 0),
99416- BPF_STMT(BPF_RET | BPF_A, 0),
99417- },
99418- CLASSIC | FLAG_NO_DATA,
99419- { },
99420- { { 0, 116 } },
99421- },
99422- { /* Mainly checking JIT here. */
99423- "M[]: full STX + full LDX",
99424- .u.insns = {
99425- BPF_STMT(BPF_LDX | BPF_IMM, 0xbadfeedb),
99426- BPF_STMT(BPF_STX, 0),
99427- BPF_STMT(BPF_LDX | BPF_IMM, 0xecabedae),
99428- BPF_STMT(BPF_STX, 1),
99429- BPF_STMT(BPF_LDX | BPF_IMM, 0xafccfeaf),
99430- BPF_STMT(BPF_STX, 2),
99431- BPF_STMT(BPF_LDX | BPF_IMM, 0xbffdcedc),
99432- BPF_STMT(BPF_STX, 3),
99433- BPF_STMT(BPF_LDX | BPF_IMM, 0xfbbbdccb),
99434- BPF_STMT(BPF_STX, 4),
99435- BPF_STMT(BPF_LDX | BPF_IMM, 0xfbabcbda),
99436- BPF_STMT(BPF_STX, 5),
99437- BPF_STMT(BPF_LDX | BPF_IMM, 0xaedecbdb),
99438- BPF_STMT(BPF_STX, 6),
99439- BPF_STMT(BPF_LDX | BPF_IMM, 0xadebbade),
99440- BPF_STMT(BPF_STX, 7),
99441- BPF_STMT(BPF_LDX | BPF_IMM, 0xfcfcfaec),
99442- BPF_STMT(BPF_STX, 8),
99443- BPF_STMT(BPF_LDX | BPF_IMM, 0xbcdddbdc),
99444- BPF_STMT(BPF_STX, 9),
99445- BPF_STMT(BPF_LDX | BPF_IMM, 0xfeefdfac),
99446- BPF_STMT(BPF_STX, 10),
99447- BPF_STMT(BPF_LDX | BPF_IMM, 0xcddcdeea),
99448- BPF_STMT(BPF_STX, 11),
99449- BPF_STMT(BPF_LDX | BPF_IMM, 0xaccfaebb),
99450- BPF_STMT(BPF_STX, 12),
99451- BPF_STMT(BPF_LDX | BPF_IMM, 0xbdcccdcf),
99452- BPF_STMT(BPF_STX, 13),
99453- BPF_STMT(BPF_LDX | BPF_IMM, 0xaaedecde),
99454- BPF_STMT(BPF_STX, 14),
99455- BPF_STMT(BPF_LDX | BPF_IMM, 0xfaeacdad),
99456- BPF_STMT(BPF_STX, 15),
99457- BPF_STMT(BPF_LDX | BPF_MEM, 0),
99458- BPF_STMT(BPF_MISC | BPF_TXA, 0),
99459- BPF_STMT(BPF_LDX | BPF_MEM, 1),
99460- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99461- BPF_STMT(BPF_LDX | BPF_MEM, 2),
99462- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99463- BPF_STMT(BPF_LDX | BPF_MEM, 3),
99464- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99465- BPF_STMT(BPF_LDX | BPF_MEM, 4),
99466- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99467- BPF_STMT(BPF_LDX | BPF_MEM, 5),
99468- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99469- BPF_STMT(BPF_LDX | BPF_MEM, 6),
99470- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99471- BPF_STMT(BPF_LDX | BPF_MEM, 7),
99472- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99473- BPF_STMT(BPF_LDX | BPF_MEM, 8),
99474- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99475- BPF_STMT(BPF_LDX | BPF_MEM, 9),
99476- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99477- BPF_STMT(BPF_LDX | BPF_MEM, 10),
99478- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99479- BPF_STMT(BPF_LDX | BPF_MEM, 11),
99480- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99481- BPF_STMT(BPF_LDX | BPF_MEM, 12),
99482- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99483- BPF_STMT(BPF_LDX | BPF_MEM, 13),
99484- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99485- BPF_STMT(BPF_LDX | BPF_MEM, 14),
99486- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99487- BPF_STMT(BPF_LDX | BPF_MEM, 15),
99488- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99489- BPF_STMT(BPF_RET | BPF_A, 0),
99490- },
99491- CLASSIC | FLAG_NO_DATA,
99492- { },
99493- { { 0, 0x2a5a5e5 } },
99494- },
99495- {
99496- "check: SKF_AD_MAX",
99497- .u.insns = {
99498- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
99499- SKF_AD_OFF + SKF_AD_MAX),
99500- BPF_STMT(BPF_RET | BPF_A, 0),
99501- },
99502- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
99503- { },
99504- { },
99505- },
99506- { /* Passes checker but fails during runtime. */
99507- "LD [SKF_AD_OFF-1]",
99508- .u.insns = {
99509- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
99510- SKF_AD_OFF - 1),
99511- BPF_STMT(BPF_RET | BPF_K, 1),
99512- },
99513- CLASSIC,
99514- { },
99515- { { 1, 0 } },
99516- },
99517-};
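The table of test vectors ends here. For orientation when reading the initializers above: the struct bpf_test definition lives earlier in this file and is not shown in this hunk, so the following sketch of its layout is inferred from the initializers themselves and should be treated as an assumption.

	struct bpf_test {
		const char *descr;			/* "LD_PROTOCOL", "JEQ", ... */
		union {
			struct sock_filter insns[MAX_INSNS];		/* CLASSIC programs */
			struct sock_filter_int insns_int[MAX_INSNS];	/* INTERNAL programs */
		} u;
		__u8 aux;				/* CLASSIC/INTERNAL plus FLAG_* bits */
		__u8 data[MAX_DATA];			/* packet bytes fed to the filter */
		struct {
			int data_size;			/* skb length for this subtest */
			__u32 result;			/* expected filter return value */
		} test[MAX_SUBTESTS];
	};

Each { N, V } pair in the final initializer of an entry therefore means: build an skb of N bytes from data[] and expect the filter to return V.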
99518-
99519-static struct net_device dev;
99520-
99521-static struct sk_buff *populate_skb(char *buf, int size)
99522-{
99523- struct sk_buff *skb;
99524-
99525- if (size >= MAX_DATA)
99526- return NULL;
99527-
99528- skb = alloc_skb(MAX_DATA, GFP_KERNEL);
99529- if (!skb)
99530- return NULL;
99531-
99532- memcpy(__skb_put(skb, size), buf, size);
99533-
99534- /* Initialize a fake skb with test pattern. */
99535- skb_reset_mac_header(skb);
99536- skb->protocol = htons(ETH_P_IP);
99537- skb->pkt_type = SKB_TYPE;
99538- skb->mark = SKB_MARK;
99539- skb->hash = SKB_HASH;
99540- skb->queue_mapping = SKB_QUEUE_MAP;
99541- skb->vlan_tci = SKB_VLAN_TCI;
99542- skb->dev = &dev;
99543- skb->dev->ifindex = SKB_DEV_IFINDEX;
99544- skb->dev->type = SKB_DEV_TYPE;
99545- skb_set_network_header(skb, min(size, ETH_HLEN));
99546-
99547- return skb;
99548-}
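populate_skb() seeds exactly the metadata that the CLASSIC ancillary loads in the tests above read back, which is what makes those tests deterministic. A sketch of the correspondence, assuming the SKB_* constants defined near the top of the file:

	/* each SKF_AD_* load returns the value seeded in populate_skb(), e.g.: */
	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_MARK),    /* A = skb->mark == SKB_MARK */
	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_QUEUE),   /* A = skb->queue_mapping == SKB_QUEUE_MAP */
	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_IFINDEX), /* A = skb->dev->ifindex == SKB_DEV_IFINDEX */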
99549-
99550-static void *generate_test_data(struct bpf_test *test, int sub)
99551-{
99552- if (test->aux & FLAG_NO_DATA)
99553- return NULL;
99554-
99555- /* Test case expects an skb, so populate one. Various
99556- * subtests generate skbs of different sizes based on
99557- * the same data.
99558- */
99559- return populate_skb(test->data, test->test[sub].data_size);
99560-}
99561-
99562-static void release_test_data(const struct bpf_test *test, void *data)
99563-{
99564- if (test->aux & FLAG_NO_DATA)
99565- return;
99566-
99567- kfree_skb(data);
99568-}
99569-
99570-static int probe_filter_length(struct sock_filter *fp)
99571-{
99572- int len = 0;
99573-
99574- for (len = MAX_INSNS - 1; len > 0; --len)
99575- if (fp[len].code != 0 || fp[len].k != 0)
99576- break;
99577-
99578- return len + 1;
99579-}
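probe_filter_length() relies on the fixed-size insns arrays in the test table being zero-filled past the last real instruction: it scans backwards for the first slot whose code or k is non-zero and turns that index into a count. A minimal sketch:

	struct sock_filter insns[MAX_INSNS] = {
		BPF_STMT(BPF_LD | BPF_IMM, 1),	/* index 0 */
		BPF_STMT(BPF_RET | BPF_A, 0),	/* index 1: last non-empty slot */
	};
	/* the scan stops at index 1, so probe_filter_length() returns 1 + 1 == 2 */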
99580-
99581-static struct sk_filter *generate_filter(int which, int *err)
99582-{
99583- struct sk_filter *fp;
99584- struct sock_fprog_kern fprog;
99585- unsigned int flen = probe_filter_length(tests[which].u.insns);
99586- __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
99587-
99588- switch (test_type) {
99589- case CLASSIC:
99590- fprog.filter = tests[which].u.insns;
99591- fprog.len = flen;
99592-
99593- *err = sk_unattached_filter_create(&fp, &fprog);
99594- if (tests[which].aux & FLAG_EXPECTED_FAIL) {
99595- if (*err == -EINVAL) {
99596- pr_cont("PASS\n");
99597- /* Verifier rejected filter as expected. */
99598- *err = 0;
99599- return NULL;
99600- } else {
99601- pr_cont("UNEXPECTED_PASS\n");
99602-			/* Verifier didn't reject an invalid
99603-			 * filter; that's bad enough, just return!
99604-			 */
99605- *err = -EINVAL;
99606- return NULL;
99607- }
99608- }
99609- /* We don't expect to fail. */
99610- if (*err) {
99611- pr_cont("FAIL to attach err=%d len=%d\n",
99612- *err, fprog.len);
99613- return NULL;
99614- }
99615- break;
99616-
99617- case INTERNAL:
99618- fp = kzalloc(sk_filter_size(flen), GFP_KERNEL);
99619- if (fp == NULL) {
99620- pr_cont("UNEXPECTED_FAIL no memory left\n");
99621- *err = -ENOMEM;
99622- return NULL;
99623- }
99624-
99625- fp->len = flen;
99626- memcpy(fp->insnsi, tests[which].u.insns_int,
99627- fp->len * sizeof(struct sock_filter_int));
99628-
99629- sk_filter_select_runtime(fp);
99630- break;
99631- }
99632-
99633- *err = 0;
99634- return fp;
99635-}
99636-
99637-static void release_filter(struct sk_filter *fp, int which)
99638-{
99639- __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
99640-
99641- switch (test_type) {
99642- case CLASSIC:
99643- sk_unattached_filter_destroy(fp);
99644- break;
99645- case INTERNAL:
99646- sk_filter_free(fp);
99647- break;
99648- }
99649-}
99650-
99651-static int __run_one(const struct sk_filter *fp, const void *data,
99652- int runs, u64 *duration)
99653-{
99654- u64 start, finish;
99655- int ret, i;
99656-
99657- start = ktime_to_us(ktime_get());
99658-
99659- for (i = 0; i < runs; i++)
99660- ret = SK_RUN_FILTER(fp, data);
99661-
99662- finish = ktime_to_us(ktime_get());
99663-
99664- *duration = (finish - start) * 1000ULL;
99665- do_div(*duration, runs);
99666-
99667- return ret;
99668-}
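__run_one() reports the average cost of a single filter invocation in nanoseconds: the wall-clock delta is taken in microseconds, scaled by 1000, then divided by the run count. Worked through with assumed numbers:

	/* e.g. runs == 50000 and finish - start == 5000 us */
	u64 duration = 5000 * 1000ULL;	/* 5,000,000 ns total */
	do_div(duration, 50000);	/* duration == 100 ns per run */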
99669-
99670-static int run_one(const struct sk_filter *fp, struct bpf_test *test)
99671-{
99672- int err_cnt = 0, i, runs = MAX_TESTRUNS;
99673-
99674- for (i = 0; i < MAX_SUBTESTS; i++) {
99675- void *data;
99676- u64 duration;
99677- u32 ret;
99678-
99679- if (test->test[i].data_size == 0 &&
99680- test->test[i].result == 0)
99681- break;
99682-
99683- data = generate_test_data(test, i);
99684- ret = __run_one(fp, data, runs, &duration);
99685- release_test_data(test, data);
99686-
99687- if (ret == test->test[i].result) {
99688- pr_cont("%lld ", duration);
99689- } else {
99690- pr_cont("ret %d != %d ", ret,
99691- test->test[i].result);
99692- err_cnt++;
99693- }
99694- }
99695-
99696- return err_cnt;
99697-}
99698-
99699-static __init int test_bpf(void)
99700-{
99701- int i, err_cnt = 0, pass_cnt = 0;
99702-
99703- for (i = 0; i < ARRAY_SIZE(tests); i++) {
99704- struct sk_filter *fp;
99705- int err;
99706-
99707- pr_info("#%d %s ", i, tests[i].descr);
99708-
99709- fp = generate_filter(i, &err);
99710- if (fp == NULL) {
99711- if (err == 0) {
99712- pass_cnt++;
99713- continue;
99714- }
99715-
99716- return err;
99717- }
99718- err = run_one(fp, &tests[i]);
99719- release_filter(fp, i);
99720-
99721- if (err) {
99722- pr_cont("FAIL (%d times)\n", err);
99723- err_cnt++;
99724- } else {
99725- pr_cont("PASS\n");
99726- pass_cnt++;
99727- }
99728- }
99729-
99730- pr_info("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);
99731- return err_cnt ? -EINVAL : 0;
99732-}
99733-
99734-static int __init test_bpf_init(void)
99735-{
99736- return test_bpf();
99737-}
99738-
99739-static void __exit test_bpf_exit(void)
99740-{
99741-}
99742-
99743-module_init(test_bpf_init);
99744-module_exit(test_bpf_exit);
99745-
99746-MODULE_LICENSE("GPL");
99747diff --git a/lib/usercopy.c b/lib/usercopy.c
99748index 4f5b1dd..7cab418 100644
99749--- a/lib/usercopy.c
99750+++ b/lib/usercopy.c
99751@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
99752 WARN(1, "Buffer overflow detected!\n");
99753 }
99754 EXPORT_SYMBOL(copy_from_user_overflow);
99755+
99756+void copy_to_user_overflow(void)
99757+{
99758+ WARN(1, "Buffer overflow detected!\n");
99759+}
99760+EXPORT_SYMBOL(copy_to_user_overflow);
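The new copy_to_user_overflow() completes the pair with the existing copy_from_user_overflow(): both are out-of-line report helpers for the hardened uaccess wrappers to call when a requested copy is larger than the destination (or source) object. A minimal sketch of the assumed call pattern; checked_copy_to_user() is a hypothetical wrapper for illustration, not something this patch adds:

	#include <linux/uaccess.h>

	/* hypothetical instrumented wrapper, for illustration only */
	static inline unsigned long
	checked_copy_to_user(void __user *to, const void *from,
			     unsigned long n, unsigned long obj_size)
	{
		if (unlikely(n > obj_size)) {
			copy_to_user_overflow();	/* WARNs "Buffer overflow detected!" */
			return n;			/* treat the whole copy as failed */
		}
		return copy_to_user(to, from, n);
	}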
99761diff --git a/lib/vsprintf.c b/lib/vsprintf.c
99762index 6fe2c84..2fe5ec6 100644
99763--- a/lib/vsprintf.c
99764+++ b/lib/vsprintf.c
99765@@ -16,6 +16,9 @@
99766 * - scnprintf and vscnprintf
99767 */
99768
99769+#ifdef CONFIG_GRKERNSEC_HIDESYM
99770+#define __INCLUDED_BY_HIDESYM 1
99771+#endif
99772 #include <stdarg.h>
99773 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
99774 #include <linux/types.h>
99775@@ -624,7 +627,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
99776 #ifdef CONFIG_KALLSYMS
99777 if (*fmt == 'B')
99778 sprint_backtrace(sym, value);
99779- else if (*fmt != 'f' && *fmt != 's')
99780+ else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
99781 sprint_symbol(sym, value);
99782 else
99783 sprint_symbol_no_offset(sym, value);
99784@@ -1183,7 +1186,11 @@ char *address_val(char *buf, char *end, const void *addr,
99785 return number(buf, end, num, spec);
99786 }
99787
99788+#ifdef CONFIG_GRKERNSEC_HIDESYM
99789+int kptr_restrict __read_mostly = 2;
99790+#else
99791 int kptr_restrict __read_mostly;
99792+#endif
99793
99794 /*
99795 * Show a '%p' thing. A kernel extension is that the '%p' is followed
99796@@ -1194,8 +1201,10 @@ int kptr_restrict __read_mostly;
99797 *
99798 * - 'F' For symbolic function descriptor pointers with offset
99799 * - 'f' For simple symbolic function names without offset
99800+ * - 'X' For simple symbolic function names without offset approved for use with GRKERNSEC_HIDESYM
99801 * - 'S' For symbolic direct pointers with offset
99802 * - 's' For symbolic direct pointers without offset
99803+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
99804 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
99805 * - 'B' For backtraced symbolic direct pointers with offset
99806 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
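The new 'X' and 'A' specifiers give HIDESYM kernels a way to print symbols that have been explicitly approved for exposure, while plain 'S'/'s' under HIDESYM now drop out of the switch and are handled like ordinary obfuscated pointers. Illustrative use, with placeholder names:

	printk(KERN_INFO "handler: %pX\n", my_irq_handler);	/* function name, no offset */
	printk(KERN_INFO "called from: %pA\n", __builtin_return_address(0));	/* symbol+offset */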
99807@@ -1263,12 +1272,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
99808
99809 if (!ptr && *fmt != 'K') {
99810 /*
99811- * Print (null) with the same width as a pointer so it makes
99812+ * Print (nil) with the same width as a pointer so it makes
99813 * tabular output look nice.
99814 */
99815 if (spec.field_width == -1)
99816 spec.field_width = default_width;
99817- return string(buf, end, "(null)", spec);
99818+ return string(buf, end, "(nil)", spec);
99819 }
99820
99821 switch (*fmt) {
99822@@ -1278,6 +1287,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
99823 /* Fallthrough */
99824 case 'S':
99825 case 's':
99826+#ifdef CONFIG_GRKERNSEC_HIDESYM
99827+ break;
99828+#else
99829+ return symbol_string(buf, end, ptr, spec, fmt);
99830+#endif
99831+ case 'X':
99832+ ptr = dereference_function_descriptor(ptr);
99833+ case 'A':
99834 case 'B':
99835 return symbol_string(buf, end, ptr, spec, fmt);
99836 case 'R':
99837@@ -1333,6 +1350,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
99838 va_end(va);
99839 return buf;
99840 }
99841+ case 'P':
99842+ break;
99843 case 'K':
99844 /*
99845 * %pK cannot be used in IRQ context because its test
99846@@ -1390,6 +1409,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
99847 ((const struct file *)ptr)->f_path.dentry,
99848 spec, fmt);
99849 }
99850+
99851+#ifdef CONFIG_GRKERNSEC_HIDESYM
99852+	/* 'P' = pointers approved for copying to userland, as in the
99853+	   /proc/kallsyms case: they display nothing for non-root users
99854+	   and the real contents for root users.
99855+	   'X' = approved simple symbols.
99856+	   Also ignore 'K' pointers, since those are already forced to NULL
99857+	   for non-root users above.
99858+	 */
99859+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
99860+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
99861+ dump_stack();
99862+ ptr = NULL;
99863+ }
99864+#endif
99865+
99866 spec.flags |= SMALL;
99867 if (spec.field_width == -1) {
99868 spec.field_width = default_width;
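The added block above is the HIDESYM infoleak detector: if any %p variant other than the approved 'P'/'X' (or the already-censored 'K') is about to write a kernel-space address into a buffer that is_usercopy_object() identifies as userland-bound, the pointer is nulled and the event is logged with a stack dump. A hedged example of what would trip it; names are placeholders:

	/* a seq_file page ends up copied to userland, so this would be
	 * detected, logged, and printed as a nulled pointer */
	seq_printf(m, "object at %p\n", some_kernel_object);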
99869@@ -2089,11 +2124,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
99870 typeof(type) value; \
99871 if (sizeof(type) == 8) { \
99872 args = PTR_ALIGN(args, sizeof(u32)); \
99873- *(u32 *)&value = *(u32 *)args; \
99874- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
99875+ *(u32 *)&value = *(const u32 *)args; \
99876+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
99877 } else { \
99878 args = PTR_ALIGN(args, sizeof(type)); \
99879- value = *(typeof(type) *)args; \
99880+ value = *(const typeof(type) *)args; \
99881 } \
99882 args += sizeof(type); \
99883 value; \
99884@@ -2156,7 +2191,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
99885 case FORMAT_TYPE_STR: {
99886 const char *str_arg = args;
99887 args += strlen(str_arg) + 1;
99888- str = string(str, end, (char *)str_arg, spec);
99889+ str = string(str, end, str_arg, spec);
99890 break;
99891 }
99892
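The HIDESYM plumbing above works in two layers: the new 'P', 'X', and 'A' specifiers mark pointers explicitly approved for display, while the catch-all near the end nulls any kernel-range %p value formatted into a buffer that can be copied to userland, which then prints as the newly chosen "(nil)". A minimal userspace sketch of that censoring policy follows; USER_BOUNDARY and the untrusted_dest flag are stand-ins invented here for TASK_SIZE and is_usercopy_object(), not the kernel's definitions.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for TASK_SIZE; an assumption made for this sketch. */
#define USER_BOUNDARY 0x0000800000000000UL

/* Format a pointer, censoring anything above the user/kernel boundary
 * when the destination may reach an untrusted reader. */
static int fmt_ptr(char *out, size_t n, const void *p, int untrusted_dest)
{
	if (untrusted_dest && (uintptr_t)p > USER_BOUNDARY)
		p = NULL;			/* mirrors the ptr = NULL above */
	if (!p)
		return snprintf(out, n, "(nil)"); /* matches the (null) -> (nil) change */
	return snprintf(out, n, "%p", p);
}

int main(void)
{
	char buf[32];
	int x;

	fmt_ptr(buf, sizeof(buf), &x, 1);
	puts(buf);				/* a user-range address prints as-is */
	fmt_ptr(buf, sizeof(buf), (void *)0xffffffff81000000UL, 1);
	puts(buf);				/* censored "kernel" pointer: (nil) */
	return 0;
}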
99893diff --git a/localversion-grsec b/localversion-grsec
99894new file mode 100644
99895index 0000000..7cd6065
99896--- /dev/null
99897+++ b/localversion-grsec
99898@@ -0,0 +1 @@
99899+-grsec
99900diff --git a/mm/Kconfig b/mm/Kconfig
99901index 3e9977a..205074f 100644
99902--- a/mm/Kconfig
99903+++ b/mm/Kconfig
99904@@ -333,10 +333,11 @@ config KSM
99905 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
99906
99907 config DEFAULT_MMAP_MIN_ADDR
99908- int "Low address space to protect from user allocation"
99909+ int "Low address space to protect from user allocation"
99910 depends on MMU
99911- default 4096
99912- help
99913+ default 32768 if ALPHA || ARM || PARISC || SPARC32
99914+ default 65536
99915+ help
99916 This is the portion of low virtual memory which should be protected
99917 from userspace allocation. Keeping a user from writing to low pages
99918 can help reduce the impact of kernel NULL pointer bugs.
99919@@ -367,7 +368,7 @@ config MEMORY_FAILURE
99920
99921 config HWPOISON_INJECT
99922 tristate "HWPoison pages injector"
99923- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
99924+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
99925 select PROC_PAGE_MONITOR
99926
99927 config NOMMU_INITIAL_TRIM_EXCESS
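Raising DEFAULT_MMAP_MIN_ADDR from 4096 to 65536 (32768 on the architectures named in the first default) widens the unmappable window at the bottom of the address space, so a kernel NULL-pointer dereference cannot land in attacker-controlled data. The effect is visible from unprivileged userspace; a small probe, assuming a 4 KiB page size:

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/mman.h>

/* With vm.mmap_min_addr = 65536 (the default this hunk selects), the
 * fixed low mapping below should fail for callers without the relevant
 * capability. */
int main(void)
{
	void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	if (p == MAP_FAILED)
		printf("low mapping rejected: %s\n", strerror(errno));
	else
		printf("low mapping allowed at %p\n", p);
	return 0;
}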
99928diff --git a/mm/backing-dev.c b/mm/backing-dev.c
99929index 1706cbb..f89dbca 100644
99930--- a/mm/backing-dev.c
99931+++ b/mm/backing-dev.c
99932@@ -12,7 +12,7 @@
99933 #include <linux/device.h>
99934 #include <trace/events/writeback.h>
99935
99936-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
99937+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
99938
99939 struct backing_dev_info default_backing_dev_info = {
99940 .name = "default",
99941@@ -533,7 +533,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
99942 return err;
99943
99944 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
99945- atomic_long_inc_return(&bdi_seq));
99946+ atomic_long_inc_return_unchecked(&bdi_seq));
99947 if (err) {
99948 bdi_destroy(bdi);
99949 return err;
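bdi_seq only generates device-name suffixes, so wraparound is harmless; converting it to atomic_long_unchecked_t exempts it from the PaX REFCOUNT overflow detection reserved for genuine reference counts. A rough C11 rendering of the split, where the type and helper are this sketch's own rather than the kernel's implementation:

#include <stdatomic.h>
#include <stdio.h>

/* An "unchecked" counter: one whose wraparound is acceptable (IDs,
 * sequence numbers), so no overflow trap is applied to it. */
typedef struct { atomic_long v; } atomic_long_unchecked_t;

static long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *a)
{
	return atomic_fetch_add(&a->v, 1) + 1;	/* wrap is acceptable here */
}

int main(void)
{
	atomic_long_unchecked_t seq;

	atomic_init(&seq.v, 0);
	printf("%ld\n", atomic_long_inc_return_unchecked(&seq));	/* 1 */
	return 0;
}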
99950diff --git a/mm/filemap.c b/mm/filemap.c
99951index 8163e04..191cb97 100644
99952--- a/mm/filemap.c
99953+++ b/mm/filemap.c
99954@@ -2074,7 +2074,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
99955 struct address_space *mapping = file->f_mapping;
99956
99957 if (!mapping->a_ops->readpage)
99958- return -ENOEXEC;
99959+ return -ENODEV;
99960 file_accessed(file);
99961 vma->vm_ops = &generic_file_vm_ops;
99962 return 0;
99963@@ -2252,6 +2252,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
99964 *pos = i_size_read(inode);
99965
99966 if (limit != RLIM_INFINITY) {
99967+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
99968 if (*pos >= limit) {
99969 send_sig(SIGXFSZ, current, 0);
99970 return -EFBIG;
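The gr_learn_resource() call added above feeds the attempted file size to grsecurity's learning mode immediately before the stock RLIMIT_FSIZE enforcement. That enforcement (SIGXFSZ plus -EFBIG) can be exercised from plain userspace:

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 4096, .rlim_max = 4096 };
	char page[4096] = { 0 };
	ssize_t a, b;
	int fd;

	signal(SIGXFSZ, SIG_IGN);	/* so we observe EFBIG, not a kill */
	setrlimit(RLIMIT_FSIZE, &rl);

	fd = open("/tmp/fsize-demo", O_CREAT | O_WRONLY | O_TRUNC, 0600);
	a = write(fd, page, sizeof(page));	/* fills the limit exactly */
	b = write(fd, page, sizeof(page));	/* crosses it */
	printf("first=%zd second=%zd errno=%s\n",
	       a, b, b < 0 ? strerror(errno) : "-");
	close(fd);
	return 0;
}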
99971diff --git a/mm/fremap.c b/mm/fremap.c
99972index 72b8fa3..c5b39f1 100644
99973--- a/mm/fremap.c
99974+++ b/mm/fremap.c
99975@@ -180,6 +180,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
99976 retry:
99977 vma = find_vma(mm, start);
99978
99979+#ifdef CONFIG_PAX_SEGMEXEC
99980+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
99981+ goto out;
99982+#endif
99983+
99984 /*
99985 * Make sure the vma is shared, that it supports prefaulting,
99986 * and that the remapped range is valid and fully within
99987diff --git a/mm/gup.c b/mm/gup.c
99988index cc5a9e7..d496acf 100644
99989--- a/mm/gup.c
99990+++ b/mm/gup.c
99991@@ -265,11 +265,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
99992 unsigned int fault_flags = 0;
99993 int ret;
99994
99995- /* For mlock, just skip the stack guard page. */
99996- if ((*flags & FOLL_MLOCK) &&
99997- (stack_guard_page_start(vma, address) ||
99998- stack_guard_page_end(vma, address + PAGE_SIZE)))
99999- return -ENOENT;
100000 if (*flags & FOLL_WRITE)
100001 fault_flags |= FAULT_FLAG_WRITE;
100002 if (nonblocking)
100003@@ -424,14 +419,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
100004 if (!(gup_flags & FOLL_FORCE))
100005 gup_flags |= FOLL_NUMA;
100006
100007- do {
100008+ while (nr_pages) {
100009 struct page *page;
100010 unsigned int foll_flags = gup_flags;
100011 unsigned int page_increm;
100012
100013 /* first iteration or cross vma bound */
100014 if (!vma || start >= vma->vm_end) {
100015- vma = find_extend_vma(mm, start);
100016+ vma = find_vma(mm, start);
100017 if (!vma && in_gate_area(mm, start)) {
100018 int ret;
100019 ret = get_gate_page(mm, start & PAGE_MASK,
100020@@ -443,7 +438,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
100021 goto next_page;
100022 }
100023
100024- if (!vma || check_vma_flags(vma, gup_flags))
100025+ if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
100026 return i ? : -EFAULT;
100027 if (is_vm_hugetlb_page(vma)) {
100028 i = follow_hugetlb_page(mm, vma, pages, vmas,
100029@@ -498,7 +493,7 @@ next_page:
100030 i += page_increm;
100031 start += page_increm * PAGE_SIZE;
100032 nr_pages -= page_increm;
100033- } while (nr_pages);
100034+ }
100035 return i;
100036 }
100037 EXPORT_SYMBOL(__get_user_pages);
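Two behavioural changes ride on this hunk: find_extend_vma() becomes find_vma() plus an explicit start < vma->vm_start test, so get_user_pages() no longer grows stacks implicitly, and the do/while becomes a plain while so a zero-length request performs no iterations. The loop-shape half is easy to demonstrate; the pin_* functions below are illustrative stand-ins, not kernel code:

#include <stdio.h>

static int pin_do_while(unsigned long nr_pages)
{
	int iterations = 0;

	do {
		iterations++;		/* body runs even for nr_pages == 0 */
		if (nr_pages)		/* guard so the demo terminates;   */
			nr_pages--;	/* without it, 0 - 1 wraps to ULONG_MAX */
	} while (nr_pages);
	return iterations;
}

static int pin_while(unsigned long nr_pages)
{
	int iterations = 0;

	while (nr_pages) {
		iterations++;
		nr_pages--;
	}
	return iterations;
}

int main(void)
{
	printf("do/while: %d iteration(s) for 0 pages\n", pin_do_while(0)); /* 1 */
	printf("while:    %d iteration(s) for 0 pages\n", pin_while(0));    /* 0 */
	return 0;
}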
100038diff --git a/mm/highmem.c b/mm/highmem.c
100039index b32b70c..e512eb0 100644
100040--- a/mm/highmem.c
100041+++ b/mm/highmem.c
100042@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
100043 * So no dangers, even with speculative execution.
100044 */
100045 page = pte_page(pkmap_page_table[i]);
100046+ pax_open_kernel();
100047 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
100048-
100049+ pax_close_kernel();
100050 set_page_address(page, NULL);
100051 need_flush = 1;
100052 }
100053@@ -198,9 +199,11 @@ start:
100054 }
100055 }
100056 vaddr = PKMAP_ADDR(last_pkmap_nr);
100057+
100058+ pax_open_kernel();
100059 set_pte_at(&init_mm, vaddr,
100060 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
100061-
100062+ pax_close_kernel();
100063 pkmap_count[last_pkmap_nr] = 1;
100064 set_page_address(page, (void *)vaddr);
100065
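pax_open_kernel()/pax_close_kernel() bracket the PTE writes because, under PaX/KERNEXEC, kernel page tables stay read-only outside such windows. A userspace analogue built on mprotect(); the window helpers are this sketch's own names, and a 4 KiB page size is assumed:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static void *ro_page;

static void open_window(void)	/* plays the role of pax_open_kernel() */
{
	mprotect(ro_page, 4096, PROT_READ | PROT_WRITE);
}

static void close_window(void)	/* plays the role of pax_close_kernel() */
{
	mprotect(ro_page, 4096, PROT_READ);
}

int main(void)
{
	ro_page = mmap(NULL, 4096, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	open_window();
	strcpy(ro_page, "updated");	/* stands in for the pte updates */
	close_window();

	printf("%s\n", (char *)ro_page);	/* readable; writes now fault */
	return 0;
}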
100066diff --git a/mm/hugetlb.c b/mm/hugetlb.c
100067index 7ae5444..aea22b2 100644
100068--- a/mm/hugetlb.c
100069+++ b/mm/hugetlb.c
100070@@ -2253,6 +2253,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
100071 struct hstate *h = &default_hstate;
100072 unsigned long tmp;
100073 int ret;
100074+ ctl_table_no_const hugetlb_table;
100075
100076 if (!hugepages_supported())
100077 return -ENOTSUPP;
100078@@ -2262,9 +2263,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
100079 if (write && hstate_is_gigantic(h) && !gigantic_page_supported())
100080 return -EINVAL;
100081
100082- table->data = &tmp;
100083- table->maxlen = sizeof(unsigned long);
100084- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
100085+ hugetlb_table = *table;
100086+ hugetlb_table.data = &tmp;
100087+ hugetlb_table.maxlen = sizeof(unsigned long);
100088+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
100089 if (ret)
100090 goto out;
100091
100092@@ -2309,6 +2311,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
100093 struct hstate *h = &default_hstate;
100094 unsigned long tmp;
100095 int ret;
100096+ ctl_table_no_const hugetlb_table;
100097
100098 if (!hugepages_supported())
100099 return -ENOTSUPP;
100100@@ -2318,9 +2321,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
100101 if (write && hstate_is_gigantic(h))
100102 return -EINVAL;
100103
100104- table->data = &tmp;
100105- table->maxlen = sizeof(unsigned long);
100106- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
100107+ hugetlb_table = *table;
100108+ hugetlb_table.data = &tmp;
100109+ hugetlb_table.maxlen = sizeof(unsigned long);
100110+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
100111 if (ret)
100112 goto out;
100113
100114@@ -2801,6 +2805,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
100115 return 1;
100116 }
100117
100118+#ifdef CONFIG_PAX_SEGMEXEC
100119+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
100120+{
100121+ struct mm_struct *mm = vma->vm_mm;
100122+ struct vm_area_struct *vma_m;
100123+ unsigned long address_m;
100124+ pte_t *ptep_m;
100125+
100126+ vma_m = pax_find_mirror_vma(vma);
100127+ if (!vma_m)
100128+ return;
100129+
100130+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
100131+ address_m = address + SEGMEXEC_TASK_SIZE;
100132+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
100133+ get_page(page_m);
100134+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
100135+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
100136+}
100137+#endif
100138+
100139 /*
100140 * Hugetlb_cow() should be called with page lock of the original hugepage held.
100141 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
100142@@ -2918,6 +2943,11 @@ retry_avoidcopy:
100143 make_huge_pte(vma, new_page, 1));
100144 page_remove_rmap(old_page);
100145 hugepage_add_new_anon_rmap(new_page, vma, address);
100146+
100147+#ifdef CONFIG_PAX_SEGMEXEC
100148+ pax_mirror_huge_pte(vma, address, new_page);
100149+#endif
100150+
100151 /* Make the old page be freed below */
100152 new_page = old_page;
100153 }
100154@@ -3077,6 +3107,10 @@ retry:
100155 && (vma->vm_flags & VM_SHARED)));
100156 set_huge_pte_at(mm, address, ptep, new_pte);
100157
100158+#ifdef CONFIG_PAX_SEGMEXEC
100159+ pax_mirror_huge_pte(vma, address, page);
100160+#endif
100161+
100162 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
100163 /* Optimization, do the COW without a second fault */
100164 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
100165@@ -3143,6 +3177,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
100166 struct hstate *h = hstate_vma(vma);
100167 struct address_space *mapping;
100168
100169+#ifdef CONFIG_PAX_SEGMEXEC
100170+ struct vm_area_struct *vma_m;
100171+#endif
100172+
100173 address &= huge_page_mask(h);
100174
100175 ptep = huge_pte_offset(mm, address);
100176@@ -3156,6 +3194,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
100177 VM_FAULT_SET_HINDEX(hstate_index(h));
100178 }
100179
100180+#ifdef CONFIG_PAX_SEGMEXEC
100181+ vma_m = pax_find_mirror_vma(vma);
100182+ if (vma_m) {
100183+ unsigned long address_m;
100184+
100185+ if (vma->vm_start > vma_m->vm_start) {
100186+ address_m = address;
100187+ address -= SEGMEXEC_TASK_SIZE;
100188+ vma = vma_m;
100189+ h = hstate_vma(vma);
100190+ } else
100191+ address_m = address + SEGMEXEC_TASK_SIZE;
100192+
100193+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
100194+ return VM_FAULT_OOM;
100195+ address_m &= HPAGE_MASK;
100196+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
100197+ }
100198+#endif
100199+
100200 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
100201 if (!ptep)
100202 return VM_FAULT_OOM;
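Because grsecurity constifies registered sysctl tables, the two handlers above stop writing data/maxlen into the shared ctl_table and instead mutate a stack copy (ctl_table_no_const). The pattern in miniature, with an illustrative struct in place of the real ctl_table:

#include <stdio.h>

struct ctl_entry {
	const char *name;
	void *data;
	int maxlen;
};

static int handle(const struct ctl_entry *table, unsigned long *out)
{
	unsigned long tmp = 0;
	struct ctl_entry local = *table;	/* hugetlb_table = *table */

	local.data = &tmp;			/* hugetlb_table.data = &tmp */
	local.maxlen = sizeof(tmp);
	/* ... parse the user's input into tmp via &local here ... */
	*out = tmp;
	return 0;
}

int main(void)
{
	struct ctl_entry entry = { "nr_hugepages", NULL, 0 };
	unsigned long val;

	handle(&entry, &val);
	printf("%s untouched: data=%p maxlen=%d\n",
	       entry.name, entry.data, entry.maxlen);
	return 0;
}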
100203diff --git a/mm/internal.h b/mm/internal.h
100204index 7f22a11f..f3c207f 100644
100205--- a/mm/internal.h
100206+++ b/mm/internal.h
100207@@ -109,6 +109,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
100208 * in mm/page_alloc.c
100209 */
100210 extern void __free_pages_bootmem(struct page *page, unsigned int order);
100211+extern void free_compound_page(struct page *page);
100212 extern void prep_compound_page(struct page *page, unsigned long order);
100213 #ifdef CONFIG_MEMORY_FAILURE
100214 extern bool is_free_buddy_page(struct page *page);
100215@@ -351,7 +352,7 @@ extern u32 hwpoison_filter_enable;
100216
100217 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
100218 unsigned long, unsigned long,
100219- unsigned long, unsigned long);
100220+ unsigned long, unsigned long) __intentional_overflow(-1);
100221
100222 extern void set_pageblock_order(void);
100223 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
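__intentional_overflow(-1) tells the size-overflow GCC plugin that vm_mmap_pgoff() deliberately returns values (negative errnos carried in an unsigned long) that would otherwise trip its instrumentation. A sketch of how such a marker is commonly wired so it disappears without the plugin; the guard macro name below is an assumption, not grsecurity's exact spelling:

#include <stdio.h>

struct file;				/* opaque, as in the kernel header */

#ifdef SIZE_OVERFLOW_PLUGIN		/* assumed guard name */
#define __intentional_overflow(...) \
	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)	/* no plugin: expands to nothing */
#endif

/* The prototype reads identically either way; with the plugin present,
 * overflow instrumentation of this function's result is suppressed. */
extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
				   unsigned long, unsigned long,
				   unsigned long, unsigned long)
	__intentional_overflow(-1);

int main(void)
{
	printf("the marker compiles to a no-op without the plugin\n");
	return 0;
}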
100224diff --git a/mm/iov_iter.c b/mm/iov_iter.c
100225index 7b5dbd1..af0e329 100644
100226--- a/mm/iov_iter.c
100227+++ b/mm/iov_iter.c
100228@@ -173,7 +173,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
100229
100230 while (bytes) {
100231 char __user *buf = iov->iov_base + base;
100232- int copy = min(bytes, iov->iov_len - base);
100233+ size_t copy = min(bytes, iov->iov_len - base);
100234
100235 base = 0;
100236 left = __copy_from_user_inatomic(vaddr, buf, copy);
100237@@ -201,7 +201,7 @@ static size_t copy_from_user_atomic_iovec(struct page *page,
100238
100239 kaddr = kmap_atomic(page);
100240 if (likely(i->nr_segs == 1)) {
100241- int left;
100242+ size_t left;
100243 char __user *buf = i->iov->iov_base + i->iov_offset;
100244 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
100245 copied = bytes - left;
100246@@ -231,7 +231,7 @@ static void advance_iovec(struct iov_iter *i, size_t bytes)
100247 * zero-length segments (without overruning the iovec).
100248 */
100249 while (bytes || unlikely(i->count && !iov->iov_len)) {
100250- int copy;
100251+ size_t copy;
100252
100253 copy = min(bytes, iov->iov_len - base);
100254 BUG_ON(!i->count || i->count < copy);
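Promoting copy and left from int to size_t removes a truncation: both min() operands are size_t, and storing the result in an int mangles segment sizes past INT_MAX. A 64-bit demonstration (assumes sizeof(size_t) == 8):

#include <stdio.h>
#include <stddef.h>

static size_t min_sz(size_t a, size_t b)
{
	return a < b ? a : b;
}

int main(void)
{
	size_t bytes = 5UL * 1024 * 1024 * 1024;	/* 5 GiB request */
	size_t seg   = 6UL * 1024 * 1024 * 1024;	/* 6 GiB iovec segment */

	int    copy_int = min_sz(bytes, seg);	/* the old pattern: truncates */
	size_t copy_sz  = min_sz(bytes, seg);	/* the patched pattern */

	printf("int:    %d\n", copy_int);	/* implementation-defined, wrong */
	printf("size_t: %zu\n", copy_sz);	/* 5368709120, as intended */
	return 0;
}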
100255diff --git a/mm/kmemleak.c b/mm/kmemleak.c
100256index 3cda50c..032ba634 100644
100257--- a/mm/kmemleak.c
100258+++ b/mm/kmemleak.c
100259@@ -364,7 +364,7 @@ static void print_unreferenced(struct seq_file *seq,
100260
100261 for (i = 0; i < object->trace_len; i++) {
100262 void *ptr = (void *)object->trace[i];
100263- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
100264+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
100265 }
100266 }
100267
100268@@ -1905,7 +1905,7 @@ static int __init kmemleak_late_init(void)
100269 return -ENOMEM;
100270 }
100271
100272- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
100273+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
100274 &kmemleak_fops);
100275 if (!dentry)
100276 pr_warning("Failed to create the debugfs kmemleak file\n");
100277diff --git a/mm/maccess.c b/mm/maccess.c
100278index d53adf9..03a24bf 100644
100279--- a/mm/maccess.c
100280+++ b/mm/maccess.c
100281@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
100282 set_fs(KERNEL_DS);
100283 pagefault_disable();
100284 ret = __copy_from_user_inatomic(dst,
100285- (__force const void __user *)src, size);
100286+ (const void __force_user *)src, size);
100287 pagefault_enable();
100288 set_fs(old_fs);
100289
100290@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
100291
100292 set_fs(KERNEL_DS);
100293 pagefault_disable();
100294- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
100295+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
100296 pagefault_enable();
100297 set_fs(old_fs);
100298
100299diff --git a/mm/madvise.c b/mm/madvise.c
100300index a402f8f..f5e5daa 100644
100301--- a/mm/madvise.c
100302+++ b/mm/madvise.c
100303@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
100304 pgoff_t pgoff;
100305 unsigned long new_flags = vma->vm_flags;
100306
100307+#ifdef CONFIG_PAX_SEGMEXEC
100308+ struct vm_area_struct *vma_m;
100309+#endif
100310+
100311 switch (behavior) {
100312 case MADV_NORMAL:
100313 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
100314@@ -126,6 +130,13 @@ success:
100315 /*
100316 * vm_flags is protected by the mmap_sem held in write mode.
100317 */
100318+
100319+#ifdef CONFIG_PAX_SEGMEXEC
100320+ vma_m = pax_find_mirror_vma(vma);
100321+ if (vma_m)
100322+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
100323+#endif
100324+
100325 vma->vm_flags = new_flags;
100326
100327 out:
100328@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma,
100329 struct vm_area_struct **prev,
100330 unsigned long start, unsigned long end)
100331 {
100332+
100333+#ifdef CONFIG_PAX_SEGMEXEC
100334+ struct vm_area_struct *vma_m;
100335+#endif
100336+
100337 *prev = vma;
100338 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
100339 return -EINVAL;
100340@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma,
100341 zap_page_range(vma, start, end - start, &details);
100342 } else
100343 zap_page_range(vma, start, end - start, NULL);
100344+
100345+#ifdef CONFIG_PAX_SEGMEXEC
100346+ vma_m = pax_find_mirror_vma(vma);
100347+ if (vma_m) {
100348+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
100349+ struct zap_details details = {
100350+ .nonlinear_vma = vma_m,
100351+ .last_index = ULONG_MAX,
100352+ };
100353+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
100354+ } else
100355+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
100356+ }
100357+#endif
100358+
100359 return 0;
100360 }
100361
100362@@ -491,6 +522,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
100363 if (end < start)
100364 return error;
100365
100366+#ifdef CONFIG_PAX_SEGMEXEC
100367+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
100368+ if (end > SEGMEXEC_TASK_SIZE)
100369+ return error;
100370+ } else
100371+#endif
100372+
100373+ if (end > TASK_SIZE)
100374+ return error;
100375+
100376 error = 0;
100377 if (end == start)
100378 return error;
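The added check completes the existing end < start wrap test: once start + len is known not to wrap, end must also stay within TASK_SIZE (SEGMEXEC_TASK_SIZE for mirrored tasks). The shape of the validation, with an assumed ceiling constant:

#include <stdio.h>

#define TASK_CEILING 0x0000800000000000UL	/* stand-in for TASK_SIZE */

static int range_ok(unsigned long start, unsigned long len)
{
	unsigned long end = start + len;

	if (end < start)		/* wrapped around the address space */
		return 0;
	if (end > TASK_CEILING)		/* the bound this hunk adds */
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", range_ok(0x1000, 0x1000));			/* 1 */
	printf("%d\n", range_ok(TASK_CEILING - 0x1000, 0x2000));	/* 0 */
	printf("%d\n", range_ok(~0UL - 0x10, 0x100));			/* 0: wraps */
	return 0;
}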
100379diff --git a/mm/memory-failure.c b/mm/memory-failure.c
100380index a013bc9..a897a14 100644
100381--- a/mm/memory-failure.c
100382+++ b/mm/memory-failure.c
100383@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
100384
100385 int sysctl_memory_failure_recovery __read_mostly = 1;
100386
100387-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
100388+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
100389
100390 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
100391
100392@@ -198,7 +198,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
100393 pfn, t->comm, t->pid);
100394 si.si_signo = SIGBUS;
100395 si.si_errno = 0;
100396- si.si_addr = (void *)addr;
100397+ si.si_addr = (void __user *)addr;
100398 #ifdef __ARCH_SI_TRAPNO
100399 si.si_trapno = trapno;
100400 #endif
100401@@ -791,7 +791,7 @@ static struct page_state {
100402 unsigned long res;
100403 char *msg;
100404 int (*action)(struct page *p, unsigned long pfn);
100405-} error_states[] = {
100406+} __do_const error_states[] = {
100407 { reserved, reserved, "reserved kernel", me_kernel },
100408 /*
100409 * free pages are specially detected outside this table:
100410@@ -1099,7 +1099,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
100411 nr_pages = 1 << compound_order(hpage);
100412 else /* normal page or thp */
100413 nr_pages = 1;
100414- atomic_long_add(nr_pages, &num_poisoned_pages);
100415+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
100416
100417 /*
100418 * We need/can do nothing about count=0 pages.
100419@@ -1128,7 +1128,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
100420 if (PageHWPoison(hpage)) {
100421 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
100422 || (p != hpage && TestSetPageHWPoison(hpage))) {
100423- atomic_long_sub(nr_pages, &num_poisoned_pages);
100424+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
100425 unlock_page(hpage);
100426 return 0;
100427 }
100428@@ -1186,14 +1186,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
100429 */
100430 if (!PageHWPoison(p)) {
100431 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
100432- atomic_long_sub(nr_pages, &num_poisoned_pages);
100433+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
100434 put_page(hpage);
100435 res = 0;
100436 goto out;
100437 }
100438 if (hwpoison_filter(p)) {
100439 if (TestClearPageHWPoison(p))
100440- atomic_long_sub(nr_pages, &num_poisoned_pages);
100441+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
100442 unlock_page(hpage);
100443 put_page(hpage);
100444 return 0;
100445@@ -1423,7 +1423,7 @@ int unpoison_memory(unsigned long pfn)
100446 return 0;
100447 }
100448 if (TestClearPageHWPoison(p))
100449- atomic_long_dec(&num_poisoned_pages);
100450+ atomic_long_dec_unchecked(&num_poisoned_pages);
100451 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
100452 return 0;
100453 }
100454@@ -1437,7 +1437,7 @@ int unpoison_memory(unsigned long pfn)
100455 */
100456 if (TestClearPageHWPoison(page)) {
100457 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
100458- atomic_long_sub(nr_pages, &num_poisoned_pages);
100459+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
100460 freeit = 1;
100461 if (PageHuge(page))
100462 clear_page_hwpoison_huge_page(page);
100463@@ -1562,11 +1562,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
100464 if (PageHuge(page)) {
100465 set_page_hwpoison_huge_page(hpage);
100466 dequeue_hwpoisoned_huge_page(hpage);
100467- atomic_long_add(1 << compound_order(hpage),
100468+ atomic_long_add_unchecked(1 << compound_order(hpage),
100469 &num_poisoned_pages);
100470 } else {
100471 SetPageHWPoison(page);
100472- atomic_long_inc(&num_poisoned_pages);
100473+ atomic_long_inc_unchecked(&num_poisoned_pages);
100474 }
100475 }
100476 return ret;
100477@@ -1605,7 +1605,7 @@ static int __soft_offline_page(struct page *page, int flags)
100478 put_page(page);
100479 pr_info("soft_offline: %#lx: invalidated\n", pfn);
100480 SetPageHWPoison(page);
100481- atomic_long_inc(&num_poisoned_pages);
100482+ atomic_long_inc_unchecked(&num_poisoned_pages);
100483 return 0;
100484 }
100485
100486@@ -1656,7 +1656,7 @@ static int __soft_offline_page(struct page *page, int flags)
100487 if (!is_free_buddy_page(page))
100488 pr_info("soft offline: %#lx: page leaked\n",
100489 pfn);
100490- atomic_long_inc(&num_poisoned_pages);
100491+ atomic_long_inc_unchecked(&num_poisoned_pages);
100492 }
100493 } else {
100494 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
100495@@ -1726,11 +1726,11 @@ int soft_offline_page(struct page *page, int flags)
100496 if (PageHuge(page)) {
100497 set_page_hwpoison_huge_page(hpage);
100498 dequeue_hwpoisoned_huge_page(hpage);
100499- atomic_long_add(1 << compound_order(hpage),
100500+ atomic_long_add_unchecked(1 << compound_order(hpage),
100501 &num_poisoned_pages);
100502 } else {
100503 SetPageHWPoison(page);
100504- atomic_long_inc(&num_poisoned_pages);
100505+ atomic_long_inc_unchecked(&num_poisoned_pages);
100506 }
100507 }
100508 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
100509diff --git a/mm/memory.c b/mm/memory.c
100510index 0a21f3d..babeaec 100644
100511--- a/mm/memory.c
100512+++ b/mm/memory.c
100513@@ -413,6 +413,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
100514 free_pte_range(tlb, pmd, addr);
100515 } while (pmd++, addr = next, addr != end);
100516
100517+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
100518 start &= PUD_MASK;
100519 if (start < floor)
100520 return;
100521@@ -427,6 +428,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
100522 pmd = pmd_offset(pud, start);
100523 pud_clear(pud);
100524 pmd_free_tlb(tlb, pmd, start);
100525+#endif
100526+
100527 }
100528
100529 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
100530@@ -446,6 +449,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
100531 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
100532 } while (pud++, addr = next, addr != end);
100533
100534+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
100535 start &= PGDIR_MASK;
100536 if (start < floor)
100537 return;
100538@@ -460,6 +464,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
100539 pud = pud_offset(pgd, start);
100540 pgd_clear(pgd);
100541 pud_free_tlb(tlb, pud, start);
100542+#endif
100543+
100544 }
100545
100546 /*
100547@@ -1500,6 +1506,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
100548 page_add_file_rmap(page);
100549 set_pte_at(mm, addr, pte, mk_pte(page, prot));
100550
100551+#ifdef CONFIG_PAX_SEGMEXEC
100552+ pax_mirror_file_pte(vma, addr, page, ptl);
100553+#endif
100554+
100555 retval = 0;
100556 pte_unmap_unlock(pte, ptl);
100557 return retval;
100558@@ -1544,9 +1554,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
100559 if (!page_count(page))
100560 return -EINVAL;
100561 if (!(vma->vm_flags & VM_MIXEDMAP)) {
100562+
100563+#ifdef CONFIG_PAX_SEGMEXEC
100564+ struct vm_area_struct *vma_m;
100565+#endif
100566+
100567 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
100568 BUG_ON(vma->vm_flags & VM_PFNMAP);
100569 vma->vm_flags |= VM_MIXEDMAP;
100570+
100571+#ifdef CONFIG_PAX_SEGMEXEC
100572+ vma_m = pax_find_mirror_vma(vma);
100573+ if (vma_m)
100574+ vma_m->vm_flags |= VM_MIXEDMAP;
100575+#endif
100576+
100577 }
100578 return insert_page(vma, addr, page, vma->vm_page_prot);
100579 }
100580@@ -1629,6 +1651,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
100581 unsigned long pfn)
100582 {
100583 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
100584+ BUG_ON(vma->vm_mirror);
100585
100586 if (addr < vma->vm_start || addr >= vma->vm_end)
100587 return -EFAULT;
100588@@ -1876,7 +1899,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
100589
100590 BUG_ON(pud_huge(*pud));
100591
100592- pmd = pmd_alloc(mm, pud, addr);
100593+ pmd = (mm == &init_mm) ?
100594+ pmd_alloc_kernel(mm, pud, addr) :
100595+ pmd_alloc(mm, pud, addr);
100596 if (!pmd)
100597 return -ENOMEM;
100598 do {
100599@@ -1896,7 +1921,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
100600 unsigned long next;
100601 int err;
100602
100603- pud = pud_alloc(mm, pgd, addr);
100604+ pud = (mm == &init_mm) ?
100605+ pud_alloc_kernel(mm, pgd, addr) :
100606+ pud_alloc(mm, pgd, addr);
100607 if (!pud)
100608 return -ENOMEM;
100609 do {
100610@@ -2018,6 +2045,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
100611 return ret;
100612 }
100613
100614+#ifdef CONFIG_PAX_SEGMEXEC
100615+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
100616+{
100617+ struct mm_struct *mm = vma->vm_mm;
100618+ spinlock_t *ptl;
100619+ pte_t *pte, entry;
100620+
100621+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
100622+ entry = *pte;
100623+ if (!pte_present(entry)) {
100624+ if (!pte_none(entry)) {
100625+ BUG_ON(pte_file(entry));
100626+ free_swap_and_cache(pte_to_swp_entry(entry));
100627+ pte_clear_not_present_full(mm, address, pte, 0);
100628+ }
100629+ } else {
100630+ struct page *page;
100631+
100632+ flush_cache_page(vma, address, pte_pfn(entry));
100633+ entry = ptep_clear_flush(vma, address, pte);
100634+ BUG_ON(pte_dirty(entry));
100635+ page = vm_normal_page(vma, address, entry);
100636+ if (page) {
100637+ update_hiwater_rss(mm);
100638+ if (PageAnon(page))
100639+ dec_mm_counter_fast(mm, MM_ANONPAGES);
100640+ else
100641+ dec_mm_counter_fast(mm, MM_FILEPAGES);
100642+ page_remove_rmap(page);
100643+ page_cache_release(page);
100644+ }
100645+ }
100646+ pte_unmap_unlock(pte, ptl);
100647+}
100648+
100649+/* PaX: if vma is mirrored, synchronize the mirror's PTE
100650+ *
100651+ * the ptl of the lower mapped page is held on entry and is not released on exit
100652+ * or inside, so that changes to the PTE state (swapout, mremap, munmap, etc.) stay atomic
100653+ */
100654+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
100655+{
100656+ struct mm_struct *mm = vma->vm_mm;
100657+ unsigned long address_m;
100658+ spinlock_t *ptl_m;
100659+ struct vm_area_struct *vma_m;
100660+ pmd_t *pmd_m;
100661+ pte_t *pte_m, entry_m;
100662+
100663+ BUG_ON(!page_m || !PageAnon(page_m));
100664+
100665+ vma_m = pax_find_mirror_vma(vma);
100666+ if (!vma_m)
100667+ return;
100668+
100669+ BUG_ON(!PageLocked(page_m));
100670+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
100671+ address_m = address + SEGMEXEC_TASK_SIZE;
100672+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
100673+ pte_m = pte_offset_map(pmd_m, address_m);
100674+ ptl_m = pte_lockptr(mm, pmd_m);
100675+ if (ptl != ptl_m) {
100676+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
100677+ if (!pte_none(*pte_m))
100678+ goto out;
100679+ }
100680+
100681+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
100682+ page_cache_get(page_m);
100683+ page_add_anon_rmap(page_m, vma_m, address_m);
100684+ inc_mm_counter_fast(mm, MM_ANONPAGES);
100685+ set_pte_at(mm, address_m, pte_m, entry_m);
100686+ update_mmu_cache(vma_m, address_m, pte_m);
100687+out:
100688+ if (ptl != ptl_m)
100689+ spin_unlock(ptl_m);
100690+ pte_unmap(pte_m);
100691+ unlock_page(page_m);
100692+}
100693+
100694+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
100695+{
100696+ struct mm_struct *mm = vma->vm_mm;
100697+ unsigned long address_m;
100698+ spinlock_t *ptl_m;
100699+ struct vm_area_struct *vma_m;
100700+ pmd_t *pmd_m;
100701+ pte_t *pte_m, entry_m;
100702+
100703+ BUG_ON(!page_m || PageAnon(page_m));
100704+
100705+ vma_m = pax_find_mirror_vma(vma);
100706+ if (!vma_m)
100707+ return;
100708+
100709+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
100710+ address_m = address + SEGMEXEC_TASK_SIZE;
100711+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
100712+ pte_m = pte_offset_map(pmd_m, address_m);
100713+ ptl_m = pte_lockptr(mm, pmd_m);
100714+ if (ptl != ptl_m) {
100715+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
100716+ if (!pte_none(*pte_m))
100717+ goto out;
100718+ }
100719+
100720+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
100721+ page_cache_get(page_m);
100722+ page_add_file_rmap(page_m);
100723+ inc_mm_counter_fast(mm, MM_FILEPAGES);
100724+ set_pte_at(mm, address_m, pte_m, entry_m);
100725+ update_mmu_cache(vma_m, address_m, pte_m);
100726+out:
100727+ if (ptl != ptl_m)
100728+ spin_unlock(ptl_m);
100729+ pte_unmap(pte_m);
100730+}
100731+
100732+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
100733+{
100734+ struct mm_struct *mm = vma->vm_mm;
100735+ unsigned long address_m;
100736+ spinlock_t *ptl_m;
100737+ struct vm_area_struct *vma_m;
100738+ pmd_t *pmd_m;
100739+ pte_t *pte_m, entry_m;
100740+
100741+ vma_m = pax_find_mirror_vma(vma);
100742+ if (!vma_m)
100743+ return;
100744+
100745+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
100746+ address_m = address + SEGMEXEC_TASK_SIZE;
100747+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
100748+ pte_m = pte_offset_map(pmd_m, address_m);
100749+ ptl_m = pte_lockptr(mm, pmd_m);
100750+ if (ptl != ptl_m) {
100751+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
100752+ if (!pte_none(*pte_m))
100753+ goto out;
100754+ }
100755+
100756+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
100757+ set_pte_at(mm, address_m, pte_m, entry_m);
100758+out:
100759+ if (ptl != ptl_m)
100760+ spin_unlock(ptl_m);
100761+ pte_unmap(pte_m);
100762+}
100763+
100764+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
100765+{
100766+ struct page *page_m;
100767+ pte_t entry;
100768+
100769+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
100770+ goto out;
100771+
100772+ entry = *pte;
100773+ page_m = vm_normal_page(vma, address, entry);
100774+ if (!page_m)
100775+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
100776+ else if (PageAnon(page_m)) {
100777+ if (pax_find_mirror_vma(vma)) {
100778+ pte_unmap_unlock(pte, ptl);
100779+ lock_page(page_m);
100780+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
100781+ if (pte_same(entry, *pte))
100782+ pax_mirror_anon_pte(vma, address, page_m, ptl);
100783+ else
100784+ unlock_page(page_m);
100785+ }
100786+ } else
100787+ pax_mirror_file_pte(vma, address, page_m, ptl);
100788+
100789+out:
100790+ pte_unmap_unlock(pte, ptl);
100791+}
100792+#endif
100793+
100794 /*
100795 * This routine handles present pages, when users try to write
100796 * to a shared page. It is done by copying the page to a new address
100797@@ -2215,6 +2422,12 @@ gotten:
100798 */
100799 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
100800 if (likely(pte_same(*page_table, orig_pte))) {
100801+
100802+#ifdef CONFIG_PAX_SEGMEXEC
100803+ if (pax_find_mirror_vma(vma))
100804+ BUG_ON(!trylock_page(new_page));
100805+#endif
100806+
100807 if (old_page) {
100808 if (!PageAnon(old_page)) {
100809 dec_mm_counter_fast(mm, MM_FILEPAGES);
100810@@ -2266,6 +2479,10 @@ gotten:
100811 page_remove_rmap(old_page);
100812 }
100813
100814+#ifdef CONFIG_PAX_SEGMEXEC
100815+ pax_mirror_anon_pte(vma, address, new_page, ptl);
100816+#endif
100817+
100818 /* Free the old page.. */
100819 new_page = old_page;
100820 ret |= VM_FAULT_WRITE;
100821@@ -2539,6 +2756,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
100822 swap_free(entry);
100823 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
100824 try_to_free_swap(page);
100825+
100826+#ifdef CONFIG_PAX_SEGMEXEC
100827+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
100828+#endif
100829+
100830 unlock_page(page);
100831 if (page != swapcache) {
100832 /*
100833@@ -2562,6 +2784,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
100834
100835 /* No need to invalidate - it was non-present before */
100836 update_mmu_cache(vma, address, page_table);
100837+
100838+#ifdef CONFIG_PAX_SEGMEXEC
100839+ pax_mirror_anon_pte(vma, address, page, ptl);
100840+#endif
100841+
100842 unlock:
100843 pte_unmap_unlock(page_table, ptl);
100844 out:
100845@@ -2581,40 +2808,6 @@ out_release:
100846 }
100847
100848 /*
100849- * This is like a special single-page "expand_{down|up}wards()",
100850- * except we must first make sure that 'address{-|+}PAGE_SIZE'
100851- * doesn't hit another vma.
100852- */
100853-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
100854-{
100855- address &= PAGE_MASK;
100856- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
100857- struct vm_area_struct *prev = vma->vm_prev;
100858-
100859- /*
100860- * Is there a mapping abutting this one below?
100861- *
100862- * That's only ok if it's the same stack mapping
100863- * that has gotten split..
100864- */
100865- if (prev && prev->vm_end == address)
100866- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
100867-
100868- expand_downwards(vma, address - PAGE_SIZE);
100869- }
100870- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
100871- struct vm_area_struct *next = vma->vm_next;
100872-
100873- /* As VM_GROWSDOWN but s/below/above/ */
100874- if (next && next->vm_start == address + PAGE_SIZE)
100875- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
100876-
100877- expand_upwards(vma, address + PAGE_SIZE);
100878- }
100879- return 0;
100880-}
100881-
100882-/*
100883 * We enter with non-exclusive mmap_sem (to exclude vma changes,
100884 * but allow concurrent faults), and pte mapped but not yet locked.
100885 * We return with mmap_sem still held, but pte unmapped and unlocked.
100886@@ -2623,27 +2816,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
100887 unsigned long address, pte_t *page_table, pmd_t *pmd,
100888 unsigned int flags)
100889 {
100890- struct page *page;
100891+ struct page *page = NULL;
100892 spinlock_t *ptl;
100893 pte_t entry;
100894
100895- pte_unmap(page_table);
100896-
100897- /* Check if we need to add a guard page to the stack */
100898- if (check_stack_guard_page(vma, address) < 0)
100899- return VM_FAULT_SIGBUS;
100900-
100901- /* Use the zero-page for reads */
100902 if (!(flags & FAULT_FLAG_WRITE)) {
100903 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
100904 vma->vm_page_prot));
100905- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
100906+ ptl = pte_lockptr(mm, pmd);
100907+ spin_lock(ptl);
100908 if (!pte_none(*page_table))
100909 goto unlock;
100910 goto setpte;
100911 }
100912
100913 /* Allocate our own private page. */
100914+ pte_unmap(page_table);
100915+
100916 if (unlikely(anon_vma_prepare(vma)))
100917 goto oom;
100918 page = alloc_zeroed_user_highpage_movable(vma, address);
100919@@ -2667,6 +2856,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
100920 if (!pte_none(*page_table))
100921 goto release;
100922
100923+#ifdef CONFIG_PAX_SEGMEXEC
100924+ if (pax_find_mirror_vma(vma))
100925+ BUG_ON(!trylock_page(page));
100926+#endif
100927+
100928 inc_mm_counter_fast(mm, MM_ANONPAGES);
100929 page_add_new_anon_rmap(page, vma, address);
100930 setpte:
100931@@ -2674,6 +2868,12 @@ setpte:
100932
100933 /* No need to invalidate - it was non-present before */
100934 update_mmu_cache(vma, address, page_table);
100935+
100936+#ifdef CONFIG_PAX_SEGMEXEC
100937+ if (page)
100938+ pax_mirror_anon_pte(vma, address, page, ptl);
100939+#endif
100940+
100941 unlock:
100942 pte_unmap_unlock(page_table, ptl);
100943 return 0;
100944@@ -2905,6 +3105,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
100945 return ret;
100946 }
100947 do_set_pte(vma, address, fault_page, pte, false, false);
100948+
100949+#ifdef CONFIG_PAX_SEGMEXEC
100950+ pax_mirror_file_pte(vma, address, fault_page, ptl);
100951+#endif
100952+
100953 unlock_page(fault_page);
100954 unlock_out:
100955 pte_unmap_unlock(pte, ptl);
100956@@ -2946,7 +3151,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
100957 page_cache_release(fault_page);
100958 goto uncharge_out;
100959 }
100960+
100961+#ifdef CONFIG_PAX_SEGMEXEC
100962+ if (pax_find_mirror_vma(vma))
100963+ BUG_ON(!trylock_page(new_page));
100964+#endif
100965+
100966 do_set_pte(vma, address, new_page, pte, true, true);
100967+
100968+#ifdef CONFIG_PAX_SEGMEXEC
100969+ pax_mirror_anon_pte(vma, address, new_page, ptl);
100970+#endif
100971+
100972 pte_unmap_unlock(pte, ptl);
100973 unlock_page(fault_page);
100974 page_cache_release(fault_page);
100975@@ -2994,6 +3210,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
100976 return ret;
100977 }
100978 do_set_pte(vma, address, fault_page, pte, true, false);
100979+
100980+#ifdef CONFIG_PAX_SEGMEXEC
100981+ pax_mirror_file_pte(vma, address, fault_page, ptl);
100982+#endif
100983+
100984 pte_unmap_unlock(pte, ptl);
100985
100986 if (set_page_dirty(fault_page))
100987@@ -3224,6 +3445,12 @@ static int handle_pte_fault(struct mm_struct *mm,
100988 if (flags & FAULT_FLAG_WRITE)
100989 flush_tlb_fix_spurious_fault(vma, address);
100990 }
100991+
100992+#ifdef CONFIG_PAX_SEGMEXEC
100993+ pax_mirror_pte(vma, address, pte, pmd, ptl);
100994+ return 0;
100995+#endif
100996+
100997 unlock:
100998 pte_unmap_unlock(pte, ptl);
100999 return 0;
101000@@ -3240,9 +3467,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
101001 pmd_t *pmd;
101002 pte_t *pte;
101003
101004+#ifdef CONFIG_PAX_SEGMEXEC
101005+ struct vm_area_struct *vma_m;
101006+#endif
101007+
101008 if (unlikely(is_vm_hugetlb_page(vma)))
101009 return hugetlb_fault(mm, vma, address, flags);
101010
101011+#ifdef CONFIG_PAX_SEGMEXEC
101012+ vma_m = pax_find_mirror_vma(vma);
101013+ if (vma_m) {
101014+ unsigned long address_m;
101015+ pgd_t *pgd_m;
101016+ pud_t *pud_m;
101017+ pmd_t *pmd_m;
101018+
101019+ if (vma->vm_start > vma_m->vm_start) {
101020+ address_m = address;
101021+ address -= SEGMEXEC_TASK_SIZE;
101022+ vma = vma_m;
101023+ } else
101024+ address_m = address + SEGMEXEC_TASK_SIZE;
101025+
101026+ pgd_m = pgd_offset(mm, address_m);
101027+ pud_m = pud_alloc(mm, pgd_m, address_m);
101028+ if (!pud_m)
101029+ return VM_FAULT_OOM;
101030+ pmd_m = pmd_alloc(mm, pud_m, address_m);
101031+ if (!pmd_m)
101032+ return VM_FAULT_OOM;
101033+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
101034+ return VM_FAULT_OOM;
101035+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
101036+ }
101037+#endif
101038+
101039 pgd = pgd_offset(mm, address);
101040 pud = pud_alloc(mm, pgd, address);
101041 if (!pud)
101042@@ -3370,6 +3629,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
101043 spin_unlock(&mm->page_table_lock);
101044 return 0;
101045 }
101046+
101047+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
101048+{
101049+ pud_t *new = pud_alloc_one(mm, address);
101050+ if (!new)
101051+ return -ENOMEM;
101052+
101053+ smp_wmb(); /* See comment in __pte_alloc */
101054+
101055+ spin_lock(&mm->page_table_lock);
101056+ if (pgd_present(*pgd)) /* Another has populated it */
101057+ pud_free(mm, new);
101058+ else
101059+ pgd_populate_kernel(mm, pgd, new);
101060+ spin_unlock(&mm->page_table_lock);
101061+ return 0;
101062+}
101063 #endif /* __PAGETABLE_PUD_FOLDED */
101064
101065 #ifndef __PAGETABLE_PMD_FOLDED
101066@@ -3400,6 +3676,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
101067 spin_unlock(&mm->page_table_lock);
101068 return 0;
101069 }
101070+
101071+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
101072+{
101073+ pmd_t *new = pmd_alloc_one(mm, address);
101074+ if (!new)
101075+ return -ENOMEM;
101076+
101077+ smp_wmb(); /* See comment in __pte_alloc */
101078+
101079+ spin_lock(&mm->page_table_lock);
101080+#ifndef __ARCH_HAS_4LEVEL_HACK
101081+ if (pud_present(*pud)) /* Another has populated it */
101082+ pmd_free(mm, new);
101083+ else
101084+ pud_populate_kernel(mm, pud, new);
101085+#else
101086+ if (pgd_present(*pud)) /* Another has populated it */
101087+ pmd_free(mm, new);
101088+ else
101089+ pgd_populate_kernel(mm, pud, new);
101090+#endif /* __ARCH_HAS_4LEVEL_HACK */
101091+ spin_unlock(&mm->page_table_lock);
101092+ return 0;
101093+}
101094 #endif /* __PAGETABLE_PMD_FOLDED */
101095
101096 #if !defined(__HAVE_ARCH_GATE_AREA)
101097@@ -3413,7 +3713,7 @@ static int __init gate_vma_init(void)
101098 gate_vma.vm_start = FIXADDR_USER_START;
101099 gate_vma.vm_end = FIXADDR_USER_END;
101100 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
101101- gate_vma.vm_page_prot = __P101;
101102+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
101103
101104 return 0;
101105 }
101106@@ -3547,8 +3847,8 @@ out:
101107 return ret;
101108 }
101109
101110-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
101111- void *buf, int len, int write)
101112+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
101113+ void *buf, size_t len, int write)
101114 {
101115 resource_size_t phys_addr;
101116 unsigned long prot = 0;
101117@@ -3574,8 +3874,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
101118 * Access another process' address space as given in mm. If non-NULL, use the
101119 * given task for page fault accounting.
101120 */
101121-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
101122- unsigned long addr, void *buf, int len, int write)
101123+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
101124+ unsigned long addr, void *buf, size_t len, int write)
101125 {
101126 struct vm_area_struct *vma;
101127 void *old_buf = buf;
101128@@ -3583,7 +3883,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
101129 down_read(&mm->mmap_sem);
101130 /* ignore errors, just check how much was successfully transferred */
101131 while (len) {
101132- int bytes, ret, offset;
101133+ ssize_t bytes, ret, offset;
101134 void *maddr;
101135 struct page *page = NULL;
101136
101137@@ -3642,8 +3942,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
101138 *
101139 * The caller must hold a reference on @mm.
101140 */
101141-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
101142- void *buf, int len, int write)
101143+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
101144+ void *buf, size_t len, int write)
101145 {
101146 return __access_remote_vm(NULL, mm, addr, buf, len, write);
101147 }
101148@@ -3653,11 +3953,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
101149 * Source/target buffer must be kernel space,
101150 * Do not walk the page table directly, use get_user_pages
101151 */
101152-int access_process_vm(struct task_struct *tsk, unsigned long addr,
101153- void *buf, int len, int write)
101154+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
101155+ void *buf, size_t len, int write)
101156 {
101157 struct mm_struct *mm;
101158- int ret;
101159+ ssize_t ret;
101160
101161 mm = get_task_mm(tsk);
101162 if (!mm)
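All of the pax_mirror_*_pte() machinery above keeps a SEGMEXEC task's two views, the data mapping and its executable twin SEGMEXEC_TASK_SIZE higher, pointing at the same physical pages. As a loose userspace analogy (not the kernel mechanism itself), two mappings of one backing object show the same aliasing; memfd_create() needs glibc 2.27+:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	int fd = memfd_create("mirror-demo", 0);
	char *lower, *upper;

	ftruncate(fd, 4096);
	lower = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	upper = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);

	strcpy(lower, "written via the lower view");
	printf("upper view sees: %s\n", upper);	/* one page, two addresses */

	close(fd);
	return 0;
}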
101163diff --git a/mm/mempolicy.c b/mm/mempolicy.c
101164index 8f5330d..b41914b 100644
101165--- a/mm/mempolicy.c
101166+++ b/mm/mempolicy.c
101167@@ -750,6 +750,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
101168 unsigned long vmstart;
101169 unsigned long vmend;
101170
101171+#ifdef CONFIG_PAX_SEGMEXEC
101172+ struct vm_area_struct *vma_m;
101173+#endif
101174+
101175 vma = find_vma(mm, start);
101176 if (!vma || vma->vm_start > start)
101177 return -EFAULT;
101178@@ -793,6 +797,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
101179 err = vma_replace_policy(vma, new_pol);
101180 if (err)
101181 goto out;
101182+
101183+#ifdef CONFIG_PAX_SEGMEXEC
101184+ vma_m = pax_find_mirror_vma(vma);
101185+ if (vma_m) {
101186+ err = vma_replace_policy(vma_m, new_pol);
101187+ if (err)
101188+ goto out;
101189+ }
101190+#endif
101191+
101192 }
101193
101194 out:
101195@@ -1225,6 +1239,17 @@ static long do_mbind(unsigned long start, unsigned long len,
101196
101197 if (end < start)
101198 return -EINVAL;
101199+
101200+#ifdef CONFIG_PAX_SEGMEXEC
101201+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
101202+ if (end > SEGMEXEC_TASK_SIZE)
101203+ return -EINVAL;
101204+ } else
101205+#endif
101206+
101207+ if (end > TASK_SIZE)
101208+ return -EINVAL;
101209+
101210 if (end == start)
101211 return 0;
101212
101213@@ -1450,8 +1475,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
101214 */
101215 tcred = __task_cred(task);
101216 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
101217- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
101218- !capable(CAP_SYS_NICE)) {
101219+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
101220 rcu_read_unlock();
101221 err = -EPERM;
101222 goto out_put;
101223@@ -1482,6 +1506,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
101224 goto out;
101225 }
101226
101227+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
101228+ if (mm != current->mm &&
101229+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
101230+ mmput(mm);
101231+ err = -EPERM;
101232+ goto out;
101233+ }
101234+#endif
101235+
101236 err = do_migrate_pages(mm, old, new,
101237 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
101238
101239diff --git a/mm/migrate.c b/mm/migrate.c
101240index be6dbf9..febb8ec 100644
101241--- a/mm/migrate.c
101242+++ b/mm/migrate.c
101243@@ -1506,8 +1506,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
101244 */
101245 tcred = __task_cred(task);
101246 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
101247- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
101248- !capable(CAP_SYS_NICE)) {
101249+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
101250 rcu_read_unlock();
101251 err = -EPERM;
101252 goto out;
101253diff --git a/mm/mlock.c b/mm/mlock.c
101254index b1eb536..091d154 100644
101255--- a/mm/mlock.c
101256+++ b/mm/mlock.c
101257@@ -14,6 +14,7 @@
101258 #include <linux/pagevec.h>
101259 #include <linux/mempolicy.h>
101260 #include <linux/syscalls.h>
101261+#include <linux/security.h>
101262 #include <linux/sched.h>
101263 #include <linux/export.h>
101264 #include <linux/rmap.h>
101265@@ -606,7 +607,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
101266 {
101267 unsigned long nstart, end, tmp;
101268 struct vm_area_struct * vma, * prev;
101269- int error;
101270+ int error = 0;
101271
101272 VM_BUG_ON(start & ~PAGE_MASK);
101273 VM_BUG_ON(len != PAGE_ALIGN(len));
101274@@ -615,6 +616,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
101275 return -EINVAL;
101276 if (end == start)
101277 return 0;
101278+ if (end > TASK_SIZE)
101279+ return -EINVAL;
101280+
101281 vma = find_vma(current->mm, start);
101282 if (!vma || vma->vm_start > start)
101283 return -ENOMEM;
101284@@ -626,6 +630,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
101285 for (nstart = start ; ; ) {
101286 vm_flags_t newflags;
101287
101288+#ifdef CONFIG_PAX_SEGMEXEC
101289+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
101290+ break;
101291+#endif
101292+
101293 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
101294
101295 newflags = vma->vm_flags & ~VM_LOCKED;
101296@@ -739,6 +748,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
101297 locked += current->mm->locked_vm;
101298
101299 /* check against resource limits */
101300+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
101301 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
101302 error = do_mlock(start, len, 1);
101303
101304@@ -776,6 +786,11 @@ static int do_mlockall(int flags)
101305 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
101306 vm_flags_t newflags;
101307
101308+#ifdef CONFIG_PAX_SEGMEXEC
101309+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
101310+ break;
101311+#endif
101312+
101313 newflags = vma->vm_flags & ~VM_LOCKED;
101314 if (flags & MCL_CURRENT)
101315 newflags |= VM_LOCKED;
101316@@ -807,8 +822,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
101317 lock_limit >>= PAGE_SHIFT;
101318
101319 ret = -ENOMEM;
101320+
101321+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
101322+
101323 down_write(&current->mm->mmap_sem);
101324-
101325 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
101326 capable(CAP_IPC_LOCK))
101327 ret = do_mlockall(flags);
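As in the filemap hunk earlier, gr_learn_resource() records the would-be RLIMIT_MEMLOCK usage just before the stock limit check. The check itself is observable without grsecurity; with the limit shrunk to one page, locking two should fail (typically ENOMEM) absent CAP_IPC_LOCK:

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 4096, .rlim_max = 4096 };
	void *p;

	setrlimit(RLIMIT_MEMLOCK, &rl);
	p = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (mlock(p, 8192) != 0)
		printf("mlock over the limit: %s\n", strerror(errno));
	else
		printf("mlock succeeded (privileged?)\n");
	return 0;
}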
101328diff --git a/mm/mmap.c b/mm/mmap.c
101329index 129b847..fbed804 100644
101330--- a/mm/mmap.c
101331+++ b/mm/mmap.c
101332@@ -40,6 +40,7 @@
101333 #include <linux/notifier.h>
101334 #include <linux/memory.h>
101335 #include <linux/printk.h>
101336+#include <linux/random.h>
101337
101338 #include <asm/uaccess.h>
101339 #include <asm/cacheflush.h>
101340@@ -56,6 +57,16 @@
101341 #define arch_rebalance_pgtables(addr, len) (addr)
101342 #endif
101343
101344+static inline void verify_mm_writelocked(struct mm_struct *mm)
101345+{
101346+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
101347+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
101348+ up_read(&mm->mmap_sem);
101349+ BUG();
101350+ }
101351+#endif
101352+}
101353+
101354 static void unmap_region(struct mm_struct *mm,
101355 struct vm_area_struct *vma, struct vm_area_struct *prev,
101356 unsigned long start, unsigned long end);
101357@@ -75,16 +86,25 @@ static void unmap_region(struct mm_struct *mm,
101358 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
101359 *
101360 */
101361-pgprot_t protection_map[16] = {
101362+pgprot_t protection_map[16] __read_only = {
101363 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
101364 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
101365 };
101366
101367-pgprot_t vm_get_page_prot(unsigned long vm_flags)
101368+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
101369 {
101370- return __pgprot(pgprot_val(protection_map[vm_flags &
101371+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
101372 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
101373 pgprot_val(arch_vm_get_page_prot(vm_flags)));
101374+
101375+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
101376+ if (!(__supported_pte_mask & _PAGE_NX) &&
101377+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
101378+ (vm_flags & (VM_READ | VM_WRITE)))
101379+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
101380+#endif
101381+
101382+ return prot;
101383 }
101384 EXPORT_SYMBOL(vm_get_page_prot);
101385
101386@@ -94,6 +114,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
101387 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
101388 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
101389 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
101390+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
101391 /*
101392 * Make sure vm_committed_as in one cacheline and not cacheline shared with
101393 * other variables. It can be updated by several CPUs frequently.
101394@@ -250,6 +271,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
101395 struct vm_area_struct *next = vma->vm_next;
101396
101397 might_sleep();
101398+ BUG_ON(vma->vm_mirror);
101399 if (vma->vm_ops && vma->vm_ops->close)
101400 vma->vm_ops->close(vma);
101401 if (vma->vm_file)
101402@@ -294,6 +316,12 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
101403 * not page aligned -Ram Gupta
101404 */
101405 rlim = rlimit(RLIMIT_DATA);
101406+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
101407+ /* force a minimum 16MB brk heap on setuid/setgid binaries */
101408+ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
101409+ rlim = 4096 * PAGE_SIZE;
101410+#endif
101411+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
101412 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
101413 (mm->end_data - mm->start_data) > rlim)
101414 goto out;
101415@@ -944,6 +972,12 @@ static int
101416 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
101417 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
101418 {
101419+
101420+#ifdef CONFIG_PAX_SEGMEXEC
101421+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
101422+ return 0;
101423+#endif
101424+
101425 if (is_mergeable_vma(vma, file, vm_flags) &&
101426 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
101427 if (vma->vm_pgoff == vm_pgoff)
101428@@ -963,6 +997,12 @@ static int
101429 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
101430 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
101431 {
101432+
101433+#ifdef CONFIG_PAX_SEGMEXEC
101434+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
101435+ return 0;
101436+#endif
101437+
101438 if (is_mergeable_vma(vma, file, vm_flags) &&
101439 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
101440 pgoff_t vm_pglen;
101441@@ -1005,13 +1045,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
101442 struct vm_area_struct *vma_merge(struct mm_struct *mm,
101443 struct vm_area_struct *prev, unsigned long addr,
101444 unsigned long end, unsigned long vm_flags,
101445- struct anon_vma *anon_vma, struct file *file,
101446+ struct anon_vma *anon_vma, struct file *file,
101447 pgoff_t pgoff, struct mempolicy *policy)
101448 {
101449 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
101450 struct vm_area_struct *area, *next;
101451 int err;
101452
101453+#ifdef CONFIG_PAX_SEGMEXEC
101454+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
101455+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
101456+
101457+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
101458+#endif
101459+
101460 /*
101461 * We later require that vma->vm_flags == vm_flags,
101462 * so this tests vma->vm_flags & VM_SPECIAL, too.
101463@@ -1027,6 +1074,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
101464 if (next && next->vm_end == end) /* cases 6, 7, 8 */
101465 next = next->vm_next;
101466
101467+#ifdef CONFIG_PAX_SEGMEXEC
101468+ if (prev)
101469+ prev_m = pax_find_mirror_vma(prev);
101470+ if (area)
101471+ area_m = pax_find_mirror_vma(area);
101472+ if (next)
101473+ next_m = pax_find_mirror_vma(next);
101474+#endif
101475+
101476 /*
101477 * Can it merge with the predecessor?
101478 */
101479@@ -1046,9 +1102,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
101480 /* cases 1, 6 */
101481 err = vma_adjust(prev, prev->vm_start,
101482 next->vm_end, prev->vm_pgoff, NULL);
101483- } else /* cases 2, 5, 7 */
101484+
101485+#ifdef CONFIG_PAX_SEGMEXEC
101486+ if (!err && prev_m)
101487+ err = vma_adjust(prev_m, prev_m->vm_start,
101488+ next_m->vm_end, prev_m->vm_pgoff, NULL);
101489+#endif
101490+
101491+ } else { /* cases 2, 5, 7 */
101492 err = vma_adjust(prev, prev->vm_start,
101493 end, prev->vm_pgoff, NULL);
101494+
101495+#ifdef CONFIG_PAX_SEGMEXEC
101496+ if (!err && prev_m)
101497+ err = vma_adjust(prev_m, prev_m->vm_start,
101498+ end_m, prev_m->vm_pgoff, NULL);
101499+#endif
101500+
101501+ }
101502 if (err)
101503 return NULL;
101504 khugepaged_enter_vma_merge(prev);
101505@@ -1062,12 +1133,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
101506 mpol_equal(policy, vma_policy(next)) &&
101507 can_vma_merge_before(next, vm_flags,
101508 anon_vma, file, pgoff+pglen)) {
101509- if (prev && addr < prev->vm_end) /* case 4 */
101510+ if (prev && addr < prev->vm_end) { /* case 4 */
101511 err = vma_adjust(prev, prev->vm_start,
101512 addr, prev->vm_pgoff, NULL);
101513- else /* cases 3, 8 */
101514+
101515+#ifdef CONFIG_PAX_SEGMEXEC
101516+ if (!err && prev_m)
101517+ err = vma_adjust(prev_m, prev_m->vm_start,
101518+ addr_m, prev_m->vm_pgoff, NULL);
101519+#endif
101520+
101521+ } else { /* cases 3, 8 */
101522 err = vma_adjust(area, addr, next->vm_end,
101523 next->vm_pgoff - pglen, NULL);
101524+
101525+#ifdef CONFIG_PAX_SEGMEXEC
101526+ if (!err && area_m)
101527+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
101528+ next_m->vm_pgoff - pglen, NULL);
101529+#endif
101530+
101531+ }
101532 if (err)
101533 return NULL;
101534 khugepaged_enter_vma_merge(area);
101535@@ -1176,8 +1262,10 @@ none:
101536 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
101537 struct file *file, long pages)
101538 {
101539- const unsigned long stack_flags
101540- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
101541+
101542+#ifdef CONFIG_PAX_RANDMMAP
101543+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
101544+#endif
101545
101546 mm->total_vm += pages;
101547
101548@@ -1185,7 +1273,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
101549 mm->shared_vm += pages;
101550 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
101551 mm->exec_vm += pages;
101552- } else if (flags & stack_flags)
101553+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
101554 mm->stack_vm += pages;
101555 }
101556 #endif /* CONFIG_PROC_FS */
101557@@ -1215,6 +1303,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
101558 locked += mm->locked_vm;
101559 lock_limit = rlimit(RLIMIT_MEMLOCK);
101560 lock_limit >>= PAGE_SHIFT;
101561+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
101562 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
101563 return -EAGAIN;
101564 }
101565@@ -1241,7 +1330,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
101566 * (the exception is when the underlying filesystem is noexec
 101567 * mounted, in which case we don't add PROT_EXEC.)
101568 */
101569- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
101570+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
101571 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
101572 prot |= PROT_EXEC;
101573
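/*
 * Editorial demo (hypothetical userspace program, not from the patch):
 * with the READ_IMPLIES_EXEC personality set, the hunk above now also
 * upgrades PROT_WRITE-only requests to PROT_EXEC, so legacy programs
 * that map a buffer writable and later jump into it keep working. The
 * same widening is applied in the sys_mprotect hunk further below.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <sys/personality.h>

int main(void)
{
	/* query-and-set idiom: 0xffffffff reads the current persona */
	personality(personality(0xffffffff) | READ_IMPLIES_EXEC);
	void *p = mmap(NULL, 4096, PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	/* on a patched kernel this mapping also becomes executable */
	printf("mapped at %p\n", p);
	return p == MAP_FAILED;
}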
101574@@ -1267,7 +1356,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
101575 /* Obtain the address to map to. we verify (or select) it and ensure
101576 * that it represents a valid section of the address space.
101577 */
101578- addr = get_unmapped_area(file, addr, len, pgoff, flags);
101579+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
101580 if (addr & ~PAGE_MASK)
101581 return addr;
101582
101583@@ -1278,6 +1367,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
101584 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
101585 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
101586
101587+#ifdef CONFIG_PAX_MPROTECT
101588+ if (mm->pax_flags & MF_PAX_MPROTECT) {
101589+
101590+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
101591+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
101592+ mm->binfmt->handle_mmap)
101593+ mm->binfmt->handle_mmap(file);
101594+#endif
101595+
101596+#ifndef CONFIG_PAX_MPROTECT_COMPAT
101597+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
101598+ gr_log_rwxmmap(file);
101599+
101600+#ifdef CONFIG_PAX_EMUPLT
101601+ vm_flags &= ~VM_EXEC;
101602+#else
101603+ return -EPERM;
101604+#endif
101605+
101606+ }
101607+
101608+ if (!(vm_flags & VM_EXEC))
101609+ vm_flags &= ~VM_MAYEXEC;
101610+#else
101611+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
101612+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
101613+#endif
101614+ else
101615+ vm_flags &= ~VM_MAYWRITE;
101616+ }
101617+#endif
101618+
101619+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
101620+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
101621+ vm_flags &= ~VM_PAGEEXEC;
101622+#endif
101623+
101624 if (flags & MAP_LOCKED)
101625 if (!can_do_mlock())
101626 return -EPERM;
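/*
 * Editorial demo (hypothetical userspace program, not from the patch):
 * on a kernel with CONFIG_PAX_MPROTECT enabled for this task (and
 * without EMUPLT or MPROTECT_COMPAT), the hunk above rejects any
 * mapping that is both writable and executable:
 */
#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED && errno == EPERM)
		puts("W|X mapping denied as expected under PAX_MPROTECT");
	return 0;
}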
101627@@ -1365,6 +1491,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
101628 vm_flags |= VM_NORESERVE;
101629 }
101630
101631+ if (!gr_acl_handle_mmap(file, prot))
101632+ return -EACCES;
101633+
101634 addr = mmap_region(file, addr, len, vm_flags, pgoff);
101635 if (!IS_ERR_VALUE(addr) &&
101636 ((vm_flags & VM_LOCKED) ||
101637@@ -1458,7 +1587,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
101638 vm_flags_t vm_flags = vma->vm_flags;
101639
101640 /* If it was private or non-writable, the write bit is already clear */
101641- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
101642+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
101643 return 0;
101644
101645 /* The backer wishes to know when pages are first written to? */
101646@@ -1504,7 +1633,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
101647 struct rb_node **rb_link, *rb_parent;
101648 unsigned long charged = 0;
101649
101650+#ifdef CONFIG_PAX_SEGMEXEC
101651+ struct vm_area_struct *vma_m = NULL;
101652+#endif
101653+
101654+ /*
101655+ * mm->mmap_sem is required to protect against another thread
101656+ * changing the mappings in case we sleep.
101657+ */
101658+ verify_mm_writelocked(mm);
101659+
101660 /* Check against address space limit. */
101661+
101662+#ifdef CONFIG_PAX_RANDMMAP
101663+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
101664+#endif
101665+
101666 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
101667 unsigned long nr_pages;
101668
101669@@ -1523,11 +1667,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
101670
101671 /* Clear old maps */
101672 error = -ENOMEM;
101673-munmap_back:
101674 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
101675 if (do_munmap(mm, addr, len))
101676 return -ENOMEM;
101677- goto munmap_back;
101678+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
101679 }
101680
101681 /*
101682@@ -1558,6 +1701,16 @@ munmap_back:
101683 goto unacct_error;
101684 }
101685
101686+#ifdef CONFIG_PAX_SEGMEXEC
101687+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
101688+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
101689+ if (!vma_m) {
101690+ error = -ENOMEM;
101691+ goto free_vma;
101692+ }
101693+ }
101694+#endif
101695+
101696 vma->vm_mm = mm;
101697 vma->vm_start = addr;
101698 vma->vm_end = addr + len;
101699@@ -1577,6 +1730,13 @@ munmap_back:
101700 if (error)
101701 goto unmap_and_free_vma;
101702
101703+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
101704+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
101705+ vma->vm_flags |= VM_PAGEEXEC;
101706+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
101707+ }
101708+#endif
101709+
101710 /* Can addr have changed??
101711 *
101712 * Answer: Yes, several device drivers can do it in their
101713@@ -1610,6 +1770,12 @@ munmap_back:
101714 }
101715
101716 vma_link(mm, vma, prev, rb_link, rb_parent);
101717+
101718+#ifdef CONFIG_PAX_SEGMEXEC
101719+ if (vma_m)
101720+ BUG_ON(pax_mirror_vma(vma_m, vma));
101721+#endif
101722+
101723 /* Once vma denies write, undo our temporary denial count */
101724 if (vm_flags & VM_DENYWRITE)
101725 allow_write_access(file);
101726@@ -1618,6 +1784,7 @@ out:
101727 perf_event_mmap(vma);
101728
101729 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
101730+ track_exec_limit(mm, addr, addr + len, vm_flags);
101731 if (vm_flags & VM_LOCKED) {
101732 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
101733 vma == get_gate_vma(current->mm)))
101734@@ -1650,6 +1817,12 @@ unmap_and_free_vma:
101735 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
101736 charged = 0;
101737 free_vma:
101738+
101739+#ifdef CONFIG_PAX_SEGMEXEC
101740+ if (vma_m)
101741+ kmem_cache_free(vm_area_cachep, vma_m);
101742+#endif
101743+
101744 kmem_cache_free(vm_area_cachep, vma);
101745 unacct_error:
101746 if (charged)
101747@@ -1657,7 +1830,63 @@ unacct_error:
101748 return error;
101749 }
101750
101751-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
101752+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
101753+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
101754+{
101755+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
101756+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
101757+
101758+ return 0;
101759+}
101760+#endif
101761+
101762+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
101763+{
101764+ if (!vma) {
101765+#ifdef CONFIG_STACK_GROWSUP
101766+ if (addr > sysctl_heap_stack_gap)
101767+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
101768+ else
101769+ vma = find_vma(current->mm, 0);
101770+ if (vma && (vma->vm_flags & VM_GROWSUP))
101771+ return false;
101772+#endif
101773+ return true;
101774+ }
101775+
101776+ if (addr + len > vma->vm_start)
101777+ return false;
101778+
101779+ if (vma->vm_flags & VM_GROWSDOWN)
101780+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
101781+#ifdef CONFIG_STACK_GROWSUP
101782+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
101783+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
101784+#endif
101785+ else if (offset)
101786+ return offset <= vma->vm_start - addr - len;
101787+
101788+ return true;
101789+}
101790+
101791+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
101792+{
101793+ if (vma->vm_start < len)
101794+ return -ENOMEM;
101795+
101796+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
101797+ if (offset <= vma->vm_start - len)
101798+ return vma->vm_start - len - offset;
101799+ else
101800+ return -ENOMEM;
101801+ }
101802+
101803+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
101804+ return vma->vm_start - len - sysctl_heap_stack_gap;
101805+ return -ENOMEM;
101806+}
101807+
101808+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
101809 {
101810 /*
101811 * We implement the search by looking for an rbtree node that
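/*
 * Editorial sketch: the three helpers added above implement the
 * tunable heap-to-stack gap. A simplified userspace model of
 * check_heap_stack_gap() (STACK_GROWSUP and the threadstack offset
 * omitted for brevity; names are illustrative):
 */
struct vma_model {
	unsigned long vm_start;
	int grows_down;	/* VM_GROWSDOWN */
};

static int gap_ok(const struct vma_model *next, unsigned long addr,
		  unsigned long len, unsigned long heap_stack_gap)
{
	if (!next)				/* nothing above the candidate */
		return 1;
	if (addr + len > next->vm_start)	/* plain overlap */
		return 0;
	if (next->grows_down)			/* keep the configured gap */
		return heap_stack_gap <= next->vm_start - addr - len;
	return 1;
}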
101812@@ -1705,11 +1934,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
101813 }
101814 }
101815
101816- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
101817+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
101818 check_current:
101819 /* Check if current node has a suitable gap */
101820 if (gap_start > high_limit)
101821 return -ENOMEM;
101822+
101823+ if (gap_end - gap_start > info->threadstack_offset)
101824+ gap_start += info->threadstack_offset;
101825+ else
101826+ gap_start = gap_end;
101827+
101828+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
101829+ if (gap_end - gap_start > sysctl_heap_stack_gap)
101830+ gap_start += sysctl_heap_stack_gap;
101831+ else
101832+ gap_start = gap_end;
101833+ }
101834+ if (vma->vm_flags & VM_GROWSDOWN) {
101835+ if (gap_end - gap_start > sysctl_heap_stack_gap)
101836+ gap_end -= sysctl_heap_stack_gap;
101837+ else
101838+ gap_end = gap_start;
101839+ }
101840 if (gap_end >= low_limit && gap_end - gap_start >= length)
101841 goto found;
101842
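/*
 * Editorial worked example for the gap shrinking above (numbers are
 * illustrative): with gap_start = 0x10000000, gap_end = 0x10100000
 * (a 1 MiB raw gap), threadstack_offset = 0x2000 and
 * sysctl_heap_stack_gap = 0x10000, a VM_GROWSDOWN vma after the gap
 * leaves 1 MiB - 8 KiB - 64 KiB usable. As a standalone function:
 */
static unsigned long usable_gap(unsigned long gap_start,
				unsigned long gap_end,
				unsigned long ts_off,
				unsigned long stack_gap,
				int next_grows_down)
{
	gap_start = (gap_end - gap_start > ts_off) ? gap_start + ts_off
						   : gap_end;
	if (next_grows_down)
		gap_end = (gap_end - gap_start > stack_gap)
			  ? gap_end - stack_gap : gap_start;
	return gap_end - gap_start;
}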
101843@@ -1759,7 +2006,7 @@ found:
101844 return gap_start;
101845 }
101846
101847-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
101848+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
101849 {
101850 struct mm_struct *mm = current->mm;
101851 struct vm_area_struct *vma;
101852@@ -1813,6 +2060,24 @@ check_current:
101853 gap_end = vma->vm_start;
101854 if (gap_end < low_limit)
101855 return -ENOMEM;
101856+
101857+ if (gap_end - gap_start > info->threadstack_offset)
101858+ gap_end -= info->threadstack_offset;
101859+ else
101860+ gap_end = gap_start;
101861+
101862+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
101863+ if (gap_end - gap_start > sysctl_heap_stack_gap)
101864+ gap_start += sysctl_heap_stack_gap;
101865+ else
101866+ gap_start = gap_end;
101867+ }
101868+ if (vma->vm_flags & VM_GROWSDOWN) {
101869+ if (gap_end - gap_start > sysctl_heap_stack_gap)
101870+ gap_end -= sysctl_heap_stack_gap;
101871+ else
101872+ gap_end = gap_start;
101873+ }
101874 if (gap_start <= high_limit && gap_end - gap_start >= length)
101875 goto found;
101876
101877@@ -1876,6 +2141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
101878 struct mm_struct *mm = current->mm;
101879 struct vm_area_struct *vma;
101880 struct vm_unmapped_area_info info;
101881+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
101882
101883 if (len > TASK_SIZE - mmap_min_addr)
101884 return -ENOMEM;
101885@@ -1883,11 +2149,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
101886 if (flags & MAP_FIXED)
101887 return addr;
101888
101889+#ifdef CONFIG_PAX_RANDMMAP
101890+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
101891+#endif
101892+
101893 if (addr) {
101894 addr = PAGE_ALIGN(addr);
101895 vma = find_vma(mm, addr);
101896 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
101897- (!vma || addr + len <= vma->vm_start))
101898+ check_heap_stack_gap(vma, addr, len, offset))
101899 return addr;
101900 }
101901
101902@@ -1896,6 +2166,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
101903 info.low_limit = mm->mmap_base;
101904 info.high_limit = TASK_SIZE;
101905 info.align_mask = 0;
101906+ info.threadstack_offset = offset;
101907 return vm_unmapped_area(&info);
101908 }
101909 #endif
101910@@ -1914,6 +2185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
101911 struct mm_struct *mm = current->mm;
101912 unsigned long addr = addr0;
101913 struct vm_unmapped_area_info info;
101914+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
101915
101916 /* requested length too big for entire address space */
101917 if (len > TASK_SIZE - mmap_min_addr)
101918@@ -1922,12 +2194,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
101919 if (flags & MAP_FIXED)
101920 return addr;
101921
101922+#ifdef CONFIG_PAX_RANDMMAP
101923+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
101924+#endif
101925+
101926 /* requesting a specific address */
101927 if (addr) {
101928 addr = PAGE_ALIGN(addr);
101929 vma = find_vma(mm, addr);
101930 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
101931- (!vma || addr + len <= vma->vm_start))
101932+ check_heap_stack_gap(vma, addr, len, offset))
101933 return addr;
101934 }
101935
101936@@ -1936,6 +2212,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
101937 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
101938 info.high_limit = mm->mmap_base;
101939 info.align_mask = 0;
101940+ info.threadstack_offset = offset;
101941 addr = vm_unmapped_area(&info);
101942
101943 /*
101944@@ -1948,6 +2225,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
101945 VM_BUG_ON(addr != -ENOMEM);
101946 info.flags = 0;
101947 info.low_limit = TASK_UNMAPPED_BASE;
101948+
101949+#ifdef CONFIG_PAX_RANDMMAP
101950+ if (mm->pax_flags & MF_PAX_RANDMMAP)
101951+ info.low_limit += mm->delta_mmap;
101952+#endif
101953+
101954 info.high_limit = TASK_SIZE;
101955 addr = vm_unmapped_area(&info);
101956 }
101957@@ -2048,6 +2331,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
101958 return vma;
101959 }
101960
101961+#ifdef CONFIG_PAX_SEGMEXEC
101962+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
101963+{
101964+ struct vm_area_struct *vma_m;
101965+
101966+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
101967+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
101968+ BUG_ON(vma->vm_mirror);
101969+ return NULL;
101970+ }
101971+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
101972+ vma_m = vma->vm_mirror;
101973+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
101974+ BUG_ON(vma->vm_file != vma_m->vm_file);
101975+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
101976+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
101977+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
101978+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
101979+ return vma_m;
101980+}
101981+#endif
101982+
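/*
 * Editorial picture of the SEGMEXEC layout the invariants above
 * describe (assumes the usual i386 3 GiB split, SEGMEXEC_TASK_SIZE =
 * 1.5 GiB, per the PaX documentation):
 *
 *   0                 SEGMEXEC_TASK_SIZE                  TASK_SIZE
 *   |  data half: all vmas live here  |  mirrors of VM_EXEC vmas  |
 *
 * pax_find_mirror_vma() maps a lower-half executable vma to its twin
 * at vma->vm_start + SEGMEXEC_TASK_SIZE; the BUG_ON chain spells out
 * the invariants that keep the pair in lockstep (same file, offset and
 * length, flags equal up to the write/account/lock bits).
 */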
101983 /*
101984 * Verify that the stack growth is acceptable and
101985 * update accounting. This is shared with both the
101986@@ -2064,6 +2369,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
101987 return -ENOMEM;
101988
101989 /* Stack limit test */
101990+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
101991 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
101992 return -ENOMEM;
101993
101994@@ -2074,6 +2380,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
101995 locked = mm->locked_vm + grow;
101996 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
101997 limit >>= PAGE_SHIFT;
101998+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
101999 if (locked > limit && !capable(CAP_IPC_LOCK))
102000 return -ENOMEM;
102001 }
102002@@ -2103,37 +2410,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
102003 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
102004 * vma is the last one with address > vma->vm_end. Have to extend vma.
102005 */
102006+#ifndef CONFIG_IA64
102007+static
102008+#endif
102009 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
102010 {
102011 int error;
102012+ bool locknext;
102013
102014 if (!(vma->vm_flags & VM_GROWSUP))
102015 return -EFAULT;
102016
102017+ /* Also guard against wrapping around to address 0. */
102018+ if (address < PAGE_ALIGN(address+1))
102019+ address = PAGE_ALIGN(address+1);
102020+ else
102021+ return -ENOMEM;
102022+
102023 /*
102024 * We must make sure the anon_vma is allocated
102025 * so that the anon_vma locking is not a noop.
102026 */
102027 if (unlikely(anon_vma_prepare(vma)))
102028 return -ENOMEM;
102029+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
102030+ if (locknext && anon_vma_prepare(vma->vm_next))
102031+ return -ENOMEM;
102032 vma_lock_anon_vma(vma);
102033+ if (locknext)
102034+ vma_lock_anon_vma(vma->vm_next);
102035
102036 /*
102037 * vma->vm_start/vm_end cannot change under us because the caller
102038 * is required to hold the mmap_sem in read mode. We need the
102039- * anon_vma lock to serialize against concurrent expand_stacks.
102040- * Also guard against wrapping around to address 0.
102041+ * anon_vma locks to serialize against concurrent expand_stacks
102042+ * and expand_upwards.
102043 */
102044- if (address < PAGE_ALIGN(address+4))
102045- address = PAGE_ALIGN(address+4);
102046- else {
102047- vma_unlock_anon_vma(vma);
102048- return -ENOMEM;
102049- }
102050 error = 0;
102051
102052 /* Somebody else might have raced and expanded it already */
102053- if (address > vma->vm_end) {
102054+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
102055+ error = -ENOMEM;
102056+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
102057 unsigned long size, grow;
102058
102059 size = address - vma->vm_start;
102060@@ -2168,6 +2486,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
102061 }
102062 }
102063 }
102064+ if (locknext)
102065+ vma_unlock_anon_vma(vma->vm_next);
102066 vma_unlock_anon_vma(vma);
102067 khugepaged_enter_vma_merge(vma);
102068 validate_mm(vma->vm_mm);
102069@@ -2182,6 +2502,8 @@ int expand_downwards(struct vm_area_struct *vma,
102070 unsigned long address)
102071 {
102072 int error;
102073+ bool lockprev = false;
102074+ struct vm_area_struct *prev;
102075
102076 /*
102077 * We must make sure the anon_vma is allocated
102078@@ -2195,6 +2517,15 @@ int expand_downwards(struct vm_area_struct *vma,
102079 if (error)
102080 return error;
102081
102082+ prev = vma->vm_prev;
102083+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
102084+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
102085+#endif
102086+ if (lockprev && anon_vma_prepare(prev))
102087+ return -ENOMEM;
102088+ if (lockprev)
102089+ vma_lock_anon_vma(prev);
102090+
102091 vma_lock_anon_vma(vma);
102092
102093 /*
102094@@ -2204,9 +2535,17 @@ int expand_downwards(struct vm_area_struct *vma,
102095 */
102096
102097 /* Somebody else might have raced and expanded it already */
102098- if (address < vma->vm_start) {
102099+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
102100+ error = -ENOMEM;
102101+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
102102 unsigned long size, grow;
102103
102104+#ifdef CONFIG_PAX_SEGMEXEC
102105+ struct vm_area_struct *vma_m;
102106+
102107+ vma_m = pax_find_mirror_vma(vma);
102108+#endif
102109+
102110 size = vma->vm_end - address;
102111 grow = (vma->vm_start - address) >> PAGE_SHIFT;
102112
102113@@ -2231,13 +2570,27 @@ int expand_downwards(struct vm_area_struct *vma,
102114 vma->vm_pgoff -= grow;
102115 anon_vma_interval_tree_post_update_vma(vma);
102116 vma_gap_update(vma);
102117+
102118+#ifdef CONFIG_PAX_SEGMEXEC
102119+ if (vma_m) {
102120+ anon_vma_interval_tree_pre_update_vma(vma_m);
102121+ vma_m->vm_start -= grow << PAGE_SHIFT;
102122+ vma_m->vm_pgoff -= grow;
102123+ anon_vma_interval_tree_post_update_vma(vma_m);
102124+ vma_gap_update(vma_m);
102125+ }
102126+#endif
102127+
102128 spin_unlock(&vma->vm_mm->page_table_lock);
102129
102130+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
102131 perf_event_mmap(vma);
102132 }
102133 }
102134 }
102135 vma_unlock_anon_vma(vma);
102136+ if (lockprev)
102137+ vma_unlock_anon_vma(prev);
102138 khugepaged_enter_vma_merge(vma);
102139 validate_mm(vma->vm_mm);
102140 return error;
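/*
 * Editorial note: the heap-stack gap is enforced from both directions.
 * The mmap-side helpers earlier reject placements too close to a stack
 * vma; the two expand_* hunks above make the stack itself refuse to
 * grow to within sysctl_heap_stack_gap bytes of an adjacent R/W/X
 * mapping. A minimal model of the downward-growth check:
 */
static int stack_may_grow_down(unsigned long prev_end,
			       unsigned long new_start,
			       unsigned long heap_stack_gap,
			       int prev_is_accessible)
{
	/* mirrors: address - prev->vm_end < sysctl_heap_stack_gap */
	return !prev_is_accessible ||
	       new_start - prev_end >= heap_stack_gap;
}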
102141@@ -2335,6 +2688,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
102142 do {
102143 long nrpages = vma_pages(vma);
102144
102145+#ifdef CONFIG_PAX_SEGMEXEC
102146+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
102147+ vma = remove_vma(vma);
102148+ continue;
102149+ }
102150+#endif
102151+
102152 if (vma->vm_flags & VM_ACCOUNT)
102153 nr_accounted += nrpages;
102154 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
102155@@ -2379,6 +2739,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
102156 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
102157 vma->vm_prev = NULL;
102158 do {
102159+
102160+#ifdef CONFIG_PAX_SEGMEXEC
102161+ if (vma->vm_mirror) {
102162+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
102163+ vma->vm_mirror->vm_mirror = NULL;
102164+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
102165+ vma->vm_mirror = NULL;
102166+ }
102167+#endif
102168+
102169 vma_rb_erase(vma, &mm->mm_rb);
102170 mm->map_count--;
102171 tail_vma = vma;
102172@@ -2406,14 +2776,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
102173 struct vm_area_struct *new;
102174 int err = -ENOMEM;
102175
102176+#ifdef CONFIG_PAX_SEGMEXEC
102177+ struct vm_area_struct *vma_m, *new_m = NULL;
102178+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
102179+#endif
102180+
102181 if (is_vm_hugetlb_page(vma) && (addr &
102182 ~(huge_page_mask(hstate_vma(vma)))))
102183 return -EINVAL;
102184
102185+#ifdef CONFIG_PAX_SEGMEXEC
102186+ vma_m = pax_find_mirror_vma(vma);
102187+#endif
102188+
102189 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
102190 if (!new)
102191 goto out_err;
102192
102193+#ifdef CONFIG_PAX_SEGMEXEC
102194+ if (vma_m) {
102195+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
102196+ if (!new_m) {
102197+ kmem_cache_free(vm_area_cachep, new);
102198+ goto out_err;
102199+ }
102200+ }
102201+#endif
102202+
102203 /* most fields are the same, copy all, and then fixup */
102204 *new = *vma;
102205
102206@@ -2426,6 +2815,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
102207 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
102208 }
102209
102210+#ifdef CONFIG_PAX_SEGMEXEC
102211+ if (vma_m) {
102212+ *new_m = *vma_m;
102213+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
102214+ new_m->vm_mirror = new;
102215+ new->vm_mirror = new_m;
102216+
102217+ if (new_below)
102218+ new_m->vm_end = addr_m;
102219+ else {
102220+ new_m->vm_start = addr_m;
102221+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
102222+ }
102223+ }
102224+#endif
102225+
102226 err = vma_dup_policy(vma, new);
102227 if (err)
102228 goto out_free_vma;
102229@@ -2445,6 +2850,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
102230 else
102231 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
102232
102233+#ifdef CONFIG_PAX_SEGMEXEC
102234+ if (!err && vma_m) {
102235+ struct mempolicy *pol = vma_policy(new);
102236+
102237+ if (anon_vma_clone(new_m, vma_m))
102238+ goto out_free_mpol;
102239+
102240+ mpol_get(pol);
102241+ set_vma_policy(new_m, pol);
102242+
102243+ if (new_m->vm_file)
102244+ get_file(new_m->vm_file);
102245+
102246+ if (new_m->vm_ops && new_m->vm_ops->open)
102247+ new_m->vm_ops->open(new_m);
102248+
102249+ if (new_below)
102250+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
102251+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
102252+ else
102253+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
102254+
102255+ if (err) {
102256+ if (new_m->vm_ops && new_m->vm_ops->close)
102257+ new_m->vm_ops->close(new_m);
102258+ if (new_m->vm_file)
102259+ fput(new_m->vm_file);
102260+ mpol_put(pol);
102261+ }
102262+ }
102263+#endif
102264+
102265 /* Success. */
102266 if (!err)
102267 return 0;
102268@@ -2454,10 +2891,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
102269 new->vm_ops->close(new);
102270 if (new->vm_file)
102271 fput(new->vm_file);
102272- unlink_anon_vmas(new);
102273 out_free_mpol:
102274 mpol_put(vma_policy(new));
102275 out_free_vma:
102276+
102277+#ifdef CONFIG_PAX_SEGMEXEC
102278+ if (new_m) {
102279+ unlink_anon_vmas(new_m);
102280+ kmem_cache_free(vm_area_cachep, new_m);
102281+ }
102282+#endif
102283+
102284+ unlink_anon_vmas(new);
102285 kmem_cache_free(vm_area_cachep, new);
102286 out_err:
102287 return err;
102288@@ -2470,6 +2915,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
102289 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
102290 unsigned long addr, int new_below)
102291 {
102292+
102293+#ifdef CONFIG_PAX_SEGMEXEC
102294+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
102295+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
102296+ if (mm->map_count >= sysctl_max_map_count-1)
102297+ return -ENOMEM;
102298+ } else
102299+#endif
102300+
102301 if (mm->map_count >= sysctl_max_map_count)
102302 return -ENOMEM;
102303
102304@@ -2481,11 +2935,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
102305 * work. This now handles partial unmappings.
102306 * Jeremy Fitzhardinge <jeremy@goop.org>
102307 */
102308+#ifdef CONFIG_PAX_SEGMEXEC
102309 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
102310 {
102311+ int ret = __do_munmap(mm, start, len);
102312+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
102313+ return ret;
102314+
102315+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
102316+}
102317+
102318+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
102319+#else
102320+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
102321+#endif
102322+{
102323 unsigned long end;
102324 struct vm_area_struct *vma, *prev, *last;
102325
102326+ /*
102327+ * mm->mmap_sem is required to protect against another thread
102328+ * changing the mappings in case we sleep.
102329+ */
102330+ verify_mm_writelocked(mm);
102331+
102332 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
102333 return -EINVAL;
102334
102335@@ -2560,6 +3033,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
102336 /* Fix up all other VM information */
102337 remove_vma_list(mm, vma);
102338
102339+ track_exec_limit(mm, start, end, 0UL);
102340+
102341 return 0;
102342 }
102343
102344@@ -2568,6 +3043,13 @@ int vm_munmap(unsigned long start, size_t len)
102345 int ret;
102346 struct mm_struct *mm = current->mm;
102347
102348+
102349+#ifdef CONFIG_PAX_SEGMEXEC
102350+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
102351+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
102352+ return -EINVAL;
102353+#endif
102354+
102355 down_write(&mm->mmap_sem);
102356 ret = do_munmap(mm, start, len);
102357 up_write(&mm->mmap_sem);
102358@@ -2581,16 +3063,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
102359 return vm_munmap(addr, len);
102360 }
102361
102362-static inline void verify_mm_writelocked(struct mm_struct *mm)
102363-{
102364-#ifdef CONFIG_DEBUG_VM
102365- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
102366- WARN_ON(1);
102367- up_read(&mm->mmap_sem);
102368- }
102369-#endif
102370-}
102371-
102372 /*
102373 * this is really a simplified "do_mmap". it only handles
102374 * anonymous maps. eventually we may be able to do some
102375@@ -2604,6 +3076,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
102376 struct rb_node ** rb_link, * rb_parent;
102377 pgoff_t pgoff = addr >> PAGE_SHIFT;
102378 int error;
102379+ unsigned long charged;
102380
102381 len = PAGE_ALIGN(len);
102382 if (!len)
102383@@ -2611,10 +3084,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
102384
102385 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
102386
102387+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
102388+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
102389+ flags &= ~VM_EXEC;
102390+
102391+#ifdef CONFIG_PAX_MPROTECT
102392+ if (mm->pax_flags & MF_PAX_MPROTECT)
102393+ flags &= ~VM_MAYEXEC;
102394+#endif
102395+
102396+ }
102397+#endif
102398+
102399 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
102400 if (error & ~PAGE_MASK)
102401 return error;
102402
102403+ charged = len >> PAGE_SHIFT;
102404+
102405 error = mlock_future_check(mm, mm->def_flags, len);
102406 if (error)
102407 return error;
102408@@ -2628,21 +3115,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
102409 /*
102410 * Clear old maps. this also does some error checking for us
102411 */
102412- munmap_back:
102413 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
102414 if (do_munmap(mm, addr, len))
102415 return -ENOMEM;
102416- goto munmap_back;
102417+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
102418 }
102419
102420 /* Check against address space limits *after* clearing old maps... */
102421- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
102422+ if (!may_expand_vm(mm, charged))
102423 return -ENOMEM;
102424
102425 if (mm->map_count > sysctl_max_map_count)
102426 return -ENOMEM;
102427
102428- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
102429+ if (security_vm_enough_memory_mm(mm, charged))
102430 return -ENOMEM;
102431
102432 /* Can we just expand an old private anonymous mapping? */
102433@@ -2656,7 +3142,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
102434 */
102435 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
102436 if (!vma) {
102437- vm_unacct_memory(len >> PAGE_SHIFT);
102438+ vm_unacct_memory(charged);
102439 return -ENOMEM;
102440 }
102441
102442@@ -2670,10 +3156,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
102443 vma_link(mm, vma, prev, rb_link, rb_parent);
102444 out:
102445 perf_event_mmap(vma);
102446- mm->total_vm += len >> PAGE_SHIFT;
102447+ mm->total_vm += charged;
102448 if (flags & VM_LOCKED)
102449- mm->locked_vm += (len >> PAGE_SHIFT);
102450+ mm->locked_vm += charged;
102451 vma->vm_flags |= VM_SOFTDIRTY;
102452+ track_exec_limit(mm, addr, addr + len, flags);
102453 return addr;
102454 }
102455
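/*
 * Editorial demo (hypothetical userspace program, not from the patch):
 * under PAGEEXEC/SEGMEXEC the do_brk() hunk above strips VM_EXEC (and,
 * with PAX_MPROTECT, VM_MAYEXEC) from the brk heap, so making heap
 * pages executable afterwards should fail:
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	uintptr_t brk0 = (uintptr_t)sbrk(0);
	uintptr_t aligned = (brk0 + page - 1) & ~(uintptr_t)(page - 1);

	/* grow the heap far enough to own one whole aligned page */
	if (sbrk(aligned + page - brk0) == (void *)-1)
		return 1;
	if (mprotect((void *)aligned, page, PROT_READ | PROT_EXEC) != 0)
		printf("mprotect(PROT_EXEC) on heap failed: errno=%d\n",
		       errno);
	return 0;
}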
102456@@ -2735,6 +3222,7 @@ void exit_mmap(struct mm_struct *mm)
102457 while (vma) {
102458 if (vma->vm_flags & VM_ACCOUNT)
102459 nr_accounted += vma_pages(vma);
102460+ vma->vm_mirror = NULL;
102461 vma = remove_vma(vma);
102462 }
102463 vm_unacct_memory(nr_accounted);
102464@@ -2752,6 +3240,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
102465 struct vm_area_struct *prev;
102466 struct rb_node **rb_link, *rb_parent;
102467
102468+#ifdef CONFIG_PAX_SEGMEXEC
102469+ struct vm_area_struct *vma_m = NULL;
102470+#endif
102471+
102472+ if (security_mmap_addr(vma->vm_start))
102473+ return -EPERM;
102474+
102475 /*
102476 * The vm_pgoff of a purely anonymous vma should be irrelevant
102477 * until its first write fault, when page's anon_vma and index
102478@@ -2775,7 +3270,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
102479 security_vm_enough_memory_mm(mm, vma_pages(vma)))
102480 return -ENOMEM;
102481
102482+#ifdef CONFIG_PAX_SEGMEXEC
102483+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
102484+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
102485+ if (!vma_m)
102486+ return -ENOMEM;
102487+ }
102488+#endif
102489+
102490 vma_link(mm, vma, prev, rb_link, rb_parent);
102491+
102492+#ifdef CONFIG_PAX_SEGMEXEC
102493+ if (vma_m)
102494+ BUG_ON(pax_mirror_vma(vma_m, vma));
102495+#endif
102496+
102497 return 0;
102498 }
102499
102500@@ -2794,6 +3303,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
102501 struct rb_node **rb_link, *rb_parent;
102502 bool faulted_in_anon_vma = true;
102503
102504+ BUG_ON(vma->vm_mirror);
102505+
102506 /*
102507 * If anonymous vma has not yet been faulted, update new pgoff
102508 * to match new location, to increase its chance of merging.
102509@@ -2858,6 +3369,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
102510 return NULL;
102511 }
102512
102513+#ifdef CONFIG_PAX_SEGMEXEC
102514+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
102515+{
102516+ struct vm_area_struct *prev_m;
102517+ struct rb_node **rb_link_m, *rb_parent_m;
102518+ struct mempolicy *pol_m;
102519+
102520+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
102521+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
102522+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
102523+ *vma_m = *vma;
102524+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
102525+ if (anon_vma_clone(vma_m, vma))
102526+ return -ENOMEM;
102527+ pol_m = vma_policy(vma_m);
102528+ mpol_get(pol_m);
102529+ set_vma_policy(vma_m, pol_m);
102530+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
102531+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
102532+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
102533+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
102534+ if (vma_m->vm_file)
102535+ get_file(vma_m->vm_file);
102536+ if (vma_m->vm_ops && vma_m->vm_ops->open)
102537+ vma_m->vm_ops->open(vma_m);
102538+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
102539+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
102540+ vma_m->vm_mirror = vma;
102541+ vma->vm_mirror = vma_m;
102542+ return 0;
102543+}
102544+#endif
102545+
102546 /*
102547 * Return true if the calling process may expand its vm space by the passed
102548 * number of pages
102549@@ -2869,6 +3413,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
102550
102551 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
102552
102553+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
102554 if (cur + npages > lim)
102555 return 0;
102556 return 1;
102557@@ -2951,6 +3496,22 @@ static struct vm_area_struct *__install_special_mapping(
102558 vma->vm_start = addr;
102559 vma->vm_end = addr + len;
102560
102561+#ifdef CONFIG_PAX_MPROTECT
102562+ if (mm->pax_flags & MF_PAX_MPROTECT) {
102563+#ifndef CONFIG_PAX_MPROTECT_COMPAT
102564+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
102565+ return ERR_PTR(-EPERM);
102566+ if (!(vm_flags & VM_EXEC))
102567+ vm_flags &= ~VM_MAYEXEC;
102568+#else
102569+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
102570+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
102571+#endif
102572+ else
102573+ vm_flags &= ~VM_MAYWRITE;
102574+ }
102575+#endif
102576+
102577 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
102578 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
102579
102580diff --git a/mm/mprotect.c b/mm/mprotect.c
102581index c43d557..0b7ccd2 100644
102582--- a/mm/mprotect.c
102583+++ b/mm/mprotect.c
102584@@ -24,10 +24,18 @@
102585 #include <linux/migrate.h>
102586 #include <linux/perf_event.h>
102587 #include <linux/ksm.h>
102588+#include <linux/sched/sysctl.h>
102589+
102590+#ifdef CONFIG_PAX_MPROTECT
102591+#include <linux/elf.h>
102592+#include <linux/binfmts.h>
102593+#endif
102594+
102595 #include <asm/uaccess.h>
102596 #include <asm/pgtable.h>
102597 #include <asm/cacheflush.h>
102598 #include <asm/tlbflush.h>
102599+#include <asm/mmu_context.h>
102600
102601 #ifndef pgprot_modify
102602 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
102603@@ -256,6 +264,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
102604 return pages;
102605 }
102606
102607+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
102608+/* called while holding the mmap semaphore for writing, except during stack expansion */
102609+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
102610+{
102611+ unsigned long oldlimit, newlimit = 0UL;
102612+
102613+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
102614+ return;
102615+
102616+ spin_lock(&mm->page_table_lock);
102617+ oldlimit = mm->context.user_cs_limit;
102618+ if ((prot & VM_EXEC) && oldlimit < end)
102619+ /* USER_CS limit moved up */
102620+ newlimit = end;
102621+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
102622+ /* USER_CS limit moved down */
102623+ newlimit = start;
102624+
102625+ if (newlimit) {
102626+ mm->context.user_cs_limit = newlimit;
102627+
102628+#ifdef CONFIG_SMP
102629+ wmb();
102630+ cpus_clear(mm->context.cpu_user_cs_mask);
102631+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
102632+#endif
102633+
102634+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
102635+ }
102636+ spin_unlock(&mm->page_table_lock);
102637+ if (newlimit == end) {
102638+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
102639+
102640+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
102641+ if (is_vm_hugetlb_page(vma))
102642+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
102643+ else
102644+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
102645+ }
102646+}
102647+#endif
102648+
102649 int
102650 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
102651 unsigned long start, unsigned long end, unsigned long newflags)
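/*
 * Editorial worked example for track_exec_limit() above (addresses are
 * illustrative): on i386 without hardware NX, PAGEEXEC tracks the
 * highest executable address in mm->context.user_cs_limit and resizes
 * the user code segment to match. Starting with user_cs_limit =
 * 0x08100000 (top of the executable image), an mmap of PROT_EXEC at
 * [0x40000000, 0x40010000) moves the limit up to 0x40010000 and
 * re-protects every vma between the old and new limit; an mprotect
 * removing exec from that region moves the limit back down to
 * 0x40000000, so code above the segment limit can no longer run.
 */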
102652@@ -268,11 +318,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
102653 int error;
102654 int dirty_accountable = 0;
102655
102656+#ifdef CONFIG_PAX_SEGMEXEC
102657+ struct vm_area_struct *vma_m = NULL;
102658+ unsigned long start_m, end_m;
102659+
102660+ start_m = start + SEGMEXEC_TASK_SIZE;
102661+ end_m = end + SEGMEXEC_TASK_SIZE;
102662+#endif
102663+
102664 if (newflags == oldflags) {
102665 *pprev = vma;
102666 return 0;
102667 }
102668
102669+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
102670+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
102671+
102672+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
102673+ return -ENOMEM;
102674+
102675+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
102676+ return -ENOMEM;
102677+ }
102678+
102679 /*
102680 * If we make a private mapping writable we increase our commit;
102681 * but (without finer accounting) cannot reduce our commit if we
102682@@ -289,6 +357,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
102683 }
102684 }
102685
102686+#ifdef CONFIG_PAX_SEGMEXEC
102687+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
102688+ if (start != vma->vm_start) {
102689+ error = split_vma(mm, vma, start, 1);
102690+ if (error)
102691+ goto fail;
102692+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
102693+ *pprev = (*pprev)->vm_next;
102694+ }
102695+
102696+ if (end != vma->vm_end) {
102697+ error = split_vma(mm, vma, end, 0);
102698+ if (error)
102699+ goto fail;
102700+ }
102701+
102702+ if (pax_find_mirror_vma(vma)) {
102703+ error = __do_munmap(mm, start_m, end_m - start_m);
102704+ if (error)
102705+ goto fail;
102706+ } else {
102707+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
102708+ if (!vma_m) {
102709+ error = -ENOMEM;
102710+ goto fail;
102711+ }
102712+ vma->vm_flags = newflags;
102713+ error = pax_mirror_vma(vma_m, vma);
102714+ if (error) {
102715+ vma->vm_flags = oldflags;
102716+ goto fail;
102717+ }
102718+ }
102719+ }
102720+#endif
102721+
102722 /*
102723 * First try to merge with previous and/or next vma.
102724 */
102725@@ -319,9 +423,21 @@ success:
102726 * vm_flags and vm_page_prot are protected by the mmap_sem
102727 * held in write mode.
102728 */
102729+
102730+#ifdef CONFIG_PAX_SEGMEXEC
102731+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
102732+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
102733+#endif
102734+
102735 vma->vm_flags = newflags;
102736+
102737+#ifdef CONFIG_PAX_MPROTECT
102738+ if (mm->binfmt && mm->binfmt->handle_mprotect)
102739+ mm->binfmt->handle_mprotect(vma, newflags);
102740+#endif
102741+
102742 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
102743- vm_get_page_prot(newflags));
102744+ vm_get_page_prot(vma->vm_flags));
102745
102746 if (vma_wants_writenotify(vma)) {
102747 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
102748@@ -360,6 +476,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
102749 end = start + len;
102750 if (end <= start)
102751 return -ENOMEM;
102752+
102753+#ifdef CONFIG_PAX_SEGMEXEC
102754+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
102755+ if (end > SEGMEXEC_TASK_SIZE)
102756+ return -EINVAL;
102757+ } else
102758+#endif
102759+
102760+ if (end > TASK_SIZE)
102761+ return -EINVAL;
102762+
102763 if (!arch_validate_prot(prot))
102764 return -EINVAL;
102765
102766@@ -367,7 +494,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
102767 /*
102768 * Does the application expect PROT_READ to imply PROT_EXEC:
102769 */
102770- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
102771+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
102772 prot |= PROT_EXEC;
102773
102774 vm_flags = calc_vm_prot_bits(prot);
102775@@ -399,6 +526,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
102776 if (start > vma->vm_start)
102777 prev = vma;
102778
102779+#ifdef CONFIG_PAX_MPROTECT
102780+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
102781+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
102782+#endif
102783+
102784 for (nstart = start ; ; ) {
102785 unsigned long newflags;
102786
102787@@ -409,6 +541,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
102788
102789 /* newflags >> 4 shift VM_MAY% in place of VM_% */
102790 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
102791+ if (prot & (PROT_WRITE | PROT_EXEC))
102792+ gr_log_rwxmprotect(vma);
102793+
102794+ error = -EACCES;
102795+ goto out;
102796+ }
102797+
102798+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
102799 error = -EACCES;
102800 goto out;
102801 }
102802@@ -423,6 +563,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
102803 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
102804 if (error)
102805 goto out;
102806+
102807+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
102808+
102809 nstart = tmp;
102810
102811 if (nstart < prev->vm_end)
102812diff --git a/mm/mremap.c b/mm/mremap.c
102813index 05f1180..c3cde48 100644
102814--- a/mm/mremap.c
102815+++ b/mm/mremap.c
102816@@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
102817 continue;
102818 pte = ptep_get_and_clear(mm, old_addr, old_pte);
102819 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
102820+
102821+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
102822+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
102823+ pte = pte_exprotect(pte);
102824+#endif
102825+
102826 pte = move_soft_dirty_pte(pte);
102827 set_pte_at(mm, new_addr, new_pte, pte);
102828 }
102829@@ -344,6 +350,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
102830 if (is_vm_hugetlb_page(vma))
102831 goto Einval;
102832
102833+#ifdef CONFIG_PAX_SEGMEXEC
102834+ if (pax_find_mirror_vma(vma))
102835+ goto Einval;
102836+#endif
102837+
102838 /* We can't remap across vm area boundaries */
102839 if (old_len > vma->vm_end - addr)
102840 goto Efault;
102841@@ -399,20 +410,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
102842 unsigned long ret = -EINVAL;
102843 unsigned long charged = 0;
102844 unsigned long map_flags;
102845+ unsigned long pax_task_size = TASK_SIZE;
102846
102847 if (new_addr & ~PAGE_MASK)
102848 goto out;
102849
102850- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
102851+#ifdef CONFIG_PAX_SEGMEXEC
102852+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
102853+ pax_task_size = SEGMEXEC_TASK_SIZE;
102854+#endif
102855+
102856+ pax_task_size -= PAGE_SIZE;
102857+
102858+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
102859 goto out;
102860
102861 /* Check if the location we're moving into overlaps the
102862 * old location at all, and fail if it does.
102863 */
102864- if ((new_addr <= addr) && (new_addr+new_len) > addr)
102865- goto out;
102866-
102867- if ((addr <= new_addr) && (addr+old_len) > new_addr)
102868+ if (addr + old_len > new_addr && new_addr + new_len > addr)
102869 goto out;
102870
102871 ret = do_munmap(mm, new_addr, new_len);
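/*
 * Editorial check for the rewrite above: the two one-sided tests it
 * replaces and the single predicate it installs are the standard
 * half-open interval overlap condition. A self-contained sketch:
 */
#include <assert.h>

static int overlaps(unsigned long a, unsigned long alen,
		    unsigned long b, unsigned long blen)
{
	/* [a, a+alen) and [b, b+blen) intersect */
	return a + alen > b && b + blen > a;
}

int main(void)
{
	assert(!overlaps(0x1000, 0x1000, 0x2000, 0x1000)); /* adjacent */
	assert( overlaps(0x1000, 0x1001, 0x2000, 0x1000)); /* 1 byte  */
	assert( overlaps(0x2000, 0x1000, 0x1000, 0x4000)); /* nested  */
	return 0;
}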
102872@@ -481,6 +497,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
102873 unsigned long ret = -EINVAL;
102874 unsigned long charged = 0;
102875 bool locked = false;
102876+ unsigned long pax_task_size = TASK_SIZE;
102877
102878 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
102879 return ret;
102880@@ -502,6 +519,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
102881 if (!new_len)
102882 return ret;
102883
102884+#ifdef CONFIG_PAX_SEGMEXEC
102885+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
102886+ pax_task_size = SEGMEXEC_TASK_SIZE;
102887+#endif
102888+
102889+ pax_task_size -= PAGE_SIZE;
102890+
102891+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
102892+ old_len > pax_task_size || addr > pax_task_size-old_len)
102893+ return ret;
102894+
102895 down_write(&current->mm->mmap_sem);
102896
102897 if (flags & MREMAP_FIXED) {
102898@@ -552,6 +580,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
102899 new_addr = addr;
102900 }
102901 ret = addr;
102902+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
102903 goto out;
102904 }
102905 }
102906@@ -575,7 +604,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
102907 goto out;
102908 }
102909
102910+ map_flags = vma->vm_flags;
102911 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
102912+ if (!(ret & ~PAGE_MASK)) {
102913+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
102914+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
102915+ }
102916 }
102917 out:
102918 if (ret & ~PAGE_MASK)
102919diff --git a/mm/nommu.c b/mm/nommu.c
102920index 4a852f6..4371a6b 100644
102921--- a/mm/nommu.c
102922+++ b/mm/nommu.c
102923@@ -70,7 +70,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
102924 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
102925 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
102926 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
102927-int heap_stack_gap = 0;
102928
102929 atomic_long_t mmap_pages_allocated;
102930
102931@@ -857,15 +856,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
102932 EXPORT_SYMBOL(find_vma);
102933
102934 /*
102935- * find a VMA
102936- * - we don't extend stack VMAs under NOMMU conditions
102937- */
102938-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
102939-{
102940- return find_vma(mm, addr);
102941-}
102942-
102943-/*
102944 * expand a stack to a given address
102945 * - not supported under NOMMU conditions
102946 */
102947@@ -1572,6 +1562,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
102948
102949 /* most fields are the same, copy all, and then fixup */
102950 *new = *vma;
102951+ INIT_LIST_HEAD(&new->anon_vma_chain);
102952 *region = *vma->vm_region;
102953 new->vm_region = region;
102954
102955@@ -2007,8 +1998,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
102956 }
102957 EXPORT_SYMBOL(generic_file_remap_pages);
102958
102959-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
102960- unsigned long addr, void *buf, int len, int write)
102961+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
102962+ unsigned long addr, void *buf, size_t len, int write)
102963 {
102964 struct vm_area_struct *vma;
102965
102966@@ -2049,8 +2040,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
102967 *
102968 * The caller must hold a reference on @mm.
102969 */
102970-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
102971- void *buf, int len, int write)
102972+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
102973+ void *buf, size_t len, int write)
102974 {
102975 return __access_remote_vm(NULL, mm, addr, buf, len, write);
102976 }
102977@@ -2059,7 +2050,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
102978 * Access another process' address space.
102979 * - source/target buffer must be kernel space
102980 */
102981-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
102982+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
102983 {
102984 struct mm_struct *mm;
102985
102986diff --git a/mm/page-writeback.c b/mm/page-writeback.c
102987index e0c9430..3c6bf79 100644
102988--- a/mm/page-writeback.c
102989+++ b/mm/page-writeback.c
102990@@ -667,7 +667,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
102991 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
102992 * - the bdi dirty thresh drops quickly due to change of JBOD workload
102993 */
102994-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
102995+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
102996 unsigned long thresh,
102997 unsigned long bg_thresh,
102998 unsigned long dirty,
102999diff --git a/mm/page_alloc.c b/mm/page_alloc.c
103000index ef44ad7..1056bc7 100644
103001--- a/mm/page_alloc.c
103002+++ b/mm/page_alloc.c
103003@@ -61,6 +61,7 @@
103004 #include <linux/page-debug-flags.h>
103005 #include <linux/hugetlb.h>
103006 #include <linux/sched/rt.h>
103007+#include <linux/random.h>
103008
103009 #include <asm/sections.h>
103010 #include <asm/tlbflush.h>
103011@@ -357,7 +358,7 @@ out:
103012 * This usage means that zero-order pages may not be compound.
103013 */
103014
103015-static void free_compound_page(struct page *page)
103016+void free_compound_page(struct page *page)
103017 {
103018 __free_pages_ok(page, compound_order(page));
103019 }
103020@@ -745,6 +746,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
103021 int i;
103022 int bad = 0;
103023
103024+#ifdef CONFIG_PAX_MEMORY_SANITIZE
103025+ unsigned long index = 1UL << order;
103026+#endif
103027+
103028 trace_mm_page_free(page, order);
103029 kmemcheck_free_shadow(page, order);
103030
103031@@ -761,6 +766,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
103032 debug_check_no_obj_freed(page_address(page),
103033 PAGE_SIZE << order);
103034 }
103035+
103036+#ifdef CONFIG_PAX_MEMORY_SANITIZE
103037+ for (; index; --index)
103038+ sanitize_highpage(page + index - 1);
103039+#endif
103040+
103041 arch_free_page(page, order);
103042 kernel_map_pages(page, 1 << order, 0);
103043
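/*
 * Editorial model (userspace sketch, not kernel code) of the
 * PAX_MEMORY_SANITIZE loop above: an order-n block is scrubbed page by
 * page at free time, so a later allocation never observes stale data;
 * this is also why the prep_new_page() hunk below can skip the
 * __GFP_ZERO pre-zeroing.
 */
#include <string.h>
#define PAGE_SZ 4096UL

static void sanitize_block(unsigned char *base, unsigned int order)
{
	unsigned long index;

	for (index = 1UL << order; index; --index)
		memset(base + (index - 1) * PAGE_SZ, 0, PAGE_SZ);
}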
103044@@ -784,6 +795,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
103045 local_irq_restore(flags);
103046 }
103047
103048+#ifdef CONFIG_PAX_LATENT_ENTROPY
103049+bool __meminitdata extra_latent_entropy;
103050+
103051+static int __init setup_pax_extra_latent_entropy(char *str)
103052+{
103053+ extra_latent_entropy = true;
103054+ return 0;
103055+}
103056+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
103057+
103058+volatile u64 latent_entropy __latent_entropy;
103059+EXPORT_SYMBOL(latent_entropy);
103060+#endif
103061+
103062 void __init __free_pages_bootmem(struct page *page, unsigned int order)
103063 {
103064 unsigned int nr_pages = 1 << order;
103065@@ -799,6 +824,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
103066 __ClearPageReserved(p);
103067 set_page_count(p, 0);
103068
103069+#ifdef CONFIG_PAX_LATENT_ENTROPY
103070+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
103071+ u64 hash = 0;
103072+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
103073+ const u64 *data = lowmem_page_address(page);
103074+
103075+ for (index = 0; index < end; index++)
103076+ hash ^= hash + data[index];
103077+ latent_entropy ^= hash;
103078+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
103079+ }
103080+#endif
103081+
103082 page_zone(page)->managed_pages += nr_pages;
103083 set_page_refcounted(page);
103084 __free_pages(page, order);
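/*
 * Editorial model of the boot-time harvest above: each bootmem page
 * below 4 GiB (pfn < 0x100000) is folded with the cheap mixer
 * hash ^= hash + data[i] and XORed into the latent entropy pool. As a
 * standalone function:
 */
#include <stddef.h>
#include <stdint.h>

static uint64_t fold_page(const uint64_t *data, size_t words)
{
	uint64_t hash = 0;
	size_t i;

	for (i = 0; i < words; i++)
		hash ^= hash + data[i];	/* non-cryptographic mixer */
	return hash;			/* caller XORs into the pool */
}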
103085@@ -927,8 +965,10 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
103086 arch_alloc_page(page, order);
103087 kernel_map_pages(page, 1 << order, 1);
103088
103089+#ifndef CONFIG_PAX_MEMORY_SANITIZE
103090 if (gfp_flags & __GFP_ZERO)
103091 prep_zero_page(page, order, gfp_flags);
103092+#endif
103093
103094 if (order && (gfp_flags & __GFP_COMP))
103095 prep_compound_page(page, order);
103096@@ -2427,7 +2467,7 @@ static void reset_alloc_batches(struct zonelist *zonelist,
103097 continue;
103098 mod_zone_page_state(zone, NR_ALLOC_BATCH,
103099 high_wmark_pages(zone) - low_wmark_pages(zone) -
103100- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
103101+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
103102 }
103103 }
103104
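
The free_pages_prepare() hunk above wipes every page of an order-N block before the block returns to the allocator, and the __free_pages_bootmem() hunk folds the contents of early low pages into a latent-entropy pool. A minimal userspace sketch of the free-time wipe follows; TOY_PAGE_SIZE, SANITIZE_BYTE and sanitize_on_free() are illustrative stand-ins for PAGE_SIZE, PAX_MEMORY_SANITIZE_VALUE and sanitize_highpage(), and this models the idea rather than reproducing kernel code.

/* Toy model of free-time page sanitization (userspace sketch). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TOY_PAGE_SIZE 64        /* stand-in for PAGE_SIZE */
#define SANITIZE_BYTE 0xFE      /* stand-in for PAX_MEMORY_SANITIZE_VALUE */

static void sanitize_on_free(uint8_t *block, unsigned int order)
{
	unsigned long index = 1UL << order;   /* same loop shape as the hunk */
	for (; index; --index)
		memset(block + (index - 1) * TOY_PAGE_SIZE,
		       SANITIZE_BYTE, TOY_PAGE_SIZE);
}

int main(void)
{
	unsigned int order = 2;               /* four "pages" */
	uint8_t *block = malloc(TOY_PAGE_SIZE << order);

	strcpy((char *)block, "secret");
	sanitize_on_free(block, order);
	printf("first byte after free-path wipe: 0x%02x\n", block[0]); /* 0xfe */
	free(block);
	return 0;
}

The countdown from 1UL << order matches the hunk, so an order-2 block has all four of its pages overwritten before any reuse.
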
103105diff --git a/mm/percpu.c b/mm/percpu.c
103106index 2ddf9a9..f8fc075 100644
103107--- a/mm/percpu.c
103108+++ b/mm/percpu.c
103109@@ -123,7 +123,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
103110 static unsigned int pcpu_high_unit_cpu __read_mostly;
103111
103112 /* the address of the first chunk which starts with the kernel static area */
103113-void *pcpu_base_addr __read_mostly;
103114+void *pcpu_base_addr __read_only;
103115 EXPORT_SYMBOL_GPL(pcpu_base_addr);
103116
103117 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
103118diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
103119index a8b9199..dfb79e0 100644
103120--- a/mm/pgtable-generic.c
103121+++ b/mm/pgtable-generic.c
103122@@ -195,7 +195,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
103123 pmd_t entry = *pmdp;
103124 if (pmd_numa(entry))
103125 entry = pmd_mknonnuma(entry);
103126- set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
103127+ set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
103128 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
103129 }
103130 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
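
The one-line pmdp_invalidate() change above fixes a read-modify-write hazard: the function copies *pmdp into entry and strips the NUMA bit from the copy, but the original code then re-read *pmdp when building the not-present value, discarding that adjustment. A compilable toy model of the difference, with F_PRESENT and F_NUMA as made-up flag bits:

/* Model of the pmdp_invalidate() RMW fix: operate on the copy, not
 * on a fresh read of the shared value. */
#include <assert.h>

#define F_PRESENT 0x1UL
#define F_NUMA    0x2UL

int main(void)
{
	unsigned long pmd = F_PRESENT | F_NUMA;   /* *pmdp */
	unsigned long entry = pmd;

	entry &= ~F_NUMA;                         /* pmd_mknonnuma(entry) */

	unsigned long buggy = pmd   & ~F_PRESENT; /* re-reads *pmdp: NUMA bit survives */
	unsigned long fixed = entry & ~F_PRESENT; /* uses the copy:  NUMA bit cleared  */

	assert(buggy & F_NUMA);
	assert(!(fixed & F_NUMA));
	return 0;
}
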
103131diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
103132index 5077afc..846c9ef 100644
103133--- a/mm/process_vm_access.c
103134+++ b/mm/process_vm_access.c
103135@@ -13,6 +13,7 @@
103136 #include <linux/uio.h>
103137 #include <linux/sched.h>
103138 #include <linux/highmem.h>
103139+#include <linux/security.h>
103140 #include <linux/ptrace.h>
103141 #include <linux/slab.h>
103142 #include <linux/syscalls.h>
103143@@ -157,19 +158,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
103144 ssize_t iov_len;
103145 size_t total_len = iov_iter_count(iter);
103146
103147+ return -ENOSYS; // PaX: until properly audited
103148+
103149 /*
103150 * Work out how many pages of struct pages we're going to need
103151 * when eventually calling get_user_pages
103152 */
103153 for (i = 0; i < riovcnt; i++) {
103154 iov_len = rvec[i].iov_len;
103155- if (iov_len > 0) {
103156- nr_pages_iov = ((unsigned long)rvec[i].iov_base
103157- + iov_len)
103158- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
103159- / PAGE_SIZE + 1;
103160- nr_pages = max(nr_pages, nr_pages_iov);
103161- }
103162+ if (iov_len <= 0)
103163+ continue;
103164+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
103165+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
103166+ nr_pages = max(nr_pages, nr_pages_iov);
103167 }
103168
103169 if (nr_pages == 0)
103170@@ -197,6 +198,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
103171 goto free_proc_pages;
103172 }
103173
103174+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
103175+ rc = -EPERM;
103176+ goto put_task_struct;
103177+ }
103178+
103179 mm = mm_access(task, PTRACE_MODE_ATTACH);
103180 if (!mm || IS_ERR(mm)) {
103181 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
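
The reshaped loop above computes an upper bound on how many pages each iovec can touch: the span of [base, base + len) is last_page - first_page + 1. A small sketch of the arithmetic, assuming 4 KiB pages (pages_spanned() is an illustrative name):

/* Worked example of the iovec page-span arithmetic (userspace sketch). */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
	return (base + len) / PAGE_SIZE - base / PAGE_SIZE + 1;
}

int main(void)
{
	/* 100 bytes inside one page -> 1 */
	printf("%lu\n", pages_spanned(0x1000, 100));
	/* 100 bytes straddling a page boundary -> 2 */
	printf("%lu\n", pages_spanned(0x1FC0, 100));
	return 0;
}

The bound is deliberately generous: when base + len lands exactly on a page boundary the formula counts one page too many, which is harmless for sizing the get_user_pages() buffer.
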
103182diff --git a/mm/rmap.c b/mm/rmap.c
103183index 22a4a76..9551288 100644
103184--- a/mm/rmap.c
103185+++ b/mm/rmap.c
103186@@ -164,6 +164,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
103187 struct anon_vma *anon_vma = vma->anon_vma;
103188 struct anon_vma_chain *avc;
103189
103190+#ifdef CONFIG_PAX_SEGMEXEC
103191+ struct anon_vma_chain *avc_m = NULL;
103192+#endif
103193+
103194 might_sleep();
103195 if (unlikely(!anon_vma)) {
103196 struct mm_struct *mm = vma->vm_mm;
103197@@ -173,6 +177,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
103198 if (!avc)
103199 goto out_enomem;
103200
103201+#ifdef CONFIG_PAX_SEGMEXEC
103202+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
103203+ if (!avc_m)
103204+ goto out_enomem_free_avc;
103205+#endif
103206+
103207 anon_vma = find_mergeable_anon_vma(vma);
103208 allocated = NULL;
103209 if (!anon_vma) {
103210@@ -186,6 +196,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
103211 /* page_table_lock to protect against threads */
103212 spin_lock(&mm->page_table_lock);
103213 if (likely(!vma->anon_vma)) {
103214+
103215+#ifdef CONFIG_PAX_SEGMEXEC
103216+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
103217+
103218+ if (vma_m) {
103219+ BUG_ON(vma_m->anon_vma);
103220+ vma_m->anon_vma = anon_vma;
103221+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
103222+ avc_m = NULL;
103223+ }
103224+#endif
103225+
103226 vma->anon_vma = anon_vma;
103227 anon_vma_chain_link(vma, avc, anon_vma);
103228 allocated = NULL;
103229@@ -196,12 +218,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
103230
103231 if (unlikely(allocated))
103232 put_anon_vma(allocated);
103233+
103234+#ifdef CONFIG_PAX_SEGMEXEC
103235+ if (unlikely(avc_m))
103236+ anon_vma_chain_free(avc_m);
103237+#endif
103238+
103239 if (unlikely(avc))
103240 anon_vma_chain_free(avc);
103241 }
103242 return 0;
103243
103244 out_enomem_free_avc:
103245+
103246+#ifdef CONFIG_PAX_SEGMEXEC
103247+ if (avc_m)
103248+ anon_vma_chain_free(avc_m);
103249+#endif
103250+
103251 anon_vma_chain_free(avc);
103252 out_enomem:
103253 return -ENOMEM;
103254@@ -237,7 +271,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
103255 * Attach the anon_vmas from src to dst.
103256 * Returns 0 on success, -ENOMEM on failure.
103257 */
103258-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
103259+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
103260 {
103261 struct anon_vma_chain *avc, *pavc;
103262 struct anon_vma *root = NULL;
103263@@ -270,7 +304,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
103264 * the corresponding VMA in the parent process is attached to.
103265 * Returns 0 on success, non-zero on failure.
103266 */
103267-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
103268+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
103269 {
103270 struct anon_vma_chain *avc;
103271 struct anon_vma *anon_vma;
103272@@ -374,8 +408,10 @@ static void anon_vma_ctor(void *data)
103273 void __init anon_vma_init(void)
103274 {
103275 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
103276- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
103277- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
103278+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
103279+ anon_vma_ctor);
103280+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
103281+ SLAB_PANIC|SLAB_NO_SANITIZE);
103282 }
103283
103284 /*
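
The anon_vma_prepare() hunks follow the standard allocate-outside-lock pattern: the extra chain element avc_m is allocated with GFP_KERNEL (which may sleep) before page_table_lock is taken, committed under the lock only if the SEGMEXEC mirror VMA actually exists, and released afterwards if it went unused. A userspace model with a pthread mutex; all names are illustrative:

/* Allocate-outside-lock, commit-under-lock, free-if-unused (sketch). */
#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

static struct node *chain;
static int mirror_present;   /* stands in for pax_find_mirror_vma() != NULL */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int add_with_reservation(void)
{
	/* both allocations happen before the lock, where sleeping is legal */
	struct node *avc   = malloc(sizeof(*avc));
	struct node *avc_m = malloc(sizeof(*avc_m));

	if (!avc || !avc_m)
		goto enomem;

	pthread_mutex_lock(&lock);        /* page_table_lock: no allocation here */
	avc->next = chain; chain = avc;
	if (mirror_present) {             /* mirror exists: commit the reserve */
		avc_m->next = chain; chain = avc_m;
		avc_m = NULL;
	}
	pthread_mutex_unlock(&lock);

	free(avc_m);                      /* unused reservation; free(NULL) is a no-op */
	return 0;

enomem:
	free(avc);
	free(avc_m);
	return -1;
}
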
103285diff --git a/mm/shmem.c b/mm/shmem.c
103286index af68b15..1227320 100644
103287--- a/mm/shmem.c
103288+++ b/mm/shmem.c
103289@@ -33,7 +33,7 @@
103290 #include <linux/swap.h>
103291 #include <linux/aio.h>
103292
103293-static struct vfsmount *shm_mnt;
103294+struct vfsmount *shm_mnt;
103295
103296 #ifdef CONFIG_SHMEM
103297 /*
103298@@ -77,7 +77,7 @@ static struct vfsmount *shm_mnt;
103299 #define BOGO_DIRENT_SIZE 20
103300
103301 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
103302-#define SHORT_SYMLINK_LEN 128
103303+#define SHORT_SYMLINK_LEN 64
103304
103305 /*
103306 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
103307@@ -2219,6 +2219,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
103308 static int shmem_xattr_validate(const char *name)
103309 {
103310 struct { const char *prefix; size_t len; } arr[] = {
103311+
103312+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
103313+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
103314+#endif
103315+
103316 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
103317 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
103318 };
103319@@ -2274,6 +2279,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
103320 if (err)
103321 return err;
103322
103323+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
103324+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
103325+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
103326+ return -EOPNOTSUPP;
103327+ if (size > 8)
103328+ return -EINVAL;
103329+ }
103330+#endif
103331+
103332 return simple_xattr_set(&info->xattrs, name, value, size, flags);
103333 }
103334
103335@@ -2586,8 +2600,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
103336 int err = -ENOMEM;
103337
103338 /* Round up to L1_CACHE_BYTES to resist false sharing */
103339- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
103340- L1_CACHE_BYTES), GFP_KERNEL);
103341+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
103342 if (!sbinfo)
103343 return -ENOMEM;
103344
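
The shmem xattr hunks admit the user.* namespace solely so PaX flags can be stored on tmpfs files: any other user.* attribute is rejected with -EOPNOTSUPP, and the flags value is capped at 8 bytes. A minimal model of the filter; it assumes the patch series defines XATTR_NAME_PAX_FLAGS as "user.pax.flags", and pax_xattr_filter() is an illustrative name:

/* Sketch of the setxattr filter added above. */
#include <errno.h>
#include <stddef.h>
#include <string.h>

#define XATTR_USER_PREFIX      "user."
#define XATTR_USER_PREFIX_LEN  (sizeof(XATTR_USER_PREFIX) - 1)
#define XATTR_NAME_PAX_FLAGS   "user.pax.flags"   /* assumed definition */

static int pax_xattr_filter(const char *name, size_t size)
{
	if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
		if (strcmp(name, XATTR_NAME_PAX_FLAGS))
			return -EOPNOTSUPP;  /* other user.* attrs rejected */
		if (size > 8)
			return -EINVAL;      /* flags value is at most 8 bytes */
	}
	return 0;
}
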
103345diff --git a/mm/slab.c b/mm/slab.c
103346index 3070b92..bcfff83 100644
103347--- a/mm/slab.c
103348+++ b/mm/slab.c
103349@@ -311,10 +311,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
103350 if ((x)->max_freeable < i) \
103351 (x)->max_freeable = i; \
103352 } while (0)
103353-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
103354-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
103355-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
103356-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
103357+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
103358+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
103359+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
103360+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
103361+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
103362+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
103363 #else
103364 #define STATS_INC_ACTIVE(x) do { } while (0)
103365 #define STATS_DEC_ACTIVE(x) do { } while (0)
103366@@ -331,6 +333,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
103367 #define STATS_INC_ALLOCMISS(x) do { } while (0)
103368 #define STATS_INC_FREEHIT(x) do { } while (0)
103369 #define STATS_INC_FREEMISS(x) do { } while (0)
103370+#define STATS_INC_SANITIZED(x) do { } while (0)
103371+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
103372 #endif
103373
103374 #if DEBUG
103375@@ -447,7 +451,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
103376 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
103377 */
103378 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
103379- const struct page *page, void *obj)
103380+ const struct page *page, const void *obj)
103381 {
103382 u32 offset = (obj - page->s_mem);
103383 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
103384@@ -1558,12 +1562,12 @@ void __init kmem_cache_init(void)
103385 */
103386
103387 kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
103388- kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
103389+ kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
103390
103391 if (INDEX_AC != INDEX_NODE)
103392 kmalloc_caches[INDEX_NODE] =
103393 create_kmalloc_cache("kmalloc-node",
103394- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
103395+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
103396
103397 slab_early_init = 0;
103398
103399@@ -3512,6 +3516,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
103400 struct array_cache *ac = cpu_cache_get(cachep);
103401
103402 check_irq_off();
103403+
103404+#ifdef CONFIG_PAX_MEMORY_SANITIZE
103405+ if (pax_sanitize_slab) {
103406+ if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
103407+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
103408+
103409+ if (cachep->ctor)
103410+ cachep->ctor(objp);
103411+
103412+ STATS_INC_SANITIZED(cachep);
103413+ } else
103414+ STATS_INC_NOT_SANITIZED(cachep);
103415+ }
103416+#endif
103417+
103418 kmemleak_free_recursive(objp, cachep->flags);
103419 objp = cache_free_debugcheck(cachep, objp, caller);
103420
103421@@ -3735,6 +3754,7 @@ void kfree(const void *objp)
103422
103423 if (unlikely(ZERO_OR_NULL_PTR(objp)))
103424 return;
103425+ VM_BUG_ON(!virt_addr_valid(objp));
103426 local_irq_save(flags);
103427 kfree_debugcheck(objp);
103428 c = virt_to_cache(objp);
103429@@ -4176,14 +4196,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
103430 }
103431 /* cpu stats */
103432 {
103433- unsigned long allochit = atomic_read(&cachep->allochit);
103434- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
103435- unsigned long freehit = atomic_read(&cachep->freehit);
103436- unsigned long freemiss = atomic_read(&cachep->freemiss);
103437+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
103438+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
103439+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
103440+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
103441
103442 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
103443 allochit, allocmiss, freehit, freemiss);
103444 }
103445+#ifdef CONFIG_PAX_MEMORY_SANITIZE
103446+ {
103447+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
103448+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
103449+
103450+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
103451+ }
103452+#endif
103453 #endif
103454 }
103455
103456@@ -4404,13 +4432,69 @@ static const struct file_operations proc_slabstats_operations = {
103457 static int __init slab_proc_init(void)
103458 {
103459 #ifdef CONFIG_DEBUG_SLAB_LEAK
103460- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
103461+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
103462 #endif
103463 return 0;
103464 }
103465 module_init(slab_proc_init);
103466 #endif
103467
103468+bool is_usercopy_object(const void *ptr)
103469+{
103470+ struct page *page;
103471+ struct kmem_cache *cachep;
103472+
103473+ if (ZERO_OR_NULL_PTR(ptr))
103474+ return false;
103475+
103476+ if (!slab_is_available())
103477+ return false;
103478+
103479+ if (!virt_addr_valid(ptr))
103480+ return false;
103481+
103482+ page = virt_to_head_page(ptr);
103483+
103484+ if (!PageSlab(page))
103485+ return false;
103486+
103487+ cachep = page->slab_cache;
103488+ return cachep->flags & SLAB_USERCOPY;
103489+}
103490+
103491+#ifdef CONFIG_PAX_USERCOPY
103492+const char *check_heap_object(const void *ptr, unsigned long n)
103493+{
103494+ struct page *page;
103495+ struct kmem_cache *cachep;
103496+ unsigned int objnr;
103497+ unsigned long offset;
103498+
103499+ if (ZERO_OR_NULL_PTR(ptr))
103500+ return "<null>";
103501+
103502+ if (!virt_addr_valid(ptr))
103503+ return NULL;
103504+
103505+ page = virt_to_head_page(ptr);
103506+
103507+ if (!PageSlab(page))
103508+ return NULL;
103509+
103510+ cachep = page->slab_cache;
103511+ if (!(cachep->flags & SLAB_USERCOPY))
103512+ return cachep->name;
103513+
103514+ objnr = obj_to_index(cachep, page, ptr);
103515+ BUG_ON(objnr >= cachep->num);
103516+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
103517+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
103518+ return NULL;
103519+
103520+ return cachep->name;
103521+}
103522+#endif
103523+
103524 /**
103525 * ksize - get the actual amount of memory allocated for a given object
103526 * @objp: Pointer to the object
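
check_heap_object() above is the SLAB half of the PAX_USERCOPY bounds check: find which object ptr falls in, compute the offset into that object, and allow the copy only if [offset, offset + n) stays within a single object. A worked userspace model with made-up cache geometry (OBJ_SIZE, SLAB_BASE and check_obj() are illustrative); the kernel computes objnr with a reciprocal divide, plain division is used here:

/* Worked model of the SLAB usercopy bounds check. */
#include <stdio.h>

#define OBJ_SIZE   192UL        /* cachep->object_size */
#define SLAB_BASE  0x10000UL    /* address of the first object in the page */

static const char *check_obj(unsigned long ptr, unsigned long n)
{
	unsigned long objnr  = (ptr - SLAB_BASE) / OBJ_SIZE;  /* obj_to_index() */
	unsigned long offset = ptr - (SLAB_BASE + objnr * OBJ_SIZE);

	if (offset <= OBJ_SIZE && n <= OBJ_SIZE - offset)
		return NULL;            /* copy stays inside one object */
	return "toy-cache";             /* would be cachep->name in the kernel */
}

int main(void)
{
	printf("%s\n", check_obj(SLAB_BASE + 192 + 10, 100) ? "reject" : "ok"); /* ok */
	printf("%s\n", check_obj(SLAB_BASE + 192 + 10, 200) ? "reject" : "ok"); /* reject */
	return 0;
}

Writing the test as n <= OBJ_SIZE - offset rather than offset + n <= OBJ_SIZE keeps a huge n from wrapping the sum.
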
103527diff --git a/mm/slab.h b/mm/slab.h
103528index 961a3fb..6b12514 100644
103529--- a/mm/slab.h
103530+++ b/mm/slab.h
103531@@ -32,6 +32,15 @@ extern struct list_head slab_caches;
103532 /* The slab cache that manages slab cache information */
103533 extern struct kmem_cache *kmem_cache;
103534
103535+#ifdef CONFIG_PAX_MEMORY_SANITIZE
103536+#ifdef CONFIG_X86_64
103537+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
103538+#else
103539+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
103540+#endif
103541+extern bool pax_sanitize_slab;
103542+#endif
103543+
103544 unsigned long calculate_alignment(unsigned long flags,
103545 unsigned long align, unsigned long size);
103546
103547@@ -67,7 +76,8 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
103548
103549 /* Legal flag mask for kmem_cache_create(), for various configurations */
103550 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
103551- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
103552+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
103553+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
103554
103555 #if defined(CONFIG_DEBUG_SLAB)
103556 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
103557@@ -251,6 +261,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
103558 return s;
103559
103560 page = virt_to_head_page(x);
103561+
103562+ BUG_ON(!PageSlab(page));
103563+
103564 cachep = page->slab_cache;
103565 if (slab_equal_or_root(cachep, s))
103566 return cachep;
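
The split sanitize byte in slab.h is usually explained by pointer poisoning: on x86-64 a pointer fetched from 0xFE-filled memory reads as 0xfefefefefefefefe, a non-canonical address that faults on any dereference, whereas an all-0xFF pattern decodes to the top of the canonical kernel range and could hit mapped memory. A sketch of the canonicality test, assuming 48-bit virtual addresses (canonical48() is an illustrative name):

/* Why 0xFE wins on x86-64: the resulting poison pointer is non-canonical. */
#include <stdint.h>
#include <stdio.h>

static int canonical48(uint64_t va)
{
	uint64_t top = va >> 47;        /* bits 63..47 must be all 0 or all 1 */
	return top == 0 || top == 0x1ffff;
}

int main(void)
{
	printf("0xfe fill canonical? %d\n", canonical48(0xfefefefefefefefeULL)); /* 0 */
	printf("0xff fill canonical? %d\n", canonical48(0xffffffffffffffffULL)); /* 1 */
	return 0;
}
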
103567diff --git a/mm/slab_common.c b/mm/slab_common.c
103568index d31c4ba..1121296 100644
103569--- a/mm/slab_common.c
103570+++ b/mm/slab_common.c
103571@@ -23,11 +23,22 @@
103572
103573 #include "slab.h"
103574
103575-enum slab_state slab_state;
103576+enum slab_state slab_state __read_only;
103577 LIST_HEAD(slab_caches);
103578 DEFINE_MUTEX(slab_mutex);
103579 struct kmem_cache *kmem_cache;
103580
103581+#ifdef CONFIG_PAX_MEMORY_SANITIZE
103582+bool pax_sanitize_slab __read_only = true;
103583+static int __init pax_sanitize_slab_setup(char *str)
103584+{
103585+ pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
103586+ printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
103587+ return 1;
103588+}
103589+__setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
103590+#endif
103591+
103592 #ifdef CONFIG_DEBUG_VM
103593 static int kmem_cache_sanity_check(const char *name, size_t size)
103594 {
103595@@ -158,7 +169,7 @@ do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
103596 if (err)
103597 goto out_free_cache;
103598
103599- s->refcount = 1;
103600+ atomic_set(&s->refcount, 1);
103601 list_add(&s->list, &slab_caches);
103602 out:
103603 if (err)
103604@@ -339,8 +350,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
103605
103606 mutex_lock(&slab_mutex);
103607
103608- s->refcount--;
103609- if (s->refcount)
103610+ if (!atomic_dec_and_test(&s->refcount))
103611 goto out_unlock;
103612
103613 if (memcg_cleanup_cache_params(s) != 0)
103614@@ -360,7 +370,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
103615 rcu_barrier();
103616
103617 memcg_free_cache_params(s);
103618-#ifdef SLAB_SUPPORTS_SYSFS
103619+#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
103620 sysfs_slab_remove(s);
103621 #else
103622 slab_kmem_cache_release(s);
103623@@ -416,7 +426,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
103624 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
103625 name, size, err);
103626
103627- s->refcount = -1; /* Exempt from merging for now */
103628+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
103629 }
103630
103631 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
103632@@ -429,7 +439,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
103633
103634 create_boot_cache(s, name, size, flags);
103635 list_add(&s->list, &slab_caches);
103636- s->refcount = 1;
103637+ atomic_set(&s->refcount, 1);
103638 return s;
103639 }
103640
103641@@ -441,6 +451,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
103642 EXPORT_SYMBOL(kmalloc_dma_caches);
103643 #endif
103644
103645+#ifdef CONFIG_PAX_USERCOPY_SLABS
103646+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
103647+EXPORT_SYMBOL(kmalloc_usercopy_caches);
103648+#endif
103649+
103650 /*
103651 * Conversion table for small slabs sizes / 8 to the index in the
103652 * kmalloc array. This is necessary for slabs < 192 since we have non power
103653@@ -505,6 +520,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
103654 return kmalloc_dma_caches[index];
103655
103656 #endif
103657+
103658+#ifdef CONFIG_PAX_USERCOPY_SLABS
103659+ if (unlikely((flags & GFP_USERCOPY)))
103660+ return kmalloc_usercopy_caches[index];
103661+
103662+#endif
103663+
103664 return kmalloc_caches[index];
103665 }
103666
103667@@ -561,7 +583,7 @@ void __init create_kmalloc_caches(unsigned long flags)
103668 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
103669 if (!kmalloc_caches[i]) {
103670 kmalloc_caches[i] = create_kmalloc_cache(NULL,
103671- 1 << i, flags);
103672+ 1 << i, SLAB_USERCOPY | flags);
103673 }
103674
103675 /*
103676@@ -570,10 +592,10 @@ void __init create_kmalloc_caches(unsigned long flags)
103677 * earlier power of two caches
103678 */
103679 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
103680- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
103681+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
103682
103683 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
103684- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
103685+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
103686 }
103687
103688 /* Kmalloc array is now usable */
103689@@ -606,6 +628,23 @@ void __init create_kmalloc_caches(unsigned long flags)
103690 }
103691 }
103692 #endif
103693+
103694+#ifdef CONFIG_PAX_USERCOPY_SLABS
103695+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
103696+ struct kmem_cache *s = kmalloc_caches[i];
103697+
103698+ if (s) {
103699+ int size = kmalloc_size(i);
103700+ char *n = kasprintf(GFP_NOWAIT,
103701+ "usercopy-kmalloc-%d", size);
103702+
103703+ BUG_ON(!n);
103704+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
103705+ size, SLAB_USERCOPY | flags);
103706+ }
103707+ }
103708+#endif
103709+
103710 }
103711 #endif /* !CONFIG_SLOB */
103712
103713@@ -664,6 +703,9 @@ void print_slabinfo_header(struct seq_file *m)
103714 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
103715 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
103716 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
103717+#ifdef CONFIG_PAX_MEMORY_SANITIZE
103718+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
103719+#endif
103720 #endif
103721 seq_putc(m, '\n');
103722 }
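
The refcount hunks above replace a plain integer with an atomic one so that "drop my reference and learn whether I was the last holder" is a single indivisible step; with the old s->refcount--; if (s->refcount) sequence, two racing kmem_cache_destroy() calls could each observe the other's intermediate state. A C11 model of the resulting put-side idiom (get_ref() and put_ref() are illustrative names):

/* Model of the atomic_dec_and_test() idiom the patch switches to. */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int refcount = 1;

static void get_ref(void)
{
	atomic_fetch_add(&refcount, 1);
}

static bool put_ref(void)   /* returns true for the final release */
{
	/* fetch_sub returns the old value: old == 1 means we hit zero */
	return atomic_fetch_sub(&refcount, 1) == 1;
}

Only the caller for which put_ref() returns true may tear the object down, mirroring the !atomic_dec_and_test(&s->refcount) test in the hunk.
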
103723diff --git a/mm/slob.c b/mm/slob.c
103724index 21980e0..ed9a648 100644
103725--- a/mm/slob.c
103726+++ b/mm/slob.c
103727@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
103728 /*
103729 * Return the size of a slob block.
103730 */
103731-static slobidx_t slob_units(slob_t *s)
103732+static slobidx_t slob_units(const slob_t *s)
103733 {
103734 if (s->units > 0)
103735 return s->units;
103736@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
103737 /*
103738 * Return the next free slob block pointer after this one.
103739 */
103740-static slob_t *slob_next(slob_t *s)
103741+static slob_t *slob_next(const slob_t *s)
103742 {
103743 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
103744 slobidx_t next;
103745@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
103746 /*
103747 * Returns true if s is the last free block in its page.
103748 */
103749-static int slob_last(slob_t *s)
103750+static int slob_last(const slob_t *s)
103751 {
103752 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
103753 }
103754
103755-static void *slob_new_pages(gfp_t gfp, int order, int node)
103756+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
103757 {
103758- void *page;
103759+ struct page *page;
103760
103761 #ifdef CONFIG_NUMA
103762 if (node != NUMA_NO_NODE)
103763@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
103764 if (!page)
103765 return NULL;
103766
103767- return page_address(page);
103768+ __SetPageSlab(page);
103769+ return page;
103770 }
103771
103772-static void slob_free_pages(void *b, int order)
103773+static void slob_free_pages(struct page *sp, int order)
103774 {
103775 if (current->reclaim_state)
103776 current->reclaim_state->reclaimed_slab += 1 << order;
103777- free_pages((unsigned long)b, order);
103778+ __ClearPageSlab(sp);
103779+ page_mapcount_reset(sp);
103780+ sp->private = 0;
103781+ __free_pages(sp, order);
103782 }
103783
103784 /*
103785@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
103786
103787 /* Not enough space: must allocate a new page */
103788 if (!b) {
103789- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
103790- if (!b)
103791+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
103792+ if (!sp)
103793 return NULL;
103794- sp = virt_to_page(b);
103795- __SetPageSlab(sp);
103796+ b = page_address(sp);
103797
103798 spin_lock_irqsave(&slob_lock, flags);
103799 sp->units = SLOB_UNITS(PAGE_SIZE);
103800 sp->freelist = b;
103801+ sp->private = 0;
103802 INIT_LIST_HEAD(&sp->lru);
103803 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
103804 set_slob_page_free(sp, slob_list);
103805@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
103806 if (slob_page_free(sp))
103807 clear_slob_page_free(sp);
103808 spin_unlock_irqrestore(&slob_lock, flags);
103809- __ClearPageSlab(sp);
103810- page_mapcount_reset(sp);
103811- slob_free_pages(b, 0);
103812+ slob_free_pages(sp, 0);
103813 return;
103814 }
103815
103816+#ifdef CONFIG_PAX_MEMORY_SANITIZE
103817+ if (pax_sanitize_slab)
103818+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
103819+#endif
103820+
103821 if (!slob_page_free(sp)) {
103822 /* This slob page is about to become partially free. Easy! */
103823 sp->units = units;
103824@@ -424,11 +431,10 @@ out:
103825 */
103826
103827 static __always_inline void *
103828-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
103829+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
103830 {
103831- unsigned int *m;
103832- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
103833- void *ret;
103834+ slob_t *m;
103835+ void *ret = NULL;
103836
103837 gfp &= gfp_allowed_mask;
103838
103839@@ -442,23 +448,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
103840
103841 if (!m)
103842 return NULL;
103843- *m = size;
103844+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
103845+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
103846+ m[0].units = size;
103847+ m[1].units = align;
103848 ret = (void *)m + align;
103849
103850 trace_kmalloc_node(caller, ret,
103851 size, size + align, gfp, node);
103852 } else {
103853 unsigned int order = get_order(size);
103854+ struct page *page;
103855
103856 if (likely(order))
103857 gfp |= __GFP_COMP;
103858- ret = slob_new_pages(gfp, order, node);
103859+ page = slob_new_pages(gfp, order, node);
103860+ if (page) {
103861+ ret = page_address(page);
103862+ page->private = size;
103863+ }
103864
103865 trace_kmalloc_node(caller, ret,
103866 size, PAGE_SIZE << order, gfp, node);
103867 }
103868
103869- kmemleak_alloc(ret, size, 1, gfp);
103870+ return ret;
103871+}
103872+
103873+static __always_inline void *
103874+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
103875+{
103876+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
103877+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
103878+
103879+ if (!ZERO_OR_NULL_PTR(ret))
103880+ kmemleak_alloc(ret, size, 1, gfp);
103881 return ret;
103882 }
103883
103884@@ -493,34 +517,112 @@ void kfree(const void *block)
103885 return;
103886 kmemleak_free(block);
103887
103888+ VM_BUG_ON(!virt_addr_valid(block));
103889 sp = virt_to_page(block);
103890- if (PageSlab(sp)) {
103891+ VM_BUG_ON(!PageSlab(sp));
103892+ if (!sp->private) {
103893 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
103894- unsigned int *m = (unsigned int *)(block - align);
103895- slob_free(m, *m + align);
103896- } else
103897+ slob_t *m = (slob_t *)(block - align);
103898+ slob_free(m, m[0].units + align);
103899+ } else {
103900+ __ClearPageSlab(sp);
103901+ page_mapcount_reset(sp);
103902+ sp->private = 0;
103903 __free_pages(sp, compound_order(sp));
103904+ }
103905 }
103906 EXPORT_SYMBOL(kfree);
103907
103908+bool is_usercopy_object(const void *ptr)
103909+{
103910+ if (!slab_is_available())
103911+ return false;
103912+
103913+ // PAX: TODO
103914+
103915+ return false;
103916+}
103917+
103918+#ifdef CONFIG_PAX_USERCOPY
103919+const char *check_heap_object(const void *ptr, unsigned long n)
103920+{
103921+ struct page *page;
103922+ const slob_t *free;
103923+ const void *base;
103924+ unsigned long flags;
103925+
103926+ if (ZERO_OR_NULL_PTR(ptr))
103927+ return "<null>";
103928+
103929+ if (!virt_addr_valid(ptr))
103930+ return NULL;
103931+
103932+ page = virt_to_head_page(ptr);
103933+ if (!PageSlab(page))
103934+ return NULL;
103935+
103936+ if (page->private) {
103937+ base = page;
103938+ if (base <= ptr && n <= page->private - (ptr - base))
103939+ return NULL;
103940+ return "<slob>";
103941+ }
103942+
103943+ /* some tricky double walking to find the chunk */
103944+ spin_lock_irqsave(&slob_lock, flags);
103945+ base = (void *)((unsigned long)ptr & PAGE_MASK);
103946+ free = page->freelist;
103947+
103948+ while (!slob_last(free) && (void *)free <= ptr) {
103949+ base = free + slob_units(free);
103950+ free = slob_next(free);
103951+ }
103952+
103953+ while (base < (void *)free) {
103954+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
103955+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
103956+ int offset;
103957+
103958+ if (ptr < base + align)
103959+ break;
103960+
103961+ offset = ptr - base - align;
103962+ if (offset >= m) {
103963+ base += size;
103964+ continue;
103965+ }
103966+
103967+ if (n > m - offset)
103968+ break;
103969+
103970+ spin_unlock_irqrestore(&slob_lock, flags);
103971+ return NULL;
103972+ }
103973+
103974+ spin_unlock_irqrestore(&slob_lock, flags);
103975+ return "<slob>";
103976+}
103977+#endif
103978+
103979 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
103980 size_t ksize(const void *block)
103981 {
103982 struct page *sp;
103983 int align;
103984- unsigned int *m;
103985+ slob_t *m;
103986
103987 BUG_ON(!block);
103988 if (unlikely(block == ZERO_SIZE_PTR))
103989 return 0;
103990
103991 sp = virt_to_page(block);
103992- if (unlikely(!PageSlab(sp)))
103993- return PAGE_SIZE << compound_order(sp);
103994+ VM_BUG_ON(!PageSlab(sp));
103995+ if (sp->private)
103996+ return sp->private;
103997
103998 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
103999- m = (unsigned int *)(block - align);
104000- return SLOB_UNITS(*m) * SLOB_UNIT;
104001+ m = (slob_t *)(block - align);
104002+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
104003 }
104004 EXPORT_SYMBOL(ksize);
104005
104006@@ -536,23 +638,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
104007
104008 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
104009 {
104010- void *b;
104011+ void *b = NULL;
104012
104013 flags &= gfp_allowed_mask;
104014
104015 lockdep_trace_alloc(flags);
104016
104017+#ifdef CONFIG_PAX_USERCOPY_SLABS
104018+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
104019+#else
104020 if (c->size < PAGE_SIZE) {
104021 b = slob_alloc(c->size, flags, c->align, node);
104022 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
104023 SLOB_UNITS(c->size) * SLOB_UNIT,
104024 flags, node);
104025 } else {
104026- b = slob_new_pages(flags, get_order(c->size), node);
104027+ struct page *sp;
104028+
104029+ sp = slob_new_pages(flags, get_order(c->size), node);
104030+ if (sp) {
104031+ b = page_address(sp);
104032+ sp->private = c->size;
104033+ }
104034 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
104035 PAGE_SIZE << get_order(c->size),
104036 flags, node);
104037 }
104038+#endif
104039
104040 if (b && c->ctor)
104041 c->ctor(b);
104042@@ -584,10 +696,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
104043
104044 static void __kmem_cache_free(void *b, int size)
104045 {
104046- if (size < PAGE_SIZE)
104047+ struct page *sp;
104048+
104049+ sp = virt_to_page(b);
104050+ BUG_ON(!PageSlab(sp));
104051+ if (!sp->private)
104052 slob_free(b, size);
104053 else
104054- slob_free_pages(b, get_order(size));
104055+ slob_free_pages(sp, get_order(size));
104056 }
104057
104058 static void kmem_rcu_free(struct rcu_head *head)
104059@@ -600,17 +716,31 @@ static void kmem_rcu_free(struct rcu_head *head)
104060
104061 void kmem_cache_free(struct kmem_cache *c, void *b)
104062 {
104063+ int size = c->size;
104064+
104065+#ifdef CONFIG_PAX_USERCOPY_SLABS
104066+ if (size + c->align < PAGE_SIZE) {
104067+ size += c->align;
104068+ b -= c->align;
104069+ }
104070+#endif
104071+
104072 kmemleak_free_recursive(b, c->flags);
104073 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
104074 struct slob_rcu *slob_rcu;
104075- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
104076- slob_rcu->size = c->size;
104077+ slob_rcu = b + (size - sizeof(struct slob_rcu));
104078+ slob_rcu->size = size;
104079 call_rcu(&slob_rcu->head, kmem_rcu_free);
104080 } else {
104081- __kmem_cache_free(b, c->size);
104082+ __kmem_cache_free(b, size);
104083 }
104084
104085+#ifdef CONFIG_PAX_USERCOPY_SLABS
104086+ trace_kfree(_RET_IP_, b);
104087+#else
104088 trace_kmem_cache_free(_RET_IP_, b);
104089+#endif
104090+
104091 }
104092 EXPORT_SYMBOL(kmem_cache_free);
104093
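
The SLOB rework above widens the per-allocation kmalloc header from one word to two slob_t units, m[0] holding the requested size and m[1] the alignment, so kfree() and ksize() can reconstruct the block geometry from the header alone; the BUILD_BUG_ONs in the hunk guarantee the minimum alignment leaves room for both units. A userspace model of the layout (the toy_* names are illustrative):

/* Model of the two-unit SLOB kmalloc header; assumes
 * align >= 2 * sizeof(slob_t), the BUILD_BUG_ON condition. */
#include <stdlib.h>

typedef struct { long units; } slob_t;   /* stand-in for the kernel's slob_t */

static void *toy_kmalloc(size_t size, size_t align)
{
	slob_t *m = malloc(align + size);  /* header occupies the first bytes */
	if (!m)
		return NULL;
	m[0].units = (long)size;           /* mirrors m[0].units = size  */
	m[1].units = (long)align;          /* mirrors m[1].units = align */
	return (char *)m + align;          /* mirrors ret = (void *)m + align */
}

static size_t toy_ksize(const void *p, size_t align)
{
	const slob_t *m = (const slob_t *)((const char *)p - align);
	return (size_t)m[0].units;
}

static void toy_kfree(void *p, size_t align)
{
	free((char *)p - align);
}
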
104094diff --git a/mm/slub.c b/mm/slub.c
104095index 7300480..cb92846 100644
104096--- a/mm/slub.c
104097+++ b/mm/slub.c
104098@@ -207,7 +207,7 @@ struct track {
104099
104100 enum track_item { TRACK_ALLOC, TRACK_FREE };
104101
104102-#ifdef CONFIG_SYSFS
104103+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
104104 static int sysfs_slab_add(struct kmem_cache *);
104105 static int sysfs_slab_alias(struct kmem_cache *, const char *);
104106 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
104107@@ -546,7 +546,7 @@ static void print_track(const char *s, struct track *t)
104108 if (!t->addr)
104109 return;
104110
104111- pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
104112+ pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
104113 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
104114 #ifdef CONFIG_STACKTRACE
104115 {
104116@@ -2673,6 +2673,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
104117
104118 slab_free_hook(s, x);
104119
104120+#ifdef CONFIG_PAX_MEMORY_SANITIZE
104121+ if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
104122+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
104123+ if (s->ctor)
104124+ s->ctor(x);
104125+ }
104126+#endif
104127+
104128 redo:
104129 /*
104130 * Determine the currently cpus per cpu slab.
104131@@ -2740,7 +2748,7 @@ static int slub_min_objects;
104132 * Merge control. If this is set then no merging of slab caches will occur.
104133 * (Could be removed. This was introduced to pacify the merge skeptics.)
104134 */
104135-static int slub_nomerge;
104136+static int slub_nomerge = 1;
104137
104138 /*
104139 * Calculate the order of allocation given an slab object size.
104140@@ -3019,6 +3027,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
104141 s->inuse = size;
104142
104143 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
104144+#ifdef CONFIG_PAX_MEMORY_SANITIZE
104145+ (pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) ||
104146+#endif
104147 s->ctor)) {
104148 /*
104149 * Relocate free pointer after the object if it is not
104150@@ -3347,6 +3358,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
104151 EXPORT_SYMBOL(__kmalloc_node);
104152 #endif
104153
104154+bool is_usercopy_object(const void *ptr)
104155+{
104156+ struct page *page;
104157+ struct kmem_cache *s;
104158+
104159+ if (ZERO_OR_NULL_PTR(ptr))
104160+ return false;
104161+
104162+ if (!slab_is_available())
104163+ return false;
104164+
104165+ if (!virt_addr_valid(ptr))
104166+ return false;
104167+
104168+ page = virt_to_head_page(ptr);
104169+
104170+ if (!PageSlab(page))
104171+ return false;
104172+
104173+ s = page->slab_cache;
104174+ return s->flags & SLAB_USERCOPY;
104175+}
104176+
104177+#ifdef CONFIG_PAX_USERCOPY
104178+const char *check_heap_object(const void *ptr, unsigned long n)
104179+{
104180+ struct page *page;
104181+ struct kmem_cache *s;
104182+ unsigned long offset;
104183+
104184+ if (ZERO_OR_NULL_PTR(ptr))
104185+ return "<null>";
104186+
104187+ if (!virt_addr_valid(ptr))
104188+ return NULL;
104189+
104190+ page = virt_to_head_page(ptr);
104191+
104192+ if (!PageSlab(page))
104193+ return NULL;
104194+
104195+ s = page->slab_cache;
104196+ if (!(s->flags & SLAB_USERCOPY))
104197+ return s->name;
104198+
104199+ offset = (ptr - page_address(page)) % s->size;
104200+ if (offset <= s->object_size && n <= s->object_size - offset)
104201+ return NULL;
104202+
104203+ return s->name;
104204+}
104205+#endif
104206+
104207 size_t ksize(const void *object)
104208 {
104209 struct page *page;
104210@@ -3375,6 +3439,7 @@ void kfree(const void *x)
104211 if (unlikely(ZERO_OR_NULL_PTR(x)))
104212 return;
104213
104214+ VM_BUG_ON(!virt_addr_valid(x));
104215 page = virt_to_head_page(x);
104216 if (unlikely(!PageSlab(page))) {
104217 BUG_ON(!PageCompound(page));
104218@@ -3680,7 +3745,7 @@ static int slab_unmergeable(struct kmem_cache *s)
104219 /*
104220 * We may have set a slab to be unmergeable during bootstrap.
104221 */
104222- if (s->refcount < 0)
104223+ if (atomic_read(&s->refcount) < 0)
104224 return 1;
104225
104226 return 0;
104227@@ -3737,7 +3802,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
104228 int i;
104229 struct kmem_cache *c;
104230
104231- s->refcount++;
104232+ atomic_inc(&s->refcount);
104233
104234 /*
104235 * Adjust the object sizes so that we clear
104236@@ -3756,7 +3821,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
104237 }
104238
104239 if (sysfs_slab_alias(s, name)) {
104240- s->refcount--;
104241+ atomic_dec(&s->refcount);
104242 s = NULL;
104243 }
104244 }
104245@@ -3873,7 +3938,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
104246 }
104247 #endif
104248
104249-#ifdef CONFIG_SYSFS
104250+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
104251 static int count_inuse(struct page *page)
104252 {
104253 return page->inuse;
104254@@ -4156,7 +4221,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
104255 len += sprintf(buf + len, "%7ld ", l->count);
104256
104257 if (l->addr)
104258+#ifdef CONFIG_GRKERNSEC_HIDESYM
104259+ len += sprintf(buf + len, "%pS", NULL);
104260+#else
104261 len += sprintf(buf + len, "%pS", (void *)l->addr);
104262+#endif
104263 else
104264 len += sprintf(buf + len, "<not-available>");
104265
104266@@ -4258,12 +4327,12 @@ static void resiliency_test(void)
104267 validate_slab_cache(kmalloc_caches[9]);
104268 }
104269 #else
104270-#ifdef CONFIG_SYSFS
104271+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
104272 static void resiliency_test(void) {};
104273 #endif
104274 #endif
104275
104276-#ifdef CONFIG_SYSFS
104277+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
104278 enum slab_stat_type {
104279 SL_ALL, /* All slabs */
104280 SL_PARTIAL, /* Only partially allocated slabs */
104281@@ -4503,13 +4572,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
104282 {
104283 if (!s->ctor)
104284 return 0;
104285+#ifdef CONFIG_GRKERNSEC_HIDESYM
104286+ return sprintf(buf, "%pS\n", NULL);
104287+#else
104288 return sprintf(buf, "%pS\n", s->ctor);
104289+#endif
104290 }
104291 SLAB_ATTR_RO(ctor);
104292
104293 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
104294 {
104295- return sprintf(buf, "%d\n", s->refcount - 1);
104296+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
104297 }
104298 SLAB_ATTR_RO(aliases);
104299
104300@@ -4597,6 +4670,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
104301 SLAB_ATTR_RO(cache_dma);
104302 #endif
104303
104304+#ifdef CONFIG_PAX_USERCOPY_SLABS
104305+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
104306+{
104307+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
104308+}
104309+SLAB_ATTR_RO(usercopy);
104310+#endif
104311+
104312 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
104313 {
104314 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
104315@@ -4931,6 +5012,9 @@ static struct attribute *slab_attrs[] = {
104316 #ifdef CONFIG_ZONE_DMA
104317 &cache_dma_attr.attr,
104318 #endif
104319+#ifdef CONFIG_PAX_USERCOPY_SLABS
104320+ &usercopy_attr.attr,
104321+#endif
104322 #ifdef CONFIG_NUMA
104323 &remote_node_defrag_ratio_attr.attr,
104324 #endif
104325@@ -5181,6 +5265,7 @@ static char *create_unique_id(struct kmem_cache *s)
104326 return name;
104327 }
104328
104329+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
104330 static int sysfs_slab_add(struct kmem_cache *s)
104331 {
104332 int err;
104333@@ -5254,6 +5339,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
104334 kobject_del(&s->kobj);
104335 kobject_put(&s->kobj);
104336 }
104337+#endif
104338
104339 /*
104340 * Need to buffer aliases during bootup until sysfs becomes
104341@@ -5267,6 +5353,7 @@ struct saved_alias {
104342
104343 static struct saved_alias *alias_list;
104344
104345+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
104346 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
104347 {
104348 struct saved_alias *al;
104349@@ -5289,6 +5376,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
104350 alias_list = al;
104351 return 0;
104352 }
104353+#endif
104354
104355 static int __init slab_sysfs_init(void)
104356 {
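
The SLUB variant of check_heap_object() exploits the allocator's regular layout: objects repeat every s->size bytes (object plus metadata), so the offset into the current object is the distance from the page base modulo that stride, compared against the usable s->object_size. A worked model with toy numbers (STRIDE, OBJ_SIZE and check() are illustrative):

/* Worked model of the SLUB usercopy bounds check. */
#include <stdio.h>

#define STRIDE    256UL   /* s->size: object plus metadata/red zone */
#define OBJ_SIZE  200UL   /* s->object_size: usable bytes */

static const char *check(unsigned long off_in_page, unsigned long n)
{
	unsigned long offset = off_in_page % STRIDE;

	if (offset <= OBJ_SIZE && n <= OBJ_SIZE - offset)
		return NULL;               /* inside one object */
	return "toy-slub-cache";           /* s->name in the kernel */
}

int main(void)
{
	printf("%s\n", check(256 + 50, 100) ? "reject" : "ok");  /* ok */
	printf("%s\n", check(256 + 50, 180) ? "reject" : "ok");  /* reject */
	printf("%s\n", check(256 + 210, 8)  ? "reject" : "ok");  /* reject: metadata */
	return 0;
}
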
104357diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
104358index 4cba9c2..b4f9fcc 100644
104359--- a/mm/sparse-vmemmap.c
104360+++ b/mm/sparse-vmemmap.c
104361@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
104362 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
104363 if (!p)
104364 return NULL;
104365- pud_populate(&init_mm, pud, p);
104366+ pud_populate_kernel(&init_mm, pud, p);
104367 }
104368 return pud;
104369 }
104370@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
104371 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
104372 if (!p)
104373 return NULL;
104374- pgd_populate(&init_mm, pgd, p);
104375+ pgd_populate_kernel(&init_mm, pgd, p);
104376 }
104377 return pgd;
104378 }
104379diff --git a/mm/sparse.c b/mm/sparse.c
104380index d1b48b6..6e8590e 100644
104381--- a/mm/sparse.c
104382+++ b/mm/sparse.c
104383@@ -750,7 +750,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
104384
104385 for (i = 0; i < PAGES_PER_SECTION; i++) {
104386 if (PageHWPoison(&memmap[i])) {
104387- atomic_long_sub(1, &num_poisoned_pages);
104388+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
104389 ClearPageHWPoison(&memmap[i]);
104390 }
104391 }
104392diff --git a/mm/swap.c b/mm/swap.c
104393index 9e8e347..3c22e0f 100644
104394--- a/mm/swap.c
104395+++ b/mm/swap.c
104396@@ -31,6 +31,7 @@
104397 #include <linux/memcontrol.h>
104398 #include <linux/gfp.h>
104399 #include <linux/uio.h>
104400+#include <linux/hugetlb.h>
104401
104402 #include "internal.h"
104403
104404@@ -76,6 +77,8 @@ static void __put_compound_page(struct page *page)
104405
104406 __page_cache_release(page);
104407 dtor = get_compound_page_dtor(page);
104408+ if (!PageHuge(page))
104409+ BUG_ON(dtor != free_compound_page);
104410 (*dtor)(page);
104411 }
104412
104413diff --git a/mm/swapfile.c b/mm/swapfile.c
104414index 4c524f7..f7601f17 100644
104415--- a/mm/swapfile.c
104416+++ b/mm/swapfile.c
104417@@ -84,7 +84,7 @@ static DEFINE_MUTEX(swapon_mutex);
104418
104419 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
104420 /* Activity counter to indicate that a swapon or swapoff has occurred */
104421-static atomic_t proc_poll_event = ATOMIC_INIT(0);
104422+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
104423
104424 static inline unsigned char swap_count(unsigned char ent)
104425 {
104426@@ -1945,7 +1945,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
104427 spin_unlock(&swap_lock);
104428
104429 err = 0;
104430- atomic_inc(&proc_poll_event);
104431+ atomic_inc_unchecked(&proc_poll_event);
104432 wake_up_interruptible(&proc_poll_wait);
104433
104434 out_dput:
104435@@ -1962,8 +1962,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
104436
104437 poll_wait(file, &proc_poll_wait, wait);
104438
104439- if (seq->poll_event != atomic_read(&proc_poll_event)) {
104440- seq->poll_event = atomic_read(&proc_poll_event);
104441+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
104442+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
104443 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
104444 }
104445
104446@@ -2061,7 +2061,7 @@ static int swaps_open(struct inode *inode, struct file *file)
104447 return ret;
104448
104449 seq = file->private_data;
104450- seq->poll_event = atomic_read(&proc_poll_event);
104451+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
104452 return 0;
104453 }
104454
104455@@ -2521,7 +2521,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
104456 (frontswap_map) ? "FS" : "");
104457
104458 mutex_unlock(&swapon_mutex);
104459- atomic_inc(&proc_poll_event);
104460+ atomic_inc_unchecked(&proc_poll_event);
104461 wake_up_interruptible(&proc_poll_wait);
104462
104463 if (S_ISREG(inode->i_mode))
104464diff --git a/mm/util.c b/mm/util.c
104465index 33e9f44..be026b2 100644
104466--- a/mm/util.c
104467+++ b/mm/util.c
104468@@ -296,6 +296,12 @@ done:
104469 void arch_pick_mmap_layout(struct mm_struct *mm)
104470 {
104471 mm->mmap_base = TASK_UNMAPPED_BASE;
104472+
104473+#ifdef CONFIG_PAX_RANDMMAP
104474+ if (mm->pax_flags & MF_PAX_RANDMMAP)
104475+ mm->mmap_base += mm->delta_mmap;
104476+#endif
104477+
104478 mm->get_unmapped_area = arch_get_unmapped_area;
104479 }
104480 #endif
104481@@ -472,6 +478,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
104482 if (!mm->arg_end)
104483 goto out_mm; /* Shh! No looking before we're done */
104484
104485+ if (gr_acl_handle_procpidmem(task))
104486+ goto out_mm;
104487+
104488 len = mm->arg_end - mm->arg_start;
104489
104490 if (len > buflen)
104491diff --git a/mm/vmalloc.c b/mm/vmalloc.c
104492index f64632b..e8c52e7 100644
104493--- a/mm/vmalloc.c
104494+++ b/mm/vmalloc.c
104495@@ -40,6 +40,21 @@ struct vfree_deferred {
104496 };
104497 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
104498
104499+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
104500+struct stack_deferred_llist {
104501+ struct llist_head list;
104502+ void *stack;
104503+ void *lowmem_stack;
104504+};
104505+
104506+struct stack_deferred {
104507+ struct stack_deferred_llist list;
104508+ struct work_struct wq;
104509+};
104510+
104511+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
104512+#endif
104513+
104514 static void __vunmap(const void *, int);
104515
104516 static void free_work(struct work_struct *w)
104517@@ -47,12 +62,30 @@ static void free_work(struct work_struct *w)
104518 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
104519 struct llist_node *llnode = llist_del_all(&p->list);
104520 while (llnode) {
104521- void *p = llnode;
104522+ void *x = llnode;
104523 llnode = llist_next(llnode);
104524- __vunmap(p, 1);
104525+ __vunmap(x, 1);
104526 }
104527 }
104528
104529+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
104530+static void unmap_work(struct work_struct *w)
104531+{
104532+ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
104533+ struct llist_node *llnode = llist_del_all(&p->list.list);
104534+ while (llnode) {
104535+ struct stack_deferred_llist *x =
104536+ llist_entry((struct llist_head *)llnode,
104537+ struct stack_deferred_llist, list);
104538+ void *stack = ACCESS_ONCE(x->stack);
104539+ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
104540+ llnode = llist_next(llnode);
104541+ __vunmap(stack, 0);
104542+ free_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
104543+ }
104544+}
104545+#endif
104546+
104547 /*** Page table manipulation functions ***/
104548
104549 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
104550@@ -61,8 +94,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
104551
104552 pte = pte_offset_kernel(pmd, addr);
104553 do {
104554- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
104555- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
104556+
104557+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
104558+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
104559+ BUG_ON(!pte_exec(*pte));
104560+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
104561+ continue;
104562+ }
104563+#endif
104564+
104565+ {
104566+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
104567+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
104568+ }
104569 } while (pte++, addr += PAGE_SIZE, addr != end);
104570 }
104571
104572@@ -122,16 +166,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
104573 pte = pte_alloc_kernel(pmd, addr);
104574 if (!pte)
104575 return -ENOMEM;
104576+
104577+ pax_open_kernel();
104578 do {
104579 struct page *page = pages[*nr];
104580
104581- if (WARN_ON(!pte_none(*pte)))
104582+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
104583+ if (pgprot_val(prot) & _PAGE_NX)
104584+#endif
104585+
104586+ if (!pte_none(*pte)) {
104587+ pax_close_kernel();
104588+ WARN_ON(1);
104589 return -EBUSY;
104590- if (WARN_ON(!page))
104591+ }
104592+ if (!page) {
104593+ pax_close_kernel();
104594+ WARN_ON(1);
104595 return -ENOMEM;
104596+ }
104597 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
104598 (*nr)++;
104599 } while (pte++, addr += PAGE_SIZE, addr != end);
104600+ pax_close_kernel();
104601 return 0;
104602 }
104603
104604@@ -141,7 +198,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
104605 pmd_t *pmd;
104606 unsigned long next;
104607
104608- pmd = pmd_alloc(&init_mm, pud, addr);
104609+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
104610 if (!pmd)
104611 return -ENOMEM;
104612 do {
104613@@ -158,7 +215,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
104614 pud_t *pud;
104615 unsigned long next;
104616
104617- pud = pud_alloc(&init_mm, pgd, addr);
104618+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
104619 if (!pud)
104620 return -ENOMEM;
104621 do {
104622@@ -218,6 +275,12 @@ int is_vmalloc_or_module_addr(const void *x)
104623 if (addr >= MODULES_VADDR && addr < MODULES_END)
104624 return 1;
104625 #endif
104626+
104627+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
104628+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
104629+ return 1;
104630+#endif
104631+
104632 return is_vmalloc_addr(x);
104633 }
104634
104635@@ -238,8 +301,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
104636
104637 if (!pgd_none(*pgd)) {
104638 pud_t *pud = pud_offset(pgd, addr);
104639+#ifdef CONFIG_X86
104640+ if (!pud_large(*pud))
104641+#endif
104642 if (!pud_none(*pud)) {
104643 pmd_t *pmd = pmd_offset(pud, addr);
104644+#ifdef CONFIG_X86
104645+ if (!pmd_large(*pmd))
104646+#endif
104647 if (!pmd_none(*pmd)) {
104648 pte_t *ptep, pte;
104649
104650@@ -1183,13 +1252,23 @@ void __init vmalloc_init(void)
104651 for_each_possible_cpu(i) {
104652 struct vmap_block_queue *vbq;
104653 struct vfree_deferred *p;
104654+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
104655+ struct stack_deferred *p2;
104656+#endif
104657
104658 vbq = &per_cpu(vmap_block_queue, i);
104659 spin_lock_init(&vbq->lock);
104660 INIT_LIST_HEAD(&vbq->free);
104661+
104662 p = &per_cpu(vfree_deferred, i);
104663 init_llist_head(&p->list);
104664 INIT_WORK(&p->wq, free_work);
104665+
104666+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
104667+ p2 = &per_cpu(stack_deferred, i);
104668+ init_llist_head(&p2->list.list);
104669+ INIT_WORK(&p2->wq, unmap_work);
104670+#endif
104671 }
104672
104673 /* Import existing vmlist entries. */
104674@@ -1318,6 +1397,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
104675 struct vm_struct *area;
104676
104677 BUG_ON(in_interrupt());
104678+
104679+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
104680+ if (flags & VM_KERNEXEC) {
104681+ if (start != VMALLOC_START || end != VMALLOC_END)
104682+ return NULL;
104683+ start = (unsigned long)MODULES_EXEC_VADDR;
104684+ end = (unsigned long)MODULES_EXEC_END;
104685+ }
104686+#endif
104687+
104688 if (flags & VM_IOREMAP)
104689 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
104690
104691@@ -1523,6 +1612,23 @@ void vunmap(const void *addr)
104692 }
104693 EXPORT_SYMBOL(vunmap);
104694
104695+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
104696+void unmap_process_stacks(struct task_struct *task)
104697+{
104698+ if (unlikely(in_interrupt())) {
104699+ struct stack_deferred *p = &__get_cpu_var(stack_deferred);
104700+ struct stack_deferred_llist *list = task->stack;
104701+ list->stack = task->stack;
104702+ list->lowmem_stack = task->lowmem_stack;
104703+ if (llist_add((struct llist_node *)&list->list, &p->list.list))
104704+ schedule_work(&p->wq);
104705+ } else {
104706+ __vunmap(task->stack, 0);
104707+ free_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
104708+ }
104709+}
104710+#endif
104711+
104712 /**
104713 * vmap - map an array of pages into virtually contiguous space
104714 * @pages: array of page pointers
104715@@ -1543,6 +1649,11 @@ void *vmap(struct page **pages, unsigned int count,
104716 if (count > totalram_pages)
104717 return NULL;
104718
104719+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
104720+ if (!(pgprot_val(prot) & _PAGE_NX))
104721+ flags |= VM_KERNEXEC;
104722+#endif
104723+
104724 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
104725 __builtin_return_address(0));
104726 if (!area)
104727@@ -1643,6 +1754,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
104728 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
104729 goto fail;
104730
104731+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
104732+ if (!(pgprot_val(prot) & _PAGE_NX))
104733+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
104734+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
104735+ else
104736+#endif
104737+
104738 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
104739 start, end, node, gfp_mask, caller);
104740 if (!area)
104741@@ -1819,10 +1937,9 @@ EXPORT_SYMBOL(vzalloc_node);
104742 * For tight control over page level allocator and protection flags
104743 * use __vmalloc() instead.
104744 */
104745-
104746 void *vmalloc_exec(unsigned long size)
104747 {
104748- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
104749+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
104750 NUMA_NO_NODE, __builtin_return_address(0));
104751 }
104752
104753@@ -2129,6 +2246,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
104754 {
104755 struct vm_struct *area;
104756
104757+ BUG_ON(vma->vm_mirror);
104758+
104759 size = PAGE_ALIGN(size);
104760
104761 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
104762@@ -2611,7 +2730,11 @@ static int s_show(struct seq_file *m, void *p)
104763 v->addr, v->addr + v->size, v->size);
104764
104765 if (v->caller)
104766+#ifdef CONFIG_GRKERNSEC_HIDESYM
104767+ seq_printf(m, " %pK", v->caller);
104768+#else
104769 seq_printf(m, " %pS", v->caller);
104770+#endif
104771
104772 if (v->nr_pages)
104773 seq_printf(m, " pages=%d", v->nr_pages);
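
The KSTACKOVERFLOW hunks defer stack teardown because __vunmap() may sleep while unmap_process_stacks() can run in interrupt context: the dying stack is pushed onto a per-CPU lock-free list and a workqueue item drains it later, with llist_add() reporting whether the list was empty so the worker is scheduled once per batch. A userspace sketch of that push/steal pair using C11 atomics (defer_free() and drain_all() are illustrative analogues of llist_add() and llist_del_all()):

/* Model of the lock-free push / steal-all deferral pattern. */
#include <stdatomic.h>
#include <stddef.h>

struct deferred {
	struct deferred *next;
	void *stack;
	void *lowmem_stack;
};

static _Atomic(struct deferred *) pending;

/* llist_add() analogue: safe from "interrupt" context */
static int defer_free(struct deferred *d)
{
	struct deferred *old = atomic_load(&pending);

	do {
		d->next = old;
	} while (!atomic_compare_exchange_weak(&pending, &old, d));
	return old == NULL;   /* list was empty: caller schedules the worker */
}

/* llist_del_all() analogue: the worker steals the whole list at once */
static struct deferred *drain_all(void)
{
	return atomic_exchange(&pending, NULL);
}
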
104774diff --git a/mm/vmstat.c b/mm/vmstat.c
104775index b37bd49..4d7b3da 100644
104776--- a/mm/vmstat.c
104777+++ b/mm/vmstat.c
104778@@ -20,6 +20,7 @@
104779 #include <linux/writeback.h>
104780 #include <linux/compaction.h>
104781 #include <linux/mm_inline.h>
104782+#include <linux/grsecurity.h>
104783
104784 #include "internal.h"
104785
104786@@ -79,7 +80,7 @@ void vm_events_fold_cpu(int cpu)
104787 *
104788 * vm_stat contains the global counters
104789 */
104790-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
104791+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
104792 EXPORT_SYMBOL(vm_stat);
104793
104794 #ifdef CONFIG_SMP
104795@@ -425,7 +426,7 @@ static inline void fold_diff(int *diff)
104796
104797 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
104798 if (diff[i])
104799- atomic_long_add(diff[i], &vm_stat[i]);
104800+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
104801 }
104802
104803 /*
104804@@ -457,7 +458,7 @@ static void refresh_cpu_vm_stats(void)
104805 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
104806 if (v) {
104807
104808- atomic_long_add(v, &zone->vm_stat[i]);
104809+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
104810 global_diff[i] += v;
104811 #ifdef CONFIG_NUMA
104812 /* 3 seconds idle till flush */
104813@@ -519,7 +520,7 @@ void cpu_vm_stats_fold(int cpu)
104814
104815 v = p->vm_stat_diff[i];
104816 p->vm_stat_diff[i] = 0;
104817- atomic_long_add(v, &zone->vm_stat[i]);
104818+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
104819 global_diff[i] += v;
104820 }
104821 }
104822@@ -539,8 +540,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
104823 if (pset->vm_stat_diff[i]) {
104824 int v = pset->vm_stat_diff[i];
104825 pset->vm_stat_diff[i] = 0;
104826- atomic_long_add(v, &zone->vm_stat[i]);
104827- atomic_long_add(v, &vm_stat[i]);
104828+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
104829+ atomic_long_add_unchecked(v, &vm_stat[i]);
104830 }
104831 }
104832 #endif
104833@@ -1162,10 +1163,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
104834 stat_items_size += sizeof(struct vm_event_state);
104835 #endif
104836
104837- v = kmalloc(stat_items_size, GFP_KERNEL);
104838+ v = kzalloc(stat_items_size, GFP_KERNEL);
104839 m->private = v;
104840 if (!v)
104841 return ERR_PTR(-ENOMEM);
104842+
104843+#ifdef CONFIG_GRKERNSEC_PROC_ADD
104844+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
104845+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
104846+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
104847+ && !in_group_p(grsec_proc_gid)
104848+#endif
104849+ )
104850+ return (unsigned long *)m->private + *pos;
104851+#endif
104852+#endif
104853+
104854 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
104855 v[i] = global_page_state(i);
104856 v += NR_VM_ZONE_STAT_ITEMS;
104857@@ -1314,10 +1327,16 @@ static int __init setup_vmstat(void)
104858 cpu_notifier_register_done();
104859 #endif
104860 #ifdef CONFIG_PROC_FS
104861- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
104862- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
104863- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
104864- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
104865+ {
104866+ mode_t gr_mode = S_IRUGO;
104867+#ifdef CONFIG_GRKERNSEC_PROC_ADD
104868+ gr_mode = S_IRUSR;
104869+#endif
104870+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
104871+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
104872+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
104873+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
104874+ }
104875 #endif
104876 return 0;
104877 }
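
Two recurring grsecurity idioms show up in these vmstat hunks. First, kmalloc() becomes kzalloc() because the new GRKERNSEC_PROC_ADD path can hand the buffer back before it is populated, and a zeroed buffer cannot leak stale heap contents to the reader. Second, the *_unchecked atomics: under PAX_REFCOUNT, ordinary atomic ops trap on overflow to catch reference-count wraps, so counters that are allowed to wrap (pure statistics such as vm_stat) are moved to an unchecked variant that keeps plain modular arithmetic. A conceptual sketch of the split (the real PaX implementation does this in the per-arch atomic code):

typedef struct {
	atomic_long_t a;
} atomic_long_unchecked_t;

static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *v)
{
	/* plain wrapping add: no overflow trap for statistics counters */
	atomic_long_add(i, &v->a);
}

static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *v)
{
	return atomic_long_read(&v->a);
}
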
104878diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
104879index 44ebd5c..1f732bae 100644
104880--- a/net/8021q/vlan.c
104881+++ b/net/8021q/vlan.c
104882@@ -475,7 +475,7 @@ out:
104883 return NOTIFY_DONE;
104884 }
104885
104886-static struct notifier_block vlan_notifier_block __read_mostly = {
104887+static struct notifier_block vlan_notifier_block = {
104888 .notifier_call = vlan_device_event,
104889 };
104890
104891@@ -550,8 +550,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
104892 err = -EPERM;
104893 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
104894 break;
104895- if ((args.u.name_type >= 0) &&
104896- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
104897+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
104898 struct vlan_net *vn;
104899
104900 vn = net_generic(net, vlan_net_id);
104901diff --git a/net/9p/client.c b/net/9p/client.c
104902index 0004cba..feba240 100644
104903--- a/net/9p/client.c
104904+++ b/net/9p/client.c
104905@@ -596,7 +596,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
104906 len - inline_len);
104907 } else {
104908 err = copy_from_user(ename + inline_len,
104909- uidata, len - inline_len);
104910+ (char __force_user *)uidata, len - inline_len);
104911 if (err) {
104912 err = -EFAULT;
104913 goto out_err;
104914@@ -1571,7 +1571,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
104915 kernel_buf = 1;
104916 indata = data;
104917 } else
104918- indata = (__force char *)udata;
104919+ indata = (__force_kernel char *)udata;
104920 /*
104921 * response header len is 11
104922 * PDU Header(7) + IO Size (4)
104923@@ -1646,7 +1646,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
104924 kernel_buf = 1;
104925 odata = data;
104926 } else
104927- odata = (char *)udata;
104928+ odata = (char __force_kernel *)udata;
104929 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
104930 P9_ZC_HDR_SZ, kernel_buf, "dqd",
104931 fid->fid, offset, rsize);
104932diff --git a/net/9p/mod.c b/net/9p/mod.c
104933index 6ab36ae..6f1841b 100644
104934--- a/net/9p/mod.c
104935+++ b/net/9p/mod.c
104936@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
104937 void v9fs_register_trans(struct p9_trans_module *m)
104938 {
104939 spin_lock(&v9fs_trans_lock);
104940- list_add_tail(&m->list, &v9fs_trans_list);
104941+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
104942 spin_unlock(&v9fs_trans_lock);
104943 }
104944 EXPORT_SYMBOL(v9fs_register_trans);
104945@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
104946 void v9fs_unregister_trans(struct p9_trans_module *m)
104947 {
104948 spin_lock(&v9fs_trans_lock);
104949- list_del_init(&m->list);
104950+ pax_list_del_init((struct list_head *)&m->list);
104951 spin_unlock(&v9fs_trans_lock);
104952 }
104953 EXPORT_SYMBOL(v9fs_unregister_trans);
104954diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
104955index 80d08f6..de63fd1 100644
104956--- a/net/9p/trans_fd.c
104957+++ b/net/9p/trans_fd.c
104958@@ -428,7 +428,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
104959 oldfs = get_fs();
104960 set_fs(get_ds());
104961 /* The cast to a user pointer is valid due to the set_fs() */
104962- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
104963+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
104964 set_fs(oldfs);
104965
104966 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
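
The 9p hunks are typical of PaX's userland/kernel address-space separation: __user and kernel pointers are distinct types, so a deliberate crossing, such as feeding a kernel buffer to vfs_write() under set_fs(KERNEL_DS), must be spelled with an explicit __force_user or __force_kernel cast rather than a bare __force one. Isolated, the annotated pattern looks like this (a sketch; file, kbuf, len and pos are placeholders):

mm_segment_t oldfs = get_fs();
ssize_t ret;

set_fs(KERNEL_DS);
/* the cast is legitimate only because set_fs() widened the limit */
ret = vfs_write(file, (void __force_user *)kbuf, len, &pos);
set_fs(oldfs);
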
104967diff --git a/net/Kconfig b/net/Kconfig
104968index d92afe4..ab63892 100644
104969--- a/net/Kconfig
104970+++ b/net/Kconfig
104971@@ -89,12 +89,8 @@ config NETWORK_SECMARK
104972 to nfmark, but designated for security purposes.
104973 If you are unsure how to answer this question, answer N.
104974
104975-config NET_PTP_CLASSIFY
104976- def_bool n
104977-
104978 config NETWORK_PHY_TIMESTAMPING
104979 bool "Timestamping in PHY devices"
104980- select NET_PTP_CLASSIFY
104981 help
104982 This allows timestamping of network packets by PHYs with
104983 hardware timestamping capabilities. This option adds some
104984@@ -269,7 +265,7 @@ config BQL
104985 config BPF_JIT
104986 bool "enable BPF Just In Time compiler"
104987 depends on HAVE_BPF_JIT
104988- depends on MODULES
104989+ depends on MODULES && X86
104990 ---help---
104991 Berkeley Packet Filter filtering capabilities are normally handled
104992 by an interpreter. This option allows kernel to generate a native
104993diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
104994index af46bc4..f9adfcd 100644
104995--- a/net/appletalk/atalk_proc.c
104996+++ b/net/appletalk/atalk_proc.c
104997@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
104998 struct proc_dir_entry *p;
104999 int rc = -ENOMEM;
105000
105001- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
105002+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
105003 if (!atalk_proc_dir)
105004 goto out;
105005
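
proc_mkdir_restrict(), used here and again in the can-bcm and can hunks below, is grsecurity's proc_mkdir() variant that withholds world read/execute permission when GRKERNSEC_PROC_USER or GRKERNSEC_PROC_USERGROUP is enabled, so unprivileged users cannot enumerate per-protocol state. Conceptually it reduces to something like this sketch (the real implementation also honors the configured grsec proc GID):

static inline struct proc_dir_entry *
proc_mkdir_restrict_sketch(const char *name, struct proc_dir_entry *parent)
{
	/* like proc_mkdir(), but traversable by root only */
	return proc_mkdir_mode(name, S_IRUSR | S_IXUSR, parent);
}
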
105006diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
105007index 876fbe8..8bbea9f 100644
105008--- a/net/atm/atm_misc.c
105009+++ b/net/atm/atm_misc.c
105010@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
105011 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
105012 return 1;
105013 atm_return(vcc, truesize);
105014- atomic_inc(&vcc->stats->rx_drop);
105015+ atomic_inc_unchecked(&vcc->stats->rx_drop);
105016 return 0;
105017 }
105018 EXPORT_SYMBOL(atm_charge);
105019@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
105020 }
105021 }
105022 atm_return(vcc, guess);
105023- atomic_inc(&vcc->stats->rx_drop);
105024+ atomic_inc_unchecked(&vcc->stats->rx_drop);
105025 return NULL;
105026 }
105027 EXPORT_SYMBOL(atm_alloc_charge);
105028@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
105029
105030 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
105031 {
105032-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
105033+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
105034 __SONET_ITEMS
105035 #undef __HANDLE_ITEM
105036 }
105037@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
105038
105039 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
105040 {
105041-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
105042+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
105043 __SONET_ITEMS
105044 #undef __HANDLE_ITEM
105045 }
105046diff --git a/net/atm/lec.c b/net/atm/lec.c
105047index 4c5b8ba..95f7005 100644
105048--- a/net/atm/lec.c
105049+++ b/net/atm/lec.c
105050@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
105051 }
105052
105053 static struct lane2_ops lane2_ops = {
105054- lane2_resolve, /* resolve, spec 3.1.3 */
105055- lane2_associate_req, /* associate_req, spec 3.1.4 */
105056- NULL /* associate indicator, spec 3.1.5 */
105057+ .resolve = lane2_resolve,
105058+ .associate_req = lane2_associate_req,
105059+ .associate_indicator = NULL
105060 };
105061
105062 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
105063diff --git a/net/atm/lec.h b/net/atm/lec.h
105064index 4149db1..f2ab682 100644
105065--- a/net/atm/lec.h
105066+++ b/net/atm/lec.h
105067@@ -48,7 +48,7 @@ struct lane2_ops {
105068 const u8 *tlvs, u32 sizeoftlvs);
105069 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
105070 const u8 *tlvs, u32 sizeoftlvs);
105071-};
105072+} __no_const;
105073
105074 /*
105075 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
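
__no_const is consumed by the PaX constify gcc plugin: structures consisting only of function pointers are automatically made const (so the pointers cannot be overwritten at runtime), and __no_const opts a type back out when instances must stay writable, as with lane2_ops, which is initialized above with a NULL associate_indicator slot. A compilable approximation; the attribute only has teeth under the plugin:

#ifndef __no_const
# define __no_const	/* no-op unless built with the constify plugin */
#endif

struct hook_ops {
	int  (*resolve)(void);
	void (*associate_indicator)(void);	/* may be filled in later */
} __no_const;
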
105076diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
105077index d1b2d9a..d549f7f 100644
105078--- a/net/atm/mpoa_caches.c
105079+++ b/net/atm/mpoa_caches.c
105080@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
105081
105082
105083 static struct in_cache_ops ingress_ops = {
105084- in_cache_add_entry, /* add_entry */
105085- in_cache_get, /* get */
105086- in_cache_get_with_mask, /* get_with_mask */
105087- in_cache_get_by_vcc, /* get_by_vcc */
105088- in_cache_put, /* put */
105089- in_cache_remove_entry, /* remove_entry */
105090- cache_hit, /* cache_hit */
105091- clear_count_and_expired, /* clear_count */
105092- check_resolving_entries, /* check_resolving */
105093- refresh_entries, /* refresh */
105094- in_destroy_cache /* destroy_cache */
105095+ .add_entry = in_cache_add_entry,
105096+ .get = in_cache_get,
105097+ .get_with_mask = in_cache_get_with_mask,
105098+ .get_by_vcc = in_cache_get_by_vcc,
105099+ .put = in_cache_put,
105100+ .remove_entry = in_cache_remove_entry,
105101+ .cache_hit = cache_hit,
105102+ .clear_count = clear_count_and_expired,
105103+ .check_resolving = check_resolving_entries,
105104+ .refresh = refresh_entries,
105105+ .destroy_cache = in_destroy_cache
105106 };
105107
105108 static struct eg_cache_ops egress_ops = {
105109- eg_cache_add_entry, /* add_entry */
105110- eg_cache_get_by_cache_id, /* get_by_cache_id */
105111- eg_cache_get_by_tag, /* get_by_tag */
105112- eg_cache_get_by_vcc, /* get_by_vcc */
105113- eg_cache_get_by_src_ip, /* get_by_src_ip */
105114- eg_cache_put, /* put */
105115- eg_cache_remove_entry, /* remove_entry */
105116- update_eg_cache_entry, /* update */
105117- clear_expired, /* clear_expired */
105118- eg_destroy_cache /* destroy_cache */
105119+ .add_entry = eg_cache_add_entry,
105120+ .get_by_cache_id = eg_cache_get_by_cache_id,
105121+ .get_by_tag = eg_cache_get_by_tag,
105122+ .get_by_vcc = eg_cache_get_by_vcc,
105123+ .get_by_src_ip = eg_cache_get_by_src_ip,
105124+ .put = eg_cache_put,
105125+ .remove_entry = eg_cache_remove_entry,
105126+ .update = update_eg_cache_entry,
105127+ .clear_expired = clear_expired,
105128+ .destroy_cache = eg_destroy_cache
105129 };
105130
105131
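
The wholesale rewrite of ingress_ops and egress_ops above is mechanical but meaningful: positional initializers bind values to fields by order, so any layout change (including randomization by the RANDSTRUCT plugin) silently wires callbacks to the wrong slots, while designated initializers bind by name and survive reordering. A toy illustration:

struct ops {
	int (*open)(void);
	int (*close)(void);
};

static int my_open(void)  { return 0; }
static int my_close(void) { return 1; }

/* breaks silently if 'open' and 'close' ever swap places */
static struct ops positional = { my_open, my_close };

/* correct under any field order */
static struct ops designated = {
	.open	= my_open,
	.close	= my_close,
};
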
105132diff --git a/net/atm/proc.c b/net/atm/proc.c
105133index bbb6461..cf04016 100644
105134--- a/net/atm/proc.c
105135+++ b/net/atm/proc.c
105136@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
105137 const struct k_atm_aal_stats *stats)
105138 {
105139 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
105140- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
105141- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
105142- atomic_read(&stats->rx_drop));
105143+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
105144+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
105145+ atomic_read_unchecked(&stats->rx_drop));
105146 }
105147
105148 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
105149diff --git a/net/atm/resources.c b/net/atm/resources.c
105150index 0447d5d..3cf4728 100644
105151--- a/net/atm/resources.c
105152+++ b/net/atm/resources.c
105153@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
105154 static void copy_aal_stats(struct k_atm_aal_stats *from,
105155 struct atm_aal_stats *to)
105156 {
105157-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
105158+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
105159 __AAL_STAT_ITEMS
105160 #undef __HANDLE_ITEM
105161 }
105162@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
105163 static void subtract_aal_stats(struct k_atm_aal_stats *from,
105164 struct atm_aal_stats *to)
105165 {
105166-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
105167+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
105168 __AAL_STAT_ITEMS
105169 #undef __HANDLE_ITEM
105170 }
105171diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
105172index 919a5ce..cc6b444 100644
105173--- a/net/ax25/sysctl_net_ax25.c
105174+++ b/net/ax25/sysctl_net_ax25.c
105175@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
105176 {
105177 char path[sizeof("net/ax25/") + IFNAMSIZ];
105178 int k;
105179- struct ctl_table *table;
105180+ ctl_table_no_const *table;
105181
105182 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
105183 if (!table)
105184diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
105185index f04224c..f326579 100644
105186--- a/net/batman-adv/bat_iv_ogm.c
105187+++ b/net/batman-adv/bat_iv_ogm.c
105188@@ -312,7 +312,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
105189
105190 /* randomize initial seqno to avoid collision */
105191 get_random_bytes(&random_seqno, sizeof(random_seqno));
105192- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
105193+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
105194
105195 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
105196 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
105197@@ -917,9 +917,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
105198 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
105199
105200 /* change sequence number to network order */
105201- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
105202+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
105203 batadv_ogm_packet->seqno = htonl(seqno);
105204- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
105205+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
105206
105207 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
105208
105209@@ -1596,7 +1596,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
105210 return;
105211
105212 /* could be changed by schedule_own_packet() */
105213- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
105214+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
105215
105216 if (ogm_packet->flags & BATADV_DIRECTLINK)
105217 has_directlink_flag = true;
105218diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
105219index 022d18a..919daff 100644
105220--- a/net/batman-adv/fragmentation.c
105221+++ b/net/batman-adv/fragmentation.c
105222@@ -450,7 +450,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
105223 frag_header.packet_type = BATADV_UNICAST_FRAG;
105224 frag_header.version = BATADV_COMPAT_VERSION;
105225 frag_header.ttl = BATADV_TTL;
105226- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
105227+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
105228 frag_header.reserved = 0;
105229 frag_header.no = 0;
105230 frag_header.total_size = htons(skb->len);
105231diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
105232index cbd677f..b783347 100644
105233--- a/net/batman-adv/soft-interface.c
105234+++ b/net/batman-adv/soft-interface.c
105235@@ -296,7 +296,7 @@ send:
105236 primary_if->net_dev->dev_addr);
105237
105238 /* set broadcast sequence number */
105239- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
105240+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
105241 bcast_packet->seqno = htonl(seqno);
105242
105243 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
105244@@ -761,7 +761,7 @@ static int batadv_softif_init_late(struct net_device *dev)
105245 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
105246
105247 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
105248- atomic_set(&bat_priv->bcast_seqno, 1);
105249+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
105250 atomic_set(&bat_priv->tt.vn, 0);
105251 atomic_set(&bat_priv->tt.local_changes, 0);
105252 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
105253@@ -775,7 +775,7 @@ static int batadv_softif_init_late(struct net_device *dev)
105254
105255 /* randomize initial seqno to avoid collision */
105256 get_random_bytes(&random_seqno, sizeof(random_seqno));
105257- atomic_set(&bat_priv->frag_seqno, random_seqno);
105258+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
105259
105260 bat_priv->primary_if = NULL;
105261 bat_priv->num_ifaces = 0;
105262diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
105263index 8854c05..ee5d5497 100644
105264--- a/net/batman-adv/types.h
105265+++ b/net/batman-adv/types.h
105266@@ -67,7 +67,7 @@ enum batadv_dhcp_recipient {
105267 struct batadv_hard_iface_bat_iv {
105268 unsigned char *ogm_buff;
105269 int ogm_buff_len;
105270- atomic_t ogm_seqno;
105271+ atomic_unchecked_t ogm_seqno;
105272 };
105273
105274 /**
105275@@ -768,7 +768,7 @@ struct batadv_priv {
105276 atomic_t bonding;
105277 atomic_t fragmentation;
105278 atomic_t packet_size_max;
105279- atomic_t frag_seqno;
105280+ atomic_unchecked_t frag_seqno;
105281 #ifdef CONFIG_BATMAN_ADV_BLA
105282 atomic_t bridge_loop_avoidance;
105283 #endif
105284@@ -787,7 +787,7 @@ struct batadv_priv {
105285 #endif
105286 uint32_t isolation_mark;
105287 uint32_t isolation_mark_mask;
105288- atomic_t bcast_seqno;
105289+ atomic_unchecked_t bcast_seqno;
105290 atomic_t bcast_queue_left;
105291 atomic_t batman_queue_left;
105292 char num_ifaces;
105293diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
105294index 80d25c1..aa99a98 100644
105295--- a/net/bluetooth/hci_sock.c
105296+++ b/net/bluetooth/hci_sock.c
105297@@ -1044,7 +1044,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
105298 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
105299 }
105300
105301- len = min_t(unsigned int, len, sizeof(uf));
105302+ len = min((size_t)len, sizeof(uf));
105303 if (copy_from_user(&uf, optval, len)) {
105304 err = -EFAULT;
105305 break;
105306diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
105307index 323f23c..5e27529 100644
105308--- a/net/bluetooth/l2cap_core.c
105309+++ b/net/bluetooth/l2cap_core.c
105310@@ -3548,8 +3548,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
105311 break;
105312
105313 case L2CAP_CONF_RFC:
105314- if (olen == sizeof(rfc))
105315- memcpy(&rfc, (void *)val, olen);
105316+ if (olen != sizeof(rfc))
105317+ break;
105318+
105319+ memcpy(&rfc, (void *)val, olen);
105320
105321 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
105322 rfc.mode != chan->mode)
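
The l2cap_core.c change tightens option parsing: previously a mis-sized L2CAP_CONF_RFC option was merely skipped by the memcpy() while parsing carried on with whatever rfc already held; the rewrite rejects the option outright, so the copy length is provably exact whenever the copy happens. The defensive shape, in isolation (a sketch; conf_rfc stands in for the real option struct):

struct conf_rfc rfc;

switch (type) {
case L2CAP_CONF_RFC:
	if (olen != sizeof(rfc))
		break;			/* refuse mis-sized options entirely */
	memcpy(&rfc, val, olen);	/* olen == sizeof(rfc) guaranteed */
	/* ... use rfc ... */
	break;
}
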
105323diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
105324index d0fd8b0..e33d2f9 100644
105325--- a/net/bluetooth/l2cap_sock.c
105326+++ b/net/bluetooth/l2cap_sock.c
105327@@ -628,7 +628,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
105328 struct sock *sk = sock->sk;
105329 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
105330 struct l2cap_options opts;
105331- int len, err = 0;
105332+ int err = 0;
105333+ size_t len = optlen;
105334 u32 opt;
105335
105336 BT_DBG("sk %p", sk);
105337@@ -655,7 +656,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
105338 opts.max_tx = chan->max_tx;
105339 opts.txwin_size = chan->tx_win;
105340
105341- len = min_t(unsigned int, sizeof(opts), optlen);
105342+ len = min(sizeof(opts), len);
105343 if (copy_from_user((char *) &opts, optval, len)) {
105344 err = -EFAULT;
105345 break;
105346@@ -742,7 +743,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
105347 struct bt_security sec;
105348 struct bt_power pwr;
105349 struct l2cap_conn *conn;
105350- int len, err = 0;
105351+ int err = 0;
105352+ size_t len = optlen;
105353 u32 opt;
105354
105355 BT_DBG("sk %p", sk);
105356@@ -766,7 +768,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
105357
105358 sec.level = BT_SECURITY_LOW;
105359
105360- len = min_t(unsigned int, sizeof(sec), optlen);
105361+ len = min(sizeof(sec), len);
105362 if (copy_from_user((char *) &sec, optval, len)) {
105363 err = -EFAULT;
105364 break;
105365@@ -861,7 +863,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
105366
105367 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
105368
105369- len = min_t(unsigned int, sizeof(pwr), optlen);
105370+ len = min(sizeof(pwr), len);
105371 if (copy_from_user((char *) &pwr, optval, len)) {
105372 err = -EFAULT;
105373 break;
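
All of these Bluetooth setsockopt hunks share one fix: the user-supplied option length is held in a size_t from the outset and clamped with plain min() against the destination size, instead of being funneled through an int and min_t(unsigned int, ...), where signed/unsigned conversions are easy to get subtly wrong. The resulting pattern, in isolation (sketch; opts stands in for the per-option struct):

size_t len = min(sizeof(opts), (size_t)optlen);

if (copy_from_user(&opts, optval, len))
	return -EFAULT;		/* copy can never exceed sizeof(opts) */
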
105374diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
105375index 8bbbb5e..6fc0950 100644
105376--- a/net/bluetooth/rfcomm/sock.c
105377+++ b/net/bluetooth/rfcomm/sock.c
105378@@ -687,7 +687,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
105379 struct sock *sk = sock->sk;
105380 struct bt_security sec;
105381 int err = 0;
105382- size_t len;
105383+ size_t len = optlen;
105384 u32 opt;
105385
105386 BT_DBG("sk %p", sk);
105387@@ -709,7 +709,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
105388
105389 sec.level = BT_SECURITY_LOW;
105390
105391- len = min_t(unsigned int, sizeof(sec), optlen);
105392+ len = min(sizeof(sec), len);
105393 if (copy_from_user((char *) &sec, optval, len)) {
105394 err = -EFAULT;
105395 break;
105396diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
105397index 8e385a0..a5bdd8e 100644
105398--- a/net/bluetooth/rfcomm/tty.c
105399+++ b/net/bluetooth/rfcomm/tty.c
105400@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
105401 BT_DBG("tty %p id %d", tty, tty->index);
105402
105403 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
105404- dev->channel, dev->port.count);
105405+ dev->channel, atomic_read(&dev->port.count));
105406
105407 err = tty_port_open(&dev->port, tty, filp);
105408 if (err)
105409@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
105410 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
105411
105412 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
105413- dev->port.count);
105414+ atomic_read(&dev->port.count));
105415
105416 tty_port_close(&dev->port, tty, filp);
105417 }
105418diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
105419index 1059ed3..d70846a 100644
105420--- a/net/bridge/netfilter/ebtables.c
105421+++ b/net/bridge/netfilter/ebtables.c
105422@@ -1524,7 +1524,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
105423 tmp.valid_hooks = t->table->valid_hooks;
105424 }
105425 mutex_unlock(&ebt_mutex);
105426- if (copy_to_user(user, &tmp, *len) != 0) {
105427+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
105428 BUGPRINT("c2u Didn't work\n");
105429 ret = -EFAULT;
105430 break;
105431@@ -2330,7 +2330,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
105432 goto out;
105433 tmp.valid_hooks = t->valid_hooks;
105434
105435- if (copy_to_user(user, &tmp, *len) != 0) {
105436+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
105437 ret = -EFAULT;
105438 break;
105439 }
105440@@ -2341,7 +2341,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
105441 tmp.entries_size = t->table->entries_size;
105442 tmp.valid_hooks = t->table->valid_hooks;
105443
105444- if (copy_to_user(user, &tmp, *len) != 0) {
105445+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
105446 ret = -EFAULT;
105447 break;
105448 }
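
The three ebtables hunks close an information leak: *len arrives from userspace, and copy_to_user(user, &tmp, *len) with an oversized *len would read past the fixed-size tmp structure and disclose adjacent kernel stack memory. Reduced to its essentials, the guarded form is (sketch):

struct ebt_replace tmp;		/* fixed-size, partially filled in */

if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len))
	return -EFAULT;		/* never copy beyond the struct itself */
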
105449diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
105450index 0f45522..dab651f 100644
105451--- a/net/caif/cfctrl.c
105452+++ b/net/caif/cfctrl.c
105453@@ -10,6 +10,7 @@
105454 #include <linux/spinlock.h>
105455 #include <linux/slab.h>
105456 #include <linux/pkt_sched.h>
105457+#include <linux/sched.h>
105458 #include <net/caif/caif_layer.h>
105459 #include <net/caif/cfpkt.h>
105460 #include <net/caif/cfctrl.h>
105461@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
105462 memset(&dev_info, 0, sizeof(dev_info));
105463 dev_info.id = 0xff;
105464 cfsrvl_init(&this->serv, 0, &dev_info, false);
105465- atomic_set(&this->req_seq_no, 1);
105466- atomic_set(&this->rsp_seq_no, 1);
105467+ atomic_set_unchecked(&this->req_seq_no, 1);
105468+ atomic_set_unchecked(&this->rsp_seq_no, 1);
105469 this->serv.layer.receive = cfctrl_recv;
105470 sprintf(this->serv.layer.name, "ctrl");
105471 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
105472@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
105473 struct cfctrl_request_info *req)
105474 {
105475 spin_lock_bh(&ctrl->info_list_lock);
105476- atomic_inc(&ctrl->req_seq_no);
105477- req->sequence_no = atomic_read(&ctrl->req_seq_no);
105478+ atomic_inc_unchecked(&ctrl->req_seq_no);
105479+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
105480 list_add_tail(&req->list, &ctrl->list);
105481 spin_unlock_bh(&ctrl->info_list_lock);
105482 }
105483@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
105484 if (p != first)
105485 pr_warn("Requests are not received in order\n");
105486
105487- atomic_set(&ctrl->rsp_seq_no,
105488+ atomic_set_unchecked(&ctrl->rsp_seq_no,
105489 p->sequence_no);
105490 list_del(&p->list);
105491 goto out;
105492diff --git a/net/can/af_can.c b/net/can/af_can.c
105493index ce82337..5d17b4d 100644
105494--- a/net/can/af_can.c
105495+++ b/net/can/af_can.c
105496@@ -884,7 +884,7 @@ static const struct net_proto_family can_family_ops = {
105497 };
105498
105499 /* notifier block for netdevice event */
105500-static struct notifier_block can_netdev_notifier __read_mostly = {
105501+static struct notifier_block can_netdev_notifier = {
105502 .notifier_call = can_notifier,
105503 };
105504
105505diff --git a/net/can/bcm.c b/net/can/bcm.c
105506index dcb75c0..24b1b43 100644
105507--- a/net/can/bcm.c
105508+++ b/net/can/bcm.c
105509@@ -1624,7 +1624,7 @@ static int __init bcm_module_init(void)
105510 }
105511
105512 /* create /proc/net/can-bcm directory */
105513- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
105514+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
105515 return 0;
105516 }
105517
105518diff --git a/net/can/gw.c b/net/can/gw.c
105519index 050a211..bb9fe33 100644
105520--- a/net/can/gw.c
105521+++ b/net/can/gw.c
105522@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
105523 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
105524
105525 static HLIST_HEAD(cgw_list);
105526-static struct notifier_block notifier;
105527
105528 static struct kmem_cache *cgw_cache __read_mostly;
105529
105530@@ -947,6 +946,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
105531 return err;
105532 }
105533
105534+static struct notifier_block notifier = {
105535+ .notifier_call = cgw_notifier
105536+};
105537+
105538 static __init int cgw_module_init(void)
105539 {
105540 /* sanitize given module parameter */
105541@@ -962,7 +965,6 @@ static __init int cgw_module_init(void)
105542 return -ENOMEM;
105543
105544 /* set notifier */
105545- notifier.notifier_call = cgw_notifier;
105546 register_netdevice_notifier(&notifier);
105547
105548 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
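
Moving the cgw notifier's setup out of cgw_module_init() and into a static designated initializer is another constification enabler: once nothing writes the structure at runtime, the constify plugin can place its function pointer in read-only memory, eliminating a writable kernel function pointer. The same reasoning explains the dropped __read_mostly on vlan_notifier_block and can_netdev_notifier earlier: a constified object already lives in .rodata. Compare (sketch):

static int cgw_notifier(struct notifier_block *nb, unsigned long ev, void *p);

/* before: the object must remain writable until init runs */
static struct notifier_block n1;
/* ... at init time: n1.notifier_call = cgw_notifier; */

/* after: fully formed at compile time, eligible for .rodata */
static struct notifier_block n2 = {
	.notifier_call = cgw_notifier,
};
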
105549diff --git a/net/can/proc.c b/net/can/proc.c
105550index 1a19b98..df2b4ec 100644
105551--- a/net/can/proc.c
105552+++ b/net/can/proc.c
105553@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
105554 void can_init_proc(void)
105555 {
105556 /* create /proc/net/can directory */
105557- can_dir = proc_mkdir("can", init_net.proc_net);
105558+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
105559
105560 if (!can_dir) {
105561 printk(KERN_INFO "can: failed to create /proc/net/can . "
105562diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
105563index 3d9ddc2..ca5d5b6 100644
105564--- a/net/ceph/messenger.c
105565+++ b/net/ceph/messenger.c
105566@@ -187,7 +187,7 @@ static void con_fault(struct ceph_connection *con);
105567 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
105568
105569 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
105570-static atomic_t addr_str_seq = ATOMIC_INIT(0);
105571+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
105572
105573 static struct page *zero_page; /* used in certain error cases */
105574
105575@@ -198,7 +198,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
105576 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
105577 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
105578
105579- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
105580+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
105581 s = addr_str[i];
105582
105583 switch (ss->ss_family) {
105584diff --git a/net/compat.c b/net/compat.c
105585index bc8aeef..f9c070c 100644
105586--- a/net/compat.c
105587+++ b/net/compat.c
105588@@ -73,9 +73,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
105589 return -EFAULT;
105590 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
105591 kmsg->msg_namelen = sizeof(struct sockaddr_storage);
105592- kmsg->msg_name = compat_ptr(tmp1);
105593- kmsg->msg_iov = compat_ptr(tmp2);
105594- kmsg->msg_control = compat_ptr(tmp3);
105595+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
105596+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
105597+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
105598 return 0;
105599 }
105600
105601@@ -87,7 +87,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
105602
105603 if (kern_msg->msg_name && kern_msg->msg_namelen) {
105604 if (mode == VERIFY_READ) {
105605- int err = move_addr_to_kernel(kern_msg->msg_name,
105606+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
105607 kern_msg->msg_namelen,
105608 kern_address);
105609 if (err < 0)
105610@@ -100,7 +100,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
105611 }
105612
105613 tot_len = iov_from_user_compat_to_kern(kern_iov,
105614- (struct compat_iovec __user *)kern_msg->msg_iov,
105615+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
105616 kern_msg->msg_iovlen);
105617 if (tot_len >= 0)
105618 kern_msg->msg_iov = kern_iov;
105619@@ -120,20 +120,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
105620
105621 #define CMSG_COMPAT_FIRSTHDR(msg) \
105622 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
105623- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
105624+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
105625 (struct compat_cmsghdr __user *)NULL)
105626
105627 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
105628 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
105629 (ucmlen) <= (unsigned long) \
105630 ((mhdr)->msg_controllen - \
105631- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
105632+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
105633
105634 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
105635 struct compat_cmsghdr __user *cmsg, int cmsg_len)
105636 {
105637 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
105638- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
105639+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
105640 msg->msg_controllen)
105641 return NULL;
105642 return (struct compat_cmsghdr __user *)ptr;
105643@@ -223,7 +223,7 @@ Efault:
105644
105645 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
105646 {
105647- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
105648+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
105649 struct compat_cmsghdr cmhdr;
105650 struct compat_timeval ctv;
105651 struct compat_timespec cts[3];
105652@@ -279,7 +279,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
105653
105654 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
105655 {
105656- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
105657+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
105658 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
105659 int fdnum = scm->fp->count;
105660 struct file **fp = scm->fp->fp;
105661@@ -367,7 +367,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
105662 return -EFAULT;
105663 old_fs = get_fs();
105664 set_fs(KERNEL_DS);
105665- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
105666+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
105667 set_fs(old_fs);
105668
105669 return err;
105670@@ -428,7 +428,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
105671 len = sizeof(ktime);
105672 old_fs = get_fs();
105673 set_fs(KERNEL_DS);
105674- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
105675+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
105676 set_fs(old_fs);
105677
105678 if (!err) {
105679@@ -571,7 +571,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
105680 case MCAST_JOIN_GROUP:
105681 case MCAST_LEAVE_GROUP:
105682 {
105683- struct compat_group_req __user *gr32 = (void *)optval;
105684+ struct compat_group_req __user *gr32 = (void __user *)optval;
105685 struct group_req __user *kgr =
105686 compat_alloc_user_space(sizeof(struct group_req));
105687 u32 interface;
105688@@ -592,7 +592,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
105689 case MCAST_BLOCK_SOURCE:
105690 case MCAST_UNBLOCK_SOURCE:
105691 {
105692- struct compat_group_source_req __user *gsr32 = (void *)optval;
105693+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
105694 struct group_source_req __user *kgsr = compat_alloc_user_space(
105695 sizeof(struct group_source_req));
105696 u32 interface;
105697@@ -613,7 +613,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
105698 }
105699 case MCAST_MSFILTER:
105700 {
105701- struct compat_group_filter __user *gf32 = (void *)optval;
105702+ struct compat_group_filter __user *gf32 = (void __user *)optval;
105703 struct group_filter __user *kgf;
105704 u32 interface, fmode, numsrc;
105705
105706@@ -651,7 +651,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
105707 char __user *optval, int __user *optlen,
105708 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
105709 {
105710- struct compat_group_filter __user *gf32 = (void *)optval;
105711+ struct compat_group_filter __user *gf32 = (void __user *)optval;
105712 struct group_filter __user *kgf;
105713 int __user *koptlen;
105714 u32 interface, fmode, numsrc;
105715@@ -804,7 +804,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
105716
105717 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
105718 return -EINVAL;
105719- if (copy_from_user(a, args, nas[call]))
105720+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
105721 return -EFAULT;
105722 a0 = a[0];
105723 a1 = a[1];
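
The compat socketcall hunk is belt-and-braces hardening: nas[] holds the per-call argument byte counts, and bounding nas[call] by the size of the on-stack argument array guarantees copy_from_user() can never overrun it even if the table and the range check were ever to drift apart. Reduced form (sketch):

u32 a[6];					/* socketcall argument scratch */
static const unsigned char nas[21] = { 0 };	/* per-call sizes, elided */

if (call < SYS_SOCKET || call > SYS_SENDMMSG)
	return -EINVAL;
if (nas[call] > sizeof(a) || copy_from_user(a, args, nas[call]))
	return -EFAULT;
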
105724diff --git a/net/core/Makefile b/net/core/Makefile
105725index 71093d9..a8a035b 100644
105726--- a/net/core/Makefile
105727+++ b/net/core/Makefile
105728@@ -21,6 +21,5 @@ obj-$(CONFIG_FIB_RULES) += fib_rules.o
105729 obj-$(CONFIG_TRACEPOINTS) += net-traces.o
105730 obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o
105731 obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += timestamping.o
105732-obj-$(CONFIG_NET_PTP_CLASSIFY) += ptp_classifier.o
105733 obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o
105734 obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
105735diff --git a/net/core/datagram.c b/net/core/datagram.c
105736index 488dd1a..7179f0f 100644
105737--- a/net/core/datagram.c
105738+++ b/net/core/datagram.c
105739@@ -301,7 +301,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
105740 }
105741
105742 kfree_skb(skb);
105743- atomic_inc(&sk->sk_drops);
105744+ atomic_inc_unchecked(&sk->sk_drops);
105745 sk_mem_reclaim_partial(sk);
105746
105747 return err;
105748diff --git a/net/core/dev.c b/net/core/dev.c
105749index 367a586..ef2fe17 100644
105750--- a/net/core/dev.c
105751+++ b/net/core/dev.c
105752@@ -1672,14 +1672,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
105753 {
105754 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
105755 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
105756- atomic_long_inc(&dev->rx_dropped);
105757+ atomic_long_inc_unchecked(&dev->rx_dropped);
105758 kfree_skb(skb);
105759 return NET_RX_DROP;
105760 }
105761 }
105762
105763 if (unlikely(!is_skb_forwardable(dev, skb))) {
105764- atomic_long_inc(&dev->rx_dropped);
105765+ atomic_long_inc_unchecked(&dev->rx_dropped);
105766 kfree_skb(skb);
105767 return NET_RX_DROP;
105768 }
105769@@ -2476,7 +2476,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
105770
105771 struct dev_gso_cb {
105772 void (*destructor)(struct sk_buff *skb);
105773-};
105774+} __no_const;
105775
105776 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
105777
105778@@ -2932,7 +2932,7 @@ recursion_alert:
105779 rc = -ENETDOWN;
105780 rcu_read_unlock_bh();
105781
105782- atomic_long_inc(&dev->tx_dropped);
105783+ atomic_long_inc_unchecked(&dev->tx_dropped);
105784 kfree_skb(skb);
105785 return rc;
105786 out:
105787@@ -3276,7 +3276,7 @@ enqueue:
105788
105789 local_irq_restore(flags);
105790
105791- atomic_long_inc(&skb->dev->rx_dropped);
105792+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
105793 kfree_skb(skb);
105794 return NET_RX_DROP;
105795 }
105796@@ -3353,7 +3353,7 @@ int netif_rx_ni(struct sk_buff *skb)
105797 }
105798 EXPORT_SYMBOL(netif_rx_ni);
105799
105800-static void net_tx_action(struct softirq_action *h)
105801+static __latent_entropy void net_tx_action(void)
105802 {
105803 struct softnet_data *sd = &__get_cpu_var(softnet_data);
105804
105805@@ -3686,7 +3686,7 @@ ncls:
105806 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
105807 } else {
105808 drop:
105809- atomic_long_inc(&skb->dev->rx_dropped);
105810+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
105811 kfree_skb(skb);
105812 /* Jamal, now you will not able to escape explaining
105813 * me how you were going to use this. :-)
105814@@ -4406,7 +4406,7 @@ void netif_napi_del(struct napi_struct *napi)
105815 }
105816 EXPORT_SYMBOL(netif_napi_del);
105817
105818-static void net_rx_action(struct softirq_action *h)
105819+static __latent_entropy void net_rx_action(void)
105820 {
105821 struct softnet_data *sd = &__get_cpu_var(softnet_data);
105822 unsigned long time_limit = jiffies + 2;
105823@@ -6403,8 +6403,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
105824 } else {
105825 netdev_stats_to_stats64(storage, &dev->stats);
105826 }
105827- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
105828- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
105829+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
105830+ storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
105831 return storage;
105832 }
105833 EXPORT_SYMBOL(dev_get_stats);
105834diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
105835index cf999e0..c59a975 100644
105836--- a/net/core/dev_ioctl.c
105837+++ b/net/core/dev_ioctl.c
105838@@ -366,9 +366,13 @@ void dev_load(struct net *net, const char *name)
105839 if (no_module && capable(CAP_NET_ADMIN))
105840 no_module = request_module("netdev-%s", name);
105841 if (no_module && capable(CAP_SYS_MODULE)) {
105842+#ifdef CONFIG_GRKERNSEC_MODHARDEN
105843+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
105844+#else
105845 if (!request_module("%s", name))
105846 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
105847 name);
105848+#endif
105849 }
105850 }
105851 EXPORT_SYMBOL(dev_load);
105852diff --git a/net/core/filter.c b/net/core/filter.c
105853index 1dbf646..0f95703 100644
105854--- a/net/core/filter.c
105855+++ b/net/core/filter.c
105856@@ -1,16 +1,11 @@
105857 /*
105858 * Linux Socket Filter - Kernel level socket filtering
105859 *
105860- * Based on the design of the Berkeley Packet Filter. The new
105861- * internal format has been designed by PLUMgrid:
105862+ * Author:
105863+ * Jay Schulist <jschlst@samba.org>
105864 *
105865- * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
105866- *
105867- * Authors:
105868- *
105869- * Jay Schulist <jschlst@samba.org>
105870- * Alexei Starovoitov <ast@plumgrid.com>
105871- * Daniel Borkmann <dborkman@redhat.com>
105872+ * Based on the design of:
105873+ * - The Berkeley Packet Filter
105874 *
105875 * This program is free software; you can redistribute it and/or
105876 * modify it under the terms of the GNU General Public License
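
This hunk and the very large ones that follow are grsecurity's revert of the 3.16 internal eBPF engine back to the classic BPF interpreter: __sk_run_filter() with its ten 64-bit registers and computed-goto jumptable is removed and the original sk_run_filter() restored, trading the new engine's generality for a much smaller attack surface (the Kconfig hunk above restricting BPF_JIT to x86 is part of the same stance). Classic BPF as attached from userspace is unaffected; for reference, a minimal filter that accepts only ARP frames on a packet socket (sketch; fd is assumed to be an open AF_PACKET socket):

#include <linux/filter.h>
#include <sys/socket.h>

static struct sock_filter code[] = {
	{ 0x28, 0, 0, 12 },		/* ldh [12]       ; load EtherType */
	{ 0x15, 0, 1, 0x0806 },		/* jeq #ETH_P_ARP ; else fall through */
	{ 0x06, 0, 0, 0xffffffff },	/* ret #-1        ; accept */
	{ 0x06, 0, 0, 0 },		/* ret #0         ; drop */
};
static struct sock_fprog prog = {
	.len	= sizeof(code) / sizeof(code[0]),
	.filter	= code,
};

static int attach_arp_filter(int fd)
{
	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}
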
105877@@ -45,27 +40,6 @@
105878 #include <linux/seccomp.h>
105879 #include <linux/if_vlan.h>
105880
105881-/* Registers */
105882-#define BPF_R0 regs[BPF_REG_0]
105883-#define BPF_R1 regs[BPF_REG_1]
105884-#define BPF_R2 regs[BPF_REG_2]
105885-#define BPF_R3 regs[BPF_REG_3]
105886-#define BPF_R4 regs[BPF_REG_4]
105887-#define BPF_R5 regs[BPF_REG_5]
105888-#define BPF_R6 regs[BPF_REG_6]
105889-#define BPF_R7 regs[BPF_REG_7]
105890-#define BPF_R8 regs[BPF_REG_8]
105891-#define BPF_R9 regs[BPF_REG_9]
105892-#define BPF_R10 regs[BPF_REG_10]
105893-
105894-/* Named registers */
105895-#define DST regs[insn->dst_reg]
105896-#define SRC regs[insn->src_reg]
105897-#define FP regs[BPF_REG_FP]
105898-#define ARG1 regs[BPF_REG_ARG1]
105899-#define CTX regs[BPF_REG_CTX]
105900-#define IMM insn->imm
105901-
105902 /* No hurry in this branch
105903 *
105904 * Exported for the bpf jit load helper.
105905@@ -78,9 +52,9 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
105906 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
105907 else if (k >= SKF_LL_OFF)
105908 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
105909+
105910 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
105911 return ptr;
105912-
105913 return NULL;
105914 }
105915
105916@@ -89,7 +63,6 @@ static inline void *load_pointer(const struct sk_buff *skb, int k,
105917 {
105918 if (k >= 0)
105919 return skb_header_pointer(skb, k, size, buffer);
105920-
105921 return bpf_internal_load_pointer_neg_helper(skb, k, size);
105922 }
105923
105924@@ -135,960 +108,309 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
105925 }
105926 EXPORT_SYMBOL(sk_filter);
105927
105928-/* Base function for offset calculation. Needs to go into .text section,
105929- * therefore keeping it non-static as well; will also be used by JITs
105930- * anyway later on, so do not let the compiler omit it.
105931- */
105932-noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
105933-{
105934- return 0;
105935-}
105936-
105937 /**
105938- * __sk_run_filter - run a filter on a given context
105939- * @ctx: buffer to run the filter on
105940- * @insn: filter to apply
105941+ * sk_run_filter - run a filter on a socket
105942+ * @skb: buffer to run the filter on
105943+ * @fentry: filter to apply
105944 *
105945- * Decode and apply filter instructions to the skb->data. Return length to
105946- * keep, 0 for none. @ctx is the data we are operating on, @insn is the
105947- * array of filter instructions.
105948+ * Decode and apply filter instructions to the skb->data.
105949+ * Return length to keep, 0 for none. @skb is the data we are
105950+ * filtering, @filter is the array of filter instructions.
105951+ * Because all jumps are guaranteed to be before last instruction,
105952+ * and last instruction guaranteed to be a RET, we dont need to check
105953+ * flen. (We used to pass to this function the length of filter)
105954 */
105955-static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
105956+unsigned int sk_run_filter(const struct sk_buff *skb,
105957+ const struct sock_filter *fentry)
105958 {
105959- u64 stack[MAX_BPF_STACK / sizeof(u64)];
105960- u64 regs[MAX_BPF_REG], tmp;
105961- static const void *jumptable[256] = {
105962- [0 ... 255] = &&default_label,
105963- /* Now overwrite non-defaults ... */
105964- /* 32 bit ALU operations */
105965- [BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
105966- [BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
105967- [BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
105968- [BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
105969- [BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
105970- [BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
105971- [BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
105972- [BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
105973- [BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
105974- [BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
105975- [BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
105976- [BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
105977- [BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
105978- [BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
105979- [BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
105980- [BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
105981- [BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
105982- [BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
105983- [BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
105984- [BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
105985- [BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
105986- [BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
105987- [BPF_ALU | BPF_NEG] = &&ALU_NEG,
105988- [BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
105989- [BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
105990- /* 64 bit ALU operations */
105991- [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
105992- [BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
105993- [BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
105994- [BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
105995- [BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
105996- [BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
105997- [BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
105998- [BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
105999- [BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
106000- [BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
106001- [BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
106002- [BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
106003- [BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
106004- [BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
106005- [BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
106006- [BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
106007- [BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
106008- [BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
106009- [BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
106010- [BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
106011- [BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
106012- [BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
106013- [BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
106014- [BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
106015- [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
106016- /* Call instruction */
106017- [BPF_JMP | BPF_CALL] = &&JMP_CALL,
106018- /* Jumps */
106019- [BPF_JMP | BPF_JA] = &&JMP_JA,
106020- [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
106021- [BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
106022- [BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
106023- [BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
106024- [BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
106025- [BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
106026- [BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
106027- [BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
106028- [BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
106029- [BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
106030- [BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
106031- [BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
106032- [BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
106033- [BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
106034- /* Program return */
106035- [BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
106036- /* Store instructions */
106037- [BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
106038- [BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
106039- [BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
106040- [BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
106041- [BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
106042- [BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
106043- [BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
106044- [BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
106045- [BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
106046- [BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
106047- /* Load instructions */
106048- [BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
106049- [BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
106050- [BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
106051- [BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
106052- [BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
106053- [BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
106054- [BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
106055- [BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
106056- [BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
106057- [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
106058- };
106059 void *ptr;
106060- int off;
106061-
106062-#define CONT ({ insn++; goto select_insn; })
106063-#define CONT_JMP ({ insn++; goto select_insn; })
106064-
106065- FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
106066- ARG1 = (u64) (unsigned long) ctx;
106067-
106068- /* Registers used in classic BPF programs need to be reset first. */
106069- regs[BPF_REG_A] = 0;
106070- regs[BPF_REG_X] = 0;
106071-
106072-select_insn:
106073- goto *jumptable[insn->code];
106074-
106075- /* ALU */
106076-#define ALU(OPCODE, OP) \
106077- ALU64_##OPCODE##_X: \
106078- DST = DST OP SRC; \
106079- CONT; \
106080- ALU_##OPCODE##_X: \
106081- DST = (u32) DST OP (u32) SRC; \
106082- CONT; \
106083- ALU64_##OPCODE##_K: \
106084- DST = DST OP IMM; \
106085- CONT; \
106086- ALU_##OPCODE##_K: \
106087- DST = (u32) DST OP (u32) IMM; \
106088- CONT;
106089-
106090- ALU(ADD, +)
106091- ALU(SUB, -)
106092- ALU(AND, &)
106093- ALU(OR, |)
106094- ALU(LSH, <<)
106095- ALU(RSH, >>)
106096- ALU(XOR, ^)
106097- ALU(MUL, *)
106098-#undef ALU
106099- ALU_NEG:
106100- DST = (u32) -DST;
106101- CONT;
106102- ALU64_NEG:
106103- DST = -DST;
106104- CONT;
106105- ALU_MOV_X:
106106- DST = (u32) SRC;
106107- CONT;
106108- ALU_MOV_K:
106109- DST = (u32) IMM;
106110- CONT;
106111- ALU64_MOV_X:
106112- DST = SRC;
106113- CONT;
106114- ALU64_MOV_K:
106115- DST = IMM;
106116- CONT;
106117- ALU64_ARSH_X:
106118- (*(s64 *) &DST) >>= SRC;
106119- CONT;
106120- ALU64_ARSH_K:
106121- (*(s64 *) &DST) >>= IMM;
106122- CONT;
106123- ALU64_MOD_X:
106124- if (unlikely(SRC == 0))
106125- return 0;
106126- tmp = DST;
106127- DST = do_div(tmp, SRC);
106128- CONT;
106129- ALU_MOD_X:
106130- if (unlikely(SRC == 0))
106131- return 0;
106132- tmp = (u32) DST;
106133- DST = do_div(tmp, (u32) SRC);
106134- CONT;
106135- ALU64_MOD_K:
106136- tmp = DST;
106137- DST = do_div(tmp, IMM);
106138- CONT;
106139- ALU_MOD_K:
106140- tmp = (u32) DST;
106141- DST = do_div(tmp, (u32) IMM);
106142- CONT;
106143- ALU64_DIV_X:
106144- if (unlikely(SRC == 0))
106145- return 0;
106146- do_div(DST, SRC);
106147- CONT;
106148- ALU_DIV_X:
106149- if (unlikely(SRC == 0))
106150- return 0;
106151- tmp = (u32) DST;
106152- do_div(tmp, (u32) SRC);
106153- DST = (u32) tmp;
106154- CONT;
106155- ALU64_DIV_K:
106156- do_div(DST, IMM);
106157- CONT;
106158- ALU_DIV_K:
106159- tmp = (u32) DST;
106160- do_div(tmp, (u32) IMM);
106161- DST = (u32) tmp;
106162- CONT;
106163- ALU_END_TO_BE:
106164- switch (IMM) {
106165- case 16:
106166- DST = (__force u16) cpu_to_be16(DST);
106167- break;
106168- case 32:
106169- DST = (__force u32) cpu_to_be32(DST);
106170- break;
106171- case 64:
106172- DST = (__force u64) cpu_to_be64(DST);
106173- break;
106174- }
106175- CONT;
106176- ALU_END_TO_LE:
106177- switch (IMM) {
106178- case 16:
106179- DST = (__force u16) cpu_to_le16(DST);
106180- break;
106181- case 32:
106182- DST = (__force u32) cpu_to_le32(DST);
106183- break;
106184- case 64:
106185- DST = (__force u64) cpu_to_le64(DST);
106186- break;
106187- }
106188- CONT;
106189-
106190- /* CALL */
106191- JMP_CALL:
106192- /* Function call scratches BPF_R1-BPF_R5 registers,
106193- * preserves BPF_R6-BPF_R9, and stores return value
106194- * into BPF_R0.
106195- */
106196- BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
106197- BPF_R4, BPF_R5);
106198- CONT;
106199-
106200- /* JMP */
106201- JMP_JA:
106202- insn += insn->off;
106203- CONT;
106204- JMP_JEQ_X:
106205- if (DST == SRC) {
106206- insn += insn->off;
106207- CONT_JMP;
106208- }
106209- CONT;
106210- JMP_JEQ_K:
106211- if (DST == IMM) {
106212- insn += insn->off;
106213- CONT_JMP;
106214- }
106215- CONT;
106216- JMP_JNE_X:
106217- if (DST != SRC) {
106218- insn += insn->off;
106219- CONT_JMP;
106220- }
106221- CONT;
106222- JMP_JNE_K:
106223- if (DST != IMM) {
106224- insn += insn->off;
106225- CONT_JMP;
106226- }
106227- CONT;
106228- JMP_JGT_X:
106229- if (DST > SRC) {
106230- insn += insn->off;
106231- CONT_JMP;
106232- }
106233- CONT;
106234- JMP_JGT_K:
106235- if (DST > IMM) {
106236- insn += insn->off;
106237- CONT_JMP;
106238- }
106239- CONT;
106240- JMP_JGE_X:
106241- if (DST >= SRC) {
106242- insn += insn->off;
106243- CONT_JMP;
106244- }
106245- CONT;
106246- JMP_JGE_K:
106247- if (DST >= IMM) {
106248- insn += insn->off;
106249- CONT_JMP;
106250- }
106251- CONT;
106252- JMP_JSGT_X:
106253- if (((s64) DST) > ((s64) SRC)) {
106254- insn += insn->off;
106255- CONT_JMP;
106256- }
106257- CONT;
106258- JMP_JSGT_K:
106259- if (((s64) DST) > ((s64) IMM)) {
106260- insn += insn->off;
106261- CONT_JMP;
106262- }
106263- CONT;
106264- JMP_JSGE_X:
106265- if (((s64) DST) >= ((s64) SRC)) {
106266- insn += insn->off;
106267- CONT_JMP;
106268- }
106269- CONT;
106270- JMP_JSGE_K:
106271- if (((s64) DST) >= ((s64) IMM)) {
106272- insn += insn->off;
106273- CONT_JMP;
106274- }
106275- CONT;
106276- JMP_JSET_X:
106277- if (DST & SRC) {
106278- insn += insn->off;
106279- CONT_JMP;
106280- }
106281- CONT;
106282- JMP_JSET_K:
106283- if (DST & IMM) {
106284- insn += insn->off;
106285- CONT_JMP;
106286- }
106287- CONT;
106288- JMP_EXIT:
106289- return BPF_R0;
106290-
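		/* Note that the jump offsets above are relative to the *next*
		 * instruction: CONT still advances insn by one after the
		 * 'insn += insn->off', which is why BPF_EMIT_JMP in
		 * sk_convert_filter() below subtracts 1 when computing
		 * offsets. For example, with insn->off == 0 a taken branch
		 * and a fall-through land on the same instruction.
		 */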
106291-	/* STX, ST and LDX */
106292-#define LDST(SIZEOP, SIZE) \
106293- STX_MEM_##SIZEOP: \
106294- *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
106295- CONT; \
106296- ST_MEM_##SIZEOP: \
106297- *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
106298- CONT; \
106299- LDX_MEM_##SIZEOP: \
106300- DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
106301- CONT;
106302-
106303- LDST(B, u8)
106304- LDST(H, u16)
106305- LDST(W, u32)
106306- LDST(DW, u64)
106307-#undef LDST
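	/* For reference, a single expansion of the macro above,
	 * LDST(W, u32), comes out as:
	 *
	 *	STX_MEM_W:
	 *		*(u32 *)(unsigned long) (DST + insn->off) = SRC;
	 *		CONT;
	 *	ST_MEM_W:
	 *		*(u32 *)(unsigned long) (DST + insn->off) = IMM;
	 *		CONT;
	 *	LDX_MEM_W:
	 *		DST = *(SIZE *)(unsigned long) (SRC + insn->off);
	 *		CONT;
	 *
	 * with SIZE == u32 in the last line as well.
	 */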
106308- STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
106309- atomic_add((u32) SRC, (atomic_t *)(unsigned long)
106310- (DST + insn->off));
106311- CONT;
106312- STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
106313- atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
106314- (DST + insn->off));
106315- CONT;
106316- LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
106317- off = IMM;
106318-load_word:
106319-	/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns appear
106320-	 * only in programs where ctx ==
106321-	 * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
106322-	 * == BPF_R6, sk_convert_filter() saves it in BPF_R6,
106323-	 * and the internal BPF verifier checks that BPF_R6 ==
106324-	 * ctx.
106325- *
106326- * BPF_ABS and BPF_IND are wrappers of function calls,
106327- * so they scratch BPF_R1-BPF_R5 registers, preserve
106328- * BPF_R6-BPF_R9, and store return value into BPF_R0.
106329- *
106330- * Implicit input:
106331- * ctx == skb == BPF_R6 == CTX
106332- *
106333- * Explicit input:
106334- * SRC == any register
106335- * IMM == 32-bit immediate
106336- *
106337- * Output:
106338- * BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
106339- */
106340-
106341- ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
106342- if (likely(ptr != NULL)) {
106343- BPF_R0 = get_unaligned_be32(ptr);
106344- CONT;
106345- }
106346-
106347- return 0;
106348- LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
106349- off = IMM;
106350-load_half:
106351- ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
106352- if (likely(ptr != NULL)) {
106353- BPF_R0 = get_unaligned_be16(ptr);
106354- CONT;
106355- }
106356-
106357- return 0;
106358- LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
106359- off = IMM;
106360-load_byte:
106361- ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
106362- if (likely(ptr != NULL)) {
106363- BPF_R0 = *(u8 *)ptr;
106364- CONT;
106365- }
106366-
106367- return 0;
106368- LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
106369- off = IMM + SRC;
106370- goto load_word;
106371- LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
106372- off = IMM + SRC;
106373- goto load_half;
106374- LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
106375- off = IMM + SRC;
106376- goto load_byte;
106377-
106378- default_label:
106379- /* If we ever reach this, we have a bug somewhere. */
106380- WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
106381- return 0;
106382-}
106383-
106384-/* Helper to find the offset of pkt_type in the sk_buff structure. We want
106385- * to make sure it's still a 3-bit field starting at a byte boundary;
106386- * taken from arch/x86/net/bpf_jit_comp.c.
106387- */
106388-#ifdef __BIG_ENDIAN_BITFIELD
106389-#define PKT_TYPE_MAX (7 << 5)
106390+ u32 A = 0; /* Accumulator */
106391+ u32 X = 0; /* Index Register */
106392+ u32 mem[BPF_MEMWORDS] = {}; /* Scratch Memory Store */
106393+ u32 tmp;
106394+ int k;
106395+
106396+ /*
106397+ * Process array of filter instructions.
106398+ */
106399+ for (;; fentry++) {
106400+#if defined(CONFIG_X86_32)
106401+#define K (fentry->k)
106402 #else
106403-#define PKT_TYPE_MAX 7
106404+ const u32 K = fentry->k;
106405 #endif
106406-static unsigned int pkt_type_offset(void)
106407-{
106408- struct sk_buff skb_probe = { .pkt_type = ~0, };
106409- u8 *ct = (u8 *) &skb_probe;
106410- unsigned int off;
106411
106412- for (off = 0; off < sizeof(struct sk_buff); off++) {
106413- if (ct[off] == PKT_TYPE_MAX)
106414- return off;
106415- }
106416-
106417- pr_err_once("Please fix %s, as pkt_type couldn't be found!\n", __func__);
106418- return -1;
106419-}
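The probe above works because pkt_type is a 3-bit bitfield: in a zero-initialized
struct, writing ~0 to it sets exactly PKT_TYPE_MAX within a single byte, so a
linear byte scan recovers its offset. The same trick as a self-contained
userspace sketch (the struct and field names here are hypothetical, and the
comparison against 7 assumes little-endian bitfield layout):

	#include <stdio.h>
	#include <string.h>

	struct probe { unsigned long pad; unsigned char type:3, other:1; };

	int main(void)
	{
		struct probe p;
		unsigned char *ct = (unsigned char *)&p;
		size_t off;

		memset(&p, 0, sizeof(p));
		p.type = ~0;	/* truncates to 7, the 3-bit maximum */
		for (off = 0; off < sizeof(p); off++)
			if (ct[off] == 7)
				printf("type lives at byte %zu\n", off);
		return 0;
	}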
106420-
106421-static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
106422-{
106423- return __skb_get_poff((struct sk_buff *)(unsigned long) ctx);
106424-}
106425-
106426-static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
106427-{
106428- struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
106429- struct nlattr *nla;
106430-
106431- if (skb_is_nonlinear(skb))
106432- return 0;
106433-
106434- if (skb->len < sizeof(struct nlattr))
106435- return 0;
106436-
106437- if (a > skb->len - sizeof(struct nlattr))
106438- return 0;
106439-
106440- nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
106441- if (nla)
106442- return (void *) nla - (void *) skb->data;
106443-
106444- return 0;
106445-}
106446-
106447-static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
106448-{
106449- struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
106450- struct nlattr *nla;
106451-
106452- if (skb_is_nonlinear(skb))
106453- return 0;
106454-
106455- if (skb->len < sizeof(struct nlattr))
106456- return 0;
106457-
106458- if (a > skb->len - sizeof(struct nlattr))
106459- return 0;
106460-
106461- nla = (struct nlattr *) &skb->data[a];
106462- if (nla->nla_len > skb->len - a)
106463- return 0;
106464-
106465- nla = nla_find_nested(nla, x);
106466- if (nla)
106467- return (void *) nla - (void *) skb->data;
106468-
106469- return 0;
106470-}
106471-
106472-static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
106473-{
106474- return raw_smp_processor_id();
106475-}
106476-
106477-/* note that this only generates 32-bit random numbers */
106478-static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
106479-{
106480- return prandom_u32();
106481-}
106482-
106483-static bool convert_bpf_extensions(struct sock_filter *fp,
106484- struct sock_filter_int **insnp)
106485-{
106486- struct sock_filter_int *insn = *insnp;
106487-
106488- switch (fp->k) {
106489- case SKF_AD_OFF + SKF_AD_PROTOCOL:
106490- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
106491-
106492- /* A = *(u16 *) (CTX + offsetof(protocol)) */
106493- *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
106494- offsetof(struct sk_buff, protocol));
106495- /* A = ntohs(A) [emitting a nop or swap16] */
106496- *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
106497- break;
106498-
106499- case SKF_AD_OFF + SKF_AD_PKTTYPE:
106500- *insn = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX,
106501- pkt_type_offset());
106502- if (insn->off < 0)
106503- return false;
106504- insn++;
106505- *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX);
106506-#ifdef __BIG_ENDIAN_BITFIELD
106507- insn++;
106508- *insn = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 5);
106509-#endif
106510- break;
106511-
106512- case SKF_AD_OFF + SKF_AD_IFINDEX:
106513- case SKF_AD_OFF + SKF_AD_HATYPE:
106514- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
106515- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
106516- BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)) < 0);
106517-
106518- *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
106519- BPF_REG_TMP, BPF_REG_CTX,
106520- offsetof(struct sk_buff, dev));
106521- /* if (tmp != 0) goto pc + 1 */
106522- *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
106523- *insn++ = BPF_EXIT_INSN();
106524- if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
106525- *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
106526- offsetof(struct net_device, ifindex));
106527- else
106528- *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
106529- offsetof(struct net_device, type));
106530- break;
106531-
106532- case SKF_AD_OFF + SKF_AD_MARK:
106533- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
106534-
106535- *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
106536- offsetof(struct sk_buff, mark));
106537- break;
106538-
106539- case SKF_AD_OFF + SKF_AD_RXHASH:
106540- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
106541-
106542- *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
106543- offsetof(struct sk_buff, hash));
106544- break;
106545-
106546- case SKF_AD_OFF + SKF_AD_QUEUE:
106547- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
106548-
106549- *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
106550- offsetof(struct sk_buff, queue_mapping));
106551- break;
106552-
106553- case SKF_AD_OFF + SKF_AD_VLAN_TAG:
106554- case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
106555- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
106556- BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
106557-
106558- /* A = *(u16 *) (CTX + offsetof(vlan_tci)) */
106559- *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
106560- offsetof(struct sk_buff, vlan_tci));
106561- if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
106562- *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A,
106563- ~VLAN_TAG_PRESENT);
106564- } else {
106565- /* A >>= 12 */
106566- *insn++ = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12);
106567- /* A &= 1 */
106568- *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 1);
106569- }
106570- break;
106571-
106572- case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
106573- case SKF_AD_OFF + SKF_AD_NLATTR:
106574- case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
106575- case SKF_AD_OFF + SKF_AD_CPU:
106576- case SKF_AD_OFF + SKF_AD_RANDOM:
106577- /* arg1 = CTX */
106578- *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
106579- /* arg2 = A */
106580- *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
106581- /* arg3 = X */
106582- *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
106583- /* Emit call(arg1=CTX, arg2=A, arg3=X) */
106584- switch (fp->k) {
106585- case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
106586- *insn = BPF_EMIT_CALL(__skb_get_pay_offset);
106587- break;
106588- case SKF_AD_OFF + SKF_AD_NLATTR:
106589- *insn = BPF_EMIT_CALL(__skb_get_nlattr);
106590- break;
106591- case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
106592- *insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
106593- break;
106594- case SKF_AD_OFF + SKF_AD_CPU:
106595- *insn = BPF_EMIT_CALL(__get_raw_cpu_id);
106596- break;
106597- case SKF_AD_OFF + SKF_AD_RANDOM:
106598- *insn = BPF_EMIT_CALL(__get_random_u32);
106599- break;
106600- }
106601- break;
106602-
106603- case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
106604- /* A ^= X */
106605- *insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
106606- break;
106607-
106608- default:
106609- /* This is just a dummy call to avoid letting the compiler
106610- * evict __bpf_call_base() as an optimization. Placed here
106611- * where no-one bothers.
106612- */
106613- BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
106614- return false;
106615- }
106616-
106617- *insnp = insn;
106618- return true;
106619-}
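As a concrete instance of the conversion above: the single classic instruction
that loads SKF_AD_PROTOCOL turns into two internal instructions (a sketch built
from the insn macros used in this function):

	/* classic: { BPF_LD | BPF_H | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_PROTOCOL } */
	struct sock_filter_int out[] = {
		/* A = *(u16 *) (CTX + offsetof(struct sk_buff, protocol)) */
		BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
			    offsetof(struct sk_buff, protocol)),
		/* A = ntohs(A), emitted as a nop or swap16 depending on host endianness */
		BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16),
	};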
106620-
106621-/**
106622- * sk_convert_filter - convert filter program
106623- * @prog: the user passed filter program
106624- * @len: the length of the user passed filter program
106625- * @new_prog: buffer where converted program will be stored
106626- * @new_len: pointer to store length of converted program
106627- *
106628- * Remap 'sock_filter' style BPF instruction set to 'sock_filter_ext' style.
106629- * Conversion workflow:
106630- *
106631- * 1) First pass for calculating the new program length:
106632- * sk_convert_filter(old_prog, old_len, NULL, &new_len)
106633- *
106634- * 2) 2nd pass to remap, itself run twice: the 1st pass finds new
106635- *    jump offsets, the 2nd pass does the remapping:
106636- * new_prog = kmalloc(sizeof(struct sock_filter_int) * new_len);
106637- * sk_convert_filter(old_prog, old_len, new_prog, &new_len);
106638- *
106639- * User BPF's register A is mapped to our BPF register 6, user BPF
106640- * register X is mapped to BPF register 7; frame pointer is always
106641- * register 10; Context 'void *ctx' is stored in register 1, that is,
106642- * for socket filters: ctx == 'struct sk_buff *', for seccomp:
106643- * ctx == 'struct seccomp_data *'.
106644- */
106645-int sk_convert_filter(struct sock_filter *prog, int len,
106646- struct sock_filter_int *new_prog, int *new_len)
106647-{
106648- int new_flen = 0, pass = 0, target, i;
106649- struct sock_filter_int *new_insn;
106650- struct sock_filter *fp;
106651- int *addrs = NULL;
106652- u8 bpf_src;
106653-
106654- BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
106655- BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
106656-
106657- if (len <= 0 || len > BPF_MAXINSNS)
106658- return -EINVAL;
106659-
106660- if (new_prog) {
106661- addrs = kcalloc(len, sizeof(*addrs), GFP_KERNEL);
106662- if (!addrs)
106663- return -ENOMEM;
106664- }
106665-
106666-do_pass:
106667- new_insn = new_prog;
106668- fp = prog;
106669-
106670- if (new_insn)
106671- *new_insn = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
106672- new_insn++;
106673-
106674- for (i = 0; i < len; fp++, i++) {
106675- struct sock_filter_int tmp_insns[6] = { };
106676- struct sock_filter_int *insn = tmp_insns;
106677-
106678- if (addrs)
106679- addrs[i] = new_insn - new_prog;
106680-
106681- switch (fp->code) {
106682- /* All arithmetic insns and skb loads map as-is. */
106683- case BPF_ALU | BPF_ADD | BPF_X:
106684- case BPF_ALU | BPF_ADD | BPF_K:
106685- case BPF_ALU | BPF_SUB | BPF_X:
106686- case BPF_ALU | BPF_SUB | BPF_K:
106687- case BPF_ALU | BPF_AND | BPF_X:
106688- case BPF_ALU | BPF_AND | BPF_K:
106689- case BPF_ALU | BPF_OR | BPF_X:
106690- case BPF_ALU | BPF_OR | BPF_K:
106691- case BPF_ALU | BPF_LSH | BPF_X:
106692- case BPF_ALU | BPF_LSH | BPF_K:
106693- case BPF_ALU | BPF_RSH | BPF_X:
106694- case BPF_ALU | BPF_RSH | BPF_K:
106695- case BPF_ALU | BPF_XOR | BPF_X:
106696- case BPF_ALU | BPF_XOR | BPF_K:
106697- case BPF_ALU | BPF_MUL | BPF_X:
106698- case BPF_ALU | BPF_MUL | BPF_K:
106699- case BPF_ALU | BPF_DIV | BPF_X:
106700- case BPF_ALU | BPF_DIV | BPF_K:
106701- case BPF_ALU | BPF_MOD | BPF_X:
106702- case BPF_ALU | BPF_MOD | BPF_K:
106703- case BPF_ALU | BPF_NEG:
106704- case BPF_LD | BPF_ABS | BPF_W:
106705- case BPF_LD | BPF_ABS | BPF_H:
106706- case BPF_LD | BPF_ABS | BPF_B:
106707- case BPF_LD | BPF_IND | BPF_W:
106708- case BPF_LD | BPF_IND | BPF_H:
106709- case BPF_LD | BPF_IND | BPF_B:
106710- /* Check for overloaded BPF extension and
106711- * directly convert it if found, otherwise
106712- * just move on with mapping.
106713- */
106714- if (BPF_CLASS(fp->code) == BPF_LD &&
106715- BPF_MODE(fp->code) == BPF_ABS &&
106716- convert_bpf_extensions(fp, &insn))
106717- break;
106718-
106719- *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
106720- break;
106721-
106722- /* Jump transformation cannot use BPF block macros
106723- * everywhere as offset calculation and target updates
106724- * require a bit more work than the rest, i.e. jump
106725- * opcodes map as-is, but offsets need adjustment.
106726- */
106727-
106728-#define BPF_EMIT_JMP \
106729- do { \
106730- if (target >= len || target < 0) \
106731- goto err; \
106732- insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0; \
106733- /* Adjust pc relative offset for 2nd or 3rd insn. */ \
106734- insn->off -= insn - tmp_insns; \
106735- } while (0)
106736-
106737- case BPF_JMP | BPF_JA:
106738- target = i + fp->k + 1;
106739- insn->code = fp->code;
106740- BPF_EMIT_JMP;
106741- break;
106742-
106743- case BPF_JMP | BPF_JEQ | BPF_K:
106744- case BPF_JMP | BPF_JEQ | BPF_X:
106745- case BPF_JMP | BPF_JSET | BPF_K:
106746- case BPF_JMP | BPF_JSET | BPF_X:
106747- case BPF_JMP | BPF_JGT | BPF_K:
106748- case BPF_JMP | BPF_JGT | BPF_X:
106749- case BPF_JMP | BPF_JGE | BPF_K:
106750- case BPF_JMP | BPF_JGE | BPF_X:
106751- if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
106752- /* BPF immediates are signed, zero extend
106753- * immediate into tmp register and use it
106754- * in compare insn.
106755- */
106756- *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);
106757-
106758- insn->dst_reg = BPF_REG_A;
106759- insn->src_reg = BPF_REG_TMP;
106760- bpf_src = BPF_X;
106761- } else {
106762- insn->dst_reg = BPF_REG_A;
106763- insn->src_reg = BPF_REG_X;
106764- insn->imm = fp->k;
106765- bpf_src = BPF_SRC(fp->code);
106766+ switch (fentry->code) {
106767+ case BPF_S_ALU_ADD_X:
106768+ A += X;
106769+ continue;
106770+ case BPF_S_ALU_ADD_K:
106771+ A += K;
106772+ continue;
106773+ case BPF_S_ALU_SUB_X:
106774+ A -= X;
106775+ continue;
106776+ case BPF_S_ALU_SUB_K:
106777+ A -= K;
106778+ continue;
106779+ case BPF_S_ALU_MUL_X:
106780+ A *= X;
106781+ continue;
106782+ case BPF_S_ALU_MUL_K:
106783+ A *= K;
106784+ continue;
106785+ case BPF_S_ALU_DIV_X:
106786+ if (X == 0)
106787+ return 0;
106788+ A /= X;
106789+ continue;
106790+ case BPF_S_ALU_DIV_K:
106791+ A /= K;
106792+ continue;
106793+ case BPF_S_ALU_MOD_X:
106794+ if (X == 0)
106795+ return 0;
106796+ A %= X;
106797+ continue;
106798+ case BPF_S_ALU_MOD_K:
106799+ A %= K;
106800+ continue;
106801+ case BPF_S_ALU_AND_X:
106802+ A &= X;
106803+ continue;
106804+ case BPF_S_ALU_AND_K:
106805+ A &= K;
106806+ continue;
106807+ case BPF_S_ALU_OR_X:
106808+ A |= X;
106809+ continue;
106810+ case BPF_S_ALU_OR_K:
106811+ A |= K;
106812+ continue;
106813+ case BPF_S_ANC_ALU_XOR_X:
106814+ case BPF_S_ALU_XOR_X:
106815+ A ^= X;
106816+ continue;
106817+ case BPF_S_ALU_XOR_K:
106818+ A ^= K;
106819+ continue;
106820+ case BPF_S_ALU_LSH_X:
106821+ A <<= X;
106822+ continue;
106823+ case BPF_S_ALU_LSH_K:
106824+ A <<= K;
106825+ continue;
106826+ case BPF_S_ALU_RSH_X:
106827+ A >>= X;
106828+ continue;
106829+ case BPF_S_ALU_RSH_K:
106830+ A >>= K;
106831+ continue;
106832+ case BPF_S_ALU_NEG:
106833+ A = -A;
106834+ continue;
106835+ case BPF_S_JMP_JA:
106836+ fentry += K;
106837+ continue;
106838+ case BPF_S_JMP_JGT_K:
106839+ fentry += (A > K) ? fentry->jt : fentry->jf;
106840+ continue;
106841+ case BPF_S_JMP_JGE_K:
106842+ fentry += (A >= K) ? fentry->jt : fentry->jf;
106843+ continue;
106844+ case BPF_S_JMP_JEQ_K:
106845+ fentry += (A == K) ? fentry->jt : fentry->jf;
106846+ continue;
106847+ case BPF_S_JMP_JSET_K:
106848+ fentry += (A & K) ? fentry->jt : fentry->jf;
106849+ continue;
106850+ case BPF_S_JMP_JGT_X:
106851+ fentry += (A > X) ? fentry->jt : fentry->jf;
106852+ continue;
106853+ case BPF_S_JMP_JGE_X:
106854+ fentry += (A >= X) ? fentry->jt : fentry->jf;
106855+ continue;
106856+ case BPF_S_JMP_JEQ_X:
106857+ fentry += (A == X) ? fentry->jt : fentry->jf;
106858+ continue;
106859+ case BPF_S_JMP_JSET_X:
106860+ fentry += (A & X) ? fentry->jt : fentry->jf;
106861+ continue;
106862+ case BPF_S_LD_W_ABS:
106863+ k = K;
106864+load_w:
106865+ ptr = load_pointer(skb, k, 4, &tmp);
106866+ if (ptr != NULL) {
106867+ A = get_unaligned_be32(ptr);
106868+ continue;
106869 }
106870-
106871- /* Common case where 'jump_false' is next insn. */
106872- if (fp->jf == 0) {
106873- insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
106874- target = i + fp->jt + 1;
106875- BPF_EMIT_JMP;
106876- break;
106877+ return 0;
106878+ case BPF_S_LD_H_ABS:
106879+ k = K;
106880+load_h:
106881+ ptr = load_pointer(skb, k, 2, &tmp);
106882+ if (ptr != NULL) {
106883+ A = get_unaligned_be16(ptr);
106884+ continue;
106885 }
106886-
106887- /* Convert JEQ into JNE when 'jump_true' is next insn. */
106888- if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
106889- insn->code = BPF_JMP | BPF_JNE | bpf_src;
106890- target = i + fp->jf + 1;
106891- BPF_EMIT_JMP;
106892- break;
106893+ return 0;
106894+ case BPF_S_LD_B_ABS:
106895+ k = K;
106896+load_b:
106897+ ptr = load_pointer(skb, k, 1, &tmp);
106898+ if (ptr != NULL) {
106899+ A = *(u8 *)ptr;
106900+ continue;
106901 }
106902-
106903- /* Other jumps are mapped into two insns: Jxx and JA. */
106904- target = i + fp->jt + 1;
106905- insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
106906- BPF_EMIT_JMP;
106907- insn++;
106908-
106909- insn->code = BPF_JMP | BPF_JA;
106910- target = i + fp->jf + 1;
106911- BPF_EMIT_JMP;
106912- break;
106913-
106914-		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
106915- case BPF_LDX | BPF_MSH | BPF_B:
106916- /* tmp = A */
106917- *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
106918- /* A = BPF_R0 = *(u8 *) (skb->data + K) */
106919- *insn++ = BPF_LD_ABS(BPF_B, fp->k);
106920- /* A &= 0xf */
106921- *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
106922- /* A <<= 2 */
106923- *insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
106924- /* X = A */
106925- *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
106926- /* A = tmp */
106927- *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
106928- break;
106929-
106930-		/* RET_K, RET_A are remapped into 2 insns. */
106931- case BPF_RET | BPF_A:
106932- case BPF_RET | BPF_K:
106933- *insn++ = BPF_MOV32_RAW(BPF_RVAL(fp->code) == BPF_K ?
106934- BPF_K : BPF_X, BPF_REG_0,
106935- BPF_REG_A, fp->k);
106936- *insn = BPF_EXIT_INSN();
106937- break;
106938-
106939- /* Store to stack. */
106940- case BPF_ST:
106941- case BPF_STX:
106942- *insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
106943- BPF_ST ? BPF_REG_A : BPF_REG_X,
106944- -(BPF_MEMWORDS - fp->k) * 4);
106945- break;
106946-
106947- /* Load from stack. */
106948- case BPF_LD | BPF_MEM:
106949- case BPF_LDX | BPF_MEM:
106950- *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
106951- BPF_REG_A : BPF_REG_X, BPF_REG_FP,
106952- -(BPF_MEMWORDS - fp->k) * 4);
106953- break;
106954-
106955- /* A = K or X = K */
106956- case BPF_LD | BPF_IMM:
106957- case BPF_LDX | BPF_IMM:
106958- *insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
106959- BPF_REG_A : BPF_REG_X, fp->k);
106960- break;
106961-
106962- /* X = A */
106963- case BPF_MISC | BPF_TAX:
106964- *insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
106965- break;
106966-
106967- /* A = X */
106968- case BPF_MISC | BPF_TXA:
106969- *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
106970- break;
106971-
106972- /* A = skb->len or X = skb->len */
106973- case BPF_LD | BPF_W | BPF_LEN:
106974- case BPF_LDX | BPF_W | BPF_LEN:
106975- *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
106976- BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
106977- offsetof(struct sk_buff, len));
106978- break;
106979-
106980- /* Access seccomp_data fields. */
106981- case BPF_LDX | BPF_ABS | BPF_W:
106982- /* A = *(u32 *) (ctx + K) */
106983- *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
106984- break;
106985-
106986-		/* Unknown instruction. */
106987+ return 0;
106988+ case BPF_S_LD_W_LEN:
106989+ A = skb->len;
106990+ continue;
106991+ case BPF_S_LDX_W_LEN:
106992+ X = skb->len;
106993+ continue;
106994+ case BPF_S_LD_W_IND:
106995+ k = X + K;
106996+ goto load_w;
106997+ case BPF_S_LD_H_IND:
106998+ k = X + K;
106999+ goto load_h;
107000+ case BPF_S_LD_B_IND:
107001+ k = X + K;
107002+ goto load_b;
107003+ case BPF_S_LDX_B_MSH:
107004+ ptr = load_pointer(skb, K, 1, &tmp);
107005+ if (ptr != NULL) {
107006+ X = (*(u8 *)ptr & 0xf) << 2;
107007+ continue;
107008+ }
107009+ return 0;
107010+ case BPF_S_LD_IMM:
107011+ A = K;
107012+ continue;
107013+ case BPF_S_LDX_IMM:
107014+ X = K;
107015+ continue;
107016+ case BPF_S_LD_MEM:
107017+ A = mem[K&15];
107018+ continue;
107019+ case BPF_S_LDX_MEM:
107020+ X = mem[K&15];
107021+ continue;
107022+ case BPF_S_MISC_TAX:
107023+ X = A;
107024+ continue;
107025+ case BPF_S_MISC_TXA:
107026+ A = X;
107027+ continue;
107028+ case BPF_S_RET_K:
107029+ return K;
107030+ case BPF_S_RET_A:
107031+ return A;
107032+ case BPF_S_ST:
107033+ mem[K&15] = A;
107034+ continue;
107035+ case BPF_S_STX:
107036+ mem[K&15] = X;
107037+ continue;
107038+ case BPF_S_ANC_PROTOCOL:
107039+ A = ntohs(skb->protocol);
107040+ continue;
107041+ case BPF_S_ANC_PKTTYPE:
107042+ A = skb->pkt_type;
107043+ continue;
107044+ case BPF_S_ANC_IFINDEX:
107045+ if (!skb->dev)
107046+ return 0;
107047+ A = skb->dev->ifindex;
107048+ continue;
107049+ case BPF_S_ANC_MARK:
107050+ A = skb->mark;
107051+ continue;
107052+ case BPF_S_ANC_QUEUE:
107053+ A = skb->queue_mapping;
107054+ continue;
107055+ case BPF_S_ANC_HATYPE:
107056+ if (!skb->dev)
107057+ return 0;
107058+ A = skb->dev->type;
107059+ continue;
107060+ case BPF_S_ANC_RXHASH:
107061+ A = skb->hash;
107062+ continue;
107063+ case BPF_S_ANC_CPU:
107064+ A = raw_smp_processor_id();
107065+ continue;
107066+ case BPF_S_ANC_VLAN_TAG:
107067+ A = vlan_tx_tag_get(skb);
107068+ continue;
107069+ case BPF_S_ANC_VLAN_TAG_PRESENT:
107070+ A = !!vlan_tx_tag_present(skb);
107071+ continue;
107072+ case BPF_S_ANC_PAY_OFFSET:
107073+ A = __skb_get_poff(skb);
107074+ continue;
107075+ case BPF_S_ANC_NLATTR: {
107076+ struct nlattr *nla;
107077+
107078+ if (skb_is_nonlinear(skb))
107079+ return 0;
107080+ if (skb->len < sizeof(struct nlattr))
107081+ return 0;
107082+ if (A > skb->len - sizeof(struct nlattr))
107083+ return 0;
107084+
107085+ nla = nla_find((struct nlattr *)&skb->data[A],
107086+ skb->len - A, X);
107087+ if (nla)
107088+ A = (void *)nla - (void *)skb->data;
107089+ else
107090+ A = 0;
107091+ continue;
107092+ }
107093+ case BPF_S_ANC_NLATTR_NEST: {
107094+ struct nlattr *nla;
107095+
107096+ if (skb_is_nonlinear(skb))
107097+ return 0;
107098+ if (skb->len < sizeof(struct nlattr))
107099+ return 0;
107100+ if (A > skb->len - sizeof(struct nlattr))
107101+ return 0;
107102+
107103+ nla = (struct nlattr *)&skb->data[A];
107104+ if (nla->nla_len > skb->len - A)
107105+ return 0;
107106+
107107+ nla = nla_find_nested(nla, X);
107108+ if (nla)
107109+ A = (void *)nla - (void *)skb->data;
107110+ else
107111+ A = 0;
107112+ continue;
107113+ }
107114+#ifdef CONFIG_SECCOMP_FILTER
107115+ case BPF_S_ANC_SECCOMP_LD_W:
107116+ A = seccomp_bpf_load(fentry->k);
107117+ continue;
107118+#endif
107119 default:
107120- goto err;
107121+			WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u jf:%u k:%u\n",
107122+ fentry->code, fentry->jt,
107123+ fentry->jf, fentry->k);
107124+ BUG();
107125+ return 0;
107126 }
107127-
107128- insn++;
107129- if (new_prog)
107130- memcpy(new_insn, tmp_insns,
107131- sizeof(*insn) * (insn - tmp_insns));
107132- new_insn += insn - tmp_insns;
107133- }
107134-
107135- if (!new_prog) {
107136- /* Only calculating new length. */
107137- *new_len = new_insn - new_prog;
107138- return 0;
107139- }
107140-
107141- pass++;
107142- if (new_flen != new_insn - new_prog) {
107143- new_flen = new_insn - new_prog;
107144- if (pass > 2)
107145- goto err;
107146- goto do_pass;
107147 }
107148
107149- kfree(addrs);
107150- BUG_ON(*new_len != new_flen);
107151 return 0;
107152-err:
107153- kfree(addrs);
107154- return -EINVAL;
107155 }
107156+EXPORT_SYMBOL(sk_run_filter);
107157
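For orientation, a classic program that this restored sk_run_filter() executes:
the textbook ethertype match (sketch; the offset 12 assumes the link-layer
header is visible at skb->data, as on packet sockets):

	#include <linux/filter.h>
	#include <linux/if_ether.h>

	static struct sock_filter ipv4_only[] = {
		{ BPF_LD  | BPF_H   | BPF_ABS, 0, 0, 12 },	 /* A = ethertype */
		{ BPF_JMP | BPF_JEQ | BPF_K,   0, 1, ETH_P_IP }, /* IPv4? */
		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },		 /* yes: accept */
		{ BPF_RET | BPF_K, 0, 0, 0 },			 /* no: drop */
	};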
107158-/* Security:
107159- *
107160+/*
107161+ * Security:
107162 * A BPF program is able to use 16 cells of memory to store intermediate
107163- * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
107164- *
107165+ * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter())
107166 * As we don't want to clear the mem[] array for each packet going through
107167 * sk_run_filter(), we check that a filter loaded by user space never tries to read
107168 * a cell if not previously written, and we check all branches to be sure
107169@@ -1096,46 +418,44 @@ err:
107170 */
107171 static int check_load_and_stores(struct sock_filter *filter, int flen)
107172 {
107173- u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
107174+ u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
107175 int pc, ret = 0;
107176
107177- BUILD_BUG_ON(BPF_MEMWORDS > 16);
107178-
107179- masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
107180+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
107181+ masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
107182 if (!masks)
107183 return -ENOMEM;
107184-
107185 memset(masks, 0xff, flen * sizeof(*masks));
107186
107187 for (pc = 0; pc < flen; pc++) {
107188 memvalid &= masks[pc];
107189
107190 switch (filter[pc].code) {
107191- case BPF_ST:
107192- case BPF_STX:
107193+ case BPF_S_ST:
107194+ case BPF_S_STX:
107195 memvalid |= (1 << filter[pc].k);
107196 break;
107197- case BPF_LD | BPF_MEM:
107198- case BPF_LDX | BPF_MEM:
107199+ case BPF_S_LD_MEM:
107200+ case BPF_S_LDX_MEM:
107201 if (!(memvalid & (1 << filter[pc].k))) {
107202 ret = -EINVAL;
107203 goto error;
107204 }
107205 break;
107206- case BPF_JMP | BPF_JA:
107207- /* A jump must set masks on target */
107208+ case BPF_S_JMP_JA:
107209+ /* a jump must set masks on target */
107210 masks[pc + 1 + filter[pc].k] &= memvalid;
107211 memvalid = ~0;
107212 break;
107213- case BPF_JMP | BPF_JEQ | BPF_K:
107214- case BPF_JMP | BPF_JEQ | BPF_X:
107215- case BPF_JMP | BPF_JGE | BPF_K:
107216- case BPF_JMP | BPF_JGE | BPF_X:
107217- case BPF_JMP | BPF_JGT | BPF_K:
107218- case BPF_JMP | BPF_JGT | BPF_X:
107219- case BPF_JMP | BPF_JSET | BPF_K:
107220- case BPF_JMP | BPF_JSET | BPF_X:
107221- /* A jump must set masks on targets */
107222+ case BPF_S_JMP_JEQ_K:
107223+ case BPF_S_JMP_JEQ_X:
107224+ case BPF_S_JMP_JGE_K:
107225+ case BPF_S_JMP_JGE_X:
107226+ case BPF_S_JMP_JGT_K:
107227+ case BPF_S_JMP_JGT_X:
107228+ case BPF_S_JMP_JSET_X:
107229+ case BPF_S_JMP_JSET_K:
107230+ /* a jump must set masks on targets */
107231 masks[pc + 1 + filter[pc].jt] &= memvalid;
107232 masks[pc + 1 + filter[pc].jf] &= memvalid;
107233 memvalid = ~0;
107234@@ -1147,72 +467,6 @@ error:
107235 return ret;
107236 }
107237
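A minimal program this check rejects is one that reads a scratch cell before any
instruction has stored to it (sketch):

	static struct sock_filter bad[] = {
		{ BPF_LD  | BPF_MEM, 0, 0, 0 },	/* A = mem[0], never written */
		{ BPF_RET | BPF_A,   0, 0, 0 },
	};
	/* sk_chk_filter(bad, 2) fails with -EINVAL via check_load_and_stores() */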
107238-static bool chk_code_allowed(u16 code_to_probe)
107239-{
107240- static const bool codes[] = {
107241- /* 32 bit ALU operations */
107242- [BPF_ALU | BPF_ADD | BPF_K] = true,
107243- [BPF_ALU | BPF_ADD | BPF_X] = true,
107244- [BPF_ALU | BPF_SUB | BPF_K] = true,
107245- [BPF_ALU | BPF_SUB | BPF_X] = true,
107246- [BPF_ALU | BPF_MUL | BPF_K] = true,
107247- [BPF_ALU | BPF_MUL | BPF_X] = true,
107248- [BPF_ALU | BPF_DIV | BPF_K] = true,
107249- [BPF_ALU | BPF_DIV | BPF_X] = true,
107250- [BPF_ALU | BPF_MOD | BPF_K] = true,
107251- [BPF_ALU | BPF_MOD | BPF_X] = true,
107252- [BPF_ALU | BPF_AND | BPF_K] = true,
107253- [BPF_ALU | BPF_AND | BPF_X] = true,
107254- [BPF_ALU | BPF_OR | BPF_K] = true,
107255- [BPF_ALU | BPF_OR | BPF_X] = true,
107256- [BPF_ALU | BPF_XOR | BPF_K] = true,
107257- [BPF_ALU | BPF_XOR | BPF_X] = true,
107258- [BPF_ALU | BPF_LSH | BPF_K] = true,
107259- [BPF_ALU | BPF_LSH | BPF_X] = true,
107260- [BPF_ALU | BPF_RSH | BPF_K] = true,
107261- [BPF_ALU | BPF_RSH | BPF_X] = true,
107262- [BPF_ALU | BPF_NEG] = true,
107263- /* Load instructions */
107264- [BPF_LD | BPF_W | BPF_ABS] = true,
107265- [BPF_LD | BPF_H | BPF_ABS] = true,
107266- [BPF_LD | BPF_B | BPF_ABS] = true,
107267- [BPF_LD | BPF_W | BPF_LEN] = true,
107268- [BPF_LD | BPF_W | BPF_IND] = true,
107269- [BPF_LD | BPF_H | BPF_IND] = true,
107270- [BPF_LD | BPF_B | BPF_IND] = true,
107271- [BPF_LD | BPF_IMM] = true,
107272- [BPF_LD | BPF_MEM] = true,
107273- [BPF_LDX | BPF_W | BPF_LEN] = true,
107274- [BPF_LDX | BPF_B | BPF_MSH] = true,
107275- [BPF_LDX | BPF_IMM] = true,
107276- [BPF_LDX | BPF_MEM] = true,
107277- /* Store instructions */
107278- [BPF_ST] = true,
107279- [BPF_STX] = true,
107280- /* Misc instructions */
107281- [BPF_MISC | BPF_TAX] = true,
107282- [BPF_MISC | BPF_TXA] = true,
107283- /* Return instructions */
107284- [BPF_RET | BPF_K] = true,
107285- [BPF_RET | BPF_A] = true,
107286- /* Jump instructions */
107287- [BPF_JMP | BPF_JA] = true,
107288- [BPF_JMP | BPF_JEQ | BPF_K] = true,
107289- [BPF_JMP | BPF_JEQ | BPF_X] = true,
107290- [BPF_JMP | BPF_JGE | BPF_K] = true,
107291- [BPF_JMP | BPF_JGE | BPF_X] = true,
107292- [BPF_JMP | BPF_JGT | BPF_K] = true,
107293- [BPF_JMP | BPF_JGT | BPF_X] = true,
107294- [BPF_JMP | BPF_JSET | BPF_K] = true,
107295- [BPF_JMP | BPF_JSET | BPF_X] = true,
107296- };
107297-
107298- if (code_to_probe >= ARRAY_SIZE(codes))
107299- return false;
107300-
107301- return codes[code_to_probe];
107302-}
107303-
107304 /**
107305 * sk_chk_filter - verify socket filter code
107306 * @filter: filter to verify
107307@@ -1229,303 +483,187 @@ static bool chk_code_allowed(u16 code_to_probe)
107308 */
107309 int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
107310 {
107311- bool anc_found;
107312+ /*
107313+	/*
107314+	 * Valid instructions map to a non-zero internal code.
107315+	 * Invalid instructions are left as 0.
107316+ static const u8 codes[] = {
107317+ [BPF_ALU|BPF_ADD|BPF_K] = BPF_S_ALU_ADD_K,
107318+ [BPF_ALU|BPF_ADD|BPF_X] = BPF_S_ALU_ADD_X,
107319+ [BPF_ALU|BPF_SUB|BPF_K] = BPF_S_ALU_SUB_K,
107320+ [BPF_ALU|BPF_SUB|BPF_X] = BPF_S_ALU_SUB_X,
107321+ [BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K,
107322+ [BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X,
107323+ [BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X,
107324+ [BPF_ALU|BPF_MOD|BPF_K] = BPF_S_ALU_MOD_K,
107325+ [BPF_ALU|BPF_MOD|BPF_X] = BPF_S_ALU_MOD_X,
107326+ [BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K,
107327+ [BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X,
107328+ [BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K,
107329+ [BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X,
107330+ [BPF_ALU|BPF_XOR|BPF_K] = BPF_S_ALU_XOR_K,
107331+ [BPF_ALU|BPF_XOR|BPF_X] = BPF_S_ALU_XOR_X,
107332+ [BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K,
107333+ [BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X,
107334+ [BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K,
107335+ [BPF_ALU|BPF_RSH|BPF_X] = BPF_S_ALU_RSH_X,
107336+ [BPF_ALU|BPF_NEG] = BPF_S_ALU_NEG,
107337+ [BPF_LD|BPF_W|BPF_ABS] = BPF_S_LD_W_ABS,
107338+ [BPF_LD|BPF_H|BPF_ABS] = BPF_S_LD_H_ABS,
107339+ [BPF_LD|BPF_B|BPF_ABS] = BPF_S_LD_B_ABS,
107340+ [BPF_LD|BPF_W|BPF_LEN] = BPF_S_LD_W_LEN,
107341+ [BPF_LD|BPF_W|BPF_IND] = BPF_S_LD_W_IND,
107342+ [BPF_LD|BPF_H|BPF_IND] = BPF_S_LD_H_IND,
107343+ [BPF_LD|BPF_B|BPF_IND] = BPF_S_LD_B_IND,
107344+ [BPF_LD|BPF_IMM] = BPF_S_LD_IMM,
107345+ [BPF_LDX|BPF_W|BPF_LEN] = BPF_S_LDX_W_LEN,
107346+ [BPF_LDX|BPF_B|BPF_MSH] = BPF_S_LDX_B_MSH,
107347+ [BPF_LDX|BPF_IMM] = BPF_S_LDX_IMM,
107348+ [BPF_MISC|BPF_TAX] = BPF_S_MISC_TAX,
107349+ [BPF_MISC|BPF_TXA] = BPF_S_MISC_TXA,
107350+ [BPF_RET|BPF_K] = BPF_S_RET_K,
107351+ [BPF_RET|BPF_A] = BPF_S_RET_A,
107352+ [BPF_ALU|BPF_DIV|BPF_K] = BPF_S_ALU_DIV_K,
107353+ [BPF_LD|BPF_MEM] = BPF_S_LD_MEM,
107354+ [BPF_LDX|BPF_MEM] = BPF_S_LDX_MEM,
107355+ [BPF_ST] = BPF_S_ST,
107356+ [BPF_STX] = BPF_S_STX,
107357+ [BPF_JMP|BPF_JA] = BPF_S_JMP_JA,
107358+ [BPF_JMP|BPF_JEQ|BPF_K] = BPF_S_JMP_JEQ_K,
107359+ [BPF_JMP|BPF_JEQ|BPF_X] = BPF_S_JMP_JEQ_X,
107360+ [BPF_JMP|BPF_JGE|BPF_K] = BPF_S_JMP_JGE_K,
107361+ [BPF_JMP|BPF_JGE|BPF_X] = BPF_S_JMP_JGE_X,
107362+ [BPF_JMP|BPF_JGT|BPF_K] = BPF_S_JMP_JGT_K,
107363+ [BPF_JMP|BPF_JGT|BPF_X] = BPF_S_JMP_JGT_X,
107364+ [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
107365+ [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
107366+ };
107367 int pc;
107368+ bool anc_found;
107369
107370 if (flen == 0 || flen > BPF_MAXINSNS)
107371 return -EINVAL;
107372
107373- /* Check the filter code now */
107374+ /* check the filter code now */
107375 for (pc = 0; pc < flen; pc++) {
107376 struct sock_filter *ftest = &filter[pc];
107377+ u16 code = ftest->code;
107378
107379- /* May we actually operate on this code? */
107380- if (!chk_code_allowed(ftest->code))
107381+ if (code >= ARRAY_SIZE(codes))
107382+ return -EINVAL;
107383+ code = codes[code];
107384+ if (!code)
107385 return -EINVAL;
107386-
107387 /* Some instructions need special checks */
107388- switch (ftest->code) {
107389- case BPF_ALU | BPF_DIV | BPF_K:
107390- case BPF_ALU | BPF_MOD | BPF_K:
107391- /* Check for division by zero */
107392+ switch (code) {
107393+ case BPF_S_ALU_DIV_K:
107394+ case BPF_S_ALU_MOD_K:
107395+ /* check for division by zero */
107396 if (ftest->k == 0)
107397 return -EINVAL;
107398 break;
107399- case BPF_LD | BPF_MEM:
107400- case BPF_LDX | BPF_MEM:
107401- case BPF_ST:
107402- case BPF_STX:
107403- /* Check for invalid memory addresses */
107404+ case BPF_S_LD_MEM:
107405+ case BPF_S_LDX_MEM:
107406+ case BPF_S_ST:
107407+ case BPF_S_STX:
107408+ /* check for invalid memory addresses */
107409 if (ftest->k >= BPF_MEMWORDS)
107410 return -EINVAL;
107411 break;
107412- case BPF_JMP | BPF_JA:
107413- /* Note, the large ftest->k might cause loops.
107414+ case BPF_S_JMP_JA:
107415+ /*
107416+ * Note, the large ftest->k might cause loops.
107417 * Compare this with conditional jumps below,
107418 * where offsets are limited. --ANK (981016)
107419 */
107420- if (ftest->k >= (unsigned int)(flen - pc - 1))
107421+ if (ftest->k >= (unsigned int)(flen-pc-1))
107422 return -EINVAL;
107423 break;
107424- case BPF_JMP | BPF_JEQ | BPF_K:
107425- case BPF_JMP | BPF_JEQ | BPF_X:
107426- case BPF_JMP | BPF_JGE | BPF_K:
107427- case BPF_JMP | BPF_JGE | BPF_X:
107428- case BPF_JMP | BPF_JGT | BPF_K:
107429- case BPF_JMP | BPF_JGT | BPF_X:
107430- case BPF_JMP | BPF_JSET | BPF_K:
107431- case BPF_JMP | BPF_JSET | BPF_X:
107432- /* Both conditionals must be safe */
107433+ case BPF_S_JMP_JEQ_K:
107434+ case BPF_S_JMP_JEQ_X:
107435+ case BPF_S_JMP_JGE_K:
107436+ case BPF_S_JMP_JGE_X:
107437+ case BPF_S_JMP_JGT_K:
107438+ case BPF_S_JMP_JGT_X:
107439+ case BPF_S_JMP_JSET_X:
107440+ case BPF_S_JMP_JSET_K:
107441+ /* for conditionals both must be safe */
107442 if (pc + ftest->jt + 1 >= flen ||
107443 pc + ftest->jf + 1 >= flen)
107444 return -EINVAL;
107445 break;
107446- case BPF_LD | BPF_W | BPF_ABS:
107447- case BPF_LD | BPF_H | BPF_ABS:
107448- case BPF_LD | BPF_B | BPF_ABS:
107449+ case BPF_S_LD_W_ABS:
107450+ case BPF_S_LD_H_ABS:
107451+ case BPF_S_LD_B_ABS:
107452 anc_found = false;
107453- if (bpf_anc_helper(ftest) & BPF_ANC)
107454- anc_found = true;
107455- /* Ancillary operation unknown or unsupported */
107456+#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \
107457+ code = BPF_S_ANC_##CODE; \
107458+ anc_found = true; \
107459+ break
107460+ switch (ftest->k) {
107461+ ANCILLARY(PROTOCOL);
107462+ ANCILLARY(PKTTYPE);
107463+ ANCILLARY(IFINDEX);
107464+ ANCILLARY(NLATTR);
107465+ ANCILLARY(NLATTR_NEST);
107466+ ANCILLARY(MARK);
107467+ ANCILLARY(QUEUE);
107468+ ANCILLARY(HATYPE);
107469+ ANCILLARY(RXHASH);
107470+ ANCILLARY(CPU);
107471+ ANCILLARY(ALU_XOR_X);
107472+ ANCILLARY(VLAN_TAG);
107473+ ANCILLARY(VLAN_TAG_PRESENT);
107474+ ANCILLARY(PAY_OFFSET);
107475+ }
107476+
107477+ /* ancillary operation unknown or unsupported */
107478 if (anc_found == false && ftest->k >= SKF_AD_OFF)
107479 return -EINVAL;
107480 }
107481+ ftest->code = code;
107482 }
107483
107484- /* Last instruction must be a RET code */
107485+ /* last instruction must be a RET code */
107486 switch (filter[flen - 1].code) {
107487- case BPF_RET | BPF_K:
107488- case BPF_RET | BPF_A:
107489+ case BPF_S_RET_K:
107490+ case BPF_S_RET_A:
107491 return check_load_and_stores(filter, flen);
107492 }
107493-
107494 return -EINVAL;
107495 }
107496 EXPORT_SYMBOL(sk_chk_filter);
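The ANCILLARY() helper used in sk_chk_filter() above expands to an ordinary case
label; for instance ANCILLARY(PROTOCOL) becomes:

	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		code = BPF_S_ANC_PROTOCOL;
		anc_found = true;
		break;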
107497
107498-static int sk_store_orig_filter(struct sk_filter *fp,
107499- const struct sock_fprog *fprog)
107500-{
107501- unsigned int fsize = sk_filter_proglen(fprog);
107502- struct sock_fprog_kern *fkprog;
107503-
107504- fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
107505- if (!fp->orig_prog)
107506- return -ENOMEM;
107507-
107508- fkprog = fp->orig_prog;
107509- fkprog->len = fprog->len;
107510- fkprog->filter = kmemdup(fp->insns, fsize, GFP_KERNEL);
107511- if (!fkprog->filter) {
107512- kfree(fp->orig_prog);
107513- return -ENOMEM;
107514- }
107515-
107516- return 0;
107517-}
107518-
107519-static void sk_release_orig_filter(struct sk_filter *fp)
107520-{
107521- struct sock_fprog_kern *fprog = fp->orig_prog;
107522-
107523- if (fprog) {
107524- kfree(fprog->filter);
107525- kfree(fprog);
107526- }
107527-}
107528-
107529 /**
107530 * sk_filter_release_rcu - Release a socket filter by rcu_head
107531 * @rcu: rcu_head that contains the sk_filter to free
107532 */
107533-static void sk_filter_release_rcu(struct rcu_head *rcu)
107534+void sk_filter_release_rcu(struct rcu_head *rcu)
107535 {
107536 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
107537
107538- sk_release_orig_filter(fp);
107539- sk_filter_free(fp);
107540-}
107541-
107542-/**
107543- * sk_filter_release - release a socket filter
107544- * @fp: filter to remove
107545- *
107546- * Remove a filter from a socket and release its resources.
107547- */
107548-static void sk_filter_release(struct sk_filter *fp)
107549-{
107550- if (atomic_dec_and_test(&fp->refcnt))
107551- call_rcu(&fp->rcu, sk_filter_release_rcu);
107552-}
107553-
107554-void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
107555-{
107556- atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
107557- sk_filter_release(fp);
107558-}
107559-
107560-void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
107561-{
107562- atomic_inc(&fp->refcnt);
107563- atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
107564-}
107565-
107566-static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
107567- struct sock *sk,
107568- unsigned int len)
107569-{
107570- struct sk_filter *fp_new;
107571-
107572- if (sk == NULL)
107573- return krealloc(fp, len, GFP_KERNEL);
107574-
107575- fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
107576- if (fp_new) {
107577- *fp_new = *fp;
107578- /* As we're keeping orig_prog in fp_new along,
107579- * we need to make sure we're not evicting it
107580- * from the old fp.
107581- */
107582- fp->orig_prog = NULL;
107583- sk_filter_uncharge(sk, fp);
107584- }
107585-
107586- return fp_new;
107587-}
107588-
107589-static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
107590- struct sock *sk)
107591-{
107592- struct sock_filter *old_prog;
107593- struct sk_filter *old_fp;
107594- int err, new_len, old_len = fp->len;
107595-
107596- /* We are free to overwrite insns et al right here as it
107597- * won't be used at this point in time anymore internally
107598- * after the migration to the internal BPF instruction
107599- * representation.
107600- */
107601- BUILD_BUG_ON(sizeof(struct sock_filter) !=
107602- sizeof(struct sock_filter_int));
107603-
107604- /* Conversion cannot happen on overlapping memory areas,
107605- * so we need to keep the user BPF around until the 2nd
107606- * pass. At this time, the user BPF is stored in fp->insns.
107607- */
107608- old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
107609- GFP_KERNEL);
107610- if (!old_prog) {
107611- err = -ENOMEM;
107612- goto out_err;
107613- }
107614-
107615- /* 1st pass: calculate the new program length. */
107616- err = sk_convert_filter(old_prog, old_len, NULL, &new_len);
107617- if (err)
107618- goto out_err_free;
107619-
107620- /* Expand fp for appending the new filter representation. */
107621- old_fp = fp;
107622- fp = __sk_migrate_realloc(old_fp, sk, sk_filter_size(new_len));
107623- if (!fp) {
107624- /* The old_fp is still around in case we couldn't
107625- * allocate new memory, so uncharge on that one.
107626- */
107627- fp = old_fp;
107628- err = -ENOMEM;
107629- goto out_err_free;
107630- }
107631-
107632- fp->len = new_len;
107633-
107634- /* 2nd pass: remap sock_filter insns into sock_filter_int insns. */
107635- err = sk_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
107636- if (err)
107637- /* 2nd sk_convert_filter() can fail only if it fails
107638- * to allocate memory, remapping must succeed. Note,
107639- * that at this time old_fp has already been released
107640- * by __sk_migrate_realloc().
107641- */
107642- goto out_err_free;
107643-
107644- sk_filter_select_runtime(fp);
107645-
107646- kfree(old_prog);
107647- return fp;
107648-
107649-out_err_free:
107650- kfree(old_prog);
107651-out_err:
107652- /* Rollback filter setup. */
107653- if (sk != NULL)
107654- sk_filter_uncharge(sk, fp);
107655- else
107656- kfree(fp);
107657- return ERR_PTR(err);
107658-}
107659-
107660-void __weak bpf_int_jit_compile(struct sk_filter *prog)
107661-{
107662-}
107663-
107664-/**
107665- * sk_filter_select_runtime - select execution runtime for BPF program
107666- * @fp: sk_filter populated with internal BPF program
107667- *
107668- * try to JIT internal BPF program, if JIT is not available select interpreter
107669- * BPF program will be executed via SK_RUN_FILTER() macro
107670- */
107671-void sk_filter_select_runtime(struct sk_filter *fp)
107672-{
107673- fp->bpf_func = (void *) __sk_run_filter;
107674-
107675- /* Probe if internal BPF can be JITed */
107676- bpf_int_jit_compile(fp);
107677-}
107678-EXPORT_SYMBOL_GPL(sk_filter_select_runtime);
107679-
107680-/* free internal BPF program */
107681-void sk_filter_free(struct sk_filter *fp)
107682-{
107683 bpf_jit_free(fp);
107684 }
107685-EXPORT_SYMBOL_GPL(sk_filter_free);
107686+EXPORT_SYMBOL(sk_filter_release_rcu);
107687
107688-static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
107689- struct sock *sk)
107690+static int __sk_prepare_filter(struct sk_filter *fp)
107691 {
107692 int err;
107693
107694- fp->bpf_func = NULL;
107695- fp->jited = 0;
107696+ fp->bpf_func = sk_run_filter;
107697
107698 err = sk_chk_filter(fp->insns, fp->len);
107699- if (err) {
107700- if (sk != NULL)
107701- sk_filter_uncharge(sk, fp);
107702- else
107703- kfree(fp);
107704- return ERR_PTR(err);
107705- }
107706+ if (err)
107707+ return err;
107708
107709- /* Probe if we can JIT compile the filter and if so, do
107710- * the compilation of the filter.
107711- */
107712 bpf_jit_compile(fp);
107713-
107714- /* JIT compiler couldn't process this filter, so do the
107715- * internal BPF translation for the optimized interpreter.
107716- */
107717- if (!fp->jited)
107718- fp = __sk_migrate_filter(fp, sk);
107719-
107720- return fp;
107721+ return 0;
107722 }
107723
107724 /**
107725 * sk_unattached_filter_create - create an unattached filter
107726+ * @fprog: the filter program
107727 * @pfp: the unattached filter that is created
107728- * @fprog: the filter program
107729 *
107730 * Create a filter independent of any socket. We first run some
107731 * sanity checks on it to make sure it does not explode on us later.
107732@@ -1533,10 +671,11 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
107733 * a negative errno code is returned. On success the return is zero.
107734 */
107735 int sk_unattached_filter_create(struct sk_filter **pfp,
107736- struct sock_fprog_kern *fprog)
107737+ struct sock_fprog *fprog)
107738 {
107739- unsigned int fsize = sk_filter_proglen(fprog);
107740 struct sk_filter *fp;
107741+ unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
107742+ int err;
107743
107744 /* Make sure new filter is there and in the right amounts. */
107745 if (fprog->filter == NULL)
107746@@ -1545,26 +684,20 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
107747 fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
107748 if (!fp)
107749 return -ENOMEM;
107750-
107751- memcpy(fp->insns, fprog->filter, fsize);
107752+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
107753
107754 atomic_set(&fp->refcnt, 1);
107755 fp->len = fprog->len;
107756- /* Since unattached filters are not copied back to user
107757- * space through sk_get_filter(), we do not need to hold
107758- * a copy here, and can spare us the work.
107759- */
107760- fp->orig_prog = NULL;
107761
107762- /* __sk_prepare_filter() already takes care of uncharging
107763- * memory in case something goes wrong.
107764- */
107765- fp = __sk_prepare_filter(fp, NULL);
107766- if (IS_ERR(fp))
107767- return PTR_ERR(fp);
107768+ err = __sk_prepare_filter(fp);
107769+ if (err)
107770+ goto free_mem;
107771
107772 *pfp = fp;
107773 return 0;
107774+free_mem:
107775+ kfree(fp);
107776+ return err;
107777 }
107778 EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
107779
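Typical in-kernel use of the unattached path (sketch only; error handling
trimmed, with 'prog' standing for any validated classic program such as the
ipv4_only example earlier and 'skb' for the packet under test):

	struct sock_fprog fprog = { .len = ARRAY_SIZE(prog), .filter = prog };
	struct sk_filter *fp;

	if (sk_unattached_filter_create(&fp, &fprog) == 0) {
		unsigned int keep = SK_RUN_FILTER(fp, skb); /* bytes to keep, 0 == drop */
		sk_unattached_filter_destroy(fp);
	}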
107780@@ -1587,7 +720,7 @@ EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
107781 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
107782 {
107783 struct sk_filter *fp, *old_fp;
107784- unsigned int fsize = sk_filter_proglen(fprog);
107785+ unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
107786 unsigned int sk_fsize = sk_filter_size(fprog->len);
107787 int err;
107788
107789@@ -1601,7 +734,6 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
107790 fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
107791 if (!fp)
107792 return -ENOMEM;
107793-
107794 if (copy_from_user(fp->insns, fprog->filter, fsize)) {
107795 sock_kfree_s(sk, fp, sk_fsize);
107796 return -EFAULT;
107797@@ -1610,26 +742,18 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
107798 atomic_set(&fp->refcnt, 1);
107799 fp->len = fprog->len;
107800
107801- err = sk_store_orig_filter(fp, fprog);
107802+ err = __sk_prepare_filter(fp);
107803 if (err) {
107804 sk_filter_uncharge(sk, fp);
107805- return -ENOMEM;
107806+ return err;
107807 }
107808
107809- /* __sk_prepare_filter() already takes care of uncharging
107810- * memory in case something goes wrong.
107811- */
107812- fp = __sk_prepare_filter(fp, sk);
107813- if (IS_ERR(fp))
107814- return PTR_ERR(fp);
107815-
107816 old_fp = rcu_dereference_protected(sk->sk_filter,
107817 sock_owned_by_user(sk));
107818 rcu_assign_pointer(sk->sk_filter, fp);
107819
107820 if (old_fp)
107821 sk_filter_uncharge(sk, old_fp);
107822-
107823 return 0;
107824 }
107825 EXPORT_SYMBOL_GPL(sk_attach_filter);
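sk_attach_filter() is the kernel half of the socket-filter API; from user space
the same program is attached with setsockopt() (runnable sketch; attach_filter()
is a hypothetical helper name):

	#include <sys/socket.h>
	#include <linux/filter.h>

	static int attach_filter(int fd, struct sock_filter *insns, unsigned short len)
	{
		struct sock_fprog fprog = { .len = len, .filter = insns };

		return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
				  &fprog, sizeof(fprog));
	}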
107826@@ -1649,46 +773,116 @@ int sk_detach_filter(struct sock *sk)
107827 sk_filter_uncharge(sk, filter);
107828 ret = 0;
107829 }
107830-
107831 return ret;
107832 }
107833 EXPORT_SYMBOL_GPL(sk_detach_filter);
107834
107835-int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
107836- unsigned int len)
107837+void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
107838+{
107839+ static const u16 decodes[] = {
107840+ [BPF_S_ALU_ADD_K] = BPF_ALU|BPF_ADD|BPF_K,
107841+ [BPF_S_ALU_ADD_X] = BPF_ALU|BPF_ADD|BPF_X,
107842+ [BPF_S_ALU_SUB_K] = BPF_ALU|BPF_SUB|BPF_K,
107843+ [BPF_S_ALU_SUB_X] = BPF_ALU|BPF_SUB|BPF_X,
107844+ [BPF_S_ALU_MUL_K] = BPF_ALU|BPF_MUL|BPF_K,
107845+ [BPF_S_ALU_MUL_X] = BPF_ALU|BPF_MUL|BPF_X,
107846+ [BPF_S_ALU_DIV_X] = BPF_ALU|BPF_DIV|BPF_X,
107847+ [BPF_S_ALU_MOD_K] = BPF_ALU|BPF_MOD|BPF_K,
107848+ [BPF_S_ALU_MOD_X] = BPF_ALU|BPF_MOD|BPF_X,
107849+ [BPF_S_ALU_AND_K] = BPF_ALU|BPF_AND|BPF_K,
107850+ [BPF_S_ALU_AND_X] = BPF_ALU|BPF_AND|BPF_X,
107851+ [BPF_S_ALU_OR_K] = BPF_ALU|BPF_OR|BPF_K,
107852+ [BPF_S_ALU_OR_X] = BPF_ALU|BPF_OR|BPF_X,
107853+ [BPF_S_ALU_XOR_K] = BPF_ALU|BPF_XOR|BPF_K,
107854+ [BPF_S_ALU_XOR_X] = BPF_ALU|BPF_XOR|BPF_X,
107855+ [BPF_S_ALU_LSH_K] = BPF_ALU|BPF_LSH|BPF_K,
107856+ [BPF_S_ALU_LSH_X] = BPF_ALU|BPF_LSH|BPF_X,
107857+ [BPF_S_ALU_RSH_K] = BPF_ALU|BPF_RSH|BPF_K,
107858+ [BPF_S_ALU_RSH_X] = BPF_ALU|BPF_RSH|BPF_X,
107859+ [BPF_S_ALU_NEG] = BPF_ALU|BPF_NEG,
107860+ [BPF_S_LD_W_ABS] = BPF_LD|BPF_W|BPF_ABS,
107861+ [BPF_S_LD_H_ABS] = BPF_LD|BPF_H|BPF_ABS,
107862+ [BPF_S_LD_B_ABS] = BPF_LD|BPF_B|BPF_ABS,
107863+ [BPF_S_ANC_PROTOCOL] = BPF_LD|BPF_B|BPF_ABS,
107864+ [BPF_S_ANC_PKTTYPE] = BPF_LD|BPF_B|BPF_ABS,
107865+ [BPF_S_ANC_IFINDEX] = BPF_LD|BPF_B|BPF_ABS,
107866+ [BPF_S_ANC_NLATTR] = BPF_LD|BPF_B|BPF_ABS,
107867+ [BPF_S_ANC_NLATTR_NEST] = BPF_LD|BPF_B|BPF_ABS,
107868+ [BPF_S_ANC_MARK] = BPF_LD|BPF_B|BPF_ABS,
107869+ [BPF_S_ANC_QUEUE] = BPF_LD|BPF_B|BPF_ABS,
107870+ [BPF_S_ANC_HATYPE] = BPF_LD|BPF_B|BPF_ABS,
107871+ [BPF_S_ANC_RXHASH] = BPF_LD|BPF_B|BPF_ABS,
107872+ [BPF_S_ANC_CPU] = BPF_LD|BPF_B|BPF_ABS,
107873+ [BPF_S_ANC_ALU_XOR_X] = BPF_LD|BPF_B|BPF_ABS,
107874+ [BPF_S_ANC_SECCOMP_LD_W] = BPF_LD|BPF_B|BPF_ABS,
107875+ [BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS,
107876+ [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
107877+ [BPF_S_ANC_PAY_OFFSET] = BPF_LD|BPF_B|BPF_ABS,
107878+ [BPF_S_LD_W_LEN] = BPF_LD|BPF_W|BPF_LEN,
107879+ [BPF_S_LD_W_IND] = BPF_LD|BPF_W|BPF_IND,
107880+ [BPF_S_LD_H_IND] = BPF_LD|BPF_H|BPF_IND,
107881+ [BPF_S_LD_B_IND] = BPF_LD|BPF_B|BPF_IND,
107882+ [BPF_S_LD_IMM] = BPF_LD|BPF_IMM,
107883+ [BPF_S_LDX_W_LEN] = BPF_LDX|BPF_W|BPF_LEN,
107884+ [BPF_S_LDX_B_MSH] = BPF_LDX|BPF_B|BPF_MSH,
107885+ [BPF_S_LDX_IMM] = BPF_LDX|BPF_IMM,
107886+ [BPF_S_MISC_TAX] = BPF_MISC|BPF_TAX,
107887+ [BPF_S_MISC_TXA] = BPF_MISC|BPF_TXA,
107888+ [BPF_S_RET_K] = BPF_RET|BPF_K,
107889+ [BPF_S_RET_A] = BPF_RET|BPF_A,
107890+ [BPF_S_ALU_DIV_K] = BPF_ALU|BPF_DIV|BPF_K,
107891+ [BPF_S_LD_MEM] = BPF_LD|BPF_MEM,
107892+ [BPF_S_LDX_MEM] = BPF_LDX|BPF_MEM,
107893+ [BPF_S_ST] = BPF_ST,
107894+ [BPF_S_STX] = BPF_STX,
107895+ [BPF_S_JMP_JA] = BPF_JMP|BPF_JA,
107896+ [BPF_S_JMP_JEQ_K] = BPF_JMP|BPF_JEQ|BPF_K,
107897+ [BPF_S_JMP_JEQ_X] = BPF_JMP|BPF_JEQ|BPF_X,
107898+ [BPF_S_JMP_JGE_K] = BPF_JMP|BPF_JGE|BPF_K,
107899+ [BPF_S_JMP_JGE_X] = BPF_JMP|BPF_JGE|BPF_X,
107900+ [BPF_S_JMP_JGT_K] = BPF_JMP|BPF_JGT|BPF_K,
107901+ [BPF_S_JMP_JGT_X] = BPF_JMP|BPF_JGT|BPF_X,
107902+ [BPF_S_JMP_JSET_K] = BPF_JMP|BPF_JSET|BPF_K,
107903+ [BPF_S_JMP_JSET_X] = BPF_JMP|BPF_JSET|BPF_X,
107904+ };
107905+ u16 code;
107906+
107907+ code = filt->code;
107908+
107909+ to->code = decodes[code];
107910+ to->jt = filt->jt;
107911+ to->jf = filt->jf;
107912+ to->k = filt->k;
107913+}
107914+
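The decode table exists because sk_chk_filter() rewrites ftest->code in place to
the internal BPF_S_* values; sk_decode_filter() maps them back so user space
reads out the opcodes it originally loaded. Note that every BPF_S_ANC_* entry
deliberately decodes to a plain BPF_LD|BPF_B|BPF_ABS, hiding the ancillary
rewrite. Round trip (sketch):

	struct sock_filter in = { BPF_S_ALU_ADD_K, 0, 0, 1 }, out;	/* post-check code */

	sk_decode_filter(&in, &out);
	/* out.code == (BPF_ALU | BPF_ADD | BPF_K); jt, jf and k are copied */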
107915+int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len)
107916 {
107917- struct sock_fprog_kern *fprog;
107918 struct sk_filter *filter;
107919- int ret = 0;
107920+ int i, ret;
107921
107922 lock_sock(sk);
107923 filter = rcu_dereference_protected(sk->sk_filter,
107924- sock_owned_by_user(sk));
107925+ sock_owned_by_user(sk));
107926+ ret = 0;
107927 if (!filter)
107928 goto out;
107929-
107930- /* We're copying the filter that has been originally attached,
107931- * so no conversion/decode needed anymore.
107932- */
107933- fprog = filter->orig_prog;
107934-
107935- ret = fprog->len;
107936+ ret = filter->len;
107937 if (!len)
107938- /* User space only enquires number of filter blocks. */
107939 goto out;
107940-
107941 ret = -EINVAL;
107942- if (len < fprog->len)
107943+ if (len < filter->len)
107944 goto out;
107945
107946 ret = -EFAULT;
107947- if (copy_to_user(ubuf, fprog->filter, sk_filter_proglen(fprog)))
107948- goto out;
107949+ for (i = 0; i < filter->len; i++) {
107950+ struct sock_filter fb;
107951
107952- /* Instead of bytes, the API requests to return the number
107953- * of filter blocks.
107954- */
107955- ret = fprog->len;
107956+ sk_decode_filter(&filter->insns[i], &fb);
107957+ if (copy_to_user(&ubuf[i], &fb, sizeof(fb)))
107958+ goto out;
107959+ }
107960+
107961+ ret = filter->len;
107962 out:
107963 release_sock(sk);
107964 return ret;
107965diff --git a/net/core/flow.c b/net/core/flow.c
107966index a0348fd..6951c76 100644
107967--- a/net/core/flow.c
107968+++ b/net/core/flow.c
107969@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
107970 static int flow_entry_valid(struct flow_cache_entry *fle,
107971 struct netns_xfrm *xfrm)
107972 {
107973- if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
107974+ if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
107975 return 0;
107976 if (fle->object && !fle->object->ops->check(fle->object))
107977 return 0;
107978@@ -242,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
107979 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
107980 fcp->hash_count++;
107981 }
107982- } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
107983+ } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
107984 flo = fle->object;
107985 if (!flo)
107986 goto ret_object;
107987@@ -263,7 +263,7 @@ nocache:
107988 }
107989 flo = resolver(net, key, family, dir, flo, ctx);
107990 if (fle) {
107991- fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
107992+ fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
107993 if (!IS_ERR(flo))
107994 fle->object = flo;
107995 else
107996diff --git a/net/core/iovec.c b/net/core/iovec.c
107997index e1ec45a..e5c6f16 100644
107998--- a/net/core/iovec.c
107999+++ b/net/core/iovec.c
108000@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
108001 if (m->msg_name && m->msg_namelen) {
108002 if (mode == VERIFY_READ) {
108003 void __user *namep;
108004- namep = (void __user __force *) m->msg_name;
108005+ namep = (void __force_user *) m->msg_name;
108006 err = move_addr_to_kernel(namep, m->msg_namelen,
108007 address);
108008 if (err < 0)
108009@@ -55,7 +55,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
108010 }
108011
108012 size = m->msg_iovlen * sizeof(struct iovec);
108013- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
108014+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
108015 return -EFAULT;
108016
108017 m->msg_iov = iov;
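__force_user (and its counterpart __force_kernel, used in net/core/scm.c and ip_sockglue.c further down) are sparse-only annotations introduced by this patch: plain __force silences the address-space warning on a deliberate cast, while the combined form keeps the target address space visible at the cast site. A sketch of the definitions, inferred from their use in these hunks rather than copied from the patched compiler.h:

/* No effect on generated code; only the sparse static checker sees these. */
#ifdef __CHECKER__
# define __user          __attribute__((noderef, address_space(1)))
# define __kernel        __attribute__((address_space(0)))
# define __force         __attribute__((force))
# define __force_user    __force __user     /* assumed combined forms */
# define __force_kernel  __force __kernel
#else
# define __user
# define __kernel
# define __force
# define __force_user
# define __force_kernel
#endif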
108018diff --git a/net/core/neighbour.c b/net/core/neighbour.c
108019index ef31fef..8be66d9 100644
108020--- a/net/core/neighbour.c
108021+++ b/net/core/neighbour.c
108022@@ -2825,7 +2825,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
108023 void __user *buffer, size_t *lenp, loff_t *ppos)
108024 {
108025 int size, ret;
108026- struct ctl_table tmp = *ctl;
108027+ ctl_table_no_const tmp = *ctl;
108028
108029 tmp.extra1 = &zero;
108030 tmp.extra2 = &unres_qlen_max;
108031@@ -2887,7 +2887,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
108032 void __user *buffer,
108033 size_t *lenp, loff_t *ppos)
108034 {
108035- struct ctl_table tmp = *ctl;
108036+ ctl_table_no_const tmp = *ctl;
108037 int ret;
108038
108039 tmp.extra1 = &zero;
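ctl_table_no_const, substituted here and in every sysctl handler below, exists because the patch's constify plugin makes struct ctl_table read-only by default; a handler that must adjust bounds per call therefore needs an explicitly writable stack copy. A self-contained model of the pattern (the struct is pared down; the real typedef is added elsewhere in this patch):

/* Pared-down model: the global table entry is const, and the handler
 * works on a writable temporary so per-call bounds never touch it. */
struct ctl_table_model { const char *procname; void *extra1, *extra2; };
typedef struct ctl_table_model ctl_table_no_const;

static int zero, unres_qlen_max = 65536;
static const struct ctl_table_model entry = { .procname = "unres_qlen" };

static void proc_unres_qlen_model(const struct ctl_table_model *ctl)
{
    ctl_table_no_const tmp = *ctl;   /* writable copy of the const entry */
    tmp.extra1 = &zero;
    tmp.extra2 = &unres_qlen_max;
    /* ...hand &tmp to the generic integer handler... */
    (void)tmp;
}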
108040diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
108041index 2bf8329..2eb1423 100644
108042--- a/net/core/net-procfs.c
108043+++ b/net/core/net-procfs.c
108044@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
108045 struct rtnl_link_stats64 temp;
108046 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
108047
108048- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
108049+ if (gr_proc_is_restricted())
108050+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
108051+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
108052+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
108053+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
108054+ else
108055+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
108056 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
108057 dev->name, stats->rx_bytes, stats->rx_packets,
108058 stats->rx_errors,
108059@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
108060 return 0;
108061 }
108062
108063-static const struct seq_operations dev_seq_ops = {
108064+const struct seq_operations dev_seq_ops = {
108065 .start = dev_seq_start,
108066 .next = dev_seq_next,
108067 .stop = dev_seq_stop,
108068@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
108069
108070 static int softnet_seq_open(struct inode *inode, struct file *file)
108071 {
108072- return seq_open(file, &softnet_seq_ops);
108073+ return seq_open_restrict(file, &softnet_seq_ops);
108074 }
108075
108076 static const struct file_operations softnet_seq_fops = {
108077@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
108078 else
108079 seq_printf(seq, "%04x", ntohs(pt->type));
108080
108081+#ifdef CONFIG_GRKERNSEC_HIDESYM
108082+ seq_printf(seq, " %-8s %pf\n",
108083+ pt->dev ? pt->dev->name : "", NULL);
108084+#else
108085 seq_printf(seq, " %-8s %pf\n",
108086 pt->dev ? pt->dev->name : "", pt->func);
108087+#endif
108088 }
108089
108090 return 0;
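Three related information-leak plugs in this file: gr_proc_is_restricted() zeroes per-device statistics for restricted readers, seq_open_restrict() gates who may open the softnet file at all, and under GRKERNSEC_HIDESYM the %pf handler address in ptype_seq_show() is replaced by NULL so /proc/net/ptype can no longer be used to resolve kernel symbols. A userspace model of that last idea (is_restricted() stands in for the grsecurity check):

#include <stdio.h>
#include <stdbool.h>

static bool is_restricted(void) { return true; }   /* stand-in check */

/* Print the packet-type handler pointer only to privileged readers;
 * everyone else sees NULL, as the HIDESYM branch above arranges. */
static void show_ptype(const char *dev, void *handler)
{
    printf(" %-8s %p\n", dev, is_restricted() ? NULL : handler);
}

int main(void)
{
    show_ptype("eth0", (void *)show_ptype);
    return 0;
}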
108091diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
108092index 1cac29e..fb482f3 100644
108093--- a/net/core/net-sysfs.c
108094+++ b/net/core/net-sysfs.c
108095@@ -259,7 +259,7 @@ static ssize_t carrier_changes_show(struct device *dev,
108096 {
108097 struct net_device *netdev = to_net_dev(dev);
108098 return sprintf(buf, fmt_dec,
108099- atomic_read(&netdev->carrier_changes));
108100+ atomic_read_unchecked(&netdev->carrier_changes));
108101 }
108102 static DEVICE_ATTR_RO(carrier_changes);
108103
108104diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
108105index 85b6269..fc77ea0 100644
108106--- a/net/core/net_namespace.c
108107+++ b/net/core/net_namespace.c
108108@@ -443,7 +443,7 @@ static int __register_pernet_operations(struct list_head *list,
108109 int error;
108110 LIST_HEAD(net_exit_list);
108111
108112- list_add_tail(&ops->list, list);
108113+ pax_list_add_tail((struct list_head *)&ops->list, list);
108114 if (ops->init || (ops->id && ops->size)) {
108115 for_each_net(net) {
108116 error = ops_init(ops, net);
108117@@ -456,7 +456,7 @@ static int __register_pernet_operations(struct list_head *list,
108118
108119 out_undo:
108120 /* If I have an error cleanup all namespaces I initialized */
108121- list_del(&ops->list);
108122+ pax_list_del((struct list_head *)&ops->list);
108123 ops_exit_list(ops, &net_exit_list);
108124 ops_free_list(ops, &net_exit_list);
108125 return error;
108126@@ -467,7 +467,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
108127 struct net *net;
108128 LIST_HEAD(net_exit_list);
108129
108130- list_del(&ops->list);
108131+ pax_list_del((struct list_head *)&ops->list);
108132 for_each_net(net)
108133 list_add_tail(&net->exit_list, &net_exit_list);
108134 ops_exit_list(ops, &net_exit_list);
108135@@ -601,7 +601,7 @@ int register_pernet_device(struct pernet_operations *ops)
108136 mutex_lock(&net_mutex);
108137 error = register_pernet_operations(&pernet_list, ops);
108138 if (!error && (first_device == &pernet_list))
108139- first_device = &ops->list;
108140+ first_device = (struct list_head *)&ops->list;
108141 mutex_unlock(&net_mutex);
108142 return error;
108143 }
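pax_list_add_tail()/pax_list_del() are needed because pernet_operations (like the rtnl_link_ops and seq_operations touched elsewhere in this series) end up in read-only memory once constified; the list linkage at register/unregister time is the one legitimate write, so it must be bracketed by pax_open_kernel()/pax_close_kernel(). A userspace model of that bracket using page protections:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static char *ro_page;   /* stands in for a constified kernel object */

static void open_kernel(void)  { mprotect(ro_page, 4096, PROT_READ | PROT_WRITE); }
static void close_kernel(void) { mprotect(ro_page, 4096, PROT_READ); }

int main(void)
{
    ro_page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (ro_page == MAP_FAILED)
        return 1;
    strcpy(ro_page, "ops: unlinked");
    mprotect(ro_page, 4096, PROT_READ);   /* "constified" from here on */

    open_kernel();                        /* registration-time write */
    strcpy(ro_page, "ops: linked");
    close_kernel();

    puts(ro_page);
    return 0;
}

The same bracket guards the direct *(void **)&ops->dellink write in the rtnetlink.c hunk below, and presumably also motivates dropping __read_mostly from the rtnl_link_ops definitions in ip_gre.c, ip_vti.c and ipip.c, since the section annotation would conflict with the plugin's read-only placement.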
108144diff --git a/net/core/netpoll.c b/net/core/netpoll.c
108145index e33937f..b2b4981 100644
108146--- a/net/core/netpoll.c
108147+++ b/net/core/netpoll.c
108148@@ -382,7 +382,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
108149 struct udphdr *udph;
108150 struct iphdr *iph;
108151 struct ethhdr *eth;
108152- static atomic_t ip_ident;
108153+ static atomic_unchecked_t ip_ident;
108154 struct ipv6hdr *ip6h;
108155
108156 udp_len = len + sizeof(*udph);
108157@@ -453,7 +453,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
108158 put_unaligned(0x45, (unsigned char *)iph);
108159 iph->tos = 0;
108160 put_unaligned(htons(ip_len), &(iph->tot_len));
108161- iph->id = htons(atomic_inc_return(&ip_ident));
108162+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
108163 iph->frag_off = 0;
108164 iph->ttl = 64;
108165 iph->protocol = IPPROTO_UDP;
108166diff --git a/net/core/pktgen.c b/net/core/pktgen.c
108167index fc17a9d..d4a3d88 100644
108168--- a/net/core/pktgen.c
108169+++ b/net/core/pktgen.c
108170@@ -3725,7 +3725,7 @@ static int __net_init pg_net_init(struct net *net)
108171 pn->net = net;
108172 INIT_LIST_HEAD(&pn->pktgen_threads);
108173 pn->pktgen_exiting = false;
108174- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
108175+ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
108176 if (!pn->proc_dir) {
108177 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
108178 return -ENODEV;
108179diff --git a/net/core/ptp_classifier.c b/net/core/ptp_classifier.c
108180deleted file mode 100644
108181index d3027a7..0000000
108182--- a/net/core/ptp_classifier.c
108183+++ /dev/null
108184@@ -1,141 +0,0 @@
108185-/* PTP classifier
108186- *
108187- * This program is free software; you can redistribute it and/or
108188- * modify it under the terms of version 2 of the GNU General Public
108189- * License as published by the Free Software Foundation.
108190- *
108191- * This program is distributed in the hope that it will be useful, but
108192- * WITHOUT ANY WARRANTY; without even the implied warranty of
108193- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
108194- * General Public License for more details.
108195- */
108196-
108197-/* The below program is the bpf_asm (tools/net/) representation of
108198- * the opcode array in the ptp_filter structure.
108199- *
108200- * For convenience, this can easily be altered and reviewed with
108201- * bpf_asm and bpf_dbg, e.g. `./bpf_asm -c prog` where prog is a
108202- * simple file containing the below program:
108203- *
108204- * ldh [12] ; load ethertype
108205- *
108206- * ; PTP over UDP over IPv4 over Ethernet
108207- * test_ipv4:
108208- * jneq #0x800, test_ipv6 ; ETH_P_IP ?
108209- * ldb [23] ; load proto
108210- * jneq #17, drop_ipv4 ; IPPROTO_UDP ?
108211- * ldh [20] ; load frag offset field
108212- * jset #0x1fff, drop_ipv4 ; don't allow fragments
108213- * ldxb 4*([14]&0xf) ; load IP header len
108214- * ldh [x + 16] ; load UDP dst port
108215- * jneq #319, drop_ipv4 ; is port PTP_EV_PORT ?
108216- * ldh [x + 22] ; load payload
108217- * and #0xf ; mask PTP_CLASS_VMASK
108218- * or #0x10 ; PTP_CLASS_IPV4
108219- * ret a ; return PTP class
108220- * drop_ipv4: ret #0x0 ; PTP_CLASS_NONE
108221- *
108222- * ; PTP over UDP over IPv6 over Ethernet
108223- * test_ipv6:
108224- * jneq #0x86dd, test_8021q ; ETH_P_IPV6 ?
108225- * ldb [20] ; load proto
108226- * jneq #17, drop_ipv6 ; IPPROTO_UDP ?
108227- * ldh [56] ; load UDP dst port
108228- * jneq #319, drop_ipv6 ; is port PTP_EV_PORT ?
108229- * ldh [62] ; load payload
108230- * and #0xf ; mask PTP_CLASS_VMASK
108231- * or #0x20 ; PTP_CLASS_IPV6
108232- * ret a ; return PTP class
108233- * drop_ipv6: ret #0x0 ; PTP_CLASS_NONE
108234- *
108235- * ; PTP over 802.1Q over Ethernet
108236- * test_8021q:
108237- * jneq #0x8100, test_ieee1588 ; ETH_P_8021Q ?
108238- * ldh [16] ; load inner type
108239- * jneq #0x88f7, drop_ieee1588 ; ETH_P_1588 ?
108240- * ldb [18] ; load payload
108241- * and #0x8 ; as we don't have ports here, test
108242- * jneq #0x0, drop_ieee1588 ; for PTP_GEN_BIT and drop these
108243- * ldh [18] ; reload payload
108244- * and #0xf ; mask PTP_CLASS_VMASK
108245- * or #0x40 ; PTP_CLASS_V2_VLAN
108246- * ret a ; return PTP class
108247- *
108248- * ; PTP over Ethernet
108249- * test_ieee1588:
108250- * jneq #0x88f7, drop_ieee1588 ; ETH_P_1588 ?
108251- * ldb [14] ; load payload
108252- * and #0x8 ; as we don't have ports here, test
108253- * jneq #0x0, drop_ieee1588 ; for PTP_GEN_BIT and drop these
108254- * ldh [14] ; reload payload
108255- * and #0xf ; mask PTP_CLASS_VMASK
108256- * or #0x30 ; PTP_CLASS_L2
108257- * ret a ; return PTP class
108258- * drop_ieee1588: ret #0x0 ; PTP_CLASS_NONE
108259- */
108260-
108261-#include <linux/skbuff.h>
108262-#include <linux/filter.h>
108263-#include <linux/ptp_classify.h>
108264-
108265-static struct sk_filter *ptp_insns __read_mostly;
108266-
108267-unsigned int ptp_classify_raw(const struct sk_buff *skb)
108268-{
108269- return SK_RUN_FILTER(ptp_insns, skb);
108270-}
108271-EXPORT_SYMBOL_GPL(ptp_classify_raw);
108272-
108273-void __init ptp_classifier_init(void)
108274-{
108275- static struct sock_filter ptp_filter[] __initdata = {
108276- { 0x28, 0, 0, 0x0000000c },
108277- { 0x15, 0, 12, 0x00000800 },
108278- { 0x30, 0, 0, 0x00000017 },
108279- { 0x15, 0, 9, 0x00000011 },
108280- { 0x28, 0, 0, 0x00000014 },
108281- { 0x45, 7, 0, 0x00001fff },
108282- { 0xb1, 0, 0, 0x0000000e },
108283- { 0x48, 0, 0, 0x00000010 },
108284- { 0x15, 0, 4, 0x0000013f },
108285- { 0x48, 0, 0, 0x00000016 },
108286- { 0x54, 0, 0, 0x0000000f },
108287- { 0x44, 0, 0, 0x00000010 },
108288- { 0x16, 0, 0, 0x00000000 },
108289- { 0x06, 0, 0, 0x00000000 },
108290- { 0x15, 0, 9, 0x000086dd },
108291- { 0x30, 0, 0, 0x00000014 },
108292- { 0x15, 0, 6, 0x00000011 },
108293- { 0x28, 0, 0, 0x00000038 },
108294- { 0x15, 0, 4, 0x0000013f },
108295- { 0x28, 0, 0, 0x0000003e },
108296- { 0x54, 0, 0, 0x0000000f },
108297- { 0x44, 0, 0, 0x00000020 },
108298- { 0x16, 0, 0, 0x00000000 },
108299- { 0x06, 0, 0, 0x00000000 },
108300- { 0x15, 0, 9, 0x00008100 },
108301- { 0x28, 0, 0, 0x00000010 },
108302- { 0x15, 0, 15, 0x000088f7 },
108303- { 0x30, 0, 0, 0x00000012 },
108304- { 0x54, 0, 0, 0x00000008 },
108305- { 0x15, 0, 12, 0x00000000 },
108306- { 0x28, 0, 0, 0x00000012 },
108307- { 0x54, 0, 0, 0x0000000f },
108308- { 0x44, 0, 0, 0x00000040 },
108309- { 0x16, 0, 0, 0x00000000 },
108310- { 0x15, 0, 7, 0x000088f7 },
108311- { 0x30, 0, 0, 0x0000000e },
108312- { 0x54, 0, 0, 0x00000008 },
108313- { 0x15, 0, 4, 0x00000000 },
108314- { 0x28, 0, 0, 0x0000000e },
108315- { 0x54, 0, 0, 0x0000000f },
108316- { 0x44, 0, 0, 0x00000030 },
108317- { 0x16, 0, 0, 0x00000000 },
108318- { 0x06, 0, 0, 0x00000000 },
108319- };
108320- struct sock_fprog_kern ptp_prog = {
108321- .len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter,
108322- };
108323-
108324- BUG_ON(sk_unattached_filter_create(&ptp_insns, &ptp_prog));
108325-}
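The deleted file is self-documenting: each { code, jt, jf, k } quadruple in ptp_filter[] is the binary form of one mnemonic in the bpf_asm listing above it, with the code byte OR-ing together instruction class, size/addressing mode and operand source. Decoding the first two entries against the listing confirms the mapping:

#include <linux/filter.h>   /* struct sock_filter, BPF_* opcode fields */

/* { 0x28, 0, 0, 0x0000000c }  is  "ldh [12]":
 *   0x28 = BPF_LD | BPF_H | BPF_ABS  (0x00 | 0x08 | 0x20)
 *   -> load the halfword at absolute packet offset 12, the ethertype.
 *
 * { 0x15, 0, 12, 0x00000800 }  is  "jneq #0x800, test_ipv6":
 *   0x15 = BPF_JMP | BPF_JEQ | BPF_K  (0x05 | 0x10 | 0x00)
 *   -> if A == 0x800 (ETH_P_IP) fall through (jt = 0), otherwise skip
 *      the next 12 instructions (jf = 12) to the IPv6 branch. */
static const struct sock_filter decoded[] = {
    BPF_STMT(BPF_LD  | BPF_H   | BPF_ABS, 12),
    BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 12),
};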
108326diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
108327index 1063996..0729c19 100644
108328--- a/net/core/rtnetlink.c
108329+++ b/net/core/rtnetlink.c
108330@@ -58,7 +58,7 @@ struct rtnl_link {
108331 rtnl_doit_func doit;
108332 rtnl_dumpit_func dumpit;
108333 rtnl_calcit_func calcit;
108334-};
108335+} __no_const;
108336
108337 static DEFINE_MUTEX(rtnl_mutex);
108338
108339@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
108340 if (rtnl_link_ops_get(ops->kind))
108341 return -EEXIST;
108342
108343- if (!ops->dellink)
108344- ops->dellink = unregister_netdevice_queue;
108345+ if (!ops->dellink) {
108346+ pax_open_kernel();
108347+ *(void **)&ops->dellink = unregister_netdevice_queue;
108348+ pax_close_kernel();
108349+ }
108350
108351- list_add_tail(&ops->list, &link_ops);
108352+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
108353 return 0;
108354 }
108355 EXPORT_SYMBOL_GPL(__rtnl_link_register);
108356@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
108357 for_each_net(net) {
108358 __rtnl_kill_links(net, ops);
108359 }
108360- list_del(&ops->list);
108361+ pax_list_del((struct list_head *)&ops->list);
108362 }
108363 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
108364
108365@@ -1008,7 +1011,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
108366 (dev->ifalias &&
108367 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
108368 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
108369- atomic_read(&dev->carrier_changes)))
108370+ atomic_read_unchecked(&dev->carrier_changes)))
108371 goto nla_put_failure;
108372
108373 if (1) {
108374diff --git a/net/core/scm.c b/net/core/scm.c
108375index b442e7e..6f5b5a2 100644
108376--- a/net/core/scm.c
108377+++ b/net/core/scm.c
108378@@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
108379 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
108380 {
108381 struct cmsghdr __user *cm
108382- = (__force struct cmsghdr __user *)msg->msg_control;
108383+ = (struct cmsghdr __force_user *)msg->msg_control;
108384 struct cmsghdr cmhdr;
108385 int cmlen = CMSG_LEN(len);
108386 int err;
108387@@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
108388 err = -EFAULT;
108389 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
108390 goto out;
108391- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
108392+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
108393 goto out;
108394 cmlen = CMSG_SPACE(len);
108395 if (msg->msg_controllen < cmlen)
108396@@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg);
108397 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
108398 {
108399 struct cmsghdr __user *cm
108400- = (__force struct cmsghdr __user*)msg->msg_control;
108401+ = (struct cmsghdr __force_user *)msg->msg_control;
108402
108403 int fdmax = 0;
108404 int fdnum = scm->fp->count;
108405@@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
108406 if (fdnum < fdmax)
108407 fdmax = fdnum;
108408
108409- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
108410+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
108411 i++, cmfptr++)
108412 {
108413 struct socket *sock;
108414diff --git a/net/core/skbuff.c b/net/core/skbuff.c
108415index 58ff88e..af9b458 100644
108416--- a/net/core/skbuff.c
108417+++ b/net/core/skbuff.c
108418@@ -2010,7 +2010,7 @@ EXPORT_SYMBOL(__skb_checksum);
108419 __wsum skb_checksum(const struct sk_buff *skb, int offset,
108420 int len, __wsum csum)
108421 {
108422- const struct skb_checksum_ops ops = {
108423+ static const struct skb_checksum_ops ops = {
108424 .update = csum_partial_ext,
108425 .combine = csum_block_add_ext,
108426 };
108427@@ -3233,13 +3233,15 @@ void __init skb_init(void)
108428 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
108429 sizeof(struct sk_buff),
108430 0,
108431- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
108432+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
108433+ SLAB_NO_SANITIZE,
108434 NULL);
108435 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
108436 (2*sizeof(struct sk_buff)) +
108437 sizeof(atomic_t),
108438 0,
108439- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
108440+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
108441+ SLAB_NO_SANITIZE,
108442 NULL);
108443 }
108444
108445diff --git a/net/core/sock.c b/net/core/sock.c
108446index 026e01f..f54f908 100644
108447--- a/net/core/sock.c
108448+++ b/net/core/sock.c
108449@@ -442,7 +442,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
108450 struct sk_buff_head *list = &sk->sk_receive_queue;
108451
108452 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
108453- atomic_inc(&sk->sk_drops);
108454+ atomic_inc_unchecked(&sk->sk_drops);
108455 trace_sock_rcvqueue_full(sk, skb);
108456 return -ENOMEM;
108457 }
108458@@ -452,7 +452,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
108459 return err;
108460
108461 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
108462- atomic_inc(&sk->sk_drops);
108463+ atomic_inc_unchecked(&sk->sk_drops);
108464 return -ENOBUFS;
108465 }
108466
108467@@ -472,7 +472,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
108468 skb_dst_force(skb);
108469
108470 spin_lock_irqsave(&list->lock, flags);
108471- skb->dropcount = atomic_read(&sk->sk_drops);
108472+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
108473 __skb_queue_tail(list, skb);
108474 spin_unlock_irqrestore(&list->lock, flags);
108475
108476@@ -492,7 +492,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
108477 skb->dev = NULL;
108478
108479 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
108480- atomic_inc(&sk->sk_drops);
108481+ atomic_inc_unchecked(&sk->sk_drops);
108482 goto discard_and_relse;
108483 }
108484 if (nested)
108485@@ -510,7 +510,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
108486 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
108487 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
108488 bh_unlock_sock(sk);
108489- atomic_inc(&sk->sk_drops);
108490+ atomic_inc_unchecked(&sk->sk_drops);
108491 goto discard_and_relse;
108492 }
108493
108494@@ -998,12 +998,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
108495 struct timeval tm;
108496 } v;
108497
108498- int lv = sizeof(int);
108499- int len;
108500+ unsigned int lv = sizeof(int);
108501+ unsigned int len;
108502
108503 if (get_user(len, optlen))
108504 return -EFAULT;
108505- if (len < 0)
108506+ if (len > INT_MAX)
108507 return -EINVAL;
108508
108509 memset(&v, 0, sizeof(v));
108510@@ -1155,11 +1155,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
108511
108512 case SO_PEERNAME:
108513 {
108514- char address[128];
108515+ char address[_K_SS_MAXSIZE];
108516
108517 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
108518 return -ENOTCONN;
108519- if (lv < len)
108520+ if (lv < len || sizeof address < len)
108521 return -EINVAL;
108522 if (copy_to_user(optval, address, len))
108523 return -EFAULT;
108524@@ -1240,7 +1240,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
108525
108526 if (len > lv)
108527 len = lv;
108528- if (copy_to_user(optval, &v, len))
108529+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
108530 return -EFAULT;
108531 lenout:
108532 if (put_user(len, optlen))
108533@@ -2375,7 +2375,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
108534 */
108535 smp_wmb();
108536 atomic_set(&sk->sk_refcnt, 1);
108537- atomic_set(&sk->sk_drops, 0);
108538+ atomic_set_unchecked(&sk->sk_drops, 0);
108539 }
108540 EXPORT_SYMBOL(sock_init_data);
108541
108542@@ -2503,6 +2503,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
108543 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
108544 int level, int type)
108545 {
108546+ struct sock_extended_err ee;
108547 struct sock_exterr_skb *serr;
108548 struct sk_buff *skb, *skb2;
108549 int copied, err;
108550@@ -2524,7 +2525,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
108551 sock_recv_timestamp(msg, sk, skb);
108552
108553 serr = SKB_EXT_ERR(skb);
108554- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
108555+ ee = serr->ee;
108556+ put_cmsg(msg, level, type, sizeof ee, &ee);
108557
108558 msg->msg_flags |= MSG_ERRQUEUE;
108559 err = copied;
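The sock.c changes above all enforce one rule around copy_to_user(): the caller-supplied length may never exceed the kernel object actually being exported, and data that other contexts can rewrite (serr->ee here) is snapshotted to the stack first so the copy is taken from stable memory. raw_seticmpfilter()/raw_geticmpfilter() and the decnet sysctl handlers further down apply the same rule. A condensed userspace model:

#include <string.h>

struct ee_model { int err, origin; };
static struct ee_model shared_ee;        /* may be rewritten concurrently */

/* memcpy stands in for copy_to_user(); -1 for -EINVAL / -EFAULT. */
static int export_ee(void *ubuf, size_t want)
{
    struct ee_model snap = shared_ee;    /* stable stack snapshot */

    if (want > sizeof snap)              /* never read past the object */
        return -1;
    memcpy(ubuf, &snap, want);
    return (int)want;
}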
108560diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
108561index a4216a4..773e3d7 100644
108562--- a/net/core/sock_diag.c
108563+++ b/net/core/sock_diag.c
108564@@ -9,26 +9,33 @@
108565 #include <linux/inet_diag.h>
108566 #include <linux/sock_diag.h>
108567
108568-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
108569+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
108570 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
108571 static DEFINE_MUTEX(sock_diag_table_mutex);
108572
108573 int sock_diag_check_cookie(void *sk, __u32 *cookie)
108574 {
108575+#ifndef CONFIG_GRKERNSEC_HIDESYM
108576 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
108577 cookie[1] != INET_DIAG_NOCOOKIE) &&
108578 ((u32)(unsigned long)sk != cookie[0] ||
108579 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
108580 return -ESTALE;
108581 else
108582+#endif
108583 return 0;
108584 }
108585 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
108586
108587 void sock_diag_save_cookie(void *sk, __u32 *cookie)
108588 {
108589+#ifdef CONFIG_GRKERNSEC_HIDESYM
108590+ cookie[0] = 0;
108591+ cookie[1] = 0;
108592+#else
108593 cookie[0] = (u32)(unsigned long)sk;
108594 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
108595+#endif
108596 }
108597 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
108598
108599@@ -52,10 +59,9 @@ EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
108600 int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
108601 struct sk_buff *skb, int attrtype)
108602 {
108603- struct sock_fprog_kern *fprog;
108604- struct sk_filter *filter;
108605 struct nlattr *attr;
108606- unsigned int flen;
108607+ struct sk_filter *filter;
108608+ unsigned int len;
108609 int err = 0;
108610
108611 if (!may_report_filterinfo) {
108612@@ -64,20 +70,24 @@ int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
108613 }
108614
108615 rcu_read_lock();
108616+
108617 filter = rcu_dereference(sk->sk_filter);
108618- if (!filter)
108619- goto out;
108620+ len = filter ? filter->len * sizeof(struct sock_filter) : 0;
108621
108622- fprog = filter->orig_prog;
108623- flen = sk_filter_proglen(fprog);
108624-
108625- attr = nla_reserve(skb, attrtype, flen);
108626+ attr = nla_reserve(skb, attrtype, len);
108627 if (attr == NULL) {
108628 err = -EMSGSIZE;
108629 goto out;
108630 }
108631
108632- memcpy(nla_data(attr), fprog->filter, flen);
108633+ if (filter) {
108634+ struct sock_filter *fb = (struct sock_filter *)nla_data(attr);
108635+ int i;
108636+
108637+ for (i = 0; i < filter->len; i++, fb++)
108638+ sk_decode_filter(&filter->insns[i], fb);
108639+ }
108640+
108641 out:
108642 rcu_read_unlock();
108643 return err;
108644@@ -110,8 +120,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
108645 mutex_lock(&sock_diag_table_mutex);
108646 if (sock_diag_handlers[hndl->family])
108647 err = -EBUSY;
108648- else
108649+ else {
108650+ pax_open_kernel();
108651 sock_diag_handlers[hndl->family] = hndl;
108652+ pax_close_kernel();
108653+ }
108654 mutex_unlock(&sock_diag_table_mutex);
108655
108656 return err;
108657@@ -127,7 +140,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
108658
108659 mutex_lock(&sock_diag_table_mutex);
108660 BUG_ON(sock_diag_handlers[family] != hnld);
108661+ pax_open_kernel();
108662 sock_diag_handlers[family] = NULL;
108663+ pax_close_kernel();
108664 mutex_unlock(&sock_diag_table_mutex);
108665 }
108666 EXPORT_SYMBOL_GPL(sock_diag_unregister);
108667diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
108668index cf9cd13..8b56af3 100644
108669--- a/net/core/sysctl_net_core.c
108670+++ b/net/core/sysctl_net_core.c
108671@@ -32,7 +32,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
108672 {
108673 unsigned int orig_size, size;
108674 int ret, i;
108675- struct ctl_table tmp = {
108676+ ctl_table_no_const tmp = {
108677 .data = &size,
108678 .maxlen = sizeof(size),
108679 .mode = table->mode
108680@@ -200,7 +200,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
108681 void __user *buffer, size_t *lenp, loff_t *ppos)
108682 {
108683 char id[IFNAMSIZ];
108684- struct ctl_table tbl = {
108685+ ctl_table_no_const tbl = {
108686 .data = id,
108687 .maxlen = IFNAMSIZ,
108688 };
108689@@ -379,13 +379,12 @@ static struct ctl_table netns_core_table[] = {
108690
108691 static __net_init int sysctl_core_net_init(struct net *net)
108692 {
108693- struct ctl_table *tbl;
108694+ ctl_table_no_const *tbl = NULL;
108695
108696 net->core.sysctl_somaxconn = SOMAXCONN;
108697
108698- tbl = netns_core_table;
108699 if (!net_eq(net, &init_net)) {
108700- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
108701+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
108702 if (tbl == NULL)
108703 goto err_dup;
108704
108705@@ -395,17 +394,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
108706 if (net->user_ns != &init_user_ns) {
108707 tbl[0].procname = NULL;
108708 }
108709- }
108710-
108711- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
108712+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
108713+ } else
108714+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
108715 if (net->core.sysctl_hdr == NULL)
108716 goto err_reg;
108717
108718 return 0;
108719
108720 err_reg:
108721- if (tbl != netns_core_table)
108722- kfree(tbl);
108723+ kfree(tbl);
108724 err_dup:
108725 return -ENOMEM;
108726 }
108727@@ -420,7 +418,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
108728 kfree(tbl);
108729 }
108730
108731-static __net_initdata struct pernet_operations sysctl_core_ops = {
108732+static __net_initconst struct pernet_operations sysctl_core_ops = {
108733 .init = sysctl_core_net_init,
108734 .exit = sysctl_core_net_exit,
108735 };
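sysctl_core_net_init() above is the template for every per-netns sysctl registration this patch rewrites (ieee802154 reassembly, devinet, ip_fragment and route below all get the same shape): the const template table is registered directly for init_net, only other namespaces get a kmemdup'd writable copy, and because tbl starts out NULL the error path may kfree(tbl) unconditionally. Condensed from the hunk:

ctl_table_no_const *tbl = NULL;

if (!net_eq(net, &init_net)) {
        tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
        if (tbl == NULL)
                goto err_dup;
        /* ...retarget tbl[i].data at this netns, hide entries if needed... */
        net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
} else
        net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);

if (net->core.sysctl_hdr == NULL)
        goto err_reg;
return 0;

err_reg:
        kfree(tbl);                     /* NULL for init_net, so always safe */
err_dup:
        return -ENOMEM;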
108736diff --git a/net/core/timestamping.c b/net/core/timestamping.c
108737index 6521dfd..661b5a4 100644
108738--- a/net/core/timestamping.c
108739+++ b/net/core/timestamping.c
108740@@ -23,11 +23,16 @@
108741 #include <linux/skbuff.h>
108742 #include <linux/export.h>
108743
108744+static struct sock_filter ptp_filter[] = {
108745+ PTP_FILTER
108746+};
108747+
108748 static unsigned int classify(const struct sk_buff *skb)
108749 {
108750- if (likely(skb->dev && skb->dev->phydev &&
108751+ if (likely(skb->dev &&
108752+ skb->dev->phydev &&
108753 skb->dev->phydev->drv))
108754- return ptp_classify_raw(skb);
108755+ return sk_run_filter(skb, ptp_filter);
108756 else
108757 return PTP_CLASS_NONE;
108758 }
108759@@ -55,13 +60,11 @@ void skb_clone_tx_timestamp(struct sk_buff *skb)
108760 if (likely(phydev->drv->txtstamp)) {
108761 if (!atomic_inc_not_zero(&sk->sk_refcnt))
108762 return;
108763-
108764 clone = skb_clone(skb, GFP_ATOMIC);
108765 if (!clone) {
108766 sock_put(sk);
108767 return;
108768 }
108769-
108770 clone->sk = sk;
108771 phydev->drv->txtstamp(phydev, clone, type);
108772 }
108773@@ -86,15 +89,12 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
108774 }
108775
108776 *skb_hwtstamps(skb) = *hwtstamps;
108777-
108778 serr = SKB_EXT_ERR(skb);
108779 memset(serr, 0, sizeof(*serr));
108780 serr->ee.ee_errno = ENOMSG;
108781 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
108782 skb->sk = NULL;
108783-
108784 err = sock_queue_err_skb(sk, skb);
108785-
108786 sock_put(sk);
108787 if (err)
108788 kfree_skb(skb);
108789@@ -132,3 +132,8 @@ bool skb_defer_rx_timestamp(struct sk_buff *skb)
108790 return false;
108791 }
108792 EXPORT_SYMBOL_GPL(skb_defer_rx_timestamp);
108793+
108794+void __init skb_timestamping_init(void)
108795+{
108796+ BUG_ON(sk_chk_filter(ptp_filter, ARRAY_SIZE(ptp_filter)));
108797+}
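With ptp_classifier.c gone, timestamping.c goes back to a static PTP_FILTER program validated once at boot: skb_timestamping_init() lets sk_chk_filter() reject malformed classic BPF (unknown opcodes, out-of-range jumps) before sk_run_filter() ever executes it on a packet. The same checker guards SO_ATTACH_FILTER, which gives a userspace way to watch it accept or reject a program:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/filter.h>

int main(void)
{
    /* trivial but well-formed classic BPF: load ethertype, drop packet */
    struct sock_filter insns[] = {
        BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),   /* ldh [12] */
        BPF_STMT(BPF_RET | BPF_K, 0),             /* ret #0   */
    };
    struct sock_fprog prog = { .len = 2, .filter = insns };
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0)
        return 1;
    if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof prog))
        perror("rejected by the in-kernel checker");   /* e.g. EINVAL */
    else
        puts("program accepted");
    return 0;
}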
108798diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
108799index ae011b4..d2d18bf 100644
108800--- a/net/decnet/af_decnet.c
108801+++ b/net/decnet/af_decnet.c
108802@@ -465,6 +465,7 @@ static struct proto dn_proto = {
108803 .sysctl_rmem = sysctl_decnet_rmem,
108804 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
108805 .obj_size = sizeof(struct dn_sock),
108806+ .slab_flags = SLAB_USERCOPY,
108807 };
108808
108809 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
108810diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
108811index 3b726f3..1af6368 100644
108812--- a/net/decnet/dn_dev.c
108813+++ b/net/decnet/dn_dev.c
108814@@ -200,7 +200,7 @@ static struct dn_dev_sysctl_table {
108815 .extra1 = &min_t3,
108816 .extra2 = &max_t3
108817 },
108818- {0}
108819+ { }
108820 },
108821 };
108822
108823diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
108824index 5325b54..a0d4d69 100644
108825--- a/net/decnet/sysctl_net_decnet.c
108826+++ b/net/decnet/sysctl_net_decnet.c
108827@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
108828
108829 if (len > *lenp) len = *lenp;
108830
108831- if (copy_to_user(buffer, addr, len))
108832+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
108833 return -EFAULT;
108834
108835 *lenp = len;
108836@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
108837
108838 if (len > *lenp) len = *lenp;
108839
108840- if (copy_to_user(buffer, devname, len))
108841+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
108842 return -EFAULT;
108843
108844 *lenp = len;
108845diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
108846index 6f1428c..9586b83 100644
108847--- a/net/ieee802154/reassembly.c
108848+++ b/net/ieee802154/reassembly.c
108849@@ -438,14 +438,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
108850
108851 static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
108852 {
108853- struct ctl_table *table;
108854+ ctl_table_no_const *table = NULL;
108855 struct ctl_table_header *hdr;
108856 struct netns_ieee802154_lowpan *ieee802154_lowpan =
108857 net_ieee802154_lowpan(net);
108858
108859- table = lowpan_frags_ns_ctl_table;
108860 if (!net_eq(net, &init_net)) {
108861- table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
108862+ table = kmemdup(lowpan_frags_ns_ctl_table, sizeof(lowpan_frags_ns_ctl_table),
108863 GFP_KERNEL);
108864 if (table == NULL)
108865 goto err_alloc;
108866@@ -458,9 +457,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
108867 /* Don't export sysctls to unprivileged users */
108868 if (net->user_ns != &init_user_ns)
108869 table[0].procname = NULL;
108870- }
108871-
108872- hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
108873+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
108874+ } else
108875+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", lowpan_frags_ns_ctl_table);
108876 if (hdr == NULL)
108877 goto err_reg;
108878
108879@@ -468,8 +467,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
108880 return 0;
108881
108882 err_reg:
108883- if (!net_eq(net, &init_net))
108884- kfree(table);
108885+ kfree(table);
108886 err_alloc:
108887 return -ENOMEM;
108888 }
108889diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
108890index e944937..368fe78 100644
108891--- a/net/ipv4/devinet.c
108892+++ b/net/ipv4/devinet.c
108893@@ -1540,7 +1540,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
108894 idx = 0;
108895 head = &net->dev_index_head[h];
108896 rcu_read_lock();
108897- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
108898+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
108899 net->dev_base_seq;
108900 hlist_for_each_entry_rcu(dev, head, index_hlist) {
108901 if (idx < s_idx)
108902@@ -1858,7 +1858,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
108903 idx = 0;
108904 head = &net->dev_index_head[h];
108905 rcu_read_lock();
108906- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
108907+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
108908 net->dev_base_seq;
108909 hlist_for_each_entry_rcu(dev, head, index_hlist) {
108910 if (idx < s_idx)
108911@@ -2093,7 +2093,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
108912 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
108913 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
108914
108915-static struct devinet_sysctl_table {
108916+static const struct devinet_sysctl_table {
108917 struct ctl_table_header *sysctl_header;
108918 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
108919 } devinet_sysctl = {
108920@@ -2215,7 +2215,7 @@ static __net_init int devinet_init_net(struct net *net)
108921 int err;
108922 struct ipv4_devconf *all, *dflt;
108923 #ifdef CONFIG_SYSCTL
108924- struct ctl_table *tbl = ctl_forward_entry;
108925+ ctl_table_no_const *tbl = NULL;
108926 struct ctl_table_header *forw_hdr;
108927 #endif
108928
108929@@ -2233,7 +2233,7 @@ static __net_init int devinet_init_net(struct net *net)
108930 goto err_alloc_dflt;
108931
108932 #ifdef CONFIG_SYSCTL
108933- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
108934+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
108935 if (tbl == NULL)
108936 goto err_alloc_ctl;
108937
108938@@ -2253,7 +2253,10 @@ static __net_init int devinet_init_net(struct net *net)
108939 goto err_reg_dflt;
108940
108941 err = -ENOMEM;
108942- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
108943+ if (!net_eq(net, &init_net))
108944+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
108945+ else
108946+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
108947 if (forw_hdr == NULL)
108948 goto err_reg_ctl;
108949 net->ipv4.forw_hdr = forw_hdr;
108950@@ -2269,8 +2272,7 @@ err_reg_ctl:
108951 err_reg_dflt:
108952 __devinet_sysctl_unregister(all);
108953 err_reg_all:
108954- if (tbl != ctl_forward_entry)
108955- kfree(tbl);
108956+ kfree(tbl);
108957 err_alloc_ctl:
108958 #endif
108959 if (dflt != &ipv4_devconf_dflt)
108960diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
108961index 255aa99..45c78f8 100644
108962--- a/net/ipv4/fib_frontend.c
108963+++ b/net/ipv4/fib_frontend.c
108964@@ -1015,12 +1015,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
108965 #ifdef CONFIG_IP_ROUTE_MULTIPATH
108966 fib_sync_up(dev);
108967 #endif
108968- atomic_inc(&net->ipv4.dev_addr_genid);
108969+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
108970 rt_cache_flush(dev_net(dev));
108971 break;
108972 case NETDEV_DOWN:
108973 fib_del_ifaddr(ifa, NULL);
108974- atomic_inc(&net->ipv4.dev_addr_genid);
108975+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
108976 if (ifa->ifa_dev->ifa_list == NULL) {
108977 /* Last address was deleted from this interface.
108978 * Disable IP.
108979@@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
108980 #ifdef CONFIG_IP_ROUTE_MULTIPATH
108981 fib_sync_up(dev);
108982 #endif
108983- atomic_inc(&net->ipv4.dev_addr_genid);
108984+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
108985 rt_cache_flush(net);
108986 break;
108987 case NETDEV_DOWN:
108988diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
108989index b10cd43a..22327f9 100644
108990--- a/net/ipv4/fib_semantics.c
108991+++ b/net/ipv4/fib_semantics.c
108992@@ -768,7 +768,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
108993 nh->nh_saddr = inet_select_addr(nh->nh_dev,
108994 nh->nh_gw,
108995 nh->nh_parent->fib_scope);
108996- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
108997+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
108998
108999 return nh->nh_saddr;
109000 }
109001diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
109002index 43116e8..e3e6159 100644
109003--- a/net/ipv4/inet_hashtables.c
109004+++ b/net/ipv4/inet_hashtables.c
109005@@ -18,6 +18,7 @@
109006 #include <linux/sched.h>
109007 #include <linux/slab.h>
109008 #include <linux/wait.h>
109009+#include <linux/security.h>
109010
109011 #include <net/inet_connection_sock.h>
109012 #include <net/inet_hashtables.h>
109013@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
109014 return inet_ehashfn(net, laddr, lport, faddr, fport);
109015 }
109016
109017+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
109018+
109019 /*
109020 * Allocate and initialize a new local port bind bucket.
109021 * The bindhash mutex for snum's hash chain must be held here.
109022@@ -554,6 +557,8 @@ ok:
109023 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
109024 spin_unlock(&head->lock);
109025
109026+ gr_update_task_in_ip_table(current, inet_sk(sk));
109027+
109028 if (tw) {
109029 inet_twsk_deschedule(tw, death_row);
109030 while (twrefcnt) {
109031diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
109032index bd5f592..e80e605 100644
109033--- a/net/ipv4/inetpeer.c
109034+++ b/net/ipv4/inetpeer.c
109035@@ -482,7 +482,7 @@ relookup:
109036 if (p) {
109037 p->daddr = *daddr;
109038 atomic_set(&p->refcnt, 1);
109039- atomic_set(&p->rid, 0);
109040+ atomic_set_unchecked(&p->rid, 0);
109041 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
109042 p->rate_tokens = 0;
109043 /* 60*HZ is arbitrary, but chosen enough high so that the first
109044diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
109045index ed32313..3762abe 100644
109046--- a/net/ipv4/ip_fragment.c
109047+++ b/net/ipv4/ip_fragment.c
109048@@ -284,7 +284,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
109049 return 0;
109050
109051 start = qp->rid;
109052- end = atomic_inc_return(&peer->rid);
109053+ end = atomic_inc_return_unchecked(&peer->rid);
109054 qp->rid = end;
109055
109056 rc = qp->q.fragments && (end - start) > max;
109057@@ -761,12 +761,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
109058
109059 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
109060 {
109061- struct ctl_table *table;
109062+ ctl_table_no_const *table = NULL;
109063 struct ctl_table_header *hdr;
109064
109065- table = ip4_frags_ns_ctl_table;
109066 if (!net_eq(net, &init_net)) {
109067- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
109068+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
109069 if (table == NULL)
109070 goto err_alloc;
109071
109072@@ -777,9 +776,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
109073 /* Don't export sysctls to unprivileged users */
109074 if (net->user_ns != &init_user_ns)
109075 table[0].procname = NULL;
109076- }
109077+ hdr = register_net_sysctl(net, "net/ipv4", table);
109078+ } else
109079+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
109080
109081- hdr = register_net_sysctl(net, "net/ipv4", table);
109082 if (hdr == NULL)
109083 goto err_reg;
109084
109085@@ -787,8 +787,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
109086 return 0;
109087
109088 err_reg:
109089- if (!net_eq(net, &init_net))
109090- kfree(table);
109091+ kfree(table);
109092 err_alloc:
109093 return -ENOMEM;
109094 }
109095diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
109096index 9b84254..c776611 100644
109097--- a/net/ipv4/ip_gre.c
109098+++ b/net/ipv4/ip_gre.c
109099@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
109100 module_param(log_ecn_error, bool, 0644);
109101 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
109102
109103-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
109104+static struct rtnl_link_ops ipgre_link_ops;
109105 static int ipgre_tunnel_init(struct net_device *dev);
109106
109107 static int ipgre_net_id __read_mostly;
109108@@ -733,7 +733,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
109109 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
109110 };
109111
109112-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
109113+static struct rtnl_link_ops ipgre_link_ops = {
109114 .kind = "gre",
109115 .maxtype = IFLA_GRE_MAX,
109116 .policy = ipgre_policy,
109117@@ -747,7 +747,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
109118 .fill_info = ipgre_fill_info,
109119 };
109120
109121-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
109122+static struct rtnl_link_ops ipgre_tap_ops = {
109123 .kind = "gretap",
109124 .maxtype = IFLA_GRE_MAX,
109125 .policy = ipgre_policy,
109126diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
109127index 64741b9..6f334a2 100644
109128--- a/net/ipv4/ip_sockglue.c
109129+++ b/net/ipv4/ip_sockglue.c
109130@@ -1188,7 +1188,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
109131 len = min_t(unsigned int, len, opt->optlen);
109132 if (put_user(len, optlen))
109133 return -EFAULT;
109134- if (copy_to_user(optval, opt->__data, len))
109135+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
109136+ copy_to_user(optval, opt->__data, len))
109137 return -EFAULT;
109138 return 0;
109139 }
109140@@ -1319,7 +1320,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
109141 if (sk->sk_type != SOCK_STREAM)
109142 return -ENOPROTOOPT;
109143
109144- msg.msg_control = optval;
109145+ msg.msg_control = (void __force_kernel *)optval;
109146 msg.msg_controllen = len;
109147 msg.msg_flags = flags;
109148
109149diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
109150index b8960f3..0f025db 100644
109151--- a/net/ipv4/ip_vti.c
109152+++ b/net/ipv4/ip_vti.c
109153@@ -45,7 +45,7 @@
109154 #include <net/net_namespace.h>
109155 #include <net/netns/generic.h>
109156
109157-static struct rtnl_link_ops vti_link_ops __read_mostly;
109158+static struct rtnl_link_ops vti_link_ops;
109159
109160 static int vti_net_id __read_mostly;
109161 static int vti_tunnel_init(struct net_device *dev);
109162@@ -519,7 +519,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
109163 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
109164 };
109165
109166-static struct rtnl_link_ops vti_link_ops __read_mostly = {
109167+static struct rtnl_link_ops vti_link_ops = {
109168 .kind = "vti",
109169 .maxtype = IFLA_VTI_MAX,
109170 .policy = vti_policy,
109171diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
109172index b3e86ea..18ce98c 100644
109173--- a/net/ipv4/ipconfig.c
109174+++ b/net/ipv4/ipconfig.c
109175@@ -334,7 +334,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
109176
109177 mm_segment_t oldfs = get_fs();
109178 set_fs(get_ds());
109179- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
109180+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
109181 set_fs(oldfs);
109182 return res;
109183 }
109184@@ -345,7 +345,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
109185
109186 mm_segment_t oldfs = get_fs();
109187 set_fs(get_ds());
109188- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
109189+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
109190 set_fs(oldfs);
109191 return res;
109192 }
109193@@ -356,7 +356,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
109194
109195 mm_segment_t oldfs = get_fs();
109196 set_fs(get_ds());
109197- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
109198+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
109199 set_fs(oldfs);
109200 return res;
109201 }
109202diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
109203index 62eaa00..29b2dc2 100644
109204--- a/net/ipv4/ipip.c
109205+++ b/net/ipv4/ipip.c
109206@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
109207 static int ipip_net_id __read_mostly;
109208
109209 static int ipip_tunnel_init(struct net_device *dev);
109210-static struct rtnl_link_ops ipip_link_ops __read_mostly;
109211+static struct rtnl_link_ops ipip_link_ops;
109212
109213 static int ipip_err(struct sk_buff *skb, u32 info)
109214 {
109215@@ -409,7 +409,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
109216 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
109217 };
109218
109219-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
109220+static struct rtnl_link_ops ipip_link_ops = {
109221 .kind = "ipip",
109222 .maxtype = IFLA_IPTUN_MAX,
109223 .policy = ipip_policy,
109224diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
109225index f95b6f9..2ee2097 100644
109226--- a/net/ipv4/netfilter/arp_tables.c
109227+++ b/net/ipv4/netfilter/arp_tables.c
109228@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
109229 #endif
109230
109231 static int get_info(struct net *net, void __user *user,
109232- const int *len, int compat)
109233+ int len, int compat)
109234 {
109235 char name[XT_TABLE_MAXNAMELEN];
109236 struct xt_table *t;
109237 int ret;
109238
109239- if (*len != sizeof(struct arpt_getinfo)) {
109240- duprintf("length %u != %Zu\n", *len,
109241+ if (len != sizeof(struct arpt_getinfo)) {
109242+ duprintf("length %u != %Zu\n", len,
109243 sizeof(struct arpt_getinfo));
109244 return -EINVAL;
109245 }
109246@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
109247 info.size = private->size;
109248 strcpy(info.name, name);
109249
109250- if (copy_to_user(user, &info, *len) != 0)
109251+ if (copy_to_user(user, &info, len) != 0)
109252 ret = -EFAULT;
109253 else
109254 ret = 0;
109255@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
109256
109257 switch (cmd) {
109258 case ARPT_SO_GET_INFO:
109259- ret = get_info(sock_net(sk), user, len, 1);
109260+ ret = get_info(sock_net(sk), user, *len, 1);
109261 break;
109262 case ARPT_SO_GET_ENTRIES:
109263 ret = compat_get_entries(sock_net(sk), user, len);
109264@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
109265
109266 switch (cmd) {
109267 case ARPT_SO_GET_INFO:
109268- ret = get_info(sock_net(sk), user, len, 0);
109269+ ret = get_info(sock_net(sk), user, *len, 0);
109270 break;
109271
109272 case ARPT_SO_GET_ENTRIES:
109273diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
109274index 99e810f..3711b81 100644
109275--- a/net/ipv4/netfilter/ip_tables.c
109276+++ b/net/ipv4/netfilter/ip_tables.c
109277@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
109278 #endif
109279
109280 static int get_info(struct net *net, void __user *user,
109281- const int *len, int compat)
109282+ int len, int compat)
109283 {
109284 char name[XT_TABLE_MAXNAMELEN];
109285 struct xt_table *t;
109286 int ret;
109287
109288- if (*len != sizeof(struct ipt_getinfo)) {
109289- duprintf("length %u != %zu\n", *len,
109290+ if (len != sizeof(struct ipt_getinfo)) {
109291+ duprintf("length %u != %zu\n", len,
109292 sizeof(struct ipt_getinfo));
109293 return -EINVAL;
109294 }
109295@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
109296 info.size = private->size;
109297 strcpy(info.name, name);
109298
109299- if (copy_to_user(user, &info, *len) != 0)
109300+ if (copy_to_user(user, &info, len) != 0)
109301 ret = -EFAULT;
109302 else
109303 ret = 0;
109304@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
109305
109306 switch (cmd) {
109307 case IPT_SO_GET_INFO:
109308- ret = get_info(sock_net(sk), user, len, 1);
109309+ ret = get_info(sock_net(sk), user, *len, 1);
109310 break;
109311 case IPT_SO_GET_ENTRIES:
109312 ret = compat_get_entries(sock_net(sk), user, len);
109313@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
109314
109315 switch (cmd) {
109316 case IPT_SO_GET_INFO:
109317- ret = get_info(sock_net(sk), user, len, 0);
109318+ ret = get_info(sock_net(sk), user, *len, 0);
109319 break;
109320
109321 case IPT_SO_GET_ENTRIES:
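The get_info() change, identical in arp_tables.c and ip_tables.c above, passes the length by value instead of by pointer: the size validated against sizeof(struct arpt_getinfo)/sizeof(struct ipt_getinfo) is then provably the same size later handed to copy_to_user(), so the two reads of *len in the old code can never disagree. A self-contained model of the fixed shape:

#include <string.h>

struct getinfo_model { char name[32]; unsigned int size; };

/* memcpy stands in for copy_to_user() */
static int copy_out(void *dst, const void *src, size_t len)
{
    memcpy(dst, src, len);
    return 0;
}

/* len arrives by value: the length checked here is, by construction,
 * the length used in the copy below -- no window for a second read. */
static int get_info_model(void *user, int len)
{
    struct getinfo_model info = { "filter", sizeof(struct getinfo_model) };

    if (len != (int)sizeof(info))
        return -1;                       /* -EINVAL in the kernel */
    return copy_out(user, &info, (size_t)len);
}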
109322diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
109323index 2510c02..cfb34fa 100644
109324--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
109325+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
109326@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
109327 spin_lock_init(&cn->lock);
109328
109329 #ifdef CONFIG_PROC_FS
109330- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
109331+ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
109332 if (!cn->procdir) {
109333 pr_err("Unable to proc dir entry\n");
109334 return -ENOMEM;
109335diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
109336index 044a0dd..3399751 100644
109337--- a/net/ipv4/ping.c
109338+++ b/net/ipv4/ping.c
109339@@ -59,7 +59,7 @@ struct ping_table {
109340 };
109341
109342 static struct ping_table ping_table;
109343-struct pingv6_ops pingv6_ops;
109344+struct pingv6_ops *pingv6_ops;
109345 EXPORT_SYMBOL_GPL(pingv6_ops);
109346
109347 static u16 ping_port_rover;
109348@@ -348,7 +348,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
109349 return -ENODEV;
109350 }
109351 }
109352- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
109353+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
109354 scoped);
109355 rcu_read_unlock();
109356
109357@@ -556,7 +556,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
109358 }
109359 #if IS_ENABLED(CONFIG_IPV6)
109360 } else if (skb->protocol == htons(ETH_P_IPV6)) {
109361- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
109362+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
109363 #endif
109364 }
109365
109366@@ -574,7 +574,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
109367 info, (u8 *)icmph);
109368 #if IS_ENABLED(CONFIG_IPV6)
109369 } else if (family == AF_INET6) {
109370- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
109371+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
109372 info, (u8 *)icmph);
109373 #endif
109374 }
109375@@ -858,7 +858,7 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
109376 return ip_recv_error(sk, msg, len, addr_len);
109377 #if IS_ENABLED(CONFIG_IPV6)
109378 } else if (family == AF_INET6) {
109379- return pingv6_ops.ipv6_recv_error(sk, msg, len,
109380+ return pingv6_ops->ipv6_recv_error(sk, msg, len,
109381 addr_len);
109382 #endif
109383 }
109384@@ -916,10 +916,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
109385 }
109386
109387 if (inet6_sk(sk)->rxopt.all)
109388- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
109389+ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
109390 if (skb->protocol == htons(ETH_P_IPV6) &&
109391 inet6_sk(sk)->rxopt.all)
109392- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
109393+ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
109394 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
109395 ip_cmsg_recv(msg, skb);
109396 #endif
109397@@ -1111,7 +1111,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
109398 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
109399 0, sock_i_ino(sp),
109400 atomic_read(&sp->sk_refcnt), sp,
109401- atomic_read(&sp->sk_drops));
109402+ atomic_read_unchecked(&sp->sk_drops));
109403 }
109404
109405 static int ping_v4_seq_show(struct seq_file *seq, void *v)
109406diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
109407index 2c65160..213ecdf 100644
109408--- a/net/ipv4/raw.c
109409+++ b/net/ipv4/raw.c
109410@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
109411 int raw_rcv(struct sock *sk, struct sk_buff *skb)
109412 {
109413 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
109414- atomic_inc(&sk->sk_drops);
109415+ atomic_inc_unchecked(&sk->sk_drops);
109416 kfree_skb(skb);
109417 return NET_RX_DROP;
109418 }
109419@@ -748,16 +748,20 @@ static int raw_init(struct sock *sk)
109420
109421 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
109422 {
109423+ struct icmp_filter filter;
109424+
109425 if (optlen > sizeof(struct icmp_filter))
109426 optlen = sizeof(struct icmp_filter);
109427- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
109428+ if (copy_from_user(&filter, optval, optlen))
109429 return -EFAULT;
109430+ raw_sk(sk)->filter = filter;
109431 return 0;
109432 }
109433
109434 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
109435 {
109436 int len, ret = -EFAULT;
109437+ struct icmp_filter filter;
109438
109439 if (get_user(len, optlen))
109440 goto out;
109441@@ -767,8 +771,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
109442 if (len > sizeof(struct icmp_filter))
109443 len = sizeof(struct icmp_filter);
109444 ret = -EFAULT;
109445- if (put_user(len, optlen) ||
109446- copy_to_user(optval, &raw_sk(sk)->filter, len))
109447+ filter = raw_sk(sk)->filter;
109448+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
109449 goto out;
109450 ret = 0;
109451 out: return ret;
109452@@ -997,7 +1001,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
109453 0, 0L, 0,
109454 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
109455 0, sock_i_ino(sp),
109456- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
109457+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
109458 }
109459
109460 static int raw_seq_show(struct seq_file *seq, void *v)
109461diff --git a/net/ipv4/route.c b/net/ipv4/route.c
109462index 1901998..a9a850a 100644
109463--- a/net/ipv4/route.c
109464+++ b/net/ipv4/route.c
109465@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {
109466
109467 static int rt_cache_seq_open(struct inode *inode, struct file *file)
109468 {
109469- return seq_open(file, &rt_cache_seq_ops);
109470+ return seq_open_restrict(file, &rt_cache_seq_ops);
109471 }
109472
109473 static const struct file_operations rt_cache_seq_fops = {
109474@@ -319,7 +319,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
109475
109476 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
109477 {
109478- return seq_open(file, &rt_cpu_seq_ops);
109479+ return seq_open_restrict(file, &rt_cpu_seq_ops);
109480 }
109481
109482 static const struct file_operations rt_cpu_seq_fops = {
109483@@ -357,7 +357,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
109484
109485 static int rt_acct_proc_open(struct inode *inode, struct file *file)
109486 {
109487- return single_open(file, rt_acct_proc_show, NULL);
109488+ return single_open_restrict(file, rt_acct_proc_show, NULL);
109489 }
109490
109491 static const struct file_operations rt_acct_proc_fops = {
109492@@ -459,11 +459,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
109493
109494 #define IP_IDENTS_SZ 2048u
109495 struct ip_ident_bucket {
109496- atomic_t id;
109497+ atomic_unchecked_t id;
109498 u32 stamp32;
109499 };
109500
109501-static struct ip_ident_bucket *ip_idents __read_mostly;
109502+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
109503
109504 /* In order to protect privacy, we add a perturbation to identifiers
109505 * if one generator is seldom used. This makes hard for an attacker
109506@@ -479,7 +479,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
109507 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
109508 delta = prandom_u32_max(now - old);
109509
109510- return atomic_add_return(segs + delta, &bucket->id) - segs;
109511+ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
109512 }
109513 EXPORT_SYMBOL(ip_idents_reserve);
109514
109515@@ -2625,34 +2625,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
109516 .maxlen = sizeof(int),
109517 .mode = 0200,
109518 .proc_handler = ipv4_sysctl_rtcache_flush,
109519+ .extra1 = &init_net,
109520 },
109521 { },
109522 };
109523
109524 static __net_init int sysctl_route_net_init(struct net *net)
109525 {
109526- struct ctl_table *tbl;
109527+ ctl_table_no_const *tbl = NULL;
109528
109529- tbl = ipv4_route_flush_table;
109530 if (!net_eq(net, &init_net)) {
109531- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
109532+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
109533 if (tbl == NULL)
109534 goto err_dup;
109535
109536 /* Don't export sysctls to unprivileged users */
109537 if (net->user_ns != &init_user_ns)
109538 tbl[0].procname = NULL;
109539- }
109540- tbl[0].extra1 = net;
109541+ tbl[0].extra1 = net;
109542+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
109543+ } else
109544+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
109545
109546- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
109547 if (net->ipv4.route_hdr == NULL)
109548 goto err_reg;
109549 return 0;
109550
109551 err_reg:
109552- if (tbl != ipv4_route_flush_table)
109553- kfree(tbl);
109554+ kfree(tbl);
109555 err_dup:
109556 return -ENOMEM;
109557 }
109558@@ -2675,8 +2675,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
109559
109560 static __net_init int rt_genid_init(struct net *net)
109561 {
109562- atomic_set(&net->ipv4.rt_genid, 0);
109563- atomic_set(&net->fnhe_genid, 0);
109564+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
109565+ atomic_set_unchecked(&net->fnhe_genid, 0);
109566 get_random_bytes(&net->ipv4.dev_addr_genid,
109567 sizeof(net->ipv4.dev_addr_genid));
109568 return 0;
109569@@ -2719,11 +2719,7 @@ int __init ip_rt_init(void)
109570 {
109571 int rc = 0;
109572
109573- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
109574- if (!ip_idents)
109575- panic("IP: failed to allocate ip_idents\n");
109576-
109577- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
109578+ prandom_bytes(ip_idents, sizeof(ip_idents));
109579
109580 #ifdef CONFIG_IP_ROUTE_CLASSID
109581 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
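[annotation] The route.c hunks above replace the boot-time kmalloc() of ip_idents with a fixed static array, so the IP-ID generator can no longer fail to allocate and its size is a compile-time constant. A minimal sketch keeping the names from the hunk (the init helper is hypothetical; real code seeds from ip_rt_init()):

#define IP_IDENTS_SZ 2048u
static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;

static void __init ip_idents_seed(void)
{
	/* sizeof(ip_idents) is known statically: no separate length
	 * bookkeeping and no allocation-failure panic() path. */
	prandom_bytes(ip_idents, sizeof(ip_idents));
}

The id field also becomes atomic_unchecked_t: the generator is expected to wrap, so atomic_add_return_unchecked() keeps the update lock-free while exempting it from refcount overflow instrumentation.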
109582diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
109583index 79a007c..5023029 100644
109584--- a/net/ipv4/sysctl_net_ipv4.c
109585+++ b/net/ipv4/sysctl_net_ipv4.c
109586@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
109587 container_of(table->data, struct net, ipv4.ip_local_ports.range);
109588 int ret;
109589 int range[2];
109590- struct ctl_table tmp = {
109591+ ctl_table_no_const tmp = {
109592 .data = &range,
109593 .maxlen = sizeof(range),
109594 .mode = table->mode,
109595@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
109596 int ret;
109597 gid_t urange[2];
109598 kgid_t low, high;
109599- struct ctl_table tmp = {
109600+ ctl_table_no_const tmp = {
109601 .data = &urange,
109602 .maxlen = sizeof(urange),
109603 .mode = table->mode,
109604@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
109605 void __user *buffer, size_t *lenp, loff_t *ppos)
109606 {
109607 char val[TCP_CA_NAME_MAX];
109608- struct ctl_table tbl = {
109609+ ctl_table_no_const tbl = {
109610 .data = val,
109611 .maxlen = TCP_CA_NAME_MAX,
109612 };
109613@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
109614 void __user *buffer, size_t *lenp,
109615 loff_t *ppos)
109616 {
109617- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
109618+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
109619 int ret;
109620
109621 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
109622@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
109623 void __user *buffer, size_t *lenp,
109624 loff_t *ppos)
109625 {
109626- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
109627+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
109628 int ret;
109629
109630 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
109631@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
109632 void __user *buffer, size_t *lenp,
109633 loff_t *ppos)
109634 {
109635- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
109636+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
109637 struct tcp_fastopen_context *ctxt;
109638 int ret;
109639 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
109640@@ -857,13 +857,12 @@ static struct ctl_table ipv4_net_table[] = {
109641
109642 static __net_init int ipv4_sysctl_init_net(struct net *net)
109643 {
109644- struct ctl_table *table;
109645+ ctl_table_no_const *table = NULL;
109646
109647- table = ipv4_net_table;
109648 if (!net_eq(net, &init_net)) {
109649 int i;
109650
109651- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
109652+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
109653 if (table == NULL)
109654 goto err_alloc;
109655
109656@@ -872,7 +871,10 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
109657 table[i].data += (void *)net - (void *)&init_net;
109658 }
109659
109660- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
109661+ if (!net_eq(net, &init_net))
109662+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
109663+ else
109664+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
109665 if (net->ipv4.ipv4_hdr == NULL)
109666 goto err_reg;
109667
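[annotation] The sysctl hunks above follow the ctl_table_no_const pattern used throughout this patch: constification makes struct ctl_table read-only after init, so only the per-netns kmemdup()'d copy, whose .data pointers must be rebased at runtime, is declared with the non-const typedef, while init_net registers the pristine template directly. A condensed sketch under assumed names (example_table and the "net/example" path are placeholders):

static int example_value;	/* stand-in for a field reached via init_net */

static struct ctl_table example_table[] = {
	{
		.procname	= "example",
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static __net_init int example_sysctl_init(struct net *net)
{
	ctl_table_no_const *tbl = NULL;
	struct ctl_table_header *hdr;

	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(example_table, sizeof(example_table), GFP_KERNEL);
		if (tbl == NULL)
			return -ENOMEM;
		/* rebase .data into this netns, as the hunk does for ipv4_net_table */
		tbl[0].data += (void *)net - (void *)&init_net;
		hdr = register_net_sysctl(net, "net/example", tbl);
	} else
		hdr = register_net_sysctl(net, "net/example", example_table);

	if (hdr == NULL) {
		kfree(tbl);	/* NULL on the init_net path, so a no-op */
		return -ENOMEM;
	}
	return 0;
}

This is also why the error paths above lose their "tbl != template" checks: tbl stays NULL unless it was kmemdup()'d, so an unconditional kfree() is safe.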
109668diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
109669index 40639c2..dfc86b2 100644
109670--- a/net/ipv4/tcp_input.c
109671+++ b/net/ipv4/tcp_input.c
109672@@ -754,7 +754,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
109673 * without any lock. We want to make sure compiler wont store
109674 * intermediate values in this location.
109675 */
109676- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
109677+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
109678 sk->sk_max_pacing_rate);
109679 }
109680
109681@@ -4478,7 +4478,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
109682 * simplifies code)
109683 */
109684 static void
109685-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
109686+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
109687 struct sk_buff *head, struct sk_buff *tail,
109688 u32 start, u32 end)
109689 {
109690@@ -5536,6 +5536,7 @@ discard:
109691 tcp_paws_reject(&tp->rx_opt, 0))
109692 goto discard_and_undo;
109693
109694+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
109695 if (th->syn) {
109696 /* We see SYN without ACK. It is attempt of
109697 * simultaneous connect with crossed SYNs.
109698@@ -5586,6 +5587,7 @@ discard:
109699 goto discard;
109700 #endif
109701 }
109702+#endif
109703 /* "fifth, if neither of the SYN or RST bits is set then
109704 * drop the segment and return."
109705 */
109706@@ -5632,7 +5634,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
109707 goto discard;
109708
109709 if (th->syn) {
109710- if (th->fin)
109711+ if (th->fin || th->urg || th->psh)
109712 goto discard;
109713 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
109714 return 1;
109715diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
109716index 77cccda..10122c4 100644
109717--- a/net/ipv4/tcp_ipv4.c
109718+++ b/net/ipv4/tcp_ipv4.c
109719@@ -91,6 +91,10 @@ int sysctl_tcp_low_latency __read_mostly;
109720 EXPORT_SYMBOL(sysctl_tcp_low_latency);
109721
109722
109723+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109724+extern int grsec_enable_blackhole;
109725+#endif
109726+
109727 #ifdef CONFIG_TCP_MD5SIG
109728 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
109729 __be32 daddr, __be32 saddr, const struct tcphdr *th);
109730@@ -1591,6 +1595,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
109731 return 0;
109732
109733 reset:
109734+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109735+ if (!grsec_enable_blackhole)
109736+#endif
109737 tcp_v4_send_reset(rsk, skb);
109738 discard:
109739 kfree_skb(skb);
109740@@ -1737,12 +1744,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
109741 TCP_SKB_CB(skb)->sacked = 0;
109742
109743 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
109744- if (!sk)
109745+ if (!sk) {
109746+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109747+ ret = 1;
109748+#endif
109749 goto no_tcp_socket;
109750-
109751+ }
109752 process:
109753- if (sk->sk_state == TCP_TIME_WAIT)
109754+ if (sk->sk_state == TCP_TIME_WAIT) {
109755+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109756+ ret = 2;
109757+#endif
109758 goto do_time_wait;
109759+ }
109760
109761 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
109762 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
109763@@ -1796,6 +1810,10 @@ csum_error:
109764 bad_packet:
109765 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
109766 } else {
109767+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109768+ if (!grsec_enable_blackhole || (ret == 1 &&
109769+ (skb->dev->flags & IFF_LOOPBACK)))
109770+#endif
109771 tcp_v4_send_reset(NULL, skb);
109772 }
109773
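[annotation] The tcp_ipv4.c hunks wire up the GRKERNSEC_BLACKHOLE policy: ret records why delivery failed (1 = no matching socket, 2 = TIME_WAIT) and the RST is suppressed unless blackholing is disabled or the probe arrived over loopback. A hypothetical helper condensing the inline #ifdef logic from the hunk:

static bool may_send_reset(int ret, const struct sk_buff *skb)
{
#ifdef CONFIG_GRKERNSEC_BLACKHOLE
	/* grsec_enable_blackhole is the extern declared above: stay silent
	 * to outside scanners, keep loopback diagnostics working. */
	return !grsec_enable_blackhole ||
	       (ret == 1 && (skb->dev->flags & IFF_LOOPBACK));
#else
	return true;
#endif
}

The same pattern repeats below for tcp_minisocks.c, tcp_ipv6.c and the UDP port-unreachable paths.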
109774diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
109775index e68e0d4..0334263 100644
109776--- a/net/ipv4/tcp_minisocks.c
109777+++ b/net/ipv4/tcp_minisocks.c
109778@@ -27,6 +27,10 @@
109779 #include <net/inet_common.h>
109780 #include <net/xfrm.h>
109781
109782+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109783+extern int grsec_enable_blackhole;
109784+#endif
109785+
109786 int sysctl_tcp_syncookies __read_mostly = 1;
109787 EXPORT_SYMBOL(sysctl_tcp_syncookies);
109788
109789@@ -740,7 +744,10 @@ embryonic_reset:
109790 * avoid becoming vulnerable to outside attack aiming at
109791 * resetting legit local connections.
109792 */
109793- req->rsk_ops->send_reset(sk, skb);
109794+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109795+ if (!grsec_enable_blackhole)
109796+#endif
109797+ req->rsk_ops->send_reset(sk, skb);
109798 } else if (fastopen) { /* received a valid RST pkt */
109799 reqsk_fastopen_remove(sk, req, true);
109800 tcp_reset(sk);
109801diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
109802index 3b66610..bfbe23a 100644
109803--- a/net/ipv4/tcp_probe.c
109804+++ b/net/ipv4/tcp_probe.c
109805@@ -238,7 +238,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
109806 if (cnt + width >= len)
109807 break;
109808
109809- if (copy_to_user(buf + cnt, tbuf, width))
109810+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
109811 return -EFAULT;
109812 cnt += width;
109813 }
109814diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
109815index 286227a..c495a76 100644
109816--- a/net/ipv4/tcp_timer.c
109817+++ b/net/ipv4/tcp_timer.c
109818@@ -22,6 +22,10 @@
109819 #include <linux/gfp.h>
109820 #include <net/tcp.h>
109821
109822+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109823+extern int grsec_lastack_retries;
109824+#endif
109825+
109826 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
109827 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
109828 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
109829@@ -192,6 +196,13 @@ static int tcp_write_timeout(struct sock *sk)
109830 }
109831 }
109832
109833+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109834+ if ((sk->sk_state == TCP_LAST_ACK) &&
109835+ (grsec_lastack_retries > 0) &&
109836+ (grsec_lastack_retries < retry_until))
109837+ retry_until = grsec_lastack_retries;
109838+#endif
109839+
109840 if (retransmits_timed_out(sk, retry_until,
109841 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
109842 /* Has it gone just too far? */
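[annotation] The clamp above gives sockets stuck in LAST_ACK their own, typically lower, retry budget, apparently so connections whose peers never acknowledge the final FIN (as happens against blackholed hosts) are reaped sooner. The guard only ever tightens the limit; a sketch of the effective bound as a hypothetical helper:

static int effective_lastack_retries(int grsec_lastack_retries, int retry_until)
{
	/* 0 disables the override; larger values never loosen the default */
	if (grsec_lastack_retries > 0 && grsec_lastack_retries < retry_until)
		return grsec_lastack_retries;
	return retry_until;
}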
109843diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
109844index 7d5a866..4874211 100644
109845--- a/net/ipv4/udp.c
109846+++ b/net/ipv4/udp.c
109847@@ -87,6 +87,7 @@
109848 #include <linux/types.h>
109849 #include <linux/fcntl.h>
109850 #include <linux/module.h>
109851+#include <linux/security.h>
109852 #include <linux/socket.h>
109853 #include <linux/sockios.h>
109854 #include <linux/igmp.h>
109855@@ -113,6 +114,10 @@
109856 #include <net/busy_poll.h>
109857 #include "udp_impl.h"
109858
109859+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109860+extern int grsec_enable_blackhole;
109861+#endif
109862+
109863 struct udp_table udp_table __read_mostly;
109864 EXPORT_SYMBOL(udp_table);
109865
109866@@ -615,6 +620,9 @@ found:
109867 return s;
109868 }
109869
109870+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
109871+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
109872+
109873 /*
109874 * This routine is called by the ICMP module when it gets some
109875 * sort of error condition. If err < 0 then the socket should
109876@@ -952,9 +960,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
109877 dport = usin->sin_port;
109878 if (dport == 0)
109879 return -EINVAL;
109880+
109881+ err = gr_search_udp_sendmsg(sk, usin);
109882+ if (err)
109883+ return err;
109884 } else {
109885 if (sk->sk_state != TCP_ESTABLISHED)
109886 return -EDESTADDRREQ;
109887+
109888+ err = gr_search_udp_sendmsg(sk, NULL);
109889+ if (err)
109890+ return err;
109891+
109892 daddr = inet->inet_daddr;
109893 dport = inet->inet_dport;
109894 /* Open fast path for connected socket.
109895@@ -1202,7 +1219,7 @@ static unsigned int first_packet_length(struct sock *sk)
109896 IS_UDPLITE(sk));
109897 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
109898 IS_UDPLITE(sk));
109899- atomic_inc(&sk->sk_drops);
109900+ atomic_inc_unchecked(&sk->sk_drops);
109901 __skb_unlink(skb, rcvq);
109902 __skb_queue_tail(&list_kill, skb);
109903 }
109904@@ -1282,6 +1299,10 @@ try_again:
109905 if (!skb)
109906 goto out;
109907
109908+ err = gr_search_udp_recvmsg(sk, skb);
109909+ if (err)
109910+ goto out_free;
109911+
109912 ulen = skb->len - sizeof(struct udphdr);
109913 copied = len;
109914 if (copied > ulen)
109915@@ -1315,7 +1336,7 @@ try_again:
109916 if (unlikely(err)) {
109917 trace_kfree_skb(skb, udp_recvmsg);
109918 if (!peeked) {
109919- atomic_inc(&sk->sk_drops);
109920+ atomic_inc_unchecked(&sk->sk_drops);
109921 UDP_INC_STATS_USER(sock_net(sk),
109922 UDP_MIB_INERRORS, is_udplite);
109923 }
109924@@ -1612,7 +1633,7 @@ csum_error:
109925 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
109926 drop:
109927 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
109928- atomic_inc(&sk->sk_drops);
109929+ atomic_inc_unchecked(&sk->sk_drops);
109930 kfree_skb(skb);
109931 return -1;
109932 }
109933@@ -1631,7 +1652,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
109934 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
109935
109936 if (!skb1) {
109937- atomic_inc(&sk->sk_drops);
109938+ atomic_inc_unchecked(&sk->sk_drops);
109939 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
109940 IS_UDPLITE(sk));
109941 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
109942@@ -1817,6 +1838,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
109943 goto csum_error;
109944
109945 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
109946+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109947+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
109948+#endif
109949 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
109950
109951 /*
109952@@ -2403,7 +2427,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
109953 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
109954 0, sock_i_ino(sp),
109955 atomic_read(&sp->sk_refcnt), sp,
109956- atomic_read(&sp->sk_drops));
109957+ atomic_read_unchecked(&sp->sk_drops));
109958 }
109959
109960 int udp4_seq_show(struct seq_file *seq, void *v)
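[annotation] The udp.c hooks route every send and receive through the grsecurity RBAC layer. Judging from how the hunks use them, gr_search_udp_sendmsg()/gr_search_udp_recvmsg() return 0 to allow or a negative errno to veto the datagram; the implementations live in the grsecurity/ tree, not in this hunk. The call contract as exercised above:

/* unconnected send: check the explicit destination address */
err = gr_search_udp_sendmsg(sk, usin);
/* connected send: NULL tells the hook to use the socket's peer */
err = gr_search_udp_sendmsg(sk, NULL);
/* receive: checked per-skb before the payload is copied to userspace */
err = gr_search_udp_recvmsg(sk, skb);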
109961diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
109962index 6156f68..d6ab46d 100644
109963--- a/net/ipv4/xfrm4_policy.c
109964+++ b/net/ipv4/xfrm4_policy.c
109965@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
109966 fl4->flowi4_tos = iph->tos;
109967 }
109968
109969-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
109970+static int xfrm4_garbage_collect(struct dst_ops *ops)
109971 {
109972 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
109973
109974- xfrm4_policy_afinfo.garbage_collect(net);
109975+ xfrm_garbage_collect_deferred(net);
109976 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
109977 }
109978
109979@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
109980
109981 static int __net_init xfrm4_net_init(struct net *net)
109982 {
109983- struct ctl_table *table;
109984+ ctl_table_no_const *table = NULL;
109985 struct ctl_table_header *hdr;
109986
109987- table = xfrm4_policy_table;
109988 if (!net_eq(net, &init_net)) {
109989- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
109990+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
109991 if (!table)
109992 goto err_alloc;
109993
109994 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
109995- }
109996-
109997- hdr = register_net_sysctl(net, "net/ipv4", table);
109998+ hdr = register_net_sysctl(net, "net/ipv4", table);
109999+ } else
110000+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
110001 if (!hdr)
110002 goto err_reg;
110003
110004@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
110005 return 0;
110006
110007 err_reg:
110008- if (!net_eq(net, &init_net))
110009- kfree(table);
110010+ kfree(table);
110011 err_alloc:
110012 return -ENOMEM;
110013 }
110014diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
110015index 5667b30..2044f61 100644
110016--- a/net/ipv6/addrconf.c
110017+++ b/net/ipv6/addrconf.c
110018@@ -593,7 +593,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
110019 idx = 0;
110020 head = &net->dev_index_head[h];
110021 rcu_read_lock();
110022- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
110023+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
110024 net->dev_base_seq;
110025 hlist_for_each_entry_rcu(dev, head, index_hlist) {
110026 if (idx < s_idx)
110027@@ -2390,7 +2390,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
110028 p.iph.ihl = 5;
110029 p.iph.protocol = IPPROTO_IPV6;
110030 p.iph.ttl = 64;
110031- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
110032+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
110033
110034 if (ops->ndo_do_ioctl) {
110035 mm_segment_t oldfs = get_fs();
110036@@ -3516,16 +3516,23 @@ static const struct file_operations if6_fops = {
110037 .release = seq_release_net,
110038 };
110039
110040+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
110041+extern void unregister_ipv6_seq_ops_addr(void);
110042+
110043 static int __net_init if6_proc_net_init(struct net *net)
110044 {
110045- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
110046+ register_ipv6_seq_ops_addr(&if6_seq_ops);
110047+ if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) {
110048+ unregister_ipv6_seq_ops_addr();
110049 return -ENOMEM;
110050+ }
110051 return 0;
110052 }
110053
110054 static void __net_exit if6_proc_net_exit(struct net *net)
110055 {
110056 remove_proc_entry("if_inet6", net->proc_net);
110057+ unregister_ipv6_seq_ops_addr();
110058 }
110059
110060 static struct pernet_operations if6_proc_net_ops = {
110061@@ -4141,7 +4148,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
110062 s_ip_idx = ip_idx = cb->args[2];
110063
110064 rcu_read_lock();
110065- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
110066+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
110067 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
110068 idx = 0;
110069 head = &net->dev_index_head[h];
110070@@ -4741,11 +4748,8 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
110071
110072 rt = rt6_lookup(dev_net(dev), &ifp->peer_addr, NULL,
110073 dev->ifindex, 1);
110074- if (rt) {
110075- dst_hold(&rt->dst);
110076- if (ip6_del_rt(rt))
110077- dst_free(&rt->dst);
110078- }
110079+ if (rt && ip6_del_rt(rt))
110080+ dst_free(&rt->dst);
110081 }
110082 dst_hold(&ifp->rt->dst);
110083
110084@@ -4753,7 +4757,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
110085 dst_free(&ifp->rt->dst);
110086 break;
110087 }
110088- atomic_inc(&net->ipv6.dev_addr_genid);
110089+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
110090 rt_genid_bump_ipv6(net);
110091 }
110092
110093@@ -4774,7 +4778,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
110094 int *valp = ctl->data;
110095 int val = *valp;
110096 loff_t pos = *ppos;
110097- struct ctl_table lctl;
110098+ ctl_table_no_const lctl;
110099 int ret;
110100
110101 /*
110102@@ -4859,7 +4863,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
110103 int *valp = ctl->data;
110104 int val = *valp;
110105 loff_t pos = *ppos;
110106- struct ctl_table lctl;
110107+ ctl_table_no_const lctl;
110108 int ret;
110109
110110 /*
110111diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
110112index 7cb4392..dc96d28 100644
110113--- a/net/ipv6/af_inet6.c
110114+++ b/net/ipv6/af_inet6.c
110115@@ -765,7 +765,7 @@ static int __net_init inet6_net_init(struct net *net)
110116 net->ipv6.sysctl.bindv6only = 0;
110117 net->ipv6.sysctl.icmpv6_time = 1*HZ;
110118 net->ipv6.sysctl.flowlabel_consistency = 1;
110119- atomic_set(&net->ipv6.rt_genid, 0);
110120+ atomic_set_unchecked(&net->ipv6.rt_genid, 0);
110121
110122 err = ipv6_init_mibs(net);
110123 if (err)
110124diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
110125index c3bf2d2..1f00573 100644
110126--- a/net/ipv6/datagram.c
110127+++ b/net/ipv6/datagram.c
110128@@ -938,5 +938,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
110129 0,
110130 sock_i_ino(sp),
110131 atomic_read(&sp->sk_refcnt), sp,
110132- atomic_read(&sp->sk_drops));
110133+ atomic_read_unchecked(&sp->sk_drops));
110134 }
110135diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
110136index f6c84a6..9f2084e 100644
110137--- a/net/ipv6/icmp.c
110138+++ b/net/ipv6/icmp.c
110139@@ -990,7 +990,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
110140
110141 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
110142 {
110143- struct ctl_table *table;
110144+ ctl_table_no_const *table;
110145
110146 table = kmemdup(ipv6_icmp_table_template,
110147 sizeof(ipv6_icmp_table_template),
110148diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
110149index 3873181..220ad3f 100644
110150--- a/net/ipv6/ip6_gre.c
110151+++ b/net/ipv6/ip6_gre.c
110152@@ -71,8 +71,8 @@ struct ip6gre_net {
110153 struct net_device *fb_tunnel_dev;
110154 };
110155
110156-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
110157-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
110158+static struct rtnl_link_ops ip6gre_link_ops;
110159+static struct rtnl_link_ops ip6gre_tap_ops;
110160 static int ip6gre_tunnel_init(struct net_device *dev);
110161 static void ip6gre_tunnel_setup(struct net_device *dev);
110162 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
110163@@ -1280,7 +1280,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
110164 }
110165
110166
110167-static struct inet6_protocol ip6gre_protocol __read_mostly = {
110168+static struct inet6_protocol ip6gre_protocol = {
110169 .handler = ip6gre_rcv,
110170 .err_handler = ip6gre_err,
110171 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
110172@@ -1638,7 +1638,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
110173 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
110174 };
110175
110176-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
110177+static struct rtnl_link_ops ip6gre_link_ops = {
110178 .kind = "ip6gre",
110179 .maxtype = IFLA_GRE_MAX,
110180 .policy = ip6gre_policy,
110181@@ -1652,7 +1652,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
110182 .fill_info = ip6gre_fill_info,
110183 };
110184
110185-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
110186+static struct rtnl_link_ops ip6gre_tap_ops = {
110187 .kind = "ip6gretap",
110188 .maxtype = IFLA_GRE_MAX,
110189 .policy = ip6gre_policy,
110190diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
110191index afa0824..04ba530 100644
110192--- a/net/ipv6/ip6_tunnel.c
110193+++ b/net/ipv6/ip6_tunnel.c
110194@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
110195
110196 static int ip6_tnl_dev_init(struct net_device *dev);
110197 static void ip6_tnl_dev_setup(struct net_device *dev);
110198-static struct rtnl_link_ops ip6_link_ops __read_mostly;
110199+static struct rtnl_link_ops ip6_link_ops;
110200
110201 static int ip6_tnl_net_id __read_mostly;
110202 struct ip6_tnl_net {
110203@@ -1708,7 +1708,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
110204 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
110205 };
110206
110207-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
110208+static struct rtnl_link_ops ip6_link_ops = {
110209 .kind = "ip6tnl",
110210 .maxtype = IFLA_IPTUN_MAX,
110211 .policy = ip6_tnl_policy,
110212diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
110213index 9aaa6bb..5c13e57 100644
110214--- a/net/ipv6/ip6_vti.c
110215+++ b/net/ipv6/ip6_vti.c
110216@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
110217
110218 static int vti6_dev_init(struct net_device *dev);
110219 static void vti6_dev_setup(struct net_device *dev);
110220-static struct rtnl_link_ops vti6_link_ops __read_mostly;
110221+static struct rtnl_link_ops vti6_link_ops;
110222
110223 static int vti6_net_id __read_mostly;
110224 struct vti6_net {
110225@@ -977,7 +977,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
110226 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
110227 };
110228
110229-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
110230+static struct rtnl_link_ops vti6_link_ops = {
110231 .kind = "vti6",
110232 .maxtype = IFLA_VTI_MAX,
110233 .policy = vti6_policy,
110234diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
110235index edb58af..78de133 100644
110236--- a/net/ipv6/ipv6_sockglue.c
110237+++ b/net/ipv6/ipv6_sockglue.c
110238@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
110239 if (sk->sk_type != SOCK_STREAM)
110240 return -ENOPROTOOPT;
110241
110242- msg.msg_control = optval;
110243+ msg.msg_control = (void __force_kernel *)optval;
110244 msg.msg_controllen = len;
110245 msg.msg_flags = flags;
110246
110247diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
110248index e080fbb..412b3cf 100644
110249--- a/net/ipv6/netfilter/ip6_tables.c
110250+++ b/net/ipv6/netfilter/ip6_tables.c
110251@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
110252 #endif
110253
110254 static int get_info(struct net *net, void __user *user,
110255- const int *len, int compat)
110256+ int len, int compat)
110257 {
110258 char name[XT_TABLE_MAXNAMELEN];
110259 struct xt_table *t;
110260 int ret;
110261
110262- if (*len != sizeof(struct ip6t_getinfo)) {
110263- duprintf("length %u != %zu\n", *len,
110264+ if (len != sizeof(struct ip6t_getinfo)) {
110265+ duprintf("length %u != %zu\n", len,
110266 sizeof(struct ip6t_getinfo));
110267 return -EINVAL;
110268 }
110269@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
110270 info.size = private->size;
110271 strcpy(info.name, name);
110272
110273- if (copy_to_user(user, &info, *len) != 0)
110274+ if (copy_to_user(user, &info, len) != 0)
110275 ret = -EFAULT;
110276 else
110277 ret = 0;
110278@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
110279
110280 switch (cmd) {
110281 case IP6T_SO_GET_INFO:
110282- ret = get_info(sock_net(sk), user, len, 1);
110283+ ret = get_info(sock_net(sk), user, *len, 1);
110284 break;
110285 case IP6T_SO_GET_ENTRIES:
110286 ret = compat_get_entries(sock_net(sk), user, len);
110287@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
110288
110289 switch (cmd) {
110290 case IP6T_SO_GET_INFO:
110291- ret = get_info(sock_net(sk), user, len, 0);
110292+ ret = get_info(sock_net(sk), user, *len, 0);
110293 break;
110294
110295 case IP6T_SO_GET_ENTRIES:
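[annotation] get_info() now receives the length by value: the callers dereference *len exactly once, so the value validated against sizeof(struct ip6t_getinfo) is by construction the same value later handed to copy_to_user(). Whatever the pointer behind *len might do, pass-by-value makes the checked and copied sizes provably identical. A minimal sketch of the single-snapshot rule (hypothetical name, table fill elided):

static int get_info_sketch(void __user *user, int len)
{
	struct ip6t_getinfo info;

	if (len != sizeof(struct ip6t_getinfo))
		return -EINVAL;
	memset(&info, 0, sizeof(info));
	/* ... fill info from the xt_table ... */
	return copy_to_user(user, &info, len) ? -EFAULT : 0;
}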
110296diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
110297index 0d5279f..89d9f6f 100644
110298--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
110299+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
110300@@ -90,12 +90,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
110301
110302 static int nf_ct_frag6_sysctl_register(struct net *net)
110303 {
110304- struct ctl_table *table;
110305+ ctl_table_no_const *table = NULL;
110306 struct ctl_table_header *hdr;
110307
110308- table = nf_ct_frag6_sysctl_table;
110309 if (!net_eq(net, &init_net)) {
110310- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
110311+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
110312 GFP_KERNEL);
110313 if (table == NULL)
110314 goto err_alloc;
110315@@ -103,9 +102,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
110316 table[0].data = &net->nf_frag.frags.timeout;
110317 table[1].data = &net->nf_frag.frags.low_thresh;
110318 table[2].data = &net->nf_frag.frags.high_thresh;
110319- }
110320-
110321- hdr = register_net_sysctl(net, "net/netfilter", table);
110322+ hdr = register_net_sysctl(net, "net/netfilter", table);
110323+ } else
110324+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
110325 if (hdr == NULL)
110326 goto err_reg;
110327
110328@@ -113,8 +112,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
110329 return 0;
110330
110331 err_reg:
110332- if (!net_eq(net, &init_net))
110333- kfree(table);
110334+ kfree(table);
110335 err_alloc:
110336 return -ENOMEM;
110337 }
110338diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
110339index 5b7a1ed..d9da205 100644
110340--- a/net/ipv6/ping.c
110341+++ b/net/ipv6/ping.c
110342@@ -240,6 +240,24 @@ static struct pernet_operations ping_v6_net_ops = {
110343 };
110344 #endif
110345
110346+static struct pingv6_ops real_pingv6_ops = {
110347+ .ipv6_recv_error = ipv6_recv_error,
110348+ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl,
110349+ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
110350+ .icmpv6_err_convert = icmpv6_err_convert,
110351+ .ipv6_icmp_error = ipv6_icmp_error,
110352+ .ipv6_chk_addr = ipv6_chk_addr,
110353+};
110354+
110355+static struct pingv6_ops dummy_pingv6_ops = {
110356+ .ipv6_recv_error = dummy_ipv6_recv_error,
110357+ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl,
110358+ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
110359+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
110360+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
110361+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
110362+};
110363+
110364 int __init pingv6_init(void)
110365 {
110366 #ifdef CONFIG_PROC_FS
110367@@ -247,13 +265,7 @@ int __init pingv6_init(void)
110368 if (ret)
110369 return ret;
110370 #endif
110371- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
110372- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
110373- pingv6_ops.ip6_datagram_recv_specific_ctl =
110374- ip6_datagram_recv_specific_ctl;
110375- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
110376- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
110377- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
110378+ pingv6_ops = &real_pingv6_ops;
110379 return inet6_register_protosw(&pingv6_protosw);
110380 }
110381
110382@@ -262,14 +274,9 @@ int __init pingv6_init(void)
110383 */
110384 void pingv6_exit(void)
110385 {
110386- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
110387- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
110388- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
110389- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
110390- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
110391- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
110392 #ifdef CONFIG_PROC_FS
110393 unregister_pernet_subsys(&ping_v6_net_ops);
110394 #endif
110395+ pingv6_ops = &dummy_pingv6_ops;
110396 inet6_unregister_protosw(&pingv6_protosw);
110397 }
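[annotation] pingv6_init()/pingv6_exit() no longer patch six function pointers in a writable global one at a time; they flip pingv6_ops between two fully populated static tables. With structure constification in play that is the difference between a permanently writable ops struct and a single pointer store:

pingv6_ops = &real_pingv6_ops;	/* init: live IPv6 helpers  */
pingv6_ops = &dummy_pingv6_ops;	/* exit: safe stub handlers */

This assumes pingv6_ops itself was changed from a struct to a struct pointer elsewhere in the patch, which the assignments above imply.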
110398diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
110399index 3317440..201764e 100644
110400--- a/net/ipv6/proc.c
110401+++ b/net/ipv6/proc.c
110402@@ -309,7 +309,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
110403 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
110404 goto proc_snmp6_fail;
110405
110406- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
110407+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
110408 if (!net->mib.proc_net_devsnmp6)
110409 goto proc_dev_snmp6_fail;
110410 return 0;
110411diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
110412index b2dc60b..a6b6c10 100644
110413--- a/net/ipv6/raw.c
110414+++ b/net/ipv6/raw.c
110415@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
110416 {
110417 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
110418 skb_checksum_complete(skb)) {
110419- atomic_inc(&sk->sk_drops);
110420+ atomic_inc_unchecked(&sk->sk_drops);
110421 kfree_skb(skb);
110422 return NET_RX_DROP;
110423 }
110424@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
110425 struct raw6_sock *rp = raw6_sk(sk);
110426
110427 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
110428- atomic_inc(&sk->sk_drops);
110429+ atomic_inc_unchecked(&sk->sk_drops);
110430 kfree_skb(skb);
110431 return NET_RX_DROP;
110432 }
110433@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
110434
110435 if (inet->hdrincl) {
110436 if (skb_checksum_complete(skb)) {
110437- atomic_inc(&sk->sk_drops);
110438+ atomic_inc_unchecked(&sk->sk_drops);
110439 kfree_skb(skb);
110440 return NET_RX_DROP;
110441 }
110442@@ -610,7 +610,7 @@ out:
110443 return err;
110444 }
110445
110446-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
110447+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
110448 struct flowi6 *fl6, struct dst_entry **dstp,
110449 unsigned int flags)
110450 {
110451@@ -916,12 +916,15 @@ do_confirm:
110452 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
110453 char __user *optval, int optlen)
110454 {
110455+ struct icmp6_filter filter;
110456+
110457 switch (optname) {
110458 case ICMPV6_FILTER:
110459 if (optlen > sizeof(struct icmp6_filter))
110460 optlen = sizeof(struct icmp6_filter);
110461- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
110462+ if (copy_from_user(&filter, optval, optlen))
110463 return -EFAULT;
110464+ raw6_sk(sk)->filter = filter;
110465 return 0;
110466 default:
110467 return -ENOPROTOOPT;
110468@@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
110469 char __user *optval, int __user *optlen)
110470 {
110471 int len;
110472+ struct icmp6_filter filter;
110473
110474 switch (optname) {
110475 case ICMPV6_FILTER:
110476@@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
110477 len = sizeof(struct icmp6_filter);
110478 if (put_user(len, optlen))
110479 return -EFAULT;
110480- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
110481+ filter = raw6_sk(sk)->filter;
110482+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
110483 return -EFAULT;
110484 return 0;
110485 default:
110486diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
110487index cc85a9b..526a133 100644
110488--- a/net/ipv6/reassembly.c
110489+++ b/net/ipv6/reassembly.c
110490@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
110491
110492 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
110493 {
110494- struct ctl_table *table;
110495+ ctl_table_no_const *table = NULL;
110496 struct ctl_table_header *hdr;
110497
110498- table = ip6_frags_ns_ctl_table;
110499 if (!net_eq(net, &init_net)) {
110500- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
110501+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
110502 if (table == NULL)
110503 goto err_alloc;
110504
110505@@ -642,9 +641,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
110506 /* Don't export sysctls to unprivileged users */
110507 if (net->user_ns != &init_user_ns)
110508 table[0].procname = NULL;
110509- }
110510+ hdr = register_net_sysctl(net, "net/ipv6", table);
110511+ } else
110512+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
110513
110514- hdr = register_net_sysctl(net, "net/ipv6", table);
110515 if (hdr == NULL)
110516 goto err_reg;
110517
110518@@ -652,8 +652,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
110519 return 0;
110520
110521 err_reg:
110522- if (!net_eq(net, &init_net))
110523- kfree(table);
110524+ kfree(table);
110525 err_alloc:
110526 return -ENOMEM;
110527 }
110528diff --git a/net/ipv6/route.c b/net/ipv6/route.c
110529index f23fbd2..7868241 100644
110530--- a/net/ipv6/route.c
110531+++ b/net/ipv6/route.c
110532@@ -2971,7 +2971,7 @@ struct ctl_table ipv6_route_table_template[] = {
110533
110534 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
110535 {
110536- struct ctl_table *table;
110537+ ctl_table_no_const *table;
110538
110539 table = kmemdup(ipv6_route_table_template,
110540 sizeof(ipv6_route_table_template),
110541diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
110542index 4f40817..54dcbef 100644
110543--- a/net/ipv6/sit.c
110544+++ b/net/ipv6/sit.c
110545@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
110546 static void ipip6_dev_free(struct net_device *dev);
110547 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
110548 __be32 *v4dst);
110549-static struct rtnl_link_ops sit_link_ops __read_mostly;
110550+static struct rtnl_link_ops sit_link_ops;
110551
110552 static int sit_net_id __read_mostly;
110553 struct sit_net {
110554@@ -1661,7 +1661,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
110555 unregister_netdevice_queue(dev, head);
110556 }
110557
110558-static struct rtnl_link_ops sit_link_ops __read_mostly = {
110559+static struct rtnl_link_ops sit_link_ops = {
110560 .kind = "sit",
110561 .maxtype = IFLA_IPTUN_MAX,
110562 .policy = ipip6_policy,
110563diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
110564index 058f3ec..dec973d 100644
110565--- a/net/ipv6/sysctl_net_ipv6.c
110566+++ b/net/ipv6/sysctl_net_ipv6.c
110567@@ -61,7 +61,7 @@ static struct ctl_table ipv6_rotable[] = {
110568
110569 static int __net_init ipv6_sysctl_net_init(struct net *net)
110570 {
110571- struct ctl_table *ipv6_table;
110572+ ctl_table_no_const *ipv6_table;
110573 struct ctl_table *ipv6_route_table;
110574 struct ctl_table *ipv6_icmp_table;
110575 int err;
110576diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
110577index 229239ad..ee2802f 100644
110578--- a/net/ipv6/tcp_ipv6.c
110579+++ b/net/ipv6/tcp_ipv6.c
110580@@ -102,6 +102,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
110581 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
110582 }
110583
110584+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
110585+extern int grsec_enable_blackhole;
110586+#endif
110587+
110588 static void tcp_v6_hash(struct sock *sk)
110589 {
110590 if (sk->sk_state != TCP_CLOSE) {
110591@@ -1424,6 +1428,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
110592 return 0;
110593
110594 reset:
110595+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
110596+ if (!grsec_enable_blackhole)
110597+#endif
110598 tcp_v6_send_reset(sk, skb);
110599 discard:
110600 if (opt_skb)
110601@@ -1508,12 +1515,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
110602 TCP_SKB_CB(skb)->sacked = 0;
110603
110604 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
110605- if (!sk)
110606+ if (!sk) {
110607+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
110608+ ret = 1;
110609+#endif
110610 goto no_tcp_socket;
110611+ }
110612
110613 process:
110614- if (sk->sk_state == TCP_TIME_WAIT)
110615+ if (sk->sk_state == TCP_TIME_WAIT) {
110616+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
110617+ ret = 2;
110618+#endif
110619 goto do_time_wait;
110620+ }
110621
110622 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
110623 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
110624@@ -1565,6 +1580,10 @@ csum_error:
110625 bad_packet:
110626 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
110627 } else {
110628+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
110629+ if (!grsec_enable_blackhole || (ret == 1 &&
110630+ (skb->dev->flags & IFF_LOOPBACK)))
110631+#endif
110632 tcp_v6_send_reset(NULL, skb);
110633 }
110634
110635diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
110636index 7092ff7..3fd0eb4 100644
110637--- a/net/ipv6/udp.c
110638+++ b/net/ipv6/udp.c
110639@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
110640 udp_ipv6_hash_secret + net_hash_mix(net));
110641 }
110642
110643+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
110644+extern int grsec_enable_blackhole;
110645+#endif
110646+
110647 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
110648 {
110649 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
110650@@ -435,7 +439,7 @@ try_again:
110651 if (unlikely(err)) {
110652 trace_kfree_skb(skb, udpv6_recvmsg);
110653 if (!peeked) {
110654- atomic_inc(&sk->sk_drops);
110655+ atomic_inc_unchecked(&sk->sk_drops);
110656 if (is_udp4)
110657 UDP_INC_STATS_USER(sock_net(sk),
110658 UDP_MIB_INERRORS,
110659@@ -698,7 +702,7 @@ csum_error:
110660 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
110661 drop:
110662 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
110663- atomic_inc(&sk->sk_drops);
110664+ atomic_inc_unchecked(&sk->sk_drops);
110665 kfree_skb(skb);
110666 return -1;
110667 }
110668@@ -754,7 +758,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
110669 if (likely(skb1 == NULL))
110670 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
110671 if (!skb1) {
110672- atomic_inc(&sk->sk_drops);
110673+ atomic_inc_unchecked(&sk->sk_drops);
110674 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
110675 IS_UDPLITE(sk));
110676 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
110677@@ -920,6 +924,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
110678 goto csum_error;
110679
110680 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
110681+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
110682+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
110683+#endif
110684 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
110685
110686 kfree_skb(skb);
110687diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
110688index 2a0bbda..d75ca57 100644
110689--- a/net/ipv6/xfrm6_policy.c
110690+++ b/net/ipv6/xfrm6_policy.c
110691@@ -212,11 +212,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
110692 }
110693 }
110694
110695-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
110696+static int xfrm6_garbage_collect(struct dst_ops *ops)
110697 {
110698 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
110699
110700- xfrm6_policy_afinfo.garbage_collect(net);
110701+ xfrm_garbage_collect_deferred(net);
110702 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
110703 }
110704
110705@@ -329,19 +329,19 @@ static struct ctl_table xfrm6_policy_table[] = {
110706
110707 static int __net_init xfrm6_net_init(struct net *net)
110708 {
110709- struct ctl_table *table;
110710+ ctl_table_no_const *table = NULL;
110711 struct ctl_table_header *hdr;
110712
110713- table = xfrm6_policy_table;
110714 if (!net_eq(net, &init_net)) {
110715- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
110716+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
110717 if (!table)
110718 goto err_alloc;
110719
110720 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
110721- }
110722+ hdr = register_net_sysctl(net, "net/ipv6", table);
110723+ } else
110724+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
110725
110726- hdr = register_net_sysctl(net, "net/ipv6", table);
110727 if (!hdr)
110728 goto err_reg;
110729
110730@@ -349,8 +349,7 @@ static int __net_init xfrm6_net_init(struct net *net)
110731 return 0;
110732
110733 err_reg:
110734- if (!net_eq(net, &init_net))
110735- kfree(table);
110736+ kfree(table);
110737 err_alloc:
110738 return -ENOMEM;
110739 }
110740diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
110741index e15c16a..7cf07aa 100644
110742--- a/net/ipx/ipx_proc.c
110743+++ b/net/ipx/ipx_proc.c
110744@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
110745 struct proc_dir_entry *p;
110746 int rc = -ENOMEM;
110747
110748- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
110749+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
110750
110751 if (!ipx_proc_dir)
110752 goto out;
110753diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
110754index 2ba8b97..6d33010 100644
110755--- a/net/irda/ircomm/ircomm_tty.c
110756+++ b/net/irda/ircomm/ircomm_tty.c
110757@@ -317,11 +317,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
110758 add_wait_queue(&port->open_wait, &wait);
110759
110760 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
110761- __FILE__, __LINE__, tty->driver->name, port->count);
110762+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
110763
110764 spin_lock_irqsave(&port->lock, flags);
110765 if (!tty_hung_up_p(filp))
110766- port->count--;
110767+ atomic_dec(&port->count);
110768 port->blocked_open++;
110769 spin_unlock_irqrestore(&port->lock, flags);
110770
110771@@ -356,7 +356,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
110772 }
110773
110774 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
110775- __FILE__, __LINE__, tty->driver->name, port->count);
110776+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
110777
110778 schedule();
110779 }
110780@@ -366,12 +366,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
110781
110782 spin_lock_irqsave(&port->lock, flags);
110783 if (!tty_hung_up_p(filp))
110784- port->count++;
110785+ atomic_inc(&port->count);
110786 port->blocked_open--;
110787 spin_unlock_irqrestore(&port->lock, flags);
110788
110789 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
110790- __FILE__, __LINE__, tty->driver->name, port->count);
110791+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
110792
110793 if (!retval)
110794 port->flags |= ASYNC_NORMAL_ACTIVE;
110795@@ -445,12 +445,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
110796
110797 /* ++ is not atomic, so this should be protected - Jean II */
110798 spin_lock_irqsave(&self->port.lock, flags);
110799- self->port.count++;
110800+ atomic_inc(&self->port.count);
110801 spin_unlock_irqrestore(&self->port.lock, flags);
110802 tty_port_tty_set(&self->port, tty);
110803
110804 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
110805- self->line, self->port.count);
110806+ self->line, atomic_read(&self->port.count));
110807
110808 /* Not really used by us, but lets do it anyway */
110809 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
110810@@ -987,7 +987,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
110811 tty_kref_put(port->tty);
110812 }
110813 port->tty = NULL;
110814- port->count = 0;
110815+ atomic_set(&port->count, 0);
110816 spin_unlock_irqrestore(&port->lock, flags);
110817
110818 wake_up_interruptible(&port->open_wait);
110819@@ -1344,7 +1344,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
110820 seq_putc(m, '\n');
110821
110822 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
110823- seq_printf(m, "Open count: %d\n", self->port.count);
110824+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
110825 seq_printf(m, "Max data size: %d\n", self->max_data_size);
110826 seq_printf(m, "Max header size: %d\n", self->max_header_size);
110827
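[annotation] The ircomm_tty hunks convert the tty_port open count to atomic operations; the count is touched from the open, hangup and block-till-ready paths, and under this patch set tty_port::count is evidently an atomic_t. The mechanical mapping used above:

atomic_inc(&port->count);		/* was: port->count++    */
atomic_dec(&port->count);		/* was: port->count--    */
atomic_set(&port->count, 0);		/* was: port->count = 0  */
seq_printf(m, "Open count: %d\n", atomic_read(&port->count));	/* reads */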
110828diff --git a/net/irda/irproc.c b/net/irda/irproc.c
110829index b9ac598..f88cc56 100644
110830--- a/net/irda/irproc.c
110831+++ b/net/irda/irproc.c
110832@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
110833 {
110834 int i;
110835
110836- proc_irda = proc_mkdir("irda", init_net.proc_net);
110837+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
110838 if (proc_irda == NULL)
110839 return;
110840
110841diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
110842index 7a95fa4..57be196 100644
110843--- a/net/iucv/af_iucv.c
110844+++ b/net/iucv/af_iucv.c
110845@@ -686,10 +686,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
110846 {
110847 char name[12];
110848
110849- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
110850+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
110851 while (__iucv_get_sock_by_name(name)) {
110852 sprintf(name, "%08x",
110853- atomic_inc_return(&iucv_sk_list.autobind_name));
110854+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
110855 }
110856 memcpy(iucv->src_name, name, 8);
110857 }
110858diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
110859index da78793..bdd78cf 100644
110860--- a/net/iucv/iucv.c
110861+++ b/net/iucv/iucv.c
110862@@ -702,7 +702,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
110863 return NOTIFY_OK;
110864 }
110865
110866-static struct notifier_block __refdata iucv_cpu_notifier = {
110867+static struct notifier_block iucv_cpu_notifier = {
110868 .notifier_call = iucv_cpu_notify,
110869 };
110870
110871diff --git a/net/key/af_key.c b/net/key/af_key.c
110872index ba2a2f9..b658bc3 100644
110873--- a/net/key/af_key.c
110874+++ b/net/key/af_key.c
110875@@ -3052,10 +3052,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
110876 static u32 get_acqseq(void)
110877 {
110878 u32 res;
110879- static atomic_t acqseq;
110880+ static atomic_unchecked_t acqseq;
110881
110882 do {
110883- res = atomic_inc_return(&acqseq);
110884+ res = atomic_inc_return_unchecked(&acqseq);
110885 } while (!res);
110886 return res;
110887 }
110888diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
110889index 76125c5..e474828 100644
110890--- a/net/l2tp/l2tp_eth.c
110891+++ b/net/l2tp/l2tp_eth.c
110892@@ -42,12 +42,12 @@ struct l2tp_eth {
110893 struct sock *tunnel_sock;
110894 struct l2tp_session *session;
110895 struct list_head list;
110896- atomic_long_t tx_bytes;
110897- atomic_long_t tx_packets;
110898- atomic_long_t tx_dropped;
110899- atomic_long_t rx_bytes;
110900- atomic_long_t rx_packets;
110901- atomic_long_t rx_errors;
110902+ atomic_long_unchecked_t tx_bytes;
110903+ atomic_long_unchecked_t tx_packets;
110904+ atomic_long_unchecked_t tx_dropped;
110905+ atomic_long_unchecked_t rx_bytes;
110906+ atomic_long_unchecked_t rx_packets;
110907+ atomic_long_unchecked_t rx_errors;
110908 };
110909
110910 /* via l2tp_session_priv() */
110911@@ -98,10 +98,10 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
110912 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
110913
110914 if (likely(ret == NET_XMIT_SUCCESS)) {
110915- atomic_long_add(len, &priv->tx_bytes);
110916- atomic_long_inc(&priv->tx_packets);
110917+ atomic_long_add_unchecked(len, &priv->tx_bytes);
110918+ atomic_long_inc_unchecked(&priv->tx_packets);
110919 } else {
110920- atomic_long_inc(&priv->tx_dropped);
110921+ atomic_long_inc_unchecked(&priv->tx_dropped);
110922 }
110923 return NETDEV_TX_OK;
110924 }
110925@@ -111,12 +111,12 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
110926 {
110927 struct l2tp_eth *priv = netdev_priv(dev);
110928
110929- stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
110930- stats->tx_packets = atomic_long_read(&priv->tx_packets);
110931- stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
110932- stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
110933- stats->rx_packets = atomic_long_read(&priv->rx_packets);
110934- stats->rx_errors = atomic_long_read(&priv->rx_errors);
110935+ stats->tx_bytes = atomic_long_read_unchecked(&priv->tx_bytes);
110936+ stats->tx_packets = atomic_long_read_unchecked(&priv->tx_packets);
110937+ stats->tx_dropped = atomic_long_read_unchecked(&priv->tx_dropped);
110938+ stats->rx_bytes = atomic_long_read_unchecked(&priv->rx_bytes);
110939+ stats->rx_packets = atomic_long_read_unchecked(&priv->rx_packets);
110940+ stats->rx_errors = atomic_long_read_unchecked(&priv->rx_errors);
110941 return stats;
110942 }
110943
110944@@ -166,15 +166,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
110945 nf_reset(skb);
110946
110947 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
110948- atomic_long_inc(&priv->rx_packets);
110949- atomic_long_add(data_len, &priv->rx_bytes);
110950+ atomic_long_inc_unchecked(&priv->rx_packets);
110951+ atomic_long_add_unchecked(data_len, &priv->rx_bytes);
110952 } else {
110953- atomic_long_inc(&priv->rx_errors);
110954+ atomic_long_inc_unchecked(&priv->rx_errors);
110955 }
110956 return;
110957
110958 error:
110959- atomic_long_inc(&priv->rx_errors);
110960+ atomic_long_inc_unchecked(&priv->rx_errors);
110961 kfree_skb(skb);
110962 }
110963
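[annotation] The l2tp_eth counters are pure statistics, so the patch moves them to the _unchecked atomic variants: wraparound is expected and harmless here, and the _unchecked ops are the ones PAX_REFCOUNT-style checking leaves uninstrumented. A minimal sketch of the split, with a hypothetical helper:

static atomic_long_unchecked_t tx_packets;	/* statistic: may wrap */

static inline void note_tx(void)
{
	atomic_long_inc_unchecked(&tx_packets);
}

Reference counts, by contrast, keep the checked atomic_long_* API so an overflow traps instead of silently wrapping toward a use-after-free.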
110964diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
110965index 13752d9..b704a93 100644
110966--- a/net/l2tp/l2tp_ppp.c
110967+++ b/net/l2tp/l2tp_ppp.c
110968@@ -755,7 +755,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
110969 /* If PMTU discovery was enabled, use the MTU that was discovered */
110970 dst = sk_dst_get(tunnel->sock);
110971 if (dst != NULL) {
110972- u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock));
110973+ u32 pmtu = dst_mtu(dst);
110974+
110975 if (pmtu != 0)
110976 session->mtu = session->mru = pmtu -
110977 PPPOL2TP_HEADER_OVERHEAD;
110978diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
110979index 1a3c7e0..80f8b0c 100644
110980--- a/net/llc/llc_proc.c
110981+++ b/net/llc/llc_proc.c
110982@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
110983 int rc = -ENOMEM;
110984 struct proc_dir_entry *p;
110985
110986- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
110987+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
110988 if (!llc_proc_dir)
110989 goto out;
110990
110991diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
110992index 592f4b1..efa7aa9 100644
110993--- a/net/mac80211/cfg.c
110994+++ b/net/mac80211/cfg.c
110995@@ -864,7 +864,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
110996 ret = ieee80211_vif_use_channel(sdata, chandef,
110997 IEEE80211_CHANCTX_EXCLUSIVE);
110998 }
110999- } else if (local->open_count == local->monitors) {
111000+ } else if (local_read(&local->open_count) == local->monitors) {
111001 local->_oper_chandef = *chandef;
111002 ieee80211_hw_config(local, 0);
111003 }
111004@@ -3574,7 +3574,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
111005 else
111006 local->probe_req_reg--;
111007
111008- if (!local->open_count)
111009+ if (!local_read(&local->open_count))
111010 break;
111011
111012 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
111013@@ -3723,8 +3723,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
111014 if (chanctx_conf) {
111015 *chandef = chanctx_conf->def;
111016 ret = 0;
111017- } else if (local->open_count > 0 &&
111018- local->open_count == local->monitors &&
111019+ } else if (local_read(&local->open_count) > 0 &&
111020+ local_read(&local->open_count) == local->monitors &&
111021 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
111022 if (local->use_chanctx)
111023 *chandef = local->monitor_chandef;
111024diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
111025index ac9836e..32613c1 100644
111026--- a/net/mac80211/ieee80211_i.h
111027+++ b/net/mac80211/ieee80211_i.h
111028@@ -28,6 +28,7 @@
111029 #include <net/ieee80211_radiotap.h>
111030 #include <net/cfg80211.h>
111031 #include <net/mac80211.h>
111032+#include <asm/local.h>
111033 #include "key.h"
111034 #include "sta_info.h"
111035 #include "debug.h"
111036@@ -1011,7 +1012,7 @@ struct ieee80211_local {
111037 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
111038 spinlock_t queue_stop_reason_lock;
111039
111040- int open_count;
111041+ local_t open_count;
111042 int monitors, cooked_mntrs;
111043 /* number of interfaces with corresponding FIF_ flags */
111044 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
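Retyping open_count from int to local_t (hence the new <asm/local.h> include) forces every access through an explicit accessor: increments and decrements become single atomic-on-this-CPU instructions instead of racy read-modify-write sequences, and readers are clearly marked with local_read(). The mac80211 hunks that follow are the mechanical fallout of this one-line type change. The accessor pattern, as a self-contained sketch:

#include <asm/local.h>

static local_t open_count = LOCAL_INIT(0);

static void iface_up(void)   { local_inc(&open_count); }
static void iface_down(void) { local_dec(&open_count); }

static bool hw_should_run(void)
{
	return local_read(&open_count) != 0;	/* replaces direct int reads */
}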
111045diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
111046index 388b863..6575b55 100644
111047--- a/net/mac80211/iface.c
111048+++ b/net/mac80211/iface.c
111049@@ -531,7 +531,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
111050 break;
111051 }
111052
111053- if (local->open_count == 0) {
111054+ if (local_read(&local->open_count) == 0) {
111055 res = drv_start(local);
111056 if (res)
111057 goto err_del_bss;
111058@@ -578,7 +578,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
111059 res = drv_add_interface(local, sdata);
111060 if (res)
111061 goto err_stop;
111062- } else if (local->monitors == 0 && local->open_count == 0) {
111063+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
111064 res = ieee80211_add_virtual_monitor(local);
111065 if (res)
111066 goto err_stop;
111067@@ -687,7 +687,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
111068 atomic_inc(&local->iff_promiscs);
111069
111070 if (coming_up)
111071- local->open_count++;
111072+ local_inc(&local->open_count);
111073
111074 if (hw_reconf_flags)
111075 ieee80211_hw_config(local, hw_reconf_flags);
111076@@ -725,7 +725,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
111077 err_del_interface:
111078 drv_remove_interface(local, sdata);
111079 err_stop:
111080- if (!local->open_count)
111081+ if (!local_read(&local->open_count))
111082 drv_stop(local);
111083 err_del_bss:
111084 sdata->bss = NULL;
111085@@ -888,7 +888,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
111086 }
111087
111088 if (going_down)
111089- local->open_count--;
111090+ local_dec(&local->open_count);
111091
111092 switch (sdata->vif.type) {
111093 case NL80211_IFTYPE_AP_VLAN:
111094@@ -949,7 +949,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
111095 }
111096 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
111097
111098- if (local->open_count == 0)
111099+ if (local_read(&local->open_count) == 0)
111100 ieee80211_clear_tx_pending(local);
111101
111102 /*
111103@@ -989,7 +989,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
111104
111105 ieee80211_recalc_ps(local, -1);
111106
111107- if (local->open_count == 0) {
111108+ if (local_read(&local->open_count) == 0) {
111109 ieee80211_stop_device(local);
111110
111111 /* no reconfiguring after stop! */
111112@@ -1000,7 +1000,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
111113 ieee80211_configure_filter(local);
111114 ieee80211_hw_config(local, hw_reconf_flags);
111115
111116- if (local->monitors == local->open_count)
111117+ if (local->monitors == local_read(&local->open_count))
111118 ieee80211_add_virtual_monitor(local);
111119 }
111120
111121diff --git a/net/mac80211/main.c b/net/mac80211/main.c
111122index d17c26d..43d6bfb 100644
111123--- a/net/mac80211/main.c
111124+++ b/net/mac80211/main.c
111125@@ -174,7 +174,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
111126 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
111127 IEEE80211_CONF_CHANGE_POWER);
111128
111129- if (changed && local->open_count) {
111130+ if (changed && local_read(&local->open_count)) {
111131 ret = drv_config(local, changed);
111132 /*
111133 * Goal:
111134diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
111135index d478b88..8c8d157 100644
111136--- a/net/mac80211/pm.c
111137+++ b/net/mac80211/pm.c
111138@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
111139 struct ieee80211_sub_if_data *sdata;
111140 struct sta_info *sta;
111141
111142- if (!local->open_count)
111143+ if (!local_read(&local->open_count))
111144 goto suspend;
111145
111146 ieee80211_scan_cancel(local);
111147@@ -58,7 +58,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
111148 cancel_work_sync(&local->dynamic_ps_enable_work);
111149 del_timer_sync(&local->dynamic_ps_timer);
111150
111151- local->wowlan = wowlan && local->open_count;
111152+ local->wowlan = wowlan && local_read(&local->open_count);
111153 if (local->wowlan) {
111154 int err = drv_suspend(local, wowlan);
111155 if (err < 0) {
111156@@ -123,7 +123,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
111157 WARN_ON(!list_empty(&local->chanctx_list));
111158
111159 /* stop hardware - this must stop RX */
111160- if (local->open_count)
111161+ if (local_read(&local->open_count))
111162 ieee80211_stop_device(local);
111163
111164 suspend:
111165diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
111166index 8fdadfd..a4f72b8 100644
111167--- a/net/mac80211/rate.c
111168+++ b/net/mac80211/rate.c
111169@@ -720,7 +720,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
111170
111171 ASSERT_RTNL();
111172
111173- if (local->open_count)
111174+ if (local_read(&local->open_count))
111175 return -EBUSY;
111176
111177 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
111178diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
111179index 6ff1346..936ca9a 100644
111180--- a/net/mac80211/rc80211_pid_debugfs.c
111181+++ b/net/mac80211/rc80211_pid_debugfs.c
111182@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
111183
111184 spin_unlock_irqrestore(&events->lock, status);
111185
111186- if (copy_to_user(buf, pb, p))
111187+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
111188 return -EFAULT;
111189
111190 return p;
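The added `p > sizeof(pb)` test closes a classic snprintf() pitfall: snprintf() returns the length the output would have had, which can exceed the destination buffer when truncation occurs, and that value was used directly as the copy length. A minimal reproduction of the bug being fixed (the buffer size and input string are illustrative):

	char pb[64];
	ssize_t p;

	/* may return more than sizeof(pb) if the output was truncated */
	p = snprintf(pb, sizeof(pb), "%s", some_long_event_string);

	/* without the bound check, copy_to_user() would read past the end of pb */
	if (p > sizeof(pb) || copy_to_user(buf, pb, p))
		return -EFAULT;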
111191diff --git a/net/mac80211/util.c b/net/mac80211/util.c
111192index a6cda52..f3b6776 100644
111193--- a/net/mac80211/util.c
111194+++ b/net/mac80211/util.c
111195@@ -1548,7 +1548,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
111196 }
111197 #endif
111198 /* everything else happens only if HW was up & running */
111199- if (!local->open_count)
111200+ if (!local_read(&local->open_count))
111201 goto wake_up;
111202
111203 /*
111204@@ -1772,7 +1772,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
111205 local->in_reconfig = false;
111206 barrier();
111207
111208- if (local->monitors == local->open_count && local->monitors > 0)
111209+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
111210 ieee80211_add_virtual_monitor(local);
111211
111212 /*
111213diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
111214index e9410d1..77b6378 100644
111215--- a/net/netfilter/Kconfig
111216+++ b/net/netfilter/Kconfig
111217@@ -1081,6 +1081,16 @@ config NETFILTER_XT_MATCH_ESP
111218
111219 To compile it as a module, choose M here. If unsure, say N.
111220
111221+config NETFILTER_XT_MATCH_GRADM
111222+ tristate '"gradm" match support'
111223+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
111224+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
111225+ ---help---
111226+ The gradm match allows matching on whether grsecurity RBAC is enabled.
111227+ It is useful when iptables rules are applied early during boot to
111228+ prevent connections to the machine (except from a trusted host)
111229+ while the RBAC system is disabled.
111230+
111231 config NETFILTER_XT_MATCH_HASHLIMIT
111232 tristate '"hashlimit" match support'
111233 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
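This Kconfig entry, together with the Makefile hook in the next hunk, wires in the xt_gradm module whose source is added as a new file at the end of this section. The match itself is tiny; a hedged sketch of its core callback, assuming grsecurity's gr_acl_is_enabled() predicate and an invflags field in the match info (both from the surrounding grsecurity tree, not shown in full here):

/* sketch: true while the RBAC system is enabled, optionally inverted
 * by the userspace-supplied flag */
static bool gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_gradm_mtinfo *info = par->matchinfo;
	bool retval = false;

	if (gr_acl_is_enabled())
		retval = true;

	return retval ^ info->invflags;
}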
111234diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
111235index bffdad7..f9317d1 100644
111236--- a/net/netfilter/Makefile
111237+++ b/net/netfilter/Makefile
111238@@ -133,6 +133,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
111239 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
111240 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
111241 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
111242+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
111243 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
111244 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
111245 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
111246diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
111247index ec8114f..6b2bfba 100644
111248--- a/net/netfilter/ipset/ip_set_core.c
111249+++ b/net/netfilter/ipset/ip_set_core.c
111250@@ -1921,7 +1921,7 @@ done:
111251 return ret;
111252 }
111253
111254-static struct nf_sockopt_ops so_set __read_mostly = {
111255+static struct nf_sockopt_ops so_set = {
111256 .pf = PF_INET,
111257 .get_optmin = SO_IP_SET,
111258 .get_optmax = SO_IP_SET + 1,
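Dropping __read_mostly from so_set is a side effect of the constify plugin: nf_sockopt_ops is one of the ops-style structures the plugin forces into a read-only section, and an explicit .data..read_mostly placement would fight that. The pax_list_add()/pax_list_del() calls in the nf_sockopt.c hunks further down (see the sketch after that hunk) are the other half of the same change. Conceptually:

/* conceptual: the constify plugin effectively turns this object const and
 * places it read-only after init; __read_mostly would instead pin it into
 * the writable .data..read_mostly section, so the annotation is removed */
static struct nf_sockopt_ops so_set = {
	.pf = PF_INET,
};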
111259diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
111260index 610e19c..08d0c3f 100644
111261--- a/net/netfilter/ipvs/ip_vs_conn.c
111262+++ b/net/netfilter/ipvs/ip_vs_conn.c
111263@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
111264 /* Increase the refcnt counter of the dest */
111265 ip_vs_dest_hold(dest);
111266
111267- conn_flags = atomic_read(&dest->conn_flags);
111268+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
111269 if (cp->protocol != IPPROTO_UDP)
111270 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
111271 flags = cp->flags;
111272@@ -899,7 +899,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
111273
111274 cp->control = NULL;
111275 atomic_set(&cp->n_control, 0);
111276- atomic_set(&cp->in_pkts, 0);
111277+ atomic_set_unchecked(&cp->in_pkts, 0);
111278
111279 cp->packet_xmit = NULL;
111280 cp->app = NULL;
111281@@ -1187,7 +1187,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
111282
111283 /* Don't drop the entry if its number of incoming packets is not
111284 located in [0, 8] */
111285- i = atomic_read(&cp->in_pkts);
111286+ i = atomic_read_unchecked(&cp->in_pkts);
111287 if (i > 8 || i < 0) return 0;
111288
111289 if (!todrop_rate[i]) return 0;
111290diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
111291index e683675..67cb16b 100644
111292--- a/net/netfilter/ipvs/ip_vs_core.c
111293+++ b/net/netfilter/ipvs/ip_vs_core.c
111294@@ -567,7 +567,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
111295 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
111296 /* do not touch skb anymore */
111297
111298- atomic_inc(&cp->in_pkts);
111299+ atomic_inc_unchecked(&cp->in_pkts);
111300 ip_vs_conn_put(cp);
111301 return ret;
111302 }
111303@@ -1711,7 +1711,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
111304 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
111305 pkts = sysctl_sync_threshold(ipvs);
111306 else
111307- pkts = atomic_add_return(1, &cp->in_pkts);
111308+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
111309
111310 if (ipvs->sync_state & IP_VS_STATE_MASTER)
111311 ip_vs_sync_conn(net, cp, pkts);
111312diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
111313index 581a658..910e112 100644
111314--- a/net/netfilter/ipvs/ip_vs_ctl.c
111315+++ b/net/netfilter/ipvs/ip_vs_ctl.c
111316@@ -794,7 +794,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
111317 */
111318 ip_vs_rs_hash(ipvs, dest);
111319 }
111320- atomic_set(&dest->conn_flags, conn_flags);
111321+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
111322
111323 /* bind the service */
111324 old_svc = rcu_dereference_protected(dest->svc, 1);
111325@@ -1654,7 +1654,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
111326 * align with netns init in ip_vs_control_net_init()
111327 */
111328
111329-static struct ctl_table vs_vars[] = {
111330+static ctl_table_no_const vs_vars[] __read_only = {
111331 {
111332 .procname = "amemthresh",
111333 .maxlen = sizeof(int),
111334@@ -2075,7 +2075,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
111335 " %-7s %-6d %-10d %-10d\n",
111336 &dest->addr.in6,
111337 ntohs(dest->port),
111338- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
111339+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
111340 atomic_read(&dest->weight),
111341 atomic_read(&dest->activeconns),
111342 atomic_read(&dest->inactconns));
111343@@ -2086,7 +2086,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
111344 "%-7s %-6d %-10d %-10d\n",
111345 ntohl(dest->addr.ip),
111346 ntohs(dest->port),
111347- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
111348+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
111349 atomic_read(&dest->weight),
111350 atomic_read(&dest->activeconns),
111351 atomic_read(&dest->inactconns));
111352@@ -2564,7 +2564,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
111353
111354 entry.addr = dest->addr.ip;
111355 entry.port = dest->port;
111356- entry.conn_flags = atomic_read(&dest->conn_flags);
111357+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
111358 entry.weight = atomic_read(&dest->weight);
111359 entry.u_threshold = dest->u_threshold;
111360 entry.l_threshold = dest->l_threshold;
111361@@ -3107,7 +3107,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
111362 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
111363 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
111364 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
111365- (atomic_read(&dest->conn_flags) &
111366+ (atomic_read_unchecked(&dest->conn_flags) &
111367 IP_VS_CONN_F_FWD_MASK)) ||
111368 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
111369 atomic_read(&dest->weight)) ||
111370@@ -3697,7 +3697,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
111371 {
111372 int idx;
111373 struct netns_ipvs *ipvs = net_ipvs(net);
111374- struct ctl_table *tbl;
111375+ ctl_table_no_const *tbl;
111376
111377 atomic_set(&ipvs->dropentry, 0);
111378 spin_lock_init(&ipvs->dropentry_lock);
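ctl_table_no_const is the escape hatch the constify plugin needs for sysctl tables: struct ctl_table becomes const tree-wide, but many of these tables are kmemdup()'ed and patched per network namespace at runtime. Static templates are marked __read_only; pointers to the writable runtime copies are retyped. The recurring pattern, with illustrative names for the data hookup:

static ctl_table_no_const my_vars[] __read_only = {
	{
		.procname	= "amemthresh",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static int my_init_sysctl(struct net *net)
{
	ctl_table_no_const *tbl;	/* writable per-netns copy */

	tbl = kmemdup(my_vars, sizeof(my_vars), GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;
	tbl[0].data = &net_ipvs(net)->sysctl_amemthresh;	/* illustrative */
	return 0;
}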
111379diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
111380index 547ff33..c8c8117 100644
111381--- a/net/netfilter/ipvs/ip_vs_lblc.c
111382+++ b/net/netfilter/ipvs/ip_vs_lblc.c
111383@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
111384 * IPVS LBLC sysctl table
111385 */
111386 #ifdef CONFIG_SYSCTL
111387-static struct ctl_table vs_vars_table[] = {
111388+static ctl_table_no_const vs_vars_table[] __read_only = {
111389 {
111390 .procname = "lblc_expiration",
111391 .data = NULL,
111392diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
111393index 3f21a2f..a112e85 100644
111394--- a/net/netfilter/ipvs/ip_vs_lblcr.c
111395+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
111396@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
111397 * IPVS LBLCR sysctl table
111398 */
111399
111400-static struct ctl_table vs_vars_table[] = {
111401+static ctl_table_no_const vs_vars_table[] __read_only = {
111402 {
111403 .procname = "lblcr_expiration",
111404 .data = NULL,
111405diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
111406index db80126..ef7110e 100644
111407--- a/net/netfilter/ipvs/ip_vs_sync.c
111408+++ b/net/netfilter/ipvs/ip_vs_sync.c
111409@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
111410 cp = cp->control;
111411 if (cp) {
111412 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
111413- pkts = atomic_add_return(1, &cp->in_pkts);
111414+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
111415 else
111416 pkts = sysctl_sync_threshold(ipvs);
111417 ip_vs_sync_conn(net, cp->control, pkts);
111418@@ -771,7 +771,7 @@ control:
111419 if (!cp)
111420 return;
111421 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
111422- pkts = atomic_add_return(1, &cp->in_pkts);
111423+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
111424 else
111425 pkts = sysctl_sync_threshold(ipvs);
111426 goto sloop;
111427@@ -895,7 +895,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
111428
111429 if (opt)
111430 memcpy(&cp->in_seq, opt, sizeof(*opt));
111431- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
111432+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
111433 cp->state = state;
111434 cp->old_state = cp->state;
111435 /*
111436diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
111437index 73ba1cc..1adfc7a 100644
111438--- a/net/netfilter/ipvs/ip_vs_xmit.c
111439+++ b/net/netfilter/ipvs/ip_vs_xmit.c
111440@@ -1102,7 +1102,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
111441 else
111442 rc = NF_ACCEPT;
111443 /* do not touch skb anymore */
111444- atomic_inc(&cp->in_pkts);
111445+ atomic_inc_unchecked(&cp->in_pkts);
111446 goto out;
111447 }
111448
111449@@ -1194,7 +1194,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
111450 else
111451 rc = NF_ACCEPT;
111452 /* do not touch skb anymore */
111453- atomic_inc(&cp->in_pkts);
111454+ atomic_inc_unchecked(&cp->in_pkts);
111455 goto out;
111456 }
111457
111458diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
111459index a4b5e2a..13b1de3 100644
111460--- a/net/netfilter/nf_conntrack_acct.c
111461+++ b/net/netfilter/nf_conntrack_acct.c
111462@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
111463 #ifdef CONFIG_SYSCTL
111464 static int nf_conntrack_acct_init_sysctl(struct net *net)
111465 {
111466- struct ctl_table *table;
111467+ ctl_table_no_const *table;
111468
111469 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
111470 GFP_KERNEL);
111471diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
111472index 1f4f954..e364ad7 100644
111473--- a/net/netfilter/nf_conntrack_core.c
111474+++ b/net/netfilter/nf_conntrack_core.c
111475@@ -1789,6 +1789,10 @@ void nf_conntrack_init_end(void)
111476 #define DYING_NULLS_VAL ((1<<30)+1)
111477 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
111478
111479+#ifdef CONFIG_GRKERNSEC_HIDESYM
111480+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
111481+#endif
111482+
111483 int nf_conntrack_init_net(struct net *net)
111484 {
111485 int ret = -ENOMEM;
111486@@ -1814,7 +1818,11 @@ int nf_conntrack_init_net(struct net *net)
111487 if (!net->ct.stat)
111488 goto err_pcpu_lists;
111489
111490+#ifdef CONFIG_GRKERNSEC_HIDESYM
111491+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
111492+#else
111493 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
111494+#endif
111495 if (!net->ct.slabname)
111496 goto err_slabname;
111497
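Naming the per-namespace conntrack slab cache with "%p" embeds a live kernel pointer in a name that leaks out through /proc/slabinfo and slab sysfs entries. Under GRKERNSEC_HIDESYM the patch substitutes a monotonically increasing id instead, keeping the names unique without exposing the struct net address. Reduced to its core:

/* sketch: unique-but-opaque cache names instead of pointer-derived ones */
static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);

static char *make_cache_name(struct net *net)
{
#ifdef CONFIG_GRKERNSEC_HIDESYM
	return kasprintf(GFP_KERNEL, "nf_conntrack_%08x",
			 atomic_inc_return_unchecked(&conntrack_cache_id));
#else
	return kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
#endif
}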
111498diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
111499index 1df1761..ce8b88a 100644
111500--- a/net/netfilter/nf_conntrack_ecache.c
111501+++ b/net/netfilter/nf_conntrack_ecache.c
111502@@ -188,7 +188,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
111503 #ifdef CONFIG_SYSCTL
111504 static int nf_conntrack_event_init_sysctl(struct net *net)
111505 {
111506- struct ctl_table *table;
111507+ ctl_table_no_const *table;
111508
111509 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
111510 GFP_KERNEL);
111511diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
111512index 5b3eae7..dd4b8fe 100644
111513--- a/net/netfilter/nf_conntrack_helper.c
111514+++ b/net/netfilter/nf_conntrack_helper.c
111515@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
111516
111517 static int nf_conntrack_helper_init_sysctl(struct net *net)
111518 {
111519- struct ctl_table *table;
111520+ ctl_table_no_const *table;
111521
111522 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
111523 GFP_KERNEL);
111524diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
111525index b65d586..beec902 100644
111526--- a/net/netfilter/nf_conntrack_proto.c
111527+++ b/net/netfilter/nf_conntrack_proto.c
111528@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
111529
111530 static void
111531 nf_ct_unregister_sysctl(struct ctl_table_header **header,
111532- struct ctl_table **table,
111533+ ctl_table_no_const **table,
111534 unsigned int users)
111535 {
111536 if (users > 0)
111537diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
111538index f641751..d3c5b51 100644
111539--- a/net/netfilter/nf_conntrack_standalone.c
111540+++ b/net/netfilter/nf_conntrack_standalone.c
111541@@ -471,7 +471,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
111542
111543 static int nf_conntrack_standalone_init_sysctl(struct net *net)
111544 {
111545- struct ctl_table *table;
111546+ ctl_table_no_const *table;
111547
111548 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
111549 GFP_KERNEL);
111550diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
111551index 7a394df..bd91a8a 100644
111552--- a/net/netfilter/nf_conntrack_timestamp.c
111553+++ b/net/netfilter/nf_conntrack_timestamp.c
111554@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
111555 #ifdef CONFIG_SYSCTL
111556 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
111557 {
111558- struct ctl_table *table;
111559+ ctl_table_no_const *table;
111560
111561 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
111562 GFP_KERNEL);
111563diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
111564index 85296d4..8becdec 100644
111565--- a/net/netfilter/nf_log.c
111566+++ b/net/netfilter/nf_log.c
111567@@ -243,7 +243,7 @@ static const struct file_operations nflog_file_ops = {
111568
111569 #ifdef CONFIG_SYSCTL
111570 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
111571-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
111572+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
111573
111574 static int nf_log_proc_dostring(struct ctl_table *table, int write,
111575 void __user *buffer, size_t *lenp, loff_t *ppos)
111576@@ -274,14 +274,16 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
111577 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
111578 mutex_unlock(&nf_log_mutex);
111579 } else {
111580+ ctl_table_no_const nf_log_table = *table;
111581+
111582 mutex_lock(&nf_log_mutex);
111583 logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
111584 lockdep_is_held(&nf_log_mutex));
111585 if (!logger)
111586- table->data = "NONE";
111587+ nf_log_table.data = "NONE";
111588 else
111589- table->data = logger->name;
111590- r = proc_dostring(table, write, buffer, lenp, ppos);
111591+ nf_log_table.data = logger->name;
111592+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
111593 mutex_unlock(&nf_log_mutex);
111594 }
111595
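With nf_log_sysctl_table now read-only, the read path of nf_log_proc_dostring() can no longer point table->data at the current logger name in place. The patch copies the entry to the stack, patches the copy's .data, and hands the copy to proc_dostring(); the write path needs no change because it never modifies the table. The pattern:

	/* patch a stack copy instead of the read-only table entry */
	ctl_table_no_const tmp = *table;

	tmp.data = logger ? logger->name : "NONE";
	r = proc_dostring(&tmp, write, buffer, lenp, ppos);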
111596diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
111597index f042ae5..30ea486 100644
111598--- a/net/netfilter/nf_sockopt.c
111599+++ b/net/netfilter/nf_sockopt.c
111600@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
111601 }
111602 }
111603
111604- list_add(&reg->list, &nf_sockopts);
111605+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
111606 out:
111607 mutex_unlock(&nf_sockopt_mutex);
111608 return ret;
111609@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
111610 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
111611 {
111612 mutex_lock(&nf_sockopt_mutex);
111613- list_del(&reg->list);
111614+ pax_list_del((struct list_head *)&reg->list);
111615 mutex_unlock(&nf_sockopt_mutex);
111616 }
111617 EXPORT_SYMBOL(nf_unregister_sockopt);
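Registration places a list_head that lives inside a constified nf_sockopt_ops structure onto the global list, so plain list_add()/list_del() would fault on the now read-only page. pax_list_add()/pax_list_del() (defined elsewhere in this patch) wrap the operation in a temporary write window, and the casts strip the const qualifier the callers now carry. Approximately:

/* sketch, assuming the pax_open_kernel()/pax_close_kernel() primitives
 * from the PaX side of this patch */
static inline void pax_list_add_sketch(struct list_head *new,
				       struct list_head *head)
{
	pax_open_kernel();
	list_add(new, head);
	pax_close_kernel();
}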
111618diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
111619index d292c8d..9f1e166 100644
111620--- a/net/netfilter/nfnetlink_log.c
111621+++ b/net/netfilter/nfnetlink_log.c
111622@@ -79,7 +79,7 @@ static int nfnl_log_net_id __read_mostly;
111623 struct nfnl_log_net {
111624 spinlock_t instances_lock;
111625 struct hlist_head instance_table[INSTANCE_BUCKETS];
111626- atomic_t global_seq;
111627+ atomic_unchecked_t global_seq;
111628 };
111629
111630 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
111631@@ -561,7 +561,7 @@ __build_packet_message(struct nfnl_log_net *log,
111632 /* global sequence number */
111633 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
111634 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
111635- htonl(atomic_inc_return(&log->global_seq))))
111636+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
111637 goto nla_put_failure;
111638
111639 if (data_len) {
111640diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
111641index 1840989..6895744 100644
111642--- a/net/netfilter/nft_compat.c
111643+++ b/net/netfilter/nft_compat.c
111644@@ -225,7 +225,7 @@ target_dump_info(struct sk_buff *skb, const struct xt_target *t, const void *in)
111645 /* We want to reuse existing compat_to_user */
111646 old_fs = get_fs();
111647 set_fs(KERNEL_DS);
111648- t->compat_to_user(out, in);
111649+ t->compat_to_user((void __force_user *)out, in);
111650 set_fs(old_fs);
111651 ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), out);
111652 kfree(out);
111653@@ -421,7 +421,7 @@ match_dump_info(struct sk_buff *skb, const struct xt_match *m, const void *in)
111654 /* We want to reuse existing compat_to_user */
111655 old_fs = get_fs();
111656 set_fs(KERNEL_DS);
111657- m->compat_to_user(out, in);
111658+ m->compat_to_user((void __force_user *)out, in);
111659 set_fs(old_fs);
111660 ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), out);
111661 kfree(out);
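These compat dump paths run under set_fs(KERNEL_DS), so a kernel buffer is deliberately passed to a callback whose prototype takes a __user pointer. grsecurity strengthens the user/kernel address-space annotations enough that such calls need an explicit override marker, and __force_user is the patch's shorthand for it. Conceptually:

/* sketch: __force_user marks an intentional kernel->user pointer cast,
 * in the spirit of sparse's __force annotation */
#define __force_user	__force __user

static void dump_via_compat(const struct xt_target *t, void *out, const void *in)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);	/* uaccess helpers now accept kernel addresses */
	t->compat_to_user((void __force_user *)out, in);
	set_fs(old_fs);
}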
111662diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c
111663index bbffdbda..12d4da8 100644
111664--- a/net/netfilter/xt_bpf.c
111665+++ b/net/netfilter/xt_bpf.c
111666@@ -23,11 +23,10 @@ MODULE_ALIAS("ip6t_bpf");
111667 static int bpf_mt_check(const struct xt_mtchk_param *par)
111668 {
111669 struct xt_bpf_info *info = par->matchinfo;
111670- struct sock_fprog_kern program;
111671+ struct sock_fprog program;
111672
111673 program.len = info->bpf_program_num_elem;
111674- program.filter = info->bpf_program;
111675-
111676+ program.filter = (struct sock_filter __user *) info->bpf_program;
111677 if (sk_unattached_filter_create(&info->filter, &program)) {
111678 pr_info("bpf: check failed: parse error\n");
111679 return -EINVAL;
111680diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
111681new file mode 100644
111682index 0000000..c566332
111683--- /dev/null
111684+++ b/net/netfilter/xt_gradm.c
111685@@ -0,0 +1,51 @@
111686+/*
111687+ * gradm match for netfilter
111688