grsecurity-3.0-3.16.2-201409082129.patch
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 9de9813..1462492 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -3,9 +3,11 @@
6 *.bc
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -15,6 +17,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -51,14 +54,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -72,9 +78,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -83,6 +91,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -95,32 +104,40 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89 crc32table.h*
90 cscope.*
91 defkeymap.c
92+devicetable-offsets.h
93 devlist.h*
94 dnotify_test
95 docproc
96 dslm
97+dtc-lexer.lex.c
98 elf2ecoff
99 elfconfig.h*
100 evergreen_reg_safe.h
101+exception_policy.conf
102 fixdep
103 flask.h
104 fore200e_mkfirm
105@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
106 gconf
107 gconf.glade.h
108 gen-devlist
109+gen-kdb_cmds.c
110 gen_crc32table
111 gen_init_cpio
112 generated
113 genheaders
114 genksyms
115 *_gray256.c
116+hash
117+hid-example
118 hpet_example
119 hugepage-mmap
120 hugepage-shm
121@@ -148,14 +168,14 @@ int32.c
122 int4.c
123 int8.c
124 kallsyms
125-kconfig
126+kern_constants.h
127 keywords.c
128 ksym.c*
129 ksym.h*
130 kxgettext
131 lex.c
132 lex.*.c
133-linux
134+lib1funcs.S
135 logo_*.c
136 logo_*_clut224.c
137 logo_*_mono.c
138@@ -165,14 +185,15 @@ mach-types.h
139 machtypes.h
140 map
141 map_hugetlb
142-media
143 mconf
144+mdp
145 miboot*
146 mk_elfconfig
147 mkboot
148 mkbugboot
149 mkcpustr
150 mkdep
151+mkpiggy
152 mkprep
153 mkregtable
154 mktables
155@@ -188,6 +209,8 @@ oui.c*
156 page-types
157 parse.c
158 parse.h
159+parse-events*
160+pasyms.h
161 patches*
162 pca200e.bin
163 pca200e_ecd.bin2
164@@ -197,6 +220,7 @@ perf-archive
165 piggyback
166 piggy.gzip
167 piggy.S
168+pmu-*
169 pnmtologo
170 ppc_defs.h*
171 pss_boot.h
172@@ -206,7 +230,12 @@ r200_reg_safe.h
173 r300_reg_safe.h
174 r420_reg_safe.h
175 r600_reg_safe.h
176+randomize_layout_hash.h
177+randomize_layout_seed.h
178+realmode.lds
179+realmode.relocs
180 recordmcount
181+regdb.c
182 relocs
183 rlim_names.h
184 rn50_reg_safe.h
185@@ -216,8 +245,12 @@ series
186 setup
187 setup.bin
188 setup.elf
189+signing_key*
190+size_overflow_hash.h
191 sImage
192+slabinfo
193 sm_tbl*
194+sortextable
195 split-include
196 syscalltab.h
197 tables.c
198@@ -227,6 +260,7 @@ tftpboot.img
199 timeconst.h
200 times.h*
201 trix_boot.h
202+user_constants.h
203 utsrelease.h*
204 vdso-syms.lds
205 vdso.lds
206@@ -238,13 +272,17 @@ vdso32.lds
207 vdso32.so.dbg
208 vdso64.lds
209 vdso64.so.dbg
210+vdsox32.lds
211+vdsox32-syms.lds
212 version.h*
213 vmImage
214 vmlinux
215 vmlinux-*
216 vmlinux.aout
217 vmlinux.bin.all
218+vmlinux.bin.bz2
219 vmlinux.lds
220+vmlinux.relocs
221 vmlinuz
222 voffset.h
223 vsyscall.lds
224@@ -252,9 +290,12 @@ vsyscall_32.lds
225 wanxlfw.inc
226 uImage
227 unifdef
228+utsrelease.h
229 wakeup.bin
230 wakeup.elf
231 wakeup.lds
232+x509*
233 zImage*
234 zconf.hash.c
235+zconf.lex.c
236 zoffset.h
237diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
238index b7fa2f5..90cd9f8 100644
239--- a/Documentation/kernel-parameters.txt
240+++ b/Documentation/kernel-parameters.txt
241@@ -1138,6 +1138,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
242 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
243 Default: 1024
244
245+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
246+ ignore grsecurity's /proc restrictions
247+
248+
249 hashdist= [KNL,NUMA] Large hashes allocated during boot
250 are distributed across NUMA nodes. Defaults on
251 for 64-bit NUMA, off otherwise.
252@@ -2141,6 +2145,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
253 noexec=on: enable non-executable mappings (default)
254 noexec=off: disable non-executable mappings
255
256+ nopcid [X86-64]
257+ Disable PCID (Process-Context IDentifier) even if it
258+ is supported by the processor.
259+
260 nosmap [X86]
261 Disable SMAP (Supervisor Mode Access Prevention)
262 even if it is supported by processor.
263@@ -2418,6 +2426,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
264 the specified number of seconds. This is to be used if
265 your oopses keep scrolling off the screen.
266
267+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
268+ virtualization environments that don't cope well with the
269+ expand down segment used by UDEREF on X86-32 or the frequent
270+ page table updates on X86-64.
271+
272+ pax_sanitize_slab=
273+ 0/1 to disable/enable slab object sanitization (enabled by
274+ default).
275+
276+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
277+
278+ pax_extra_latent_entropy
279+ Enable a very simple form of latent entropy extraction
280+ from the first 4GB of memory as the bootmem allocator
281+ passes the memory pages to the buddy allocator.
282+
283+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
284+ when the processor supports PCID.
285+
286 pcbit= [HW,ISDN]
287
288 pcd. [PARIDE]
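
The pax_sanitize_slab= and pax_softmode= entries above are plain 0/1 boot switches. A minimal sketch of how such a switch is typically wired up with the kernel's __setup() mechanism — the handler name and flag variable here are assumptions; the real grsecurity handlers live elsewhere in this patch and may differ:

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/types.h>

    static bool pax_sanitize_slab_enabled = true;	/* "enabled by default" */

    /* Parses the 0/1 argument of pax_sanitize_slab= at boot. */
    static int __init pax_sanitize_slab_setup(char *str)
    {
    	unsigned long val;

    	if (kstrtoul(str, 10, &val))
    		return 0;	/* malformed: leave it on the unknown-parameter path */
    	pax_sanitize_slab_enabled = (val != 0);
    	return 1;	/* consumed */
    }
    __setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
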
289diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
290index ee78eba..a06b48d 100644
291--- a/Documentation/networking/filter.txt
292+++ b/Documentation/networking/filter.txt
293@@ -277,11 +277,10 @@ Possible BPF extensions are shown in the following table:
294 mark skb->mark
295 queue skb->queue_mapping
296 hatype skb->dev->type
297- rxhash skb->hash
298+ rxhash skb->rxhash
299 cpu raw_smp_processor_id()
300 vlan_tci vlan_tx_tag_get(skb)
301 vlan_pr vlan_tx_tag_present(skb)
302- rand prandom_u32()
303
304 These extensions can also be prefixed with '#'.
305 Examples for low-level BPF:
306@@ -309,18 +308,6 @@ Examples for low-level BPF:
307 ret #-1
308 drop: ret #0
309
310-** icmp random packet sampling, 1 in 4
311- ldh [12]
312- jne #0x800, drop
313- ldb [23]
314- jneq #1, drop
315- # get a random uint32 number
316- ld rand
317- mod #4
318- jneq #1, drop
319- ret #-1
320- drop: ret #0
321-
322 ** SECCOMP filter example:
323
324 ld [4] /* offsetof(struct seccomp_data, arch) */
325@@ -559,456 +546,6 @@ ffffffffa0069c8f + <x>:
326 For BPF JIT developers, bpf_jit_disasm, bpf_asm and bpf_dbg provides a useful
327 toolchain for developing and testing the kernel's JIT compiler.
328
329-BPF kernel internals
330---------------------
331-Internally, for the kernel interpreter, a different instruction set
332-format with similar underlying principles from BPF described in previous
333-paragraphs is being used. However, the instruction set format is modelled
334-closer to the underlying architecture to mimic native instruction sets, so
335-that a better performance can be achieved (more details later). This new
336-ISA is called 'eBPF' or 'internal BPF' interchangeably. (Note: eBPF which
337-originates from [e]xtended BPF is not the same as BPF extensions! While
338-eBPF is an ISA, BPF extensions date back to classic BPF's 'overloading'
339-of BPF_LD | BPF_{B,H,W} | BPF_ABS instruction.)
340-
341-It is designed to be JITed with one to one mapping, which can also open up
342-the possibility for GCC/LLVM compilers to generate optimized eBPF code through
343-an eBPF backend that performs almost as fast as natively compiled code.
344-
345-The new instruction set was originally designed with the possible goal in
346-mind to write programs in "restricted C" and compile into eBPF with a optional
347-GCC/LLVM backend, so that it can just-in-time map to modern 64-bit CPUs with
348-minimal performance overhead over two steps, that is, C -> eBPF -> native code.
349-
350-Currently, the new format is being used for running user BPF programs, which
351-includes seccomp BPF, classic socket filters, cls_bpf traffic classifier,
352-team driver's classifier for its load-balancing mode, netfilter's xt_bpf
353-extension, PTP dissector/classifier, and much more. They are all internally
354-converted by the kernel into the new instruction set representation and run
355-in the eBPF interpreter. For in-kernel handlers, this all works transparently
356-by using sk_unattached_filter_create() for setting up the filter, resp.
357-sk_unattached_filter_destroy() for destroying it. The macro
358-SK_RUN_FILTER(filter, ctx) transparently invokes eBPF interpreter or JITed
359-code to run the filter. 'filter' is a pointer to struct sk_filter that we
360-got from sk_unattached_filter_create(), and 'ctx' the given context (e.g.
361-skb pointer). All constraints and restrictions from sk_chk_filter() apply
362-before a conversion to the new layout is being done behind the scenes!
363-
364-Currently, the classic BPF format is being used for JITing on most of the
365-architectures. Only x86-64 performs JIT compilation from eBPF instruction set,
366-however, future work will migrate other JIT compilers as well, so that they
367-will profit from the very same benefits.
368-
369-Some core changes of the new internal format:
370-
371-- Number of registers increase from 2 to 10:
372-
373- The old format had two registers A and X, and a hidden frame pointer. The
374- new layout extends this to be 10 internal registers and a read-only frame
375- pointer. Since 64-bit CPUs are passing arguments to functions via registers
376- the number of args from eBPF program to in-kernel function is restricted
377- to 5 and one register is used to accept return value from an in-kernel
378- function. Natively, x86_64 passes first 6 arguments in registers, aarch64/
379- sparcv9/mips64 have 7 - 8 registers for arguments; x86_64 has 6 callee saved
380- registers, and aarch64/sparcv9/mips64 have 11 or more callee saved registers.
381-
382- Therefore, eBPF calling convention is defined as:
383-
384- * R0 - return value from in-kernel function, and exit value for eBPF program
385- * R1 - R5 - arguments from eBPF program to in-kernel function
386- * R6 - R9 - callee saved registers that in-kernel function will preserve
387- * R10 - read-only frame pointer to access stack
388-
389- Thus, all eBPF registers map one to one to HW registers on x86_64, aarch64,
390- etc, and eBPF calling convention maps directly to ABIs used by the kernel on
391- 64-bit architectures.
392-
393- On 32-bit architectures JIT may map programs that use only 32-bit arithmetic
394- and may let more complex programs to be interpreted.
395-
396- R0 - R5 are scratch registers and eBPF program needs spill/fill them if
397- necessary across calls. Note that there is only one eBPF program (== one
398- eBPF main routine) and it cannot call other eBPF functions, it can only
399- call predefined in-kernel functions, though.
400-
401-- Register width increases from 32-bit to 64-bit:
402-
403- Still, the semantics of the original 32-bit ALU operations are preserved
404- via 32-bit subregisters. All eBPF registers are 64-bit with 32-bit lower
405- subregisters that zero-extend into 64-bit if they are being written to.
406- That behavior maps directly to x86_64 and arm64 subregister definition, but
407- makes other JITs more difficult.
408-
409- 32-bit architectures run 64-bit internal BPF programs via interpreter.
410- Their JITs may convert BPF programs that only use 32-bit subregisters into
411- native instruction set and let the rest being interpreted.
412-
413- Operation is 64-bit, because on 64-bit architectures, pointers are also
414- 64-bit wide, and we want to pass 64-bit values in/out of kernel functions,
415- so 32-bit eBPF registers would otherwise require to define register-pair
416- ABI, thus, there won't be able to use a direct eBPF register to HW register
417- mapping and JIT would need to do combine/split/move operations for every
418- register in and out of the function, which is complex, bug prone and slow.
419- Another reason is the use of atomic 64-bit counters.
420-
421-- Conditional jt/jf targets replaced with jt/fall-through:
422-
423- While the original design has constructs such as "if (cond) jump_true;
424- else jump_false;", they are being replaced into alternative constructs like
425- "if (cond) jump_true; /* else fall-through */".
426-
427-- Introduces bpf_call insn and register passing convention for zero overhead
428- calls from/to other kernel functions:
429-
430- Before an in-kernel function call, the internal BPF program needs to
431- place function arguments into R1 to R5 registers to satisfy calling
432- convention, then the interpreter will take them from registers and pass
433- to in-kernel function. If R1 - R5 registers are mapped to CPU registers
434- that are used for argument passing on given architecture, the JIT compiler
435- doesn't need to emit extra moves. Function arguments will be in the correct
436- registers and BPF_CALL instruction will be JITed as single 'call' HW
437- instruction. This calling convention was picked to cover common call
438- situations without performance penalty.
439-
440- After an in-kernel function call, R1 - R5 are reset to unreadable and R0 has
441- a return value of the function. Since R6 - R9 are callee saved, their state
442- is preserved across the call.
443-
444- For example, consider three C functions:
445-
446- u64 f1() { return (*_f2)(1); }
447- u64 f2(u64 a) { return f3(a + 1, a); }
448- u64 f3(u64 a, u64 b) { return a - b; }
449-
450- GCC can compile f1, f3 into x86_64:
451-
452- f1:
453- movl $1, %edi
454- movq _f2(%rip), %rax
455- jmp *%rax
456- f3:
457- movq %rdi, %rax
458- subq %rsi, %rax
459- ret
460-
461- Function f2 in eBPF may look like:
462-
463- f2:
464- bpf_mov R2, R1
465- bpf_add R1, 1
466- bpf_call f3
467- bpf_exit
468-
469- If f2 is JITed and the pointer stored to '_f2'. The calls f1 -> f2 -> f3 and
470- returns will be seamless. Without JIT, __sk_run_filter() interpreter needs to
471- be used to call into f2.
472-
473- For practical reasons all eBPF programs have only one argument 'ctx' which is
474- already placed into R1 (e.g. on __sk_run_filter() startup) and the programs
475- can call kernel functions with up to 5 arguments. Calls with 6 or more arguments
476- are currently not supported, but these restrictions can be lifted if necessary
477- in the future.
478-
479- On 64-bit architectures all register map to HW registers one to one. For
480- example, x86_64 JIT compiler can map them as ...
481-
482- R0 - rax
483- R1 - rdi
484- R2 - rsi
485- R3 - rdx
486- R4 - rcx
487- R5 - r8
488- R6 - rbx
489- R7 - r13
490- R8 - r14
491- R9 - r15
492- R10 - rbp
493-
494- ... since x86_64 ABI mandates rdi, rsi, rdx, rcx, r8, r9 for argument passing
495- and rbx, r12 - r15 are callee saved.
496-
497- Then the following internal BPF pseudo-program:
498-
499- bpf_mov R6, R1 /* save ctx */
500- bpf_mov R2, 2
501- bpf_mov R3, 3
502- bpf_mov R4, 4
503- bpf_mov R5, 5
504- bpf_call foo
505- bpf_mov R7, R0 /* save foo() return value */
506- bpf_mov R1, R6 /* restore ctx for next call */
507- bpf_mov R2, 6
508- bpf_mov R3, 7
509- bpf_mov R4, 8
510- bpf_mov R5, 9
511- bpf_call bar
512- bpf_add R0, R7
513- bpf_exit
514-
515- After JIT to x86_64 may look like:
516-
517- push %rbp
518- mov %rsp,%rbp
519- sub $0x228,%rsp
520- mov %rbx,-0x228(%rbp)
521- mov %r13,-0x220(%rbp)
522- mov %rdi,%rbx
523- mov $0x2,%esi
524- mov $0x3,%edx
525- mov $0x4,%ecx
526- mov $0x5,%r8d
527- callq foo
528- mov %rax,%r13
529- mov %rbx,%rdi
530- mov $0x2,%esi
531- mov $0x3,%edx
532- mov $0x4,%ecx
533- mov $0x5,%r8d
534- callq bar
535- add %r13,%rax
536- mov -0x228(%rbp),%rbx
537- mov -0x220(%rbp),%r13
538- leaveq
539- retq
540-
541- Which is in this example equivalent in C to:
542-
543- u64 bpf_filter(u64 ctx)
544- {
545- return foo(ctx, 2, 3, 4, 5) + bar(ctx, 6, 7, 8, 9);
546- }
547-
548- In-kernel functions foo() and bar() with prototype: u64 (*)(u64 arg1, u64
549- arg2, u64 arg3, u64 arg4, u64 arg5); will receive arguments in proper
550- registers and place their return value into '%rax' which is R0 in eBPF.
551- Prologue and epilogue are emitted by JIT and are implicit in the
552- interpreter. R0-R5 are scratch registers, so eBPF program needs to preserve
553- them across the calls as defined by calling convention.
554-
555- For example the following program is invalid:
556-
557- bpf_mov R1, 1
558- bpf_call foo
559- bpf_mov R0, R1
560- bpf_exit
561-
562- After the call the registers R1-R5 contain junk values and cannot be read.
563- In the future an eBPF verifier can be used to validate internal BPF programs.
564-
565-Also in the new design, eBPF is limited to 4096 insns, which means that any
566-program will terminate quickly and will only call a fixed number of kernel
567-functions. Original BPF and the new format are two operand instructions,
568-which helps to do one-to-one mapping between eBPF insn and x86 insn during JIT.
569-
570-The input context pointer for invoking the interpreter function is generic,
571-its content is defined by a specific use case. For seccomp register R1 points
572-to seccomp_data, for converted BPF filters R1 points to a skb.
573-
574-A program, that is translated internally consists of the following elements:
575-
576- op:16, jt:8, jf:8, k:32 ==> op:8, dst_reg:4, src_reg:4, off:16, imm:32
577-
578-So far 87 internal BPF instructions were implemented. 8-bit 'op' opcode field
579-has room for new instructions. Some of them may use 16/24/32 byte encoding. New
580-instructions must be multiple of 8 bytes to preserve backward compatibility.
581-
582-Internal BPF is a general purpose RISC instruction set. Not every register and
583-every instruction are used during translation from original BPF to new format.
584-For example, socket filters are not using 'exclusive add' instruction, but
585-tracing filters may do to maintain counters of events, for example. Register R9
586-is not used by socket filters either, but more complex filters may be running
587-out of registers and would have to resort to spill/fill to stack.
588-
589-Internal BPF can used as generic assembler for last step performance
590-optimizations, socket filters and seccomp are using it as assembler. Tracing
591-filters may use it as assembler to generate code from kernel. In kernel usage
592-may not be bounded by security considerations, since generated internal BPF code
593-may be optimizing internal code path and not being exposed to the user space.
594-Safety of internal BPF can come from a verifier (TBD). In such use cases as
595-described, it may be used as safe instruction set.
596-
597-Just like the original BPF, the new format runs within a controlled environment,
598-is deterministic and the kernel can easily prove that. The safety of the program
599-can be determined in two steps: first step does depth-first-search to disallow
600-loops and other CFG validation; second step starts from the first insn and
601-descends all possible paths. It simulates execution of every insn and observes
602-the state change of registers and stack.
603-
604-eBPF opcode encoding
605---------------------
606-
607-eBPF is reusing most of the opcode encoding from classic to simplify conversion
608-of classic BPF to eBPF. For arithmetic and jump instructions the 8-bit 'code'
609-field is divided into three parts:
610-
611- +----------------+--------+--------------------+
612- | 4 bits | 1 bit | 3 bits |
613- | operation code | source | instruction class |
614- +----------------+--------+--------------------+
615- (MSB) (LSB)
616-
617-Three LSB bits store instruction class which is one of:
618-
619- Classic BPF classes: eBPF classes:
620-
621- BPF_LD 0x00 BPF_LD 0x00
622- BPF_LDX 0x01 BPF_LDX 0x01
623- BPF_ST 0x02 BPF_ST 0x02
624- BPF_STX 0x03 BPF_STX 0x03
625- BPF_ALU 0x04 BPF_ALU 0x04
626- BPF_JMP 0x05 BPF_JMP 0x05
627- BPF_RET 0x06 [ class 6 unused, for future if needed ]
628- BPF_MISC 0x07 BPF_ALU64 0x07
629-
630-When BPF_CLASS(code) == BPF_ALU or BPF_JMP, 4th bit encodes source operand ...
631-
632- BPF_K 0x00
633- BPF_X 0x08
634-
635- * in classic BPF, this means:
636-
637- BPF_SRC(code) == BPF_X - use register X as source operand
638- BPF_SRC(code) == BPF_K - use 32-bit immediate as source operand
639-
640- * in eBPF, this means:
641-
642- BPF_SRC(code) == BPF_X - use 'src_reg' register as source operand
643- BPF_SRC(code) == BPF_K - use 32-bit immediate as source operand
644-
645-... and four MSB bits store operation code.
646-
647-If BPF_CLASS(code) == BPF_ALU or BPF_ALU64 [ in eBPF ], BPF_OP(code) is one of:
648-
649- BPF_ADD 0x00
650- BPF_SUB 0x10
651- BPF_MUL 0x20
652- BPF_DIV 0x30
653- BPF_OR 0x40
654- BPF_AND 0x50
655- BPF_LSH 0x60
656- BPF_RSH 0x70
657- BPF_NEG 0x80
658- BPF_MOD 0x90
659- BPF_XOR 0xa0
660- BPF_MOV 0xb0 /* eBPF only: mov reg to reg */
661- BPF_ARSH 0xc0 /* eBPF only: sign extending shift right */
662- BPF_END 0xd0 /* eBPF only: endianness conversion */
663-
664-If BPF_CLASS(code) == BPF_JMP, BPF_OP(code) is one of:
665-
666- BPF_JA 0x00
667- BPF_JEQ 0x10
668- BPF_JGT 0x20
669- BPF_JGE 0x30
670- BPF_JSET 0x40
671- BPF_JNE 0x50 /* eBPF only: jump != */
672- BPF_JSGT 0x60 /* eBPF only: signed '>' */
673- BPF_JSGE 0x70 /* eBPF only: signed '>=' */
674- BPF_CALL 0x80 /* eBPF only: function call */
675- BPF_EXIT 0x90 /* eBPF only: function return */
676-
677-So BPF_ADD | BPF_X | BPF_ALU means 32-bit addition in both classic BPF
678-and eBPF. There are only two registers in classic BPF, so it means A += X.
679-In eBPF it means dst_reg = (u32) dst_reg + (u32) src_reg; similarly,
680-BPF_XOR | BPF_K | BPF_ALU means A ^= imm32 in classic BPF and analogous
681-src_reg = (u32) src_reg ^ (u32) imm32 in eBPF.
682-
683-Classic BPF is using BPF_MISC class to represent A = X and X = A moves.
684-eBPF is using BPF_MOV | BPF_X | BPF_ALU code instead. Since there are no
685-BPF_MISC operations in eBPF, the class 7 is used as BPF_ALU64 to mean
686-exactly the same operations as BPF_ALU, but with 64-bit wide operands
687-instead. So BPF_ADD | BPF_X | BPF_ALU64 means 64-bit addition, i.e.:
688-dst_reg = dst_reg + src_reg
689-
690-Classic BPF wastes the whole BPF_RET class to represent a single 'ret'
691-operation. Classic BPF_RET | BPF_K means copy imm32 into return register
692-and perform function exit. eBPF is modeled to match CPU, so BPF_JMP | BPF_EXIT
693-in eBPF means function exit only. The eBPF program needs to store return
694-value into register R0 before doing a BPF_EXIT. Class 6 in eBPF is currently
695-unused and reserved for future use.
696-
697-For load and store instructions the 8-bit 'code' field is divided as:
698-
699- +--------+--------+-------------------+
700- | 3 bits | 2 bits | 3 bits |
701- | mode | size | instruction class |
702- +--------+--------+-------------------+
703- (MSB) (LSB)
704-
705-Size modifier is one of ...
706-
707- BPF_W 0x00 /* word */
708- BPF_H 0x08 /* half word */
709- BPF_B 0x10 /* byte */
710- BPF_DW 0x18 /* eBPF only, double word */
711-
712-... which encodes size of load/store operation:
713-
714- B - 1 byte
715- H - 2 byte
716- W - 4 byte
717- DW - 8 byte (eBPF only)
718-
719-Mode modifier is one of:
720-
721- BPF_IMM 0x00 /* classic BPF only, reserved in eBPF */
722- BPF_ABS 0x20
723- BPF_IND 0x40
724- BPF_MEM 0x60
725- BPF_LEN 0x80 /* classic BPF only, reserved in eBPF */
726- BPF_MSH 0xa0 /* classic BPF only, reserved in eBPF */
727- BPF_XADD 0xc0 /* eBPF only, exclusive add */
728-
729-eBPF has two non-generic instructions: (BPF_ABS | <size> | BPF_LD) and
730-(BPF_IND | <size> | BPF_LD) which are used to access packet data.
731-
732-They had to be carried over from classic to have strong performance of
733-socket filters running in eBPF interpreter. These instructions can only
734-be used when interpreter context is a pointer to 'struct sk_buff' and
735-have seven implicit operands. Register R6 is an implicit input that must
736-contain pointer to sk_buff. Register R0 is an implicit output which contains
737-the data fetched from the packet. Registers R1-R5 are scratch registers
738-and must not be used to store the data across BPF_ABS | BPF_LD or
739-BPF_IND | BPF_LD instructions.
740-
741-These instructions have implicit program exit condition as well. When
742-eBPF program is trying to access the data beyond the packet boundary,
743-the interpreter will abort the execution of the program. JIT compilers
744-therefore must preserve this property. src_reg and imm32 fields are
745-explicit inputs to these instructions.
746-
747-For example:
748-
749- BPF_IND | BPF_W | BPF_LD means:
750-
751- R0 = ntohl(*(u32 *) (((struct sk_buff *) R6)->data + src_reg + imm32))
752- and R1 - R5 were scratched.
753-
754-Unlike classic BPF instruction set, eBPF has generic load/store operations:
755-
756-BPF_MEM | <size> | BPF_STX: *(size *) (dst_reg + off) = src_reg
757-BPF_MEM | <size> | BPF_ST: *(size *) (dst_reg + off) = imm32
758-BPF_MEM | <size> | BPF_LDX: dst_reg = *(size *) (src_reg + off)
759-BPF_XADD | BPF_W | BPF_STX: lock xadd *(u32 *)(dst_reg + off16) += src_reg
760-BPF_XADD | BPF_DW | BPF_STX: lock xadd *(u64 *)(dst_reg + off16) += src_reg
761-
762-Where size is one of: BPF_B or BPF_H or BPF_W or BPF_DW. Note that 1 and
763-2 byte atomic increments are not supported.
764-
765-Testing
766--------
767-
768-Next to the BPF toolchain, the kernel also ships a test module that contains
769-various test cases for classic and internal BPF that can be executed against
770-the BPF interpreter and JIT compiler. It can be found in lib/test_bpf.c and
771-enabled via Kconfig:
772-
773- CONFIG_TEST_BPF=m
774-
775-After the module has been built and installed, the test suite can be executed
776-via insmod or modprobe against 'test_bpf' module. Results of the test cases
777-including timings in nsec can be found in the kernel log (dmesg).
778-
779 Misc
780 ----
781
782@@ -1024,4 +561,3 @@ the underlying architecture.
783
784 Jay Schulist <jschlst@samba.org>
785 Daniel Borkmann <dborkman@redhat.com>
786-Alexei Starovoitov <ast@plumgrid.com>
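
For reference, the three-way split of the 8-bit 'code' field described in the text removed above can be checked mechanically with the classic mask values. A small self-contained sketch (masks as documented; the demo value is BPF_ADD | BPF_X | BPF_ALU):

    #include <stdint.h>
    #include <stdio.h>

    /* Field extractors for the 8-bit 'code' byte: low 3 bits hold the
     * instruction class, bit 3 the source flag, top 4 bits the operation. */
    #define BPF_CLASS(code)	((code) & 0x07)
    #define BPF_SRC(code)	((code) & 0x08)
    #define BPF_OP(code)	((code) & 0xf0)

    int main(void)
    {
    	uint8_t code = 0x04 | 0x08 | 0x00;	/* BPF_ALU | BPF_X | BPF_ADD */

    	printf("class=%#x src=%#x op=%#x\n",
    	       BPF_CLASS(code), BPF_SRC(code), BPF_OP(code));
    	/* prints: class=0x4 src=0x8 op=0 -> 32-bit A += X */
    	return 0;
    }
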
787diff --git a/Makefile b/Makefile
788index c261752..7b9958b 100644
789--- a/Makefile
790+++ b/Makefile
791@@ -303,8 +303,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
792
793 HOSTCC = gcc
794 HOSTCXX = g++
795-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
796-HOSTCXXFLAGS = -O2
797+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
798+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
799+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
800
801 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
802 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
803@@ -449,8 +450,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
804 # Rules shared between *config targets and build targets
805
806 # Basic helpers built in scripts/
807-PHONY += scripts_basic
808-scripts_basic:
809+PHONY += scripts_basic gcc-plugins
810+scripts_basic: gcc-plugins
811 $(Q)$(MAKE) $(build)=scripts/basic
812 $(Q)rm -f .tmp_quiet_recordmcount
813
814@@ -621,6 +622,75 @@ else
815 KBUILD_CFLAGS += -O2
816 endif
817
818+# Tell gcc to never replace conditional load with a non-conditional one
819+KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
820+
821+ifndef DISABLE_PAX_PLUGINS
822+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
823+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
824+else
825+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
826+endif
827+ifneq ($(PLUGINCC),)
828+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
829+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
830+endif
831+ifdef CONFIG_PAX_MEMORY_STACKLEAK
832+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
833+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
834+endif
835+ifdef CONFIG_KALLOCSTAT_PLUGIN
836+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
837+endif
838+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
839+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
840+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
841+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
842+endif
843+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
844+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
845+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
846+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
847+endif
848+endif
849+ifdef CONFIG_CHECKER_PLUGIN
850+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
851+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
852+endif
853+endif
854+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
855+ifdef CONFIG_PAX_SIZE_OVERFLOW
856+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
857+endif
858+ifdef CONFIG_PAX_LATENT_ENTROPY
859+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
860+endif
861+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
862+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
863+endif
864+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
865+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
866+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
867+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
868+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
869+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
870+ifeq ($(KBUILD_EXTMOD),)
871+gcc-plugins:
872+ $(Q)$(MAKE) $(build)=tools/gcc
873+else
874+gcc-plugins: ;
875+endif
876+else
877+gcc-plugins:
878+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
879+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
880+else
881+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
882+endif
883+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
884+endif
885+endif
886+
887 ifdef CONFIG_READABLE_ASM
888 # Disable optimizations that make assembler listings hard to read.
889 # reorder blocks reorders the control in the function
890@@ -839,7 +909,7 @@ export mod_sign_cmd
891
892
893 ifeq ($(KBUILD_EXTMOD),)
894-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
895+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
896
897 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
898 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
899@@ -888,6 +958,8 @@ endif
900
901 # The actual objects are generated when descending,
902 # make sure no implicit rule kicks in
903+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
904+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
905 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
906
907 # Handle descending into subdirectories listed in $(vmlinux-dirs)
908@@ -897,7 +969,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
909 # Error messages still appears in the original language
910
911 PHONY += $(vmlinux-dirs)
912-$(vmlinux-dirs): prepare scripts
913+$(vmlinux-dirs): gcc-plugins prepare scripts
914 $(Q)$(MAKE) $(build)=$@
915
916 define filechk_kernel.release
917@@ -940,10 +1012,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
918
919 archprepare: archheaders archscripts prepare1 scripts_basic
920
921+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
922+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
923 prepare0: archprepare FORCE
924 $(Q)$(MAKE) $(build)=.
925
926 # All the preparing..
927+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
928 prepare: prepare0
929
930 # Generate some files
931@@ -1051,6 +1126,8 @@ all: modules
932 # using awk while concatenating to the final file.
933
934 PHONY += modules
935+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
936+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
937 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
938 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
939 @$(kecho) ' Building modules, stage 2.';
940@@ -1066,7 +1143,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
941
942 # Target to prepare building external modules
943 PHONY += modules_prepare
944-modules_prepare: prepare scripts
945+modules_prepare: gcc-plugins prepare scripts
946
947 # Target to install modules
948 PHONY += modules_install
949@@ -1132,7 +1209,10 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
950 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
951 signing_key.priv signing_key.x509 x509.genkey \
952 extra_certificates signing_key.x509.keyid \
953- signing_key.x509.signer include/linux/version.h
954+ signing_key.x509.signer include/linux/version.h \
955+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
956+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
957+ tools/gcc/randomize_layout_seed.h
958
959 # clean - Delete most, but leave enough to build external modules
960 #
961@@ -1171,7 +1251,7 @@ distclean: mrproper
962 @find $(srctree) $(RCS_FIND_IGNORE) \
963 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
964 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
965- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
966+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
967 -type f -print | xargs rm -f
968
969
970@@ -1332,6 +1412,8 @@ PHONY += $(module-dirs) modules
971 $(module-dirs): crmodverdir $(objtree)/Module.symvers
972 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
973
974+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
975+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
976 modules: $(module-dirs)
977 @$(kecho) ' Building modules, stage 2.';
978 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
979@@ -1471,17 +1553,21 @@ else
980 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
981 endif
982
983-%.s: %.c prepare scripts FORCE
984+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
985+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
986+%.s: %.c gcc-plugins prepare scripts FORCE
987 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
988 %.i: %.c prepare scripts FORCE
989 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
990-%.o: %.c prepare scripts FORCE
991+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
992+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
993+%.o: %.c gcc-plugins prepare scripts FORCE
994 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
995 %.lst: %.c prepare scripts FORCE
996 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
997-%.s: %.S prepare scripts FORCE
998+%.s: %.S gcc-plugins prepare scripts FORCE
999 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
1000-%.o: %.S prepare scripts FORCE
1001+%.o: %.S gcc-plugins prepare scripts FORCE
1002 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
1003 %.symtypes: %.c prepare scripts FORCE
1004 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
1005@@ -1491,11 +1577,15 @@ endif
1006 $(cmd_crmodverdir)
1007 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
1008 $(build)=$(build-dir)
1009-%/: prepare scripts FORCE
1010+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
1011+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
1012+%/: gcc-plugins prepare scripts FORCE
1013 $(cmd_crmodverdir)
1014 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
1015 $(build)=$(build-dir)
1016-%.ko: prepare scripts FORCE
1017+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
1018+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
1019+%.ko: gcc-plugins prepare scripts FORCE
1020 $(cmd_crmodverdir)
1021 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
1022 $(build)=$(build-dir) $(@:.ko=.o)
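
Each plugin .so referenced above is an ordinary GCC plugin loaded with -fplugin=. A minimal sketch of the shape such a plugin takes — the two exported symbols are the ones GCC's plugin loader requires; everything a real plugin (constify, stackleak, ...) does happens in the registration step, which is omitted here:

    #include "gcc-plugin.h"
    #include "plugin-version.h"

    /* GCC refuses to load plugins that do not export this symbol. */
    int plugin_is_GPL_compatible;

    int plugin_init(struct plugin_name_args *plugin_info,
    		struct plugin_gcc_version *version)
    {
    	/* Reject a plugin built against a different GCC release. */
    	if (!plugin_default_version_check(version, &gcc_version))
    		return 1;
    	/* A real plugin registers its passes and callbacks here,
    	 * via register_callback(). */
    	return 0;
    }

Such a file is built roughly as cc -shared -fPIC -I"$(gcc -print-file-name=plugin)/include" plugin.c -o plugin.so. Since GCC 4.8 the compiler itself is built as C++ and plugins must be compiled with a C++ compiler, which is why the PLUGINCC detection above switches from HOSTCC to HOSTCXX at that version.
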
1023diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
1024index ed60a1e..47f1a55 100644
1025--- a/arch/alpha/include/asm/atomic.h
1026+++ b/arch/alpha/include/asm/atomic.h
1027@@ -292,4 +292,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
1028 #define atomic_dec(v) atomic_sub(1,(v))
1029 #define atomic64_dec(v) atomic64_sub(1,(v))
1030
1031+#define atomic64_read_unchecked(v) atomic64_read(v)
1032+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1033+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1034+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1035+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1036+#define atomic64_inc_unchecked(v) atomic64_inc(v)
1037+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1038+#define atomic64_dec_unchecked(v) atomic64_dec(v)
1039+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1040+
1041 #endif /* _ALPHA_ATOMIC_H */
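
Alpha gets no PAX_REFCOUNT instrumentation, so the *_unchecked API introduced by this patch is satisfied by aliasing straight to the plain operations. A userspace sketch of the pattern, with the types simplified:

    #include <stdio.h>

    typedef struct { long counter; } atomic64_t;

    static void atomic64_inc(atomic64_t *v)
    {
    	__atomic_fetch_add(&v->counter, 1, __ATOMIC_SEQ_CST);
    }

    /* No overflow-checked variant on this arch: forward unchanged. */
    #define atomic64_inc_unchecked(v)	atomic64_inc(v)

    int main(void)
    {
    	atomic64_t a = { 0 };

    	atomic64_inc_unchecked(&a);
    	printf("%ld\n", a.counter);	/* 1 */
    	return 0;
    }
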
1042diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
1043index ad368a9..fbe0f25 100644
1044--- a/arch/alpha/include/asm/cache.h
1045+++ b/arch/alpha/include/asm/cache.h
1046@@ -4,19 +4,19 @@
1047 #ifndef __ARCH_ALPHA_CACHE_H
1048 #define __ARCH_ALPHA_CACHE_H
1049
1050+#include <linux/const.h>
1051
1052 /* Bytes per L1 (data) cache line. */
1053 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
1054-# define L1_CACHE_BYTES 64
1055 # define L1_CACHE_SHIFT 6
1056 #else
1057 /* Both EV4 and EV5 are write-through, read-allocate,
1058 direct-mapped, physical.
1059 */
1060-# define L1_CACHE_BYTES 32
1061 # define L1_CACHE_SHIFT 5
1062 #endif
1063
1064+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1065 #define SMP_CACHE_BYTES L1_CACHE_BYTES
1066
1067 #endif
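
The rewrite above derives L1_CACHE_BYTES from L1_CACHE_SHIFT instead of maintaining two constants per CPU family, so the pair cannot drift apart. _AC(1,UL) pastes the UL suffix in C while remaining usable from assembly; a userspace check of the expansion:

    #include <stdio.h>

    #define _AC(X, Y)	X##Y		/* the C-side expansion of _AC() */
    #define L1_CACHE_SHIFT	6		/* EV6/generic */
    #define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)

    int main(void)
    {
    	printf("%lu\n", L1_CACHE_BYTES);	/* 64 */
    	return 0;
    }
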
1068diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
1069index 968d999..d36b2df 100644
1070--- a/arch/alpha/include/asm/elf.h
1071+++ b/arch/alpha/include/asm/elf.h
1072@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1073
1074 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
1075
1076+#ifdef CONFIG_PAX_ASLR
1077+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
1078+
1079+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
1080+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
1081+#endif
1082+
1083 /* $0 is set by ld.so to a pointer to a function which might be
1084 registered using atexit. This provides a mean for the dynamic
1085 linker to call DT_FINI functions for shared libraries that have
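
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN above are entropy widths: the number of random bits PaX mixes into the respective base address, shifted up by the page size. A hedged sketch of the arithmetic — the actual mixing is done in the ELF loader changes elsewhere in this patch:

    #include <stdio.h>

    #define PAGE_SHIFT		13	/* alpha uses 8 KB pages */
    #define PAX_DELTA_MMAP_LEN	28	/* 64-bit personality, per the hunk above */

    static unsigned long pax_delta_mmap(unsigned long rnd)
    {
    	return (rnd & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
    }

    int main(void)
    {
    	/* 2^28 pages of 8 KB each: the mmap base moves in a ~2 TB window. */
    	printf("max mmap delta: %#lx\n", pax_delta_mmap(~0UL));
    	return 0;
    }
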
1086diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
1087index aab14a0..b4fa3e7 100644
1088--- a/arch/alpha/include/asm/pgalloc.h
1089+++ b/arch/alpha/include/asm/pgalloc.h
1090@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
1091 pgd_set(pgd, pmd);
1092 }
1093
1094+static inline void
1095+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
1096+{
1097+ pgd_populate(mm, pgd, pmd);
1098+}
1099+
1100 extern pgd_t *pgd_alloc(struct mm_struct *mm);
1101
1102 static inline void
1103diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
1104index d8f9b7e..f6222fa 100644
1105--- a/arch/alpha/include/asm/pgtable.h
1106+++ b/arch/alpha/include/asm/pgtable.h
1107@@ -102,6 +102,17 @@ struct vm_area_struct;
1108 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
1109 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
1110 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
1111+
1112+#ifdef CONFIG_PAX_PAGEEXEC
1113+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
1114+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
1115+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
1116+#else
1117+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1118+# define PAGE_COPY_NOEXEC PAGE_COPY
1119+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1120+#endif
1121+
1122 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
1123
1124 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
1125diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
1126index 2fd00b7..cfd5069 100644
1127--- a/arch/alpha/kernel/module.c
1128+++ b/arch/alpha/kernel/module.c
1129@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
1130
1131 /* The small sections were sorted to the end of the segment.
1132 The following should definitely cover them. */
1133- gp = (u64)me->module_core + me->core_size - 0x8000;
1134+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
1135 got = sechdrs[me->arch.gotsecindex].sh_addr;
1136
1137 for (i = 0; i < n; i++) {
1138diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
1139index 1402fcc..0b1abd2 100644
1140--- a/arch/alpha/kernel/osf_sys.c
1141+++ b/arch/alpha/kernel/osf_sys.c
1142@@ -1298,10 +1298,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
1143 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
1144
1145 static unsigned long
1146-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
1147- unsigned long limit)
1148+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
1149+ unsigned long limit, unsigned long flags)
1150 {
1151 struct vm_unmapped_area_info info;
1152+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
1153
1154 info.flags = 0;
1155 info.length = len;
1156@@ -1309,6 +1310,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
1157 info.high_limit = limit;
1158 info.align_mask = 0;
1159 info.align_offset = 0;
1160+ info.threadstack_offset = offset;
1161 return vm_unmapped_area(&info);
1162 }
1163
1164@@ -1341,20 +1343,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1165 merely specific addresses, but regions of memory -- perhaps
1166 this feature should be incorporated into all ports? */
1167
1168+#ifdef CONFIG_PAX_RANDMMAP
1169+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1170+#endif
1171+
1172 if (addr) {
1173- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
1174+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
1175 if (addr != (unsigned long) -ENOMEM)
1176 return addr;
1177 }
1178
1179 /* Next, try allocating at TASK_UNMAPPED_BASE. */
1180- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
1181- len, limit);
1182+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
1183+
1184 if (addr != (unsigned long) -ENOMEM)
1185 return addr;
1186
1187 /* Finally, try allocating in low memory. */
1188- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
1189+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
1190
1191 return addr;
1192 }
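
The new filp/flags parameters exist so gr_rand_threadstack_offset() can feed info.threadstack_offset into the unmapped-area search. A loosely hedged sketch of the idea — the real helper lives in grsecurity/ elsewhere in this patch, and its exact conditions and entropy width may differ:

    /* Sketch: only anonymous MAP_STACK requests (under RANDMMAP) get a
     * random page-aligned offset; vm_unmapped_area() then skips that much
     * past the spot it would otherwise return, so new thread stacks don't
     * sit at predictable distances from other mappings. All names and the
     * entropy width here are illustrative assumptions. */
    #define PAGE_SHIFT 13

    static unsigned long rand_threadstack_offset(int map_stack, int has_file,
    					     unsigned long rnd)
    {
    	if (!map_stack || has_file)
    		return 0;
    	return (rnd & 0xFFUL) << PAGE_SHIFT;	/* up to 255 pages of jitter */
    }
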
1193diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
1194index 98838a0..b304fb4 100644
1195--- a/arch/alpha/mm/fault.c
1196+++ b/arch/alpha/mm/fault.c
1197@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
1198 __reload_thread(pcb);
1199 }
1200
1201+#ifdef CONFIG_PAX_PAGEEXEC
1202+/*
1203+ * PaX: decide what to do with offenders (regs->pc = fault address)
1204+ *
1205+ * returns 1 when task should be killed
1206+ * 2 when patched PLT trampoline was detected
1207+ * 3 when unpatched PLT trampoline was detected
1208+ */
1209+static int pax_handle_fetch_fault(struct pt_regs *regs)
1210+{
1211+
1212+#ifdef CONFIG_PAX_EMUPLT
1213+ int err;
1214+
1215+ do { /* PaX: patched PLT emulation #1 */
1216+ unsigned int ldah, ldq, jmp;
1217+
1218+ err = get_user(ldah, (unsigned int *)regs->pc);
1219+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
1220+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
1221+
1222+ if (err)
1223+ break;
1224+
1225+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
1226+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
1227+ jmp == 0x6BFB0000U)
1228+ {
1229+ unsigned long r27, addr;
1230+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
1231+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
1232+
1233+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
1234+ err = get_user(r27, (unsigned long *)addr);
1235+ if (err)
1236+ break;
1237+
1238+ regs->r27 = r27;
1239+ regs->pc = r27;
1240+ return 2;
1241+ }
1242+ } while (0);
1243+
1244+ do { /* PaX: patched PLT emulation #2 */
1245+ unsigned int ldah, lda, br;
1246+
1247+ err = get_user(ldah, (unsigned int *)regs->pc);
1248+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
1249+ err |= get_user(br, (unsigned int *)(regs->pc+8));
1250+
1251+ if (err)
1252+ break;
1253+
1254+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
1255+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
1256+ (br & 0xFFE00000U) == 0xC3E00000U)
1257+ {
1258+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
1259+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
1260+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
1261+
1262+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
1263+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
1264+ return 2;
1265+ }
1266+ } while (0);
1267+
1268+ do { /* PaX: unpatched PLT emulation */
1269+ unsigned int br;
1270+
1271+ err = get_user(br, (unsigned int *)regs->pc);
1272+
1273+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
1274+ unsigned int br2, ldq, nop, jmp;
1275+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
1276+
1277+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
1278+ err = get_user(br2, (unsigned int *)addr);
1279+ err |= get_user(ldq, (unsigned int *)(addr+4));
1280+ err |= get_user(nop, (unsigned int *)(addr+8));
1281+ err |= get_user(jmp, (unsigned int *)(addr+12));
1282+ err |= get_user(resolver, (unsigned long *)(addr+16));
1283+
1284+ if (err)
1285+ break;
1286+
1287+ if (br2 == 0xC3600000U &&
1288+ ldq == 0xA77B000CU &&
1289+ nop == 0x47FF041FU &&
1290+ jmp == 0x6B7B0000U)
1291+ {
1292+ regs->r28 = regs->pc+4;
1293+ regs->r27 = addr+16;
1294+ regs->pc = resolver;
1295+ return 3;
1296+ }
1297+ }
1298+ } while (0);
1299+#endif
1300+
1301+ return 1;
1302+}
1303+
1304+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1305+{
1306+ unsigned long i;
1307+
1308+ printk(KERN_ERR "PAX: bytes at PC: ");
1309+ for (i = 0; i < 5; i++) {
1310+ unsigned int c;
1311+ if (get_user(c, (unsigned int *)pc+i))
1312+ printk(KERN_CONT "???????? ");
1313+ else
1314+ printk(KERN_CONT "%08x ", c);
1315+ }
1316+ printk("\n");
1317+}
1318+#endif
1319
1320 /*
1321 * This routine handles page faults. It determines the address,
1322@@ -133,8 +251,29 @@ retry:
1323 good_area:
1324 si_code = SEGV_ACCERR;
1325 if (cause < 0) {
1326- if (!(vma->vm_flags & VM_EXEC))
1327+ if (!(vma->vm_flags & VM_EXEC)) {
1328+
1329+#ifdef CONFIG_PAX_PAGEEXEC
1330+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
1331+ goto bad_area;
1332+
1333+ up_read(&mm->mmap_sem);
1334+ switch (pax_handle_fetch_fault(regs)) {
1335+
1336+#ifdef CONFIG_PAX_EMUPLT
1337+ case 2:
1338+ case 3:
1339+ return;
1340+#endif
1341+
1342+ }
1343+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
1344+ do_group_exit(SIGKILL);
1345+#else
1346 goto bad_area;
1347+#endif
1348+
1349+ }
1350 } else if (!cause) {
1351 /* Allow reads even for write-only mappings */
1352 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
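
The mask tests in pax_handle_fetch_fault() above are plain alpha instruction decoding: the upper 16 bits of a memory-format instruction hold the 6-bit opcode plus the ra/rb register fields, so comparing against 0x277B0000 pins down "ldah $27, imm($27)" while leaving the 16-bit displacement free. A userspace check of that first test:

    #include <stdio.h>

    /* ldah has opcode 0x09; with ra = rb = $27 the fixed upper half of
     * the instruction word is 0x277B. */
    static int is_ldah_r27_r27(unsigned int insn)
    {
    	return (insn & 0xFFFF0000U) == 0x277B0000U;
    }

    int main(void)
    {
    	printf("%d\n", is_ldah_r27_r27(0x277B1234U));	/* 1: trampoline insn */
    	printf("%d\n", is_ldah_r27_r27(0x47FF041FU));	/* 0: that's a nop */
    	return 0;
    }
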
1353diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
1354index 290f02ee..a639059 100644
1355--- a/arch/arm/Kconfig
1356+++ b/arch/arm/Kconfig
1357@@ -1787,7 +1787,7 @@ config ALIGNMENT_TRAP
1358
1359 config UACCESS_WITH_MEMCPY
1360 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
1361- depends on MMU
1362+ depends on MMU && !PAX_MEMORY_UDEREF
1363 default y if CPU_FEROCEON
1364 help
1365 Implement faster copy_to_user and clear_user methods for CPU
1366@@ -2051,6 +2051,7 @@ config XIP_PHYS_ADDR
1367 config KEXEC
1368 bool "Kexec system call (EXPERIMENTAL)"
1369 depends on (!SMP || PM_SLEEP_SMP)
1370+ depends on !GRKERNSEC_KMEM
1371 help
1372 kexec is a system call that implements the ability to shutdown your
1373 current kernel, and to start another kernel. It is like a reboot
1374diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
1375index 3040359..89b3dfc 100644
1376--- a/arch/arm/include/asm/atomic.h
1377+++ b/arch/arm/include/asm/atomic.h
1378@@ -18,17 +18,35 @@
1379 #include <asm/barrier.h>
1380 #include <asm/cmpxchg.h>
1381
1382+#ifdef CONFIG_GENERIC_ATOMIC64
1383+#include <asm-generic/atomic64.h>
1384+#endif
1385+
1386 #define ATOMIC_INIT(i) { (i) }
1387
1388 #ifdef __KERNEL__
1389
1390+#define _ASM_EXTABLE(from, to) \
1391+" .pushsection __ex_table,\"a\"\n"\
1392+" .align 3\n" \
1393+" .long " #from ", " #to"\n" \
1394+" .popsection"
1395+
1396 /*
1397 * On ARM, ordinary assignment (str instruction) doesn't clear the local
1398 * strex/ldrex monitor on some implementations. The reason we can use it for
1399 * atomic_set() is the clrex or dummy strex done on every exception return.
1400 */
1401 #define atomic_read(v) (*(volatile int *)&(v)->counter)
1402+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
1403+{
1404+ return v->counter;
1405+}
1406 #define atomic_set(v,i) (((v)->counter) = (i))
1407+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
1408+{
1409+ v->counter = i;
1410+}
1411
1412 #if __LINUX_ARM_ARCH__ >= 6
1413
1414@@ -44,6 +62,36 @@ static inline void atomic_add(int i, atomic_t *v)
1415
1416 prefetchw(&v->counter);
1417 __asm__ __volatile__("@ atomic_add\n"
1418+"1: ldrex %1, [%3]\n"
1419+" adds %0, %1, %4\n"
1420+
1421+#ifdef CONFIG_PAX_REFCOUNT
1422+" bvc 3f\n"
1423+"2: bkpt 0xf103\n"
1424+"3:\n"
1425+#endif
1426+
1427+" strex %1, %0, [%3]\n"
1428+" teq %1, #0\n"
1429+" bne 1b"
1430+
1431+#ifdef CONFIG_PAX_REFCOUNT
1432+"\n4:\n"
1433+ _ASM_EXTABLE(2b, 4b)
1434+#endif
1435+
1436+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1437+ : "r" (&v->counter), "Ir" (i)
1438+ : "cc");
1439+}
1440+
1441+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1442+{
1443+ unsigned long tmp;
1444+ int result;
1445+
1446+ prefetchw(&v->counter);
1447+ __asm__ __volatile__("@ atomic_add_unchecked\n"
1448 "1: ldrex %0, [%3]\n"
1449 " add %0, %0, %4\n"
1450 " strex %1, %0, [%3]\n"
1451@@ -63,6 +111,43 @@ static inline int atomic_add_return(int i, atomic_t *v)
1452 prefetchw(&v->counter);
1453
1454 __asm__ __volatile__("@ atomic_add_return\n"
1455+"1: ldrex %1, [%3]\n"
1456+" adds %0, %1, %4\n"
1457+
1458+#ifdef CONFIG_PAX_REFCOUNT
1459+" bvc 3f\n"
1460+" mov %0, %1\n"
1461+"2: bkpt 0xf103\n"
1462+"3:\n"
1463+#endif
1464+
1465+" strex %1, %0, [%3]\n"
1466+" teq %1, #0\n"
1467+" bne 1b"
1468+
1469+#ifdef CONFIG_PAX_REFCOUNT
1470+"\n4:\n"
1471+ _ASM_EXTABLE(2b, 4b)
1472+#endif
1473+
1474+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1475+ : "r" (&v->counter), "Ir" (i)
1476+ : "cc");
1477+
1478+ smp_mb();
1479+
1480+ return result;
1481+}
1482+
1483+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1484+{
1485+ unsigned long tmp;
1486+ int result;
1487+
1488+ smp_mb();
1489+ prefetchw(&v->counter);
1490+
1491+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
1492 "1: ldrex %0, [%3]\n"
1493 " add %0, %0, %4\n"
1494 " strex %1, %0, [%3]\n"
1495@@ -84,6 +169,36 @@ static inline void atomic_sub(int i, atomic_t *v)
1496
1497 prefetchw(&v->counter);
1498 __asm__ __volatile__("@ atomic_sub\n"
1499+"1: ldrex %1, [%3]\n"
1500+" subs %0, %1, %4\n"
1501+
1502+#ifdef CONFIG_PAX_REFCOUNT
1503+" bvc 3f\n"
1504+"2: bkpt 0xf103\n"
1505+"3:\n"
1506+#endif
1507+
1508+" strex %1, %0, [%3]\n"
1509+" teq %1, #0\n"
1510+" bne 1b"
1511+
1512+#ifdef CONFIG_PAX_REFCOUNT
1513+"\n4:\n"
1514+ _ASM_EXTABLE(2b, 4b)
1515+#endif
1516+
1517+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1518+ : "r" (&v->counter), "Ir" (i)
1519+ : "cc");
1520+}
1521+
1522+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1523+{
1524+ unsigned long tmp;
1525+ int result;
1526+
1527+ prefetchw(&v->counter);
1528+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
1529 "1: ldrex %0, [%3]\n"
1530 " sub %0, %0, %4\n"
1531 " strex %1, %0, [%3]\n"
1532@@ -103,11 +218,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1533 prefetchw(&v->counter);
1534
1535 __asm__ __volatile__("@ atomic_sub_return\n"
1536-"1: ldrex %0, [%3]\n"
1537-" sub %0, %0, %4\n"
1538+"1: ldrex %1, [%3]\n"
1539+" subs %0, %1, %4\n"
1540+
1541+#ifdef CONFIG_PAX_REFCOUNT
1542+" bvc 3f\n"
1543+" mov %0, %1\n"
1544+"2: bkpt 0xf103\n"
1545+"3:\n"
1546+#endif
1547+
1548 " strex %1, %0, [%3]\n"
1549 " teq %1, #0\n"
1550 " bne 1b"
1551+
1552+#ifdef CONFIG_PAX_REFCOUNT
1553+"\n4:\n"
1554+ _ASM_EXTABLE(2b, 4b)
1555+#endif
1556+
1557 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1558 : "r" (&v->counter), "Ir" (i)
1559 : "cc");
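
The hunks above all apply one pattern: switch add/sub to the flag-setting adds/subs, branch over a bkpt while the V (signed overflow) flag is clear, and register the bkpt in __ex_table so the fault fixup resumes past the strex — on overflow the store never happens. Isolated as a standalone sketch of that pattern, with the counter type simplified to a bare int:

    static inline void refcount_add_sketch(int i, int *counter)
    {
    	unsigned long tmp;
    	int result;

    	__asm__ __volatile__(
    "1:	ldrex	%1, [%3]\n"
    "	adds	%0, %1, %4\n"	/* sets V on signed overflow */
    "	bvc	3f\n"		/* no overflow: go store */
    "2:	bkpt	0xf103\n"	/* overflow: trap to the PaX handler */
    "3:	strex	%1, %0, [%3]\n"
    "	teq	%1, #0\n"
    "	bne	1b\n"
    "4:\n"
    "	.pushsection __ex_table,\"a\"\n"
    "	.align	3\n"
    "	.long	2b, 4b\n"	/* fixup resumes after the loop, skipping the store */
    "	.popsection"
    	: "=&r" (result), "=&r" (tmp), "+Qo" (*counter)
    	: "r" (counter), "Ir" (i)
    	: "cc");
    }
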
1560@@ -152,12 +281,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1561 __asm__ __volatile__ ("@ atomic_add_unless\n"
1562 "1: ldrex %0, [%4]\n"
1563 " teq %0, %5\n"
1564-" beq 2f\n"
1565-" add %1, %0, %6\n"
1566+" beq 4f\n"
1567+" adds %1, %0, %6\n"
1568+
1569+#ifdef CONFIG_PAX_REFCOUNT
1570+" bvc 3f\n"
1571+"2: bkpt 0xf103\n"
1572+"3:\n"
1573+#endif
1574+
1575 " strex %2, %1, [%4]\n"
1576 " teq %2, #0\n"
1577 " bne 1b\n"
1578-"2:"
1579+"4:"
1580+
1581+#ifdef CONFIG_PAX_REFCOUNT
1582+ _ASM_EXTABLE(2b, 4b)
1583+#endif
1584+
1585 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
1586 : "r" (&v->counter), "r" (u), "r" (a)
1587 : "cc");
1588@@ -168,6 +309,28 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1589 return oldval;
1590 }
1591
1592+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1593+{
1594+ unsigned long oldval, res;
1595+
1596+ smp_mb();
1597+
1598+ do {
1599+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1600+ "ldrex %1, [%3]\n"
1601+ "mov %0, #0\n"
1602+ "teq %1, %4\n"
1603+ "strexeq %0, %5, [%3]\n"
1604+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1605+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1606+ : "cc");
1607+ } while (res);
1608+
1609+ smp_mb();
1610+
1611+ return oldval;
1612+}
1613+
1614 #else /* ARM_ARCH_6 */
1615
1616 #ifdef CONFIG_SMP
1617@@ -186,7 +349,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1618
1619 return val;
1620 }
1621+
1622+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1623+{
1624+ return atomic_add_return(i, v);
1625+}
1626+
1627 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1628+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1629+{
1630+ (void) atomic_add_return(i, v);
1631+}
1632
1633 static inline int atomic_sub_return(int i, atomic_t *v)
1634 {
1635@@ -201,6 +374,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1636 return val;
1637 }
1638 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1639+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1640+{
1641+ (void) atomic_sub_return(i, v);
1642+}
1643
1644 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1645 {
1646@@ -216,6 +393,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1647 return ret;
1648 }
1649
1650+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1651+{
1652+ return atomic_cmpxchg(v, old, new);
1653+}
1654+
1655 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1656 {
1657 int c, old;
1658@@ -229,13 +411,33 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1659 #endif /* __LINUX_ARM_ARCH__ */
1660
1661 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1662+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1663+{
1664+ return xchg(&v->counter, new);
1665+}
1666
1667 #define atomic_inc(v) atomic_add(1, v)
1668+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1669+{
1670+ atomic_add_unchecked(1, v);
1671+}
1672 #define atomic_dec(v) atomic_sub(1, v)
1673+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1674+{
1675+ atomic_sub_unchecked(1, v);
1676+}
1677
1678 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1679+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1680+{
1681+ return atomic_add_return_unchecked(1, v) == 0;
1682+}
1683 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1684 #define atomic_inc_return(v) (atomic_add_return(1, v))
1685+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1686+{
1687+ return atomic_add_return_unchecked(1, v);
1688+}
1689 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1690 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1691
1692@@ -246,6 +448,14 @@ typedef struct {
1693 long long counter;
1694 } atomic64_t;
1695
1696+#ifdef CONFIG_PAX_REFCOUNT
1697+typedef struct {
1698+ long long counter;
1699+} atomic64_unchecked_t;
1700+#else
1701+typedef atomic64_t atomic64_unchecked_t;
1702+#endif
1703+
1704 #define ATOMIC64_INIT(i) { (i) }
1705
1706 #ifdef CONFIG_ARM_LPAE
1707@@ -262,6 +472,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1708 return result;
1709 }
1710
1711+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1712+{
1713+ long long result;
1714+
1715+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1716+" ldrd %0, %H0, [%1]"
1717+ : "=&r" (result)
1718+ : "r" (&v->counter), "Qo" (v->counter)
1719+ );
1720+
1721+ return result;
1722+}
1723+
1724 static inline void atomic64_set(atomic64_t *v, long long i)
1725 {
1726 __asm__ __volatile__("@ atomic64_set\n"
1727@@ -270,6 +493,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1728 : "r" (&v->counter), "r" (i)
1729 );
1730 }
1731+
1732+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1733+{
1734+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1735+" strd %2, %H2, [%1]"
1736+ : "=Qo" (v->counter)
1737+ : "r" (&v->counter), "r" (i)
1738+ );
1739+}
1740 #else
1741 static inline long long atomic64_read(const atomic64_t *v)
1742 {
1743@@ -284,6 +516,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1744 return result;
1745 }
1746
1747+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1748+{
1749+ long long result;
1750+
1751+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1752+" ldrexd %0, %H0, [%1]"
1753+ : "=&r" (result)
1754+ : "r" (&v->counter), "Qo" (v->counter)
1755+ );
1756+
1757+ return result;
1758+}
1759+
1760 static inline void atomic64_set(atomic64_t *v, long long i)
1761 {
1762 long long tmp;
1763@@ -298,6 +543,21 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1764 : "r" (&v->counter), "r" (i)
1765 : "cc");
1766 }
1767+
1768+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1769+{
1770+ long long tmp;
1771+
1772+ prefetchw(&v->counter);
1773+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1774+"1: ldrexd %0, %H0, [%2]\n"
1775+" strexd %0, %3, %H3, [%2]\n"
1776+" teq %0, #0\n"
1777+" bne 1b"
1778+ : "=&r" (tmp), "=Qo" (v->counter)
1779+ : "r" (&v->counter), "r" (i)
1780+ : "cc");
1781+}
1782 #endif
1783
1784 static inline void atomic64_add(long long i, atomic64_t *v)
1785@@ -309,6 +569,37 @@ static inline void atomic64_add(long long i, atomic64_t *v)
1786 __asm__ __volatile__("@ atomic64_add\n"
1787 "1: ldrexd %0, %H0, [%3]\n"
1788 " adds %Q0, %Q0, %Q4\n"
1789+" adcs %R0, %R0, %R4\n"
1790+
1791+#ifdef CONFIG_PAX_REFCOUNT
1792+" bvc 3f\n"
1793+"2: bkpt 0xf103\n"
1794+"3:\n"
1795+#endif
1796+
1797+" strexd %1, %0, %H0, [%3]\n"
1798+" teq %1, #0\n"
1799+" bne 1b"
1800+
1801+#ifdef CONFIG_PAX_REFCOUNT
1802+"\n4:\n"
1803+ _ASM_EXTABLE(2b, 4b)
1804+#endif
1805+
1806+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1807+ : "r" (&v->counter), "r" (i)
1808+ : "cc");
1809+}
1810+
1811+static inline void atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
1812+{
1813+ long long result;
1814+ unsigned long tmp;
1815+
1816+ prefetchw(&v->counter);
1817+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1818+"1: ldrexd %0, %H0, [%3]\n"
1819+" adds %Q0, %Q0, %Q4\n"
1820 " adc %R0, %R0, %R4\n"
1821 " strexd %1, %0, %H0, [%3]\n"
1822 " teq %1, #0\n"
1823@@ -329,6 +620,44 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
1824 __asm__ __volatile__("@ atomic64_add_return\n"
1825 "1: ldrexd %0, %H0, [%3]\n"
1826 " adds %Q0, %Q0, %Q4\n"
1827+" adcs %R0, %R0, %R4\n"
1828+
1829+#ifdef CONFIG_PAX_REFCOUNT
1830+" bvc 3f\n"
1831+" mov %0, %1\n"
1832+" mov %H0, %H1\n"
1833+"2: bkpt 0xf103\n"
1834+"3:\n"
1835+#endif
1836+
1837+" strexd %1, %0, %H0, [%3]\n"
1838+" teq %1, #0\n"
1839+" bne 1b"
1840+
1841+#ifdef CONFIG_PAX_REFCOUNT
1842+"\n4:\n"
1843+ _ASM_EXTABLE(2b, 4b)
1844+#endif
1845+
1846+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1847+ : "r" (&v->counter), "r" (i)
1848+ : "cc");
1849+
1850+ smp_mb();
1851+
1852+ return result;
1853+}
1854+
1855+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
1856+{
1857+ long long result;
1858+ unsigned long tmp;
1859+
1860+ smp_mb();
1861+
1862+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1863+"1: ldrexd %0, %H0, [%3]\n"
1864+" adds %Q0, %Q0, %Q4\n"
1865 " adc %R0, %R0, %R4\n"
1866 " strexd %1, %0, %H0, [%3]\n"
1867 " teq %1, #0\n"
1868@@ -351,6 +680,37 @@ static inline void atomic64_sub(long long i, atomic64_t *v)
1869 __asm__ __volatile__("@ atomic64_sub\n"
1870 "1: ldrexd %0, %H0, [%3]\n"
1871 " subs %Q0, %Q0, %Q4\n"
1872+" sbcs %R0, %R0, %R4\n"
1873+
1874+#ifdef CONFIG_PAX_REFCOUNT
1875+" bvc 3f\n"
1876+"2: bkpt 0xf103\n"
1877+"3:\n"
1878+#endif
1879+
1880+" strexd %1, %0, %H0, [%3]\n"
1881+" teq %1, #0\n"
1882+" bne 1b"
1883+
1884+#ifdef CONFIG_PAX_REFCOUNT
1885+"\n4:\n"
1886+ _ASM_EXTABLE(2b, 4b)
1887+#endif
1888+
1889+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1890+ : "r" (&v->counter), "r" (i)
1891+ : "cc");
1892+}
1893+
1894+static inline void atomic64_sub_unchecked(long long i, atomic64_unchecked_t *v)
1895+{
1896+ long long result;
1897+ unsigned long tmp;
1898+
1899+ prefetchw(&v->counter);
1900+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1901+"1: ldrexd %0, %H0, [%3]\n"
1902+" subs %Q0, %Q0, %Q4\n"
1903 " sbc %R0, %R0, %R4\n"
1904 " strexd %1, %0, %H0, [%3]\n"
1905 " teq %1, #0\n"
1906@@ -371,16 +731,29 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
1907 __asm__ __volatile__("@ atomic64_sub_return\n"
1908 "1: ldrexd %0, %H0, [%3]\n"
1909 " subs %Q0, %Q0, %Q4\n"
1910-" sbc %R0, %R0, %R4\n"
1911+" sbcs %R0, %R0, %R4\n"
1912+
1913+#ifdef CONFIG_PAX_REFCOUNT
1914+" bvc 3f\n"
1915+" mov %0, %1\n"
1916+" mov %H0, %H1\n"
1917+"2: bkpt 0xf103\n"
1918+"3:\n"
1919+#endif
1920+
1921 " strexd %1, %0, %H0, [%3]\n"
1922 " teq %1, #0\n"
1923 " bne 1b"
1924+
1925+#ifdef CONFIG_PAX_REFCOUNT
1926+"\n4:\n"
1927+ _ASM_EXTABLE(2b, 4b)
1928+#endif
1929+
1930 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1931 : "r" (&v->counter), "r" (i)
1932 : "cc");
1933
1934- smp_mb();
1935-
1936 return result;
1937 }
1938
1939@@ -410,6 +783,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1940 return oldval;
1941 }
1942
1943+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1944+ long long new)
1945+{
1946+ long long oldval;
1947+ unsigned long res;
1948+
1949+ smp_mb();
1950+
1951+ do {
1952+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1953+ "ldrexd %1, %H1, [%3]\n"
1954+ "mov %0, #0\n"
1955+ "teq %1, %4\n"
1956+ "teqeq %H1, %H4\n"
1957+ "strexdeq %0, %5, %H5, [%3]"
1958+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1959+ : "r" (&ptr->counter), "r" (old), "r" (new)
1960+ : "cc");
1961+ } while (res);
1962+
1963+ smp_mb();
1964+
1965+ return oldval;
1966+}
1967+
1968 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1969 {
1970 long long result;
1971@@ -435,21 +833,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1972 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1973 {
1974 long long result;
1975- unsigned long tmp;
1976+ u64 tmp;
1977
1978 smp_mb();
1979 prefetchw(&v->counter);
1980
1981 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1982-"1: ldrexd %0, %H0, [%3]\n"
1983-" subs %Q0, %Q0, #1\n"
1984-" sbc %R0, %R0, #0\n"
1985+"1: ldrexd %1, %H1, [%3]\n"
1986+" subs %Q0, %Q1, #1\n"
1987+" sbcs %R0, %R1, #0\n"
1988+
1989+#ifdef CONFIG_PAX_REFCOUNT
1990+" bvc 3f\n"
1991+" mov %Q0, %Q1\n"
1992+" mov %R0, %R1\n"
1993+"2: bkpt 0xf103\n"
1994+"3:\n"
1995+#endif
1996+
1997 " teq %R0, #0\n"
1998-" bmi 2f\n"
1999+" bmi 4f\n"
2000 " strexd %1, %0, %H0, [%3]\n"
2001 " teq %1, #0\n"
2002 " bne 1b\n"
2003-"2:"
2004+"4:\n"
2005+
2006+#ifdef CONFIG_PAX_REFCOUNT
2007+ _ASM_EXTABLE(2b, 4b)
2008+#endif
2009+
2010 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
2011 : "r" (&v->counter)
2012 : "cc");
2013@@ -473,13 +885,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
2014 " teq %0, %5\n"
2015 " teqeq %H0, %H5\n"
2016 " moveq %1, #0\n"
2017-" beq 2f\n"
2018+" beq 4f\n"
2019 " adds %Q0, %Q0, %Q6\n"
2020-" adc %R0, %R0, %R6\n"
2021+" adcs %R0, %R0, %R6\n"
2022+
2023+#ifdef CONFIG_PAX_REFCOUNT
2024+" bvc 3f\n"
2025+"2: bkpt 0xf103\n"
2026+"3:\n"
2027+#endif
2028+
2029 " strexd %2, %0, %H0, [%4]\n"
2030 " teq %2, #0\n"
2031 " bne 1b\n"
2032-"2:"
2033+"4:\n"
2034+
2035+#ifdef CONFIG_PAX_REFCOUNT
2036+ _ASM_EXTABLE(2b, 4b)
2037+#endif
2038+
2039 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
2040 : "r" (&v->counter), "r" (u), "r" (a)
2041 : "cc");
2042@@ -492,10 +916,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
2043
2044 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
2045 #define atomic64_inc(v) atomic64_add(1LL, (v))
2046+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
2047 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
2048+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
2049 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2050 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
2051 #define atomic64_dec(v) atomic64_sub(1LL, (v))
2052+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
2053 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
2054 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
2055 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
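
[editor's note] The hunks above wire PAX_REFCOUNT into the ARM atomics: the checked variants switch to the S-suffixed ALU ops (adds/subs/adcs/sbcs) so a signed overflow sets the V flag, "bvc" skips the trap on the no-overflow path, and the "bkpt 0xf103" plus the _ASM_EXTABLE(2b, 4b) entry let the fault handler report the overflow and leave the counter saturated, while the new *_unchecked variants keep plain wrapping arithmetic for counters that may legitimately wrap. A minimal single-threaded userspace analogue of that checked/unchecked split, using the GCC/clang overflow builtins and ignoring the ldrex/strex retry loop (names here are illustrative, not from the patch):

    #include <stdio.h>

    typedef struct { int counter; } atomic_like_t;

    /* checked: trap on signed overflow, like the bvc/bkpt path above */
    static void atomic_like_add(int i, atomic_like_t *v)
    {
            int sum;
            if (__builtin_add_overflow(v->counter, i, &sum))
                    __builtin_trap();       /* analogue of "bkpt 0xf103" */
            v->counter = sum;
    }

    /* unchecked: wrapping arithmetic, like atomic_add_unchecked() */
    static void atomic_like_add_unchecked(int i, atomic_like_t *v)
    {
            v->counter = (int)((unsigned int)v->counter + (unsigned int)i);
    }

    int main(void)
    {
            atomic_like_t a = { 0x7fffffff }, b = { 0x7fffffff };
            atomic_like_add_unchecked(1, &a);       /* wraps to INT_MIN quietly */
            printf("%d\n", a.counter);
            atomic_like_add(1, &b);                 /* traps: refcount overflow */
            return 0;
    }
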
2056diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
2057index c6a3e73..35cca85 100644
2058--- a/arch/arm/include/asm/barrier.h
2059+++ b/arch/arm/include/asm/barrier.h
2060@@ -63,7 +63,7 @@
2061 do { \
2062 compiletime_assert_atomic_type(*p); \
2063 smp_mb(); \
2064- ACCESS_ONCE(*p) = (v); \
2065+ ACCESS_ONCE_RW(*p) = (v); \
2066 } while (0)
2067
2068 #define smp_load_acquire(p) \
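
[editor's note] smp_store_release() is switched to ACCESS_ONCE_RW() because, under grsecurity, plain ACCESS_ONCE() reads through a const-qualified volatile pointer, so constified or read-only data cannot be written through it by accident. As best I recall the definitions look roughly like the following; treat this as a recalled sketch rather than a quote from the patch:

    /* read-only by default: a write through this cast will not compile */
    #define ACCESS_ONCE(x)          (*(const volatile typeof(x) *)&(x))
    /* explicit opt-in for intentional writes, as in smp_store_release() */
    #define ACCESS_ONCE_RW(x)       (*(volatile typeof(x) *)&(x))
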
2069diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
2070index 75fe66b..ba3dee4 100644
2071--- a/arch/arm/include/asm/cache.h
2072+++ b/arch/arm/include/asm/cache.h
2073@@ -4,8 +4,10 @@
2074 #ifndef __ASMARM_CACHE_H
2075 #define __ASMARM_CACHE_H
2076
2077+#include <linux/const.h>
2078+
2079 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
2080-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2081+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2082
2083 /*
2084 * Memory returned by kmalloc() may be used for DMA, so we must make
2085@@ -24,5 +26,6 @@
2086 #endif
2087
2088 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
2089+#define __read_only __attribute__ ((__section__(".data..read_only")))
2090
2091 #endif
2092diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
2093index fd43f7f..a817f5a 100644
2094--- a/arch/arm/include/asm/cacheflush.h
2095+++ b/arch/arm/include/asm/cacheflush.h
2096@@ -116,7 +116,7 @@ struct cpu_cache_fns {
2097 void (*dma_unmap_area)(const void *, size_t, int);
2098
2099 void (*dma_flush_range)(const void *, const void *);
2100-};
2101+} __no_const;
2102
2103 /*
2104 * Select the calling method
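
[editor's note] __no_const appears on several ops structures in this patch (cpu_cache_fns here; outer_cache_fns, cpu_user_fns, dma_ops, psci_operations and smp_operations below). The PaX constify gcc plugin treats structures composed of function pointers as const by default; tagging one __no_const opts it out when its members genuinely must be assigned at run time, e.g. during boot-time CPU/cache probing. A rough sketch of the annotation, assuming the plugin's attribute is named no_const:

    #ifdef CONSTIFY_PLUGIN
    #define __no_const __attribute__((no_const))
    #else
    #define __no_const
    #endif

    /* filled in at boot from the detected cache type, so it stays writable */
    struct cpu_cache_fns_like {
            void (*flush_kern_all)(void);
    } __no_const;
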
2105diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
2106index 5233151..87a71fa 100644
2107--- a/arch/arm/include/asm/checksum.h
2108+++ b/arch/arm/include/asm/checksum.h
2109@@ -37,7 +37,19 @@ __wsum
2110 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
2111
2112 __wsum
2113-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
2114+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
2115+
2116+static inline __wsum
2117+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
2118+{
2119+ __wsum ret;
2120+ pax_open_userland();
2121+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
2122+ pax_close_userland();
2123+ return ret;
2124+}
2125+
2126+
2127
2128 /*
2129 * Fold a partial checksum without adding pseudo headers
2130diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
2131index abb2c37..96db950 100644
2132--- a/arch/arm/include/asm/cmpxchg.h
2133+++ b/arch/arm/include/asm/cmpxchg.h
2134@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
2135
2136 #define xchg(ptr,x) \
2137 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
2138+#define xchg_unchecked(ptr,x) \
2139+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
2140
2141 #include <asm-generic/cmpxchg-local.h>
2142
2143diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
2144index 6ddbe44..b5e38b1 100644
2145--- a/arch/arm/include/asm/domain.h
2146+++ b/arch/arm/include/asm/domain.h
2147@@ -48,18 +48,37 @@
2148 * Domain types
2149 */
2150 #define DOMAIN_NOACCESS 0
2151-#define DOMAIN_CLIENT 1
2152 #ifdef CONFIG_CPU_USE_DOMAINS
2153+#define DOMAIN_USERCLIENT 1
2154+#define DOMAIN_KERNELCLIENT 1
2155 #define DOMAIN_MANAGER 3
2156+#define DOMAIN_VECTORS DOMAIN_USER
2157 #else
2158+
2159+#ifdef CONFIG_PAX_KERNEXEC
2160 #define DOMAIN_MANAGER 1
2161+#define DOMAIN_KERNEXEC 3
2162+#else
2163+#define DOMAIN_MANAGER 1
2164+#endif
2165+
2166+#ifdef CONFIG_PAX_MEMORY_UDEREF
2167+#define DOMAIN_USERCLIENT 0
2168+#define DOMAIN_UDEREF 1
2169+#define DOMAIN_VECTORS DOMAIN_KERNEL
2170+#else
2171+#define DOMAIN_USERCLIENT 1
2172+#define DOMAIN_VECTORS DOMAIN_USER
2173+#endif
2174+#define DOMAIN_KERNELCLIENT 1
2175+
2176 #endif
2177
2178 #define domain_val(dom,type) ((type) << (2*(dom)))
2179
2180 #ifndef __ASSEMBLY__
2181
2182-#ifdef CONFIG_CPU_USE_DOMAINS
2183+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2184 static inline void set_domain(unsigned val)
2185 {
2186 asm volatile(
2187@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
2188 isb();
2189 }
2190
2191-#define modify_domain(dom,type) \
2192- do { \
2193- struct thread_info *thread = current_thread_info(); \
2194- unsigned int domain = thread->cpu_domain; \
2195- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
2196- thread->cpu_domain = domain | domain_val(dom, type); \
2197- set_domain(thread->cpu_domain); \
2198- } while (0)
2199-
2200+extern void modify_domain(unsigned int dom, unsigned int type);
2201 #else
2202 static inline void set_domain(unsigned val) { }
2203 static inline void modify_domain(unsigned dom, unsigned type) { }
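
[editor's note] The DACR holds sixteen 2-bit access fields, one per domain, which is what domain_val(dom, type) == type << (2*dom) encodes. The hunk above splits the old DOMAIN_CLIENT into DOMAIN_USERCLIENT/DOMAIN_KERNELCLIENT so UDEREF can flip only the user domain between "no access" and "client" while KERNEXEC toggles the kernel domain between "client" and "manager". A small sketch of how a register value is composed; the domain numbering below is illustrative, not the kernel's:

    #define domain_val(dom, type)   ((type) << (2 * (dom)))

    enum { DOM_USER = 0, DOM_KERNEL = 1, DOM_IO = 2 };  /* illustrative numbering */
    enum { NOACCESS = 0, CLIENT = 1, MANAGER = 3 };

    /* e.g. the UDEREF "running in the kernel" state: userland inaccessible,
     * kernel and IO accessible subject to page-table permission checks */
    unsigned long dacr = domain_val(DOM_USER, NOACCESS) |
                         domain_val(DOM_KERNEL, CLIENT) |
                         domain_val(DOM_IO, CLIENT);
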
2204diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
2205index f4b46d3..abc9b2b 100644
2206--- a/arch/arm/include/asm/elf.h
2207+++ b/arch/arm/include/asm/elf.h
2208@@ -114,7 +114,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
2209 the loader. We need to make sure that it is out of the way of the program
2210 that it will "exec", and that there is sufficient room for the brk. */
2211
2212-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
2213+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2214+
2215+#ifdef CONFIG_PAX_ASLR
2216+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
2217+
2218+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
2219+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
2220+#endif
2221
2222 /* When the program starts, a1 contains a pointer to a function to be
2223 registered with atexit, as per the SVR4 ABI. A value of 0 means we
2224@@ -124,10 +131,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
2225 extern void elf_set_personality(const struct elf32_hdr *);
2226 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
2227
2228-struct mm_struct;
2229-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2230-#define arch_randomize_brk arch_randomize_brk
2231-
2232 #ifdef CONFIG_MMU
2233 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2234 struct linux_binprm;
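
[editor's note] PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits (16, or 10 for 32-bit-personality tasks) that PaX mixes into the mmap and stack bases, and PAX_ELF_ET_DYN_BASE is the floor for randomized ET_DYN binaries. Roughly, the randomized base is the fixed base plus a page-aligned delta of that many bits; a sketch under that assumption, with a hypothetical entropy helper standing in for the kernel's RNG:

    /* illustrative: a delta of len_bits random bits, shifted to page units */
    extern unsigned long get_random_bits(unsigned int bits);    /* hypothetical helper */

    static unsigned long pax_delta(unsigned int len_bits, unsigned int page_shift)
    {
            return (get_random_bits(len_bits) & ((1UL << len_bits) - 1)) << page_shift;
    }
    /* 16 bits of entropy at 4K pages -> up to 256MB of mmap base jitter */
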
2235diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
2236index de53547..52b9a28 100644
2237--- a/arch/arm/include/asm/fncpy.h
2238+++ b/arch/arm/include/asm/fncpy.h
2239@@ -81,7 +81,9 @@
2240 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
2241 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
2242 \
2243+ pax_open_kernel(); \
2244 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
2245+ pax_close_kernel(); \
2246 flush_icache_range((unsigned long)(dest_buf), \
2247 (unsigned long)(dest_buf) + (size)); \
2248 \
2249diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
2250index 53e69da..3fdc896 100644
2251--- a/arch/arm/include/asm/futex.h
2252+++ b/arch/arm/include/asm/futex.h
2253@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
2254 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
2255 return -EFAULT;
2256
2257+ pax_open_userland();
2258+
2259 smp_mb();
2260 /* Prefetching cannot fault */
2261 prefetchw(uaddr);
2262@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
2263 : "cc", "memory");
2264 smp_mb();
2265
2266+ pax_close_userland();
2267+
2268 *uval = val;
2269 return ret;
2270 }
2271@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
2272 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
2273 return -EFAULT;
2274
2275+ pax_open_userland();
2276+
2277 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
2278 "1: " TUSER(ldr) " %1, [%4]\n"
2279 " teq %1, %2\n"
2280@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
2281 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
2282 : "cc", "memory");
2283
2284+ pax_close_userland();
2285+
2286 *uval = val;
2287 return ret;
2288 }
2289@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
2290 return -EFAULT;
2291
2292 pagefault_disable(); /* implies preempt_disable() */
2293+ pax_open_userland();
2294
2295 switch (op) {
2296 case FUTEX_OP_SET:
2297@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
2298 ret = -ENOSYS;
2299 }
2300
2301+ pax_close_userland();
2302 pagefault_enable(); /* subsumes preempt_enable() */
2303
2304 if (!ret) {
2305diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
2306index 83eb2f7..ed77159 100644
2307--- a/arch/arm/include/asm/kmap_types.h
2308+++ b/arch/arm/include/asm/kmap_types.h
2309@@ -4,6 +4,6 @@
2310 /*
2311 * This is the "bare minimum". AIO seems to require this.
2312 */
2313-#define KM_TYPE_NR 16
2314+#define KM_TYPE_NR 17
2315
2316 #endif
2317diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
2318index 9e614a1..3302cca 100644
2319--- a/arch/arm/include/asm/mach/dma.h
2320+++ b/arch/arm/include/asm/mach/dma.h
2321@@ -22,7 +22,7 @@ struct dma_ops {
2322 int (*residue)(unsigned int, dma_t *); /* optional */
2323 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
2324 const char *type;
2325-};
2326+} __do_const;
2327
2328 struct dma_struct {
2329 void *addr; /* single DMA address */
2330diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
2331index f98c7f3..e5c626d 100644
2332--- a/arch/arm/include/asm/mach/map.h
2333+++ b/arch/arm/include/asm/mach/map.h
2334@@ -23,17 +23,19 @@ struct map_desc {
2335
2336 /* types 0-3 are defined in asm/io.h */
2337 enum {
2338- MT_UNCACHED = 4,
2339- MT_CACHECLEAN,
2340- MT_MINICLEAN,
2341+ MT_UNCACHED_RW = 4,
2342+ MT_CACHECLEAN_RO,
2343+ MT_MINICLEAN_RO,
2344 MT_LOW_VECTORS,
2345 MT_HIGH_VECTORS,
2346- MT_MEMORY_RWX,
2347+ __MT_MEMORY_RWX,
2348 MT_MEMORY_RW,
2349- MT_ROM,
2350- MT_MEMORY_RWX_NONCACHED,
2351+ MT_MEMORY_RX,
2352+ MT_ROM_RX,
2353+ MT_MEMORY_RW_NONCACHED,
2354+ MT_MEMORY_RX_NONCACHED,
2355 MT_MEMORY_RW_DTCM,
2356- MT_MEMORY_RWX_ITCM,
2357+ MT_MEMORY_RX_ITCM,
2358 MT_MEMORY_RW_SO,
2359 MT_MEMORY_DMA_READY,
2360 };
2361diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
2362index 891a56b..48f337e 100644
2363--- a/arch/arm/include/asm/outercache.h
2364+++ b/arch/arm/include/asm/outercache.h
2365@@ -36,7 +36,7 @@ struct outer_cache_fns {
2366
2367 /* This is an ARM L2C thing */
2368 void (*write_sec)(unsigned long, unsigned);
2369-};
2370+} __no_const;
2371
2372 extern struct outer_cache_fns outer_cache;
2373
2374diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
2375index 4355f0e..cd9168e 100644
2376--- a/arch/arm/include/asm/page.h
2377+++ b/arch/arm/include/asm/page.h
2378@@ -23,6 +23,7 @@
2379
2380 #else
2381
2382+#include <linux/compiler.h>
2383 #include <asm/glue.h>
2384
2385 /*
2386@@ -114,7 +115,7 @@ struct cpu_user_fns {
2387 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
2388 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
2389 unsigned long vaddr, struct vm_area_struct *vma);
2390-};
2391+} __no_const;
2392
2393 #ifdef MULTI_USER
2394 extern struct cpu_user_fns cpu_user;
2395diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
2396index 78a7793..e3dc06c 100644
2397--- a/arch/arm/include/asm/pgalloc.h
2398+++ b/arch/arm/include/asm/pgalloc.h
2399@@ -17,6 +17,7 @@
2400 #include <asm/processor.h>
2401 #include <asm/cacheflush.h>
2402 #include <asm/tlbflush.h>
2403+#include <asm/system_info.h>
2404
2405 #define check_pgt_cache() do { } while (0)
2406
2407@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2408 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
2409 }
2410
2411+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2412+{
2413+ pud_populate(mm, pud, pmd);
2414+}
2415+
2416 #else /* !CONFIG_ARM_LPAE */
2417
2418 /*
2419@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2420 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
2421 #define pmd_free(mm, pmd) do { } while (0)
2422 #define pud_populate(mm,pmd,pte) BUG()
2423+#define pud_populate_kernel(mm,pmd,pte) BUG()
2424
2425 #endif /* CONFIG_ARM_LPAE */
2426
2427@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
2428 __free_page(pte);
2429 }
2430
2431+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
2432+{
2433+#ifdef CONFIG_ARM_LPAE
2434+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
2435+#else
2436+ if (addr & SECTION_SIZE)
2437+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
2438+ else
2439+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
2440+#endif
2441+ flush_pmd_entry(pmdp);
2442+}
2443+
2444 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
2445 pmdval_t prot)
2446 {
2447@@ -157,7 +177,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
2448 static inline void
2449 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
2450 {
2451- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
2452+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
2453 }
2454 #define pmd_pgtable(pmd) pmd_page(pmd)
2455
2456diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
2457index 5cfba15..f415e1a 100644
2458--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
2459+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
2460@@ -20,12 +20,15 @@
2461 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
2462 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
2463 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
2464+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
2465 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
2466 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
2467 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
2468+
2469 /*
2470 * - section
2471 */
2472+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
2473 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
2474 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
2475 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
2476@@ -37,6 +40,7 @@
2477 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
2478 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
2479 #define PMD_SECT_AF (_AT(pmdval_t, 0))
2480+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
2481
2482 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
2483 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
2484@@ -66,6 +70,7 @@
2485 * - extended small page/tiny page
2486 */
2487 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
2488+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
2489 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
2490 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
2491 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
2492diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
2493index 219ac88..73ec32a 100644
2494--- a/arch/arm/include/asm/pgtable-2level.h
2495+++ b/arch/arm/include/asm/pgtable-2level.h
2496@@ -126,6 +126,9 @@
2497 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
2498 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
2499
2500+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
2501+#define L_PTE_PXN (_AT(pteval_t, 0))
2502+
2503 /*
2504 * These are the memory types, defined to be compatible with
2505 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
2506diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
2507index 626989f..9d67a33 100644
2508--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
2509+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
2510@@ -75,6 +75,7 @@
2511 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
2512 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
2513 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
2514+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2515 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
2516
2517 /*
2518diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
2519index 85c60ad..b0bbd7e 100644
2520--- a/arch/arm/include/asm/pgtable-3level.h
2521+++ b/arch/arm/include/asm/pgtable-3level.h
2522@@ -82,6 +82,7 @@
2523 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
2524 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
2525 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
2526+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2527 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
2528 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
2529 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
2530@@ -95,6 +96,7 @@
2531 /*
2532 * To be used in assembly code with the upper page attributes.
2533 */
2534+#define L_PTE_PXN_HIGH (1 << (53 - 32))
2535 #define L_PTE_XN_HIGH (1 << (54 - 32))
2536 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
2537
2538diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
2539index 5478e5d..f5b5cb3 100644
2540--- a/arch/arm/include/asm/pgtable.h
2541+++ b/arch/arm/include/asm/pgtable.h
2542@@ -33,6 +33,9 @@
2543 #include <asm/pgtable-2level.h>
2544 #endif
2545
2546+#define ktla_ktva(addr) (addr)
2547+#define ktva_ktla(addr) (addr)
2548+
2549 /*
2550 * Just any arbitrary offset to the start of the vmalloc VM area: the
2551 * current 8MB value just means that there will be a 8MB "hole" after the
2552@@ -48,6 +51,9 @@
2553 #define LIBRARY_TEXT_START 0x0c000000
2554
2555 #ifndef __ASSEMBLY__
2556+extern pteval_t __supported_pte_mask;
2557+extern pmdval_t __supported_pmd_mask;
2558+
2559 extern void __pte_error(const char *file, int line, pte_t);
2560 extern void __pmd_error(const char *file, int line, pmd_t);
2561 extern void __pgd_error(const char *file, int line, pgd_t);
2562@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2563 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2564 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2565
2566+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2567+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2568+
2569+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2570+#include <asm/domain.h>
2571+#include <linux/thread_info.h>
2572+#include <linux/preempt.h>
2573+
2574+static inline int test_domain(int domain, int domaintype)
2575+{
2576+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2577+}
2578+#endif
2579+
2580+#ifdef CONFIG_PAX_KERNEXEC
2581+static inline unsigned long pax_open_kernel(void) {
2582+#ifdef CONFIG_ARM_LPAE
2583+ /* TODO */
2584+#else
2585+ preempt_disable();
2586+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2587+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2588+#endif
2589+ return 0;
2590+}
2591+
2592+static inline unsigned long pax_close_kernel(void) {
2593+#ifdef CONFIG_ARM_LPAE
2594+ /* TODO */
2595+#else
2596+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2597+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2598+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2599+ preempt_enable_no_resched();
2600+#endif
2601+ return 0;
2602+}
2603+#else
2604+static inline unsigned long pax_open_kernel(void) { return 0; }
2605+static inline unsigned long pax_close_kernel(void) { return 0; }
2606+#endif
2607+
2608 /*
2609 * This is the lowest virtual address we can permit any user space
2610 * mapping to be mapped at. This is particularly important for
2611@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2612 /*
2613 * The pgprot_* and protection_map entries will be fixed up in runtime
2614 * to include the cachable and bufferable bits based on memory policy,
2615- * as well as any architecture dependent bits like global/ASID and SMP
2616- * shared mapping bits.
2617+ * as well as any architecture dependent bits like global/ASID, PXN,
2618+ * and SMP shared mapping bits.
2619 */
2620 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2621
2622@@ -265,7 +313,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2623 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2624 {
2625 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2626- L_PTE_NONE | L_PTE_VALID;
2627+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2628 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2629 return pte;
2630 }
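
[editor's note] Under KERNEXEC, pax_open_kernel() above disables preemption and promotes DOMAIN_KERNEL to the manager type, for which the CPU ignores page-table permission bits, so a short deliberate write to otherwise read-only kernel data can proceed; pax_close_kernel() restores client mode. The callers later in this patch (fncpy.h, fiq.c, patch.c) all follow the same bracket pattern; a sketch of the idiom, where "ro_table" is an illustrative __read_only object rather than anything from the patch:

    static int update_ro_entry(unsigned long *ro_table, int idx, unsigned long val)
    {
            pax_open_kernel();      /* kernel domain -> manager, preempt off */
            ro_table[idx] = val;    /* write to .data..read_only now succeeds */
            pax_close_kernel();     /* back to client, preempt on */
            return 0;
    }
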
2631diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2632index c25ef3e..735f14b 100644
2633--- a/arch/arm/include/asm/psci.h
2634+++ b/arch/arm/include/asm/psci.h
2635@@ -32,7 +32,7 @@ struct psci_operations {
2636 int (*affinity_info)(unsigned long target_affinity,
2637 unsigned long lowest_affinity_level);
2638 int (*migrate_info_type)(void);
2639-};
2640+} __no_const;
2641
2642 extern struct psci_operations psci_ops;
2643 extern struct smp_operations psci_smp_ops;
2644diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2645index 2ec765c..beb1fe16 100644
2646--- a/arch/arm/include/asm/smp.h
2647+++ b/arch/arm/include/asm/smp.h
2648@@ -113,7 +113,7 @@ struct smp_operations {
2649 int (*cpu_disable)(unsigned int cpu);
2650 #endif
2651 #endif
2652-};
2653+} __no_const;
2654
2655 struct of_cpu_method {
2656 const char *method;
2657diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2658index e4e4208..086684a 100644
2659--- a/arch/arm/include/asm/thread_info.h
2660+++ b/arch/arm/include/asm/thread_info.h
2661@@ -88,9 +88,9 @@ struct thread_info {
2662 .flags = 0, \
2663 .preempt_count = INIT_PREEMPT_COUNT, \
2664 .addr_limit = KERNEL_DS, \
2665- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2666- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2667- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2668+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2669+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2670+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2671 .restart_block = { \
2672 .fn = do_no_restart_syscall, \
2673 }, \
2674@@ -164,7 +164,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2675 #define TIF_SYSCALL_AUDIT 9
2676 #define TIF_SYSCALL_TRACEPOINT 10
2677 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2678-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2679+/* within 8 bits of TIF_SYSCALL_TRACE
2680+ * to meet flexible second operand requirements
2681+ */
2682+#define TIF_GRSEC_SETXID 12
2683+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2684 #define TIF_USING_IWMMXT 17
2685 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2686 #define TIF_RESTORE_SIGMASK 20
2687@@ -178,10 +182,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2688 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2689 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2690 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2691+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2692
2693 /* Checks for any syscall work in entry-common.S */
2694 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2695- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2696+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2697
2698 /*
2699 * Change these and you break ASM code in entry-common.S
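
[editor's note] The "within 8 bits of TIF_SYSCALL_TRACE" comment above is an ARM encoding constraint: a data-processing immediate is an 8-bit value rotated right by an even amount, so the tst/bic masks built from _TIF_SYSCALL_WORK must keep all their set bits inside one such window. With TIF_GRSEC_SETXID at bit 12 the mask is 0x1f00 (bits 8-12), which encodes; parking the flag at, say, bit 17 would not. A standalone checker:

    #include <stdio.h>

    static unsigned int ror32(unsigned int v, unsigned int n)
    {
            n &= 31;
            return n ? (v >> n) | (v << (32 - n)) : v;
    }

    /* valid ARM immediate: an 8-bit value rotated right by an even amount */
    static int arm_imm_encodable(unsigned int v)
    {
            for (unsigned int rot = 0; rot < 32; rot += 2)
                    if ((ror32(v, rot) & ~0xffu) == 0)
                            return 1;
            return 0;
    }

    int main(void)
    {
            printf("0x1f00:  %d\n", arm_imm_encodable(0x1f00));     /* 1: bits 8-12 fit */
            printf("0x20f00: %d\n", arm_imm_encodable(0x20f00));    /* 0: bits 8-11 + 17 don't */
            return 0;
    }
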
2700diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2701index 75d9579..b5b40e4 100644
2702--- a/arch/arm/include/asm/uaccess.h
2703+++ b/arch/arm/include/asm/uaccess.h
2704@@ -18,6 +18,7 @@
2705 #include <asm/domain.h>
2706 #include <asm/unified.h>
2707 #include <asm/compiler.h>
2708+#include <asm/pgtable.h>
2709
2710 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2711 #include <asm-generic/uaccess-unaligned.h>
2712@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2713 static inline void set_fs(mm_segment_t fs)
2714 {
2715 current_thread_info()->addr_limit = fs;
2716- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2717+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2718 }
2719
2720 #define segment_eq(a,b) ((a) == (b))
2721
2722+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2723+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2724+
2725+static inline void pax_open_userland(void)
2726+{
2727+
2728+#ifdef CONFIG_PAX_MEMORY_UDEREF
2729+ if (segment_eq(get_fs(), USER_DS)) {
2730+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2731+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2732+ }
2733+#endif
2734+
2735+}
2736+
2737+static inline void pax_close_userland(void)
2738+{
2739+
2740+#ifdef CONFIG_PAX_MEMORY_UDEREF
2741+ if (segment_eq(get_fs(), USER_DS)) {
2742+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2743+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2744+ }
2745+#endif
2746+
2747+}
2748+
2749 #define __addr_ok(addr) ({ \
2750 unsigned long flag; \
2751 __asm__("cmp %2, %0; movlo %0, #0" \
2752@@ -150,8 +178,12 @@ extern int __get_user_4(void *);
2753
2754 #define get_user(x,p) \
2755 ({ \
2756+ int __e; \
2757 might_fault(); \
2758- __get_user_check(x,p); \
2759+ pax_open_userland(); \
2760+ __e = __get_user_check(x,p); \
2761+ pax_close_userland(); \
2762+ __e; \
2763 })
2764
2765 extern int __put_user_1(void *, unsigned int);
2766@@ -196,8 +228,12 @@ extern int __put_user_8(void *, unsigned long long);
2767
2768 #define put_user(x,p) \
2769 ({ \
2770+ int __e; \
2771 might_fault(); \
2772- __put_user_check(x,p); \
2773+ pax_open_userland(); \
2774+ __e = __put_user_check(x,p); \
2775+ pax_close_userland(); \
2776+ __e; \
2777 })
2778
2779 #else /* CONFIG_MMU */
2780@@ -221,6 +257,7 @@ static inline void set_fs(mm_segment_t fs)
2781
2782 #endif /* CONFIG_MMU */
2783
2784+#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size))
2785 #define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
2786
2787 #define user_addr_max() \
2788@@ -238,13 +275,17 @@ static inline void set_fs(mm_segment_t fs)
2789 #define __get_user(x,ptr) \
2790 ({ \
2791 long __gu_err = 0; \
2792+ pax_open_userland(); \
2793 __get_user_err((x),(ptr),__gu_err); \
2794+ pax_close_userland(); \
2795 __gu_err; \
2796 })
2797
2798 #define __get_user_error(x,ptr,err) \
2799 ({ \
2800+ pax_open_userland(); \
2801 __get_user_err((x),(ptr),err); \
2802+ pax_close_userland(); \
2803 (void) 0; \
2804 })
2805
2806@@ -320,13 +361,17 @@ do { \
2807 #define __put_user(x,ptr) \
2808 ({ \
2809 long __pu_err = 0; \
2810+ pax_open_userland(); \
2811 __put_user_err((x),(ptr),__pu_err); \
2812+ pax_close_userland(); \
2813 __pu_err; \
2814 })
2815
2816 #define __put_user_error(x,ptr,err) \
2817 ({ \
2818+ pax_open_userland(); \
2819 __put_user_err((x),(ptr),err); \
2820+ pax_close_userland(); \
2821 (void) 0; \
2822 })
2823
2824@@ -426,11 +471,44 @@ do { \
2825
2826
2827 #ifdef CONFIG_MMU
2828-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2829-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2830+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2831+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2832+
2833+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2834+{
2835+ unsigned long ret;
2836+
2837+ check_object_size(to, n, false);
2838+ pax_open_userland();
2839+ ret = ___copy_from_user(to, from, n);
2840+ pax_close_userland();
2841+ return ret;
2842+}
2843+
2844+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2845+{
2846+ unsigned long ret;
2847+
2848+ check_object_size(from, n, true);
2849+ pax_open_userland();
2850+ ret = ___copy_to_user(to, from, n);
2851+ pax_close_userland();
2852+ return ret;
2853+}
2854+
2855 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2856-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2857+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2858 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2859+
2860+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2861+{
2862+ unsigned long ret;
2863+ pax_open_userland();
2864+ ret = ___clear_user(addr, n);
2865+ pax_close_userland();
2866+ return ret;
2867+}
2868+
2869 #else
2870 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2871 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2872@@ -439,6 +517,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2873
2874 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2875 {
2876+ if ((long)n < 0)
2877+ return n;
2878+
2879 if (access_ok(VERIFY_READ, from, n))
2880 n = __copy_from_user(to, from, n);
2881 else /* security hole - plug it */
2882@@ -448,6 +529,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2883
2884 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2885 {
2886+ if ((long)n < 0)
2887+ return n;
2888+
2889 if (access_ok(VERIFY_WRITE, to, n))
2890 n = __copy_to_user(to, from, n);
2891 return n;
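
[editor's note] The added "(long)n < 0" guards in copy_{from,to}_user reject absurd sizes before access_ok(): a negative length that was sign-extended into the unsigned n shows up as a huge value with the top bit set, and bailing out early, returning n as "nothing copied", keeps it away from __copy_*_user. A userspace analogue of the check:

    #include <stdio.h>

    /* mirrors the guard: report the whole request as uncopied */
    static unsigned long checked_copy(unsigned long n)
    {
            if ((long)n < 0)
                    return n;       /* top bit set: refuse */
            return 0;               /* pretend the copy succeeded */
    }

    int main(void)
    {
            int user_len = -1;                              /* e.g. an unvalidated length */
            unsigned long n = (unsigned long)(long)user_len;/* sign-extends to ~0UL */
            printf("uncopied: %#lx\n", checked_copy(n));
            return 0;
    }
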
2892diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2893index 5af0ed1..cea83883 100644
2894--- a/arch/arm/include/uapi/asm/ptrace.h
2895+++ b/arch/arm/include/uapi/asm/ptrace.h
2896@@ -92,7 +92,7 @@
2897 * ARMv7 groups of PSR bits
2898 */
2899 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2900-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2901+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2902 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2903 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2904
2905diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2906index f7b450f..f5364c5 100644
2907--- a/arch/arm/kernel/armksyms.c
2908+++ b/arch/arm/kernel/armksyms.c
2909@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2910
2911 /* networking */
2912 EXPORT_SYMBOL(csum_partial);
2913-EXPORT_SYMBOL(csum_partial_copy_from_user);
2914+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2915 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2916 EXPORT_SYMBOL(__csum_ipv6_magic);
2917
2918@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2919 #ifdef CONFIG_MMU
2920 EXPORT_SYMBOL(copy_page);
2921
2922-EXPORT_SYMBOL(__copy_from_user);
2923-EXPORT_SYMBOL(__copy_to_user);
2924-EXPORT_SYMBOL(__clear_user);
2925+EXPORT_SYMBOL(___copy_from_user);
2926+EXPORT_SYMBOL(___copy_to_user);
2927+EXPORT_SYMBOL(___clear_user);
2928
2929 EXPORT_SYMBOL(__get_user_1);
2930 EXPORT_SYMBOL(__get_user_2);
2931diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2932index 52a949a..d8bbcab 100644
2933--- a/arch/arm/kernel/entry-armv.S
2934+++ b/arch/arm/kernel/entry-armv.S
2935@@ -47,6 +47,87 @@
2936 9997:
2937 .endm
2938
2939+ .macro pax_enter_kernel
2940+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2941+ @ make aligned space for saved DACR
2942+ sub sp, sp, #8
2943+ @ save regs
2944+ stmdb sp!, {r1, r2}
2945+ @ read DACR from cpu_domain into r1
2946+ mov r2, sp
2947+ @ assume 8K pages, since we have to split the immediate in two
2948+ bic r2, r2, #(0x1fc0)
2949+ bic r2, r2, #(0x3f)
2950+ ldr r1, [r2, #TI_CPU_DOMAIN]
2951+ @ store old DACR on stack
2952+ str r1, [sp, #8]
2953+#ifdef CONFIG_PAX_KERNEXEC
2954+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2955+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2956+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2957+#endif
2958+#ifdef CONFIG_PAX_MEMORY_UDEREF
2959+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2960+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2961+#endif
2962+ @ write r1 to current_thread_info()->cpu_domain
2963+ str r1, [r2, #TI_CPU_DOMAIN]
2964+ @ write r1 to DACR
2965+ mcr p15, 0, r1, c3, c0, 0
2966+ @ instruction sync
2967+ instr_sync
2968+ @ restore regs
2969+ ldmia sp!, {r1, r2}
2970+#endif
2971+ .endm
2972+
2973+ .macro pax_open_userland
2974+#ifdef CONFIG_PAX_MEMORY_UDEREF
2975+ @ save regs
2976+ stmdb sp!, {r0, r1}
2977+ @ read DACR from cpu_domain into r1
2978+ mov r0, sp
2979+ @ assume 8K pages, since we have to split the immediate in two
2980+ bic r0, r0, #(0x1fc0)
2981+ bic r0, r0, #(0x3f)
2982+ ldr r1, [r0, #TI_CPU_DOMAIN]
2983+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2984+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2985+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2986+ @ write r1 to current_thread_info()->cpu_domain
2987+ str r1, [r0, #TI_CPU_DOMAIN]
2988+ @ write r1 to DACR
2989+ mcr p15, 0, r1, c3, c0, 0
2990+ @ instruction sync
2991+ instr_sync
2992+ @ restore regs
2993+ ldmia sp!, {r0, r1}
2994+#endif
2995+ .endm
2996+
2997+ .macro pax_close_userland
2998+#ifdef CONFIG_PAX_MEMORY_UDEREF
2999+ @ save regs
3000+ stmdb sp!, {r0, r1}
3001+ @ read DACR from cpu_domain into r1
3002+ mov r0, sp
3003+ @ assume 8K pages, since we have to split the immediate in two
3004+ bic r0, r0, #(0x1fc0)
3005+ bic r0, r0, #(0x3f)
3006+ ldr r1, [r0, #TI_CPU_DOMAIN]
3007+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
3008+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
3009+ @ write r1 to current_thread_info()->cpu_domain
3010+ str r1, [r0, #TI_CPU_DOMAIN]
3011+ @ write r1 to DACR
3012+ mcr p15, 0, r1, c3, c0, 0
3013+ @ instruction sync
3014+ instr_sync
3015+ @ restore regs
3016+ ldmia sp!, {r0, r1}
3017+#endif
3018+ .endm
3019+
3020 .macro pabt_helper
3021 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
3022 #ifdef MULTI_PABORT
3023@@ -89,11 +170,15 @@
3024 * Invalid mode handlers
3025 */
3026 .macro inv_entry, reason
3027+
3028+ pax_enter_kernel
3029+
3030 sub sp, sp, #S_FRAME_SIZE
3031 ARM( stmib sp, {r1 - lr} )
3032 THUMB( stmia sp, {r0 - r12} )
3033 THUMB( str sp, [sp, #S_SP] )
3034 THUMB( str lr, [sp, #S_LR] )
3035+
3036 mov r1, #\reason
3037 .endm
3038
3039@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
3040 .macro svc_entry, stack_hole=0
3041 UNWIND(.fnstart )
3042 UNWIND(.save {r0 - pc} )
3043+
3044+ pax_enter_kernel
3045+
3046 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
3047+
3048 #ifdef CONFIG_THUMB2_KERNEL
3049 SPFIX( str r0, [sp] ) @ temporarily saved
3050 SPFIX( mov r0, sp )
3051@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
3052 ldmia r0, {r3 - r5}
3053 add r7, sp, #S_SP - 4 @ here for interlock avoidance
3054 mov r6, #-1 @ "" "" "" ""
3055+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3056+ @ offset sp by 8 as done in pax_enter_kernel
3057+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
3058+#else
3059 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
3060+#endif
3061 SPFIX( addeq r2, r2, #4 )
3062 str r3, [sp, #-4]! @ save the "real" r0 copied
3063 @ from the exception stack
3064@@ -317,6 +411,9 @@ ENDPROC(__pabt_svc)
3065 .macro usr_entry
3066 UNWIND(.fnstart )
3067 UNWIND(.cantunwind ) @ don't unwind the user space
3068+
3069+ pax_enter_kernel_user
3070+
3071 sub sp, sp, #S_FRAME_SIZE
3072 ARM( stmib sp, {r1 - r12} )
3073 THUMB( stmia sp, {r0 - r12} )
3074@@ -421,7 +518,9 @@ __und_usr:
3075 tst r3, #PSR_T_BIT @ Thumb mode?
3076 bne __und_usr_thumb
3077 sub r4, r2, #4 @ ARM instr at LR - 4
3078+ pax_open_userland
3079 1: ldrt r0, [r4]
3080+ pax_close_userland
3081 ARM_BE8(rev r0, r0) @ little endian instruction
3082
3083 @ r0 = 32-bit ARM instruction which caused the exception
3084@@ -455,11 +554,15 @@ __und_usr_thumb:
3085 */
3086 .arch armv6t2
3087 #endif
3088+ pax_open_userland
3089 2: ldrht r5, [r4]
3090+ pax_close_userland
3091 ARM_BE8(rev16 r5, r5) @ little endian instruction
3092 cmp r5, #0xe800 @ 32bit instruction if xx != 0
3093 blo __und_usr_fault_16 @ 16bit undefined instruction
3094+ pax_open_userland
3095 3: ldrht r0, [r2]
3096+ pax_close_userland
3097 ARM_BE8(rev16 r0, r0) @ little endian instruction
3098 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
3099 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
3100@@ -489,7 +592,8 @@ ENDPROC(__und_usr)
3101 */
3102 .pushsection .fixup, "ax"
3103 .align 2
3104-4: str r4, [sp, #S_PC] @ retry current instruction
3105+4: pax_close_userland
3106+ str r4, [sp, #S_PC] @ retry current instruction
3107 mov pc, r9
3108 .popsection
3109 .pushsection __ex_table,"a"
3110@@ -698,7 +802,7 @@ ENTRY(__switch_to)
3111 THUMB( str lr, [ip], #4 )
3112 ldr r4, [r2, #TI_TP_VALUE]
3113 ldr r5, [r2, #TI_TP_VALUE + 4]
3114-#ifdef CONFIG_CPU_USE_DOMAINS
3115+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3116 ldr r6, [r2, #TI_CPU_DOMAIN]
3117 #endif
3118 switch_tls r1, r4, r5, r3, r7
3119@@ -707,7 +811,7 @@ ENTRY(__switch_to)
3120 ldr r8, =__stack_chk_guard
3121 ldr r7, [r7, #TSK_STACK_CANARY]
3122 #endif
3123-#ifdef CONFIG_CPU_USE_DOMAINS
3124+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3125 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
3126 #endif
3127 mov r5, r0
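
[editor's note] All of the pax_enter_kernel/pax_open_userland/pax_close_userland macros above locate thread_info by rounding sp down to the 8K stack base. The mask ~0x1fff is not itself a legal ARM immediate (13 contiguous bits cannot fit the 8-bit rotated form), hence the pair of bic's with 0x1fc0 and 0x3f, each of which does encode. The C equivalent is the usual current_thread_info() computation; a sketch under the 8K-stack assumption the macros themselves state:

    #define THREAD_SIZE 8192UL      /* the macros assume 8K kernel stacks */

    struct thread_info_like {
            unsigned int cpu_domain;        /* the field loaded/stored at TI_CPU_DOMAIN */
    };

    static struct thread_info_like *ti_from_sp(unsigned long sp)
    {
            /* one mask in C; two bic immediates in assembly: 0x1fc0 | 0x3f == 0x1fff */
            return (struct thread_info_like *)(sp & ~(THREAD_SIZE - 1));
    }
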
3128diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
3129index 7139d4a..feaf37f 100644
3130--- a/arch/arm/kernel/entry-common.S
3131+++ b/arch/arm/kernel/entry-common.S
3132@@ -10,18 +10,46 @@
3133
3134 #include <asm/unistd.h>
3135 #include <asm/ftrace.h>
3136+#include <asm/domain.h>
3137 #include <asm/unwind.h>
3138
3139+#include "entry-header.S"
3140+
3141 #ifdef CONFIG_NEED_RET_TO_USER
3142 #include <mach/entry-macro.S>
3143 #else
3144 .macro arch_ret_to_user, tmp1, tmp2
3145+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3146+ @ save regs
3147+ stmdb sp!, {r1, r2}
3148+ @ read DACR from cpu_domain into r1
3149+ mov r2, sp
3150+ @ assume 8K pages, since we have to split the immediate in two
3151+ bic r2, r2, #(0x1fc0)
3152+ bic r2, r2, #(0x3f)
3153+ ldr r1, [r2, #TI_CPU_DOMAIN]
3154+#ifdef CONFIG_PAX_KERNEXEC
3155+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
3156+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
3157+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
3158+#endif
3159+#ifdef CONFIG_PAX_MEMORY_UDEREF
3160+ @ set current DOMAIN_USER to DOMAIN_UDEREF
3161+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
3162+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
3163+#endif
3164+ @ write r1 to current_thread_info()->cpu_domain
3165+ str r1, [r2, #TI_CPU_DOMAIN]
3166+ @ write r1 to DACR
3167+ mcr p15, 0, r1, c3, c0, 0
3168+ @ instruction sync
3169+ instr_sync
3170+ @ restore regs
3171+ ldmia sp!, {r1, r2}
3172+#endif
3173 .endm
3174 #endif
3175
3176-#include "entry-header.S"
3177-
3178-
3179 .align 5
3180 /*
3181 * This is the fast syscall return path. We do as little as
3182@@ -405,6 +433,12 @@ ENTRY(vector_swi)
3183 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
3184 #endif
3185
3186+ /*
3187+ * do this here to avoid a performance hit of wrapping the code above
3188+ * that directly dereferences userland to parse the SWI instruction
3189+ */
3190+ pax_enter_kernel_user
3191+
3192 adr tbl, sys_call_table @ load syscall table pointer
3193
3194 #if defined(CONFIG_OABI_COMPAT)
3195diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
3196index 5d702f8..f5fc51a 100644
3197--- a/arch/arm/kernel/entry-header.S
3198+++ b/arch/arm/kernel/entry-header.S
3199@@ -188,6 +188,60 @@
3200 msr cpsr_c, \rtemp @ switch back to the SVC mode
3201 .endm
3202
3203+ .macro pax_enter_kernel_user
3204+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3205+ @ save regs
3206+ stmdb sp!, {r0, r1}
3207+ @ read DACR from cpu_domain into r1
3208+ mov r0, sp
3209+ @ assume 8K pages, since we have to split the immediate in two
3210+ bic r0, r0, #(0x1fc0)
3211+ bic r0, r0, #(0x3f)
3212+ ldr r1, [r0, #TI_CPU_DOMAIN]
3213+#ifdef CONFIG_PAX_MEMORY_UDEREF
3214+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
3215+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
3216+#endif
3217+#ifdef CONFIG_PAX_KERNEXEC
3218+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
3219+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
3220+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
3221+#endif
3222+ @ write r1 to current_thread_info()->cpu_domain
3223+ str r1, [r0, #TI_CPU_DOMAIN]
3224+ @ write r1 to DACR
3225+ mcr p15, 0, r1, c3, c0, 0
3226+ @ instruction sync
3227+ instr_sync
3228+ @ restore regs
3229+ ldmia sp!, {r0, r1}
3230+#endif
3231+ .endm
3232+
3233+ .macro pax_exit_kernel
3234+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3235+ @ save regs
3236+ stmdb sp!, {r0, r1}
3237+ @ read old DACR from stack into r1
3238+ ldr r1, [sp, #(8 + S_SP)]
3239+ sub r1, r1, #8
3240+ ldr r1, [r1]
3241+
3242+ @ write r1 to current_thread_info()->cpu_domain
3243+ mov r0, sp
3244+ @ assume 8K pages, since we have to split the immediate in two
3245+ bic r0, r0, #(0x1fc0)
3246+ bic r0, r0, #(0x3f)
3247+ str r1, [r0, #TI_CPU_DOMAIN]
3248+ @ write r1 to DACR
3249+ mcr p15, 0, r1, c3, c0, 0
3250+ @ instruction sync
3251+ instr_sync
3252+ @ restore regs
3253+ ldmia sp!, {r0, r1}
3254+#endif
3255+ .endm
3256+
3257 #ifndef CONFIG_THUMB2_KERNEL
3258 .macro svc_exit, rpsr, irq = 0
3259 .if \irq != 0
3260@@ -207,6 +261,9 @@
3261 blne trace_hardirqs_off
3262 #endif
3263 .endif
3264+
3265+ pax_exit_kernel
3266+
3267 msr spsr_cxsf, \rpsr
3268 #if defined(CONFIG_CPU_V6)
3269 ldr r0, [sp]
3270@@ -265,6 +322,9 @@
3271 blne trace_hardirqs_off
3272 #endif
3273 .endif
3274+
3275+ pax_exit_kernel
3276+
3277 ldr lr, [sp, #S_SP] @ top of the stack
3278 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
3279 clrex @ clear the exclusive monitor
3280diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
3281index 918875d..cd5fa27 100644
3282--- a/arch/arm/kernel/fiq.c
3283+++ b/arch/arm/kernel/fiq.c
3284@@ -87,7 +87,10 @@ void set_fiq_handler(void *start, unsigned int length)
3285 void *base = vectors_page;
3286 unsigned offset = FIQ_OFFSET;
3287
3288+ pax_open_kernel();
3289 memcpy(base + offset, start, length);
3290+ pax_close_kernel();
3291+
3292 if (!cache_is_vipt_nonaliasing())
3293 flush_icache_range((unsigned long)base + offset, offset +
3294 length);
3295diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
3296index 2c35f0f..7747ee6 100644
3297--- a/arch/arm/kernel/head.S
3298+++ b/arch/arm/kernel/head.S
3299@@ -437,7 +437,7 @@ __enable_mmu:
3300 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
3301 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
3302 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
3303- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
3304+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
3305 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
3306 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
3307 #endif
3308diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
3309index 45e4781..8eac93d 100644
3310--- a/arch/arm/kernel/module.c
3311+++ b/arch/arm/kernel/module.c
3312@@ -38,12 +38,39 @@
3313 #endif
3314
3315 #ifdef CONFIG_MMU
3316-void *module_alloc(unsigned long size)
3317+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
3318 {
3319+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
3320+ return NULL;
3321 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
3322- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
3323+ GFP_KERNEL, prot, NUMA_NO_NODE,
3324 __builtin_return_address(0));
3325 }
3326+
3327+void *module_alloc(unsigned long size)
3328+{
3329+
3330+#ifdef CONFIG_PAX_KERNEXEC
3331+ return __module_alloc(size, PAGE_KERNEL);
3332+#else
3333+ return __module_alloc(size, PAGE_KERNEL_EXEC);
3334+#endif
3335+
3336+}
3337+
3338+#ifdef CONFIG_PAX_KERNEXEC
3339+void module_free_exec(struct module *mod, void *module_region)
3340+{
3341+ module_free(mod, module_region);
3342+}
3343+EXPORT_SYMBOL(module_free_exec);
3344+
3345+void *module_alloc_exec(unsigned long size)
3346+{
3347+ return __module_alloc(size, PAGE_KERNEL_EXEC);
3348+}
3349+EXPORT_SYMBOL(module_alloc_exec);
3350+#endif
3351 #endif
3352
3353 int
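
[editor's note] Under KERNEXEC, module_alloc() above now hands back PAGE_KERNEL (non-executable) memory for a module's data, and the new module_alloc_exec()/module_free_exec() pair supplies the executable mapping for its code, so a module image is never writable and executable at once. A caller-side sketch of the split; the layout helper is illustrative, not the kernel's module loader:

    extern void *module_alloc(unsigned long size);          /* as defined in the hunk above */
    extern void *module_alloc_exec(unsigned long size);     /* KERNEXEC-only, also above */

    /* illustrative: code and data in separate, differently-protected areas */
    struct mod_layout_like {
            void *core_text;        /* executable, written via pax_open_kernel() */
            void *core_data;        /* writable, PAGE_KERNEL -> non-executable */
    };

    static int layout_module_like(struct mod_layout_like *l,
                                  unsigned long text_sz, unsigned long data_sz)
    {
            l->core_text = module_alloc_exec(text_sz);
            l->core_data = module_alloc(data_sz);
            return (l->core_text && l->core_data) ? 0 : -1; /* -ENOMEM in real code */
    }
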
3354diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
3355index 07314af..c46655c 100644
3356--- a/arch/arm/kernel/patch.c
3357+++ b/arch/arm/kernel/patch.c
3358@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
3359 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
3360 int size;
3361
3362+ pax_open_kernel();
3363 if (thumb2 && __opcode_is_thumb16(insn)) {
3364 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
3365 size = sizeof(u16);
3366@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
3367 *(u32 *)addr = insn;
3368 size = sizeof(u32);
3369 }
3370+ pax_close_kernel();
3371
3372 flush_icache_range((uintptr_t)(addr),
3373 (uintptr_t)(addr) + size);
3374diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
3375index 81ef686..f4130b8 100644
3376--- a/arch/arm/kernel/process.c
3377+++ b/arch/arm/kernel/process.c
3378@@ -212,6 +212,7 @@ void machine_power_off(void)
3379
3380 if (pm_power_off)
3381 pm_power_off();
3382+ BUG();
3383 }
3384
3385 /*
3386@@ -225,7 +226,7 @@ void machine_power_off(void)
3387 * executing pre-reset code, and using RAM that the primary CPU's code wishes
3388 * to use. Implementing such co-ordination would be essentially impossible.
3389 */
3390-void machine_restart(char *cmd)
3391+__noreturn void machine_restart(char *cmd)
3392 {
3393 local_irq_disable();
3394 smp_send_stop();
3395@@ -248,8 +249,8 @@ void __show_regs(struct pt_regs *regs)
3396
3397 show_regs_print_info(KERN_DEFAULT);
3398
3399- print_symbol("PC is at %s\n", instruction_pointer(regs));
3400- print_symbol("LR is at %s\n", regs->ARM_lr);
3401+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
3402+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
3403 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
3404 "sp : %08lx ip : %08lx fp : %08lx\n",
3405 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
3406@@ -425,12 +426,6 @@ unsigned long get_wchan(struct task_struct *p)
3407 return 0;
3408 }
3409
3410-unsigned long arch_randomize_brk(struct mm_struct *mm)
3411-{
3412- unsigned long range_end = mm->brk + 0x02000000;
3413- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
3414-}
3415-
3416 #ifdef CONFIG_MMU
3417 #ifdef CONFIG_KUSER_HELPERS
3418 /*
3419@@ -446,7 +441,7 @@ static struct vm_area_struct gate_vma = {
3420
3421 static int __init gate_vma_init(void)
3422 {
3423- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
3424+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
3425 return 0;
3426 }
3427 arch_initcall(gate_vma_init);
3428@@ -472,41 +467,16 @@ int in_gate_area_no_mm(unsigned long addr)
3429
3430 const char *arch_vma_name(struct vm_area_struct *vma)
3431 {
3432- return is_gate_vma(vma) ? "[vectors]" :
3433- (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
3434- "[sigpage]" : NULL;
3435+ return is_gate_vma(vma) ? "[vectors]" : NULL;
3436 }
3437
3438-static struct page *signal_page;
3439-extern struct page *get_signal_page(void);
3440-
3441 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3442 {
3443 struct mm_struct *mm = current->mm;
3444- unsigned long addr;
3445- int ret;
3446-
3447- if (!signal_page)
3448- signal_page = get_signal_page();
3449- if (!signal_page)
3450- return -ENOMEM;
3451
3452 down_write(&mm->mmap_sem);
3453- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
3454- if (IS_ERR_VALUE(addr)) {
3455- ret = addr;
3456- goto up_fail;
3457- }
3458-
3459- ret = install_special_mapping(mm, addr, PAGE_SIZE,
3460- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
3461- &signal_page);
3462-
3463- if (ret == 0)
3464- mm->context.sigpage = addr;
3465-
3466- up_fail:
3467+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
3468 up_write(&mm->mmap_sem);
3469- return ret;
3470+ return 0;
3471 }
3472 #endif
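
Instead of installing a randomized sigpage VMA, the patch stores a cookie address in mm->context.sigpage that lies above TASK_SIZE, so a signal return through it always raises a prefetch abort, which do_PrefetchAbort() (see the arch/arm/mm/fault.c hunk further down) recognizes by PC value and emulates. Assuming the usual 3G/1G split (PAGE_OFFSET = 0xC0000000), the expression keeps the 28-byte sigreturn window word-aligned and below the vector page at 0xffff0000, as this model shows:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_OFFSET 0xC0000000UL   /* assumed 3G/1G split */

/* model of the cookie computation above */
static uint32_t sigpage_cookie(uint32_t rnd)
{
    return (uint32_t)((PAGE_OFFSET + (rnd % 0x3FFEFFE0UL)) & 0xFFFFFFFCUL);
}

int main(void)
{
    for (int i = 0; i < 5; i++) {
        uint32_t sp = sigpage_cookie((uint32_t)rand());
        /* end of the 7-slot window stays below 0xffff0000 */
        printf("sigpage %08x (window end %08x)\n", sp, sp + 7 * 4);
    }
    return 0;
}
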
3473diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
3474index f73891b..cf3004e 100644
3475--- a/arch/arm/kernel/psci.c
3476+++ b/arch/arm/kernel/psci.c
3477@@ -28,7 +28,7 @@
3478 #include <asm/psci.h>
3479 #include <asm/system_misc.h>
3480
3481-struct psci_operations psci_ops;
3482+struct psci_operations psci_ops __read_only;
3483
3484 static int (*invoke_psci_fn)(u32, u32, u32, u32);
3485 typedef int (*psci_initcall_t)(const struct device_node *);
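
psci_ops is one of many ops tables this patch moves from __read_mostly to __read_only: the annotation (defined elsewhere in the patch; the definition below is an assumption modelled on how PaX usually spells it) places the object in a data section that the KERNEXEC page-table setup maps read-only, so overwriting these function pointers at runtime faults instead of silently succeeding:

/* Assumed definition, modelled on PaX's usual one: */
#define __read_only __attribute__((__section__(".data..read_only")))

static int psci_initialised __read_only = 1;

int main(void)
{
    return psci_initialised;   /* reads are fine; the kernel maps the section RO */
}
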
3486diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3487index 0c27ed6..b67388e 100644
3488--- a/arch/arm/kernel/ptrace.c
3489+++ b/arch/arm/kernel/ptrace.c
3490@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3491 regs->ARM_ip = ip;
3492 }
3493
3494+#ifdef CONFIG_GRKERNSEC_SETXID
3495+extern void gr_delayed_cred_worker(void);
3496+#endif
3497+
3498 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3499 {
3500 current_thread_info()->syscall = scno;
3501
3502+#ifdef CONFIG_GRKERNSEC_SETXID
3503+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3504+ gr_delayed_cred_worker();
3505+#endif
3506+
3507 /* Do the secure computing check first; failures should be fast. */
3508 if (secure_computing(scno) == -1)
3509 return -1;
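
The GRKERNSEC_SETXID hook defers credential changes made by one thread to each sibling's next syscall boundary: the setter raises TIF_GRSEC_SETXID on its siblings, and syscall_trace_enter() applies the new credentials with a test-and-clear, so the work happens exactly once per thread. A toy model of that handshake (function names are illustrative):

#include <stdio.h>
#include <stdatomic.h>

static atomic_int need_new_creds;

static void mark_cred_change(void)           /* the setuid thread flags a sibling */
{
    atomic_store(&need_new_creds, 1);        /* set_thread_flag(TIF_GRSEC_SETXID) */
}

static void syscall_trace_enter_model(void)
{
    if (atomic_exchange(&need_new_creds, 0))      /* test_and_clear_thread_flag() */
        printf("applying delayed credentials\n"); /* gr_delayed_cred_worker() */
}

int main(void)
{
    mark_cred_change();
    syscall_trace_enter_model();   /* applies once */
    syscall_trace_enter_model();   /* no-op afterwards */
    return 0;
}
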
3510diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3511index 8a16ee5..4f560e5 100644
3512--- a/arch/arm/kernel/setup.c
3513+++ b/arch/arm/kernel/setup.c
3514@@ -104,21 +104,23 @@ EXPORT_SYMBOL(elf_hwcap);
3515 unsigned int elf_hwcap2 __read_mostly;
3516 EXPORT_SYMBOL(elf_hwcap2);
3517
3518+pteval_t __supported_pte_mask __read_only;
3519+pmdval_t __supported_pmd_mask __read_only;
3520
3521 #ifdef MULTI_CPU
3522-struct processor processor __read_mostly;
3523+struct processor processor __read_only;
3524 #endif
3525 #ifdef MULTI_TLB
3526-struct cpu_tlb_fns cpu_tlb __read_mostly;
3527+struct cpu_tlb_fns cpu_tlb __read_only;
3528 #endif
3529 #ifdef MULTI_USER
3530-struct cpu_user_fns cpu_user __read_mostly;
3531+struct cpu_user_fns cpu_user __read_only;
3532 #endif
3533 #ifdef MULTI_CACHE
3534-struct cpu_cache_fns cpu_cache __read_mostly;
3535+struct cpu_cache_fns cpu_cache __read_only;
3536 #endif
3537 #ifdef CONFIG_OUTER_CACHE
3538-struct outer_cache_fns outer_cache __read_mostly;
3539+struct outer_cache_fns outer_cache __read_only;
3540 EXPORT_SYMBOL(outer_cache);
3541 #endif
3542
3543@@ -251,9 +253,13 @@ static int __get_cpu_architecture(void)
3544 asm("mrc p15, 0, %0, c0, c1, 4"
3545 : "=r" (mmfr0));
3546 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3547- (mmfr0 & 0x000000f0) >= 0x00000030)
3548+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3549 cpu_arch = CPU_ARCH_ARMv7;
3550- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3551+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3552+ __supported_pte_mask |= L_PTE_PXN;
3553+ __supported_pmd_mask |= PMD_PXNTABLE;
3554+ }
3555+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3556 (mmfr0 & 0x000000f0) == 0x00000020)
3557 cpu_arch = CPU_ARCH_ARMv6;
3558 else
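
The new branch derives PXN (privileged execute-never) support from the low nibble of ID_MMFR0, the VMSA support field; values 4 and 5 (per the ARMv7 encoding this hunk appears to rely on) indicate a VMSA revision that implements the PXN bit, at which point the masks pick up L_PTE_PXN and PMD_PXNTABLE. The decode in isolation:

#include <stdio.h>
#include <stdint.h>

/* model of the ID_MMFR0 check above */
static int vmsa_has_pxn(uint32_t mmfr0)
{
    uint32_t vmsa = mmfr0 & 0xf;
    return vmsa == 4 || vmsa == 5;
}

int main(void)
{
    /* 0x3: VMSAv7 without PXN; 0x4/0x5: VMSAv7 with PXN (0x5 adds LPAE) */
    printf("%d %d %d\n", vmsa_has_pxn(0x3), vmsa_has_pxn(0x4), vmsa_has_pxn(0x5));
    return 0;
}
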
3559diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3560index bd19834..e4d8c66 100644
3561--- a/arch/arm/kernel/signal.c
3562+++ b/arch/arm/kernel/signal.c
3563@@ -24,8 +24,6 @@
3564
3565 extern const unsigned long sigreturn_codes[7];
3566
3567-static unsigned long signal_return_offset;
3568-
3569 #ifdef CONFIG_CRUNCH
3570 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3571 {
3572@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3573 * except when the MPU has protected the vectors
3574 * page from PL0
3575 */
3576- retcode = mm->context.sigpage + signal_return_offset +
3577- (idx << 2) + thumb;
3578+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3579 } else
3580 #endif
3581 {
3582@@ -604,33 +601,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3583 } while (thread_flags & _TIF_WORK_MASK);
3584 return 0;
3585 }
3586-
3587-struct page *get_signal_page(void)
3588-{
3589- unsigned long ptr;
3590- unsigned offset;
3591- struct page *page;
3592- void *addr;
3593-
3594- page = alloc_pages(GFP_KERNEL, 0);
3595-
3596- if (!page)
3597- return NULL;
3598-
3599- addr = page_address(page);
3600-
3601- /* Give the signal return code some randomness */
3602- offset = 0x200 + (get_random_int() & 0x7fc);
3603- signal_return_offset = offset;
3604-
3605- /*
3606- * Copy signal return handlers into the vector page, and
3607- * set sigreturn to be a pointer to these.
3608- */
3609- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3610-
3611- ptr = (unsigned long)addr + offset;
3612- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3613-
3614- return page;
3615-}
3616diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3617index 7c4fada..8581286 100644
3618--- a/arch/arm/kernel/smp.c
3619+++ b/arch/arm/kernel/smp.c
3620@@ -73,7 +73,7 @@ enum ipi_msg_type {
3621
3622 static DECLARE_COMPLETION(cpu_running);
3623
3624-static struct smp_operations smp_ops;
3625+static struct smp_operations smp_ops __read_only;
3626
3627 void __init smp_set_ops(struct smp_operations *ops)
3628 {
3629diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3630index 7a3be1d..b00c7de 100644
3631--- a/arch/arm/kernel/tcm.c
3632+++ b/arch/arm/kernel/tcm.c
3633@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3634 .virtual = ITCM_OFFSET,
3635 .pfn = __phys_to_pfn(ITCM_OFFSET),
3636 .length = 0,
3637- .type = MT_MEMORY_RWX_ITCM,
3638+ .type = MT_MEMORY_RX_ITCM,
3639 }
3640 };
3641
3642@@ -267,7 +267,9 @@ no_dtcm:
3643 start = &__sitcm_text;
3644 end = &__eitcm_text;
3645 ram = &__itcm_start;
3646+ pax_open_kernel();
3647 memcpy(start, ram, itcm_code_sz);
3648+ pax_close_kernel();
3649 pr_debug("CPU ITCM: copied code from %p - %p\n",
3650 start, end);
3651 itcm_present = true;
3652diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3653index abd2fc0..895dbb6 100644
3654--- a/arch/arm/kernel/traps.c
3655+++ b/arch/arm/kernel/traps.c
3656@@ -62,7 +62,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3657 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3658 {
3659 #ifdef CONFIG_KALLSYMS
3660- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3661+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3662 #else
3663 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3664 #endif
3665@@ -264,6 +264,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3666 static int die_owner = -1;
3667 static unsigned int die_nest_count;
3668
3669+extern void gr_handle_kernel_exploit(void);
3670+
3671 static unsigned long oops_begin(void)
3672 {
3673 int cpu;
3674@@ -306,6 +308,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3675 panic("Fatal exception in interrupt");
3676 if (panic_on_oops)
3677 panic("Fatal exception");
3678+
3679+ gr_handle_kernel_exploit();
3680+
3681 if (signr)
3682 do_exit(signr);
3683 }
3684@@ -643,7 +648,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3685 * The user helper at 0xffff0fe0 must be used instead.
3686 * (see entry-armv.S for details)
3687 */
3688+ pax_open_kernel();
3689 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3690+ pax_close_kernel();
3691 }
3692 return 0;
3693
3694@@ -900,7 +907,11 @@ void __init early_trap_init(void *vectors_base)
3695 kuser_init(vectors_base);
3696
3697 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3698- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3699+
3700+#ifndef CONFIG_PAX_MEMORY_UDEREF
3701+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3702+#endif
3703+
3704 #else /* ifndef CONFIG_CPU_V7M */
3705 /*
3706 * on V7-M there is no need to copy the vector table to a dedicated
3707diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3708index 7bcee5c..e2f3249 100644
3709--- a/arch/arm/kernel/vmlinux.lds.S
3710+++ b/arch/arm/kernel/vmlinux.lds.S
3711@@ -8,7 +8,11 @@
3712 #include <asm/thread_info.h>
3713 #include <asm/memory.h>
3714 #include <asm/page.h>
3715-
3716+
3717+#ifdef CONFIG_PAX_KERNEXEC
3718+#include <asm/pgtable.h>
3719+#endif
3720+
3721 #define PROC_INFO \
3722 . = ALIGN(4); \
3723 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3724@@ -34,7 +38,7 @@
3725 #endif
3726
3727 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3728- defined(CONFIG_GENERIC_BUG)
3729+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3730 #define ARM_EXIT_KEEP(x) x
3731 #define ARM_EXIT_DISCARD(x)
3732 #else
3733@@ -90,6 +94,11 @@ SECTIONS
3734 _text = .;
3735 HEAD_TEXT
3736 }
3737+
3738+#ifdef CONFIG_PAX_KERNEXEC
3739+ . = ALIGN(1<<SECTION_SHIFT);
3740+#endif
3741+
3742 .text : { /* Real text segment */
3743 _stext = .; /* Text and read-only data */
3744 __exception_text_start = .;
3745@@ -112,6 +121,8 @@ SECTIONS
3746 ARM_CPU_KEEP(PROC_INFO)
3747 }
3748
3749+ _etext = .; /* End of text section */
3750+
3751 RO_DATA(PAGE_SIZE)
3752
3753 . = ALIGN(4);
3754@@ -142,7 +153,9 @@ SECTIONS
3755
3756 NOTES
3757
3758- _etext = .; /* End of text and rodata section */
3759+#ifdef CONFIG_PAX_KERNEXEC
3760+ . = ALIGN(1<<SECTION_SHIFT);
3761+#endif
3762
3763 #ifndef CONFIG_XIP_KERNEL
3764 . = ALIGN(PAGE_SIZE);
3765@@ -220,6 +233,11 @@ SECTIONS
3766 . = PAGE_OFFSET + TEXT_OFFSET;
3767 #else
3768 __init_end = .;
3769+
3770+#ifdef CONFIG_PAX_KERNEXEC
3771+ . = ALIGN(1<<SECTION_SHIFT);
3772+#endif
3773+
3774 . = ALIGN(THREAD_SIZE);
3775 __data_loc = .;
3776 #endif
3777diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3778index 3c82b37..69fa3d2 100644
3779--- a/arch/arm/kvm/arm.c
3780+++ b/arch/arm/kvm/arm.c
3781@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3782 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3783
3784 /* The VMID used in the VTTBR */
3785-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3786+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3787 static u8 kvm_next_vmid;
3788 static DEFINE_SPINLOCK(kvm_vmid_lock);
3789
3790@@ -409,7 +409,7 @@ void force_vm_exit(const cpumask_t *mask)
3791 */
3792 static bool need_new_vmid_gen(struct kvm *kvm)
3793 {
3794- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3795+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3796 }
3797
3798 /**
3799@@ -442,7 +442,7 @@ static void update_vttbr(struct kvm *kvm)
3800
3801 /* First user of a new VMID generation? */
3802 if (unlikely(kvm_next_vmid == 0)) {
3803- atomic64_inc(&kvm_vmid_gen);
3804+ atomic64_inc_unchecked(&kvm_vmid_gen);
3805 kvm_next_vmid = 1;
3806
3807 /*
3808@@ -459,7 +459,7 @@ static void update_vttbr(struct kvm *kvm)
3809 kvm_call_hyp(__kvm_flush_vm_context);
3810 }
3811
3812- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3813+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3814 kvm->arch.vmid = kvm_next_vmid;
3815 kvm_next_vmid++;
3816
3817@@ -1034,7 +1034,7 @@ static void check_kvm_target_cpu(void *ret)
3818 /**
3819 * Initialize Hyp-mode and memory mappings on all CPUs.
3820 */
3821-int kvm_arch_init(void *opaque)
3822+int kvm_arch_init(const void *opaque)
3823 {
3824 int err;
3825 int ret, cpu;
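
kvm_vmid_gen is a generation counter that is expected to wrap, so under PAX_REFCOUNT, which makes ordinary atomic increments trap on signed overflow, it is switched to the _unchecked variants. A userspace model of the distinction (names are illustrative and atomicity is elided for brevity):

#include <stdio.h>
#include <stdint.h>

static int64_t add_return_checked(int64_t *v, int64_t i)
{
    int64_t r;
    if (__builtin_add_overflow(*v, i, &r))
        __builtin_trap();           /* PAX_REFCOUNT: overflow is fatal */
    return *v = r;
}

static int64_t add_return_unchecked(int64_t *v, int64_t i)
{
    return *v = (int64_t)((uint64_t)*v + (uint64_t)i);  /* wrapping is allowed */
}

int main(void)
{
    int64_t gen = INT64_MAX;
    printf("%lld\n", (long long)add_return_unchecked(&gen, 1)); /* wraps cleanly */
    gen = 0;
    printf("%lld\n", (long long)add_return_checked(&gen, 1));   /* no overflow: fine */
    return 0;
}
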
3826diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3827index 14a0d98..7771a7d 100644
3828--- a/arch/arm/lib/clear_user.S
3829+++ b/arch/arm/lib/clear_user.S
3830@@ -12,14 +12,14 @@
3831
3832 .text
3833
3834-/* Prototype: int __clear_user(void *addr, size_t sz)
3835+/* Prototype: int ___clear_user(void *addr, size_t sz)
3836 * Purpose : clear some user memory
3837 * Params : addr - user memory address to clear
3838 * : sz - number of bytes to clear
3839 * Returns : number of bytes NOT cleared
3840 */
3841 ENTRY(__clear_user_std)
3842-WEAK(__clear_user)
3843+WEAK(___clear_user)
3844 stmfd sp!, {r1, lr}
3845 mov r2, #0
3846 cmp r1, #4
3847@@ -44,7 +44,7 @@ WEAK(__clear_user)
3848 USER( strnebt r2, [r0])
3849 mov r0, #0
3850 ldmfd sp!, {r1, pc}
3851-ENDPROC(__clear_user)
3852+ENDPROC(___clear_user)
3853 ENDPROC(__clear_user_std)
3854
3855 .pushsection .fixup,"ax"
3856diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3857index 66a477a..bee61d3 100644
3858--- a/arch/arm/lib/copy_from_user.S
3859+++ b/arch/arm/lib/copy_from_user.S
3860@@ -16,7 +16,7 @@
3861 /*
3862 * Prototype:
3863 *
3864- * size_t __copy_from_user(void *to, const void *from, size_t n)
3865+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3866 *
3867 * Purpose:
3868 *
3869@@ -84,11 +84,11 @@
3870
3871 .text
3872
3873-ENTRY(__copy_from_user)
3874+ENTRY(___copy_from_user)
3875
3876 #include "copy_template.S"
3877
3878-ENDPROC(__copy_from_user)
3879+ENDPROC(___copy_from_user)
3880
3881 .pushsection .fixup,"ax"
3882 .align 0
3883diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3884index 6ee2f67..d1cce76 100644
3885--- a/arch/arm/lib/copy_page.S
3886+++ b/arch/arm/lib/copy_page.S
3887@@ -10,6 +10,7 @@
3888 * ASM optimised string functions
3889 */
3890 #include <linux/linkage.h>
3891+#include <linux/const.h>
3892 #include <asm/assembler.h>
3893 #include <asm/asm-offsets.h>
3894 #include <asm/cache.h>
3895diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3896index d066df6..df28194 100644
3897--- a/arch/arm/lib/copy_to_user.S
3898+++ b/arch/arm/lib/copy_to_user.S
3899@@ -16,7 +16,7 @@
3900 /*
3901 * Prototype:
3902 *
3903- * size_t __copy_to_user(void *to, const void *from, size_t n)
3904+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3905 *
3906 * Purpose:
3907 *
3908@@ -88,11 +88,11 @@
3909 .text
3910
3911 ENTRY(__copy_to_user_std)
3912-WEAK(__copy_to_user)
3913+WEAK(___copy_to_user)
3914
3915 #include "copy_template.S"
3916
3917-ENDPROC(__copy_to_user)
3918+ENDPROC(___copy_to_user)
3919 ENDPROC(__copy_to_user_std)
3920
3921 .pushsection .fixup,"ax"
3922diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3923index 7d08b43..f7ca7ea 100644
3924--- a/arch/arm/lib/csumpartialcopyuser.S
3925+++ b/arch/arm/lib/csumpartialcopyuser.S
3926@@ -57,8 +57,8 @@
3927 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3928 */
3929
3930-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3931-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3932+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3933+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3934
3935 #include "csumpartialcopygeneric.S"
3936
3937diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3938index 5306de3..aed6d03 100644
3939--- a/arch/arm/lib/delay.c
3940+++ b/arch/arm/lib/delay.c
3941@@ -28,7 +28,7 @@
3942 /*
3943 * Default to the loop-based delay implementation.
3944 */
3945-struct arm_delay_ops arm_delay_ops = {
3946+struct arm_delay_ops arm_delay_ops __read_only = {
3947 .delay = __loop_delay,
3948 .const_udelay = __loop_const_udelay,
3949 .udelay = __loop_udelay,
3950diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3951index 3e58d71..029817c 100644
3952--- a/arch/arm/lib/uaccess_with_memcpy.c
3953+++ b/arch/arm/lib/uaccess_with_memcpy.c
3954@@ -136,7 +136,7 @@ out:
3955 }
3956
3957 unsigned long
3958-__copy_to_user(void __user *to, const void *from, unsigned long n)
3959+___copy_to_user(void __user *to, const void *from, unsigned long n)
3960 {
3961 /*
3962 * This test is stubbed out of the main function above to keep
3963@@ -190,7 +190,7 @@ out:
3964 return n;
3965 }
3966
3967-unsigned long __clear_user(void __user *addr, unsigned long n)
3968+unsigned long ___clear_user(void __user *addr, unsigned long n)
3969 {
3970 	/* See rationale for this in __copy_to_user() above. */
3971 if (n < 64)
3972diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
3973index f7a07a5..258e1f7 100644
3974--- a/arch/arm/mach-at91/setup.c
3975+++ b/arch/arm/mach-at91/setup.c
3976@@ -81,7 +81,7 @@ void __init at91_init_sram(int bank, unsigned long base, unsigned int length)
3977
3978 desc->pfn = __phys_to_pfn(base);
3979 desc->length = length;
3980- desc->type = MT_MEMORY_RWX_NONCACHED;
3981+ desc->type = MT_MEMORY_RW_NONCACHED;
3982
3983 pr_info("AT91: sram at 0x%lx of 0x%x mapped at 0x%lx\n",
3984 base, length, desc->virtual);
3985diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3986index 255f33a..507b157 100644
3987--- a/arch/arm/mach-kirkwood/common.c
3988+++ b/arch/arm/mach-kirkwood/common.c
3989@@ -157,7 +157,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3990 clk_gate_ops.disable(hw);
3991 }
3992
3993-static struct clk_ops clk_gate_fn_ops;
3994+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3995+{
3996+ return clk_gate_ops.is_enabled(hw);
3997+}
3998+
3999+static struct clk_ops clk_gate_fn_ops = {
4000+ .enable = clk_gate_fn_enable,
4001+ .disable = clk_gate_fn_disable,
4002+ .is_enabled = clk_gate_fn_is_enabled,
4003+};
4004
4005 static struct clk __init *clk_register_gate_fn(struct device *dev,
4006 const char *name,
4007@@ -191,14 +200,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
4008 gate_fn->fn_en = fn_en;
4009 gate_fn->fn_dis = fn_dis;
4010
4011- /* ops is the gate ops, but with our enable/disable functions */
4012- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
4013- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
4014- clk_gate_fn_ops = clk_gate_ops;
4015- clk_gate_fn_ops.enable = clk_gate_fn_enable;
4016- clk_gate_fn_ops.disable = clk_gate_fn_disable;
4017- }
4018-
4019 clk = clk_register(dev, &gate_fn->gate.hw);
4020
4021 if (IS_ERR(clk))
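
This hunk shows the usual constification refactor: rather than copying clk_gate_ops into a writable clk_gate_fn_ops at first use and patching two members, the ops table is fully initialized at build time, with a small is_enabled forwarder standing in for the member the copy used to supply, so the table never needs to be written at runtime. The shape of the transformation, in miniature:

#include <stdio.h>

struct ops {
    int (*enable)(void);
    int (*disable)(void);
};

static int my_enable(void)  { return 1; }
static int my_disable(void) { return 0; }

/* Before: a writable `static struct ops fn_ops;` filled in at first use.
 * After: a complete build-time initializer, so the table can be const
 * (and, in the kernel, live in read-only memory). */
static const struct ops fn_ops = {
    .enable  = my_enable,
    .disable = my_disable,
};

int main(void)
{
    return fn_ops.enable() ? fn_ops.disable() : 1;
}
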
4022diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
4023index 2bdc323..cf1c607 100644
4024--- a/arch/arm/mach-mvebu/coherency.c
4025+++ b/arch/arm/mach-mvebu/coherency.c
4026@@ -316,7 +316,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
4027
4028 /*
4029 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
4030- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
4031+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
4032 * is needed as a workaround for a deadlock issue between the PCIe
4033 * interface and the cache controller.
4034 */
4035@@ -329,7 +329,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
4036 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
4037
4038 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
4039- mtype = MT_UNCACHED;
4040+ mtype = MT_UNCACHED_RW;
4041
4042 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
4043 }
4044diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
4045index aead77a..a2253fa 100644
4046--- a/arch/arm/mach-omap2/board-n8x0.c
4047+++ b/arch/arm/mach-omap2/board-n8x0.c
4048@@ -568,7 +568,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
4049 }
4050 #endif
4051
4052-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
4053+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
4054 .late_init = n8x0_menelaus_late_init,
4055 };
4056
4057diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
4058index 8bc1338..8b28b69 100644
4059--- a/arch/arm/mach-omap2/gpmc.c
4060+++ b/arch/arm/mach-omap2/gpmc.c
4061@@ -151,7 +151,6 @@ struct omap3_gpmc_regs {
4062 };
4063
4064 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
4065-static struct irq_chip gpmc_irq_chip;
4066 static int gpmc_irq_start;
4067
4068 static struct resource gpmc_mem_root;
4069@@ -736,6 +735,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
4070
4071 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
4072
4073+static struct irq_chip gpmc_irq_chip = {
4074+ .name = "gpmc",
4075+ .irq_startup = gpmc_irq_noop_ret,
4076+ .irq_enable = gpmc_irq_enable,
4077+ .irq_disable = gpmc_irq_disable,
4078+ .irq_shutdown = gpmc_irq_noop,
4079+ .irq_ack = gpmc_irq_noop,
4080+ .irq_mask = gpmc_irq_noop,
4081+ .irq_unmask = gpmc_irq_noop,
4082+
4083+};
4084+
4085 static int gpmc_setup_irq(void)
4086 {
4087 int i;
4088@@ -750,15 +761,6 @@ static int gpmc_setup_irq(void)
4089 return gpmc_irq_start;
4090 }
4091
4092- gpmc_irq_chip.name = "gpmc";
4093- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
4094- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
4095- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
4096- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
4097- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
4098- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
4099- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
4100-
4101 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
4102 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
4103
4104diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
4105index 4001325..b14e2a0 100644
4106--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
4107+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
4108@@ -84,7 +84,7 @@ struct cpu_pm_ops {
4109 int (*finish_suspend)(unsigned long cpu_state);
4110 void (*resume)(void);
4111 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
4112-};
4113+} __no_const;
4114
4115 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
4116 static struct powerdomain *mpuss_pd;
4117@@ -102,7 +102,7 @@ static void dummy_cpu_resume(void)
4118 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
4119 {}
4120
4121-struct cpu_pm_ops omap_pm_ops = {
4122+static struct cpu_pm_ops omap_pm_ops __read_only = {
4123 .finish_suspend = default_finish_suspend,
4124 .resume = dummy_cpu_resume,
4125 .scu_prepare = dummy_scu_prepare,
4126diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
4127index 37843a7..a98df13 100644
4128--- a/arch/arm/mach-omap2/omap-wakeupgen.c
4129+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
4130@@ -343,7 +343,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
4131 return NOTIFY_OK;
4132 }
4133
4134-static struct notifier_block __refdata irq_hotplug_notifier = {
4135+static struct notifier_block irq_hotplug_notifier = {
4136 .notifier_call = irq_cpu_hotplug_notify,
4137 };
4138
4139diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
4140index 01ef59d..32ae28a8 100644
4141--- a/arch/arm/mach-omap2/omap_device.c
4142+++ b/arch/arm/mach-omap2/omap_device.c
4143@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
4144 struct platform_device __init *omap_device_build(const char *pdev_name,
4145 int pdev_id,
4146 struct omap_hwmod *oh,
4147- void *pdata, int pdata_len)
4148+ const void *pdata, int pdata_len)
4149 {
4150 struct omap_hwmod *ohs[] = { oh };
4151
4152@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
4153 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
4154 int pdev_id,
4155 struct omap_hwmod **ohs,
4156- int oh_cnt, void *pdata,
4157+ int oh_cnt, const void *pdata,
4158 int pdata_len)
4159 {
4160 int ret = -ENOMEM;
4161diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
4162index 78c02b3..c94109a 100644
4163--- a/arch/arm/mach-omap2/omap_device.h
4164+++ b/arch/arm/mach-omap2/omap_device.h
4165@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
4166 /* Core code interface */
4167
4168 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
4169- struct omap_hwmod *oh, void *pdata,
4170+ struct omap_hwmod *oh, const void *pdata,
4171 int pdata_len);
4172
4173 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
4174 struct omap_hwmod **oh, int oh_cnt,
4175- void *pdata, int pdata_len);
4176+ const void *pdata, int pdata_len);
4177
4178 struct omap_device *omap_device_alloc(struct platform_device *pdev,
4179 struct omap_hwmod **ohs, int oh_cnt);
4180diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
4181index da1b256..ab2a327 100644
4182--- a/arch/arm/mach-omap2/omap_hwmod.c
4183+++ b/arch/arm/mach-omap2/omap_hwmod.c
4184@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
4185 int (*init_clkdm)(struct omap_hwmod *oh);
4186 void (*update_context_lost)(struct omap_hwmod *oh);
4187 int (*get_context_lost)(struct omap_hwmod *oh);
4188-};
4189+} __no_const;
4190
4191 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
4192-static struct omap_hwmod_soc_ops soc_ops;
4193+static struct omap_hwmod_soc_ops soc_ops __read_only;
4194
4195 /* omap_hwmod_list contains all registered struct omap_hwmods */
4196 static LIST_HEAD(omap_hwmod_list);
4197diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
4198index 95fee54..cfa9cf1 100644
4199--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
4200+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
4201@@ -10,6 +10,7 @@
4202
4203 #include <linux/kernel.h>
4204 #include <linux/init.h>
4205+#include <asm/pgtable.h>
4206
4207 #include "powerdomain.h"
4208
4209@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
4210
4211 void __init am43xx_powerdomains_init(void)
4212 {
4213- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
4214+ pax_open_kernel();
4215+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
4216+ pax_close_kernel();
4217 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
4218 pwrdm_register_pwrdms(powerdomains_am43xx);
4219 pwrdm_complete_init();
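
Because omap4_pwrdm_operations has been made read-only, the one legitimate boot-time update is rewritten as a cast-assisted store bracketed by pax_open_kernel()/pax_close_kernel(); the `*(void **)&ops.member = fn` idiom defeats the (effectively const) member type. A compilable model of the idiom, with the const qualifier and write-protection toggling elided so it runs in userspace:

#include <stdio.h>

struct pwrdm_ops {
    int (*has_voltdm)(void);
};

static int check_vcvp(void) { return 1; }

static struct pwrdm_ops omap4_ops;   /* __read_only with const members in the patch */

int main(void)
{
    /* pax_open_kernel();  -- in the kernel, makes the section writable */
    *(void **)&omap4_ops.has_voltdm = (void *)check_vcvp;
    /* pax_close_kernel(); */
    printf("%d\n", omap4_ops.has_voltdm());
    return 0;
}
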
4220diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
4221index 97d6607..8429d14 100644
4222--- a/arch/arm/mach-omap2/wd_timer.c
4223+++ b/arch/arm/mach-omap2/wd_timer.c
4224@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
4225 struct omap_hwmod *oh;
4226 char *oh_name = "wd_timer2";
4227 char *dev_name = "omap_wdt";
4228- struct omap_wd_timer_platform_data pdata;
4229+ static struct omap_wd_timer_platform_data pdata = {
4230+ .read_reset_sources = prm_read_reset_sources
4231+ };
4232
4233 if (!cpu_class_is_omap2() || of_have_populated_dt())
4234 return 0;
4235@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
4236 return -EINVAL;
4237 }
4238
4239- pdata.read_reset_sources = prm_read_reset_sources;
4240-
4241 pdev = omap_device_build(dev_name, id, oh, &pdata,
4242 sizeof(struct omap_wd_timer_platform_data));
4243 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
4244diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
4245index b82dcae..44ee5b6 100644
4246--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
4247+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
4248@@ -180,7 +180,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
4249 bool entered_lp2 = false;
4250
4251 if (tegra_pending_sgi())
4252- ACCESS_ONCE(abort_flag) = true;
4253+ ACCESS_ONCE_RW(abort_flag) = true;
4254
4255 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
4256
4257diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
4258index 2dea8b5..6499da2 100644
4259--- a/arch/arm/mach-ux500/setup.h
4260+++ b/arch/arm/mach-ux500/setup.h
4261@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
4262 .type = MT_DEVICE, \
4263 }
4264
4265-#define __MEM_DEV_DESC(x, sz) { \
4266- .virtual = IO_ADDRESS(x), \
4267- .pfn = __phys_to_pfn(x), \
4268- .length = sz, \
4269- .type = MT_MEMORY_RWX, \
4270-}
4271-
4272 extern struct smp_operations ux500_smp_ops;
4273 extern void ux500_cpu_die(unsigned int cpu);
4274
4275diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
4276index c348eae..456a1a4 100644
4277--- a/arch/arm/mm/Kconfig
4278+++ b/arch/arm/mm/Kconfig
4279@@ -446,6 +446,7 @@ config CPU_32v5
4280
4281 config CPU_32v6
4282 bool
4283+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
4284 select TLS_REG_EMUL if !CPU_32v6K && !MMU
4285
4286 config CPU_32v6K
4287@@ -600,6 +601,7 @@ config CPU_CP15_MPU
4288
4289 config CPU_USE_DOMAINS
4290 bool
4291+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
4292 help
4293 This option enables or disables the use of domain switching
4294 via the set_fs() function.
4295@@ -799,6 +801,7 @@ config NEED_KUSER_HELPERS
4296 config KUSER_HELPERS
4297 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
4298 default y
4299+ depends on !(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND
4300 help
4301 Warning: disabling this option may break user programs.
4302
4303@@ -811,7 +814,7 @@ config KUSER_HELPERS
4304 See Documentation/arm/kernel_user_helpers.txt for details.
4305
4306 However, the fixed address nature of these helpers can be used
4307- by ROP (return orientated programming) authors when creating
4308+ by ROP (Return Oriented Programming) authors when creating
4309 exploits.
4310
4311 If all of the binaries and libraries which run on your platform
4312diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
4313index b8cb1a2..6a5624a 100644
4314--- a/arch/arm/mm/alignment.c
4315+++ b/arch/arm/mm/alignment.c
4316@@ -214,10 +214,12 @@ union offset_union {
4317 #define __get16_unaligned_check(ins,val,addr) \
4318 do { \
4319 unsigned int err = 0, v, a = addr; \
4320+ pax_open_userland(); \
4321 __get8_unaligned_check(ins,v,a,err); \
4322 val = v << ((BE) ? 8 : 0); \
4323 __get8_unaligned_check(ins,v,a,err); \
4324 val |= v << ((BE) ? 0 : 8); \
4325+ pax_close_userland(); \
4326 if (err) \
4327 goto fault; \
4328 } while (0)
4329@@ -231,6 +233,7 @@ union offset_union {
4330 #define __get32_unaligned_check(ins,val,addr) \
4331 do { \
4332 unsigned int err = 0, v, a = addr; \
4333+ pax_open_userland(); \
4334 __get8_unaligned_check(ins,v,a,err); \
4335 val = v << ((BE) ? 24 : 0); \
4336 __get8_unaligned_check(ins,v,a,err); \
4337@@ -239,6 +242,7 @@ union offset_union {
4338 val |= v << ((BE) ? 8 : 16); \
4339 __get8_unaligned_check(ins,v,a,err); \
4340 val |= v << ((BE) ? 0 : 24); \
4341+ pax_close_userland(); \
4342 if (err) \
4343 goto fault; \
4344 } while (0)
4345@@ -252,6 +256,7 @@ union offset_union {
4346 #define __put16_unaligned_check(ins,val,addr) \
4347 do { \
4348 unsigned int err = 0, v = val, a = addr; \
4349+ pax_open_userland(); \
4350 __asm__( FIRST_BYTE_16 \
4351 ARM( "1: "ins" %1, [%2], #1\n" ) \
4352 THUMB( "1: "ins" %1, [%2]\n" ) \
4353@@ -271,6 +276,7 @@ union offset_union {
4354 " .popsection\n" \
4355 : "=r" (err), "=&r" (v), "=&r" (a) \
4356 : "0" (err), "1" (v), "2" (a)); \
4357+ pax_close_userland(); \
4358 if (err) \
4359 goto fault; \
4360 } while (0)
4361@@ -284,6 +290,7 @@ union offset_union {
4362 #define __put32_unaligned_check(ins,val,addr) \
4363 do { \
4364 unsigned int err = 0, v = val, a = addr; \
4365+ pax_open_userland(); \
4366 __asm__( FIRST_BYTE_32 \
4367 ARM( "1: "ins" %1, [%2], #1\n" ) \
4368 THUMB( "1: "ins" %1, [%2]\n" ) \
4369@@ -313,6 +320,7 @@ union offset_union {
4370 " .popsection\n" \
4371 : "=r" (err), "=&r" (v), "=&r" (a) \
4372 : "0" (err), "1" (v), "2" (a)); \
4373+ pax_close_userland(); \
4374 if (err) \
4375 goto fault; \
4376 } while (0)
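
Under PAX_MEMORY_UDEREF the kernel cannot touch userland mappings by default, so the alignment fixup's byte-wise user accesses are bracketed with pax_open_userland()/pax_close_userland(). A toy model that enforces the bracket with an assertion instead of the ARM domain register:

#include <assert.h>
#include <stdio.h>

static int userland_open;

static void pax_open_userland_model(void)  { userland_open = 1; }
static void pax_close_userland_model(void) { userland_open = 0; }

static unsigned char get_user_byte(const unsigned char *p)
{
    assert(userland_open);   /* an access outside the bracket would fault */
    return *p;
}

int main(void)
{
    unsigned char buf[2] = { 0x12, 0x34 };

    pax_open_userland_model();
    /* the __get16_unaligned_check() pattern: two guarded byte loads */
    unsigned v = (unsigned)get_user_byte(buf) << 8 | get_user_byte(buf + 1);
    pax_close_userland_model();

    printf("%04x\n", v);
    return 0;
}
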
4377diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
4378index 7c3fb41..bfb87d8 100644
4379--- a/arch/arm/mm/cache-l2x0.c
4380+++ b/arch/arm/mm/cache-l2x0.c
4381@@ -41,7 +41,7 @@ struct l2c_init_data {
4382 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
4383 void (*save)(void __iomem *);
4384 struct outer_cache_fns outer_cache;
4385-};
4386+} __do_const;
4387
4388 #define CACHE_LINE_SIZE 32
4389
4390diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
4391index 6eb97b3..ac509f6 100644
4392--- a/arch/arm/mm/context.c
4393+++ b/arch/arm/mm/context.c
4394@@ -43,7 +43,7 @@
4395 #define NUM_USER_ASIDS ASID_FIRST_VERSION
4396
4397 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
4398-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
4399+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
4400 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
4401
4402 static DEFINE_PER_CPU(atomic64_t, active_asids);
4403@@ -182,7 +182,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
4404 {
4405 static u32 cur_idx = 1;
4406 u64 asid = atomic64_read(&mm->context.id);
4407- u64 generation = atomic64_read(&asid_generation);
4408+ u64 generation = atomic64_read_unchecked(&asid_generation);
4409
4410 if (asid != 0 && is_reserved_asid(asid)) {
4411 /*
4412@@ -203,7 +203,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
4413 */
4414 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
4415 if (asid == NUM_USER_ASIDS) {
4416- generation = atomic64_add_return(ASID_FIRST_VERSION,
4417+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
4418 &asid_generation);
4419 flush_context(cpu);
4420 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
4421@@ -234,14 +234,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
4422 cpu_set_reserved_ttbr0();
4423
4424 asid = atomic64_read(&mm->context.id);
4425- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
4426+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
4427 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
4428 goto switch_mm_fastpath;
4429
4430 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
4431 /* Check that our ASID belongs to the current generation. */
4432 asid = atomic64_read(&mm->context.id);
4433- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
4434+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
4435 asid = new_context(mm, cpu);
4436 atomic64_set(&mm->context.id, asid);
4437 }
4438diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
4439index eb8830a..5360ce7 100644
4440--- a/arch/arm/mm/fault.c
4441+++ b/arch/arm/mm/fault.c
4442@@ -25,6 +25,7 @@
4443 #include <asm/system_misc.h>
4444 #include <asm/system_info.h>
4445 #include <asm/tlbflush.h>
4446+#include <asm/sections.h>
4447
4448 #include "fault.h"
4449
4450@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
4451 if (fixup_exception(regs))
4452 return;
4453
4454+#ifdef CONFIG_PAX_MEMORY_UDEREF
4455+ if (addr < TASK_SIZE) {
4456+ if (current->signal->curr_ip)
4457+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4458+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4459+ else
4460+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4461+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4462+ }
4463+#endif
4464+
4465+#ifdef CONFIG_PAX_KERNEXEC
4466+ if ((fsr & FSR_WRITE) &&
4467+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
4468+ (MODULES_VADDR <= addr && addr < MODULES_END)))
4469+ {
4470+ if (current->signal->curr_ip)
4471+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4472+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
4473+ else
4474+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
4475+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
4476+ }
4477+#endif
4478+
4479 /*
4480 * No handler, we'll have to terminate things with extreme prejudice.
4481 */
4482@@ -174,6 +200,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
4483 }
4484 #endif
4485
4486+#ifdef CONFIG_PAX_PAGEEXEC
4487+ if (fsr & FSR_LNX_PF) {
4488+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
4489+ do_group_exit(SIGKILL);
4490+ }
4491+#endif
4492+
4493 tsk->thread.address = addr;
4494 tsk->thread.error_code = fsr;
4495 tsk->thread.trap_no = 14;
4496@@ -401,6 +434,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
4497 }
4498 #endif /* CONFIG_MMU */
4499
4500+#ifdef CONFIG_PAX_PAGEEXEC
4501+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4502+{
4503+ long i;
4504+
4505+ printk(KERN_ERR "PAX: bytes at PC: ");
4506+ for (i = 0; i < 20; i++) {
4507+ unsigned char c;
4508+ if (get_user(c, (__force unsigned char __user *)pc+i))
4509+ printk(KERN_CONT "?? ");
4510+ else
4511+ printk(KERN_CONT "%02x ", c);
4512+ }
4513+ printk("\n");
4514+
4515+ printk(KERN_ERR "PAX: bytes at SP-4: ");
4516+ for (i = -1; i < 20; i++) {
4517+ unsigned long c;
4518+ if (get_user(c, (__force unsigned long __user *)sp+i))
4519+ printk(KERN_CONT "???????? ");
4520+ else
4521+ printk(KERN_CONT "%08lx ", c);
4522+ }
4523+ printk("\n");
4524+}
4525+#endif
4526+
4527 /*
4528 * First Level Translation Fault Handler
4529 *
4530@@ -548,9 +608,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
4531 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
4532 struct siginfo info;
4533
4534+#ifdef CONFIG_PAX_MEMORY_UDEREF
4535+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
4536+ if (current->signal->curr_ip)
4537+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4538+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4539+ else
4540+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4541+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4542+ goto die;
4543+ }
4544+#endif
4545+
4546 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4547 return;
4548
4549+die:
4550 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4551 inf->name, fsr, addr);
4552
4553@@ -574,15 +647,98 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4554 ifsr_info[nr].name = name;
4555 }
4556
4557+asmlinkage int sys_sigreturn(struct pt_regs *regs);
4558+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4559+
4560 asmlinkage void __exception
4561 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4562 {
4563 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4564 struct siginfo info;
4565+ unsigned long pc = instruction_pointer(regs);
4566+
4567+ if (user_mode(regs)) {
4568+ unsigned long sigpage = current->mm->context.sigpage;
4569+
4570+ if (sigpage <= pc && pc < sigpage + 7*4) {
4571+ if (pc < sigpage + 3*4)
4572+ sys_sigreturn(regs);
4573+ else
4574+ sys_rt_sigreturn(regs);
4575+ return;
4576+ }
4577+ if (pc == 0xffff0f60UL) {
4578+ /*
4579+ * PaX: __kuser_cmpxchg64 emulation
4580+ */
4581+ // TODO
4582+ //regs->ARM_pc = regs->ARM_lr;
4583+ //return;
4584+ }
4585+ if (pc == 0xffff0fa0UL) {
4586+ /*
4587+ * PaX: __kuser_memory_barrier emulation
4588+ */
4589+ // dmb(); implied by the exception
4590+ regs->ARM_pc = regs->ARM_lr;
4591+ return;
4592+ }
4593+ if (pc == 0xffff0fc0UL) {
4594+ /*
4595+ * PaX: __kuser_cmpxchg emulation
4596+ */
4597+ // TODO
4598+ //long new;
4599+ //int op;
4600+
4601+ //op = FUTEX_OP_SET << 28;
4602+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4603+ //regs->ARM_r0 = old != new;
4604+ //regs->ARM_pc = regs->ARM_lr;
4605+ //return;
4606+ }
4607+ if (pc == 0xffff0fe0UL) {
4608+ /*
4609+ * PaX: __kuser_get_tls emulation
4610+ */
4611+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4612+ regs->ARM_pc = regs->ARM_lr;
4613+ return;
4614+ }
4615+ }
4616+
4617+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4618+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4619+ if (current->signal->curr_ip)
4620+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4621+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4622+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4623+ else
4624+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4625+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4626+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4627+ goto die;
4628+ }
4629+#endif
4630+
4631+#ifdef CONFIG_PAX_REFCOUNT
4632+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4633+ unsigned int bkpt;
4634+
4635+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4636+ current->thread.error_code = ifsr;
4637+ current->thread.trap_no = 0;
4638+ pax_report_refcount_overflow(regs);
4639+ fixup_exception(regs);
4640+ return;
4641+ }
4642+ }
4643+#endif
4644
4645 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4646 return;
4647
4648+die:
4649 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4650 inf->name, ifsr, addr);
4651
4652diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4653index cf08bdf..772656c 100644
4654--- a/arch/arm/mm/fault.h
4655+++ b/arch/arm/mm/fault.h
4656@@ -3,6 +3,7 @@
4657
4658 /*
4659 * Fault status register encodings. We steal bit 31 for our own purposes.
4660+ * Set when the FSR value is from an instruction fault.
4661 */
4662 #define FSR_LNX_PF (1 << 31)
4663 #define FSR_WRITE (1 << 11)
4664@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4665 }
4666 #endif
4667
4668+/* valid for LPAE and !LPAE */
4669+static inline int is_xn_fault(unsigned int fsr)
4670+{
4671+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4672+}
4673+
4674+static inline int is_domain_fault(unsigned int fsr)
4675+{
4676+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4677+}
4678+
4679 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4680 unsigned long search_exception_table(unsigned long addr);
4681
4682diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4683index 659c75d..6f8c029 100644
4684--- a/arch/arm/mm/init.c
4685+++ b/arch/arm/mm/init.c
4686@@ -31,6 +31,8 @@
4687 #include <asm/setup.h>
4688 #include <asm/tlb.h>
4689 #include <asm/fixmap.h>
4690+#include <asm/system_info.h>
4691+#include <asm/cp15.h>
4692
4693 #include <asm/mach/arch.h>
4694 #include <asm/mach/map.h>
4695@@ -619,7 +621,46 @@ void free_initmem(void)
4696 {
4697 #ifdef CONFIG_HAVE_TCM
4698 extern char __tcm_start, __tcm_end;
4699+#endif
4700
4701+#ifdef CONFIG_PAX_KERNEXEC
4702+ unsigned long addr;
4703+ pgd_t *pgd;
4704+ pud_t *pud;
4705+ pmd_t *pmd;
4706+ int cpu_arch = cpu_architecture();
4707+ unsigned int cr = get_cr();
4708+
4709+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4710+ /* make pages tables, etc before .text NX */
4711+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4712+ pgd = pgd_offset_k(addr);
4713+ pud = pud_offset(pgd, addr);
4714+ pmd = pmd_offset(pud, addr);
4715+ __section_update(pmd, addr, PMD_SECT_XN);
4716+ }
4717+ /* make init NX */
4718+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4719+ pgd = pgd_offset_k(addr);
4720+ pud = pud_offset(pgd, addr);
4721+ pmd = pmd_offset(pud, addr);
4722+ __section_update(pmd, addr, PMD_SECT_XN);
4723+ }
4724+ /* make kernel code/rodata RX */
4725+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4726+ pgd = pgd_offset_k(addr);
4727+ pud = pud_offset(pgd, addr);
4728+ pmd = pmd_offset(pud, addr);
4729+#ifdef CONFIG_ARM_LPAE
4730+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4731+#else
4732+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4733+#endif
4734+ }
4735+ }
4736+#endif
4737+
4738+#ifdef CONFIG_HAVE_TCM
4739 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4740 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4741 #endif
4742diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4743index d1e5ad7..84dcbf2 100644
4744--- a/arch/arm/mm/ioremap.c
4745+++ b/arch/arm/mm/ioremap.c
4746@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4747 unsigned int mtype;
4748
4749 if (cached)
4750- mtype = MT_MEMORY_RWX;
4751+ mtype = MT_MEMORY_RX;
4752 else
4753- mtype = MT_MEMORY_RWX_NONCACHED;
4754+ mtype = MT_MEMORY_RX_NONCACHED;
4755
4756 return __arm_ioremap_caller(phys_addr, size, mtype,
4757 __builtin_return_address(0));
4758diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4759index 5e85ed3..b10a7ed 100644
4760--- a/arch/arm/mm/mmap.c
4761+++ b/arch/arm/mm/mmap.c
4762@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4763 struct vm_area_struct *vma;
4764 int do_align = 0;
4765 int aliasing = cache_is_vipt_aliasing();
4766+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4767 struct vm_unmapped_area_info info;
4768
4769 /*
4770@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4771 if (len > TASK_SIZE)
4772 return -ENOMEM;
4773
4774+#ifdef CONFIG_PAX_RANDMMAP
4775+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4776+#endif
4777+
4778 if (addr) {
4779 if (do_align)
4780 addr = COLOUR_ALIGN(addr, pgoff);
4781@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4782 addr = PAGE_ALIGN(addr);
4783
4784 vma = find_vma(mm, addr);
4785- if (TASK_SIZE - len >= addr &&
4786- (!vma || addr + len <= vma->vm_start))
4787+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4788 return addr;
4789 }
4790
4791@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4792 info.high_limit = TASK_SIZE;
4793 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4794 info.align_offset = pgoff << PAGE_SHIFT;
4795+ info.threadstack_offset = offset;
4796 return vm_unmapped_area(&info);
4797 }
4798
4799@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4800 unsigned long addr = addr0;
4801 int do_align = 0;
4802 int aliasing = cache_is_vipt_aliasing();
4803+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4804 struct vm_unmapped_area_info info;
4805
4806 /*
4807@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4808 return addr;
4809 }
4810
4811+#ifdef CONFIG_PAX_RANDMMAP
4812+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4813+#endif
4814+
4815 /* requesting a specific address */
4816 if (addr) {
4817 if (do_align)
4818@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4819 else
4820 addr = PAGE_ALIGN(addr);
4821 vma = find_vma(mm, addr);
4822- if (TASK_SIZE - len >= addr &&
4823- (!vma || addr + len <= vma->vm_start))
4824+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4825 return addr;
4826 }
4827
4828@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4829 info.high_limit = mm->mmap_base;
4830 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4831 info.align_offset = pgoff << PAGE_SHIFT;
4832+ info.threadstack_offset = offset;
4833 addr = vm_unmapped_area(&info);
4834
4835 /*
4836@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4837 {
4838 unsigned long random_factor = 0UL;
4839
4840+#ifdef CONFIG_PAX_RANDMMAP
4841+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4842+#endif
4843+
4844 /* 8 bits of randomness in 20 address space bits */
4845 if ((current->flags & PF_RANDOMIZE) &&
4846 !(current->personality & ADDR_NO_RANDOMIZE))
4847@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4848
4849 if (mmap_is_legacy()) {
4850 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4851+
4852+#ifdef CONFIG_PAX_RANDMMAP
4853+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4854+ mm->mmap_base += mm->delta_mmap;
4855+#endif
4856+
4857 mm->get_unmapped_area = arch_get_unmapped_area;
4858 } else {
4859 mm->mmap_base = mmap_base(random_factor);
4860+
4861+#ifdef CONFIG_PAX_RANDMMAP
4862+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4863+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4864+#endif
4865+
4866 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4867 }
4868 }
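
Both get_unmapped_area paths now thread a per-process random offset (gr_rand_threadstack_offset()) into the placement decision and replace the bare "hint fits below the next VMA" test with check_heap_stack_gap(), which additionally requires a gap between the new mapping and its neighbours. A simplified model of the hint check (the real helper also accounts for grow-down stacks):

#include <stdio.h>

struct vma { unsigned long start, end; };

/* accept a hinted placement only if it leaves at least `gap` bytes
 * before the next existing mapping */
static int hint_ok(const struct vma *next, unsigned long addr,
                   unsigned long len, unsigned long gap)
{
    if (!next)              /* nothing mapped above us */
        return 1;
    return addr + len + gap <= next->start;
}

int main(void)
{
    struct vma v = { 0x40000000UL, 0x40100000UL };
    printf("%d\n", hint_ok(&v, 0x3ff00000UL, 0x80000UL,  0x10000UL)); /* fits */
    printf("%d\n", hint_ok(&v, 0x3ff00000UL, 0x100000UL, 0x10000UL)); /* collides */
    return 0;
}
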
4869diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4870index 6e3ba8d..9cbb4d7 100644
4871--- a/arch/arm/mm/mmu.c
4872+++ b/arch/arm/mm/mmu.c
4873@@ -40,6 +40,22 @@
4874 #include "mm.h"
4875 #include "tcm.h"
4876
4877+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4878+void modify_domain(unsigned int dom, unsigned int type)
4879+{
4880+ struct thread_info *thread = current_thread_info();
4881+ unsigned int domain = thread->cpu_domain;
4882+ /*
4883+ * DOMAIN_MANAGER might be defined to some other value,
4884+ * use the arch-defined constant
4885+ */
4886+ domain &= ~domain_val(dom, 3);
4887+ thread->cpu_domain = domain | domain_val(dom, type);
4888+ set_domain(thread->cpu_domain);
4889+}
4890+EXPORT_SYMBOL(modify_domain);
4891+#endif
4892+
4893 /*
4894 * empty_zero_page is a special page that is used for
4895 * zero-initialized data and COW.
4896@@ -239,7 +255,15 @@ __setup("noalign", noalign_setup);
4897 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4898 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4899
4900-static struct mem_type mem_types[] = {
4901+#ifdef CONFIG_PAX_KERNEXEC
4902+#define L_PTE_KERNEXEC L_PTE_RDONLY
4903+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4904+#else
4905+#define L_PTE_KERNEXEC L_PTE_DIRTY
4906+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4907+#endif
4908+
4909+static struct mem_type mem_types[] __read_only = {
4910 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4911 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4912 L_PTE_SHARED,
4913@@ -268,19 +292,19 @@ static struct mem_type mem_types[] = {
4914 .prot_sect = PROT_SECT_DEVICE,
4915 .domain = DOMAIN_IO,
4916 },
4917- [MT_UNCACHED] = {
4918+ [MT_UNCACHED_RW] = {
4919 .prot_pte = PROT_PTE_DEVICE,
4920 .prot_l1 = PMD_TYPE_TABLE,
4921 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4922 .domain = DOMAIN_IO,
4923 },
4924- [MT_CACHECLEAN] = {
4925- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4926+ [MT_CACHECLEAN_RO] = {
4927+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4928 .domain = DOMAIN_KERNEL,
4929 },
4930 #ifndef CONFIG_ARM_LPAE
4931- [MT_MINICLEAN] = {
4932- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4933+ [MT_MINICLEAN_RO] = {
4934+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4935 .domain = DOMAIN_KERNEL,
4936 },
4937 #endif
4938@@ -288,15 +312,15 @@ static struct mem_type mem_types[] = {
4939 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4940 L_PTE_RDONLY,
4941 .prot_l1 = PMD_TYPE_TABLE,
4942- .domain = DOMAIN_USER,
4943+ .domain = DOMAIN_VECTORS,
4944 },
4945 [MT_HIGH_VECTORS] = {
4946 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4947 L_PTE_USER | L_PTE_RDONLY,
4948 .prot_l1 = PMD_TYPE_TABLE,
4949- .domain = DOMAIN_USER,
4950+ .domain = DOMAIN_VECTORS,
4951 },
4952- [MT_MEMORY_RWX] = {
4953+ [__MT_MEMORY_RWX] = {
4954 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4955 .prot_l1 = PMD_TYPE_TABLE,
4956 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4957@@ -309,17 +333,30 @@ static struct mem_type mem_types[] = {
4958 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4959 .domain = DOMAIN_KERNEL,
4960 },
4961- [MT_ROM] = {
4962- .prot_sect = PMD_TYPE_SECT,
4963+ [MT_MEMORY_RX] = {
4964+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4965+ .prot_l1 = PMD_TYPE_TABLE,
4966+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4967+ .domain = DOMAIN_KERNEL,
4968+ },
4969+ [MT_ROM_RX] = {
4970+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4971 .domain = DOMAIN_KERNEL,
4972 },
4973- [MT_MEMORY_RWX_NONCACHED] = {
4974+ [MT_MEMORY_RW_NONCACHED] = {
4975 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4976 L_PTE_MT_BUFFERABLE,
4977 .prot_l1 = PMD_TYPE_TABLE,
4978 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4979 .domain = DOMAIN_KERNEL,
4980 },
4981+ [MT_MEMORY_RX_NONCACHED] = {
4982+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4983+ L_PTE_MT_BUFFERABLE,
4984+ .prot_l1 = PMD_TYPE_TABLE,
4985+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4986+ .domain = DOMAIN_KERNEL,
4987+ },
4988 [MT_MEMORY_RW_DTCM] = {
4989 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4990 L_PTE_XN,
4991@@ -327,9 +364,10 @@ static struct mem_type mem_types[] = {
4992 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4993 .domain = DOMAIN_KERNEL,
4994 },
4995- [MT_MEMORY_RWX_ITCM] = {
4996- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4997+ [MT_MEMORY_RX_ITCM] = {
4998+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4999 .prot_l1 = PMD_TYPE_TABLE,
5000+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
5001 .domain = DOMAIN_KERNEL,
5002 },
5003 [MT_MEMORY_RW_SO] = {
5004@@ -547,9 +585,14 @@ static void __init build_mem_type_table(void)
5005 * Mark cache clean areas and XIP ROM read only
5006 * from SVC mode and no access from userspace.
5007 */
5008- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5009- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5010- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5011+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5012+#ifdef CONFIG_PAX_KERNEXEC
5013+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5014+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5015+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5016+#endif
5017+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5018+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5019 #endif
5020
5021 /*
5022@@ -566,13 +609,17 @@ static void __init build_mem_type_table(void)
5023 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
5024 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
5025 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
5026- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
5027- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
5028+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
5029+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
5030 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
5031 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
5032+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
5033+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
5034 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
5035- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
5036- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
5037+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
5038+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
5039+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
5040+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
5041 }
5042 }
5043
5044@@ -583,15 +630,20 @@ static void __init build_mem_type_table(void)
5045 if (cpu_arch >= CPU_ARCH_ARMv6) {
5046 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
5047 /* Non-cacheable Normal is XCB = 001 */
5048- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
5049+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
5050+ PMD_SECT_BUFFERED;
5051+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
5052 PMD_SECT_BUFFERED;
5053 } else {
5054 /* For both ARMv6 and non-TEX-remapping ARMv7 */
5055- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
5056+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
5057+ PMD_SECT_TEX(1);
5058+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
5059 PMD_SECT_TEX(1);
5060 }
5061 } else {
5062- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
5063+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
5064+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
5065 }
5066
5067 #ifdef CONFIG_ARM_LPAE
5068@@ -607,6 +659,8 @@ static void __init build_mem_type_table(void)
5069 vecs_pgprot |= PTE_EXT_AF;
5070 #endif
5071
5072+ user_pgprot |= __supported_pte_mask;
5073+
5074 for (i = 0; i < 16; i++) {
5075 pteval_t v = pgprot_val(protection_map[i]);
5076 protection_map[i] = __pgprot(v | user_pgprot);
5077@@ -624,21 +678,24 @@ static void __init build_mem_type_table(void)
5078
5079 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
5080 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
5081- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
5082- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
5083+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
5084+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
5085 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
5086 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
5087+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
5088+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
5089 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
5090- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
5091- mem_types[MT_ROM].prot_sect |= cp->pmd;
5092+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
5093+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
5094+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
5095
5096 switch (cp->pmd) {
5097 case PMD_SECT_WT:
5098- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
5099+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
5100 break;
5101 case PMD_SECT_WB:
5102 case PMD_SECT_WBWA:
5103- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
5104+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
5105 break;
5106 }
5107 pr_info("Memory policy: %sData cache %s\n",
5108@@ -856,7 +913,7 @@ static void __init create_mapping(struct map_desc *md)
5109 return;
5110 }
5111
5112- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
5113+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
5114 md->virtual >= PAGE_OFFSET &&
5115 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
5116 printk(KERN_WARNING "BUG: mapping for 0x%08llx"
5117@@ -1224,18 +1281,15 @@ void __init arm_mm_memblock_reserve(void)
5118 * called function. This means you can't use any function or debugging
5119 * method which may touch any device, otherwise the kernel _will_ crash.
5120 */
5121+
5122+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
5123+
5124 static void __init devicemaps_init(const struct machine_desc *mdesc)
5125 {
5126 struct map_desc map;
5127 unsigned long addr;
5128- void *vectors;
5129
5130- /*
5131- * Allocate the vector page early.
5132- */
5133- vectors = early_alloc(PAGE_SIZE * 2);
5134-
5135- early_trap_init(vectors);
5136+ early_trap_init(&vectors);
5137
5138 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
5139 pmd_clear(pmd_off_k(addr));
5140@@ -1248,7 +1302,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
5141 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
5142 map.virtual = MODULES_VADDR;
5143 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
5144- map.type = MT_ROM;
5145+ map.type = MT_ROM_RX;
5146 create_mapping(&map);
5147 #endif
5148
5149@@ -1259,14 +1313,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
5150 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
5151 map.virtual = FLUSH_BASE;
5152 map.length = SZ_1M;
5153- map.type = MT_CACHECLEAN;
5154+ map.type = MT_CACHECLEAN_RO;
5155 create_mapping(&map);
5156 #endif
5157 #ifdef FLUSH_BASE_MINICACHE
5158 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
5159 map.virtual = FLUSH_BASE_MINICACHE;
5160 map.length = SZ_1M;
5161- map.type = MT_MINICLEAN;
5162+ map.type = MT_MINICLEAN_RO;
5163 create_mapping(&map);
5164 #endif
5165
5166@@ -1275,7 +1329,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
5167 * location (0xffff0000). If we aren't using high-vectors, also
5168 * create a mapping at the low-vectors virtual address.
5169 */
5170- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
5171+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
5172 map.virtual = 0xffff0000;
5173 map.length = PAGE_SIZE;
5174 #ifdef CONFIG_KUSER_HELPERS
5175@@ -1335,8 +1389,10 @@ static void __init kmap_init(void)
5176 static void __init map_lowmem(void)
5177 {
5178 struct memblock_region *reg;
5179+#ifndef CONFIG_PAX_KERNEXEC
5180 unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
5181 unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
5182+#endif
5183
5184 /* Map all the lowmem memory banks. */
5185 for_each_memblock(memory, reg) {
5186@@ -1349,11 +1405,48 @@ static void __init map_lowmem(void)
5187 if (start >= end)
5188 break;
5189
5190+#ifdef CONFIG_PAX_KERNEXEC
5191+ map.pfn = __phys_to_pfn(start);
5192+ map.virtual = __phys_to_virt(start);
5193+ map.length = end - start;
5194+
5195+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
5196+ struct map_desc kernel;
5197+ struct map_desc initmap;
5198+
5199+ /* when freeing initmem we will make this RW */
5200+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
5201+ initmap.virtual = (unsigned long)__init_begin;
5202+ initmap.length = _sdata - __init_begin;
5203+ initmap.type = __MT_MEMORY_RWX;
5204+ create_mapping(&initmap);
5205+
5206+ /* when freeing initmem we will make this RX */
5207+ kernel.pfn = __phys_to_pfn(__pa(_stext));
5208+ kernel.virtual = (unsigned long)_stext;
5209+ kernel.length = __init_begin - _stext;
5210+ kernel.type = __MT_MEMORY_RWX;
5211+ create_mapping(&kernel);
5212+
5213+ if (map.virtual < (unsigned long)_stext) {
5214+ map.length = (unsigned long)_stext - map.virtual;
5215+ map.type = __MT_MEMORY_RWX;
5216+ create_mapping(&map);
5217+ }
5218+
5219+ map.pfn = __phys_to_pfn(__pa(_sdata));
5220+ map.virtual = (unsigned long)_sdata;
5221+ map.length = end - __pa(_sdata);
5222+ }
5223+
5224+ map.type = MT_MEMORY_RW;
5225+ create_mapping(&map);
5226+#else
5227 if (end < kernel_x_start || start >= kernel_x_end) {
5228 map.pfn = __phys_to_pfn(start);
5229 map.virtual = __phys_to_virt(start);
5230 map.length = end - start;
5231- map.type = MT_MEMORY_RWX;
5232+ map.type = __MT_MEMORY_RWX;
5233
5234 create_mapping(&map);
5235 } else {
5236@@ -1370,7 +1463,7 @@ static void __init map_lowmem(void)
5237 map.pfn = __phys_to_pfn(kernel_x_start);
5238 map.virtual = __phys_to_virt(kernel_x_start);
5239 map.length = kernel_x_end - kernel_x_start;
5240- map.type = MT_MEMORY_RWX;
5241+ map.type = __MT_MEMORY_RWX;
5242
5243 create_mapping(&map);
5244
5245@@ -1383,6 +1476,7 @@ static void __init map_lowmem(void)
5246 create_mapping(&map);
5247 }
5248 }
5249+#endif
5250 }
5251 }
5252
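Taken together, the map_lowmem() changes above split the single RWX lowmem mapping into three regions under KERNEXEC. A sketch of the resulting layout and of the containment test used in the hunk (illustrative only, assuming the usual <asm/sections.h> symbols; not the patch's exact code):

/* Sketch: net effect of the KERNEXEC carve-up in map_lowmem() above.
 *
 *   [_stext, __init_begin)  __MT_MEMORY_RWX  -> remapped RX after init
 *   [__init_begin, _sdata)  __MT_MEMORY_RWX  -> remapped RW after init
 *   [_sdata, bank end)      MT_MEMORY_RW     (data: never executable)
 *
 * The special-casing only triggers for the memblock that contains the
 * whole kernel image, i.e. when this holds: */
static int bank_contains_kernel(unsigned long virt, unsigned long length)
{
	extern char _stext[], _end[];	/* from <asm/sections.h> */

	return virt <= (unsigned long)_stext &&
	       (unsigned long)_end < virt + length;
}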
5253diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
5254index 5b217f4..c23f40e 100644
5255--- a/arch/arm/plat-iop/setup.c
5256+++ b/arch/arm/plat-iop/setup.c
5257@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
5258 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
5259 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
5260 .length = IOP3XX_PERIPHERAL_SIZE,
5261- .type = MT_UNCACHED,
5262+ .type = MT_UNCACHED_RW,
5263 },
5264 };
5265
5266diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
5267index a5bc92d..0bb4730 100644
5268--- a/arch/arm/plat-omap/sram.c
5269+++ b/arch/arm/plat-omap/sram.c
5270@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
5271 * Looks like we need to preserve some bootloader code at the
5272 * beginning of SRAM for jumping to flash for reboot to work...
5273 */
5274+ pax_open_kernel();
5275 memset_io(omap_sram_base + omap_sram_skip, 0,
5276 omap_sram_size - omap_sram_skip);
5277+ pax_close_kernel();
5278 }
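pax_open_kernel()/pax_close_kernel() bracket the rare legitimate writes into memory that KERNEXEC keeps read-only; on non-KERNEXEC builds they compile away to no-ops. The usage pattern, as a sketch with a hypothetical caller:

/* Sketch of the open/close pattern used above: keep the writable
 * window as small as possible around the one offending store. */
static void patch_ro_word(unsigned long *ro_addr, unsigned long val)
{
	pax_open_kernel();	/* lift kernel write protection (KERNEXEC) */
	*ro_addr = val;		/* the actual write */
	pax_close_kernel();	/* immediately restore read-only */
}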
5279diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
5280index ce6d763..cfea917 100644
5281--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
5282+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
5283@@ -47,7 +47,7 @@ struct samsung_dma_ops {
5284 int (*started)(unsigned ch);
5285 int (*flush)(unsigned ch);
5286 int (*stop)(unsigned ch);
5287-};
5288+} __no_const;
5289
5290 extern void *samsung_dmadev_get_ops(void);
5291 extern void *s3c_dma_get_ops(void);
5292diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
5293index 6389d60..b5d3bdd 100644
5294--- a/arch/arm64/include/asm/barrier.h
5295+++ b/arch/arm64/include/asm/barrier.h
5296@@ -41,7 +41,7 @@
5297 do { \
5298 compiletime_assert_atomic_type(*p); \
5299 barrier(); \
5300- ACCESS_ONCE(*p) = (v); \
5301+ ACCESS_ONCE_RW(*p) = (v); \
5302 } while (0)
5303
5304 #define smp_load_acquire(p) \
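The _RW suffix exists because PaX redefines ACCESS_ONCE() with a const-qualified cast, which rejects stores at compile time while leaving loads untouched. Roughly (simplified, minus the compile-time type checks):

/* Approximate PaX definitions (include/linux/compiler.h in the patched
 * tree; simplified): */
#define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))	/* loads */
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))		/* stores */

/* Hence the store in smp_store_release() must use the RW form: */
static int ready;
static void publish(void) { ACCESS_ONCE_RW(ready) = 1; }
static int  consume(void) { return ACCESS_ONCE(ready); }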
5305diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
5306index 3bf8f4e..5dd5491 100644
5307--- a/arch/arm64/include/asm/uaccess.h
5308+++ b/arch/arm64/include/asm/uaccess.h
5309@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
5310 flag; \
5311 })
5312
5313+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5314 #define access_ok(type, addr, size) __range_ok(addr, size)
5315 #define user_addr_max get_fs
5316
5317diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
5318index c3a58a1..78fbf54 100644
5319--- a/arch/avr32/include/asm/cache.h
5320+++ b/arch/avr32/include/asm/cache.h
5321@@ -1,8 +1,10 @@
5322 #ifndef __ASM_AVR32_CACHE_H
5323 #define __ASM_AVR32_CACHE_H
5324
5325+#include <linux/const.h>
5326+
5327 #define L1_CACHE_SHIFT 5
5328-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5329+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5330
5331 /*
5332 * Memory returned by kmalloc() may be used for DMA, so we must make
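These cache.h conversions all rely on _AC() from <linux/const.h>, which attaches the UL suffix only when the header is compiled as C, so assembly includers are unaffected while L1_CACHE_BYTES gains an unsigned long type for size arithmetic. Abridged:

/* <uapi/linux/const.h>, abridged: */
#ifdef __ASSEMBLY__
#define _AC(X, Y)  X		/* asm: bare constant, no C suffix */
#else
#define __AC(X, Y) (X##Y)
#define _AC(X, Y)  __AC(X, Y)	/* C: pastes the suffix, e.g. 1UL */
#endif

/* So (_AC(1,UL) << 5) evaluates to 32UL, not the plain int 32. */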
5333diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
5334index d232888..87c8df1 100644
5335--- a/arch/avr32/include/asm/elf.h
5336+++ b/arch/avr32/include/asm/elf.h
5337@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
5338 the loader. We need to make sure that it is out of the way of the program
5339 that it will "exec", and that there is sufficient room for the brk. */
5340
5341-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
5342+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
5343
5344+#ifdef CONFIG_PAX_ASLR
5345+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
5346+
5347+#define PAX_DELTA_MMAP_LEN 15
5348+#define PAX_DELTA_STACK_LEN 15
5349+#endif
5350
5351 /* This yields a mask that user programs can use to figure out what
5352 instruction set this CPU supports. This could be done in user space,
5353diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
5354index 479330b..53717a8 100644
5355--- a/arch/avr32/include/asm/kmap_types.h
5356+++ b/arch/avr32/include/asm/kmap_types.h
5357@@ -2,9 +2,9 @@
5358 #define __ASM_AVR32_KMAP_TYPES_H
5359
5360 #ifdef CONFIG_DEBUG_HIGHMEM
5361-# define KM_TYPE_NR 29
5362+# define KM_TYPE_NR 30
5363 #else
5364-# define KM_TYPE_NR 14
5365+# define KM_TYPE_NR 15
5366 #endif
5367
5368 #endif /* __ASM_AVR32_KMAP_TYPES_H */
5369diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
5370index 0eca933..eb78c7b 100644
5371--- a/arch/avr32/mm/fault.c
5372+++ b/arch/avr32/mm/fault.c
5373@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
5374
5375 int exception_trace = 1;
5376
5377+#ifdef CONFIG_PAX_PAGEEXEC
5378+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5379+{
5380+ unsigned long i;
5381+
5382+ printk(KERN_ERR "PAX: bytes at PC: ");
5383+ for (i = 0; i < 20; i++) {
5384+ unsigned char c;
5385+ if (get_user(c, (unsigned char *)pc+i))
5386+ printk(KERN_CONT "???????? ");
5387+ else
5388+ printk(KERN_CONT "%02x ", c);
5389+ }
5390+ printk("\n");
5391+}
5392+#endif
5393+
5394 /*
5395 * This routine handles page faults. It determines the address and the
5396 * problem, and then passes it off to one of the appropriate routines.
5397@@ -176,6 +193,16 @@ bad_area:
5398 up_read(&mm->mmap_sem);
5399
5400 if (user_mode(regs)) {
5401+
5402+#ifdef CONFIG_PAX_PAGEEXEC
5403+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
5404+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
5405+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
5406+ do_group_exit(SIGKILL);
5407+ }
5408+ }
5409+#endif
5410+
5411 if (exception_trace && printk_ratelimit())
5412 printk("%s%s[%d]: segfault at %08lx pc %08lx "
5413 "sp %08lx ecr %lu\n",
5414diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
5415index 568885a..f8008df 100644
5416--- a/arch/blackfin/include/asm/cache.h
5417+++ b/arch/blackfin/include/asm/cache.h
5418@@ -7,6 +7,7 @@
5419 #ifndef __ARCH_BLACKFIN_CACHE_H
5420 #define __ARCH_BLACKFIN_CACHE_H
5421
5422+#include <linux/const.h>
5423 #include <linux/linkage.h> /* for asmlinkage */
5424
5425 /*
5426@@ -14,7 +15,7 @@
5427 * Blackfin loads 32 bytes for cache
5428 */
5429 #define L1_CACHE_SHIFT 5
5430-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5431+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5432 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5433
5434 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5435diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
5436index aea2718..3639a60 100644
5437--- a/arch/cris/include/arch-v10/arch/cache.h
5438+++ b/arch/cris/include/arch-v10/arch/cache.h
5439@@ -1,8 +1,9 @@
5440 #ifndef _ASM_ARCH_CACHE_H
5441 #define _ASM_ARCH_CACHE_H
5442
5443+#include <linux/const.h>
5444 /* Etrax 100LX have 32-byte cache-lines. */
5445-#define L1_CACHE_BYTES 32
5446 #define L1_CACHE_SHIFT 5
5447+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5448
5449 #endif /* _ASM_ARCH_CACHE_H */
5450diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
5451index 7caf25d..ee65ac5 100644
5452--- a/arch/cris/include/arch-v32/arch/cache.h
5453+++ b/arch/cris/include/arch-v32/arch/cache.h
5454@@ -1,11 +1,12 @@
5455 #ifndef _ASM_CRIS_ARCH_CACHE_H
5456 #define _ASM_CRIS_ARCH_CACHE_H
5457
5458+#include <linux/const.h>
5459 #include <arch/hwregs/dma.h>
5460
5461 /* A cache-line is 32 bytes. */
5462-#define L1_CACHE_BYTES 32
5463 #define L1_CACHE_SHIFT 5
5464+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5465
5466 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5467
5468diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
5469index f6c3a16..cd422a4 100644
5470--- a/arch/frv/include/asm/atomic.h
5471+++ b/arch/frv/include/asm/atomic.h
5472@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
5473 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
5474 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
5475
5476+#define atomic64_read_unchecked(v) atomic64_read(v)
5477+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5478+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5479+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5480+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5481+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5482+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5483+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5484+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5485+
5486 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5487 {
5488 int c, old;
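frv has no PAX_REFCOUNT instrumentation, so the _unchecked aliases above exist purely to preserve the cross-arch API: generic code marks counters that may legitimately wrap, and only the checked forms trap on overflow where the arch supports it. A hypothetical user:

/* Hypothetical example: a statistics counter where wraparound is
 * harmless, so the unchecked (non-trapping) form is the right one.
 * On frv both forms compile to the same code; on REFCOUNT-capable
 * arches only the checked atomic64_inc() would trap on overflow. */
static atomic64_unchecked_t rx_packets = ATOMIC64_INIT(0);

static void count_rx(void)
{
	atomic64_inc_unchecked(&rx_packets);
}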
5489diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5490index 2797163..c2a401d 100644
5491--- a/arch/frv/include/asm/cache.h
5492+++ b/arch/frv/include/asm/cache.h
5493@@ -12,10 +12,11 @@
5494 #ifndef __ASM_CACHE_H
5495 #define __ASM_CACHE_H
5496
5497+#include <linux/const.h>
5498
5499 /* bytes per L1 cache line */
5500 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5501-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5502+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5503
5504 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5505 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5506diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5507index 43901f2..0d8b865 100644
5508--- a/arch/frv/include/asm/kmap_types.h
5509+++ b/arch/frv/include/asm/kmap_types.h
5510@@ -2,6 +2,6 @@
5511 #ifndef _ASM_KMAP_TYPES_H
5512 #define _ASM_KMAP_TYPES_H
5513
5514-#define KM_TYPE_NR 17
5515+#define KM_TYPE_NR 18
5516
5517 #endif
5518diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5519index 836f147..4cf23f5 100644
5520--- a/arch/frv/mm/elf-fdpic.c
5521+++ b/arch/frv/mm/elf-fdpic.c
5522@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5523 {
5524 struct vm_area_struct *vma;
5525 struct vm_unmapped_area_info info;
5526+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5527
5528 if (len > TASK_SIZE)
5529 return -ENOMEM;
5530@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5531 if (addr) {
5532 addr = PAGE_ALIGN(addr);
5533 vma = find_vma(current->mm, addr);
5534- if (TASK_SIZE - len >= addr &&
5535- (!vma || addr + len <= vma->vm_start))
5536+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5537 goto success;
5538 }
5539
5540@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5541 info.high_limit = (current->mm->start_stack - 0x00200000);
5542 info.align_mask = 0;
5543 info.align_offset = 0;
5544+ info.threadstack_offset = offset;
5545 addr = vm_unmapped_area(&info);
5546 if (!(addr & ~PAGE_MASK))
5547 goto success;
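check_heap_stack_gap() centralizes the old "!vma || addr + len <= vma->vm_start" test and additionally enforces a gap below growing-down mappings, fed by the randomized value passed in via info.threadstack_offset. Simplified semantics (a sketch; the real helper also honors the heap_stack_gap sysctl):

/* Sketch of what the replacement check enforces: the candidate range
 * must end below the next vma, and if that vma is a stack the
 * randomized offset must also fit in between. */
static int gap_ok(const struct vm_area_struct *vma, unsigned long addr,
		  unsigned long len, unsigned long offset)
{
	if (!vma)
		return 1;
	if (addr + len > vma->vm_start)
		return 0;
	if (vma->vm_flags & VM_GROWSDOWN)	/* stack: keep a gap */
		return offset <= vma->vm_start - addr - len;
	return 1;
}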
5548diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5549index f4ca594..adc72fd6 100644
5550--- a/arch/hexagon/include/asm/cache.h
5551+++ b/arch/hexagon/include/asm/cache.h
5552@@ -21,9 +21,11 @@
5553 #ifndef __ASM_CACHE_H
5554 #define __ASM_CACHE_H
5555
5556+#include <linux/const.h>
5557+
5558 /* Bytes per L1 cache line */
5559-#define L1_CACHE_SHIFT (5)
5560-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5561+#define L1_CACHE_SHIFT 5
5562+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5563
5564 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
5565 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
5566diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5567index 2f3abcf..e63c7fa 100644
5568--- a/arch/ia64/Kconfig
5569+++ b/arch/ia64/Kconfig
5570@@ -547,6 +547,7 @@ source "drivers/sn/Kconfig"
5571 config KEXEC
5572 bool "kexec system call"
5573 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5574+ depends on !GRKERNSEC_KMEM
5575 help
5576 kexec is a system call that implements the ability to shutdown your
5577 current kernel, and to start another kernel. It is like a reboot
5578diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5579index 0f8bf48..40ea950 100644
5580--- a/arch/ia64/include/asm/atomic.h
5581+++ b/arch/ia64/include/asm/atomic.h
5582@@ -209,4 +209,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5583 #define atomic64_inc(v) atomic64_add(1, (v))
5584 #define atomic64_dec(v) atomic64_sub(1, (v))
5585
5586+#define atomic64_read_unchecked(v) atomic64_read(v)
5587+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5588+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5589+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5590+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5591+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5592+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5593+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5594+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5595+
5596 #endif /* _ASM_IA64_ATOMIC_H */
5597diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5598index a48957c..e097b56 100644
5599--- a/arch/ia64/include/asm/barrier.h
5600+++ b/arch/ia64/include/asm/barrier.h
5601@@ -67,7 +67,7 @@
5602 do { \
5603 compiletime_assert_atomic_type(*p); \
5604 barrier(); \
5605- ACCESS_ONCE(*p) = (v); \
5606+ ACCESS_ONCE_RW(*p) = (v); \
5607 } while (0)
5608
5609 #define smp_load_acquire(p) \
5610diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5611index 988254a..e1ee885 100644
5612--- a/arch/ia64/include/asm/cache.h
5613+++ b/arch/ia64/include/asm/cache.h
5614@@ -1,6 +1,7 @@
5615 #ifndef _ASM_IA64_CACHE_H
5616 #define _ASM_IA64_CACHE_H
5617
5618+#include <linux/const.h>
5619
5620 /*
5621 * Copyright (C) 1998-2000 Hewlett-Packard Co
5622@@ -9,7 +10,7 @@
5623
5624 /* Bytes per L1 (data) cache line. */
5625 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5626-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5627+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5628
5629 #ifdef CONFIG_SMP
5630 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5631diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5632index 5a83c5c..4d7f553 100644
5633--- a/arch/ia64/include/asm/elf.h
5634+++ b/arch/ia64/include/asm/elf.h
5635@@ -42,6 +42,13 @@
5636 */
5637 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5638
5639+#ifdef CONFIG_PAX_ASLR
5640+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5641+
5642+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5643+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5644+#endif
5645+
5646 #define PT_IA_64_UNWIND 0x70000001
5647
5648 /* IA-64 relocations: */
5649diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5650index 5767cdf..7462574 100644
5651--- a/arch/ia64/include/asm/pgalloc.h
5652+++ b/arch/ia64/include/asm/pgalloc.h
5653@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5654 pgd_val(*pgd_entry) = __pa(pud);
5655 }
5656
5657+static inline void
5658+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5659+{
5660+ pgd_populate(mm, pgd_entry, pud);
5661+}
5662+
5663 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5664 {
5665 return quicklist_alloc(0, GFP_KERNEL, NULL);
5666@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5667 pud_val(*pud_entry) = __pa(pmd);
5668 }
5669
5670+static inline void
5671+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5672+{
5673+ pud_populate(mm, pud_entry, pmd);
5674+}
5675+
5676 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5677 {
5678 return quicklist_alloc(0, GFP_KERNEL, NULL);
5679diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5680index 7935115..c0eca6a 100644
5681--- a/arch/ia64/include/asm/pgtable.h
5682+++ b/arch/ia64/include/asm/pgtable.h
5683@@ -12,7 +12,7 @@
5684 * David Mosberger-Tang <davidm@hpl.hp.com>
5685 */
5686
5687-
5688+#include <linux/const.h>
5689 #include <asm/mman.h>
5690 #include <asm/page.h>
5691 #include <asm/processor.h>
5692@@ -142,6 +142,17 @@
5693 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5694 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5695 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5696+
5697+#ifdef CONFIG_PAX_PAGEEXEC
5698+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5699+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5700+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5701+#else
5702+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5703+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5704+# define PAGE_COPY_NOEXEC PAGE_COPY
5705+#endif
5706+
5707 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5708 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5709 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
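On ia64 the chosen AR encodings for the read-only and copy protections are already non-executable, so the _NOEXEC variants above are defined identically to their base forms; they exist so arch-independent PAGEEXEC code can reference them uniformly. An illustrative selection helper (not from the patch):

/* Illustrative: how a user page protection would be picked once the
 * NOEXEC variants are available. */
static pgprot_t prot_for(unsigned long vm_flags)
{
	if (vm_flags & VM_EXEC)		/* executable mappings keep X */
		return (vm_flags & VM_WRITE) ? PAGE_SHARED : PAGE_COPY_EXEC;
	return (vm_flags & VM_WRITE) ? PAGE_SHARED_NOEXEC
				     : PAGE_READONLY_NOEXEC;
}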
5710diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5711index 45698cd..e8e2dbc 100644
5712--- a/arch/ia64/include/asm/spinlock.h
5713+++ b/arch/ia64/include/asm/spinlock.h
5714@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5715 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5716
5717 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5718- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5719+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5720 }
5721
5722 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5723diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5724index 449c8c0..3d4b1e9 100644
5725--- a/arch/ia64/include/asm/uaccess.h
5726+++ b/arch/ia64/include/asm/uaccess.h
5727@@ -70,6 +70,7 @@
5728 && ((segment).seg == KERNEL_DS.seg \
5729 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5730 })
5731+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5732 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5733
5734 /*
5735@@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5736 static inline unsigned long
5737 __copy_to_user (void __user *to, const void *from, unsigned long count)
5738 {
5739+ if (count > INT_MAX)
5740+ return count;
5741+
5742+ if (!__builtin_constant_p(count))
5743+ check_object_size(from, count, true);
5744+
5745 return __copy_user(to, (__force void __user *) from, count);
5746 }
5747
5748 static inline unsigned long
5749 __copy_from_user (void *to, const void __user *from, unsigned long count)
5750 {
5751+ if (count > INT_MAX)
5752+ return count;
5753+
5754+ if (!__builtin_constant_p(count))
5755+ check_object_size(to, count, false);
5756+
5757 return __copy_user((__force void __user *) to, from, count);
5758 }
5759
5760@@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5761 ({ \
5762 void __user *__cu_to = (to); \
5763 const void *__cu_from = (from); \
5764- long __cu_len = (n); \
5765+ unsigned long __cu_len = (n); \
5766 \
5767- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5768+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5769+ if (!__builtin_constant_p(n)) \
5770+ check_object_size(__cu_from, __cu_len, true); \
5771 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5772+ } \
5773 __cu_len; \
5774 })
5775
5776@@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5777 ({ \
5778 void *__cu_to = (to); \
5779 const void __user *__cu_from = (from); \
5780- long __cu_len = (n); \
5781+ unsigned long __cu_len = (n); \
5782 \
5783 __chk_user_ptr(__cu_from); \
5784- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5785+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5786+ if (!__builtin_constant_p(n)) \
5787+ check_object_size(__cu_to, __cu_len, false); \
5788 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5789+ } \
5790 __cu_len; \
5791 })
5792
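The added check_object_size() calls are the PAX_USERCOPY hook: for sizes that are not compile-time constants, it verifies the kernel side of the copy stays within a single slab object or the current stack frame before any bytes move. A hedged usage sketch (hypothetical ioctl-style copy-out):

/* Hypothetical caller showing what the instrumented __copy_to_user()
 * now does: with a runtime "count", check_object_size(buf, count, true)
 * runs first and kills the task if [buf, buf+count) is not contained
 * in one valid object. */
static long copy_name_out(void __user *dst, unsigned long count)
{
	char buf[64];

	if (count > sizeof(buf))
		return -EINVAL;
	memset(buf, 0, sizeof(buf));
	strlcpy(buf, "example", sizeof(buf));	/* hypothetical payload */
	return __copy_to_user(dst, buf, count);	/* checked copy */
}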
5793diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5794index 24603be..948052d 100644
5795--- a/arch/ia64/kernel/module.c
5796+++ b/arch/ia64/kernel/module.c
5797@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
5798 void
5799 module_free (struct module *mod, void *module_region)
5800 {
5801- if (mod && mod->arch.init_unw_table &&
5802- module_region == mod->module_init) {
5803+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
5804 unw_remove_unwind_table(mod->arch.init_unw_table);
5805 mod->arch.init_unw_table = NULL;
5806 }
5807@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5808 }
5809
5810 static inline int
5811+in_init_rx (const struct module *mod, uint64_t addr)
5812+{
5813+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5814+}
5815+
5816+static inline int
5817+in_init_rw (const struct module *mod, uint64_t addr)
5818+{
5819+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5820+}
5821+
5822+static inline int
5823 in_init (const struct module *mod, uint64_t addr)
5824 {
5825- return addr - (uint64_t) mod->module_init < mod->init_size;
5826+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5827+}
5828+
5829+static inline int
5830+in_core_rx (const struct module *mod, uint64_t addr)
5831+{
5832+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5833+}
5834+
5835+static inline int
5836+in_core_rw (const struct module *mod, uint64_t addr)
5837+{
5838+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5839 }
5840
5841 static inline int
5842 in_core (const struct module *mod, uint64_t addr)
5843 {
5844- return addr - (uint64_t) mod->module_core < mod->core_size;
5845+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5846 }
5847
5848 static inline int
5849@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5850 break;
5851
5852 case RV_BDREL:
5853- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5854+ if (in_init_rx(mod, val))
5855+ val -= (uint64_t) mod->module_init_rx;
5856+ else if (in_init_rw(mod, val))
5857+ val -= (uint64_t) mod->module_init_rw;
5858+ else if (in_core_rx(mod, val))
5859+ val -= (uint64_t) mod->module_core_rx;
5860+ else if (in_core_rw(mod, val))
5861+ val -= (uint64_t) mod->module_core_rw;
5862 break;
5863
5864 case RV_LTV:
5865@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5866 * addresses have been selected...
5867 */
5868 uint64_t gp;
5869- if (mod->core_size > MAX_LTOFF)
5870+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5871 /*
5872 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5873 * at the end of the module.
5874 */
5875- gp = mod->core_size - MAX_LTOFF / 2;
5876+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5877 else
5878- gp = mod->core_size / 2;
5879- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5880+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5881+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5882 mod->arch.gp = gp;
5883 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5884 }
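With the module image split into RX and RW halves, a single base/size pair no longer describes it, hence the four range predicates above. The layout, and the unsigned-wrap trick the predicates rely on:

/* Module layout after the RX/RW split (conceptual):
 *
 *   module_core_rx .. +core_size_rx   text + rodata  (never writable)
 *   module_core_rw .. +core_size_rw   data + bss     (never executable)
 *   module_init_rx / module_init_rw   same split, freed after init
 *
 * Each predicate needs only one unsigned comparison: if addr < base,
 * the subtraction wraps to a huge value and the "< size" test fails. */
static inline int in_range(uint64_t addr, uint64_t base, uint64_t size)
{
	return addr - base < size;
}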
5885diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5886index c39c3cd..3c77738 100644
5887--- a/arch/ia64/kernel/palinfo.c
5888+++ b/arch/ia64/kernel/palinfo.c
5889@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5890 return NOTIFY_OK;
5891 }
5892
5893-static struct notifier_block __refdata palinfo_cpu_notifier =
5894+static struct notifier_block palinfo_cpu_notifier =
5895 {
5896 .notifier_call = palinfo_cpu_callback,
5897 .priority = 0,
5898diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5899index 41e33f8..65180b2a 100644
5900--- a/arch/ia64/kernel/sys_ia64.c
5901+++ b/arch/ia64/kernel/sys_ia64.c
5902@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5903 unsigned long align_mask = 0;
5904 struct mm_struct *mm = current->mm;
5905 struct vm_unmapped_area_info info;
5906+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5907
5908 if (len > RGN_MAP_LIMIT)
5909 return -ENOMEM;
5910@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5911 if (REGION_NUMBER(addr) == RGN_HPAGE)
5912 addr = 0;
5913 #endif
5914+
5915+#ifdef CONFIG_PAX_RANDMMAP
5916+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5917+ addr = mm->free_area_cache;
5918+ else
5919+#endif
5920+
5921 if (!addr)
5922 addr = TASK_UNMAPPED_BASE;
5923
5924@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5925 info.high_limit = TASK_SIZE;
5926 info.align_mask = align_mask;
5927 info.align_offset = 0;
5928+ info.threadstack_offset = offset;
5929 return vm_unmapped_area(&info);
5930 }
5931
5932diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5933index 84f8a52..7c76178 100644
5934--- a/arch/ia64/kernel/vmlinux.lds.S
5935+++ b/arch/ia64/kernel/vmlinux.lds.S
5936@@ -192,7 +192,7 @@ SECTIONS {
5937 /* Per-cpu data: */
5938 . = ALIGN(PERCPU_PAGE_SIZE);
5939 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5940- __phys_per_cpu_start = __per_cpu_load;
5941+ __phys_per_cpu_start = per_cpu_load;
5942 /*
5943 * ensure percpu data fits
5944 * into percpu page size
5945diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5946index 7225dad..2a7c8256 100644
5947--- a/arch/ia64/mm/fault.c
5948+++ b/arch/ia64/mm/fault.c
5949@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5950 return pte_present(pte);
5951 }
5952
5953+#ifdef CONFIG_PAX_PAGEEXEC
5954+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5955+{
5956+ unsigned long i;
5957+
5958+ printk(KERN_ERR "PAX: bytes at PC: ");
5959+ for (i = 0; i < 8; i++) {
5960+ unsigned int c;
5961+ if (get_user(c, (unsigned int *)pc+i))
5962+ printk(KERN_CONT "???????? ");
5963+ else
5964+ printk(KERN_CONT "%08x ", c);
5965+ }
5966+ printk("\n");
5967+}
5968+#endif
5969+
5970 # define VM_READ_BIT 0
5971 # define VM_WRITE_BIT 1
5972 # define VM_EXEC_BIT 2
5973@@ -151,8 +168,21 @@ retry:
5974 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5975 goto bad_area;
5976
5977- if ((vma->vm_flags & mask) != mask)
5978+ if ((vma->vm_flags & mask) != mask) {
5979+
5980+#ifdef CONFIG_PAX_PAGEEXEC
5981+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5982+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5983+ goto bad_area;
5984+
5985+ up_read(&mm->mmap_sem);
5986+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5987+ do_group_exit(SIGKILL);
5988+ }
5989+#endif
5990+
5991 goto bad_area;
5992+ }
5993
5994 /*
5995 * If for any reason at all we couldn't handle the fault, make
5996diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5997index 76069c1..c2aa816 100644
5998--- a/arch/ia64/mm/hugetlbpage.c
5999+++ b/arch/ia64/mm/hugetlbpage.c
6000@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
6001 unsigned long pgoff, unsigned long flags)
6002 {
6003 struct vm_unmapped_area_info info;
6004+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
6005
6006 if (len > RGN_MAP_LIMIT)
6007 return -ENOMEM;
6008@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
6009 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
6010 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
6011 info.align_offset = 0;
6012+ info.threadstack_offset = offset;
6013 return vm_unmapped_area(&info);
6014 }
6015
6016diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
6017index 25c3502..560dae7 100644
6018--- a/arch/ia64/mm/init.c
6019+++ b/arch/ia64/mm/init.c
6020@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
6021 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
6022 vma->vm_end = vma->vm_start + PAGE_SIZE;
6023 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
6024+
6025+#ifdef CONFIG_PAX_PAGEEXEC
6026+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
6027+ vma->vm_flags &= ~VM_EXEC;
6028+
6029+#ifdef CONFIG_PAX_MPROTECT
6030+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
6031+ vma->vm_flags &= ~VM_MAYEXEC;
6032+#endif
6033+
6034+ }
6035+#endif
6036+
6037 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6038 down_write(&current->mm->mmap_sem);
6039 if (insert_vm_struct(current->mm, vma)) {
6040diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
6041index 40b3ee9..8c2c112 100644
6042--- a/arch/m32r/include/asm/cache.h
6043+++ b/arch/m32r/include/asm/cache.h
6044@@ -1,8 +1,10 @@
6045 #ifndef _ASM_M32R_CACHE_H
6046 #define _ASM_M32R_CACHE_H
6047
6048+#include <linux/const.h>
6049+
6050 /* L1 cache line size */
6051 #define L1_CACHE_SHIFT 4
6052-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6053+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6054
6055 #endif /* _ASM_M32R_CACHE_H */
6056diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
6057index 82abd15..d95ae5d 100644
6058--- a/arch/m32r/lib/usercopy.c
6059+++ b/arch/m32r/lib/usercopy.c
6060@@ -14,6 +14,9 @@
6061 unsigned long
6062 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
6063 {
6064+ if ((long)n < 0)
6065+ return n;
6066+
6067 prefetch(from);
6068 if (access_ok(VERIFY_WRITE, to, n))
6069 __copy_user(to,from,n);
6070@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
6071 unsigned long
6072 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
6073 {
6074+ if ((long)n < 0)
6075+ return n;
6076+
6077 prefetchw(to);
6078 if (access_ok(VERIFY_READ, from, n))
6079 __copy_user_zeroing(to,from,n);
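The (long)n < 0 guard rejects sizes with the top bit set before access_ok() can be fooled by address wraparound. A hypothetical caller demonstrating the failure mode it closes:

/* Hypothetical demonstration: with n = (unsigned long)-4, "to + n"
 * wraps inside the access_ok() range check, which can then pass, and
 * __copy_user() would march across the address space.  The new guard
 * returns early with nothing copied (return value = bytes not copied). */
static unsigned long demo(void __user *to)
{
	static const char src[4] = "abc";

	return __generic_copy_to_user(to, src, (unsigned long)-4);
}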
6080diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
6081index 0395c51..5f26031 100644
6082--- a/arch/m68k/include/asm/cache.h
6083+++ b/arch/m68k/include/asm/cache.h
6084@@ -4,9 +4,11 @@
6085 #ifndef __ARCH_M68K_CACHE_H
6086 #define __ARCH_M68K_CACHE_H
6087
6088+#include <linux/const.h>
6089+
6090 /* bytes per L1 cache line */
6091 #define L1_CACHE_SHIFT 4
6092-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
6093+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6094
6095 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
6096
6097diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
6098index c7591e8..ecef036 100644
6099--- a/arch/metag/include/asm/barrier.h
6100+++ b/arch/metag/include/asm/barrier.h
6101@@ -89,7 +89,7 @@ static inline void fence(void)
6102 do { \
6103 compiletime_assert_atomic_type(*p); \
6104 smp_mb(); \
6105- ACCESS_ONCE(*p) = (v); \
6106+ ACCESS_ONCE_RW(*p) = (v); \
6107 } while (0)
6108
6109 #define smp_load_acquire(p) \
6110diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
6111index 3c52fa6..11b2ad8 100644
6112--- a/arch/metag/mm/hugetlbpage.c
6113+++ b/arch/metag/mm/hugetlbpage.c
6114@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
6115 info.high_limit = TASK_SIZE;
6116 info.align_mask = PAGE_MASK & HUGEPT_MASK;
6117 info.align_offset = 0;
6118+ info.threadstack_offset = 0;
6119 return vm_unmapped_area(&info);
6120 }
6121
6122diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
6123index 4efe96a..60e8699 100644
6124--- a/arch/microblaze/include/asm/cache.h
6125+++ b/arch/microblaze/include/asm/cache.h
6126@@ -13,11 +13,12 @@
6127 #ifndef _ASM_MICROBLAZE_CACHE_H
6128 #define _ASM_MICROBLAZE_CACHE_H
6129
6130+#include <linux/const.h>
6131 #include <asm/registers.h>
6132
6133 #define L1_CACHE_SHIFT 5
6134 /* word-granular cache in microblaze */
6135-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6136+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6137
6138 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6139
6140diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
6141index 4e238e6..7c9ed92 100644
6142--- a/arch/mips/Kconfig
6143+++ b/arch/mips/Kconfig
6144@@ -2392,6 +2392,7 @@ source "kernel/Kconfig.preempt"
6145
6146 config KEXEC
6147 bool "Kexec system call"
6148+ depends on !GRKERNSEC_KMEM
6149 help
6150 kexec is a system call that implements the ability to shutdown your
6151 current kernel, and to start another kernel. It is like a reboot
6152diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
6153index 02f2444..506969c 100644
6154--- a/arch/mips/cavium-octeon/dma-octeon.c
6155+++ b/arch/mips/cavium-octeon/dma-octeon.c
6156@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
6157 if (dma_release_from_coherent(dev, order, vaddr))
6158 return;
6159
6160- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
6161+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
6162 }
6163
6164 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
6165diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
6166index 37b2bef..02122b8 100644
6167--- a/arch/mips/include/asm/atomic.h
6168+++ b/arch/mips/include/asm/atomic.h
6169@@ -21,15 +21,39 @@
6170 #include <asm/cmpxchg.h>
6171 #include <asm/war.h>
6172
6173+#ifdef CONFIG_GENERIC_ATOMIC64
6174+#include <asm-generic/atomic64.h>
6175+#endif
6176+
6177 #define ATOMIC_INIT(i) { (i) }
6178
6179+#ifdef CONFIG_64BIT
6180+#define _ASM_EXTABLE(from, to) \
6181+" .section __ex_table,\"a\"\n" \
6182+" .dword " #from ", " #to"\n" \
6183+" .previous\n"
6184+#else
6185+#define _ASM_EXTABLE(from, to) \
6186+" .section __ex_table,\"a\"\n" \
6187+" .word " #from ", " #to"\n" \
6188+" .previous\n"
6189+#endif
6190+
6191 /*
6192 * atomic_read - read atomic variable
6193 * @v: pointer of type atomic_t
6194 *
6195 * Atomically reads the value of @v.
6196 */
6197-#define atomic_read(v) (*(volatile int *)&(v)->counter)
6198+static inline int atomic_read(const atomic_t *v)
6199+{
6200+ return (*(volatile const int *) &v->counter);
6201+}
6202+
6203+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6204+{
6205+ return (*(volatile const int *) &v->counter);
6206+}
6207
6208 /*
6209 * atomic_set - set atomic variable
6210@@ -38,7 +62,15 @@
6211 *
6212 * Atomically sets the value of @v to @i.
6213 */
6214-#define atomic_set(v, i) ((v)->counter = (i))
6215+static inline void atomic_set(atomic_t *v, int i)
6216+{
6217+ v->counter = i;
6218+}
6219+
6220+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6221+{
6222+ v->counter = i;
6223+}
6224
6225 /*
6226 * atomic_add - add integer to atomic variable
6227@@ -47,7 +79,67 @@
6228 *
6229 * Atomically adds @i to @v.
6230 */
6231-static __inline__ void atomic_add(int i, atomic_t * v)
6232+static __inline__ void atomic_add(int i, atomic_t *v)
6233+{
6234+ int temp;
6235+
6236+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6237+ __asm__ __volatile__(
6238+ " .set mips3 \n"
6239+ "1: ll %0, %1 # atomic_add \n"
6240+#ifdef CONFIG_PAX_REFCOUNT
6241+ /* Exception on overflow. */
6242+ "2: add %0, %2 \n"
6243+#else
6244+ " addu %0, %2 \n"
6245+#endif
6246+ " sc %0, %1 \n"
6247+ " beqzl %0, 1b \n"
6248+#ifdef CONFIG_PAX_REFCOUNT
6249+ "3: \n"
6250+ _ASM_EXTABLE(2b, 3b)
6251+#endif
6252+ " .set mips0 \n"
6253+ : "=&r" (temp), "+m" (v->counter)
6254+ : "Ir" (i));
6255+ } else if (kernel_uses_llsc) {
6256+ __asm__ __volatile__(
6257+ " .set mips3 \n"
6258+ "1: ll %0, %1 # atomic_add \n"
6259+#ifdef CONFIG_PAX_REFCOUNT
6260+ /* Exception on overflow. */
6261+ "2: add %0, %2 \n"
6262+#else
6263+ " addu %0, %2 \n"
6264+#endif
6265+ " sc %0, %1 \n"
6266+ " beqz %0, 1b \n"
6267+#ifdef CONFIG_PAX_REFCOUNT
6268+ "3: \n"
6269+ _ASM_EXTABLE(2b, 3b)
6270+#endif
6271+ " .set mips0 \n"
6272+ : "=&r" (temp), "+m" (v->counter)
6273+ : "Ir" (i));
6274+ } else {
6275+ unsigned long flags;
6276+
6277+ raw_local_irq_save(flags);
6278+ __asm__ __volatile__(
6279+#ifdef CONFIG_PAX_REFCOUNT
6280+ /* Exception on overflow. */
6281+ "1: add %0, %1 \n"
6282+ "2: \n"
6283+ _ASM_EXTABLE(1b, 2b)
6284+#else
6285+ " addu %0, %1 \n"
6286+#endif
6287+ : "+r" (v->counter) : "Ir" (i));
6288+ raw_local_irq_restore(flags);
6289+ }
6290+}
6291+
6292+static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6293 {
6294 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6295 int temp;
6296@@ -90,7 +182,67 @@ static __inline__ void atomic_add(int i, atomic_t * v)
6297 *
6298 * Atomically subtracts @i from @v.
6299 */
6300-static __inline__ void atomic_sub(int i, atomic_t * v)
6301+static __inline__ void atomic_sub(int i, atomic_t *v)
6302+{
6303+ int temp;
6304+
6305+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6306+ __asm__ __volatile__(
6307+ " .set mips3 \n"
6308+ "1: ll %0, %1 # atomic64_sub \n"
6309+#ifdef CONFIG_PAX_REFCOUNT
6310+ /* Exception on overflow. */
6311+ "2: sub %0, %2 \n"
6312+#else
6313+ " subu %0, %2 \n"
6314+#endif
6315+ " sc %0, %1 \n"
6316+ " beqzl %0, 1b \n"
6317+#ifdef CONFIG_PAX_REFCOUNT
6318+ "3: \n"
6319+ _ASM_EXTABLE(2b, 3b)
6320+#endif
6321+ " .set mips0 \n"
6322+ : "=&r" (temp), "+m" (v->counter)
6323+ : "Ir" (i));
6324+ } else if (kernel_uses_llsc) {
6325+ __asm__ __volatile__(
6326+ " .set mips3 \n"
6327+ "1: ll %0, %1 # atomic64_sub \n"
6328+#ifdef CONFIG_PAX_REFCOUNT
6329+ /* Exception on overflow. */
6330+ "2: sub %0, %2 \n"
6331+#else
6332+ " subu %0, %2 \n"
6333+#endif
6334+ " sc %0, %1 \n"
6335+ " beqz %0, 1b \n"
6336+#ifdef CONFIG_PAX_REFCOUNT
6337+ "3: \n"
6338+ _ASM_EXTABLE(2b, 3b)
6339+#endif
6340+ " .set mips0 \n"
6341+ : "=&r" (temp), "+m" (v->counter)
6342+ : "Ir" (i));
6343+ } else {
6344+ unsigned long flags;
6345+
6346+ raw_local_irq_save(flags);
6347+ __asm__ __volatile__(
6348+#ifdef CONFIG_PAX_REFCOUNT
6349+ /* Exception on overflow. */
6350+ "1: sub %0, %1 \n"
6351+ "2: \n"
6352+ _ASM_EXTABLE(1b, 2b)
6353+#else
6354+ " subu %0, %1 \n"
6355+#endif
6356+ : "+r" (v->counter) : "Ir" (i));
6357+ raw_local_irq_restore(flags);
6358+ }
6359+}
6360+
6361+static __inline__ void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6362 {
6363 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6364 int temp;
6365@@ -129,7 +281,93 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
6366 /*
6367 * Same as above, but return the result value
6368 */
6369-static __inline__ int atomic_add_return(int i, atomic_t * v)
6370+static __inline__ int atomic_add_return(int i, atomic_t *v)
6371+{
6372+ int result;
6373+ int temp;
6374+
6375+ smp_mb__before_llsc();
6376+
6377+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6378+ __asm__ __volatile__(
6379+ " .set mips3 \n"
6380+ "1: ll %1, %2 # atomic_add_return \n"
6381+#ifdef CONFIG_PAX_REFCOUNT
6382+ "2: add %0, %1, %3 \n"
6383+#else
6384+ " addu %0, %1, %3 \n"
6385+#endif
6386+ " sc %0, %2 \n"
6387+ " beqzl %0, 1b \n"
6388+#ifdef CONFIG_PAX_REFCOUNT
6389+ " b 4f \n"
6390+ " .set noreorder \n"
6391+ "3: b 5f \n"
6392+ " move %0, %1 \n"
6393+ " .set reorder \n"
6394+ _ASM_EXTABLE(2b, 3b)
6395+#endif
6396+ "4: addu %0, %1, %3 \n"
6397+#ifdef CONFIG_PAX_REFCOUNT
6398+ "5: \n"
6399+#endif
6400+ " .set mips0 \n"
6401+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6402+ : "Ir" (i));
6403+ } else if (kernel_uses_llsc) {
6404+ __asm__ __volatile__(
6405+ " .set mips3 \n"
6406+ "1: ll %1, %2 # atomic_add_return \n"
6407+#ifdef CONFIG_PAX_REFCOUNT
6408+ "2: add %0, %1, %3 \n"
6409+#else
6410+ " addu %0, %1, %3 \n"
6411+#endif
6412+ " sc %0, %2 \n"
6413+ " bnez %0, 4f \n"
6414+ " b 1b \n"
6415+#ifdef CONFIG_PAX_REFCOUNT
6416+ " .set noreorder \n"
6417+ "3: b 5f \n"
6418+ " move %0, %1 \n"
6419+ " .set reorder \n"
6420+ _ASM_EXTABLE(2b, 3b)
6421+#endif
6422+ "4: addu %0, %1, %3 \n"
6423+#ifdef CONFIG_PAX_REFCOUNT
6424+ "5: \n"
6425+#endif
6426+ " .set mips0 \n"
6427+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6428+ : "Ir" (i));
6429+ } else {
6430+ unsigned long flags;
6431+
6432+ raw_local_irq_save(flags);
6433+ __asm__ __volatile__(
6434+ " lw %0, %1 \n"
6435+#ifdef CONFIG_PAX_REFCOUNT
6436+ /* Exception on overflow. */
6437+ "1: add %0, %2 \n"
6438+#else
6439+ " addu %0, %2 \n"
6440+#endif
6441+ " sw %0, %1 \n"
6442+#ifdef CONFIG_PAX_REFCOUNT
6443+ /* Note: Dest reg is not modified on overflow */
6444+ "2: \n"
6445+ _ASM_EXTABLE(1b, 2b)
6446+#endif
6447+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6448+ raw_local_irq_restore(flags);
6449+ }
6450+
6451+ smp_llsc_mb();
6452+
6453+ return result;
6454+}
6455+
6456+static __inline__ int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6457 {
6458 int result;
6459
6460@@ -178,7 +416,93 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
6461 return result;
6462 }
6463
6464-static __inline__ int atomic_sub_return(int i, atomic_t * v)
6465+static __inline__ int atomic_sub_return(int i, atomic_t *v)
6466+{
6467+ int result;
6468+ int temp;
6469+
6470+ smp_mb__before_llsc();
6471+
6472+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6473+ __asm__ __volatile__(
6474+ " .set mips3 \n"
6475+ "1: ll %1, %2 # atomic_sub_return \n"
6476+#ifdef CONFIG_PAX_REFCOUNT
6477+ "2: sub %0, %1, %3 \n"
6478+#else
6479+ " subu %0, %1, %3 \n"
6480+#endif
6481+ " sc %0, %2 \n"
6482+ " beqzl %0, 1b \n"
6483+#ifdef CONFIG_PAX_REFCOUNT
6484+ " b 4f \n"
6485+ " .set noreorder \n"
6486+ "3: b 5f \n"
6487+ " move %0, %1 \n"
6488+ " .set reorder \n"
6489+ _ASM_EXTABLE(2b, 3b)
6490+#endif
6491+ "4: subu %0, %1, %3 \n"
6492+#ifdef CONFIG_PAX_REFCOUNT
6493+ "5: \n"
6494+#endif
6495+ " .set mips0 \n"
6496+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6497+ : "Ir" (i), "m" (v->counter)
6498+ : "memory");
6499+ } else if (kernel_uses_llsc) {
6500+ __asm__ __volatile__(
6501+ " .set mips3 \n"
6502+ "1: ll %1, %2 # atomic_sub_return \n"
6503+#ifdef CONFIG_PAX_REFCOUNT
6504+ "2: sub %0, %1, %3 \n"
6505+#else
6506+ " subu %0, %1, %3 \n"
6507+#endif
6508+ " sc %0, %2 \n"
6509+ " bnez %0, 4f \n"
6510+ " b 1b \n"
6511+#ifdef CONFIG_PAX_REFCOUNT
6512+ " .set noreorder \n"
6513+ "3: b 5f \n"
6514+ " move %0, %1 \n"
6515+ " .set reorder \n"
6516+ _ASM_EXTABLE(2b, 3b)
6517+#endif
6518+ "4: subu %0, %1, %3 \n"
6519+#ifdef CONFIG_PAX_REFCOUNT
6520+ "5: \n"
6521+#endif
6522+ " .set mips0 \n"
6523+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6524+ : "Ir" (i));
6525+ } else {
6526+ unsigned long flags;
6527+
6528+ raw_local_irq_save(flags);
6529+ __asm__ __volatile__(
6530+ " lw %0, %1 \n"
6531+#ifdef CONFIG_PAX_REFCOUNT
6532+ /* Exception on overflow. */
6533+ "1: sub %0, %2 \n"
6534+#else
6535+ " subu %0, %2 \n"
6536+#endif
6537+ " sw %0, %1 \n"
6538+#ifdef CONFIG_PAX_REFCOUNT
6539+ /* Note: Dest reg is not modified on overflow */
6540+ "2: \n"
6541+ _ASM_EXTABLE(1b, 2b)
6542+#endif
6543+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6544+ raw_local_irq_restore(flags);
6545+ }
6546+
6547+ smp_llsc_mb();
6548+
6549+ return result;
6550+}
6551+static __inline__ int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v)
6552 {
6553 int result;
6554
6555@@ -238,7 +562,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
6556 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6557 * The function returns the old value of @v minus @i.
6558 */
6559-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6560+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
6561 {
6562 int result;
6563
6564@@ -295,8 +619,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6565 return result;
6566 }
6567
6568-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
6569-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
6570+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6571+{
6572+ return cmpxchg(&v->counter, old, new);
6573+}
6574+
6575+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
6576+ int new)
6577+{
6578+ return cmpxchg(&(v->counter), old, new);
6579+}
6580+
6581+static inline int atomic_xchg(atomic_t *v, int new)
6582+{
6583+ return xchg(&v->counter, new);
6584+}
6585+
6586+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6587+{
6588+ return xchg(&(v->counter), new);
6589+}
6590
6591 /**
6592 * __atomic_add_unless - add unless the number is a given value
6593@@ -324,6 +666,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6594
6595 #define atomic_dec_return(v) atomic_sub_return(1, (v))
6596 #define atomic_inc_return(v) atomic_add_return(1, (v))
6597+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6598+{
6599+ return atomic_add_return_unchecked(1, v);
6600+}
6601
6602 /*
6603 * atomic_sub_and_test - subtract value from variable and test result
6604@@ -345,6 +691,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6605 * other cases.
6606 */
6607 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6608+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6609+{
6610+ return atomic_add_return_unchecked(1, v) == 0;
6611+}
6612
6613 /*
6614 * atomic_dec_and_test - decrement by 1 and test
6615@@ -369,6 +719,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6616 * Atomically increments @v by 1.
6617 */
6618 #define atomic_inc(v) atomic_add(1, (v))
6619+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
6620+{
6621+ atomic_add_unchecked(1, v);
6622+}
6623
6624 /*
6625 * atomic_dec - decrement and test
6626@@ -377,6 +731,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6627 * Atomically decrements @v by 1.
6628 */
6629 #define atomic_dec(v) atomic_sub(1, (v))
6630+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6631+{
6632+ atomic_sub_unchecked(1, v);
6633+}
6634
6635 /*
6636 * atomic_add_negative - add and test if negative
6637@@ -398,14 +756,30 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6638 * @v: pointer of type atomic64_t
6639 *
6640 */
6641-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
6642+static inline long atomic64_read(const atomic64_t *v)
6643+{
6644+ return (*(volatile const long *) &v->counter);
6645+}
6646+
6647+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6648+{
6649+ return (*(volatile const long *) &v->counter);
6650+}
6651
6652 /*
6653 * atomic64_set - set atomic variable
6654 * @v: pointer of type atomic64_t
6655 * @i: required value
6656 */
6657-#define atomic64_set(v, i) ((v)->counter = (i))
6658+static inline void atomic64_set(atomic64_t *v, long i)
6659+{
6660+ v->counter = i;
6661+}
6662+
6663+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6664+{
6665+ v->counter = i;
6666+}
6667
6668 /*
6669 * atomic64_add - add integer to atomic variable
6670@@ -414,7 +788,66 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6671 *
6672 * Atomically adds @i to @v.
6673 */
6674-static __inline__ void atomic64_add(long i, atomic64_t * v)
6675+static __inline__ void atomic64_add(long i, atomic64_t *v)
6676+{
6677+ long temp;
6678+
6679+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6680+ __asm__ __volatile__(
6681+ " .set mips3 \n"
6682+ "1: lld %0, %1 # atomic64_add \n"
6683+#ifdef CONFIG_PAX_REFCOUNT
6684+ /* Exception on overflow. */
6685+ "2: dadd %0, %2 \n"
6686+#else
6687+ " daddu %0, %2 \n"
6688+#endif
6689+ " scd %0, %1 \n"
6690+ " beqzl %0, 1b \n"
6691+#ifdef CONFIG_PAX_REFCOUNT
6692+ "3: \n"
6693+ _ASM_EXTABLE(2b, 3b)
6694+#endif
6695+ " .set mips0 \n"
6696+ : "=&r" (temp), "+m" (v->counter)
6697+ : "Ir" (i));
6698+ } else if (kernel_uses_llsc) {
6699+ __asm__ __volatile__(
6700+ " .set mips3 \n"
6701+ "1: lld %0, %1 # atomic64_add \n"
6702+#ifdef CONFIG_PAX_REFCOUNT
6703+ /* Exception on overflow. */
6704+ "2: dadd %0, %2 \n"
6705+#else
6706+ " daddu %0, %2 \n"
6707+#endif
6708+ " scd %0, %1 \n"
6709+ " beqz %0, 1b \n"
6710+#ifdef CONFIG_PAX_REFCOUNT
6711+ "3: \n"
6712+ _ASM_EXTABLE(2b, 3b)
6713+#endif
6714+ " .set mips0 \n"
6715+ : "=&r" (temp), "+m" (v->counter)
6716+ : "Ir" (i));
6717+ } else {
6718+ unsigned long flags;
6719+
6720+ raw_local_irq_save(flags);
6721+ __asm__ __volatile__(
6722+#ifdef CONFIG_PAX_REFCOUNT
6723+ /* Exception on overflow. */
6724+ "1: dadd %0, %1 \n"
6725+ "2: \n"
6726+ _ASM_EXTABLE(1b, 2b)
6727+#else
6728+ " daddu %0, %1 \n"
6729+#endif
6730+ : "+r" (v->counter) : "Ir" (i));
6731+ raw_local_irq_restore(flags);
6732+ }
6733+}
6734+static __inline__ void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6735 {
6736 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6737 long temp;
6738@@ -457,7 +890,67 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
6739 *
6740 * Atomically subtracts @i from @v.
6741 */
6742-static __inline__ void atomic64_sub(long i, atomic64_t * v)
6743+static __inline__ void atomic64_sub(long i, atomic64_t *v)
6744+{
6745+ long temp;
6746+
6747+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6748+ __asm__ __volatile__(
6749+ " .set mips3 \n"
6750+ "1: lld %0, %1 # atomic64_sub \n"
6751+#ifdef CONFIG_PAX_REFCOUNT
6752+ /* Exception on overflow. */
6753+ "2: dsub %0, %2 \n"
6754+#else
6755+ " dsubu %0, %2 \n"
6756+#endif
6757+ " scd %0, %1 \n"
6758+ " beqzl %0, 1b \n"
6759+#ifdef CONFIG_PAX_REFCOUNT
6760+ "3: \n"
6761+ _ASM_EXTABLE(2b, 3b)
6762+#endif
6763+ " .set mips0 \n"
6764+ : "=&r" (temp), "+m" (v->counter)
6765+ : "Ir" (i));
6766+ } else if (kernel_uses_llsc) {
6767+ __asm__ __volatile__(
6768+ " .set mips3 \n"
6769+ "1: lld %0, %1 # atomic64_sub \n"
6770+#ifdef CONFIG_PAX_REFCOUNT
6771+ /* Exception on overflow. */
6772+ "2: dsub %0, %2 \n"
6773+#else
6774+ " dsubu %0, %2 \n"
6775+#endif
6776+ " scd %0, %1 \n"
6777+ " beqz %0, 1b \n"
6778+#ifdef CONFIG_PAX_REFCOUNT
6779+ "3: \n"
6780+ _ASM_EXTABLE(2b, 3b)
6781+#endif
6782+ " .set mips0 \n"
6783+ : "=&r" (temp), "+m" (v->counter)
6784+ : "Ir" (i));
6785+ } else {
6786+ unsigned long flags;
6787+
6788+ raw_local_irq_save(flags);
6789+ __asm__ __volatile__(
6790+#ifdef CONFIG_PAX_REFCOUNT
6791+ /* Exception on overflow. */
6792+ "1: dsub %0, %1 \n"
6793+ "2: \n"
6794+ _ASM_EXTABLE(1b, 2b)
6795+#else
6796+ " dsubu %0, %1 \n"
6797+#endif
6798+ : "+r" (v->counter) : "Ir" (i));
6799+ raw_local_irq_restore(flags);
6800+ }
6801+}
6802+
6803+static __inline__ void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6804 {
6805 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6806 long temp;
6807@@ -496,7 +989,93 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
6808 /*
6809 * Same as above, but return the result value
6810 */
6811-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6812+static __inline__ long atomic64_add_return(long i, atomic64_t *v)
6813+{
6814+ long result;
6815+ long temp;
6816+
6817+ smp_mb__before_llsc();
6818+
6819+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6820+ __asm__ __volatile__(
6821+ " .set mips3 \n"
6822+ "1: lld %1, %2 # atomic64_add_return \n"
6823+#ifdef CONFIG_PAX_REFCOUNT
6824+ "2: dadd %0, %1, %3 \n"
6825+#else
6826+ " daddu %0, %1, %3 \n"
6827+#endif
6828+ " scd %0, %2 \n"
6829+ " beqzl %0, 1b \n"
6830+#ifdef CONFIG_PAX_REFCOUNT
6831+ " b 4f \n"
6832+ " .set noreorder \n"
6833+ "3: b 5f \n"
6834+ " move %0, %1 \n"
6835+ " .set reorder \n"
6836+ _ASM_EXTABLE(2b, 3b)
6837+#endif
6838+ "4: daddu %0, %1, %3 \n"
6839+#ifdef CONFIG_PAX_REFCOUNT
6840+ "5: \n"
6841+#endif
6842+ " .set mips0 \n"
6843+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6844+ : "Ir" (i));
6845+ } else if (kernel_uses_llsc) {
6846+ __asm__ __volatile__(
6847+ " .set mips3 \n"
6848+ "1: lld %1, %2 # atomic64_add_return \n"
6849+#ifdef CONFIG_PAX_REFCOUNT
6850+ "2: dadd %0, %1, %3 \n"
6851+#else
6852+ " daddu %0, %1, %3 \n"
6853+#endif
6854+ " scd %0, %2 \n"
6855+ " bnez %0, 4f \n"
6856+ " b 1b \n"
6857+#ifdef CONFIG_PAX_REFCOUNT
6858+ " .set noreorder \n"
6859+ "3: b 5f \n"
6860+ " move %0, %1 \n"
6861+ " .set reorder \n"
6862+ _ASM_EXTABLE(2b, 3b)
6863+#endif
6864+ "4: daddu %0, %1, %3 \n"
6865+#ifdef CONFIG_PAX_REFCOUNT
6866+ "5: \n"
6867+#endif
6868+ " .set mips0 \n"
6869+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6870+ : "Ir" (i), "m" (v->counter)
6871+ : "memory");
6872+ } else {
6873+ unsigned long flags;
6874+
6875+ raw_local_irq_save(flags);
6876+ __asm__ __volatile__(
6877+ " ld %0, %1 \n"
6878+#ifdef CONFIG_PAX_REFCOUNT
6879+ /* Exception on overflow. */
6880+ "1: dadd %0, %2 \n"
6881+#else
6882+ " daddu %0, %2 \n"
6883+#endif
6884+ " sd %0, %1 \n"
6885+#ifdef CONFIG_PAX_REFCOUNT
6886+ /* Note: Dest reg is not modified on overflow */
6887+ "2: \n"
6888+ _ASM_EXTABLE(1b, 2b)
6889+#endif
6890+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6891+ raw_local_irq_restore(flags);
6892+ }
6893+
6894+ smp_llsc_mb();
6895+
6896+ return result;
6897+}
6898+static __inline__ long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6899 {
6900 long result;
6901
6902@@ -546,7 +1125,97 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6903 return result;
6904 }
6905
6906-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6907+static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
6908+{
6909+ long result;
6910+ long temp;
6911+
6912+ smp_mb__before_llsc();
6913+
6914+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6917+ __asm__ __volatile__(
6918+ " .set mips3 \n"
6919+ "1: lld %1, %2 # atomic64_sub_return \n"
6920+#ifdef CONFIG_PAX_REFCOUNT
6921+ "2: dsub %0, %1, %3 \n"
6922+#else
6923+ " dsubu %0, %1, %3 \n"
6924+#endif
6925+ " scd %0, %2 \n"
6926+ " beqzl %0, 1b \n"
6927+#ifdef CONFIG_PAX_REFCOUNT
6928+ " b 4f \n"
6929+ " .set noreorder \n"
6930+ "3: b 5f \n"
6931+ " move %0, %1 \n"
6932+ " .set reorder \n"
6933+ _ASM_EXTABLE(2b, 3b)
6934+#endif
6935+ "4: dsubu %0, %1, %3 \n"
6936+#ifdef CONFIG_PAX_REFCOUNT
6937+ "5: \n"
6938+#endif
6939+ " .set mips0 \n"
6940+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6941+ : "Ir" (i), "m" (v->counter)
6942+ : "memory");
6943+ } else if (kernel_uses_llsc) {
6944+ __asm__ __volatile__(
6945+ " .set mips3 \n"
6946+ "1: lld %1, %2 # atomic64_sub_return \n"
6947+#ifdef CONFIG_PAX_REFCOUNT
6948+ "2: dsub %0, %1, %3 \n"
6949+#else
6950+ " dsubu %0, %1, %3 \n"
6951+#endif
6952+ " scd %0, %2 \n"
6953+ " bnez %0, 4f \n"
6954+ " b 1b \n"
6955+#ifdef CONFIG_PAX_REFCOUNT
6956+ " .set noreorder \n"
6957+ "3: b 5f \n"
6958+ " move %0, %1 \n"
6959+ " .set reorder \n"
6960+ _ASM_EXTABLE(2b, 3b)
6961+#endif
6962+ "4: dsubu %0, %1, %3 \n"
6963+#ifdef CONFIG_PAX_REFCOUNT
6964+ "5: \n"
6965+#endif
6966+ " .set mips0 \n"
6967+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6968+ : "Ir" (i), "m" (v->counter)
6969+ : "memory");
6970+ } else {
6971+ unsigned long flags;
6972+
6973+ raw_local_irq_save(flags);
6974+ __asm__ __volatile__(
6975+ " ld %0, %1 \n"
6976+#ifdef CONFIG_PAX_REFCOUNT
6977+ /* Exception on overflow. */
6978+ "1: dsub %0, %2 \n"
6979+#else
6980+ " dsubu %0, %2 \n"
6981+#endif
6982+ " sd %0, %1 \n"
6983+#ifdef CONFIG_PAX_REFCOUNT
6984+ /* Note: Dest reg is not modified on overflow */
6985+ "2: \n"
6986+ _ASM_EXTABLE(1b, 2b)
6987+#endif
6988+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6989+ raw_local_irq_restore(flags);
6990+ }
6991+
6992+ smp_llsc_mb();
6993+
6994+ return result;
6995+}
6996+
6997+static __inline__ long atomic64_sub_return_unchecked(long i, atomic64_unchecked_t *v)
6998 {
6999 long result;
7000
7001@@ -605,7 +1274,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
7002 * Atomically test @v and subtract @i if @v is greater or equal than @i.
7003 * The function returns the old value of @v minus @i.
7004 */
7005-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
7006+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
7007 {
7008 long result;
7009
7010@@ -662,9 +1331,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
7011 return result;
7012 }
7013
7014-#define atomic64_cmpxchg(v, o, n) \
7015- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
7016-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
7017+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7018+{
7019+ return cmpxchg(&v->counter, old, new);
7020+}
7021+
7022+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
7023+ long new)
7024+{
7025+ return cmpxchg(&(v->counter), old, new);
7026+}
7027+
7028+static inline long atomic64_xchg(atomic64_t *v, long new)
7029+{
7030+ return xchg(&v->counter, new);
7031+}
7032+
7033+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7034+{
7035+ return xchg(&(v->counter), new);
7036+}
7037
7038 /**
7039 * atomic64_add_unless - add unless the number is a given value
7040@@ -694,6 +1380,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
7041
7042 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
7043 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
7044+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
7045
7046 /*
7047 * atomic64_sub_and_test - subtract value from variable and test result
7048@@ -715,6 +1402,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
7049 * other cases.
7050 */
7051 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
7052+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
7053
7054 /*
7055 * atomic64_dec_and_test - decrement by 1 and test
7056@@ -739,6 +1427,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
7057 * Atomically increments @v by 1.
7058 */
7059 #define atomic64_inc(v) atomic64_add(1, (v))
7060+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
7061
7062 /*
7063 * atomic64_dec - decrement and test
7064@@ -747,6 +1436,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
7065 * Atomically decrements @v by 1.
7066 */
7067 #define atomic64_dec(v) atomic64_sub(1, (v))
7068+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
7069
7070 /*
7071 * atomic64_add_negative - add and test if negative
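
The hunks above are the heart of the MIPS PAX_REFCOUNT change: the checked atomic_t/atomic64_t operations switch from the wrapping addu/daddu and subu/dsubu instructions to the trapping add/dadd and sub/dsub forms, so a signed overflow raises an exception that the _ASM_EXTABLE fixup reports as a refcount overflow, while the new *_unchecked variants on atomic_unchecked_t/atomic64_unchecked_t keep the classic wrapping behaviour for counters that may legitimately overflow. A minimal userspace sketch of that split, with __builtin_add_overflow standing in for the trapping instruction (function names here are illustrative, not kernel API):

/*
 * Hedged userspace sketch (not kernel code): models the PAX_REFCOUNT split
 * between a checked add that refuses to wrap and an unchecked add that
 * deliberately wraps.
 */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int checked_add(int v, int i)        /* models atomic_add + REFCOUNT */
{
	int r;
	if (__builtin_add_overflow(v, i, &r)) {
		/* the patch traps here; the _ASM_EXTABLE fixup then
		 * ends up in pax_report_refcount_overflow() */
		fprintf(stderr, "refcount overflow, refusing to wrap\n");
		abort();
	}
	return r;
}

static unsigned int unchecked_add(unsigned int v, unsigned int i)
{
	return v + i;           /* models atomic_add_unchecked: wraps */
}

int main(void)
{
	printf("unchecked: %u\n", unchecked_add(UINT_MAX, 1)); /* wraps to 0 */
	printf("checked:   %d\n", checked_add(INT_MAX, 1));    /* aborts */
	return 0;
}
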
7072diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
7073index d0101dd..266982c 100644
7074--- a/arch/mips/include/asm/barrier.h
7075+++ b/arch/mips/include/asm/barrier.h
7076@@ -184,7 +184,7 @@
7077 do { \
7078 compiletime_assert_atomic_type(*p); \
7079 smp_mb(); \
7080- ACCESS_ONCE(*p) = (v); \
7081+ ACCESS_ONCE_RW(*p) = (v); \
7082 } while (0)
7083
7084 #define smp_load_acquire(p) \
7085diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
7086index b4db69f..8f3b093 100644
7087--- a/arch/mips/include/asm/cache.h
7088+++ b/arch/mips/include/asm/cache.h
7089@@ -9,10 +9,11 @@
7090 #ifndef _ASM_CACHE_H
7091 #define _ASM_CACHE_H
7092
7093+#include <linux/const.h>
7094 #include <kmalloc.h>
7095
7096 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
7097-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7098+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7099
7100 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
7101 #define SMP_CACHE_BYTES L1_CACHE_BYTES
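
The cache.h change here (repeated below for mn10300, openrisc and parisc) rebuilds L1_CACHE_BYTES as (_AC(1,UL) << L1_CACHE_SHIFT). _AC(X,Y), from <linux/const.h>, pastes the UL suffix in C and drops it in assembly, so the constant becomes an unsigned long instead of a signed int. A small, well-defined illustration of why the signedness matters in mixed comparisons:

/* Hedged sketch: signed int constant vs the unsigned long one _AC builds. */
#include <stdio.h>

#define BYTES_INT (1 << 6)      /* type is int */
#define BYTES_UL  (1UL << 6)    /* type is unsigned long, like _AC(1,UL) */

int main(void)
{
	long n = -1;
	/* against the int constant, -1 compares as signed... */
	printf("signed cmp:   %d\n", n < BYTES_INT);  /* 1 */
	/* ...against the UL constant, -1 converts to ULONG_MAX */
	printf("unsigned cmp: %d\n", n < BYTES_UL);   /* 0 */
	printf("sizes: %zu vs %zu\n", sizeof BYTES_INT, sizeof BYTES_UL);
	return 0;
}
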
7102diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
7103index d414405..6bb4ba2 100644
7104--- a/arch/mips/include/asm/elf.h
7105+++ b/arch/mips/include/asm/elf.h
7106@@ -398,13 +398,16 @@ extern const char *__elf_platform;
7107 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7108 #endif
7109
7110+#ifdef CONFIG_PAX_ASLR
7111+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
7112+
7113+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7114+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7115+#endif
7116+
7117 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
7118 struct linux_binprm;
7119 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
7120 int uses_interp);
7121
7122-struct mm_struct;
7123-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7124-#define arch_randomize_brk arch_randomize_brk
7125-
7126 #endif /* _ASM_ELF_H */
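
The PAX_DELTA_MMAP_LEN/PAX_DELTA_STACK_LEN values above are bit counts of randomization applied on top of PAGE_SHIFT. A quick sketch of the resulting entropy and window sizes, assuming 4 KB pages (PAGE_SHIFT == 12):

/* Hedged sketch: randomization bits implied by the PAX_ASLR defines above. */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	int len32 = 27 - PAGE_SHIFT;   /* TASK_IS_32BIT_ADDR case */
	int len64 = 36 - PAGE_SHIFT;
	printf("32-bit: %d bits (%lu MB window)\n", len32,
	       (1UL << (len32 + PAGE_SHIFT)) >> 20);   /* 15 bits, 128 MB */
	printf("64-bit: %d bits (%lu GB window)\n", len64,
	       (1UL << (len64 + PAGE_SHIFT)) >> 30);   /* 24 bits, 64 GB */
	return 0;
}
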
7127diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
7128index c1f6afa..38cc6e9 100644
7129--- a/arch/mips/include/asm/exec.h
7130+++ b/arch/mips/include/asm/exec.h
7131@@ -12,6 +12,6 @@
7132 #ifndef _ASM_EXEC_H
7133 #define _ASM_EXEC_H
7134
7135-extern unsigned long arch_align_stack(unsigned long sp);
7136+#define arch_align_stack(x) ((x) & ~0xfUL)
7137
7138 #endif /* _ASM_EXEC_H */
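
Replacing the arch_align_stack prototype with a macro removes the per-exec stack jitter (the function itself is deleted from process.c further down, since PaX supplies its own stack randomization); what remains is a pure 16-byte downward alignment. A sketch, assuming nothing beyond the macro itself:

/* Hedged sketch: (x) & ~0xfUL rounds a stack pointer down to 16 bytes. */
#include <stdio.h>

#define arch_align_stack(x) ((x) & ~0xfUL)

int main(void)
{
	unsigned long sp = 0x7fffdeadbeefUL;
	printf("%#lx -> %#lx\n", sp, arch_align_stack(sp)); /* ...beef -> ...bee0 */
	return 0;
}
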
7139diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
7140index 9e8ef59..1139d6b 100644
7141--- a/arch/mips/include/asm/hw_irq.h
7142+++ b/arch/mips/include/asm/hw_irq.h
7143@@ -10,7 +10,7 @@
7144
7145 #include <linux/atomic.h>
7146
7147-extern atomic_t irq_err_count;
7148+extern atomic_unchecked_t irq_err_count;
7149
7150 /*
7151 * interrupt-retrigger: NOP for now. This may not be appropriate for all
7152diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
7153index 46dfc3c..a16b13a 100644
7154--- a/arch/mips/include/asm/local.h
7155+++ b/arch/mips/include/asm/local.h
7156@@ -12,15 +12,25 @@ typedef struct
7157 atomic_long_t a;
7158 } local_t;
7159
7160+typedef struct {
7161+ atomic_long_unchecked_t a;
7162+} local_unchecked_t;
7163+
7164 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
7165
7166 #define local_read(l) atomic_long_read(&(l)->a)
7167+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
7168 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
7169+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
7170
7171 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
7172+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
7173 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
7174+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
7175 #define local_inc(l) atomic_long_inc(&(l)->a)
7176+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
7177 #define local_dec(l) atomic_long_dec(&(l)->a)
7178+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
7179
7180 /*
7181 * Same as above, but return the result value
7182@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
7183 return result;
7184 }
7185
7186+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
7187+{
7188+ unsigned long result;
7189+
7190+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
7191+ unsigned long temp;
7192+
7193+ __asm__ __volatile__(
7194+ " .set mips3 \n"
7195+ "1:" __LL "%1, %2 # local_add_return \n"
7196+ " addu %0, %1, %3 \n"
7197+ __SC "%0, %2 \n"
7198+ " beqzl %0, 1b \n"
7199+ " addu %0, %1, %3 \n"
7200+ " .set mips0 \n"
7201+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
7202+ : "Ir" (i), "m" (l->a.counter)
7203+ : "memory");
7204+ } else if (kernel_uses_llsc) {
7205+ unsigned long temp;
7206+
7207+ __asm__ __volatile__(
7208+ " .set mips3 \n"
7209+ "1:" __LL "%1, %2 # local_add_return \n"
7210+ " addu %0, %1, %3 \n"
7211+ __SC "%0, %2 \n"
7212+ " beqz %0, 1b \n"
7213+ " addu %0, %1, %3 \n"
7214+ " .set mips0 \n"
7215+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
7216+ : "Ir" (i), "m" (l->a.counter)
7217+ : "memory");
7218+ } else {
7219+ unsigned long flags;
7220+
7221+ local_irq_save(flags);
7222+ result = l->a.counter;
7223+ result += i;
7224+ l->a.counter = result;
7225+ local_irq_restore(flags);
7226+ }
7227+
7228+ return result;
7229+}
7230+
7231 static __inline__ long local_sub_return(long i, local_t * l)
7232 {
7233 unsigned long result;
7234@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
7235
7236 #define local_cmpxchg(l, o, n) \
7237 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
7238+#define local_cmpxchg_unchecked(l, o, n) \
7239+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
7240 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
7241
7242 /**
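
local_add_return_unchecked above follows the same ll/sc retry shape as the existing helpers: load-linked the counter, add, store-conditional, and loop if another writer got in between. A portable reshaping of that loop with a C11 compare-and-swap (illustrative helper, not the MIPS asm):

/* Hedged sketch: the ll/sc retry loop as a C11 CAS loop. */
#include <stdatomic.h>
#include <stdio.h>

static long my_add_return(atomic_long *a, long i)   /* illustrative name */
{
	long old = atomic_load_explicit(a, memory_order_relaxed);
	long new;
	do {
		new = old + i;          /* the "addu %0, %1, %3" step */
		/* like __SC: succeeds only if *a still holds old,
		 * otherwise old is reloaded and we retry */
	} while (!atomic_compare_exchange_weak_explicit(
			a, &old, new, memory_order_seq_cst, memory_order_relaxed));
	return new;
}

int main(void)
{
	atomic_long counter = 0;
	printf("%ld\n", my_add_return(&counter, 5)); /* 5 */
	return 0;
}
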
7243diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
7244index 5699ec3..95def83 100644
7245--- a/arch/mips/include/asm/page.h
7246+++ b/arch/mips/include/asm/page.h
7247@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
7248 #ifdef CONFIG_CPU_MIPS32
7249 typedef struct { unsigned long pte_low, pte_high; } pte_t;
7250 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
7251- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
7252+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
7253 #else
7254 typedef struct { unsigned long long pte; } pte_t;
7255 #define pte_val(x) ((x).pte)
7256diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
7257index b336037..5b874cc 100644
7258--- a/arch/mips/include/asm/pgalloc.h
7259+++ b/arch/mips/include/asm/pgalloc.h
7260@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
7261 {
7262 set_pud(pud, __pud((unsigned long)pmd));
7263 }
7264+
7265+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
7266+{
7267+ pud_populate(mm, pud, pmd);
7268+}
7269 #endif
7270
7271 /*
7272diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
7273index 539ddd1..8783f9a 100644
7274--- a/arch/mips/include/asm/pgtable.h
7275+++ b/arch/mips/include/asm/pgtable.h
7276@@ -20,6 +20,9 @@
7277 #include <asm/io.h>
7278 #include <asm/pgtable-bits.h>
7279
7280+#define ktla_ktva(addr) (addr)
7281+#define ktva_ktla(addr) (addr)
7282+
7283 struct mm_struct;
7284 struct vm_area_struct;
7285
7286diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
7287index 7de8658..c109224 100644
7288--- a/arch/mips/include/asm/thread_info.h
7289+++ b/arch/mips/include/asm/thread_info.h
7290@@ -105,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
7291 #define TIF_SECCOMP 4 /* secure computing */
7292 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
7293 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
7294+/* li takes a 32bit immediate */
7295+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
7296+
7297 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
7298 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
7299 #define TIF_NOHZ 19 /* in adaptive nohz mode */
7300@@ -138,14 +141,16 @@ static inline struct thread_info *current_thread_info(void)
7301 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
7302 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
7303 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
7304+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
7305
7306 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
7307 _TIF_SYSCALL_AUDIT | \
7308- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
7309+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
7310+ _TIF_GRSEC_SETXID)
7311
7312 /* work to do in syscall_trace_leave() */
7313 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
7314- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
7315+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
7316
7317 /* work to do on interrupt/exception return */
7318 #define _TIF_WORK_MASK \
7319@@ -153,7 +158,7 @@ static inline struct thread_info *current_thread_info(void)
7320 /* work to do on any return to u-space */
7321 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
7322 _TIF_WORK_SYSCALL_EXIT | \
7323- _TIF_SYSCALL_TRACEPOINT)
7324+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
7325
7326 /*
7327 * We stash processor id into a COP0 register to retrieve it fast
7328diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
7329index a109510..94ee3f6 100644
7330--- a/arch/mips/include/asm/uaccess.h
7331+++ b/arch/mips/include/asm/uaccess.h
7332@@ -130,6 +130,7 @@ extern u64 __ua_limit;
7333 __ok == 0; \
7334 })
7335
7336+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
7337 #define access_ok(type, addr, size) \
7338 likely(__access_ok((addr), (size), __access_mask))
7339
7340diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
7341index 1188e00..41cf144 100644
7342--- a/arch/mips/kernel/binfmt_elfn32.c
7343+++ b/arch/mips/kernel/binfmt_elfn32.c
7344@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
7345 #undef ELF_ET_DYN_BASE
7346 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
7347
7348+#ifdef CONFIG_PAX_ASLR
7349+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
7350+
7351+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7352+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7353+#endif
7354+
7355 #include <asm/processor.h>
7356 #include <linux/module.h>
7357 #include <linux/elfcore.h>
7358diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
7359index 7faf5f2..f3d3cf4 100644
7360--- a/arch/mips/kernel/binfmt_elfo32.c
7361+++ b/arch/mips/kernel/binfmt_elfo32.c
7362@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
7363 #undef ELF_ET_DYN_BASE
7364 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
7365
7366+#ifdef CONFIG_PAX_ASLR
7367+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
7368+
7369+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7370+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7371+#endif
7372+
7373 #include <asm/processor.h>
7374
7375 /*
7376diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
7377index 50b3648..c2f3cec 100644
7378--- a/arch/mips/kernel/i8259.c
7379+++ b/arch/mips/kernel/i8259.c
7380@@ -201,7 +201,7 @@ spurious_8259A_irq:
7381 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
7382 spurious_irq_mask |= irqmask;
7383 }
7384- atomic_inc(&irq_err_count);
7385+ atomic_inc_unchecked(&irq_err_count);
7386 /*
7387 * Theoretically we do not have to handle this IRQ,
7388 * but in Linux this does not cause problems and is
7389diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
7390index 44a1f79..2bd6aa3 100644
7391--- a/arch/mips/kernel/irq-gt641xx.c
7392+++ b/arch/mips/kernel/irq-gt641xx.c
7393@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
7394 }
7395 }
7396
7397- atomic_inc(&irq_err_count);
7398+ atomic_inc_unchecked(&irq_err_count);
7399 }
7400
7401 void __init gt641xx_irq_init(void)
7402diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
7403index d2bfbc2..a8eacd2 100644
7404--- a/arch/mips/kernel/irq.c
7405+++ b/arch/mips/kernel/irq.c
7406@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
7407 printk("unexpected IRQ # %d\n", irq);
7408 }
7409
7410-atomic_t irq_err_count;
7411+atomic_unchecked_t irq_err_count;
7412
7413 int arch_show_interrupts(struct seq_file *p, int prec)
7414 {
7415- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
7416+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
7417 return 0;
7418 }
7419
7420 asmlinkage void spurious_interrupt(void)
7421 {
7422- atomic_inc(&irq_err_count);
7423+ atomic_inc_unchecked(&irq_err_count);
7424 }
7425
7426 void __init init_IRQ(void)
7427@@ -109,7 +109,10 @@ void __init init_IRQ(void)
7428 #endif
7429 }
7430
7431+
7432 #ifdef DEBUG_STACKOVERFLOW
7433+extern void gr_handle_kernel_exploit(void);
7434+
7435 static inline void check_stack_overflow(void)
7436 {
7437 unsigned long sp;
7438@@ -125,6 +128,7 @@ static inline void check_stack_overflow(void)
7439 printk("do_IRQ: stack overflow: %ld\n",
7440 sp - sizeof(struct thread_info));
7441 dump_stack();
7442+ gr_handle_kernel_exploit();
7443 }
7444 }
7445 #else
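
check_stack_overflow compares the current stack pointer against the low end of the combined stack/thread_info block, and the patch makes a detected overflow also call gr_handle_kernel_exploit. A hedged sketch of the comparison itself (the sizes are made up; the real ones come from THREAD_SIZE and sizeof(struct thread_info)):

/* Hedged sketch of the stack-overflow check: stack and thread_info share
 * one block, so sp is compared against the block base plus the
 * thread_info footprint. Sizes are illustrative. */
#include <stdio.h>

#define THREAD_SIZE      16384UL   /* illustrative */
#define THREAD_INFO_SIZE 128UL     /* illustrative */

static int stack_overflowed(unsigned long sp)
{
	unsigned long low = sp & ~(THREAD_SIZE - 1);  /* base of stack block */
	return sp - low < THREAD_INFO_SIZE;           /* sp fell into thread_info */
}

int main(void)
{
	printf("%d\n", stack_overflowed(0x80004000UL + 64));   /* 1: overflow */
	printf("%d\n", stack_overflowed(0x80004000UL + 8192)); /* 0: fine */
	return 0;
}
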
7446diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
7447index c4c2069..bde8051 100644
7448--- a/arch/mips/kernel/pm-cps.c
7449+++ b/arch/mips/kernel/pm-cps.c
7450@@ -168,7 +168,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
7451 nc_core_ready_count = nc_addr;
7452
7453 /* Ensure ready_count is zero-initialised before the assembly runs */
7454- ACCESS_ONCE(*nc_core_ready_count) = 0;
7455+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
7456 coupled_barrier(&per_cpu(pm_barrier, core), online);
7457
7458 /* Run the generated entry code */
7459diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
7460index 0a1ec0f..d9e93b6 100644
7461--- a/arch/mips/kernel/process.c
7462+++ b/arch/mips/kernel/process.c
7463@@ -572,15 +572,3 @@ unsigned long get_wchan(struct task_struct *task)
7464 out:
7465 return pc;
7466 }
7467-
7468-/*
7469- * Don't forget that the stack pointer must be aligned on a 8 bytes
7470- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
7471- */
7472-unsigned long arch_align_stack(unsigned long sp)
7473-{
7474- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7475- sp -= get_random_int() & ~PAGE_MASK;
7476-
7477- return sp & ALMASK;
7478-}
7479diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
7480index f639ccd..e4b110d 100644
7481--- a/arch/mips/kernel/ptrace.c
7482+++ b/arch/mips/kernel/ptrace.c
7483@@ -630,6 +630,10 @@ long arch_ptrace(struct task_struct *child, long request,
7484 return ret;
7485 }
7486
7487+#ifdef CONFIG_GRKERNSEC_SETXID
7488+extern void gr_delayed_cred_worker(void);
7489+#endif
7490+
7491 /*
7492 * Notification of system call entry/exit
7493 * - triggered by current->work.syscall_trace
7494@@ -646,6 +650,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
7495 tracehook_report_syscall_entry(regs))
7496 ret = -1;
7497
7498+#ifdef CONFIG_GRKERNSEC_SETXID
7499+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7500+ gr_delayed_cred_worker();
7501+#endif
7502+
7503 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
7504 trace_sys_enter(regs, regs->regs[2]);
7505
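
syscall_trace_enter now consumes TIF_GRSEC_SETXID with test_and_clear_thread_flag, an atomic read-modify-write, so however entry paths race, exactly one of them runs gr_delayed_cred_worker for a pending flag. A userspace sketch of the same test-and-clear shape (the flag word and helper are illustrative):

/* Hedged sketch: atomic test-and-clear of a per-thread flag bit. */
#include <stdatomic.h>
#include <stdio.h>

#define TIF_GRSEC_SETXID 10

static atomic_ulong thread_flags = 1UL << TIF_GRSEC_SETXID;

static int test_and_clear_flag(int bit)
{
	unsigned long old = atomic_fetch_and(&thread_flags, ~(1UL << bit));
	return (old >> bit) & 1;     /* was the bit set before we cleared it? */
}

int main(void)
{
	printf("%d\n", test_and_clear_flag(TIF_GRSEC_SETXID)); /* 1: consumed */
	printf("%d\n", test_and_clear_flag(TIF_GRSEC_SETXID)); /* 0: already clear */
	return 0;
}
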
7506diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
7507index 07fc524..b9d7f28 100644
7508--- a/arch/mips/kernel/reset.c
7509+++ b/arch/mips/kernel/reset.c
7510@@ -13,6 +13,7 @@
7511 #include <linux/reboot.h>
7512
7513 #include <asm/reboot.h>
7514+#include <asm/bug.h>
7515
7516 /*
7517 * Urgs ... Too many MIPS machines to handle this in a generic way.
7518@@ -29,16 +30,19 @@ void machine_restart(char *command)
7519 {
7520 if (_machine_restart)
7521 _machine_restart(command);
7522+ BUG();
7523 }
7524
7525 void machine_halt(void)
7526 {
7527 if (_machine_halt)
7528 _machine_halt();
7529+ BUG();
7530 }
7531
7532 void machine_power_off(void)
7533 {
7534 if (pm_power_off)
7535 pm_power_off();
7536+ BUG();
7537 }
7538diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
7539index 2242bdd..b284048 100644
7540--- a/arch/mips/kernel/sync-r4k.c
7541+++ b/arch/mips/kernel/sync-r4k.c
7542@@ -18,8 +18,8 @@
7543 #include <asm/mipsregs.h>
7544
7545 static atomic_t count_start_flag = ATOMIC_INIT(0);
7546-static atomic_t count_count_start = ATOMIC_INIT(0);
7547-static atomic_t count_count_stop = ATOMIC_INIT(0);
7548+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
7549+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
7550 static atomic_t count_reference = ATOMIC_INIT(0);
7551
7552 #define COUNTON 100
7553@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
7554
7555 for (i = 0; i < NR_LOOPS; i++) {
7556 /* slaves loop on '!= 2' */
7557- while (atomic_read(&count_count_start) != 1)
7558+ while (atomic_read_unchecked(&count_count_start) != 1)
7559 mb();
7560- atomic_set(&count_count_stop, 0);
7561+ atomic_set_unchecked(&count_count_stop, 0);
7562 smp_wmb();
7563
7564 /* this lets the slaves write their count register */
7565- atomic_inc(&count_count_start);
7566+ atomic_inc_unchecked(&count_count_start);
7567
7568 /*
7569 * Everyone initialises count in the last loop:
7570@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
7571 /*
7572 * Wait for all slaves to leave the synchronization point:
7573 */
7574- while (atomic_read(&count_count_stop) != 1)
7575+ while (atomic_read_unchecked(&count_count_stop) != 1)
7576 mb();
7577- atomic_set(&count_count_start, 0);
7578+ atomic_set_unchecked(&count_count_start, 0);
7579 smp_wmb();
7580- atomic_inc(&count_count_stop);
7581+ atomic_inc_unchecked(&count_count_stop);
7582 }
7583 /* Arrange for an interrupt in a short while */
7584 write_c0_compare(read_c0_count() + COUNTON);
7585@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
7586 initcount = atomic_read(&count_reference);
7587
7588 for (i = 0; i < NR_LOOPS; i++) {
7589- atomic_inc(&count_count_start);
7590- while (atomic_read(&count_count_start) != 2)
7591+ atomic_inc_unchecked(&count_count_start);
7592+ while (atomic_read_unchecked(&count_count_start) != 2)
7593 mb();
7594
7595 /*
7596@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
7597 if (i == NR_LOOPS-1)
7598 write_c0_count(initcount);
7599
7600- atomic_inc(&count_count_stop);
7601- while (atomic_read(&count_count_stop) != 2)
7602+ atomic_inc_unchecked(&count_count_stop);
7603+ while (atomic_read_unchecked(&count_count_stop) != 2)
7604 mb();
7605 }
7606 /* Arrange for an interrupt in a short while */
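
The sync-r4k.c counter synchronization is a two-phase handshake on a pair of counters; the patch only changes their type to atomic_unchecked_t, since they are pure control-flow counters where overflow checking adds nothing. A two-thread C11 sketch of the same handshake shape (condensed to one master and one slave, names illustrative):

/* Hedged sketch: the count_count_start/stop handshake with one slave. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int count_start = 0;
static atomic_int count_stop  = 0;

static void *slave(void *arg)
{
	(void)arg;
	atomic_fetch_add(&count_start, 1);      /* "I am here" */
	while (atomic_load(&count_start) != 2)  /* wait for the master */
		;
	printf("slave: write count register\n");
	atomic_fetch_add(&count_stop, 1);
	while (atomic_load(&count_stop) != 2)   /* leave in step */
		;
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, slave, NULL);
	while (atomic_load(&count_start) != 1)  /* wait for the slave */
		;
	printf("master: write count register\n");
	atomic_fetch_add(&count_start, 1);      /* release the slave */
	while (atomic_load(&count_stop) != 1)
		;
	atomic_fetch_add(&count_stop, 1);
	pthread_join(t, NULL);
	return 0;
}
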
7607diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
7608index 51706d6..ec1178c 100644
7609--- a/arch/mips/kernel/traps.c
7610+++ b/arch/mips/kernel/traps.c
7611@@ -687,7 +687,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
7612 siginfo_t info;
7613
7614 prev_state = exception_enter();
7615- die_if_kernel("Integer overflow", regs);
7616+ if (unlikely(!user_mode(regs))) {
7617+
7618+#ifdef CONFIG_PAX_REFCOUNT
7619+ if (fixup_exception(regs)) {
7620+ pax_report_refcount_overflow(regs);
7621+ exception_exit(prev_state);
7622+ return;
7623+ }
7624+#endif
7625+
7626+ die("Integer overflow", regs);
7627+ }
7628
7629 info.si_code = FPE_INTOVF;
7630 info.si_signo = SIGFPE;
7631diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
7632index f3c56a1..6a2f01c 100644
7633--- a/arch/mips/kvm/kvm_mips.c
7634+++ b/arch/mips/kvm/kvm_mips.c
7635@@ -841,7 +841,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
7636 return r;
7637 }
7638
7639-int kvm_arch_init(void *opaque)
7640+int kvm_arch_init(const void *opaque)
7641 {
7642 int ret;
7643
7644diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
7645index becc42b..9e43d4b 100644
7646--- a/arch/mips/mm/fault.c
7647+++ b/arch/mips/mm/fault.c
7648@@ -28,6 +28,23 @@
7649 #include <asm/highmem.h> /* For VMALLOC_END */
7650 #include <linux/kdebug.h>
7651
7652+#ifdef CONFIG_PAX_PAGEEXEC
7653+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7654+{
7655+ unsigned long i;
7656+
7657+ printk(KERN_ERR "PAX: bytes at PC: ");
7658+ for (i = 0; i < 5; i++) {
7659+ unsigned int c;
7660+ if (get_user(c, (unsigned int *)pc+i))
7661+ printk(KERN_CONT "???????? ");
7662+ else
7663+ printk(KERN_CONT "%08x ", c);
7664+ }
7665+ printk("\n");
7666+}
7667+#endif
7668+
7669 /*
7670 * This routine handles page faults. It determines the address,
7671 * and the problem, and then passes it off to one of the appropriate
7672@@ -199,6 +216,14 @@ bad_area:
7673 bad_area_nosemaphore:
7674 /* User mode accesses just cause a SIGSEGV */
7675 if (user_mode(regs)) {
7676+
7677+#ifdef CONFIG_PAX_PAGEEXEC
7678+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
7679+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
7680+ do_group_exit(SIGKILL);
7681+ }
7682+#endif
7683+
7684 tsk->thread.cp0_badvaddr = address;
7685 tsk->thread.error_code = write;
7686 #if 0
7687diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
7688index f1baadd..5472dca 100644
7689--- a/arch/mips/mm/mmap.c
7690+++ b/arch/mips/mm/mmap.c
7691@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7692 struct vm_area_struct *vma;
7693 unsigned long addr = addr0;
7694 int do_color_align;
7695+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7696 struct vm_unmapped_area_info info;
7697
7698 if (unlikely(len > TASK_SIZE))
7699@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7700 do_color_align = 1;
7701
7702 /* requesting a specific address */
7703+
7704+#ifdef CONFIG_PAX_RANDMMAP
7705+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
7706+#endif
7707+
7708 if (addr) {
7709 if (do_color_align)
7710 addr = COLOUR_ALIGN(addr, pgoff);
7711@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7712 addr = PAGE_ALIGN(addr);
7713
7714 vma = find_vma(mm, addr);
7715- if (TASK_SIZE - len >= addr &&
7716- (!vma || addr + len <= vma->vm_start))
7717+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7718 return addr;
7719 }
7720
7721 info.length = len;
7722 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
7723 info.align_offset = pgoff << PAGE_SHIFT;
7724+ info.threadstack_offset = offset;
7725
7726 if (dir == DOWN) {
7727 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
7728@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7729 {
7730 unsigned long random_factor = 0UL;
7731
7732+#ifdef CONFIG_PAX_RANDMMAP
7733+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7734+#endif
7735+
7736 if (current->flags & PF_RANDOMIZE) {
7737 random_factor = get_random_int();
7738 random_factor = random_factor << PAGE_SHIFT;
7739@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7740
7741 if (mmap_is_legacy()) {
7742 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
7743+
7744+#ifdef CONFIG_PAX_RANDMMAP
7745+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7746+ mm->mmap_base += mm->delta_mmap;
7747+#endif
7748+
7749 mm->get_unmapped_area = arch_get_unmapped_area;
7750 } else {
7751 mm->mmap_base = mmap_base(random_factor);
7752+
7753+#ifdef CONFIG_PAX_RANDMMAP
7754+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7755+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7756+#endif
7757+
7758 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7759 }
7760 }
7761
7762-static inline unsigned long brk_rnd(void)
7763-{
7764- unsigned long rnd = get_random_int();
7765-
7766- rnd = rnd << PAGE_SHIFT;
7767- /* 8MB for 32bit, 256MB for 64bit */
7768- if (TASK_IS_32BIT_ADDR)
7769- rnd = rnd & 0x7ffffful;
7770- else
7771- rnd = rnd & 0xffffffful;
7772-
7773- return rnd;
7774-}
7775-
7776-unsigned long arch_randomize_brk(struct mm_struct *mm)
7777-{
7778- unsigned long base = mm->brk;
7779- unsigned long ret;
7780-
7781- ret = PAGE_ALIGN(base + brk_rnd());
7782-
7783- if (ret < mm->brk)
7784- return mm->brk;
7785-
7786- return ret;
7787-}
7788-
7789 int __virt_addr_valid(const volatile void *kaddr)
7790 {
7791 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
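
Dropping arch_randomize_brk and brk_rnd here is deliberate: with PAX_RANDMMAP the mmap base already carries delta_mmap/delta_stack, so the separate brk jitter is redundant. For reference, this is what the deleted brk_rnd arithmetic computed (standalone sketch; rand() stands in for get_random_int()):

/* Hedged sketch of the removed brk_rnd(): a page-shifted random value
 * masked to an 8 MB (32-bit) or 256 MB (64-bit) window. */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12

static unsigned long brk_rnd(int task_is_32bit)
{
	unsigned long rnd = (unsigned long)rand();
	rnd <<= PAGE_SHIFT;
	return rnd & (task_is_32bit ? 0x7ffffful : 0xffffffful);
}

int main(void)
{
	srand(42);
	printf("32-bit offset: %#lx (< 8 MB)\n",   brk_rnd(1));
	printf("64-bit offset: %#lx (< 256 MB)\n", brk_rnd(0));
	return 0;
}
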
7792diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
7793index 59cccd9..f39ac2f 100644
7794--- a/arch/mips/pci/pci-octeon.c
7795+++ b/arch/mips/pci/pci-octeon.c
7796@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
7797
7798
7799 static struct pci_ops octeon_pci_ops = {
7800- octeon_read_config,
7801- octeon_write_config,
7802+ .read = octeon_read_config,
7803+ .write = octeon_write_config,
7804 };
7805
7806 static struct resource octeon_pci_mem_resource = {
7807diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
7808index 5e36c33..eb4a17b 100644
7809--- a/arch/mips/pci/pcie-octeon.c
7810+++ b/arch/mips/pci/pcie-octeon.c
7811@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
7812 }
7813
7814 static struct pci_ops octeon_pcie0_ops = {
7815- octeon_pcie0_read_config,
7816- octeon_pcie0_write_config,
7817+ .read = octeon_pcie0_read_config,
7818+ .write = octeon_pcie0_write_config,
7819 };
7820
7821 static struct resource octeon_pcie0_mem_resource = {
7822@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
7823 };
7824
7825 static struct pci_ops octeon_pcie1_ops = {
7826- octeon_pcie1_read_config,
7827- octeon_pcie1_write_config,
7828+ .read = octeon_pcie1_read_config,
7829+ .write = octeon_pcie1_write_config,
7830 };
7831
7832 static struct resource octeon_pcie1_mem_resource = {
7833@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
7834 };
7835
7836 static struct pci_ops octeon_dummy_ops = {
7837- octeon_dummy_read_config,
7838- octeon_dummy_write_config,
7839+ .read = octeon_dummy_read_config,
7840+ .write = octeon_dummy_write_config,
7841 };
7842
7843 static struct resource octeon_dummy_mem_resource = {
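
The pci_ops conversions above replace positional initializers with designated ones. Behaviour is identical today, but designated initializers stay correct if the structure layout ever changes, which is exactly what the grsecurity structure plugins assume. A tiny illustration (demo struct, not the real pci_ops):

/* Hedged sketch: positional vs designated struct initializers. */
#include <stdio.h>

struct pci_ops_demo {
	int (*read)(int reg);
	int (*write)(int reg);
};

static int my_read(int reg)  { return reg; }    /* illustrative stubs */
static int my_write(int reg) { return -reg; }

static struct pci_ops_demo positional = { my_read, my_write }; /* fragile  */
static struct pci_ops_demo designated = {                      /* robust   */
	.read  = my_read,
	.write = my_write,
};

int main(void)
{
	printf("%d %d\n", positional.read(1), designated.write(1)); /* 1 -1 */
	return 0;
}
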
7844diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7845index a2358b4..7cead4f 100644
7846--- a/arch/mips/sgi-ip27/ip27-nmi.c
7847+++ b/arch/mips/sgi-ip27/ip27-nmi.c
7848@@ -187,9 +187,9 @@ void
7849 cont_nmi_dump(void)
7850 {
7851 #ifndef REAL_NMI_SIGNAL
7852- static atomic_t nmied_cpus = ATOMIC_INIT(0);
7853+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7854
7855- atomic_inc(&nmied_cpus);
7856+ atomic_inc_unchecked(&nmied_cpus);
7857 #endif
7858 /*
7859 * Only allow 1 cpu to proceed
7860@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7861 udelay(10000);
7862 }
7863 #else
7864- while (atomic_read(&nmied_cpus) != num_online_cpus());
7865+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7866 #endif
7867
7868 /*
7869diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7870index a046b30..6799527 100644
7871--- a/arch/mips/sni/rm200.c
7872+++ b/arch/mips/sni/rm200.c
7873@@ -270,7 +270,7 @@ spurious_8259A_irq:
7874 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7875 spurious_irq_mask |= irqmask;
7876 }
7877- atomic_inc(&irq_err_count);
7878+ atomic_inc_unchecked(&irq_err_count);
7879 /*
7880 * Theoretically we do not have to handle this IRQ,
7881 * but in Linux this does not cause problems and is
7882diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7883index 41e873b..34d33a7 100644
7884--- a/arch/mips/vr41xx/common/icu.c
7885+++ b/arch/mips/vr41xx/common/icu.c
7886@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7887
7888 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7889
7890- atomic_inc(&irq_err_count);
7891+ atomic_inc_unchecked(&irq_err_count);
7892
7893 return -1;
7894 }
7895diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7896index ae0e4ee..e8f0692 100644
7897--- a/arch/mips/vr41xx/common/irq.c
7898+++ b/arch/mips/vr41xx/common/irq.c
7899@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7900 irq_cascade_t *cascade;
7901
7902 if (irq >= NR_IRQS) {
7903- atomic_inc(&irq_err_count);
7904+ atomic_inc_unchecked(&irq_err_count);
7905 return;
7906 }
7907
7908@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7909 ret = cascade->get_irq(irq);
7910 irq = ret;
7911 if (ret < 0)
7912- atomic_inc(&irq_err_count);
7913+ atomic_inc_unchecked(&irq_err_count);
7914 else
7915 irq_dispatch(irq);
7916 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7917diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7918index 967d144..db12197 100644
7919--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7920+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7921@@ -11,12 +11,14 @@
7922 #ifndef _ASM_PROC_CACHE_H
7923 #define _ASM_PROC_CACHE_H
7924
7925+#include <linux/const.h>
7926+
7927 /* L1 cache */
7928
7929 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7930 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7931-#define L1_CACHE_BYTES 16 /* bytes per entry */
7932 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7933+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7934 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7935
7936 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7937diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7938index bcb5df2..84fabd2 100644
7939--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7940+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7941@@ -16,13 +16,15 @@
7942 #ifndef _ASM_PROC_CACHE_H
7943 #define _ASM_PROC_CACHE_H
7944
7945+#include <linux/const.h>
7946+
7947 /*
7948 * L1 cache
7949 */
7950 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7951 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7952-#define L1_CACHE_BYTES 32 /* bytes per entry */
7953 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7954+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7955 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7956
7957 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7958diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7959index 4ce7a01..449202a 100644
7960--- a/arch/openrisc/include/asm/cache.h
7961+++ b/arch/openrisc/include/asm/cache.h
7962@@ -19,11 +19,13 @@
7963 #ifndef __ASM_OPENRISC_CACHE_H
7964 #define __ASM_OPENRISC_CACHE_H
7965
7966+#include <linux/const.h>
7967+
7968 /* FIXME: How can we replace these with values from the CPU...
7969 * they shouldn't be hard-coded!
7970 */
7971
7972-#define L1_CACHE_BYTES 16
7973 #define L1_CACHE_SHIFT 4
7974+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7975
7976 #endif /* __ASM_OPENRISC_CACHE_H */
7977diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7978index 0be2db2..1b0f26d 100644
7979--- a/arch/parisc/include/asm/atomic.h
7980+++ b/arch/parisc/include/asm/atomic.h
7981@@ -248,6 +248,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7982 return dec;
7983 }
7984
7985+#define atomic64_read_unchecked(v) atomic64_read(v)
7986+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7987+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7988+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7989+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7990+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7991+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7992+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7993+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7994+
7995 #endif /* !CONFIG_64BIT */
7996
7997
7998diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7999index 47f11c7..3420df2 100644
8000--- a/arch/parisc/include/asm/cache.h
8001+++ b/arch/parisc/include/asm/cache.h
8002@@ -5,6 +5,7 @@
8003 #ifndef __ARCH_PARISC_CACHE_H
8004 #define __ARCH_PARISC_CACHE_H
8005
8006+#include <linux/const.h>
8007
8008 /*
8009 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
8010@@ -15,13 +16,13 @@
8011 * just ruin performance.
8012 */
8013 #ifdef CONFIG_PA20
8014-#define L1_CACHE_BYTES 64
8015 #define L1_CACHE_SHIFT 6
8016 #else
8017-#define L1_CACHE_BYTES 32
8018 #define L1_CACHE_SHIFT 5
8019 #endif
8020
8021+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8022+
8023 #ifndef __ASSEMBLY__
8024
8025 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8026diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
8027index 3391d06..c23a2cc 100644
8028--- a/arch/parisc/include/asm/elf.h
8029+++ b/arch/parisc/include/asm/elf.h
8030@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
8031
8032 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
8033
8034+#ifdef CONFIG_PAX_ASLR
8035+#define PAX_ELF_ET_DYN_BASE 0x10000UL
8036+
8037+#define PAX_DELTA_MMAP_LEN 16
8038+#define PAX_DELTA_STACK_LEN 16
8039+#endif
8040+
8041 /* This yields a mask that user programs can use to figure out what
8042 instruction set this CPU supports. This could be done in user space,
8043 but it's not easy, and we've already done it here. */
8044diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
8045index f213f5b..0af3e8e 100644
8046--- a/arch/parisc/include/asm/pgalloc.h
8047+++ b/arch/parisc/include/asm/pgalloc.h
8048@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
8049 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
8050 }
8051
8052+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
8053+{
8054+ pgd_populate(mm, pgd, pmd);
8055+}
8056+
8057 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
8058 {
8059 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
8060@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
8061 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
8062 #define pmd_free(mm, x) do { } while (0)
8063 #define pgd_populate(mm, pmd, pte) BUG()
8064+#define pgd_populate_kernel(mm, pmd, pte) BUG()
8065
8066 #endif
8067
8068diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
8069index 22b89d1..ce34230 100644
8070--- a/arch/parisc/include/asm/pgtable.h
8071+++ b/arch/parisc/include/asm/pgtable.h
8072@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
8073 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
8074 #define PAGE_COPY PAGE_EXECREAD
8075 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
8076+
8077+#ifdef CONFIG_PAX_PAGEEXEC
8078+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
8079+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
8080+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
8081+#else
8082+# define PAGE_SHARED_NOEXEC PAGE_SHARED
8083+# define PAGE_COPY_NOEXEC PAGE_COPY
8084+# define PAGE_READONLY_NOEXEC PAGE_READONLY
8085+#endif
8086+
8087 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
8088 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
8089 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
8090diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
8091index 4006964..fcb3cc2 100644
8092--- a/arch/parisc/include/asm/uaccess.h
8093+++ b/arch/parisc/include/asm/uaccess.h
8094@@ -246,10 +246,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
8095 const void __user *from,
8096 unsigned long n)
8097 {
8098- int sz = __compiletime_object_size(to);
8099+ size_t sz = __compiletime_object_size(to);
8100 int ret = -EFAULT;
8101
8102- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
8103+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
8104 ret = __copy_from_user(to, from, n);
8105 else
8106 copy_from_user_overflow();
8107diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
8108index 50dfafc..b9fc230 100644
8109--- a/arch/parisc/kernel/module.c
8110+++ b/arch/parisc/kernel/module.c
8111@@ -98,16 +98,38 @@
8112
8113 /* three functions to determine where in the module core
8114 * or init pieces the location is */
8115+static inline int in_init_rx(struct module *me, void *loc)
8116+{
8117+ return (loc >= me->module_init_rx &&
8118+ loc < (me->module_init_rx + me->init_size_rx));
8119+}
8120+
8121+static inline int in_init_rw(struct module *me, void *loc)
8122+{
8123+ return (loc >= me->module_init_rw &&
8124+ loc < (me->module_init_rw + me->init_size_rw));
8125+}
8126+
8127 static inline int in_init(struct module *me, void *loc)
8128 {
8129- return (loc >= me->module_init &&
8130- loc <= (me->module_init + me->init_size));
8131+ return in_init_rx(me, loc) || in_init_rw(me, loc);
8132+}
8133+
8134+static inline int in_core_rx(struct module *me, void *loc)
8135+{
8136+ return (loc >= me->module_core_rx &&
8137+ loc < (me->module_core_rx + me->core_size_rx));
8138+}
8139+
8140+static inline int in_core_rw(struct module *me, void *loc)
8141+{
8142+ return (loc >= me->module_core_rw &&
8143+ loc < (me->module_core_rw + me->core_size_rw));
8144 }
8145
8146 static inline int in_core(struct module *me, void *loc)
8147 {
8148- return (loc >= me->module_core &&
8149- loc <= (me->module_core + me->core_size));
8150+ return in_core_rx(me, loc) || in_core_rw(me, loc);
8151 }
8152
8153 static inline int in_local(struct module *me, void *loc)
8154@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
8155 }
8156
8157 /* align things a bit */
8158- me->core_size = ALIGN(me->core_size, 16);
8159- me->arch.got_offset = me->core_size;
8160- me->core_size += gots * sizeof(struct got_entry);
8161+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
8162+ me->arch.got_offset = me->core_size_rw;
8163+ me->core_size_rw += gots * sizeof(struct got_entry);
8164
8165- me->core_size = ALIGN(me->core_size, 16);
8166- me->arch.fdesc_offset = me->core_size;
8167- me->core_size += fdescs * sizeof(Elf_Fdesc);
8168+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
8169+ me->arch.fdesc_offset = me->core_size_rw;
8170+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
8171
8172 me->arch.got_max = gots;
8173 me->arch.fdesc_max = fdescs;
8174@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
8175
8176 BUG_ON(value == 0);
8177
8178- got = me->module_core + me->arch.got_offset;
8179+ got = me->module_core_rw + me->arch.got_offset;
8180 for (i = 0; got[i].addr; i++)
8181 if (got[i].addr == value)
8182 goto out;
8183@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
8184 #ifdef CONFIG_64BIT
8185 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
8186 {
8187- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
8188+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
8189
8190 if (!value) {
8191 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
8192@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
8193
8194 /* Create new one */
8195 fdesc->addr = value;
8196- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
8197+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
8198 return (Elf_Addr)fdesc;
8199 }
8200 #endif /* CONFIG_64BIT */
8201@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
8202
8203 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
8204 end = table + sechdrs[me->arch.unwind_section].sh_size;
8205- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
8206+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
8207
8208 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
8209 me->arch.unwind_section, table, end, gp);
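
The new in_init_rx/in_init_rw/in_core_rx/in_core_rw helpers are half-open range checks, and they carry a quiet semantic fix: the old in_init/in_core used <=, so the one-past-the-end address counted as inside; the rewritten checks use <. A sketch of the difference:

/* Hedged sketch: half-open [base, base+size) membership, as in the new
 * helpers; the end address itself is excluded. */
#include <stdio.h>

static int in_range(const char *loc, const char *base, unsigned long size)
{
	return loc >= base && loc < base + size;
}

int main(void)
{
	char region[64];
	printf("%d\n", in_range(region,      region, sizeof region)); /* 1 */
	printf("%d\n", in_range(region + 64, region, sizeof region)); /* 0: end excluded */
	return 0;
}
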
8210diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
8211index e1ffea2..46ed66e 100644
8212--- a/arch/parisc/kernel/sys_parisc.c
8213+++ b/arch/parisc/kernel/sys_parisc.c
8214@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8215 unsigned long task_size = TASK_SIZE;
8216 int do_color_align, last_mmap;
8217 struct vm_unmapped_area_info info;
8218+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
8219
8220 if (len > task_size)
8221 return -ENOMEM;
8222@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8223 goto found_addr;
8224 }
8225
8226+#ifdef CONFIG_PAX_RANDMMAP
8227+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8228+#endif
8229+
8230 if (addr) {
8231 if (do_color_align && last_mmap)
8232 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
8233@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8234 info.high_limit = mmap_upper_limit();
8235 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
8236 info.align_offset = shared_align_offset(last_mmap, pgoff);
8237+ info.threadstack_offset = offset;
8238 addr = vm_unmapped_area(&info);
8239
8240 found_addr:
8241@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8242 unsigned long addr = addr0;
8243 int do_color_align, last_mmap;
8244 struct vm_unmapped_area_info info;
8245+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
8246
8247 #ifdef CONFIG_64BIT
8248 /* This should only ever run for 32-bit processes. */
8249@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8250 }
8251
8252 /* requesting a specific address */
8253+#ifdef CONFIG_PAX_RANDMMAP
8254+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8255+#endif
8256+
8257 if (addr) {
8258 if (do_color_align && last_mmap)
8259 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
8260@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8261 info.high_limit = mm->mmap_base;
8262 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
8263 info.align_offset = shared_align_offset(last_mmap, pgoff);
8264+ info.threadstack_offset = offset;
8265 addr = vm_unmapped_area(&info);
8266 if (!(addr & ~PAGE_MASK))
8267 goto found_addr;
8268@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8269 mm->mmap_legacy_base = mmap_legacy_base();
8270 mm->mmap_base = mmap_upper_limit();
8271
8272+#ifdef CONFIG_PAX_RANDMMAP
8273+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
8274+ mm->mmap_legacy_base += mm->delta_mmap;
8275+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8276+ }
8277+#endif
8278+
8279 if (mmap_is_legacy()) {
8280 mm->mmap_base = mm->mmap_legacy_base;
8281 mm->get_unmapped_area = arch_get_unmapped_area;
8282diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
8283index 47ee620..1107387 100644
8284--- a/arch/parisc/kernel/traps.c
8285+++ b/arch/parisc/kernel/traps.c
8286@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
8287
8288 down_read(&current->mm->mmap_sem);
8289 vma = find_vma(current->mm,regs->iaoq[0]);
8290- if (vma && (regs->iaoq[0] >= vma->vm_start)
8291- && (vma->vm_flags & VM_EXEC)) {
8292-
8293+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
8294 fault_address = regs->iaoq[0];
8295 fault_space = regs->iasq[0];
8296
8297diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
8298index 3ca9c11..d163ef7 100644
8299--- a/arch/parisc/mm/fault.c
8300+++ b/arch/parisc/mm/fault.c
8301@@ -15,6 +15,7 @@
8302 #include <linux/sched.h>
8303 #include <linux/interrupt.h>
8304 #include <linux/module.h>
8305+#include <linux/unistd.h>
8306
8307 #include <asm/uaccess.h>
8308 #include <asm/traps.h>
8309@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
8310 static unsigned long
8311 parisc_acctyp(unsigned long code, unsigned int inst)
8312 {
8313- if (code == 6 || code == 16)
8314+ if (code == 6 || code == 7 || code == 16)
8315 return VM_EXEC;
8316
8317 switch (inst & 0xf0000000) {
8318@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
8319 }
8320 #endif
8321
8322+#ifdef CONFIG_PAX_PAGEEXEC
8323+/*
8324+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
8325+ *
8326+ * returns 1 when task should be killed
8327+ * 2 when rt_sigreturn trampoline was detected
8328+ * 3 when unpatched PLT trampoline was detected
8329+ */
8330+static int pax_handle_fetch_fault(struct pt_regs *regs)
8331+{
8332+
8333+#ifdef CONFIG_PAX_EMUPLT
8334+ int err;
8335+
8336+ do { /* PaX: unpatched PLT emulation */
8337+ unsigned int bl, depwi;
8338+
8339+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
8340+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
8341+
8342+ if (err)
8343+ break;
8344+
8345+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
8346+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
8347+
8348+ err = get_user(ldw, (unsigned int *)addr);
8349+ err |= get_user(bv, (unsigned int *)(addr+4));
8350+ err |= get_user(ldw2, (unsigned int *)(addr+8));
8351+
8352+ if (err)
8353+ break;
8354+
8355+ if (ldw == 0x0E801096U &&
8356+ bv == 0xEAC0C000U &&
8357+ ldw2 == 0x0E881095U)
8358+ {
8359+ unsigned int resolver, map;
8360+
8361+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
8362+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
8363+ if (err)
8364+ break;
8365+
8366+ regs->gr[20] = instruction_pointer(regs)+8;
8367+ regs->gr[21] = map;
8368+ regs->gr[22] = resolver;
8369+ regs->iaoq[0] = resolver | 3UL;
8370+ regs->iaoq[1] = regs->iaoq[0] + 4;
8371+ return 3;
8372+ }
8373+ }
8374+ } while (0);
8375+#endif
8376+
8377+#ifdef CONFIG_PAX_EMUTRAMP
8378+
8379+#ifndef CONFIG_PAX_EMUSIGRT
8380+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
8381+ return 1;
8382+#endif
8383+
8384+ do { /* PaX: rt_sigreturn emulation */
8385+ unsigned int ldi1, ldi2, bel, nop;
8386+
8387+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
8388+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
8389+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
8390+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
8391+
8392+ if (err)
8393+ break;
8394+
8395+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
8396+ ldi2 == 0x3414015AU &&
8397+ bel == 0xE4008200U &&
8398+ nop == 0x08000240U)
8399+ {
8400+ regs->gr[25] = (ldi1 & 2) >> 1;
8401+ regs->gr[20] = __NR_rt_sigreturn;
8402+ regs->gr[31] = regs->iaoq[1] + 16;
8403+ regs->sr[0] = regs->iasq[1];
8404+ regs->iaoq[0] = 0x100UL;
8405+ regs->iaoq[1] = regs->iaoq[0] + 4;
8406+ regs->iasq[0] = regs->sr[2];
8407+ regs->iasq[1] = regs->sr[2];
8408+ return 2;
8409+ }
8410+ } while (0);
8411+#endif
8412+
8413+ return 1;
8414+}
8415+
8416+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
8417+{
8418+ unsigned long i;
8419+
8420+ printk(KERN_ERR "PAX: bytes at PC: ");
8421+ for (i = 0; i < 5; i++) {
8422+ unsigned int c;
8423+ if (get_user(c, (unsigned int __user *)pc+i))
8424+ printk(KERN_CONT "???????? ");
8425+ else
8426+ printk(KERN_CONT "%08x ", c);
8427+ }
8428+ printk("\n");
8429+}
8430+#endif
8431+
8432 int fixup_exception(struct pt_regs *regs)
8433 {
8434 const struct exception_table_entry *fix;
8435@@ -234,8 +345,33 @@ retry:
8436
8437 good_area:
8438
8439- if ((vma->vm_flags & acc_type) != acc_type)
8440+ if ((vma->vm_flags & acc_type) != acc_type) {
8441+
8442+#ifdef CONFIG_PAX_PAGEEXEC
8443+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
8444+ (address & ~3UL) == instruction_pointer(regs))
8445+ {
8446+ up_read(&mm->mmap_sem);
8447+ switch (pax_handle_fetch_fault(regs)) {
8448+
8449+#ifdef CONFIG_PAX_EMUPLT
8450+ case 3:
8451+ return;
8452+#endif
8453+
8454+#ifdef CONFIG_PAX_EMUTRAMP
8455+ case 2:
8456+ return;
8457+#endif
8458+
8459+ }
8460+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
8461+ do_group_exit(SIGKILL);
8462+ }
8463+#endif
8464+
8465 goto bad_area;
8466+ }
8467
8468 /*
8469 * If for any reason at all we couldn't handle the fault, make
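
pax_report_insns above dumps the five 32-bit words at the offending PC via get_user, printing "????????" for any word that cannot be read. The same dump loop can be rendered in user space, fed the rt_sigreturn trampoline pattern that pax_handle_fetch_fault matches (the ldi/ldi/bel/nop opcodes are taken from the hunk above; this is a sketch, not kernel code):

    #include <stdio.h>

    int main(void)
    {
        /* ldi1 / ldi2 / bel / nop words matched by the EMUTRAMP check, plus one extra */
        const unsigned int insn[5] = {
            0x34190000U, 0x3414015AU, 0xE4008200U, 0x08000240U, 0x00000000U
        };
        unsigned long i;

        printf("PAX: bytes at PC: ");
        for (i = 0; i < 5; i++)
            printf("%08x ", insn[i]);
        printf("\n");
        return 0;
    }
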
8470diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
8471index 80b94b0..a3274fb 100644
8472--- a/arch/powerpc/Kconfig
8473+++ b/arch/powerpc/Kconfig
8474@@ -398,6 +398,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
8475 config KEXEC
8476 bool "kexec system call"
8477 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
8478+ depends on !GRKERNSEC_KMEM
8479 help
8480 kexec is a system call that implements the ability to shutdown your
8481 current kernel, and to start another kernel. It is like a reboot
8482diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
8483index 28992d0..c797b20 100644
8484--- a/arch/powerpc/include/asm/atomic.h
8485+++ b/arch/powerpc/include/asm/atomic.h
8486@@ -519,6 +519,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
8487 return t1;
8488 }
8489
8490+#define atomic64_read_unchecked(v) atomic64_read(v)
8491+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
8492+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
8493+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
8494+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
8495+#define atomic64_inc_unchecked(v) atomic64_inc(v)
8496+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
8497+#define atomic64_dec_unchecked(v) atomic64_dec(v)
8498+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
8499+
8500 #endif /* __powerpc64__ */
8501
8502 #endif /* __KERNEL__ */
8503diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
8504index bab79a1..4a3eabc 100644
8505--- a/arch/powerpc/include/asm/barrier.h
8506+++ b/arch/powerpc/include/asm/barrier.h
8507@@ -73,7 +73,7 @@
8508 do { \
8509 compiletime_assert_atomic_type(*p); \
8510 __lwsync(); \
8511- ACCESS_ONCE(*p) = (v); \
8512+ ACCESS_ONCE_RW(*p) = (v); \
8513 } while (0)
8514
8515 #define smp_load_acquire(p) \
8516diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
8517index ed0afc1..0332825 100644
8518--- a/arch/powerpc/include/asm/cache.h
8519+++ b/arch/powerpc/include/asm/cache.h
8520@@ -3,6 +3,7 @@
8521
8522 #ifdef __KERNEL__
8523
8524+#include <linux/const.h>
8525
8526 /* bytes per L1 cache line */
8527 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
8528@@ -22,7 +23,7 @@
8529 #define L1_CACHE_SHIFT 7
8530 #endif
8531
8532-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8533+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8534
8535 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8536
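
The <linux/const.h> include exists for the _AC() wrapper used in the new L1_CACHE_BYTES definition: it pastes the UL suffix onto the constant in C and drops it when preprocessing assembler sources, so one definition serves both. A runnable sketch of that mechanism (macro bodies mirror include/uapi/linux/const.h):

    #include <stdio.h>

    #ifdef __ASSEMBLY__
    #define _AC(X, Y)  X            /* .S files: bare constant */
    #else
    #define __AC(X, Y) (X##Y)
    #define _AC(X, Y)  __AC(X, Y)   /* C files: paste the type suffix on */
    #endif

    #define L1_CACHE_SHIFT 7
    #define L1_CACHE_BYTES (_AC(1, UL) << L1_CACHE_SHIFT)

    int main(void)
    {
        /* the UL suffix keeps the shift in unsigned long arithmetic */
        printf("L1_CACHE_BYTES = %lu\n", L1_CACHE_BYTES);  /* 128 */
        return 0;
    }
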
8537diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8538index 888d8f3..66f581c 100644
8539--- a/arch/powerpc/include/asm/elf.h
8540+++ b/arch/powerpc/include/asm/elf.h
8541@@ -28,8 +28,19 @@
8542 the loader. We need to make sure that it is out of the way of the program
8543 that it will "exec", and that there is sufficient room for the brk. */
8544
8545-extern unsigned long randomize_et_dyn(unsigned long base);
8546-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
8547+#define ELF_ET_DYN_BASE (0x20000000)
8548+
8549+#ifdef CONFIG_PAX_ASLR
8550+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8551+
8552+#ifdef __powerpc64__
8553+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8554+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8555+#else
8556+#define PAX_DELTA_MMAP_LEN 15
8557+#define PAX_DELTA_STACK_LEN 15
8558+#endif
8559+#endif
8560
8561 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8562
8563@@ -129,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8564 (0x7ff >> (PAGE_SHIFT - 12)) : \
8565 (0x3ffff >> (PAGE_SHIFT - 12)))
8566
8567-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8568-#define arch_randomize_brk arch_randomize_brk
8569-
8570-
8571 #ifdef CONFIG_SPU_BASE
8572 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
8573 #define NT_SPU 1
8574diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8575index 8196e9c..d83a9f3 100644
8576--- a/arch/powerpc/include/asm/exec.h
8577+++ b/arch/powerpc/include/asm/exec.h
8578@@ -4,6 +4,6 @@
8579 #ifndef _ASM_POWERPC_EXEC_H
8580 #define _ASM_POWERPC_EXEC_H
8581
8582-extern unsigned long arch_align_stack(unsigned long sp);
8583+#define arch_align_stack(x) ((x) & ~0xfUL)
8584
8585 #endif /* _ASM_POWERPC_EXEC_H */
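
With the randomizing arch_align_stack() removed (see the process.c hunk further down), the macro reduces to rounding the stack pointer down to the next 16-byte boundary. For example:

    #include <assert.h>

    #define arch_align_stack(x) ((x) & ~0xfUL)

    int main(void)
    {
        assert(arch_align_stack(0x7fffdeadbeefUL) == 0x7fffdeadbee0UL);
        assert(arch_align_stack(0x1000UL) == 0x1000UL);   /* already aligned */
        return 0;
    }
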
8586diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8587index 5acabbd..7ea14fa 100644
8588--- a/arch/powerpc/include/asm/kmap_types.h
8589+++ b/arch/powerpc/include/asm/kmap_types.h
8590@@ -10,7 +10,7 @@
8591 * 2 of the License, or (at your option) any later version.
8592 */
8593
8594-#define KM_TYPE_NR 16
8595+#define KM_TYPE_NR 17
8596
8597 #endif /* __KERNEL__ */
8598 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8599diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8600index b8da913..60b608a 100644
8601--- a/arch/powerpc/include/asm/local.h
8602+++ b/arch/powerpc/include/asm/local.h
8603@@ -9,15 +9,26 @@ typedef struct
8604 atomic_long_t a;
8605 } local_t;
8606
8607+typedef struct
8608+{
8609+ atomic_long_unchecked_t a;
8610+} local_unchecked_t;
8611+
8612 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8613
8614 #define local_read(l) atomic_long_read(&(l)->a)
8615+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8616 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8617+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8618
8619 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8620+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8621 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8622+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8623 #define local_inc(l) atomic_long_inc(&(l)->a)
8624+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8625 #define local_dec(l) atomic_long_dec(&(l)->a)
8626+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8627
8628 static __inline__ long local_add_return(long a, local_t *l)
8629 {
8630@@ -35,6 +46,7 @@ static __inline__ long local_add_return(long a, local_t *l)
8631
8632 return t;
8633 }
8634+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
8635
8636 #define local_add_negative(a, l) (local_add_return((a), (l)) < 0)
8637
8638@@ -54,6 +66,7 @@ static __inline__ long local_sub_return(long a, local_t *l)
8639
8640 return t;
8641 }
8642+#define local_sub_return_unchecked(i, l) atomic_long_sub_return_unchecked((i), (&(l)->a))
8643
8644 static __inline__ long local_inc_return(local_t *l)
8645 {
8646@@ -101,6 +114,8 @@ static __inline__ long local_dec_return(local_t *l)
8647
8648 #define local_cmpxchg(l, o, n) \
8649 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8650+#define local_cmpxchg_unchecked(l, o, n) \
8651+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8652 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8653
8654 /**
8655diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8656index 8565c25..2865190 100644
8657--- a/arch/powerpc/include/asm/mman.h
8658+++ b/arch/powerpc/include/asm/mman.h
8659@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8660 }
8661 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8662
8663-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8664+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8665 {
8666 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8667 }
8668diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8669index 32e4e21..62afb12 100644
8670--- a/arch/powerpc/include/asm/page.h
8671+++ b/arch/powerpc/include/asm/page.h
8672@@ -230,8 +230,9 @@ extern long long virt_phys_offset;
8673 * and needs to be executable. This means the whole heap ends
8674 * up being executable.
8675 */
8676-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8677- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8678+#define VM_DATA_DEFAULT_FLAGS32 \
8679+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8680+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8681
8682 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8683 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8684@@ -259,6 +260,9 @@ extern long long virt_phys_offset;
8685 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8686 #endif
8687
8688+#define ktla_ktva(addr) (addr)
8689+#define ktva_ktla(addr) (addr)
8690+
8691 #ifndef CONFIG_PPC_BOOK3S_64
8692 /*
8693 * Use the top bit of the higher-level page table entries to indicate whether
8694diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
8695index 88693ce..ac6f9ab 100644
8696--- a/arch/powerpc/include/asm/page_64.h
8697+++ b/arch/powerpc/include/asm/page_64.h
8698@@ -153,15 +153,18 @@ do { \
8699 * stack by default, so in the absence of a PT_GNU_STACK program header
8700 * we turn execute permission off.
8701 */
8702-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8703- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8704+#define VM_STACK_DEFAULT_FLAGS32 \
8705+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8706+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8707
8708 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8709 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8710
8711+#ifndef CONFIG_PAX_PAGEEXEC
8712 #define VM_STACK_DEFAULT_FLAGS \
8713 (is_32bit_task() ? \
8714 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
8715+#endif
8716
8717 #include <asm-generic/getorder.h>
8718
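
VM_DATA_DEFAULT_FLAGS32 and VM_STACK_DEFAULT_FLAGS32 now grant VM_EXEC only when the process personality carries READ_IMPLIES_EXEC, which the ELF loader sets for binaries lacking a PT_GNU_STACK header. The bit can be inspected from user space with the standard personality(2) query idiom:

    #include <stdio.h>
    #include <sys/personality.h>

    int main(void)
    {
        int pers = personality(0xffffffff);   /* 0xffffffff queries without changing */
        printf("READ_IMPLIES_EXEC is %s\n",
               (pers & READ_IMPLIES_EXEC) ? "set" : "clear");
        return 0;
    }
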
8719diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
8720index 4b0be20..c15a27d 100644
8721--- a/arch/powerpc/include/asm/pgalloc-64.h
8722+++ b/arch/powerpc/include/asm/pgalloc-64.h
8723@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8724 #ifndef CONFIG_PPC_64K_PAGES
8725
8726 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
8727+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
8728
8729 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
8730 {
8731@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8732 pud_set(pud, (unsigned long)pmd);
8733 }
8734
8735+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8736+{
8737+ pud_populate(mm, pud, pmd);
8738+}
8739+
8740 #define pmd_populate(mm, pmd, pte_page) \
8741 pmd_populate_kernel(mm, pmd, page_address(pte_page))
8742 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
8743@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
8744 #endif
8745
8746 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
8747+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8748
8749 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
8750 pte_t *pte)
8751diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
8752index d98c1ec..9f61569 100644
8753--- a/arch/powerpc/include/asm/pgtable.h
8754+++ b/arch/powerpc/include/asm/pgtable.h
8755@@ -2,6 +2,7 @@
8756 #define _ASM_POWERPC_PGTABLE_H
8757 #ifdef __KERNEL__
8758
8759+#include <linux/const.h>
8760 #ifndef __ASSEMBLY__
8761 #include <linux/mmdebug.h>
8762 #include <asm/processor.h> /* For TASK_SIZE */
8763diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
8764index 4aad413..85d86bf 100644
8765--- a/arch/powerpc/include/asm/pte-hash32.h
8766+++ b/arch/powerpc/include/asm/pte-hash32.h
8767@@ -21,6 +21,7 @@
8768 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
8769 #define _PAGE_USER 0x004 /* usermode access allowed */
8770 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
8771+#define _PAGE_EXEC _PAGE_GUARDED
8772 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
8773 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
8774 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
8775diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
8776index bffd89d..a6641ed 100644
8777--- a/arch/powerpc/include/asm/reg.h
8778+++ b/arch/powerpc/include/asm/reg.h
8779@@ -251,6 +251,7 @@
8780 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
8781 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
8782 #define DSISR_NOHPTE 0x40000000 /* no translation found */
8783+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
8784 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
8785 #define DSISR_ISSTORE 0x02000000 /* access was a store */
8786 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
8787diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
8788index 5a6614a..d89995d1 100644
8789--- a/arch/powerpc/include/asm/smp.h
8790+++ b/arch/powerpc/include/asm/smp.h
8791@@ -51,7 +51,7 @@ struct smp_ops_t {
8792 int (*cpu_disable)(void);
8793 void (*cpu_die)(unsigned int nr);
8794 int (*cpu_bootable)(unsigned int nr);
8795-};
8796+} __no_const;
8797
8798 extern void smp_send_debugger_break(void);
8799 extern void start_secondary_resume(void);
8800diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
8801index b034ecd..af7e31f 100644
8802--- a/arch/powerpc/include/asm/thread_info.h
8803+++ b/arch/powerpc/include/asm/thread_info.h
8804@@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void)
8805 #if defined(CONFIG_PPC64)
8806 #define TIF_ELF2ABI 18 /* function descriptors must die! */
8807 #endif
8808+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
8809+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
8810
8811 /* as above, but as bit values */
8812 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
8813@@ -125,9 +127,10 @@ static inline struct thread_info *current_thread_info(void)
8814 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8815 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
8816 #define _TIF_NOHZ (1<<TIF_NOHZ)
8817+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8818 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
8819 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
8820- _TIF_NOHZ)
8821+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
8822
8823 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
8824 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
8825diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
8826index 9485b43..3bd3c16 100644
8827--- a/arch/powerpc/include/asm/uaccess.h
8828+++ b/arch/powerpc/include/asm/uaccess.h
8829@@ -58,6 +58,7 @@
8830
8831 #endif
8832
8833+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
8834 #define access_ok(type, addr, size) \
8835 (__chk_user_ptr(addr), \
8836 __access_ok((__force unsigned long)(addr), (size), get_fs()))
8837@@ -318,52 +319,6 @@ do { \
8838 extern unsigned long __copy_tofrom_user(void __user *to,
8839 const void __user *from, unsigned long size);
8840
8841-#ifndef __powerpc64__
8842-
8843-static inline unsigned long copy_from_user(void *to,
8844- const void __user *from, unsigned long n)
8845-{
8846- unsigned long over;
8847-
8848- if (access_ok(VERIFY_READ, from, n))
8849- return __copy_tofrom_user((__force void __user *)to, from, n);
8850- if ((unsigned long)from < TASK_SIZE) {
8851- over = (unsigned long)from + n - TASK_SIZE;
8852- return __copy_tofrom_user((__force void __user *)to, from,
8853- n - over) + over;
8854- }
8855- return n;
8856-}
8857-
8858-static inline unsigned long copy_to_user(void __user *to,
8859- const void *from, unsigned long n)
8860-{
8861- unsigned long over;
8862-
8863- if (access_ok(VERIFY_WRITE, to, n))
8864- return __copy_tofrom_user(to, (__force void __user *)from, n);
8865- if ((unsigned long)to < TASK_SIZE) {
8866- over = (unsigned long)to + n - TASK_SIZE;
8867- return __copy_tofrom_user(to, (__force void __user *)from,
8868- n - over) + over;
8869- }
8870- return n;
8871-}
8872-
8873-#else /* __powerpc64__ */
8874-
8875-#define __copy_in_user(to, from, size) \
8876- __copy_tofrom_user((to), (from), (size))
8877-
8878-extern unsigned long copy_from_user(void *to, const void __user *from,
8879- unsigned long n);
8880-extern unsigned long copy_to_user(void __user *to, const void *from,
8881- unsigned long n);
8882-extern unsigned long copy_in_user(void __user *to, const void __user *from,
8883- unsigned long n);
8884-
8885-#endif /* __powerpc64__ */
8886-
8887 static inline unsigned long __copy_from_user_inatomic(void *to,
8888 const void __user *from, unsigned long n)
8889 {
8890@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
8891 if (ret == 0)
8892 return 0;
8893 }
8894+
8895+ if (!__builtin_constant_p(n))
8896+ check_object_size(to, n, false);
8897+
8898 return __copy_tofrom_user((__force void __user *)to, from, n);
8899 }
8900
8901@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
8902 if (ret == 0)
8903 return 0;
8904 }
8905+
8906+ if (!__builtin_constant_p(n))
8907+ check_object_size(from, n, true);
8908+
8909 return __copy_tofrom_user(to, (__force const void __user *)from, n);
8910 }
8911
8912@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
8913 return __copy_to_user_inatomic(to, from, size);
8914 }
8915
8916+#ifndef __powerpc64__
8917+
8918+static inline unsigned long __must_check copy_from_user(void *to,
8919+ const void __user *from, unsigned long n)
8920+{
8921+ unsigned long over;
8922+
8923+ if ((long)n < 0)
8924+ return n;
8925+
8926+ if (access_ok(VERIFY_READ, from, n)) {
8927+ if (!__builtin_constant_p(n))
8928+ check_object_size(to, n, false);
8929+ return __copy_tofrom_user((__force void __user *)to, from, n);
8930+ }
8931+ if ((unsigned long)from < TASK_SIZE) {
8932+ over = (unsigned long)from + n - TASK_SIZE;
8933+ if (!__builtin_constant_p(n - over))
8934+ check_object_size(to, n - over, false);
8935+ return __copy_tofrom_user((__force void __user *)to, from,
8936+ n - over) + over;
8937+ }
8938+ return n;
8939+}
8940+
8941+static inline unsigned long __must_check copy_to_user(void __user *to,
8942+ const void *from, unsigned long n)
8943+{
8944+ unsigned long over;
8945+
8946+ if ((long)n < 0)
8947+ return n;
8948+
8949+ if (access_ok(VERIFY_WRITE, to, n)) {
8950+ if (!__builtin_constant_p(n))
8951+ check_object_size(from, n, true);
8952+ return __copy_tofrom_user(to, (__force void __user *)from, n);
8953+ }
8954+ if ((unsigned long)to < TASK_SIZE) {
8955+ over = (unsigned long)to + n - TASK_SIZE;
8956+ if (!__builtin_constant_p(n - over))
8957+ check_object_size(from, n - over, true);
8958+ return __copy_tofrom_user(to, (__force void __user *)from,
8959+ n - over) + over;
8960+ }
8961+ return n;
8962+}
8963+
8964+#else /* __powerpc64__ */
8965+
8966+#define __copy_in_user(to, from, size) \
8967+ __copy_tofrom_user((to), (from), (size))
8968+
8969+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
8970+{
8971+ if ((long)n < 0 || n > INT_MAX)
8972+ return n;
8973+
8974+ if (!__builtin_constant_p(n))
8975+ check_object_size(to, n, false);
8976+
8977+ if (likely(access_ok(VERIFY_READ, from, n)))
8978+ n = __copy_from_user(to, from, n);
8979+ else
8980+ memset(to, 0, n);
8981+ return n;
8982+}
8983+
8984+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8985+{
8986+ if ((long)n < 0 || n > INT_MAX)
8987+ return n;
8988+
8989+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
8990+ if (!__builtin_constant_p(n))
8991+ check_object_size(from, n, true);
8992+ n = __copy_to_user(to, from, n);
8993+ }
8994+ return n;
8995+}
8996+
8997+extern unsigned long copy_in_user(void __user *to, const void __user *from,
8998+ unsigned long n);
8999+
9000+#endif /* __powerpc64__ */
9001+
9002 extern unsigned long __clear_user(void __user *addr, unsigned long size);
9003
9004 static inline unsigned long clear_user(void __user *addr, unsigned long size)
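
The added (long)n < 0 guards in the rewritten copy_from_user/copy_to_user reject sizes with the sign bit set before any copying happens: a size_t that large is almost always a negative value (often an error code) converted to unsigned, and catching it early prevents an attempted multi-gigabyte copy. A worked illustration of the conversion (plain C, hypothetical values):

    #include <stdio.h>

    int main(void)
    {
        long bad_len = -4;                         /* e.g. an error code misused as a length */
        unsigned long n = (unsigned long)bad_len;

        printf("n           = %#lx\n", n);          /* 0xfffffffffffffffc on 64-bit */
        printf("(long)n < 0 = %d\n", (long)n < 0);  /* 1: the guard fires */
        return 0;
    }
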
9005diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
9006index 670c312..60c2b52 100644
9007--- a/arch/powerpc/kernel/Makefile
9008+++ b/arch/powerpc/kernel/Makefile
9009@@ -27,6 +27,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
9010 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
9011 endif
9012
9013+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
9014+
9015 obj-y := cputable.o ptrace.o syscalls.o \
9016 irq.o align.o signal_32.o pmc.o vdso.o \
9017 process.o systbl.o idle.o \
9018diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
9019index bb9cac6..5181202 100644
9020--- a/arch/powerpc/kernel/exceptions-64e.S
9021+++ b/arch/powerpc/kernel/exceptions-64e.S
9022@@ -1010,6 +1010,7 @@ storage_fault_common:
9023 std r14,_DAR(r1)
9024 std r15,_DSISR(r1)
9025 addi r3,r1,STACK_FRAME_OVERHEAD
9026+ bl save_nvgprs
9027 mr r4,r14
9028 mr r5,r15
9029 ld r14,PACA_EXGEN+EX_R14(r13)
9030@@ -1018,8 +1019,7 @@ storage_fault_common:
9031 cmpdi r3,0
9032 bne- 1f
9033 b ret_from_except_lite
9034-1: bl save_nvgprs
9035- mr r5,r3
9036+1: mr r5,r3
9037 addi r3,r1,STACK_FRAME_OVERHEAD
9038 ld r4,_DAR(r1)
9039 bl bad_page_fault
9040diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
9041index a7d36b1..53af150 100644
9042--- a/arch/powerpc/kernel/exceptions-64s.S
9043+++ b/arch/powerpc/kernel/exceptions-64s.S
9044@@ -1637,10 +1637,10 @@ handle_page_fault:
9045 11: ld r4,_DAR(r1)
9046 ld r5,_DSISR(r1)
9047 addi r3,r1,STACK_FRAME_OVERHEAD
9048+ bl save_nvgprs
9049 bl do_page_fault
9050 cmpdi r3,0
9051 beq+ 12f
9052- bl save_nvgprs
9053 mr r5,r3
9054 addi r3,r1,STACK_FRAME_OVERHEAD
9055 lwz r4,_DAR(r1)
9056diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
9057index 248ee7e..1eb60dd 100644
9058--- a/arch/powerpc/kernel/irq.c
9059+++ b/arch/powerpc/kernel/irq.c
9060@@ -447,6 +447,8 @@ void migrate_irqs(void)
9061 }
9062 #endif
9063
9064+extern void gr_handle_kernel_exploit(void);
9065+
9066 static inline void check_stack_overflow(void)
9067 {
9068 #ifdef CONFIG_DEBUG_STACKOVERFLOW
9069@@ -459,6 +461,7 @@ static inline void check_stack_overflow(void)
9070 printk("do_IRQ: stack overflow: %ld\n",
9071 sp - sizeof(struct thread_info));
9072 dump_stack();
9073+ gr_handle_kernel_exploit();
9074 }
9075 #endif
9076 }
9077diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
9078index 6cff040..74ac5d1b 100644
9079--- a/arch/powerpc/kernel/module_32.c
9080+++ b/arch/powerpc/kernel/module_32.c
9081@@ -161,7 +161,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
9082 me->arch.core_plt_section = i;
9083 }
9084 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
9085- printk("Module doesn't contain .plt or .init.plt sections.\n");
9086+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
9087 return -ENOEXEC;
9088 }
9089
9090@@ -191,11 +191,16 @@ static uint32_t do_plt_call(void *location,
9091
9092 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
9093 /* Init, or core PLT? */
9094- if (location >= mod->module_core
9095- && location < mod->module_core + mod->core_size)
9096+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
9097+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
9098 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
9099- else
9100+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
9101+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
9102 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
9103+ else {
9104+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
9105+ return ~0UL;
9106+ }
9107
9108 /* Find this entry, or if that fails, the next avail. entry */
9109 while (entry->jump[0]) {
9110@@ -299,7 +304,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
9111 }
9112 #ifdef CONFIG_DYNAMIC_FTRACE
9113 module->arch.tramp =
9114- do_plt_call(module->module_core,
9115+ do_plt_call(module->module_core_rx,
9116 (unsigned long)ftrace_caller,
9117 sechdrs, module);
9118 #endif
9119diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
9120index be99774..9879c82 100644
9121--- a/arch/powerpc/kernel/process.c
9122+++ b/arch/powerpc/kernel/process.c
9123@@ -1039,8 +1039,8 @@ void show_regs(struct pt_regs * regs)
9124 * Lookup NIP late so we have the best chance of getting the
9125 * above info out without failing
9126 */
9127- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
9128- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
9129+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
9130+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
9131 #endif
9132 show_stack(current, (unsigned long *) regs->gpr[1]);
9133 if (!user_mode(regs))
9134@@ -1554,10 +1554,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
9135 newsp = stack[0];
9136 ip = stack[STACK_FRAME_LR_SAVE];
9137 if (!firstframe || ip != lr) {
9138- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
9139+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
9140 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
9141 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
9142- printk(" (%pS)",
9143+ printk(" (%pA)",
9144 (void *)current->ret_stack[curr_frame].ret);
9145 curr_frame--;
9146 }
9147@@ -1577,7 +1577,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
9148 struct pt_regs *regs = (struct pt_regs *)
9149 (sp + STACK_FRAME_OVERHEAD);
9150 lr = regs->link;
9151- printk("--- Exception: %lx at %pS\n LR = %pS\n",
9152+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
9153 regs->trap, (void *)regs->nip, (void *)lr);
9154 firstframe = 1;
9155 }
9156@@ -1613,58 +1613,3 @@ void notrace __ppc64_runlatch_off(void)
9157 mtspr(SPRN_CTRLT, ctrl);
9158 }
9159 #endif /* CONFIG_PPC64 */
9160-
9161-unsigned long arch_align_stack(unsigned long sp)
9162-{
9163- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9164- sp -= get_random_int() & ~PAGE_MASK;
9165- return sp & ~0xf;
9166-}
9167-
9168-static inline unsigned long brk_rnd(void)
9169-{
9170- unsigned long rnd = 0;
9171-
9172- /* 8MB for 32bit, 1GB for 64bit */
9173- if (is_32bit_task())
9174- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
9175- else
9176- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
9177-
9178- return rnd << PAGE_SHIFT;
9179-}
9180-
9181-unsigned long arch_randomize_brk(struct mm_struct *mm)
9182-{
9183- unsigned long base = mm->brk;
9184- unsigned long ret;
9185-
9186-#ifdef CONFIG_PPC_STD_MMU_64
9187- /*
9188- * If we are using 1TB segments and we are allowed to randomise
9189- * the heap, we can put it above 1TB so it is backed by a 1TB
9190- * segment. Otherwise the heap will be in the bottom 1TB
9191- * which always uses 256MB segments and this may result in a
9192- * performance penalty.
9193- */
9194- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
9195- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
9196-#endif
9197-
9198- ret = PAGE_ALIGN(base + brk_rnd());
9199-
9200- if (ret < mm->brk)
9201- return mm->brk;
9202-
9203- return ret;
9204-}
9205-
9206-unsigned long randomize_et_dyn(unsigned long base)
9207-{
9208- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
9209-
9210- if (ret < base)
9211- return base;
9212-
9213- return ret;
9214-}
9215diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
9216index 2e3d2bf..35df241 100644
9217--- a/arch/powerpc/kernel/ptrace.c
9218+++ b/arch/powerpc/kernel/ptrace.c
9219@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
9220 return ret;
9221 }
9222
9223+#ifdef CONFIG_GRKERNSEC_SETXID
9224+extern void gr_delayed_cred_worker(void);
9225+#endif
9226+
9227 /*
9228 * We must return the syscall number to actually look up in the table.
9229 * This can be -1L to skip running any syscall at all.
9230@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
9231
9232 secure_computing_strict(regs->gpr[0]);
9233
9234+#ifdef CONFIG_GRKERNSEC_SETXID
9235+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9236+ gr_delayed_cred_worker();
9237+#endif
9238+
9239 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
9240 tracehook_report_syscall_entry(regs))
9241 /*
9242@@ -1808,6 +1817,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
9243 {
9244 int step;
9245
9246+#ifdef CONFIG_GRKERNSEC_SETXID
9247+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9248+ gr_delayed_cred_worker();
9249+#endif
9250+
9251 audit_syscall_exit(regs);
9252
9253 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9254diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
9255index 1bc5a17..910d3f3 100644
9256--- a/arch/powerpc/kernel/signal_32.c
9257+++ b/arch/powerpc/kernel/signal_32.c
9258@@ -1012,7 +1012,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
9259 /* Save user registers on the stack */
9260 frame = &rt_sf->uc.uc_mcontext;
9261 addr = frame;
9262- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
9263+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9264 sigret = 0;
9265 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
9266 } else {
9267diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
9268index 97c1e4b..f427f81 100644
9269--- a/arch/powerpc/kernel/signal_64.c
9270+++ b/arch/powerpc/kernel/signal_64.c
9271@@ -755,7 +755,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
9272 current->thread.fp_state.fpscr = 0;
9273
9274 /* Set up to return from userspace. */
9275- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
9276+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9277 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
9278 } else {
9279 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
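
Both signal paths now treat ~0UL rather than 0 as "no vDSO mapped", matching the vdso.c hunk below that initializes context.vdso_base to ~0UL: the all-ones value can never be a valid page-aligned mapping base, so it stays unambiguous even with a randomized vDSO. A sketch of the selection logic (hypothetical sentinel name and trampoline offset):

    #include <stdio.h>

    #define VDSO_UNMAPPED (~0UL)   /* hypothetical name for the sentinel */

    static unsigned long vdso_base = VDSO_UNMAPPED;
    static unsigned long vdso64_rt_sigtramp = 0x700;   /* hypothetical offset */

    int main(void)
    {
        if (vdso_base != VDSO_UNMAPPED)
            printf("return via vDSO trampoline at %#lx\n",
                   vdso_base + vdso64_rt_sigtramp);
        else
            printf("no vDSO: use a trampoline on the signal frame\n");
        return 0;
    }
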
9280diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
9281index 239f1cd..5359f76 100644
9282--- a/arch/powerpc/kernel/traps.c
9283+++ b/arch/powerpc/kernel/traps.c
9284@@ -142,6 +142,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
9285 return flags;
9286 }
9287
9288+extern void gr_handle_kernel_exploit(void);
9289+
9290 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9291 int signr)
9292 {
9293@@ -191,6 +193,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9294 panic("Fatal exception in interrupt");
9295 if (panic_on_oops)
9296 panic("Fatal exception");
9297+
9298+ gr_handle_kernel_exploit();
9299+
9300 do_exit(signr);
9301 }
9302
9303diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
9304index ce74c33..0803371 100644
9305--- a/arch/powerpc/kernel/vdso.c
9306+++ b/arch/powerpc/kernel/vdso.c
9307@@ -35,6 +35,7 @@
9308 #include <asm/vdso.h>
9309 #include <asm/vdso_datapage.h>
9310 #include <asm/setup.h>
9311+#include <asm/mman.h>
9312
9313 #undef DEBUG
9314
9315@@ -221,7 +222,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9316 vdso_base = VDSO32_MBASE;
9317 #endif
9318
9319- current->mm->context.vdso_base = 0;
9320+ current->mm->context.vdso_base = ~0UL;
9321
9322 /* vDSO has a problem and was disabled, just don't "enable" it for the
9323 * process
9324@@ -241,7 +242,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9325 vdso_base = get_unmapped_area(NULL, vdso_base,
9326 (vdso_pages << PAGE_SHIFT) +
9327 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
9328- 0, 0);
9329+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
9330 if (IS_ERR_VALUE(vdso_base)) {
9331 rc = vdso_base;
9332 goto fail_mmapsem;
9333diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
9334index 61c738a..b1092d6 100644
9335--- a/arch/powerpc/kvm/powerpc.c
9336+++ b/arch/powerpc/kvm/powerpc.c
9337@@ -1195,7 +1195,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
9338 }
9339 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
9340
9341-int kvm_arch_init(void *opaque)
9342+int kvm_arch_init(const void *opaque)
9343 {
9344 return 0;
9345 }
9346diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
9347index 5eea6f3..5d10396 100644
9348--- a/arch/powerpc/lib/usercopy_64.c
9349+++ b/arch/powerpc/lib/usercopy_64.c
9350@@ -9,22 +9,6 @@
9351 #include <linux/module.h>
9352 #include <asm/uaccess.h>
9353
9354-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9355-{
9356- if (likely(access_ok(VERIFY_READ, from, n)))
9357- n = __copy_from_user(to, from, n);
9358- else
9359- memset(to, 0, n);
9360- return n;
9361-}
9362-
9363-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9364-{
9365- if (likely(access_ok(VERIFY_WRITE, to, n)))
9366- n = __copy_to_user(to, from, n);
9367- return n;
9368-}
9369-
9370 unsigned long copy_in_user(void __user *to, const void __user *from,
9371 unsigned long n)
9372 {
9373@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
9374 return n;
9375 }
9376
9377-EXPORT_SYMBOL(copy_from_user);
9378-EXPORT_SYMBOL(copy_to_user);
9379 EXPORT_SYMBOL(copy_in_user);
9380
9381diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
9382index 51ab9e7..7d3c78b 100644
9383--- a/arch/powerpc/mm/fault.c
9384+++ b/arch/powerpc/mm/fault.c
9385@@ -33,6 +33,10 @@
9386 #include <linux/magic.h>
9387 #include <linux/ratelimit.h>
9388 #include <linux/context_tracking.h>
9389+#include <linux/slab.h>
9390+#include <linux/pagemap.h>
9391+#include <linux/compiler.h>
9392+#include <linux/unistd.h>
9393
9394 #include <asm/firmware.h>
9395 #include <asm/page.h>
9396@@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
9397 }
9398 #endif
9399
9400+#ifdef CONFIG_PAX_PAGEEXEC
9401+/*
9402+ * PaX: decide what to do with offenders (regs->nip = fault address)
9403+ *
9404+ * returns 1 when task should be killed
9405+ */
9406+static int pax_handle_fetch_fault(struct pt_regs *regs)
9407+{
9408+ return 1;
9409+}
9410+
9411+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9412+{
9413+ unsigned long i;
9414+
9415+ printk(KERN_ERR "PAX: bytes at PC: ");
9416+ for (i = 0; i < 5; i++) {
9417+ unsigned int c;
9418+ if (get_user(c, (unsigned int __user *)pc+i))
9419+ printk(KERN_CONT "???????? ");
9420+ else
9421+ printk(KERN_CONT "%08x ", c);
9422+ }
9423+ printk("\n");
9424+}
9425+#endif
9426+
9427 /*
9428 * Check whether the instruction at regs->nip is a store using
9429 * an update addressing form which will update r1.
9430@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
9431 * indicate errors in DSISR but can validly be set in SRR1.
9432 */
9433 if (trap == 0x400)
9434- error_code &= 0x48200000;
9435+ error_code &= 0x58200000;
9436 else
9437 is_write = error_code & DSISR_ISSTORE;
9438 #else
9439@@ -378,7 +409,7 @@ good_area:
9440 * "undefined". Of those that can be set, this is the only
9441 * one which seems bad.
9442 */
9443- if (error_code & 0x10000000)
9444+ if (error_code & DSISR_GUARDED)
9445 /* Guarded storage error. */
9446 goto bad_area;
9447 #endif /* CONFIG_8xx */
9448@@ -393,7 +424,7 @@ good_area:
9449 * processors use the same I/D cache coherency mechanism
9450 * as embedded.
9451 */
9452- if (error_code & DSISR_PROTFAULT)
9453+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
9454 goto bad_area;
9455 #endif /* CONFIG_PPC_STD_MMU */
9456
9457@@ -483,6 +514,23 @@ bad_area:
9458 bad_area_nosemaphore:
9459 /* User mode accesses cause a SIGSEGV */
9460 if (user_mode(regs)) {
9461+
9462+#ifdef CONFIG_PAX_PAGEEXEC
9463+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
9464+#ifdef CONFIG_PPC_STD_MMU
9465+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
9466+#else
9467+ if (is_exec && regs->nip == address) {
9468+#endif
9469+ switch (pax_handle_fetch_fault(regs)) {
9470+ }
9471+
9472+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
9473+ do_group_exit(SIGKILL);
9474+ }
9475+ }
9476+#endif
9477+
9478 _exception(SIGSEGV, regs, code, address);
9479 goto bail;
9480 }
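
The trap 0x400 (instruction storage) path widens its SRR1 filter from 0x48200000 to 0x58200000, i.e. it now also keeps DSISR_GUARDED, the bit the reg.h hunk above names for fetches from guarded storage; the PAGEEXEC checks later in this file rely on seeing that bit to classify the fault. The mask change decomposed (bit names from the reg.h hunk):

    #include <stdio.h>

    #define DSISR_NOHPTE    0x40000000  /* no translation found */
    #define DSISR_GUARDED   0x10000000  /* fetch from guarded storage */
    #define DSISR_PROTFAULT 0x08000000  /* protection fault */

    int main(void)
    {
        unsigned int old_mask = DSISR_NOHPTE | DSISR_PROTFAULT | 0x00200000;
        unsigned int new_mask = old_mask | DSISR_GUARDED;

        printf("old SRR1 filter: %#x\n", old_mask);   /* 0x48200000 */
        printf("new SRR1 filter: %#x\n", new_mask);   /* 0x58200000 */
        return 0;
    }
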
9481diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
9482index cb8bdbe..cde4bc7 100644
9483--- a/arch/powerpc/mm/mmap.c
9484+++ b/arch/powerpc/mm/mmap.c
9485@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
9486 return sysctl_legacy_va_layout;
9487 }
9488
9489-static unsigned long mmap_rnd(void)
9490+static unsigned long mmap_rnd(struct mm_struct *mm)
9491 {
9492 unsigned long rnd = 0;
9493
9494+#ifdef CONFIG_PAX_RANDMMAP
9495+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9496+#endif
9497+
9498 if (current->flags & PF_RANDOMIZE) {
9499 /* 8MB for 32bit, 1GB for 64bit */
9500 if (is_32bit_task())
9501@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
9502 return rnd << PAGE_SHIFT;
9503 }
9504
9505-static inline unsigned long mmap_base(void)
9506+static inline unsigned long mmap_base(struct mm_struct *mm)
9507 {
9508 unsigned long gap = rlimit(RLIMIT_STACK);
9509
9510@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
9511 else if (gap > MAX_GAP)
9512 gap = MAX_GAP;
9513
9514- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
9515+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
9516 }
9517
9518 /*
9519@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9520 */
9521 if (mmap_is_legacy()) {
9522 mm->mmap_base = TASK_UNMAPPED_BASE;
9523+
9524+#ifdef CONFIG_PAX_RANDMMAP
9525+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9526+ mm->mmap_base += mm->delta_mmap;
9527+#endif
9528+
9529 mm->get_unmapped_area = arch_get_unmapped_area;
9530 } else {
9531- mm->mmap_base = mmap_base();
9532+ mm->mmap_base = mmap_base(mm);
9533+
9534+#ifdef CONFIG_PAX_RANDMMAP
9535+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9536+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9537+#endif
9538+
9539 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9540 }
9541 }
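
mmap_rnd() keeps the entropy budget noted in its comment: the random page count is reduced modulo 2^(23 - PAGE_SHIFT) for 32-bit tasks and 2^(30 - PAGE_SHIFT) for 64-bit, then shifted back into bytes, giving 8 MiB and 1 GiB of randomization range. With 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long range32 = (1UL << (23 - PAGE_SHIFT)) << PAGE_SHIFT;
        unsigned long range64 = (1UL << (30 - PAGE_SHIFT)) << PAGE_SHIFT;

        printf("32-bit range: %lu MiB\n", range32 >> 20);   /* 8 */
        printf("64-bit range: %lu MiB\n", range64 >> 20);   /* 1024 */
        return 0;
    }
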
9542diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9543index b0c75cc..ef7fb93 100644
9544--- a/arch/powerpc/mm/slice.c
9545+++ b/arch/powerpc/mm/slice.c
9546@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9547 if ((mm->task_size - len) < addr)
9548 return 0;
9549 vma = find_vma(mm, addr);
9550- return (!vma || (addr + len) <= vma->vm_start);
9551+ return check_heap_stack_gap(vma, addr, len, 0);
9552 }
9553
9554 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9555@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9556 info.align_offset = 0;
9557
9558 addr = TASK_UNMAPPED_BASE;
9559+
9560+#ifdef CONFIG_PAX_RANDMMAP
9561+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9562+ addr += mm->delta_mmap;
9563+#endif
9564+
9565 while (addr < TASK_SIZE) {
9566 info.low_limit = addr;
9567 if (!slice_scan_available(addr, available, 1, &addr))
9568@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
9569 if (fixed && addr > (mm->task_size - len))
9570 return -ENOMEM;
9571
9572+#ifdef CONFIG_PAX_RANDMMAP
9573+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
9574+ addr = 0;
9575+#endif
9576+
9577 /* If hint, make sure it matches our alignment restrictions */
9578 if (!fixed && addr) {
9579 addr = _ALIGN_UP(addr, 1ul << pshift);
9580diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9581index 4278acf..67fd0e6 100644
9582--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9583+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9584@@ -400,8 +400,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
9585 }
9586
9587 static struct pci_ops scc_pciex_pci_ops = {
9588- scc_pciex_read_config,
9589- scc_pciex_write_config,
9590+ .read = scc_pciex_read_config,
9591+ .write = scc_pciex_write_config,
9592 };
9593
9594 static void pciex_clear_intr_all(unsigned int __iomem *base)
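
The scc_pciex_pci_ops change is purely mechanical: positional struct initialization is replaced with C99 designated initializers, which stay correct even if struct pci_ops gains or reorders members. In miniature:

    #include <stdio.h>

    struct ops {
        int (*read)(void);
        int (*write)(void);
    };

    static int my_read(void)  { return 0; }
    static int my_write(void) { return 1; }

    /* positional initializers would silently pair functions with the
     * wrong slots if 'struct ops' were reordered; these cannot */
    static const struct ops pci_like_ops = {
        .read  = my_read,
        .write = my_write,
    };

    int main(void)
    {
        printf("%d %d\n", pci_like_ops.read(), pci_like_ops.write());
        return 0;
    }
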
9595diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
9596index 9098692..3d54cd1 100644
9597--- a/arch/powerpc/platforms/cell/spufs/file.c
9598+++ b/arch/powerpc/platforms/cell/spufs/file.c
9599@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9600 return VM_FAULT_NOPAGE;
9601 }
9602
9603-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
9604+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
9605 unsigned long address,
9606- void *buf, int len, int write)
9607+ void *buf, size_t len, int write)
9608 {
9609 struct spu_context *ctx = vma->vm_file->private_data;
9610 unsigned long offset = address - vma->vm_start;
9611diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
9612index fa934fe..c296056 100644
9613--- a/arch/s390/include/asm/atomic.h
9614+++ b/arch/s390/include/asm/atomic.h
9615@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
9616 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
9617 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9618
9619+#define atomic64_read_unchecked(v) atomic64_read(v)
9620+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9621+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9622+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9623+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9624+#define atomic64_inc_unchecked(v) atomic64_inc(v)
9625+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9626+#define atomic64_dec_unchecked(v) atomic64_dec(v)
9627+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9628+
9629 #endif /* __ARCH_S390_ATOMIC__ */
9630diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
9631index 19ff956..8d39cb1 100644
9632--- a/arch/s390/include/asm/barrier.h
9633+++ b/arch/s390/include/asm/barrier.h
9634@@ -37,7 +37,7 @@
9635 do { \
9636 compiletime_assert_atomic_type(*p); \
9637 barrier(); \
9638- ACCESS_ONCE(*p) = (v); \
9639+ ACCESS_ONCE_RW(*p) = (v); \
9640 } while (0)
9641
9642 #define smp_load_acquire(p) \
9643diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
9644index 4d7ccac..d03d0ad 100644
9645--- a/arch/s390/include/asm/cache.h
9646+++ b/arch/s390/include/asm/cache.h
9647@@ -9,8 +9,10 @@
9648 #ifndef __ARCH_S390_CACHE_H
9649 #define __ARCH_S390_CACHE_H
9650
9651-#define L1_CACHE_BYTES 256
9652+#include <linux/const.h>
9653+
9654 #define L1_CACHE_SHIFT 8
9655+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9656 #define NET_SKB_PAD 32
9657
9658 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9659diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
9660index 78f4f87..598ce39 100644
9661--- a/arch/s390/include/asm/elf.h
9662+++ b/arch/s390/include/asm/elf.h
9663@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
9664 the loader. We need to make sure that it is out of the way of the program
9665 that it will "exec", and that there is sufficient room for the brk. */
9666
9667-extern unsigned long randomize_et_dyn(unsigned long base);
9668-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
9669+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
9670+
9671+#ifdef CONFIG_PAX_ASLR
9672+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
9673+
9674+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9675+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9676+#endif
9677
9678 /* This yields a mask that user programs can use to figure out what
9679 instruction set this CPU supports. */
9680@@ -222,9 +228,6 @@ struct linux_binprm;
9681 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
9682 int arch_setup_additional_pages(struct linux_binprm *, int);
9683
9684-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9685-#define arch_randomize_brk arch_randomize_brk
9686-
9687 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
9688
9689 #endif
9690diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
9691index c4a93d6..4d2a9b4 100644
9692--- a/arch/s390/include/asm/exec.h
9693+++ b/arch/s390/include/asm/exec.h
9694@@ -7,6 +7,6 @@
9695 #ifndef __ASM_EXEC_H
9696 #define __ASM_EXEC_H
9697
9698-extern unsigned long arch_align_stack(unsigned long sp);
9699+#define arch_align_stack(x) ((x) & ~0xfUL)
9700
9701 #endif /* __ASM_EXEC_H */
9702diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
9703index cd4c68e..6764641 100644
9704--- a/arch/s390/include/asm/uaccess.h
9705+++ b/arch/s390/include/asm/uaccess.h
9706@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
9707 __range_ok((unsigned long)(addr), (size)); \
9708 })
9709
9710+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9711 #define access_ok(type, addr, size) __access_ok(addr, size)
9712
9713 /*
9714@@ -275,6 +276,10 @@ static inline unsigned long __must_check
9715 copy_to_user(void __user *to, const void *from, unsigned long n)
9716 {
9717 might_fault();
9718+
9719+ if ((long)n < 0)
9720+ return n;
9721+
9722 return __copy_to_user(to, from, n);
9723 }
9724
9725@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
9726 static inline unsigned long __must_check
9727 copy_from_user(void *to, const void __user *from, unsigned long n)
9728 {
9729- unsigned int sz = __compiletime_object_size(to);
9730+ size_t sz = __compiletime_object_size(to);
9731
9732 might_fault();
9733- if (unlikely(sz != -1 && sz < n)) {
9734+
9735+ if ((long)n < 0)
9736+ return n;
9737+
9738+ if (unlikely(sz != (size_t)-1 && sz < n)) {
9739 copy_from_user_overflow();
9740 return n;
9741 }
9742diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
9743index b89b591..fd9609d 100644
9744--- a/arch/s390/kernel/module.c
9745+++ b/arch/s390/kernel/module.c
9746@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
9747
9748 /* Increase core size by size of got & plt and set start
9749 offsets for got and plt. */
9750- me->core_size = ALIGN(me->core_size, 4);
9751- me->arch.got_offset = me->core_size;
9752- me->core_size += me->arch.got_size;
9753- me->arch.plt_offset = me->core_size;
9754- me->core_size += me->arch.plt_size;
9755+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
9756+ me->arch.got_offset = me->core_size_rw;
9757+ me->core_size_rw += me->arch.got_size;
9758+ me->arch.plt_offset = me->core_size_rx;
9759+ me->core_size_rx += me->arch.plt_size;
9760 return 0;
9761 }
9762
9763@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9764 if (info->got_initialized == 0) {
9765 Elf_Addr *gotent;
9766
9767- gotent = me->module_core + me->arch.got_offset +
9768+ gotent = me->module_core_rw + me->arch.got_offset +
9769 info->got_offset;
9770 *gotent = val;
9771 info->got_initialized = 1;
9772@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9773 rc = apply_rela_bits(loc, val, 0, 64, 0);
9774 else if (r_type == R_390_GOTENT ||
9775 r_type == R_390_GOTPLTENT) {
9776- val += (Elf_Addr) me->module_core - loc;
9777+ val += (Elf_Addr) me->module_core_rw - loc;
9778 rc = apply_rela_bits(loc, val, 1, 32, 1);
9779 }
9780 break;
9781@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9782 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
9783 if (info->plt_initialized == 0) {
9784 unsigned int *ip;
9785- ip = me->module_core + me->arch.plt_offset +
9786+ ip = me->module_core_rx + me->arch.plt_offset +
9787 info->plt_offset;
9788 #ifndef CONFIG_64BIT
9789 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
9790@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9791 val - loc + 0xffffUL < 0x1ffffeUL) ||
9792 (r_type == R_390_PLT32DBL &&
9793 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
9794- val = (Elf_Addr) me->module_core +
9795+ val = (Elf_Addr) me->module_core_rx +
9796 me->arch.plt_offset +
9797 info->plt_offset;
9798 val += rela->r_addend - loc;
9799@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9800 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
9801 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
9802 val = val + rela->r_addend -
9803- ((Elf_Addr) me->module_core + me->arch.got_offset);
9804+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
9805 if (r_type == R_390_GOTOFF16)
9806 rc = apply_rela_bits(loc, val, 0, 16, 0);
9807 else if (r_type == R_390_GOTOFF32)
9808@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9809 break;
9810 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
9811 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
9812- val = (Elf_Addr) me->module_core + me->arch.got_offset +
9813+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
9814 rela->r_addend - loc;
9815 if (r_type == R_390_GOTPC)
9816 rc = apply_rela_bits(loc, val, 1, 32, 0);
9817diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
9818index 93b9ca4..4ea1454 100644
9819--- a/arch/s390/kernel/process.c
9820+++ b/arch/s390/kernel/process.c
9821@@ -242,37 +242,3 @@ unsigned long get_wchan(struct task_struct *p)
9822 }
9823 return 0;
9824 }
9825-
9826-unsigned long arch_align_stack(unsigned long sp)
9827-{
9828- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9829- sp -= get_random_int() & ~PAGE_MASK;
9830- return sp & ~0xf;
9831-}
9832-
9833-static inline unsigned long brk_rnd(void)
9834-{
9835- /* 8MB for 32bit, 1GB for 64bit */
9836- if (is_32bit_task())
9837- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
9838- else
9839- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
9840-}
9841-
9842-unsigned long arch_randomize_brk(struct mm_struct *mm)
9843-{
9844- unsigned long ret;
9845-
9846- ret = PAGE_ALIGN(mm->brk + brk_rnd());
9847- return (ret > mm->brk) ? ret : mm->brk;
9848-}
9849-
9850-unsigned long randomize_et_dyn(unsigned long base)
9851-{
9852- unsigned long ret;
9853-
9854- if (!(current->flags & PF_RANDOMIZE))
9855- return base;
9856- ret = PAGE_ALIGN(base + brk_rnd());
9857- return (ret > base) ? ret : base;
9858-}
9859diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
9860index 9b436c2..54fbf0a 100644
9861--- a/arch/s390/mm/mmap.c
9862+++ b/arch/s390/mm/mmap.c
9863@@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9864 */
9865 if (mmap_is_legacy()) {
9866 mm->mmap_base = mmap_base_legacy();
9867+
9868+#ifdef CONFIG_PAX_RANDMMAP
9869+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9870+ mm->mmap_base += mm->delta_mmap;
9871+#endif
9872+
9873 mm->get_unmapped_area = arch_get_unmapped_area;
9874 } else {
9875 mm->mmap_base = mmap_base();
9876+
9877+#ifdef CONFIG_PAX_RANDMMAP
9878+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9879+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9880+#endif
9881+
9882 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9883 }
9884 }
9885@@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9886 */
9887 if (mmap_is_legacy()) {
9888 mm->mmap_base = mmap_base_legacy();
9889+
9890+#ifdef CONFIG_PAX_RANDMMAP
9891+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9892+ mm->mmap_base += mm->delta_mmap;
9893+#endif
9894+
9895 mm->get_unmapped_area = s390_get_unmapped_area;
9896 } else {
9897 mm->mmap_base = mmap_base();
9898+
9899+#ifdef CONFIG_PAX_RANDMMAP
9900+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9901+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9902+#endif
9903+
9904 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
9905 }
9906 }
9907diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
9908index ae3d59f..f65f075 100644
9909--- a/arch/score/include/asm/cache.h
9910+++ b/arch/score/include/asm/cache.h
9911@@ -1,7 +1,9 @@
9912 #ifndef _ASM_SCORE_CACHE_H
9913 #define _ASM_SCORE_CACHE_H
9914
9915+#include <linux/const.h>
9916+
9917 #define L1_CACHE_SHIFT 4
9918-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9919+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9920
9921 #endif /* _ASM_SCORE_CACHE_H */
9922diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
9923index f9f3cd5..58ff438 100644
9924--- a/arch/score/include/asm/exec.h
9925+++ b/arch/score/include/asm/exec.h
9926@@ -1,6 +1,6 @@
9927 #ifndef _ASM_SCORE_EXEC_H
9928 #define _ASM_SCORE_EXEC_H
9929
9930-extern unsigned long arch_align_stack(unsigned long sp);
9931+#define arch_align_stack(x) (x)
9932
9933 #endif /* _ASM_SCORE_EXEC_H */
9934diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
9935index a1519ad3..e8ac1ff 100644
9936--- a/arch/score/kernel/process.c
9937+++ b/arch/score/kernel/process.c
9938@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
9939
9940 return task_pt_regs(task)->cp0_epc;
9941 }
9942-
9943-unsigned long arch_align_stack(unsigned long sp)
9944-{
9945- return sp;
9946-}
9947diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
9948index ef9e555..331bd29 100644
9949--- a/arch/sh/include/asm/cache.h
9950+++ b/arch/sh/include/asm/cache.h
9951@@ -9,10 +9,11 @@
9952 #define __ASM_SH_CACHE_H
9953 #ifdef __KERNEL__
9954
9955+#include <linux/const.h>
9956 #include <linux/init.h>
9957 #include <cpu/cache.h>
9958
9959-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9960+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9961
9962 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9963
9964diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
9965index 6777177..cb5e44f 100644
9966--- a/arch/sh/mm/mmap.c
9967+++ b/arch/sh/mm/mmap.c
9968@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9969 struct mm_struct *mm = current->mm;
9970 struct vm_area_struct *vma;
9971 int do_colour_align;
9972+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9973 struct vm_unmapped_area_info info;
9974
9975 if (flags & MAP_FIXED) {
9976@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9977 if (filp || (flags & MAP_SHARED))
9978 do_colour_align = 1;
9979
9980+#ifdef CONFIG_PAX_RANDMMAP
9981+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9982+#endif
9983+
9984 if (addr) {
9985 if (do_colour_align)
9986 addr = COLOUR_ALIGN(addr, pgoff);
9987@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9988 addr = PAGE_ALIGN(addr);
9989
9990 vma = find_vma(mm, addr);
9991- if (TASK_SIZE - len >= addr &&
9992- (!vma || addr + len <= vma->vm_start))
9993+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9994 return addr;
9995 }
9996
9997 info.flags = 0;
9998 info.length = len;
9999- info.low_limit = TASK_UNMAPPED_BASE;
10000+ info.low_limit = mm->mmap_base;
10001 info.high_limit = TASK_SIZE;
10002 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
10003 info.align_offset = pgoff << PAGE_SHIFT;
10004@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10005 struct mm_struct *mm = current->mm;
10006 unsigned long addr = addr0;
10007 int do_colour_align;
10008+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10009 struct vm_unmapped_area_info info;
10010
10011 if (flags & MAP_FIXED) {
10012@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10013 if (filp || (flags & MAP_SHARED))
10014 do_colour_align = 1;
10015
10016+#ifdef CONFIG_PAX_RANDMMAP
10017+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10018+#endif
10019+
10020 /* requesting a specific address */
10021 if (addr) {
10022 if (do_colour_align)
10023@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10024 addr = PAGE_ALIGN(addr);
10025
10026 vma = find_vma(mm, addr);
10027- if (TASK_SIZE - len >= addr &&
10028- (!vma || addr + len <= vma->vm_start))
10029+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10030 return addr;
10031 }
10032
10033@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10034 VM_BUG_ON(addr != -ENOMEM);
10035 info.flags = 0;
10036 info.low_limit = TASK_UNMAPPED_BASE;
10037+
10038+#ifdef CONFIG_PAX_RANDMMAP
10039+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10040+ info.low_limit += mm->delta_mmap;
10041+#endif
10042+
10043 info.high_limit = TASK_SIZE;
10044 addr = vm_unmapped_area(&info);
10045 }
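
The sh hunks replace the open-coded gap test (!vma || addr + len <= vma->vm_start) with grsecurity's check_heap_stack_gap() and thread a per-mapping random offset from gr_rand_threadstack_offset() through it. Neither helper's body appears in this part of the patch, so the following is a rough mental model of the semantics only, not the real implementation:

    /* Hedged sketch: what the stricter gap check plausibly enforces. */
    static int check_heap_stack_gap_sketch(const struct vm_area_struct *vma,
                                           unsigned long addr, unsigned long len,
                                           unsigned long offset)
    {
            if (!vma)
                    return 1;               /* no mapping above the candidate */
            if (addr + len > vma->vm_start)
                    return 0;               /* plain overlap, as before */
            /* extra rule: keep a (randomized) cushion below stack VMAs */
            if ((vma->vm_flags & VM_GROWSDOWN) &&
                addr + len + offset > vma->vm_start)
                    return 0;
            return 1;
    }
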
10046diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
10047index bb894c8..8141d5c 100644
10048--- a/arch/sparc/include/asm/atomic_64.h
10049+++ b/arch/sparc/include/asm/atomic_64.h
10050@@ -15,18 +15,40 @@
10051 #define ATOMIC64_INIT(i) { (i) }
10052
10053 #define atomic_read(v) (*(volatile int *)&(v)->counter)
10054+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
10055+{
10056+ return v->counter;
10057+}
10058 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
10059+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
10060+{
10061+ return v->counter;
10062+}
10063
10064 #define atomic_set(v, i) (((v)->counter) = i)
10065+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
10066+{
10067+ v->counter = i;
10068+}
10069 #define atomic64_set(v, i) (((v)->counter) = i)
10070+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
10071+{
10072+ v->counter = i;
10073+}
10074
10075 void atomic_add(int, atomic_t *);
10076+void atomic_add_unchecked(int, atomic_unchecked_t *);
10077 void atomic64_add(long, atomic64_t *);
10078+void atomic64_add_unchecked(long, atomic64_unchecked_t *);
10079 void atomic_sub(int, atomic_t *);
10080+void atomic_sub_unchecked(int, atomic_unchecked_t *);
10081 void atomic64_sub(long, atomic64_t *);
10082+void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
10083
10084 int atomic_add_ret(int, atomic_t *);
10085+int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
10086 long atomic64_add_ret(long, atomic64_t *);
10087+long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
10088 int atomic_sub_ret(int, atomic_t *);
10089 long atomic64_sub_ret(long, atomic64_t *);
10090
10091@@ -34,13 +56,29 @@ long atomic64_sub_ret(long, atomic64_t *);
10092 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
10093
10094 #define atomic_inc_return(v) atomic_add_ret(1, v)
10095+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
10096+{
10097+ return atomic_add_ret_unchecked(1, v);
10098+}
10099 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
10100+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10101+{
10102+ return atomic64_add_ret_unchecked(1, v);
10103+}
10104
10105 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
10106 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
10107
10108 #define atomic_add_return(i, v) atomic_add_ret(i, v)
10109+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
10110+{
10111+ return atomic_add_ret_unchecked(i, v);
10112+}
10113 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
10114+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
10115+{
10116+ return atomic64_add_ret_unchecked(i, v);
10117+}
10118
10119 /*
10120 * atomic_inc_and_test - increment and test
10121@@ -51,6 +89,10 @@ long atomic64_sub_ret(long, atomic64_t *);
10122 * other cases.
10123 */
10124 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
10125+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
10126+{
10127+ return atomic_inc_return_unchecked(v) == 0;
10128+}
10129 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
10130
10131 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
10132@@ -60,25 +102,60 @@ long atomic64_sub_ret(long, atomic64_t *);
10133 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
10134
10135 #define atomic_inc(v) atomic_add(1, v)
10136+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
10137+{
10138+ atomic_add_unchecked(1, v);
10139+}
10140 #define atomic64_inc(v) atomic64_add(1, v)
10141+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
10142+{
10143+ atomic64_add_unchecked(1, v);
10144+}
10145
10146 #define atomic_dec(v) atomic_sub(1, v)
10147+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
10148+{
10149+ atomic_sub_unchecked(1, v);
10150+}
10151 #define atomic64_dec(v) atomic64_sub(1, v)
10152+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
10153+{
10154+ atomic64_sub_unchecked(1, v);
10155+}
10156
10157 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
10158 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
10159
10160 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
10161+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
10162+{
10163+ return cmpxchg(&v->counter, old, new);
10164+}
10165 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
10166+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
10167+{
10168+ return xchg(&v->counter, new);
10169+}
10170
10171 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10172 {
10173- int c, old;
10174+ int c, old, new;
10175 c = atomic_read(v);
10176 for (;;) {
10177- if (unlikely(c == (u)))
10178+ if (unlikely(c == u))
10179 break;
10180- old = atomic_cmpxchg((v), c, c + (a));
10181+
10182+ asm volatile("addcc %2, %0, %0\n"
10183+
10184+#ifdef CONFIG_PAX_REFCOUNT
10185+ "tvs %%icc, 6\n"
10186+#endif
10187+
10188+ : "=r" (new)
10189+ : "0" (c), "ir" (a)
10190+ : "cc");
10191+
10192+ old = atomic_cmpxchg(v, c, new);
10193 if (likely(old == c))
10194 break;
10195 c = old;
10196@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10197 #define atomic64_cmpxchg(v, o, n) \
10198 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
10199 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
10200+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10201+{
10202+ return xchg(&v->counter, new);
10203+}
10204
10205 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10206 {
10207- long c, old;
10208+ long c, old, new;
10209 c = atomic64_read(v);
10210 for (;;) {
10211- if (unlikely(c == (u)))
10212+ if (unlikely(c == u))
10213 break;
10214- old = atomic64_cmpxchg((v), c, c + (a));
10215+
10216+ asm volatile("addcc %2, %0, %0\n"
10217+
10218+#ifdef CONFIG_PAX_REFCOUNT
10219+ "tvs %%xcc, 6\n"
10220+#endif
10221+
10222+ : "=r" (new)
10223+ : "0" (c), "ir" (a)
10224+ : "cc");
10225+
10226+ old = atomic64_cmpxchg(v, c, new);
10227 if (likely(old == c))
10228 break;
10229 c = old;
10230 }
10231- return c != (u);
10232+ return c != u;
10233 }
10234
10235 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
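
The addcc/tvs pairs introduced throughout this file are the heart of PAX_REFCOUNT on sparc: addcc is an add that updates the integer condition codes, and tvs ("trap on overflow set") raises software trap 6 when the signed result overflowed. The traps_64.c hunk further down wires trap level 6 to pax_report_refcount_overflow(), turning a reference-count wrap into a reported, killed event instead of a use-after-free primitive. A portable C analogue of the same check, for illustration only (the kernel traps rather than saturating; __builtin_add_overflow is a GCC 5+/Clang builtin):

    /* Sketch: overflow-checked increment, userspace analogue. */
    static inline int checked_inc(int *counter)
    {
            int next;

            if (__builtin_add_overflow(*counter, 1, &next))
                    return *counter;        /* kernel path: trap, report, kill */
            *counter = next;
            return next;
    }
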
10236diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
10237index 305dcc3..7835030 100644
10238--- a/arch/sparc/include/asm/barrier_64.h
10239+++ b/arch/sparc/include/asm/barrier_64.h
10240@@ -57,7 +57,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
10241 do { \
10242 compiletime_assert_atomic_type(*p); \
10243 barrier(); \
10244- ACCESS_ONCE(*p) = (v); \
10245+ ACCESS_ONCE_RW(*p) = (v); \
10246 } while (0)
10247
10248 #define smp_load_acquire(p) \
10249diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
10250index 5bb6991..5c2132e 100644
10251--- a/arch/sparc/include/asm/cache.h
10252+++ b/arch/sparc/include/asm/cache.h
10253@@ -7,10 +7,12 @@
10254 #ifndef _SPARC_CACHE_H
10255 #define _SPARC_CACHE_H
10256
10257+#include <linux/const.h>
10258+
10259 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
10260
10261 #define L1_CACHE_SHIFT 5
10262-#define L1_CACHE_BYTES 32
10263+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10264
10265 #ifdef CONFIG_SPARC32
10266 #define SMP_CACHE_BYTES_SHIFT 5
10267diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
10268index a24e41f..47677ff 100644
10269--- a/arch/sparc/include/asm/elf_32.h
10270+++ b/arch/sparc/include/asm/elf_32.h
10271@@ -114,6 +114,13 @@ typedef struct {
10272
10273 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
10274
10275+#ifdef CONFIG_PAX_ASLR
10276+#define PAX_ELF_ET_DYN_BASE 0x10000UL
10277+
10278+#define PAX_DELTA_MMAP_LEN 16
10279+#define PAX_DELTA_STACK_LEN 16
10280+#endif
10281+
10282 /* This yields a mask that user programs can use to figure out what
10283 instruction set this cpu supports. This can NOT be done in userspace
10284 on Sparc. */
10285diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
10286index 370ca1e..d4f4a98 100644
10287--- a/arch/sparc/include/asm/elf_64.h
10288+++ b/arch/sparc/include/asm/elf_64.h
10289@@ -189,6 +189,13 @@ typedef struct {
10290 #define ELF_ET_DYN_BASE 0x0000010000000000UL
10291 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
10292
10293+#ifdef CONFIG_PAX_ASLR
10294+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
10295+
10296+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
10297+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
10298+#endif
10299+
10300 extern unsigned long sparc64_elf_hwcap;
10301 #define ELF_HWCAP sparc64_elf_hwcap
10302
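
The PAX_DELTA_*_LEN constants are entropy widths in bits: 28 bits of mmap randomization for 64-bit sparc tasks versus 14 for compat tasks, applied on top of PAGE_SHIFT. This section never shows how they are consumed, but the ELF loader side plausibly derives the per-mm deltas used by the mmap hunks along these lines (sketch; pax_get_random_long() and the exact expression are assumptions):

    /* Hedged sketch of delta generation, not code from this section. */
    mm->delta_mmap  = (pax_get_random_long() &
                       ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
    mm->delta_stack = (pax_get_random_long() &
                       ((1UL << PAX_DELTA_STACK_LEN) - 1)) << PAGE_SHIFT;
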
10303diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
10304index a3890da..f6a408e 100644
10305--- a/arch/sparc/include/asm/pgalloc_32.h
10306+++ b/arch/sparc/include/asm/pgalloc_32.h
10307@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
10308 }
10309
10310 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
10311+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10312
10313 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
10314 unsigned long address)
10315diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
10316index 39a7ac4..2c9b586 100644
10317--- a/arch/sparc/include/asm/pgalloc_64.h
10318+++ b/arch/sparc/include/asm/pgalloc_64.h
10319@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
10320 }
10321
10322 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
10323+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
10324
10325 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
10326 {
10327diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
10328index 59ba6f6..4518128 100644
10329--- a/arch/sparc/include/asm/pgtable.h
10330+++ b/arch/sparc/include/asm/pgtable.h
10331@@ -5,4 +5,8 @@
10332 #else
10333 #include <asm/pgtable_32.h>
10334 #endif
10335+
10336+#define ktla_ktva(addr) (addr)
10337+#define ktva_ktla(addr) (addr)
10338+
10339 #endif
10340diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
10341index b9b91ae..950b91e 100644
10342--- a/arch/sparc/include/asm/pgtable_32.h
10343+++ b/arch/sparc/include/asm/pgtable_32.h
10344@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
10345 #define PAGE_SHARED SRMMU_PAGE_SHARED
10346 #define PAGE_COPY SRMMU_PAGE_COPY
10347 #define PAGE_READONLY SRMMU_PAGE_RDONLY
10348+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
10349+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
10350+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
10351 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
10352
10353 /* Top-level page directory - dummy used by init-mm.
10354@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
10355
10356 /* xwr */
10357 #define __P000 PAGE_NONE
10358-#define __P001 PAGE_READONLY
10359-#define __P010 PAGE_COPY
10360-#define __P011 PAGE_COPY
10361+#define __P001 PAGE_READONLY_NOEXEC
10362+#define __P010 PAGE_COPY_NOEXEC
10363+#define __P011 PAGE_COPY_NOEXEC
10364 #define __P100 PAGE_READONLY
10365 #define __P101 PAGE_READONLY
10366 #define __P110 PAGE_COPY
10367 #define __P111 PAGE_COPY
10368
10369 #define __S000 PAGE_NONE
10370-#define __S001 PAGE_READONLY
10371-#define __S010 PAGE_SHARED
10372-#define __S011 PAGE_SHARED
10373+#define __S001 PAGE_READONLY_NOEXEC
10374+#define __S010 PAGE_SHARED_NOEXEC
10375+#define __S011 PAGE_SHARED_NOEXEC
10376 #define __S100 PAGE_READONLY
10377 #define __S101 PAGE_READONLY
10378 #define __S110 PAGE_SHARED
10379diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
10380index 79da178..c2eede8 100644
10381--- a/arch/sparc/include/asm/pgtsrmmu.h
10382+++ b/arch/sparc/include/asm/pgtsrmmu.h
10383@@ -115,6 +115,11 @@
10384 SRMMU_EXEC | SRMMU_REF)
10385 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10386 SRMMU_EXEC | SRMMU_REF)
10387+
10388+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10389+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10390+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10391+
10392 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10393 SRMMU_DIRTY | SRMMU_REF)
10394
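
With the three SRMMU_*_NOEXEC protections defined, the rewritten __P and __S tables in pgtable_32.h above strip SRMMU_EXEC from every combination whose x bit is clear; only the __P1xx/__S1xx entries keep execute permission. These tables feed the generic protection_map lookup, which indexes on the low VM flag bits, roughly (simplified from mainline mm/mmap.c):

    pgprot_t vm_get_page_prot(unsigned long vm_flags)
    {
            return protection_map[vm_flags &
                            (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
    }

so a PROT_READ|PROT_WRITE mapping now resolves to a non-executable page protection on sparc32.
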
10395diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
10396index f5fffd8..a0669f0 100644
10397--- a/arch/sparc/include/asm/setup.h
10398+++ b/arch/sparc/include/asm/setup.h
10399@@ -53,8 +53,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
10400 void handle_ld_nf(u32 insn, struct pt_regs *regs);
10401
10402 /* init_64.c */
10403-extern atomic_t dcpage_flushes;
10404-extern atomic_t dcpage_flushes_xcall;
10405+extern atomic_unchecked_t dcpage_flushes;
10406+extern atomic_unchecked_t dcpage_flushes_xcall;
10407
10408 extern int sysctl_tsb_ratio;
10409 #endif
10410diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10411index 9689176..63c18ea 100644
10412--- a/arch/sparc/include/asm/spinlock_64.h
10413+++ b/arch/sparc/include/asm/spinlock_64.h
10414@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10415
10416 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10417
10418-static void inline arch_read_lock(arch_rwlock_t *lock)
10419+static inline void arch_read_lock(arch_rwlock_t *lock)
10420 {
10421 unsigned long tmp1, tmp2;
10422
10423 __asm__ __volatile__ (
10424 "1: ldsw [%2], %0\n"
10425 " brlz,pn %0, 2f\n"
10426-"4: add %0, 1, %1\n"
10427+"4: addcc %0, 1, %1\n"
10428+
10429+#ifdef CONFIG_PAX_REFCOUNT
10430+" tvs %%icc, 6\n"
10431+#endif
10432+
10433 " cas [%2], %0, %1\n"
10434 " cmp %0, %1\n"
10435 " bne,pn %%icc, 1b\n"
10436@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10437 " .previous"
10438 : "=&r" (tmp1), "=&r" (tmp2)
10439 : "r" (lock)
10440- : "memory");
10441+ : "memory", "cc");
10442 }
10443
10444-static int inline arch_read_trylock(arch_rwlock_t *lock)
10445+static inline int arch_read_trylock(arch_rwlock_t *lock)
10446 {
10447 int tmp1, tmp2;
10448
10449@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10450 "1: ldsw [%2], %0\n"
10451 " brlz,a,pn %0, 2f\n"
10452 " mov 0, %0\n"
10453-" add %0, 1, %1\n"
10454+" addcc %0, 1, %1\n"
10455+
10456+#ifdef CONFIG_PAX_REFCOUNT
10457+" tvs %%icc, 6\n"
10458+#endif
10459+
10460 " cas [%2], %0, %1\n"
10461 " cmp %0, %1\n"
10462 " bne,pn %%icc, 1b\n"
10463@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10464 return tmp1;
10465 }
10466
10467-static void inline arch_read_unlock(arch_rwlock_t *lock)
10468+static inline void arch_read_unlock(arch_rwlock_t *lock)
10469 {
10470 unsigned long tmp1, tmp2;
10471
10472 __asm__ __volatile__(
10473 "1: lduw [%2], %0\n"
10474-" sub %0, 1, %1\n"
10475+" subcc %0, 1, %1\n"
10476+
10477+#ifdef CONFIG_PAX_REFCOUNT
10478+" tvs %%icc, 6\n"
10479+#endif
10480+
10481 " cas [%2], %0, %1\n"
10482 " cmp %0, %1\n"
10483 " bne,pn %%xcc, 1b\n"
10484@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10485 : "memory");
10486 }
10487
10488-static void inline arch_write_lock(arch_rwlock_t *lock)
10489+static inline void arch_write_lock(arch_rwlock_t *lock)
10490 {
10491 unsigned long mask, tmp1, tmp2;
10492
10493@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10494 : "memory");
10495 }
10496
10497-static void inline arch_write_unlock(arch_rwlock_t *lock)
10498+static inline void arch_write_unlock(arch_rwlock_t *lock)
10499 {
10500 __asm__ __volatile__(
10501 " stw %%g0, [%0]"
10502@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10503 : "memory");
10504 }
10505
10506-static int inline arch_write_trylock(arch_rwlock_t *lock)
10507+static inline int arch_write_trylock(arch_rwlock_t *lock)
10508 {
10509 unsigned long mask, tmp1, tmp2, result;
10510
10511diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10512index 96efa7a..16858bf 100644
10513--- a/arch/sparc/include/asm/thread_info_32.h
10514+++ b/arch/sparc/include/asm/thread_info_32.h
10515@@ -49,6 +49,8 @@ struct thread_info {
10516 unsigned long w_saved;
10517
10518 struct restart_block restart_block;
10519+
10520+ unsigned long lowest_stack;
10521 };
10522
10523 /*
10524diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10525index a5f01ac..703b554 100644
10526--- a/arch/sparc/include/asm/thread_info_64.h
10527+++ b/arch/sparc/include/asm/thread_info_64.h
10528@@ -63,6 +63,8 @@ struct thread_info {
10529 struct pt_regs *kern_una_regs;
10530 unsigned int kern_una_insn;
10531
10532+ unsigned long lowest_stack;
10533+
10534 unsigned long fpregs[0] __attribute__ ((aligned(64)));
10535 };
10536
10537@@ -188,12 +190,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
10538 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
10539 /* flag bit 4 is available */
10540 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
10541-/* flag bit 6 is available */
10542+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
10543 #define TIF_32BIT 7 /* 32-bit binary */
10544 #define TIF_NOHZ 8 /* in adaptive nohz mode */
10545 #define TIF_SECCOMP 9 /* secure computing */
10546 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
10547 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
10548+
10549 /* NOTE: Thread flags >= 12 should be ones we have no interest
10550 * in using in assembly, else we can't use the mask as
10551 * an immediate value in instructions such as andcc.
10552@@ -213,12 +216,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
10553 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
10554 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
10555 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
10556+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
10557
10558 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
10559 _TIF_DO_NOTIFY_RESUME_MASK | \
10560 _TIF_NEED_RESCHED)
10561 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
10562
10563+#define _TIF_WORK_SYSCALL \
10564+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
10565+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
10566+
10567+
10568 /*
10569 * Thread-synchronous status.
10570 *
10571diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
10572index bd56c28..4b63d83 100644
10573--- a/arch/sparc/include/asm/uaccess.h
10574+++ b/arch/sparc/include/asm/uaccess.h
10575@@ -1,5 +1,6 @@
10576 #ifndef ___ASM_SPARC_UACCESS_H
10577 #define ___ASM_SPARC_UACCESS_H
10578+
10579 #if defined(__sparc__) && defined(__arch64__)
10580 #include <asm/uaccess_64.h>
10581 #else
10582diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
10583index 9634d08..f55fe4f 100644
10584--- a/arch/sparc/include/asm/uaccess_32.h
10585+++ b/arch/sparc/include/asm/uaccess_32.h
10586@@ -250,27 +250,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
10587
10588 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
10589 {
10590- if (n && __access_ok((unsigned long) to, n))
10591+ if ((long)n < 0)
10592+ return n;
10593+
10594+ if (n && __access_ok((unsigned long) to, n)) {
10595+ if (!__builtin_constant_p(n))
10596+ check_object_size(from, n, true);
10597 return __copy_user(to, (__force void __user *) from, n);
10598- else
10599+ } else
10600 return n;
10601 }
10602
10603 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
10604 {
10605+ if ((long)n < 0)
10606+ return n;
10607+
10608+ if (!__builtin_constant_p(n))
10609+ check_object_size(from, n, true);
10610+
10611 return __copy_user(to, (__force void __user *) from, n);
10612 }
10613
10614 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
10615 {
10616- if (n && __access_ok((unsigned long) from, n))
10617+ if ((long)n < 0)
10618+ return n;
10619+
10620+ if (n && __access_ok((unsigned long) from, n)) {
10621+ if (!__builtin_constant_p(n))
10622+ check_object_size(to, n, false);
10623 return __copy_user((__force void __user *) to, from, n);
10624- else
10625+ } else
10626 return n;
10627 }
10628
10629 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
10630 {
10631+ if ((long)n < 0)
10632+ return n;
10633+
10634 return __copy_user((__force void __user *) to, from, n);
10635 }
10636
10637diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
10638index c990a5e..f17b9c1 100644
10639--- a/arch/sparc/include/asm/uaccess_64.h
10640+++ b/arch/sparc/include/asm/uaccess_64.h
10641@@ -10,6 +10,7 @@
10642 #include <linux/compiler.h>
10643 #include <linux/string.h>
10644 #include <linux/thread_info.h>
10645+#include <linux/kernel.h>
10646 #include <asm/asi.h>
10647 #include <asm/spitfire.h>
10648 #include <asm-generic/uaccess-unaligned.h>
10649@@ -214,8 +215,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
10650 static inline unsigned long __must_check
10651 copy_from_user(void *to, const void __user *from, unsigned long size)
10652 {
10653- unsigned long ret = ___copy_from_user(to, from, size);
10654+ unsigned long ret;
10655
10656+ if ((long)size < 0 || size > INT_MAX)
10657+ return size;
10658+
10659+ if (!__builtin_constant_p(size))
10660+ check_object_size(to, size, false);
10661+
10662+ ret = ___copy_from_user(to, from, size);
10663 if (unlikely(ret))
10664 ret = copy_from_user_fixup(to, from, size);
10665
10666@@ -231,8 +239,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
10667 static inline unsigned long __must_check
10668 copy_to_user(void __user *to, const void *from, unsigned long size)
10669 {
10670- unsigned long ret = ___copy_to_user(to, from, size);
10671+ unsigned long ret;
10672
10673+ if ((long)size < 0 || size > INT_MAX)
10674+ return size;
10675+
10676+ if (!__builtin_constant_p(size))
10677+ check_object_size(from, size, true);
10678+
10679+ ret = ___copy_to_user(to, from, size);
10680 if (unlikely(ret))
10681 ret = copy_to_user_fixup(to, from, size);
10682 return ret;
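
Both uaccess flavours gain the same two defenses. First, a sign guard ((long)n < 0, plus size > INT_MAX on sparc64) turns a sign-confused length into an immediate "copied nothing" failure instead of a multi-gigabyte copy. Second, check_object_size() is the PAX_USERCOPY entry point that validates the kernel-side buffer against its slab-object or stack bounds; it is skipped for __builtin_constant_p() sizes, which can be vetted at compile time. The bug class the sign guard addresses looks like this (names invented for illustration):

    /* Illustrative sketch of the bug class, not code from this patch. */
    static long broken_read(void __user *ubuf, const char *kbuf,
                            long start, long end)
    {
            long len = end - start; /* attacker arranges end < start */

            /* without the guard, len converts to a huge size_t here and
             * the copy walks far past kbuf before faulting */
            return copy_to_user(ubuf, kbuf, len) ? -EFAULT : len;
    }
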
10683diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
10684index 7cf9c6e..6206648 100644
10685--- a/arch/sparc/kernel/Makefile
10686+++ b/arch/sparc/kernel/Makefile
10687@@ -4,7 +4,7 @@
10688 #
10689
10690 asflags-y := -ansi
10691-ccflags-y := -Werror
10692+#ccflags-y := -Werror
10693
10694 extra-y := head_$(BITS).o
10695
10696diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
10697index 50e7b62..79fae35 100644
10698--- a/arch/sparc/kernel/process_32.c
10699+++ b/arch/sparc/kernel/process_32.c
10700@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
10701
10702 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
10703 r->psr, r->pc, r->npc, r->y, print_tainted());
10704- printk("PC: <%pS>\n", (void *) r->pc);
10705+ printk("PC: <%pA>\n", (void *) r->pc);
10706 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10707 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
10708 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
10709 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10710 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
10711 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
10712- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
10713+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
10714
10715 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10716 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
10717@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10718 rw = (struct reg_window32 *) fp;
10719 pc = rw->ins[7];
10720 printk("[%08lx : ", pc);
10721- printk("%pS ] ", (void *) pc);
10722+ printk("%pA ] ", (void *) pc);
10723 fp = rw->ins[6];
10724 } while (++count < 16);
10725 printk("\n");
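
The %pS to %pA conversions here, and in every sparc dump routine that follows, switch to a printk extension that this patch adds elsewhere; %pA is not a mainline format specifier. Judging from its use alongside KERNEXEC's ktla_ktva() translation, the intent appears to be symbolizing kernel text addresses correctly when the kernel image is relocated, an assumption worth flagging since the %pA implementation lies outside this section. The call shape is unchanged:

    /* identical usage to %pS; only the specifier differs */
    printk("PC: <%pA>\n", (void *)regs->pc);
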
10726diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
10727index 027e099..6d4178f 100644
10728--- a/arch/sparc/kernel/process_64.c
10729+++ b/arch/sparc/kernel/process_64.c
10730@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
10731 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
10732 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
10733 if (regs->tstate & TSTATE_PRIV)
10734- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
10735+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
10736 }
10737
10738 void show_regs(struct pt_regs *regs)
10739@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
10740
10741 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
10742 regs->tpc, regs->tnpc, regs->y, print_tainted());
10743- printk("TPC: <%pS>\n", (void *) regs->tpc);
10744+ printk("TPC: <%pA>\n", (void *) regs->tpc);
10745 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
10746 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
10747 regs->u_regs[3]);
10748@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
10749 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
10750 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
10751 regs->u_regs[15]);
10752- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
10753+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
10754 show_regwindow(regs);
10755 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
10756 }
10757@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
10758 ((tp && tp->task) ? tp->task->pid : -1));
10759
10760 if (gp->tstate & TSTATE_PRIV) {
10761- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
10762+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
10763 (void *) gp->tpc,
10764 (void *) gp->o7,
10765 (void *) gp->i7,
10766diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
10767index 79cc0d1..ec62734 100644
10768--- a/arch/sparc/kernel/prom_common.c
10769+++ b/arch/sparc/kernel/prom_common.c
10770@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
10771
10772 unsigned int prom_early_allocated __initdata;
10773
10774-static struct of_pdt_ops prom_sparc_ops __initdata = {
10775+static struct of_pdt_ops prom_sparc_ops __initconst = {
10776 .nextprop = prom_common_nextprop,
10777 .getproplen = prom_getproplen,
10778 .getproperty = prom_getproperty,
10779diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
10780index c13c9f2..d572c34 100644
10781--- a/arch/sparc/kernel/ptrace_64.c
10782+++ b/arch/sparc/kernel/ptrace_64.c
10783@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
10784 return ret;
10785 }
10786
10787+#ifdef CONFIG_GRKERNSEC_SETXID
10788+extern void gr_delayed_cred_worker(void);
10789+#endif
10790+
10791 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10792 {
10793 int ret = 0;
10794@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10795 if (test_thread_flag(TIF_NOHZ))
10796 user_exit();
10797
10798+#ifdef CONFIG_GRKERNSEC_SETXID
10799+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10800+ gr_delayed_cred_worker();
10801+#endif
10802+
10803 if (test_thread_flag(TIF_SYSCALL_TRACE))
10804 ret = tracehook_report_syscall_entry(regs);
10805
10806@@ -1093,6 +1102,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
10807 if (test_thread_flag(TIF_NOHZ))
10808 user_exit();
10809
10810+#ifdef CONFIG_GRKERNSEC_SETXID
10811+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10812+ gr_delayed_cred_worker();
10813+#endif
10814+
10815 audit_syscall_exit(regs);
10816
10817 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
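
TIF_GRSEC_SETXID, claimed as flag bit 6 in thread_info_64.h above, backs grsecurity's consistent-credential scheme: when one thread of a process changes uid/gid, its siblings are flagged, and each consumes the flag at its next syscall entry or exit via the gr_delayed_cred_worker() hooks added here. The producer side is not in this section of the patch; plausibly it is just a walk over the thread group (hedged sketch, with the real policy living elsewhere):

    /* Hedged sketch of the flag-raising side; not code from this hunk. */
    static void mark_group_for_cred_update(struct task_struct *leader)
    {
            struct task_struct *t = leader;

            do {
                    set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
            } while_each_thread(leader, t);
    }
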
10818diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
10819index 41aa247..eadfb74 100644
10820--- a/arch/sparc/kernel/smp_64.c
10821+++ b/arch/sparc/kernel/smp_64.c
10822@@ -883,7 +883,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10823 return;
10824
10825 #ifdef CONFIG_DEBUG_DCFLUSH
10826- atomic_inc(&dcpage_flushes);
10827+ atomic_inc_unchecked(&dcpage_flushes);
10828 #endif
10829
10830 this_cpu = get_cpu();
10831@@ -907,7 +907,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10832 xcall_deliver(data0, __pa(pg_addr),
10833 (u64) pg_addr, cpumask_of(cpu));
10834 #ifdef CONFIG_DEBUG_DCFLUSH
10835- atomic_inc(&dcpage_flushes_xcall);
10836+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10837 #endif
10838 }
10839 }
10840@@ -926,7 +926,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10841 preempt_disable();
10842
10843 #ifdef CONFIG_DEBUG_DCFLUSH
10844- atomic_inc(&dcpage_flushes);
10845+ atomic_inc_unchecked(&dcpage_flushes);
10846 #endif
10847 data0 = 0;
10848 pg_addr = page_address(page);
10849@@ -943,7 +943,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10850 xcall_deliver(data0, __pa(pg_addr),
10851 (u64) pg_addr, cpu_online_mask);
10852 #ifdef CONFIG_DEBUG_DCFLUSH
10853- atomic_inc(&dcpage_flushes_xcall);
10854+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10855 #endif
10856 }
10857 __local_flush_dcache_page(page);
10858diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
10859index 646988d..b88905f 100644
10860--- a/arch/sparc/kernel/sys_sparc_32.c
10861+++ b/arch/sparc/kernel/sys_sparc_32.c
10862@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10863 if (len > TASK_SIZE - PAGE_SIZE)
10864 return -ENOMEM;
10865 if (!addr)
10866- addr = TASK_UNMAPPED_BASE;
10867+ addr = current->mm->mmap_base;
10868
10869 info.flags = 0;
10870 info.length = len;
10871diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
10872index c85403d..6af95c9 100644
10873--- a/arch/sparc/kernel/sys_sparc_64.c
10874+++ b/arch/sparc/kernel/sys_sparc_64.c
10875@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10876 struct vm_area_struct * vma;
10877 unsigned long task_size = TASK_SIZE;
10878 int do_color_align;
10879+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10880 struct vm_unmapped_area_info info;
10881
10882 if (flags & MAP_FIXED) {
10883 /* We do not accept a shared mapping if it would violate
10884 * cache aliasing constraints.
10885 */
10886- if ((flags & MAP_SHARED) &&
10887+ if ((filp || (flags & MAP_SHARED)) &&
10888 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10889 return -EINVAL;
10890 return addr;
10891@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10892 if (filp || (flags & MAP_SHARED))
10893 do_color_align = 1;
10894
10895+#ifdef CONFIG_PAX_RANDMMAP
10896+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10897+#endif
10898+
10899 if (addr) {
10900 if (do_color_align)
10901 addr = COLOR_ALIGN(addr, pgoff);
10902@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10903 addr = PAGE_ALIGN(addr);
10904
10905 vma = find_vma(mm, addr);
10906- if (task_size - len >= addr &&
10907- (!vma || addr + len <= vma->vm_start))
10908+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10909 return addr;
10910 }
10911
10912 info.flags = 0;
10913 info.length = len;
10914- info.low_limit = TASK_UNMAPPED_BASE;
10915+ info.low_limit = mm->mmap_base;
10916 info.high_limit = min(task_size, VA_EXCLUDE_START);
10917 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10918 info.align_offset = pgoff << PAGE_SHIFT;
10919+ info.threadstack_offset = offset;
10920 addr = vm_unmapped_area(&info);
10921
10922 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10923 VM_BUG_ON(addr != -ENOMEM);
10924 info.low_limit = VA_EXCLUDE_END;
10925+
10926+#ifdef CONFIG_PAX_RANDMMAP
10927+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10928+ info.low_limit += mm->delta_mmap;
10929+#endif
10930+
10931 info.high_limit = task_size;
10932 addr = vm_unmapped_area(&info);
10933 }
10934@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10935 unsigned long task_size = STACK_TOP32;
10936 unsigned long addr = addr0;
10937 int do_color_align;
10938+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10939 struct vm_unmapped_area_info info;
10940
10941 /* This should only ever run for 32-bit processes. */
10942@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10943 /* We do not accept a shared mapping if it would violate
10944 * cache aliasing constraints.
10945 */
10946- if ((flags & MAP_SHARED) &&
10947+ if ((filp || (flags & MAP_SHARED)) &&
10948 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10949 return -EINVAL;
10950 return addr;
10951@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10952 if (filp || (flags & MAP_SHARED))
10953 do_color_align = 1;
10954
10955+#ifdef CONFIG_PAX_RANDMMAP
10956+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10957+#endif
10958+
10959 /* requesting a specific address */
10960 if (addr) {
10961 if (do_color_align)
10962@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10963 addr = PAGE_ALIGN(addr);
10964
10965 vma = find_vma(mm, addr);
10966- if (task_size - len >= addr &&
10967- (!vma || addr + len <= vma->vm_start))
10968+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10969 return addr;
10970 }
10971
10972@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10973 info.high_limit = mm->mmap_base;
10974 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10975 info.align_offset = pgoff << PAGE_SHIFT;
10976+ info.threadstack_offset = offset;
10977 addr = vm_unmapped_area(&info);
10978
10979 /*
10980@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10981 VM_BUG_ON(addr != -ENOMEM);
10982 info.flags = 0;
10983 info.low_limit = TASK_UNMAPPED_BASE;
10984+
10985+#ifdef CONFIG_PAX_RANDMMAP
10986+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10987+ info.low_limit += mm->delta_mmap;
10988+#endif
10989+
10990 info.high_limit = STACK_TOP32;
10991 addr = vm_unmapped_area(&info);
10992 }
10993@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
10994 EXPORT_SYMBOL(get_fb_unmapped_area);
10995
10996 /* Essentially the same as PowerPC. */
10997-static unsigned long mmap_rnd(void)
10998+static unsigned long mmap_rnd(struct mm_struct *mm)
10999 {
11000 unsigned long rnd = 0UL;
11001
11002+#ifdef CONFIG_PAX_RANDMMAP
11003+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
11004+#endif
11005+
11006 if (current->flags & PF_RANDOMIZE) {
11007 unsigned long val = get_random_int();
11008 if (test_thread_flag(TIF_32BIT))
11009@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
11010
11011 void arch_pick_mmap_layout(struct mm_struct *mm)
11012 {
11013- unsigned long random_factor = mmap_rnd();
11014+ unsigned long random_factor = mmap_rnd(mm);
11015 unsigned long gap;
11016
11017 /*
11018@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
11019 gap == RLIM_INFINITY ||
11020 sysctl_legacy_va_layout) {
11021 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
11022+
11023+#ifdef CONFIG_PAX_RANDMMAP
11024+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11025+ mm->mmap_base += mm->delta_mmap;
11026+#endif
11027+
11028 mm->get_unmapped_area = arch_get_unmapped_area;
11029 } else {
11030 /* We know it's 32-bit */
11031@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
11032 gap = (task_size / 6 * 5);
11033
11034 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
11035+
11036+#ifdef CONFIG_PAX_RANDMMAP
11037+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11038+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
11039+#endif
11040+
11041 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
11042 }
11043 }
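
Two independent hardenings land in sys_sparc_64.c. The alias check in the MAP_FIXED path now fires for any file-backed mapping ((filp || (flags & MAP_SHARED))), not just shared ones, since sparc's virtually indexed caches can alias on private file mappings too. And both search paths start from the PaX-adjusted mm->mmap_base and carry the random thread-stack offset into vm_unmapped_area() through the threadstack_offset field. The SHMLBA colour rule being enforced is the usual one; the COLOR_ALIGN macro near the top of this file has approximately this shape (shown for orientation):

    #define COLOR_ALIGN(addr, pgoff) \
            ((((addr) + (SHMLBA - 1)) & ~(SHMLBA - 1)) + \
             (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
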
11044diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
11045index 33a17e7..d87fb1f 100644
11046--- a/arch/sparc/kernel/syscalls.S
11047+++ b/arch/sparc/kernel/syscalls.S
11048@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
11049 #endif
11050 .align 32
11051 1: ldx [%g6 + TI_FLAGS], %l5
11052- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11053+ andcc %l5, _TIF_WORK_SYSCALL, %g0
11054 be,pt %icc, rtrap
11055 nop
11056 call syscall_trace_leave
11057@@ -184,7 +184,7 @@ linux_sparc_syscall32:
11058
11059 srl %i3, 0, %o3 ! IEU0
11060 srl %i2, 0, %o2 ! IEU0 Group
11061- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11062+ andcc %l0, _TIF_WORK_SYSCALL, %g0
11063 bne,pn %icc, linux_syscall_trace32 ! CTI
11064 mov %i0, %l5 ! IEU1
11065 5: call %l7 ! CTI Group brk forced
11066@@ -208,7 +208,7 @@ linux_sparc_syscall:
11067
11068 mov %i3, %o3 ! IEU1
11069 mov %i4, %o4 ! IEU0 Group
11070- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11071+ andcc %l0, _TIF_WORK_SYSCALL, %g0
11072 bne,pn %icc, linux_syscall_trace ! CTI Group
11073 mov %i0, %l5 ! IEU0
11074 2: call %l7 ! CTI Group brk forced
11075@@ -223,7 +223,7 @@ ret_sys_call:
11076
11077 cmp %o0, -ERESTART_RESTARTBLOCK
11078 bgeu,pn %xcc, 1f
11079- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11080+ andcc %l0, _TIF_WORK_SYSCALL, %g0
11081 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
11082
11083 2:
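
Folding the five-flag disjunction into _TIF_WORK_SYSCALL both shortens these four andcc sites and lets _TIF_GRSEC_SETXID join the syscall-work set without touching the assembly again. The constraint noted in thread_info_64.h still applies: andcc takes a 13-bit signed immediate, so every flag in the mask must sit below bit 12. A compile-time guard for that invariant could look like the following (a kernel of this era would spell it BUILD_BUG_ON; _Static_assert is the C11 equivalent):

    /* sketch: keep the OR'd mask inside SPARC's simm13 range */
    _Static_assert((_TIF_WORK_SYSCALL & ~0xfffUL) == 0,
                   "_TIF_WORK_SYSCALL must fit a 13-bit signed immediate");
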
11084diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
11085index 6fd386c5..6907d81 100644
11086--- a/arch/sparc/kernel/traps_32.c
11087+++ b/arch/sparc/kernel/traps_32.c
11088@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
11089 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
11090 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
11091
11092+extern void gr_handle_kernel_exploit(void);
11093+
11094 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11095 {
11096 static int die_counter;
11097@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11098 count++ < 30 &&
11099 (((unsigned long) rw) >= PAGE_OFFSET) &&
11100 !(((unsigned long) rw) & 0x7)) {
11101- printk("Caller[%08lx]: %pS\n", rw->ins[7],
11102+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
11103 (void *) rw->ins[7]);
11104 rw = (struct reg_window32 *)rw->ins[6];
11105 }
11106 }
11107 printk("Instruction DUMP:");
11108 instruction_dump ((unsigned long *) regs->pc);
11109- if(regs->psr & PSR_PS)
11110+ if(regs->psr & PSR_PS) {
11111+ gr_handle_kernel_exploit();
11112 do_exit(SIGKILL);
11113+ }
11114 do_exit(SIGSEGV);
11115 }
11116
11117diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
11118index fb6640e..2daada8 100644
11119--- a/arch/sparc/kernel/traps_64.c
11120+++ b/arch/sparc/kernel/traps_64.c
11121@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
11122 i + 1,
11123 p->trapstack[i].tstate, p->trapstack[i].tpc,
11124 p->trapstack[i].tnpc, p->trapstack[i].tt);
11125- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
11126+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
11127 }
11128 }
11129
11130@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
11131
11132 lvl -= 0x100;
11133 if (regs->tstate & TSTATE_PRIV) {
11134+
11135+#ifdef CONFIG_PAX_REFCOUNT
11136+ if (lvl == 6)
11137+ pax_report_refcount_overflow(regs);
11138+#endif
11139+
11140 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
11141 die_if_kernel(buffer, regs);
11142 }
11143@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
11144 void bad_trap_tl1(struct pt_regs *regs, long lvl)
11145 {
11146 char buffer[32];
11147-
11148+
11149 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
11150 0, lvl, SIGTRAP) == NOTIFY_STOP)
11151 return;
11152
11153+#ifdef CONFIG_PAX_REFCOUNT
11154+ if (lvl == 6)
11155+ pax_report_refcount_overflow(regs);
11156+#endif
11157+
11158 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
11159
11160 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
11161@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
11162 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
11163 printk("%s" "ERROR(%d): ",
11164 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
11165- printk("TPC<%pS>\n", (void *) regs->tpc);
11166+ printk("TPC<%pA>\n", (void *) regs->tpc);
11167 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
11168 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
11169 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
11170@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11171 smp_processor_id(),
11172 (type & 0x1) ? 'I' : 'D',
11173 regs->tpc);
11174- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
11175+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
11176 panic("Irrecoverable Cheetah+ parity error.");
11177 }
11178
11179@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11180 smp_processor_id(),
11181 (type & 0x1) ? 'I' : 'D',
11182 regs->tpc);
11183- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
11184+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
11185 }
11186
11187 struct sun4v_error_entry {
11188@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
11189 /*0x38*/u64 reserved_5;
11190 };
11191
11192-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11193-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11194+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11195+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11196
11197 static const char *sun4v_err_type_to_str(u8 type)
11198 {
11199@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
11200 }
11201
11202 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11203- int cpu, const char *pfx, atomic_t *ocnt)
11204+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
11205 {
11206 u64 *raw_ptr = (u64 *) ent;
11207 u32 attrs;
11208@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11209
11210 show_regs(regs);
11211
11212- if ((cnt = atomic_read(ocnt)) != 0) {
11213- atomic_set(ocnt, 0);
11214+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
11215+ atomic_set_unchecked(ocnt, 0);
11216 wmb();
11217 printk("%s: Queue overflowed %d times.\n",
11218 pfx, cnt);
11219@@ -2048,7 +2059,7 @@ out:
11220 */
11221 void sun4v_resum_overflow(struct pt_regs *regs)
11222 {
11223- atomic_inc(&sun4v_resum_oflow_cnt);
11224+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
11225 }
11226
11227 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
11228@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
11229 /* XXX Actually even this can make not that much sense. Perhaps
11230 * XXX we should just pull the plug and panic directly from here?
11231 */
11232- atomic_inc(&sun4v_nonresum_oflow_cnt);
11233+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
11234 }
11235
11236 unsigned long sun4v_err_itlb_vaddr;
11237@@ -2116,9 +2127,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
11238
11239 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
11240 regs->tpc, tl);
11241- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
11242+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
11243 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11244- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
11245+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
11246 (void *) regs->u_regs[UREG_I7]);
11247 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
11248 "pte[%lx] error[%lx]\n",
11249@@ -2140,9 +2151,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
11250
11251 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
11252 regs->tpc, tl);
11253- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
11254+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
11255 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11256- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
11257+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
11258 (void *) regs->u_regs[UREG_I7]);
11259 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
11260 "pte[%lx] error[%lx]\n",
11261@@ -2359,13 +2370,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11262 fp = (unsigned long)sf->fp + STACK_BIAS;
11263 }
11264
11265- printk(" [%016lx] %pS\n", pc, (void *) pc);
11266+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11267 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11268 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
11269 int index = tsk->curr_ret_stack;
11270 if (tsk->ret_stack && index >= graph) {
11271 pc = tsk->ret_stack[index - graph].ret;
11272- printk(" [%016lx] %pS\n", pc, (void *) pc);
11273+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11274 graph++;
11275 }
11276 }
11277@@ -2383,6 +2394,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
11278 return (struct reg_window *) (fp + STACK_BIAS);
11279 }
11280
11281+extern void gr_handle_kernel_exploit(void);
11282+
11283 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11284 {
11285 static int die_counter;
11286@@ -2411,7 +2424,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11287 while (rw &&
11288 count++ < 30 &&
11289 kstack_valid(tp, (unsigned long) rw)) {
11290- printk("Caller[%016lx]: %pS\n", rw->ins[7],
11291+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
11292 (void *) rw->ins[7]);
11293
11294 rw = kernel_stack_up(rw);
11295@@ -2424,8 +2437,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11296 }
11297 user_instruction_dump ((unsigned int __user *) regs->tpc);
11298 }
11299- if (regs->tstate & TSTATE_PRIV)
11300+ if (regs->tstate & TSTATE_PRIV) {
11301+ gr_handle_kernel_exploit();
11302 do_exit(SIGKILL);
11303+ }
11304 do_exit(SIGSEGV);
11305 }
11306 EXPORT_SYMBOL(die_if_kernel);
11307diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
11308index 62098a8..547ab2c 100644
11309--- a/arch/sparc/kernel/unaligned_64.c
11310+++ b/arch/sparc/kernel/unaligned_64.c
11311@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
11312 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
11313
11314 if (__ratelimit(&ratelimit)) {
11315- printk("Kernel unaligned access at TPC[%lx] %pS\n",
11316+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
11317 regs->tpc, (void *) regs->tpc);
11318 }
11319 }
11320diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
11321index 3269b02..64f5231 100644
11322--- a/arch/sparc/lib/Makefile
11323+++ b/arch/sparc/lib/Makefile
11324@@ -2,7 +2,7 @@
11325 #
11326
11327 asflags-y := -ansi -DST_DIV0=0x02
11328-ccflags-y := -Werror
11329+#ccflags-y := -Werror
11330
11331 lib-$(CONFIG_SPARC32) += ashrdi3.o
11332 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
11333diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
11334index 85c233d..68500e0 100644
11335--- a/arch/sparc/lib/atomic_64.S
11336+++ b/arch/sparc/lib/atomic_64.S
11337@@ -17,7 +17,12 @@
11338 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
11339 BACKOFF_SETUP(%o2)
11340 1: lduw [%o1], %g1
11341- add %g1, %o0, %g7
11342+ addcc %g1, %o0, %g7
11343+
11344+#ifdef CONFIG_PAX_REFCOUNT
11345+ tvs %icc, 6
11346+#endif
11347+
11348 cas [%o1], %g1, %g7
11349 cmp %g1, %g7
11350 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11351@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
11352 2: BACKOFF_SPIN(%o2, %o3, 1b)
11353 ENDPROC(atomic_add)
11354
11355+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11356+ BACKOFF_SETUP(%o2)
11357+1: lduw [%o1], %g1
11358+ add %g1, %o0, %g7
11359+ cas [%o1], %g1, %g7
11360+ cmp %g1, %g7
11361+ bne,pn %icc, 2f
11362+ nop
11363+ retl
11364+ nop
11365+2: BACKOFF_SPIN(%o2, %o3, 1b)
11366+ENDPROC(atomic_add_unchecked)
11367+
11368 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11369 BACKOFF_SETUP(%o2)
11370 1: lduw [%o1], %g1
11371- sub %g1, %o0, %g7
11372+ subcc %g1, %o0, %g7
11373+
11374+#ifdef CONFIG_PAX_REFCOUNT
11375+ tvs %icc, 6
11376+#endif
11377+
11378 cas [%o1], %g1, %g7
11379 cmp %g1, %g7
11380 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11381@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11382 2: BACKOFF_SPIN(%o2, %o3, 1b)
11383 ENDPROC(atomic_sub)
11384
11385+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
11386+ BACKOFF_SETUP(%o2)
11387+1: lduw [%o1], %g1
11388+ sub %g1, %o0, %g7
11389+ cas [%o1], %g1, %g7
11390+ cmp %g1, %g7
11391+ bne,pn %icc, 2f
11392+ nop
11393+ retl
11394+ nop
11395+2: BACKOFF_SPIN(%o2, %o3, 1b)
11396+ENDPROC(atomic_sub_unchecked)
11397+
11398 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11399 BACKOFF_SETUP(%o2)
11400 1: lduw [%o1], %g1
11401- add %g1, %o0, %g7
11402+ addcc %g1, %o0, %g7
11403+
11404+#ifdef CONFIG_PAX_REFCOUNT
11405+ tvs %icc, 6
11406+#endif
11407+
11408 cas [%o1], %g1, %g7
11409 cmp %g1, %g7
11410 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11411@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11412 2: BACKOFF_SPIN(%o2, %o3, 1b)
11413 ENDPROC(atomic_add_ret)
11414
11415+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11416+ BACKOFF_SETUP(%o2)
11417+1: lduw [%o1], %g1
11418+ addcc %g1, %o0, %g7
11419+ cas [%o1], %g1, %g7
11420+ cmp %g1, %g7
11421+ bne,pn %icc, 2f
11422+ add %g7, %o0, %g7
11423+ sra %g7, 0, %o0
11424+ retl
11425+ nop
11426+2: BACKOFF_SPIN(%o2, %o3, 1b)
11427+ENDPROC(atomic_add_ret_unchecked)
11428+
11429 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
11430 BACKOFF_SETUP(%o2)
11431 1: lduw [%o1], %g1
11432- sub %g1, %o0, %g7
11433+ subcc %g1, %o0, %g7
11434+
11435+#ifdef CONFIG_PAX_REFCOUNT
11436+ tvs %icc, 6
11437+#endif
11438+
11439 cas [%o1], %g1, %g7
11440 cmp %g1, %g7
11441 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11442@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
11443 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
11444 BACKOFF_SETUP(%o2)
11445 1: ldx [%o1], %g1
11446- add %g1, %o0, %g7
11447+ addcc %g1, %o0, %g7
11448+
11449+#ifdef CONFIG_PAX_REFCOUNT
11450+ tvs %xcc, 6
11451+#endif
11452+
11453 casx [%o1], %g1, %g7
11454 cmp %g1, %g7
11455 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11456@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
11457 2: BACKOFF_SPIN(%o2, %o3, 1b)
11458 ENDPROC(atomic64_add)
11459
11460+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11461+ BACKOFF_SETUP(%o2)
11462+1: ldx [%o1], %g1
11463+ addcc %g1, %o0, %g7
11464+ casx [%o1], %g1, %g7
11465+ cmp %g1, %g7
11466+ bne,pn %xcc, 2f
11467+ nop
11468+ retl
11469+ nop
11470+2: BACKOFF_SPIN(%o2, %o3, 1b)
11471+ENDPROC(atomic64_add_unchecked)
11472+
11473 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11474 BACKOFF_SETUP(%o2)
11475 1: ldx [%o1], %g1
11476- sub %g1, %o0, %g7
11477+ subcc %g1, %o0, %g7
11478+
11479+#ifdef CONFIG_PAX_REFCOUNT
11480+ tvs %xcc, 6
11481+#endif
11482+
11483 casx [%o1], %g1, %g7
11484 cmp %g1, %g7
11485 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11486@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11487 2: BACKOFF_SPIN(%o2, %o3, 1b)
11488 ENDPROC(atomic64_sub)
11489
11490+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
11491+ BACKOFF_SETUP(%o2)
11492+1: ldx [%o1], %g1
11493+ subcc %g1, %o0, %g7
11494+ casx [%o1], %g1, %g7
11495+ cmp %g1, %g7
11496+ bne,pn %xcc, 2f
11497+ nop
11498+ retl
11499+ nop
11500+2: BACKOFF_SPIN(%o2, %o3, 1b)
11501+ENDPROC(atomic64_sub_unchecked)
11502+
11503 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11504 BACKOFF_SETUP(%o2)
11505 1: ldx [%o1], %g1
11506- add %g1, %o0, %g7
11507+ addcc %g1, %o0, %g7
11508+
11509+#ifdef CONFIG_PAX_REFCOUNT
11510+ tvs %xcc, 6
11511+#endif
11512+
11513 casx [%o1], %g1, %g7
11514 cmp %g1, %g7
11515 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11516@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11517 2: BACKOFF_SPIN(%o2, %o3, 1b)
11518 ENDPROC(atomic64_add_ret)
11519
11520+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11521+ BACKOFF_SETUP(%o2)
11522+1: ldx [%o1], %g1
11523+ addcc %g1, %o0, %g7
11524+ casx [%o1], %g1, %g7
11525+ cmp %g1, %g7
11526+ bne,pn %xcc, 2f
11527+ add %g7, %o0, %g7
11528+ mov %g7, %o0
11529+ retl
11530+ nop
11531+2: BACKOFF_SPIN(%o2, %o3, 1b)
11532+ENDPROC(atomic64_add_ret_unchecked)
11533+
11534 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
11535 BACKOFF_SETUP(%o2)
11536 1: ldx [%o1], %g1
11537- sub %g1, %o0, %g7
11538+ subcc %g1, %o0, %g7
11539+
11540+#ifdef CONFIG_PAX_REFCOUNT
11541+ tvs %xcc, 6
11542+#endif
11543+
11544 casx [%o1], %g1, %g7
11545 cmp %g1, %g7
11546 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
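
Note on the atomic_64.S hunks above: they all follow one pattern. Each checked primitive gains an overflow trap (add/sub become addcc/subcc followed by tvs %icc, 6 or tvs %xcc, 6 under CONFIG_PAX_REFCOUNT), and each gets an _unchecked twin that keeps the historical wrapping behaviour. A minimal C model of the two semantics (illustrative only, not from the patch):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* checked: mirrors addcc + tvs, which traps when signed overflow sets V */
static int checked_add(int old, int inc)
{
	if ((inc > 0 && old > INT_MAX - inc) ||
	    (inc < 0 && old < INT_MIN - inc)) {
		fprintf(stderr, "refcount overflow\n");
		abort();	/* the kernel takes a trap instead */
	}
	return old + inc;
}

/* unchecked: plain add, two's-complement wraparound allowed */
static int unchecked_add(int old, int inc)
{
	return (int)((unsigned int)old + (unsigned int)inc);
}

int main(void)
{
	printf("%d\n", checked_add(1, 2));		/* 3 */
	printf("%d\n", unchecked_add(INT_MAX, 1));	/* wraps to INT_MIN */
	return 0;
}
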
11547diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
11548index 323335b..ed85ea2 100644
11549--- a/arch/sparc/lib/ksyms.c
11550+++ b/arch/sparc/lib/ksyms.c
11551@@ -100,12 +100,18 @@ EXPORT_SYMBOL(__clear_user);
11552
11553 /* Atomic counter implementation. */
11554 EXPORT_SYMBOL(atomic_add);
11555+EXPORT_SYMBOL(atomic_add_unchecked);
11556 EXPORT_SYMBOL(atomic_add_ret);
11557+EXPORT_SYMBOL(atomic_add_ret_unchecked);
11558 EXPORT_SYMBOL(atomic_sub);
11559+EXPORT_SYMBOL(atomic_sub_unchecked);
11560 EXPORT_SYMBOL(atomic_sub_ret);
11561 EXPORT_SYMBOL(atomic64_add);
11562+EXPORT_SYMBOL(atomic64_add_unchecked);
11563 EXPORT_SYMBOL(atomic64_add_ret);
11564+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
11565 EXPORT_SYMBOL(atomic64_sub);
11566+EXPORT_SYMBOL(atomic64_sub_unchecked);
11567 EXPORT_SYMBOL(atomic64_sub_ret);
11568 EXPORT_SYMBOL(atomic64_dec_if_positive);
11569
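Since the sparc64 atomics are out-of-line assembly routines rather than inline helpers, the new _unchecked entry points must be exported alongside the existing ones or modules using them fail to link. A hypothetical module-side consumer (API names per the PaX additions elsewhere in this patch, not a drop-in example):

#include <linux/module.h>
#include <asm/atomic.h>

static atomic_unchecked_t hits = ATOMIC_INIT(0);

static int __init demo_init(void)
{
	atomic_add_unchecked(1, &hits);	/* resolves via the export above */
	return 0;
}

static void __exit demo_exit(void)
{
	pr_info("hits: %d\n", atomic_read_unchecked(&hits));
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
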
11570diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
11571index 30c3ecc..736f015 100644
11572--- a/arch/sparc/mm/Makefile
11573+++ b/arch/sparc/mm/Makefile
11574@@ -2,7 +2,7 @@
11575 #
11576
11577 asflags-y := -ansi
11578-ccflags-y := -Werror
11579+#ccflags-y := -Werror
11580
11581 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
11582 obj-y += fault_$(BITS).o
11583diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
11584index 908e8c1..1524793 100644
11585--- a/arch/sparc/mm/fault_32.c
11586+++ b/arch/sparc/mm/fault_32.c
11587@@ -21,6 +21,9 @@
11588 #include <linux/perf_event.h>
11589 #include <linux/interrupt.h>
11590 #include <linux/kdebug.h>
11591+#include <linux/slab.h>
11592+#include <linux/pagemap.h>
11593+#include <linux/compiler.h>
11594
11595 #include <asm/page.h>
11596 #include <asm/pgtable.h>
11597@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
11598 return safe_compute_effective_address(regs, insn);
11599 }
11600
11601+#ifdef CONFIG_PAX_PAGEEXEC
11602+#ifdef CONFIG_PAX_DLRESOLVE
11603+static void pax_emuplt_close(struct vm_area_struct *vma)
11604+{
11605+ vma->vm_mm->call_dl_resolve = 0UL;
11606+}
11607+
11608+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11609+{
11610+ unsigned int *kaddr;
11611+
11612+ vmf->page = alloc_page(GFP_HIGHUSER);
11613+ if (!vmf->page)
11614+ return VM_FAULT_OOM;
11615+
11616+ kaddr = kmap(vmf->page);
11617+ memset(kaddr, 0, PAGE_SIZE);
11618+ kaddr[0] = 0x9DE3BFA8U; /* save */
11619+ flush_dcache_page(vmf->page);
11620+ kunmap(vmf->page);
11621+ return VM_FAULT_MAJOR;
11622+}
11623+
11624+static const struct vm_operations_struct pax_vm_ops = {
11625+ .close = pax_emuplt_close,
11626+ .fault = pax_emuplt_fault
11627+};
11628+
11629+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11630+{
11631+ int ret;
11632+
11633+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11634+ vma->vm_mm = current->mm;
11635+ vma->vm_start = addr;
11636+ vma->vm_end = addr + PAGE_SIZE;
11637+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11638+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11639+ vma->vm_ops = &pax_vm_ops;
11640+
11641+ ret = insert_vm_struct(current->mm, vma);
11642+ if (ret)
11643+ return ret;
11644+
11645+ ++current->mm->total_vm;
11646+ return 0;
11647+}
11648+#endif
11649+
11650+/*
11651+ * PaX: decide what to do with offenders (regs->pc = fault address)
11652+ *
11653+ * returns 1 when task should be killed
11654+ * 2 when patched PLT trampoline was detected
11655+ * 3 when unpatched PLT trampoline was detected
11656+ */
11657+static int pax_handle_fetch_fault(struct pt_regs *regs)
11658+{
11659+
11660+#ifdef CONFIG_PAX_EMUPLT
11661+ int err;
11662+
11663+ do { /* PaX: patched PLT emulation #1 */
11664+ unsigned int sethi1, sethi2, jmpl;
11665+
11666+ err = get_user(sethi1, (unsigned int *)regs->pc);
11667+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
11668+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
11669+
11670+ if (err)
11671+ break;
11672+
11673+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11674+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11675+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11676+ {
11677+ unsigned int addr;
11678+
11679+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11680+ addr = regs->u_regs[UREG_G1];
11681+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11682+ regs->pc = addr;
11683+ regs->npc = addr+4;
11684+ return 2;
11685+ }
11686+ } while (0);
11687+
11688+ do { /* PaX: patched PLT emulation #2 */
11689+ unsigned int ba;
11690+
11691+ err = get_user(ba, (unsigned int *)regs->pc);
11692+
11693+ if (err)
11694+ break;
11695+
11696+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11697+ unsigned int addr;
11698+
11699+ if ((ba & 0xFFC00000U) == 0x30800000U)
11700+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11701+ else
11702+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11703+ regs->pc = addr;
11704+ regs->npc = addr+4;
11705+ return 2;
11706+ }
11707+ } while (0);
11708+
11709+ do { /* PaX: patched PLT emulation #3 */
11710+ unsigned int sethi, bajmpl, nop;
11711+
11712+ err = get_user(sethi, (unsigned int *)regs->pc);
11713+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
11714+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11715+
11716+ if (err)
11717+ break;
11718+
11719+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11720+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11721+ nop == 0x01000000U)
11722+ {
11723+ unsigned int addr;
11724+
11725+ addr = (sethi & 0x003FFFFFU) << 10;
11726+ regs->u_regs[UREG_G1] = addr;
11727+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11728+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11729+ else
11730+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11731+ regs->pc = addr;
11732+ regs->npc = addr+4;
11733+ return 2;
11734+ }
11735+ } while (0);
11736+
11737+ do { /* PaX: unpatched PLT emulation step 1 */
11738+ unsigned int sethi, ba, nop;
11739+
11740+ err = get_user(sethi, (unsigned int *)regs->pc);
11741+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
11742+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11743+
11744+ if (err)
11745+ break;
11746+
11747+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11748+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11749+ nop == 0x01000000U)
11750+ {
11751+ unsigned int addr, save, call;
11752+
11753+ if ((ba & 0xFFC00000U) == 0x30800000U)
11754+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11755+ else
11756+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11757+
11758+ err = get_user(save, (unsigned int *)addr);
11759+ err |= get_user(call, (unsigned int *)(addr+4));
11760+ err |= get_user(nop, (unsigned int *)(addr+8));
11761+ if (err)
11762+ break;
11763+
11764+#ifdef CONFIG_PAX_DLRESOLVE
11765+ if (save == 0x9DE3BFA8U &&
11766+ (call & 0xC0000000U) == 0x40000000U &&
11767+ nop == 0x01000000U)
11768+ {
11769+ struct vm_area_struct *vma;
11770+ unsigned long call_dl_resolve;
11771+
11772+ down_read(&current->mm->mmap_sem);
11773+ call_dl_resolve = current->mm->call_dl_resolve;
11774+ up_read(&current->mm->mmap_sem);
11775+ if (likely(call_dl_resolve))
11776+ goto emulate;
11777+
11778+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11779+
11780+ down_write(&current->mm->mmap_sem);
11781+ if (current->mm->call_dl_resolve) {
11782+ call_dl_resolve = current->mm->call_dl_resolve;
11783+ up_write(&current->mm->mmap_sem);
11784+ if (vma)
11785+ kmem_cache_free(vm_area_cachep, vma);
11786+ goto emulate;
11787+ }
11788+
11789+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11790+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11791+ up_write(&current->mm->mmap_sem);
11792+ if (vma)
11793+ kmem_cache_free(vm_area_cachep, vma);
11794+ return 1;
11795+ }
11796+
11797+ if (pax_insert_vma(vma, call_dl_resolve)) {
11798+ up_write(&current->mm->mmap_sem);
11799+ kmem_cache_free(vm_area_cachep, vma);
11800+ return 1;
11801+ }
11802+
11803+ current->mm->call_dl_resolve = call_dl_resolve;
11804+ up_write(&current->mm->mmap_sem);
11805+
11806+emulate:
11807+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11808+ regs->pc = call_dl_resolve;
11809+ regs->npc = addr+4;
11810+ return 3;
11811+ }
11812+#endif
11813+
11814+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11815+ if ((save & 0xFFC00000U) == 0x05000000U &&
11816+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11817+ nop == 0x01000000U)
11818+ {
11819+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11820+ regs->u_regs[UREG_G2] = addr + 4;
11821+ addr = (save & 0x003FFFFFU) << 10;
11822+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11823+ regs->pc = addr;
11824+ regs->npc = addr+4;
11825+ return 3;
11826+ }
11827+ }
11828+ } while (0);
11829+
11830+ do { /* PaX: unpatched PLT emulation step 2 */
11831+ unsigned int save, call, nop;
11832+
11833+ err = get_user(save, (unsigned int *)(regs->pc-4));
11834+ err |= get_user(call, (unsigned int *)regs->pc);
11835+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
11836+ if (err)
11837+ break;
11838+
11839+ if (save == 0x9DE3BFA8U &&
11840+ (call & 0xC0000000U) == 0x40000000U &&
11841+ nop == 0x01000000U)
11842+ {
11843+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
11844+
11845+ regs->u_regs[UREG_RETPC] = regs->pc;
11846+ regs->pc = dl_resolve;
11847+ regs->npc = dl_resolve+4;
11848+ return 3;
11849+ }
11850+ } while (0);
11851+#endif
11852+
11853+ return 1;
11854+}
11855+
11856+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11857+{
11858+ unsigned long i;
11859+
11860+ printk(KERN_ERR "PAX: bytes at PC: ");
11861+ for (i = 0; i < 8; i++) {
11862+ unsigned int c;
11863+ if (get_user(c, (unsigned int *)pc+i))
11864+ printk(KERN_CONT "???????? ");
11865+ else
11866+ printk(KERN_CONT "%08x ", c);
11867+ }
11868+ printk("\n");
11869+}
11870+#endif
11871+
11872 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11873 int text_fault)
11874 {
11875@@ -226,6 +500,24 @@ good_area:
11876 if (!(vma->vm_flags & VM_WRITE))
11877 goto bad_area;
11878 } else {
11879+
11880+#ifdef CONFIG_PAX_PAGEEXEC
11881+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
11882+ up_read(&mm->mmap_sem);
11883+ switch (pax_handle_fetch_fault(regs)) {
11884+
11885+#ifdef CONFIG_PAX_EMUPLT
11886+ case 2:
11887+ case 3:
11888+ return;
11889+#endif
11890+
11891+ }
11892+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
11893+ do_group_exit(SIGKILL);
11894+ }
11895+#endif
11896+
11897 /* Allow reads even for write-only mappings */
11898 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
11899 goto bad_area;
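
Most of the magic constants in pax_handle_fetch_fault() above are instruction-decoding masks, and the recurring (((insn | M) ^ S) + S) expression is a branchless sign-extension of a branch displacement field. A standalone worked example for the 22-bit "ba" displacement used in patched PLT emulation #2:

#include <assert.h>
#include <stdint.h>

/* branch target of a SPARC "ba,a" ((insn & 0xFFC00000) == 0x30800000) */
static uint32_t ba_target(uint32_t pc, uint32_t insn)
{
	/* force the upper 10 bits, then flip-and-add the sign bit (bit 21) */
	uint32_t disp = ((insn | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U;

	return pc + (disp << 2);	/* word displacement -> bytes */
}

int main(void)
{
	assert(ba_target(0x1000, 0x30800001U) == 0x1004);	/* disp +1 */
	assert(ba_target(0x1000, 0x30BFFFFFU) == 0x0FFC);	/* disp -1 */
	return 0;
}
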
11900diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
11901index 587cd05..fbdf17a 100644
11902--- a/arch/sparc/mm/fault_64.c
11903+++ b/arch/sparc/mm/fault_64.c
11904@@ -22,6 +22,9 @@
11905 #include <linux/kdebug.h>
11906 #include <linux/percpu.h>
11907 #include <linux/context_tracking.h>
11908+#include <linux/slab.h>
11909+#include <linux/pagemap.h>
11910+#include <linux/compiler.h>
11911
11912 #include <asm/page.h>
11913 #include <asm/pgtable.h>
11914@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
11915 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
11916 regs->tpc);
11917 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
11918- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
11919+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
11920 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
11921 dump_stack();
11922 unhandled_fault(regs->tpc, current, regs);
11923@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
11924 show_regs(regs);
11925 }
11926
11927+#ifdef CONFIG_PAX_PAGEEXEC
11928+#ifdef CONFIG_PAX_DLRESOLVE
11929+static void pax_emuplt_close(struct vm_area_struct *vma)
11930+{
11931+ vma->vm_mm->call_dl_resolve = 0UL;
11932+}
11933+
11934+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11935+{
11936+ unsigned int *kaddr;
11937+
11938+ vmf->page = alloc_page(GFP_HIGHUSER);
11939+ if (!vmf->page)
11940+ return VM_FAULT_OOM;
11941+
11942+ kaddr = kmap(vmf->page);
11943+ memset(kaddr, 0, PAGE_SIZE);
11944+ kaddr[0] = 0x9DE3BFA8U; /* save */
11945+ flush_dcache_page(vmf->page);
11946+ kunmap(vmf->page);
11947+ return VM_FAULT_MAJOR;
11948+}
11949+
11950+static const struct vm_operations_struct pax_vm_ops = {
11951+ .close = pax_emuplt_close,
11952+ .fault = pax_emuplt_fault
11953+};
11954+
11955+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11956+{
11957+ int ret;
11958+
11959+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11960+ vma->vm_mm = current->mm;
11961+ vma->vm_start = addr;
11962+ vma->vm_end = addr + PAGE_SIZE;
11963+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11964+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11965+ vma->vm_ops = &pax_vm_ops;
11966+
11967+ ret = insert_vm_struct(current->mm, vma);
11968+ if (ret)
11969+ return ret;
11970+
11971+ ++current->mm->total_vm;
11972+ return 0;
11973+}
11974+#endif
11975+
11976+/*
11977+ * PaX: decide what to do with offenders (regs->tpc = fault address)
11978+ *
11979+ * returns 1 when task should be killed
11980+ * 2 when patched PLT trampoline was detected
11981+ * 3 when unpatched PLT trampoline was detected
11982+ */
11983+static int pax_handle_fetch_fault(struct pt_regs *regs)
11984+{
11985+
11986+#ifdef CONFIG_PAX_EMUPLT
11987+ int err;
11988+
11989+ do { /* PaX: patched PLT emulation #1 */
11990+ unsigned int sethi1, sethi2, jmpl;
11991+
11992+ err = get_user(sethi1, (unsigned int *)regs->tpc);
11993+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
11994+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
11995+
11996+ if (err)
11997+ break;
11998+
11999+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
12000+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
12001+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
12002+ {
12003+ unsigned long addr;
12004+
12005+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
12006+ addr = regs->u_regs[UREG_G1];
12007+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12008+
12009+ if (test_thread_flag(TIF_32BIT))
12010+ addr &= 0xFFFFFFFFUL;
12011+
12012+ regs->tpc = addr;
12013+ regs->tnpc = addr+4;
12014+ return 2;
12015+ }
12016+ } while (0);
12017+
12018+ do { /* PaX: patched PLT emulation #2 */
12019+ unsigned int ba;
12020+
12021+ err = get_user(ba, (unsigned int *)regs->tpc);
12022+
12023+ if (err)
12024+ break;
12025+
12026+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
12027+ unsigned long addr;
12028+
12029+ if ((ba & 0xFFC00000U) == 0x30800000U)
12030+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
12031+ else
12032+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12033+
12034+ if (test_thread_flag(TIF_32BIT))
12035+ addr &= 0xFFFFFFFFUL;
12036+
12037+ regs->tpc = addr;
12038+ regs->tnpc = addr+4;
12039+ return 2;
12040+ }
12041+ } while (0);
12042+
12043+ do { /* PaX: patched PLT emulation #3 */
12044+ unsigned int sethi, bajmpl, nop;
12045+
12046+ err = get_user(sethi, (unsigned int *)regs->tpc);
12047+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
12048+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12049+
12050+ if (err)
12051+ break;
12052+
12053+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12054+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
12055+ nop == 0x01000000U)
12056+ {
12057+ unsigned long addr;
12058+
12059+ addr = (sethi & 0x003FFFFFU) << 10;
12060+ regs->u_regs[UREG_G1] = addr;
12061+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
12062+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12063+ else
12064+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12065+
12066+ if (test_thread_flag(TIF_32BIT))
12067+ addr &= 0xFFFFFFFFUL;
12068+
12069+ regs->tpc = addr;
12070+ regs->tnpc = addr+4;
12071+ return 2;
12072+ }
12073+ } while (0);
12074+
12075+ do { /* PaX: patched PLT emulation #4 */
12076+ unsigned int sethi, mov1, call, mov2;
12077+
12078+ err = get_user(sethi, (unsigned int *)regs->tpc);
12079+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
12080+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
12081+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
12082+
12083+ if (err)
12084+ break;
12085+
12086+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12087+ mov1 == 0x8210000FU &&
12088+ (call & 0xC0000000U) == 0x40000000U &&
12089+ mov2 == 0x9E100001U)
12090+ {
12091+ unsigned long addr;
12092+
12093+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
12094+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12095+
12096+ if (test_thread_flag(TIF_32BIT))
12097+ addr &= 0xFFFFFFFFUL;
12098+
12099+ regs->tpc = addr;
12100+ regs->tnpc = addr+4;
12101+ return 2;
12102+ }
12103+ } while (0);
12104+
12105+ do { /* PaX: patched PLT emulation #5 */
12106+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
12107+
12108+ err = get_user(sethi, (unsigned int *)regs->tpc);
12109+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
12110+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
12111+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
12112+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
12113+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
12114+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
12115+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
12116+
12117+ if (err)
12118+ break;
12119+
12120+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12121+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
12122+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12123+ (or1 & 0xFFFFE000U) == 0x82106000U &&
12124+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12125+ sllx == 0x83287020U &&
12126+ jmpl == 0x81C04005U &&
12127+ nop == 0x01000000U)
12128+ {
12129+ unsigned long addr;
12130+
12131+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12132+ regs->u_regs[UREG_G1] <<= 32;
12133+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12134+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
12135+ regs->tpc = addr;
12136+ regs->tnpc = addr+4;
12137+ return 2;
12138+ }
12139+ } while (0);
12140+
12141+ do { /* PaX: patched PLT emulation #6 */
12142+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
12143+
12144+ err = get_user(sethi, (unsigned int *)regs->tpc);
12145+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
12146+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
12147+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
12148+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
12149+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
12150+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
12151+
12152+ if (err)
12153+ break;
12154+
12155+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12156+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
12157+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12158+ sllx == 0x83287020U &&
12159+ (or & 0xFFFFE000U) == 0x8A116000U &&
12160+ jmpl == 0x81C04005U &&
12161+ nop == 0x01000000U)
12162+ {
12163+ unsigned long addr;
12164+
12165+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
12166+ regs->u_regs[UREG_G1] <<= 32;
12167+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
12168+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
12169+ regs->tpc = addr;
12170+ regs->tnpc = addr+4;
12171+ return 2;
12172+ }
12173+ } while (0);
12174+
12175+ do { /* PaX: unpatched PLT emulation step 1 */
12176+ unsigned int sethi, ba, nop;
12177+
12178+ err = get_user(sethi, (unsigned int *)regs->tpc);
12179+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12180+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12181+
12182+ if (err)
12183+ break;
12184+
12185+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12186+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
12187+ nop == 0x01000000U)
12188+ {
12189+ unsigned long addr;
12190+ unsigned int save, call;
12191+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
12192+
12193+ if ((ba & 0xFFC00000U) == 0x30800000U)
12194+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
12195+ else
12196+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12197+
12198+ if (test_thread_flag(TIF_32BIT))
12199+ addr &= 0xFFFFFFFFUL;
12200+
12201+ err = get_user(save, (unsigned int *)addr);
12202+ err |= get_user(call, (unsigned int *)(addr+4));
12203+ err |= get_user(nop, (unsigned int *)(addr+8));
12204+ if (err)
12205+ break;
12206+
12207+#ifdef CONFIG_PAX_DLRESOLVE
12208+ if (save == 0x9DE3BFA8U &&
12209+ (call & 0xC0000000U) == 0x40000000U &&
12210+ nop == 0x01000000U)
12211+ {
12212+ struct vm_area_struct *vma;
12213+ unsigned long call_dl_resolve;
12214+
12215+ down_read(&current->mm->mmap_sem);
12216+ call_dl_resolve = current->mm->call_dl_resolve;
12217+ up_read(&current->mm->mmap_sem);
12218+ if (likely(call_dl_resolve))
12219+ goto emulate;
12220+
12221+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
12222+
12223+ down_write(&current->mm->mmap_sem);
12224+ if (current->mm->call_dl_resolve) {
12225+ call_dl_resolve = current->mm->call_dl_resolve;
12226+ up_write(&current->mm->mmap_sem);
12227+ if (vma)
12228+ kmem_cache_free(vm_area_cachep, vma);
12229+ goto emulate;
12230+ }
12231+
12232+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
12233+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
12234+ up_write(&current->mm->mmap_sem);
12235+ if (vma)
12236+ kmem_cache_free(vm_area_cachep, vma);
12237+ return 1;
12238+ }
12239+
12240+ if (pax_insert_vma(vma, call_dl_resolve)) {
12241+ up_write(&current->mm->mmap_sem);
12242+ kmem_cache_free(vm_area_cachep, vma);
12243+ return 1;
12244+ }
12245+
12246+ current->mm->call_dl_resolve = call_dl_resolve;
12247+ up_write(&current->mm->mmap_sem);
12248+
12249+emulate:
12250+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12251+ regs->tpc = call_dl_resolve;
12252+ regs->tnpc = addr+4;
12253+ return 3;
12254+ }
12255+#endif
12256+
12257+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
12258+ if ((save & 0xFFC00000U) == 0x05000000U &&
12259+ (call & 0xFFFFE000U) == 0x85C0A000U &&
12260+ nop == 0x01000000U)
12261+ {
12262+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12263+ regs->u_regs[UREG_G2] = addr + 4;
12264+ addr = (save & 0x003FFFFFU) << 10;
12265+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12266+
12267+ if (test_thread_flag(TIF_32BIT))
12268+ addr &= 0xFFFFFFFFUL;
12269+
12270+ regs->tpc = addr;
12271+ regs->tnpc = addr+4;
12272+ return 3;
12273+ }
12274+
12275+ /* PaX: 64-bit PLT stub */
12276+ err = get_user(sethi1, (unsigned int *)addr);
12277+ err |= get_user(sethi2, (unsigned int *)(addr+4));
12278+ err |= get_user(or1, (unsigned int *)(addr+8));
12279+ err |= get_user(or2, (unsigned int *)(addr+12));
12280+ err |= get_user(sllx, (unsigned int *)(addr+16));
12281+ err |= get_user(add, (unsigned int *)(addr+20));
12282+ err |= get_user(jmpl, (unsigned int *)(addr+24));
12283+ err |= get_user(nop, (unsigned int *)(addr+28));
12284+ if (err)
12285+ break;
12286+
12287+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
12288+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12289+ (or1 & 0xFFFFE000U) == 0x88112000U &&
12290+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12291+ sllx == 0x89293020U &&
12292+ add == 0x8A010005U &&
12293+ jmpl == 0x89C14000U &&
12294+ nop == 0x01000000U)
12295+ {
12296+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12297+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12298+ regs->u_regs[UREG_G4] <<= 32;
12299+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12300+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
12301+ regs->u_regs[UREG_G4] = addr + 24;
12302+ addr = regs->u_regs[UREG_G5];
12303+ regs->tpc = addr;
12304+ regs->tnpc = addr+4;
12305+ return 3;
12306+ }
12307+ }
12308+ } while (0);
12309+
12310+#ifdef CONFIG_PAX_DLRESOLVE
12311+ do { /* PaX: unpatched PLT emulation step 2 */
12312+ unsigned int save, call, nop;
12313+
12314+ err = get_user(save, (unsigned int *)(regs->tpc-4));
12315+ err |= get_user(call, (unsigned int *)regs->tpc);
12316+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
12317+ if (err)
12318+ break;
12319+
12320+ if (save == 0x9DE3BFA8U &&
12321+ (call & 0xC0000000U) == 0x40000000U &&
12322+ nop == 0x01000000U)
12323+ {
12324+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12325+
12326+ if (test_thread_flag(TIF_32BIT))
12327+ dl_resolve &= 0xFFFFFFFFUL;
12328+
12329+ regs->u_regs[UREG_RETPC] = regs->tpc;
12330+ regs->tpc = dl_resolve;
12331+ regs->tnpc = dl_resolve+4;
12332+ return 3;
12333+ }
12334+ } while (0);
12335+#endif
12336+
12337+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12338+ unsigned int sethi, ba, nop;
12339+
12340+ err = get_user(sethi, (unsigned int *)regs->tpc);
12341+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12342+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12343+
12344+ if (err)
12345+ break;
12346+
12347+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12348+ (ba & 0xFFF00000U) == 0x30600000U &&
12349+ nop == 0x01000000U)
12350+ {
12351+ unsigned long addr;
12352+
12353+ addr = (sethi & 0x003FFFFFU) << 10;
12354+ regs->u_regs[UREG_G1] = addr;
12355+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12356+
12357+ if (test_thread_flag(TIF_32BIT))
12358+ addr &= 0xFFFFFFFFUL;
12359+
12360+ regs->tpc = addr;
12361+ regs->tnpc = addr+4;
12362+ return 2;
12363+ }
12364+ } while (0);
12365+
12366+#endif
12367+
12368+ return 1;
12369+}
12370+
12371+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12372+{
12373+ unsigned long i;
12374+
12375+ printk(KERN_ERR "PAX: bytes at PC: ");
12376+ for (i = 0; i < 8; i++) {
12377+ unsigned int c;
12378+ if (get_user(c, (unsigned int *)pc+i))
12379+ printk(KERN_CONT "???????? ");
12380+ else
12381+ printk(KERN_CONT "%08x ", c);
12382+ }
12383+ printk("\n");
12384+}
12385+#endif
12386+
12387 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12388 {
12389 enum ctx_state prev_state = exception_enter();
12390@@ -350,6 +813,29 @@ retry:
12391 if (!vma)
12392 goto bad_area;
12393
12394+#ifdef CONFIG_PAX_PAGEEXEC
12395+ /* PaX: detect ITLB misses on non-exec pages */
12396+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12397+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12398+ {
12399+ if (address != regs->tpc)
12400+ goto good_area;
12401+
12402+ up_read(&mm->mmap_sem);
12403+ switch (pax_handle_fetch_fault(regs)) {
12404+
12405+#ifdef CONFIG_PAX_EMUPLT
12406+ case 2:
12407+ case 3:
12408+ return;
12409+#endif
12410+
12411+ }
12412+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12413+ do_group_exit(SIGKILL);
12414+ }
12415+#endif
12416+
12417 /* Pure DTLB misses do not tell us whether the fault causing
12418 * load/store/atomic was a write or not, it only says that there
12419 * was no match. So in such a case we (carefully) read the
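
The 64-bit handler repeats the same decoding trick with 64-bit masks, plus one extra step: targets computed for compat tasks are truncated to 32 bits (TIF_32BIT). A hypothetical helper mirroring the "call"-displacement expressions above:

#include <assert.h>
#include <stdint.h>

static uint64_t call_target(uint64_t tpc, uint32_t call, int compat)
{
	/* sign-extend the 30-bit word displacement of a SPARC "call" */
	uint64_t disp = ((call | 0xFFFFFFFFC0000000ULL) ^ 0x20000000ULL)
			+ 0x20000000ULL;
	uint64_t addr = tpc + (disp << 2);

	if (compat)		/* TIF_32BIT tasks live below 4GB */
		addr &= 0xFFFFFFFFULL;
	return addr;
}

int main(void)
{
	assert(call_target(0x100000, 0x40000001U, 0) == 0x100004);	/* +1 */
	assert(call_target(0x100000, 0x7FFFFFFFU, 0) == 0x0FFFFC);	/* -1 */
	return 0;
}
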
12420diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12421index d329537..2c3746a 100644
12422--- a/arch/sparc/mm/hugetlbpage.c
12423+++ b/arch/sparc/mm/hugetlbpage.c
12424@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12425 unsigned long addr,
12426 unsigned long len,
12427 unsigned long pgoff,
12428- unsigned long flags)
12429+ unsigned long flags,
12430+ unsigned long offset)
12431 {
12432+ struct mm_struct *mm = current->mm;
12433 unsigned long task_size = TASK_SIZE;
12434 struct vm_unmapped_area_info info;
12435
12436@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12437
12438 info.flags = 0;
12439 info.length = len;
12440- info.low_limit = TASK_UNMAPPED_BASE;
12441+ info.low_limit = mm->mmap_base;
12442 info.high_limit = min(task_size, VA_EXCLUDE_START);
12443 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12444 info.align_offset = 0;
12445+ info.threadstack_offset = offset;
12446 addr = vm_unmapped_area(&info);
12447
12448 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12449 VM_BUG_ON(addr != -ENOMEM);
12450 info.low_limit = VA_EXCLUDE_END;
12451+
12452+#ifdef CONFIG_PAX_RANDMMAP
12453+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12454+ info.low_limit += mm->delta_mmap;
12455+#endif
12456+
12457 info.high_limit = task_size;
12458 addr = vm_unmapped_area(&info);
12459 }
12460@@ -55,7 +64,8 @@ static unsigned long
12461 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12462 const unsigned long len,
12463 const unsigned long pgoff,
12464- const unsigned long flags)
12465+ const unsigned long flags,
12466+ const unsigned long offset)
12467 {
12468 struct mm_struct *mm = current->mm;
12469 unsigned long addr = addr0;
12470@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12471 info.high_limit = mm->mmap_base;
12472 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12473 info.align_offset = 0;
12474+ info.threadstack_offset = offset;
12475 addr = vm_unmapped_area(&info);
12476
12477 /*
12478@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12479 VM_BUG_ON(addr != -ENOMEM);
12480 info.flags = 0;
12481 info.low_limit = TASK_UNMAPPED_BASE;
12482+
12483+#ifdef CONFIG_PAX_RANDMMAP
12484+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12485+ info.low_limit += mm->delta_mmap;
12486+#endif
12487+
12488 info.high_limit = STACK_TOP32;
12489 addr = vm_unmapped_area(&info);
12490 }
12491@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12492 struct mm_struct *mm = current->mm;
12493 struct vm_area_struct *vma;
12494 unsigned long task_size = TASK_SIZE;
12495+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12496
12497 if (test_thread_flag(TIF_32BIT))
12498 task_size = STACK_TOP32;
12499@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12500 return addr;
12501 }
12502
12503+#ifdef CONFIG_PAX_RANDMMAP
12504+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12505+#endif
12506+
12507 if (addr) {
12508 addr = ALIGN(addr, HPAGE_SIZE);
12509 vma = find_vma(mm, addr);
12510- if (task_size - len >= addr &&
12511- (!vma || addr + len <= vma->vm_start))
12512+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12513 return addr;
12514 }
12515 if (mm->get_unmapped_area == arch_get_unmapped_area)
12516 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12517- pgoff, flags);
12518+ pgoff, flags, offset);
12519 else
12520 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12521- pgoff, flags);
12522+ pgoff, flags, offset);
12523 }
12524
12525 pte_t *huge_pte_alloc(struct mm_struct *mm,
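
The hugetlbpage hunks thread a grsecurity-supplied threadstack_offset through every vm_unmapped_area() call and, under PAX_RANDMMAP, shift the fallback search floor by the per-mm delta_mmap so the retry stays inside the randomized mmap range. A minimal sketch of that fallback adjustment, assuming the PaX-added mm fields (pax_flags, delta_mmap, MF_PAX_RANDMMAP; the flag value below is a stand-in):

#include <stdio.h>

#define MF_PAX_RANDMMAP 0x1UL	/* stand-in for the PaX flag value */

struct mm_sim {
	unsigned long pax_flags;
	unsigned long delta_mmap;
};

static unsigned long fallback_low_limit(const struct mm_sim *mm,
					unsigned long base)
{
	unsigned long low = base;	/* e.g. VA_EXCLUDE_END */

	if (mm->pax_flags & MF_PAX_RANDMMAP)
		low += mm->delta_mmap;	/* keep the retry randomized too */
	return low;
}

int main(void)
{
	struct mm_sim mm = { MF_PAX_RANDMMAP, 0x10000UL };

	printf("%#lx\n", fallback_low_limit(&mm, 0x40000000UL));
	return 0;
}
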
12526diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12527index 2cfb0f2..e917d9f 100644
12528--- a/arch/sparc/mm/init_64.c
12529+++ b/arch/sparc/mm/init_64.c
12530@@ -189,9 +189,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12531 int num_kernel_image_mappings;
12532
12533 #ifdef CONFIG_DEBUG_DCFLUSH
12534-atomic_t dcpage_flushes = ATOMIC_INIT(0);
12535+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
12536 #ifdef CONFIG_SMP
12537-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12538+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12539 #endif
12540 #endif
12541
12542@@ -199,7 +199,7 @@ inline void flush_dcache_page_impl(struct page *page)
12543 {
12544 BUG_ON(tlb_type == hypervisor);
12545 #ifdef CONFIG_DEBUG_DCFLUSH
12546- atomic_inc(&dcpage_flushes);
12547+ atomic_inc_unchecked(&dcpage_flushes);
12548 #endif
12549
12550 #ifdef DCACHE_ALIASING_POSSIBLE
12551@@ -471,10 +471,10 @@ void mmu_info(struct seq_file *m)
12552
12553 #ifdef CONFIG_DEBUG_DCFLUSH
12554 seq_printf(m, "DCPageFlushes\t: %d\n",
12555- atomic_read(&dcpage_flushes));
12556+ atomic_read_unchecked(&dcpage_flushes));
12557 #ifdef CONFIG_SMP
12558 seq_printf(m, "DCPageFlushesXC\t: %d\n",
12559- atomic_read(&dcpage_flushes_xcall));
12560+ atomic_read_unchecked(&dcpage_flushes_xcall));
12561 #endif /* CONFIG_SMP */
12562 #endif /* CONFIG_DEBUG_DCFLUSH */
12563 }
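
The init_64.c conversion is the standard REFCOUNT cleanup: dcpage_flushes is a debug statistic, not an object lifetime count, so wrapping is harmless and the counter moves to the unchecked type to avoid tripping the new overflow trap. The pattern in miniature (a sketch assuming the PaX atomic_unchecked_t API):

#include <asm/atomic.h>

static atomic_unchecked_t flushes = ATOMIC_INIT(0);

static inline void count_flush(void)
{
	atomic_inc_unchecked(&flushes);	/* may wrap; that is fine here */
}

static inline int flushes_seen(void)
{
	return atomic_read_unchecked(&flushes);
}
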
12564diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
12565index 4f3006b..453f625f 100644
12566--- a/arch/tile/Kconfig
12567+++ b/arch/tile/Kconfig
12568@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
12569
12570 config KEXEC
12571 bool "kexec system call"
12572+ depends on !GRKERNSEC_KMEM
12573 ---help---
12574 kexec is a system call that implements the ability to shutdown your
12575 current kernel, and to start another kernel. It is like a reboot
12576diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
12577index 7b11c5f..755a026 100644
12578--- a/arch/tile/include/asm/atomic_64.h
12579+++ b/arch/tile/include/asm/atomic_64.h
12580@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
12581
12582 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12583
12584+#define atomic64_read_unchecked(v) atomic64_read(v)
12585+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
12586+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
12587+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
12588+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
12589+#define atomic64_inc_unchecked(v) atomic64_inc(v)
12590+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
12591+#define atomic64_dec_unchecked(v) atomic64_dec(v)
12592+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
12593+
12594 /* Define this to indicate that cmpxchg is an efficient operation. */
12595 #define __HAVE_ARCH_CMPXCHG
12596
12597diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
12598index 6160761..00cac88 100644
12599--- a/arch/tile/include/asm/cache.h
12600+++ b/arch/tile/include/asm/cache.h
12601@@ -15,11 +15,12 @@
12602 #ifndef _ASM_TILE_CACHE_H
12603 #define _ASM_TILE_CACHE_H
12604
12605+#include <linux/const.h>
12606 #include <arch/chip.h>
12607
12608 /* bytes per L1 data cache line */
12609 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
12610-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12611+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12612
12613 /* bytes per L2 cache line */
12614 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
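
The cache.h change here (and the identical um and unicore32 hunks below) replaces the plain int constant with _AC(1,UL), which yields an unsigned long in C yet still expands to a bare 1 when the header is pulled into assembly. A minimal sketch of the _AC() helper as defined in include/uapi/linux/const.h:

#include <stdio.h>

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	6
#define L1_CACHE_BYTES	(_AC(1, UL) << L1_CACHE_SHIFT)

int main(void)
{
	printf("%zu\n", sizeof(L1_CACHE_BYTES));	/* 8 on LP64: a UL */
	return 0;
}
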
12615diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
12616index b6cde32..c0cb736 100644
12617--- a/arch/tile/include/asm/uaccess.h
12618+++ b/arch/tile/include/asm/uaccess.h
12619@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
12620 const void __user *from,
12621 unsigned long n)
12622 {
12623- int sz = __compiletime_object_size(to);
12624+ size_t sz = __compiletime_object_size(to);
12625
12626- if (likely(sz == -1 || sz >= n))
12627+ if (likely(sz == (size_t)-1 || sz >= n))
12628 n = _copy_from_user(to, from, n);
12629 else
12630 copy_from_user_overflow();
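
The uaccess.h fix is a signedness repair: __compiletime_object_size() reports sizes as size_t, with (size_t)-1 meaning "unknown", so storing the result in an int truncates large values on 64-bit, and the "unknown" comparison then needs the explicit cast. Illustrative arithmetic only:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t unknown = (size_t)-1;	/* "no object size available" */
	size_t big = 0x100000000UL;	/* a >4GB object, 64-bit only */

	printf("as int: %d %d\n", (int)unknown, (int)big);	/* -1 0 */
	printf("unknown? %d\n", unknown == (size_t)-1);		/* 1 */
	return 0;
}
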
12631diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
12632index e514899..f8743c4 100644
12633--- a/arch/tile/mm/hugetlbpage.c
12634+++ b/arch/tile/mm/hugetlbpage.c
12635@@ -207,6 +207,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
12636 info.high_limit = TASK_SIZE;
12637 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12638 info.align_offset = 0;
12639+ info.threadstack_offset = 0;
12640 return vm_unmapped_area(&info);
12641 }
12642
12643@@ -224,6 +225,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
12644 info.high_limit = current->mm->mmap_base;
12645 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12646 info.align_offset = 0;
12647+ info.threadstack_offset = 0;
12648 addr = vm_unmapped_area(&info);
12649
12650 /*
12651diff --git a/arch/um/Makefile b/arch/um/Makefile
12652index e4b1a96..16162f8 100644
12653--- a/arch/um/Makefile
12654+++ b/arch/um/Makefile
12655@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
12656 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
12657 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
12658
12659+ifdef CONSTIFY_PLUGIN
12660+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12661+endif
12662+
12663 #This will adjust *FLAGS accordingly to the platform.
12664 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
12665
12666diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
12667index 19e1bdd..3665b77 100644
12668--- a/arch/um/include/asm/cache.h
12669+++ b/arch/um/include/asm/cache.h
12670@@ -1,6 +1,7 @@
12671 #ifndef __UM_CACHE_H
12672 #define __UM_CACHE_H
12673
12674+#include <linux/const.h>
12675
12676 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
12677 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12678@@ -12,6 +13,6 @@
12679 # define L1_CACHE_SHIFT 5
12680 #endif
12681
12682-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12683+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12684
12685 #endif
12686diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
12687index 2e0a6b1..a64d0f5 100644
12688--- a/arch/um/include/asm/kmap_types.h
12689+++ b/arch/um/include/asm/kmap_types.h
12690@@ -8,6 +8,6 @@
12691
12692 /* No more #include "asm/arch/kmap_types.h" ! */
12693
12694-#define KM_TYPE_NR 14
12695+#define KM_TYPE_NR 15
12696
12697 #endif
12698diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
12699index 5ff53d9..5850cdf 100644
12700--- a/arch/um/include/asm/page.h
12701+++ b/arch/um/include/asm/page.h
12702@@ -14,6 +14,9 @@
12703 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
12704 #define PAGE_MASK (~(PAGE_SIZE-1))
12705
12706+#define ktla_ktva(addr) (addr)
12707+#define ktva_ktla(addr) (addr)
12708+
12709 #ifndef __ASSEMBLY__
12710
12711 struct page;
12712diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
12713index 0032f92..cd151e0 100644
12714--- a/arch/um/include/asm/pgtable-3level.h
12715+++ b/arch/um/include/asm/pgtable-3level.h
12716@@ -58,6 +58,7 @@
12717 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
12718 #define pud_populate(mm, pud, pmd) \
12719 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
12720+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
12721
12722 #ifdef CONFIG_64BIT
12723 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
12724diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
12725index f17bca8..48adb87 100644
12726--- a/arch/um/kernel/process.c
12727+++ b/arch/um/kernel/process.c
12728@@ -356,22 +356,6 @@ int singlestepping(void * t)
12729 return 2;
12730 }
12731
12732-/*
12733- * Only x86 and x86_64 have an arch_align_stack().
12734- * All other arches have "#define arch_align_stack(x) (x)"
12735- * in their asm/exec.h
12736- * As this is included in UML from asm-um/system-generic.h,
12737- * we can use it to behave as the subarch does.
12738- */
12739-#ifndef arch_align_stack
12740-unsigned long arch_align_stack(unsigned long sp)
12741-{
12742- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
12743- sp -= get_random_int() % 8192;
12744- return sp & ~0xf;
12745-}
12746-#endif
12747-
12748 unsigned long get_wchan(struct task_struct *p)
12749 {
12750 unsigned long stack_page, sp, ip;
12751diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
12752index ad8f795..2c7eec6 100644
12753--- a/arch/unicore32/include/asm/cache.h
12754+++ b/arch/unicore32/include/asm/cache.h
12755@@ -12,8 +12,10 @@
12756 #ifndef __UNICORE_CACHE_H__
12757 #define __UNICORE_CACHE_H__
12758
12759-#define L1_CACHE_SHIFT (5)
12760-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12761+#include <linux/const.h>
12762+
12763+#define L1_CACHE_SHIFT 5
12764+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12765
12766 /*
12767 * Memory returned by kmalloc() may be used for DMA, so we must make
12768diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
12769index 27adfd9..bc3551d 100644
12770--- a/arch/x86/Kconfig
12771+++ b/arch/x86/Kconfig
12772@@ -128,7 +128,7 @@ config X86
12773 select RTC_LIB
12774 select HAVE_DEBUG_STACKOVERFLOW
12775 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
12776- select HAVE_CC_STACKPROTECTOR
12777+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
12778 select GENERIC_CPU_AUTOPROBE
12779 select HAVE_ARCH_AUDITSYSCALL
12780 select ARCH_SUPPORTS_ATOMIC_RMW
12781@@ -253,7 +253,7 @@ config X86_HT
12782
12783 config X86_32_LAZY_GS
12784 def_bool y
12785- depends on X86_32 && !CC_STACKPROTECTOR
12786+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
12787
12788 config ARCH_HWEIGHT_CFLAGS
12789 string
12790@@ -549,6 +549,7 @@ config SCHED_OMIT_FRAME_POINTER
12791
12792 menuconfig HYPERVISOR_GUEST
12793 bool "Linux guest support"
12794+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
12795 ---help---
12796 Say Y here to enable options for running Linux under various hyper-
12797 visors. This option enables basic hypervisor detection and platform
12798@@ -1076,6 +1077,7 @@ choice
12799
12800 config NOHIGHMEM
12801 bool "off"
12802+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12803 ---help---
12804 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
12805 However, the address space of 32-bit x86 processors is only 4
12806@@ -1112,6 +1114,7 @@ config NOHIGHMEM
12807
12808 config HIGHMEM4G
12809 bool "4GB"
12810+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12811 ---help---
12812 Select this if you have a 32-bit processor and between 1 and 4
12813 gigabytes of physical RAM.
12814@@ -1164,7 +1167,7 @@ config PAGE_OFFSET
12815 hex
12816 default 0xB0000000 if VMSPLIT_3G_OPT
12817 default 0x80000000 if VMSPLIT_2G
12818- default 0x78000000 if VMSPLIT_2G_OPT
12819+ default 0x70000000 if VMSPLIT_2G_OPT
12820 default 0x40000000 if VMSPLIT_1G
12821 default 0xC0000000
12822 depends on X86_32
12823@@ -1578,6 +1581,7 @@ source kernel/Kconfig.hz
12824
12825 config KEXEC
12826 bool "kexec system call"
12827+ depends on !GRKERNSEC_KMEM
12828 ---help---
12829 kexec is a system call that implements the ability to shutdown your
12830 current kernel, and to start another kernel. It is like a reboot
12831@@ -1728,7 +1732,9 @@ config X86_NEED_RELOCS
12832
12833 config PHYSICAL_ALIGN
12834 hex "Alignment value to which kernel should be aligned"
12835- default "0x200000"
12836+ default "0x1000000"
12837+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
12838+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
12839 range 0x2000 0x1000000 if X86_32
12840 range 0x200000 0x1000000 if X86_64
12841 ---help---
12842@@ -1811,6 +1817,7 @@ config COMPAT_VDSO
12843 def_bool n
12844 prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
12845 depends on X86_32 || IA32_EMULATION
12846+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
12847 ---help---
12848 Certain buggy versions of glibc will crash if they are
12849 presented with a 32-bit vDSO that is not mapped at the address
12850diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
12851index 6983314..54ad7e8 100644
12852--- a/arch/x86/Kconfig.cpu
12853+++ b/arch/x86/Kconfig.cpu
12854@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
12855
12856 config X86_F00F_BUG
12857 def_bool y
12858- depends on M586MMX || M586TSC || M586 || M486
12859+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
12860
12861 config X86_INVD_BUG
12862 def_bool y
12863@@ -327,7 +327,7 @@ config X86_INVD_BUG
12864
12865 config X86_ALIGNMENT_16
12866 def_bool y
12867- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12868+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12869
12870 config X86_INTEL_USERCOPY
12871 def_bool y
12872@@ -369,7 +369,7 @@ config X86_CMPXCHG64
12873 # generates cmov.
12874 config X86_CMOV
12875 def_bool y
12876- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12877+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12878
12879 config X86_MINIMUM_CPU_FAMILY
12880 int
12881diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
12882index 61bd2ad..50b625d 100644
12883--- a/arch/x86/Kconfig.debug
12884+++ b/arch/x86/Kconfig.debug
12885@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
12886 config DEBUG_RODATA
12887 bool "Write protect kernel read-only data structures"
12888 default y
12889- depends on DEBUG_KERNEL
12890+ depends on DEBUG_KERNEL && BROKEN
12891 ---help---
12892 Mark the kernel read-only data as write-protected in the pagetables,
12893 in order to catch accidental (and incorrect) writes to such const
12894@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
12895
12896 config DEBUG_SET_MODULE_RONX
12897 bool "Set loadable kernel module data as NX and text as RO"
12898- depends on MODULES
12899+ depends on MODULES && BROKEN
12900 ---help---
12901 This option helps catch unintended modifications to loadable
12902 kernel module's text and read-only data. It also prevents execution
12903diff --git a/arch/x86/Makefile b/arch/x86/Makefile
12904index 33f71b0..c2cefa2 100644
12905--- a/arch/x86/Makefile
12906+++ b/arch/x86/Makefile
12907@@ -71,9 +71,6 @@ ifeq ($(CONFIG_X86_32),y)
12908 # CPU-specific tuning. Anything which can be shared with UML should go here.
12909 include $(srctree)/arch/x86/Makefile_32.cpu
12910 KBUILD_CFLAGS += $(cflags-y)
12911-
12912- # temporary until string.h is fixed
12913- KBUILD_CFLAGS += -ffreestanding
12914 else
12915 BITS := 64
12916 UTS_MACHINE := x86_64
12917@@ -114,6 +111,9 @@ else
12918 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
12919 endif
12920
12921+# temporary until string.h is fixed
12922+KBUILD_CFLAGS += -ffreestanding
12923+
12924 # Make sure compiler does not have buggy stack-protector support.
12925 ifdef CONFIG_CC_STACKPROTECTOR
12926 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
12927@@ -271,3 +271,12 @@ define archhelp
12928 echo ' FDINITRD=file initrd for the booted kernel'
12929 echo ' kvmconfig - Enable additional options for guest kernel support'
12930 endef
12931+
12932+define OLD_LD
12933+
12934+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
12935+*** Please upgrade your binutils to 2.18 or newer
12936+endef
12937+
12938+archprepare:
12939+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
12940diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
12941index dbe8dd2..2f0a98f 100644
12942--- a/arch/x86/boot/Makefile
12943+++ b/arch/x86/boot/Makefile
12944@@ -52,6 +52,9 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
12945 # ---------------------------------------------------------------------------
12946
12947 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
12948+ifdef CONSTIFY_PLUGIN
12949+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12950+endif
12951 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12952 GCOV_PROFILE := n
12953
12954diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
12955index 878e4b9..20537ab 100644
12956--- a/arch/x86/boot/bitops.h
12957+++ b/arch/x86/boot/bitops.h
12958@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12959 u8 v;
12960 const u32 *p = (const u32 *)addr;
12961
12962- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12963+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12964 return v;
12965 }
12966
12967@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12968
12969 static inline void set_bit(int nr, void *addr)
12970 {
12971- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12972+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12973 }
12974
12975 #endif /* BOOT_BITOPS_H */
12976diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
12977index bd49ec6..94c7f58 100644
12978--- a/arch/x86/boot/boot.h
12979+++ b/arch/x86/boot/boot.h
12980@@ -84,7 +84,7 @@ static inline void io_delay(void)
12981 static inline u16 ds(void)
12982 {
12983 u16 seg;
12984- asm("movw %%ds,%0" : "=rm" (seg));
12985+ asm volatile("movw %%ds,%0" : "=rm" (seg));
12986 return seg;
12987 }
12988
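The bitops.h and boot.h hunks above (and the cpucheck.c ones further down) all add volatile to asm statements whose effects are invisible to the compiler: without it, GCC is free to CSE, reorder, or drop an asm whose outputs look unused or whose inputs look unchanged. A reduced x86 example of the hazard:

#include <stdio.h>

static unsigned short read_ds(void)
{
	unsigned short seg;

	/* a non-volatile asm with no inputs could be CSE'd across calls;
	 * volatile forces a fresh "movw" every time it executes */
	asm volatile("movw %%ds,%0" : "=rm" (seg));
	return seg;
}

int main(void)
{
	printf("%#hx %#hx\n", read_ds(), read_ds());
	return 0;
}
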
12989diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
12990index 0fcd913..3bb5c42 100644
12991--- a/arch/x86/boot/compressed/Makefile
12992+++ b/arch/x86/boot/compressed/Makefile
12993@@ -16,6 +16,9 @@ KBUILD_CFLAGS += $(cflags-y)
12994 KBUILD_CFLAGS += -mno-mmx -mno-sse
12995 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
12996 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
12997+ifdef CONSTIFY_PLUGIN
12998+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12999+endif
13000
13001 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13002 GCOV_PROFILE := n
13003diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
13004index a53440e..c3dbf1e 100644
13005--- a/arch/x86/boot/compressed/efi_stub_32.S
13006+++ b/arch/x86/boot/compressed/efi_stub_32.S
13007@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
13008 * parameter 2, ..., param n. To make things easy, we save the return
13009 * address of efi_call_phys in a global variable.
13010 */
13011- popl %ecx
13012- movl %ecx, saved_return_addr(%edx)
13013- /* get the function pointer into ECX*/
13014- popl %ecx
13015- movl %ecx, efi_rt_function_ptr(%edx)
13016+ popl saved_return_addr(%edx)
13017+ popl efi_rt_function_ptr(%edx)
13018
13019 /*
13020 * 3. Call the physical function.
13021 */
13022- call *%ecx
13023+ call *efi_rt_function_ptr(%edx)
13024
13025 /*
13026 * 4. Balance the stack. And because EAX contain the return value,
13027@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
13028 1: popl %edx
13029 subl $1b, %edx
13030
13031- movl efi_rt_function_ptr(%edx), %ecx
13032- pushl %ecx
13033+ pushl efi_rt_function_ptr(%edx)
13034
13035 /*
13036 * 10. Push the saved return address onto the stack and return.
13037 */
13038- movl saved_return_addr(%edx), %ecx
13039- pushl %ecx
13040- ret
13041+ jmpl *saved_return_addr(%edx)
13042 ENDPROC(efi_call_phys)
13043 .previous
13044
13045diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
13046index cbed140..5f2ca57 100644
13047--- a/arch/x86/boot/compressed/head_32.S
13048+++ b/arch/x86/boot/compressed/head_32.S
13049@@ -140,10 +140,10 @@ preferred_addr:
13050 addl %eax, %ebx
13051 notl %eax
13052 andl %eax, %ebx
13053- cmpl $LOAD_PHYSICAL_ADDR, %ebx
13054+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
13055 jge 1f
13056 #endif
13057- movl $LOAD_PHYSICAL_ADDR, %ebx
13058+ movl $____LOAD_PHYSICAL_ADDR, %ebx
13059 1:
13060
13061 /* Target address to relocate to for decompression */
13062diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
13063index 2884e0c..904a2f7 100644
13064--- a/arch/x86/boot/compressed/head_64.S
13065+++ b/arch/x86/boot/compressed/head_64.S
13066@@ -94,10 +94,10 @@ ENTRY(startup_32)
13067 addl %eax, %ebx
13068 notl %eax
13069 andl %eax, %ebx
13070- cmpl $LOAD_PHYSICAL_ADDR, %ebx
13071+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
13072 jge 1f
13073 #endif
13074- movl $LOAD_PHYSICAL_ADDR, %ebx
13075+ movl $____LOAD_PHYSICAL_ADDR, %ebx
13076 1:
13077
13078 /* Target address to relocate to for decompression */
13079@@ -322,10 +322,10 @@ preferred_addr:
13080 addq %rax, %rbp
13081 notq %rax
13082 andq %rax, %rbp
13083- cmpq $LOAD_PHYSICAL_ADDR, %rbp
13084+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
13085 jge 1f
13086 #endif
13087- movq $LOAD_PHYSICAL_ADDR, %rbp
13088+ movq $____LOAD_PHYSICAL_ADDR, %rbp
13089 1:
13090
13091 /* Target address to relocate to for decompression */
13092@@ -431,8 +431,8 @@ gdt:
13093 .long gdt
13094 .word 0
13095 .quad 0x0000000000000000 /* NULL descriptor */
13096- .quad 0x00af9a000000ffff /* __KERNEL_CS */
13097- .quad 0x00cf92000000ffff /* __KERNEL_DS */
13098+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
13099+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
13100 .quad 0x0080890000000000 /* TS descriptor */
13101 .quad 0x0000000000000000 /* TS continued */
13102 gdt_end:
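
The gdt change flips exactly one bit per descriptor: 0x9a -> 0x9b and 0x92 -> 0x93 pre-set the "accessed" bit (descriptor bit 40) in the type field. A plausible reading is that the CPU then never needs to write the descriptor itself, which matters once KERNEXEC-style protections make such tables read-only. Checking the delta:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t old_cs = 0x00af9a000000ffffULL;	/* __KERNEL_CS before */
	uint64_t new_cs = 0x00af9b000000ffffULL;	/* __KERNEL_CS after */

	/* only descriptor bit 40, the segment "accessed" flag, differs */
	printf("%#llx\n", (unsigned long long)(old_cs ^ new_cs));
	return 0;
}
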
13103diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
13104index 57ab74d..7c52182 100644
13105--- a/arch/x86/boot/compressed/misc.c
13106+++ b/arch/x86/boot/compressed/misc.c
13107@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
13108 * Calculate the delta between where vmlinux was linked to load
13109 * and where it was actually loaded.
13110 */
13111- delta = min_addr - LOAD_PHYSICAL_ADDR;
13112+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
13113 if (!delta) {
13114 debug_putstr("No relocation needed... ");
13115 return;
13116@@ -312,7 +312,7 @@ static void parse_elf(void *output)
13117 Elf32_Ehdr ehdr;
13118 Elf32_Phdr *phdrs, *phdr;
13119 #endif
13120- void *dest;
13121+ void *dest, *prev;
13122 int i;
13123
13124 memcpy(&ehdr, output, sizeof(ehdr));
13125@@ -339,13 +339,16 @@ static void parse_elf(void *output)
13126 case PT_LOAD:
13127 #ifdef CONFIG_RELOCATABLE
13128 dest = output;
13129- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
13130+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
13131 #else
13132 dest = (void *)(phdr->p_paddr);
13133 #endif
13134 memcpy(dest,
13135 output + phdr->p_offset,
13136 phdr->p_filesz);
13137+ if (i)
13138+ memset(prev, 0xff, dest - prev);
13139+ prev = dest + phdr->p_filesz;
13140 break;
13141 default: /* Ignore other PT_* */ break;
13142 }
13143@@ -395,7 +398,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
13144 error("Destination address too large");
13145 #endif
13146 #ifndef CONFIG_RELOCATABLE
13147- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
13148+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
13149 error("Wrong destination address");
13150 #endif
13151
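In misc.c, parse_elf() now fills the padding between consecutive PT_LOAD segments with 0xff bytes, apparently so stale decompression data is not left lying between them. Note that prev is only assigned after a PT_LOAD has been copied, so the code implicitly assumes the first program header processed is a PT_LOAD; the sketch below sidesteps that by iterating over the load segments only:

#include <string.h>

static void copy_load_segments(unsigned char **dest, const size_t *off,
			       const size_t *len, int n,
			       const unsigned char *image)
{
	unsigned char *prev = NULL;
	int i;

	for (i = 0; i < n; i++) {
		memcpy(dest[i], image + off[i], len[i]);
		if (i)	/* poison the gap left before this segment */
			memset(prev, 0xff, (size_t)(dest[i] - prev));
		prev = dest[i] + len[i];
	}
}
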
13152diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
13153index 1fd7d57..0f7d096 100644
13154--- a/arch/x86/boot/cpucheck.c
13155+++ b/arch/x86/boot/cpucheck.c
13156@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13157 u32 ecx = MSR_K7_HWCR;
13158 u32 eax, edx;
13159
13160- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13161+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13162 eax &= ~(1 << 15);
13163- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13164+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13165
13166 get_cpuflags(); /* Make sure it really did something */
13167 err = check_cpuflags();
13168@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13169 u32 ecx = MSR_VIA_FCR;
13170 u32 eax, edx;
13171
13172- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13173+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13174 eax |= (1<<1)|(1<<7);
13175- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13176+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13177
13178 set_bit(X86_FEATURE_CX8, cpu.flags);
13179 err = check_cpuflags();
13180@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13181 u32 eax, edx;
13182 u32 level = 1;
13183
13184- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13185- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13186- asm("cpuid"
13187+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13188+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13189+ asm volatile("cpuid"
13190 : "+a" (level), "=d" (cpu.flags[0])
13191 : : "ecx", "ebx");
13192- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13193+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13194
13195 err = check_cpuflags();
13196 } else if (err == 0x01 &&
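
The cpucheck.c hunks turn bare `asm` statements into `asm volatile`. The `wrmsr` statements have no output operands and the `rdmsr` results can look dead to GCC, so without `volatile` the compiler is entitled to delete or reorder them even though they toggle live MSR bits. The safe pattern, sketched with hypothetical wrapper names (rdmsr/wrmsr fault outside ring 0, so main() deliberately never calls them):

#include <stdio.h>
#include <stdint.h>

static inline uint64_t rdmsr_sketch(uint32_t msr)
{
	uint32_t lo, hi;

	/* volatile: the read has side effects GCC cannot model. */
	asm volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr));
	return ((uint64_t)hi << 32) | lo;
}

static inline void wrmsr_sketch(uint32_t msr, uint64_t val)
{
	/* No outputs at all: without volatile this could legally vanish. */
	asm volatile("wrmsr" : : "c" (msr),
		     "a" ((uint32_t)val), "d" ((uint32_t)(val >> 32)));
}

int main(void)
{
	/* Compile-only illustration of the constraint/volatile pattern. */
	printf("see rdmsr_sketch()/wrmsr_sketch() above\n");
	return 0;
}
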
13197diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
13198index 7a6d43a..edf6e40 100644
13199--- a/arch/x86/boot/header.S
13200+++ b/arch/x86/boot/header.S
13201@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
13202 # single linked list of
13203 # struct setup_data
13204
13205-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
13206+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
13207
13208 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
13209+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13210+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
13211+#else
13212 #define VO_INIT_SIZE (VO__end - VO__text)
13213+#endif
13214 #if ZO_INIT_SIZE > VO_INIT_SIZE
13215 #define INIT_SIZE ZO_INIT_SIZE
13216 #else
13217diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
13218index db75d07..8e6d0af 100644
13219--- a/arch/x86/boot/memory.c
13220+++ b/arch/x86/boot/memory.c
13221@@ -19,7 +19,7 @@
13222
13223 static int detect_memory_e820(void)
13224 {
13225- int count = 0;
13226+ unsigned int count = 0;
13227 struct biosregs ireg, oreg;
13228 struct e820entry *desc = boot_params.e820_map;
13229 static struct e820entry buf; /* static so it is zeroed */
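
Making the e820 entry counter unsigned is part of the patch's broader signed/unsigned hardening: a counter that only grows and later bounds an array is safer unsigned, since a hypothetically negative value would sail straight through a `< LIMIT` check. A generic illustration (the wrap here is contrived; the patch does not claim detect_memory_e820 was actually exploitable):

#include <stdio.h>

#define LIMIT 128	/* stand-in for the e820 map capacity */

int main(void)
{
	int s = -1;				/* pretend an underflow happened */
	unsigned int u = (unsigned int)-1;	/* same bit pattern, unsigned */

	if (s < LIMIT)
		printf("signed %d passes the bound check (bad)\n", s);
	if (!(u < LIMIT))
		printf("unsigned %u fails the bound check (good)\n", u);
	return 0;
}
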
13230diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
13231index ba3e100..6501b8f 100644
13232--- a/arch/x86/boot/video-vesa.c
13233+++ b/arch/x86/boot/video-vesa.c
13234@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
13235
13236 boot_params.screen_info.vesapm_seg = oreg.es;
13237 boot_params.screen_info.vesapm_off = oreg.di;
13238+ boot_params.screen_info.vesapm_size = oreg.cx;
13239 }
13240
13241 /*
13242diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
13243index 43eda28..5ab5fdb 100644
13244--- a/arch/x86/boot/video.c
13245+++ b/arch/x86/boot/video.c
13246@@ -96,7 +96,7 @@ static void store_mode_params(void)
13247 static unsigned int get_entry(void)
13248 {
13249 char entry_buf[4];
13250- int i, len = 0;
13251+ unsigned int i, len = 0;
13252 int key;
13253 unsigned int v;
13254
13255diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
13256index 9105655..41779c1 100644
13257--- a/arch/x86/crypto/aes-x86_64-asm_64.S
13258+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
13259@@ -8,6 +8,8 @@
13260 * including this sentence is retained in full.
13261 */
13262
13263+#include <asm/alternative-asm.h>
13264+
13265 .extern crypto_ft_tab
13266 .extern crypto_it_tab
13267 .extern crypto_fl_tab
13268@@ -70,6 +72,8 @@
13269 je B192; \
13270 leaq 32(r9),r9;
13271
13272+#define ret pax_force_retaddr; ret
13273+
13274 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
13275 movq r1,r2; \
13276 movq r3,r4; \
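
Rather than annotate each return site, this file gets a single `#define ret pax_force_retaddr; ret`, so every literal `ret` written later in the file picks up the instrumentation during preprocessing (the `ret` inside the definition is not re-expanded, so there is no recursion). The same token trick in C, with a hypothetical tracing macro standing in for pax_force_retaddr; note the braces, since a two-statement macro under an unbraced `if` would misbehave (assembly has no such construct, so the .S file is safe):

#include <stdio.h>

#define trace_exit() printf("leaving %s\n", __func__)
#define return trace_exit(); return	/* every "return" now traces first */

static int f(int x)
{
	if (x < 0) {
		return -1;	/* expands to: trace_exit(); return -1; */
	}
	return x * 2;
}

#undef return			/* limit the substitution's blast radius */

int main(void)
{
	printf("%d\n", f(21));
	return 0;
}
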
13277diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
13278index 477e9d7..c92c7d8 100644
13279--- a/arch/x86/crypto/aesni-intel_asm.S
13280+++ b/arch/x86/crypto/aesni-intel_asm.S
13281@@ -31,6 +31,7 @@
13282
13283 #include <linux/linkage.h>
13284 #include <asm/inst.h>
13285+#include <asm/alternative-asm.h>
13286
13287 #ifdef __x86_64__
13288 .data
13289@@ -205,7 +206,7 @@ enc: .octa 0x2
13290 * num_initial_blocks = b mod 4
13291 * encrypt the initial num_initial_blocks blocks and apply ghash on
13292 * the ciphertext
13293-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13294+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13295 * are clobbered
13296 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13297 */
13298@@ -214,8 +215,8 @@ enc: .octa 0x2
13299 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13300 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13301 mov arg7, %r10 # %r10 = AAD
13302- mov arg8, %r12 # %r12 = aadLen
13303- mov %r12, %r11
13304+ mov arg8, %r15 # %r15 = aadLen
13305+ mov %r15, %r11
13306 pxor %xmm\i, %xmm\i
13307 _get_AAD_loop\num_initial_blocks\operation:
13308 movd (%r10), \TMP1
13309@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13310 psrldq $4, %xmm\i
13311 pxor \TMP1, %xmm\i
13312 add $4, %r10
13313- sub $4, %r12
13314+ sub $4, %r15
13315 jne _get_AAD_loop\num_initial_blocks\operation
13316 cmp $16, %r11
13317 je _get_AAD_loop2_done\num_initial_blocks\operation
13318- mov $16, %r12
13319+ mov $16, %r15
13320 _get_AAD_loop2\num_initial_blocks\operation:
13321 psrldq $4, %xmm\i
13322- sub $4, %r12
13323- cmp %r11, %r12
13324+ sub $4, %r15
13325+ cmp %r11, %r15
13326 jne _get_AAD_loop2\num_initial_blocks\operation
13327 _get_AAD_loop2_done\num_initial_blocks\operation:
13328 movdqa SHUF_MASK(%rip), %xmm14
13329@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
13330 * num_initial_blocks = b mod 4
13331 * encrypt the initial num_initial_blocks blocks and apply ghash on
13332 * the ciphertext
13333-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13334+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13335 * are clobbered
13336 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13337 */
13338@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
13339 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13340 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13341 mov arg7, %r10 # %r10 = AAD
13342- mov arg8, %r12 # %r12 = aadLen
13343- mov %r12, %r11
13344+ mov arg8, %r15 # %r15 = aadLen
13345+ mov %r15, %r11
13346 pxor %xmm\i, %xmm\i
13347 _get_AAD_loop\num_initial_blocks\operation:
13348 movd (%r10), \TMP1
13349@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13350 psrldq $4, %xmm\i
13351 pxor \TMP1, %xmm\i
13352 add $4, %r10
13353- sub $4, %r12
13354+ sub $4, %r15
13355 jne _get_AAD_loop\num_initial_blocks\operation
13356 cmp $16, %r11
13357 je _get_AAD_loop2_done\num_initial_blocks\operation
13358- mov $16, %r12
13359+ mov $16, %r15
13360 _get_AAD_loop2\num_initial_blocks\operation:
13361 psrldq $4, %xmm\i
13362- sub $4, %r12
13363- cmp %r11, %r12
13364+ sub $4, %r15
13365+ cmp %r11, %r15
13366 jne _get_AAD_loop2\num_initial_blocks\operation
13367 _get_AAD_loop2_done\num_initial_blocks\operation:
13368 movdqa SHUF_MASK(%rip), %xmm14
13369@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
13370 *
13371 *****************************************************************************/
13372 ENTRY(aesni_gcm_dec)
13373- push %r12
13374+ push %r15
13375 push %r13
13376 push %r14
13377 mov %rsp, %r14
13378@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
13379 */
13380 sub $VARIABLE_OFFSET, %rsp
13381 and $~63, %rsp # align rsp to 64 bytes
13382- mov %arg6, %r12
13383- movdqu (%r12), %xmm13 # %xmm13 = HashKey
13384+ mov %arg6, %r15
13385+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
13386 movdqa SHUF_MASK(%rip), %xmm2
13387 PSHUFB_XMM %xmm2, %xmm13
13388
13389@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
13390 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
13391 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
13392 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
13393- mov %r13, %r12
13394- and $(3<<4), %r12
13395+ mov %r13, %r15
13396+ and $(3<<4), %r15
13397 jz _initial_num_blocks_is_0_decrypt
13398- cmp $(2<<4), %r12
13399+ cmp $(2<<4), %r15
13400 jb _initial_num_blocks_is_1_decrypt
13401 je _initial_num_blocks_is_2_decrypt
13402 _initial_num_blocks_is_3_decrypt:
13403@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
13404 sub $16, %r11
13405 add %r13, %r11
13406 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
13407- lea SHIFT_MASK+16(%rip), %r12
13408- sub %r13, %r12
13409+ lea SHIFT_MASK+16(%rip), %r15
13410+ sub %r13, %r15
13411 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
13412 # (%r13 is the number of bytes in plaintext mod 16)
13413- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13414+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13415 PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 bytes
13416
13417 movdqa %xmm1, %xmm2
13418 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
13419- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13420+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13421 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
13422 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
13423 pand %xmm1, %xmm2
13424@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
13425 sub $1, %r13
13426 jne _less_than_8_bytes_left_decrypt
13427 _multiple_of_16_bytes_decrypt:
13428- mov arg8, %r12 # %r13 = aadLen (number of bytes)
13429- shl $3, %r12 # convert into number of bits
13430- movd %r12d, %xmm15 # len(A) in %xmm15
13431+ mov arg8, %r15 # %r15 = aadLen (number of bytes)
13432+ shl $3, %r15 # convert into number of bits
13433+ movd %r15d, %xmm15 # len(A) in %xmm15
13434 shl $3, %arg4 # len(C) in bits (*128)
13435 MOVQ_R64_XMM %arg4, %xmm1
13436 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13437@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
13438 mov %r14, %rsp
13439 pop %r14
13440 pop %r13
13441- pop %r12
13442+ pop %r15
13443+ pax_force_retaddr
13444 ret
13445 ENDPROC(aesni_gcm_dec)
13446
13447@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
13448 * poly = x^128 + x^127 + x^126 + x^121 + 1
13449 ***************************************************************************/
13450 ENTRY(aesni_gcm_enc)
13451- push %r12
13452+ push %r15
13453 push %r13
13454 push %r14
13455 mov %rsp, %r14
13456@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
13457 #
13458 sub $VARIABLE_OFFSET, %rsp
13459 and $~63, %rsp
13460- mov %arg6, %r12
13461- movdqu (%r12), %xmm13
13462+ mov %arg6, %r15
13463+ movdqu (%r15), %xmm13
13464 movdqa SHUF_MASK(%rip), %xmm2
13465 PSHUFB_XMM %xmm2, %xmm13
13466
13467@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
13468 movdqa %xmm13, HashKey(%rsp)
13469 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
13470 and $-16, %r13
13471- mov %r13, %r12
13472+ mov %r13, %r15
13473
13474 # Encrypt first few blocks
13475
13476- and $(3<<4), %r12
13477+ and $(3<<4), %r15
13478 jz _initial_num_blocks_is_0_encrypt
13479- cmp $(2<<4), %r12
13480+ cmp $(2<<4), %r15
13481 jb _initial_num_blocks_is_1_encrypt
13482 je _initial_num_blocks_is_2_encrypt
13483 _initial_num_blocks_is_3_encrypt:
13484@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
13485 sub $16, %r11
13486 add %r13, %r11
13487 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
13488- lea SHIFT_MASK+16(%rip), %r12
13489- sub %r13, %r12
13490+ lea SHIFT_MASK+16(%rip), %r15
13491+ sub %r13, %r15
13492 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
13493 # (%r13 is the number of bytes in plaintext mod 16)
13494- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13495+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13496 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
13497 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
13498- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13499+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13500 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
13501 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
13502 movdqa SHUF_MASK(%rip), %xmm10
13503@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
13504 sub $1, %r13
13505 jne _less_than_8_bytes_left_encrypt
13506 _multiple_of_16_bytes_encrypt:
13507- mov arg8, %r12 # %r12 = addLen (number of bytes)
13508- shl $3, %r12
13509- movd %r12d, %xmm15 # len(A) in %xmm15
13510+ mov arg8, %r15 # %r15 = aadLen (number of bytes)
13511+ shl $3, %r15
13512+ movd %r15d, %xmm15 # len(A) in %xmm15
13513 shl $3, %arg4 # len(C) in bits (*128)
13514 MOVQ_R64_XMM %arg4, %xmm1
13515 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13516@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
13517 mov %r14, %rsp
13518 pop %r14
13519 pop %r13
13520- pop %r12
13521+ pop %r15
13522+ pax_force_retaddr
13523 ret
13524 ENDPROC(aesni_gcm_enc)
13525
13526@@ -1722,6 +1725,7 @@ _key_expansion_256a:
13527 pxor %xmm1, %xmm0
13528 movaps %xmm0, (TKEYP)
13529 add $0x10, TKEYP
13530+ pax_force_retaddr
13531 ret
13532 ENDPROC(_key_expansion_128)
13533 ENDPROC(_key_expansion_256a)
13534@@ -1748,6 +1752,7 @@ _key_expansion_192a:
13535 shufps $0b01001110, %xmm2, %xmm1
13536 movaps %xmm1, 0x10(TKEYP)
13537 add $0x20, TKEYP
13538+ pax_force_retaddr
13539 ret
13540 ENDPROC(_key_expansion_192a)
13541
13542@@ -1768,6 +1773,7 @@ _key_expansion_192b:
13543
13544 movaps %xmm0, (TKEYP)
13545 add $0x10, TKEYP
13546+ pax_force_retaddr
13547 ret
13548 ENDPROC(_key_expansion_192b)
13549
13550@@ -1781,6 +1787,7 @@ _key_expansion_256b:
13551 pxor %xmm1, %xmm2
13552 movaps %xmm2, (TKEYP)
13553 add $0x10, TKEYP
13554+ pax_force_retaddr
13555 ret
13556 ENDPROC(_key_expansion_256b)
13557
13558@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
13559 #ifndef __x86_64__
13560 popl KEYP
13561 #endif
13562+ pax_force_retaddr
13563 ret
13564 ENDPROC(aesni_set_key)
13565
13566@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
13567 popl KLEN
13568 popl KEYP
13569 #endif
13570+ pax_force_retaddr
13571 ret
13572 ENDPROC(aesni_enc)
13573
13574@@ -1974,6 +1983,7 @@ _aesni_enc1:
13575 AESENC KEY STATE
13576 movaps 0x70(TKEYP), KEY
13577 AESENCLAST KEY STATE
13578+ pax_force_retaddr
13579 ret
13580 ENDPROC(_aesni_enc1)
13581
13582@@ -2083,6 +2093,7 @@ _aesni_enc4:
13583 AESENCLAST KEY STATE2
13584 AESENCLAST KEY STATE3
13585 AESENCLAST KEY STATE4
13586+ pax_force_retaddr
13587 ret
13588 ENDPROC(_aesni_enc4)
13589
13590@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
13591 popl KLEN
13592 popl KEYP
13593 #endif
13594+ pax_force_retaddr
13595 ret
13596 ENDPROC(aesni_dec)
13597
13598@@ -2164,6 +2176,7 @@ _aesni_dec1:
13599 AESDEC KEY STATE
13600 movaps 0x70(TKEYP), KEY
13601 AESDECLAST KEY STATE
13602+ pax_force_retaddr
13603 ret
13604 ENDPROC(_aesni_dec1)
13605
13606@@ -2273,6 +2286,7 @@ _aesni_dec4:
13607 AESDECLAST KEY STATE2
13608 AESDECLAST KEY STATE3
13609 AESDECLAST KEY STATE4
13610+ pax_force_retaddr
13611 ret
13612 ENDPROC(_aesni_dec4)
13613
13614@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
13615 popl KEYP
13616 popl LEN
13617 #endif
13618+ pax_force_retaddr
13619 ret
13620 ENDPROC(aesni_ecb_enc)
13621
13622@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
13623 popl KEYP
13624 popl LEN
13625 #endif
13626+ pax_force_retaddr
13627 ret
13628 ENDPROC(aesni_ecb_dec)
13629
13630@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
13631 popl LEN
13632 popl IVP
13633 #endif
13634+ pax_force_retaddr
13635 ret
13636 ENDPROC(aesni_cbc_enc)
13637
13638@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
13639 popl LEN
13640 popl IVP
13641 #endif
13642+ pax_force_retaddr
13643 ret
13644 ENDPROC(aesni_cbc_dec)
13645
13646@@ -2550,6 +2568,7 @@ _aesni_inc_init:
13647 mov $1, TCTR_LOW
13648 MOVQ_R64_XMM TCTR_LOW INC
13649 MOVQ_R64_XMM CTR TCTR_LOW
13650+ pax_force_retaddr
13651 ret
13652 ENDPROC(_aesni_inc_init)
13653
13654@@ -2579,6 +2598,7 @@ _aesni_inc:
13655 .Linc_low:
13656 movaps CTR, IV
13657 PSHUFB_XMM BSWAP_MASK IV
13658+ pax_force_retaddr
13659 ret
13660 ENDPROC(_aesni_inc)
13661
13662@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
13663 .Lctr_enc_ret:
13664 movups IV, (IVP)
13665 .Lctr_enc_just_ret:
13666+ pax_force_retaddr
13667 ret
13668 ENDPROC(aesni_ctr_enc)
13669
13670@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
13671 pxor INC, STATE4
13672 movdqu STATE4, 0x70(OUTP)
13673
13674+ pax_force_retaddr
13675 ret
13676 ENDPROC(aesni_xts_crypt8)
13677
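
Besides putting pax_force_retaddr before every return, the aesni diff mechanically renames %r12 to %r15 (later files use %r14). The hunks themselves do not say why; the plausible reading is that PaX reserves %r12 globally for its KERNEXEC function-pointer mask, so hand-written assembly must migrate its scratch state to another callee-saved register. What reserving a register looks like from C, using GCC's local register variables (the mask value is illustrative; `gcc -ffixed-r12` is the whole-file analogue):

#include <stdio.h>

int main(void)
{
	/* Pin "mask" to %r12, the way a plugin-reserved register would be. */
	register unsigned long mask asm("r12") = 0xffff800000000000UL;
	unsigned long ptr = 0x12345678UL;

	/* Any asm that used to scratch %r12 must now avoid it, which is
	 * exactly the mechanical %r12 -> %r15/%r14 rename seen above. */
	asm volatile("orq %1, %0" : "+r" (ptr) : "r" (mask));
	printf("%#lx\n", ptr);
	return 0;
}
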
13678diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13679index 246c670..466e2d6 100644
13680--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
13681+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13682@@ -21,6 +21,7 @@
13683 */
13684
13685 #include <linux/linkage.h>
13686+#include <asm/alternative-asm.h>
13687
13688 .file "blowfish-x86_64-asm.S"
13689 .text
13690@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
13691 jnz .L__enc_xor;
13692
13693 write_block();
13694+ pax_force_retaddr
13695 ret;
13696 .L__enc_xor:
13697 xor_block();
13698+ pax_force_retaddr
13699 ret;
13700 ENDPROC(__blowfish_enc_blk)
13701
13702@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
13703
13704 movq %r11, %rbp;
13705
13706+ pax_force_retaddr
13707 ret;
13708 ENDPROC(blowfish_dec_blk)
13709
13710@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
13711
13712 popq %rbx;
13713 popq %rbp;
13714+ pax_force_retaddr
13715 ret;
13716
13717 .L__enc_xor4:
13718@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
13719
13720 popq %rbx;
13721 popq %rbp;
13722+ pax_force_retaddr
13723 ret;
13724 ENDPROC(__blowfish_enc_blk_4way)
13725
13726@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
13727 popq %rbx;
13728 popq %rbp;
13729
13730+ pax_force_retaddr
13731 ret;
13732 ENDPROC(blowfish_dec_blk_4way)
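
The blowfish diff is pure pax_force_retaddr instrumentation; the macro comes from the newly included asm/alternative-asm.h. To my understanding it expands, under KERNEXEC, to a one-instruction fixup such as "btsq $63,(%rsp)" that forces the saved return address into the kernel half of the address space, so even a corrupted return cannot land in userland (the classic ret2usr pattern). The effect at the value level, as a sketch:

#include <stdio.h>
#include <stdint.h>

static uint64_t force_kernel_range(uint64_t retaddr)
{
	return retaddr | (1ULL << 63);	/* the "btsq $63" effect */
}

int main(void)
{
	uint64_t user = 0x00007f0012345678ULL;	/* typical userland address */
	uint64_t kern = 0xffffffff81000000ULL;	/* typical kernel text */

	/* A user pointer becomes non-canonical/kernel-ranged: it faults
	 * instead of executing attacker-controlled user memory. */
	printf("user %#llx -> %#llx\n", (unsigned long long)user,
	       (unsigned long long)force_kernel_range(user));
	/* A legitimate kernel return address is unchanged. */
	printf("kern %#llx -> %#llx\n", (unsigned long long)kern,
	       (unsigned long long)force_kernel_range(kern));
	return 0;
}
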
13733diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13734index ce71f92..1dce7ec 100644
13735--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13736+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13737@@ -16,6 +16,7 @@
13738 */
13739
13740 #include <linux/linkage.h>
13741+#include <asm/alternative-asm.h>
13742
13743 #define CAMELLIA_TABLE_BYTE_LEN 272
13744
13745@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13746 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
13747 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
13748 %rcx, (%r9));
13749+ pax_force_retaddr
13750 ret;
13751 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13752
13753@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13754 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
13755 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
13756 %rax, (%r9));
13757+ pax_force_retaddr
13758 ret;
13759 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13760
13761@@ -780,6 +783,7 @@ __camellia_enc_blk16:
13762 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13763 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
13764
13765+ pax_force_retaddr
13766 ret;
13767
13768 .align 8
13769@@ -865,6 +869,7 @@ __camellia_dec_blk16:
13770 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13771 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
13772
13773+ pax_force_retaddr
13774 ret;
13775
13776 .align 8
13777@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
13778 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13779 %xmm8, %rsi);
13780
13781+ pax_force_retaddr
13782 ret;
13783 ENDPROC(camellia_ecb_enc_16way)
13784
13785@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
13786 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13787 %xmm8, %rsi);
13788
13789+ pax_force_retaddr
13790 ret;
13791 ENDPROC(camellia_ecb_dec_16way)
13792
13793@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
13794 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13795 %xmm8, %rsi);
13796
13797+ pax_force_retaddr
13798 ret;
13799 ENDPROC(camellia_cbc_dec_16way)
13800
13801@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
13802 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13803 %xmm8, %rsi);
13804
13805+ pax_force_retaddr
13806 ret;
13807 ENDPROC(camellia_ctr_16way)
13808
13809@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
13810 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13811 %xmm8, %rsi);
13812
13813+ pax_force_retaddr
13814 ret;
13815 ENDPROC(camellia_xts_crypt_16way)
13816
13817diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13818index 0e0b886..5a3123c 100644
13819--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13820+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13821@@ -11,6 +11,7 @@
13822 */
13823
13824 #include <linux/linkage.h>
13825+#include <asm/alternative-asm.h>
13826
13827 #define CAMELLIA_TABLE_BYTE_LEN 272
13828
13829@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13830 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
13831 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
13832 %rcx, (%r9));
13833+ pax_force_retaddr
13834 ret;
13835 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13836
13837@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13838 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
13839 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
13840 %rax, (%r9));
13841+ pax_force_retaddr
13842 ret;
13843 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13844
13845@@ -820,6 +823,7 @@ __camellia_enc_blk32:
13846 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13847 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
13848
13849+ pax_force_retaddr
13850 ret;
13851
13852 .align 8
13853@@ -905,6 +909,7 @@ __camellia_dec_blk32:
13854 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13855 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
13856
13857+ pax_force_retaddr
13858 ret;
13859
13860 .align 8
13861@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
13862
13863 vzeroupper;
13864
13865+ pax_force_retaddr
13866 ret;
13867 ENDPROC(camellia_ecb_enc_32way)
13868
13869@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
13870
13871 vzeroupper;
13872
13873+ pax_force_retaddr
13874 ret;
13875 ENDPROC(camellia_ecb_dec_32way)
13876
13877@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
13878
13879 vzeroupper;
13880
13881+ pax_force_retaddr
13882 ret;
13883 ENDPROC(camellia_cbc_dec_32way)
13884
13885@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
13886
13887 vzeroupper;
13888
13889+ pax_force_retaddr
13890 ret;
13891 ENDPROC(camellia_ctr_32way)
13892
13893@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
13894
13895 vzeroupper;
13896
13897+ pax_force_retaddr
13898 ret;
13899 ENDPROC(camellia_xts_crypt_32way)
13900
13901diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
13902index 310319c..db3d7b5 100644
13903--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
13904+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
13905@@ -21,6 +21,7 @@
13906 */
13907
13908 #include <linux/linkage.h>
13909+#include <asm/alternative-asm.h>
13910
13911 .file "camellia-x86_64-asm_64.S"
13912 .text
13913@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
13914 enc_outunpack(mov, RT1);
13915
13916 movq RRBP, %rbp;
13917+ pax_force_retaddr
13918 ret;
13919
13920 .L__enc_xor:
13921 enc_outunpack(xor, RT1);
13922
13923 movq RRBP, %rbp;
13924+ pax_force_retaddr
13925 ret;
13926 ENDPROC(__camellia_enc_blk)
13927
13928@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
13929 dec_outunpack();
13930
13931 movq RRBP, %rbp;
13932+ pax_force_retaddr
13933 ret;
13934 ENDPROC(camellia_dec_blk)
13935
13936@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
13937
13938 movq RRBP, %rbp;
13939 popq %rbx;
13940+ pax_force_retaddr
13941 ret;
13942
13943 .L__enc2_xor:
13944@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
13945
13946 movq RRBP, %rbp;
13947 popq %rbx;
13948+ pax_force_retaddr
13949 ret;
13950 ENDPROC(__camellia_enc_blk_2way)
13951
13952@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
13953
13954 movq RRBP, %rbp;
13955 movq RXOR, %rbx;
13956+ pax_force_retaddr
13957 ret;
13958 ENDPROC(camellia_dec_blk_2way)
13959diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13960index c35fd5d..2d8c7db 100644
13961--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13962+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13963@@ -24,6 +24,7 @@
13964 */
13965
13966 #include <linux/linkage.h>
13967+#include <asm/alternative-asm.h>
13968
13969 .file "cast5-avx-x86_64-asm_64.S"
13970
13971@@ -281,6 +282,7 @@ __cast5_enc_blk16:
13972 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13973 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13974
13975+ pax_force_retaddr
13976 ret;
13977 ENDPROC(__cast5_enc_blk16)
13978
13979@@ -352,6 +354,7 @@ __cast5_dec_blk16:
13980 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13981 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13982
13983+ pax_force_retaddr
13984 ret;
13985
13986 .L__skip_dec:
13987@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
13988 vmovdqu RR4, (6*4*4)(%r11);
13989 vmovdqu RL4, (7*4*4)(%r11);
13990
13991+ pax_force_retaddr
13992 ret;
13993 ENDPROC(cast5_ecb_enc_16way)
13994
13995@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
13996 vmovdqu RR4, (6*4*4)(%r11);
13997 vmovdqu RL4, (7*4*4)(%r11);
13998
13999+ pax_force_retaddr
14000 ret;
14001 ENDPROC(cast5_ecb_dec_16way)
14002
14003@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
14004 * %rdx: src
14005 */
14006
14007- pushq %r12;
14008+ pushq %r14;
14009
14010 movq %rsi, %r11;
14011- movq %rdx, %r12;
14012+ movq %rdx, %r14;
14013
14014 vmovdqu (0*16)(%rdx), RL1;
14015 vmovdqu (1*16)(%rdx), RR1;
14016@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
14017 call __cast5_dec_blk16;
14018
14019 /* xor with src */
14020- vmovq (%r12), RX;
14021+ vmovq (%r14), RX;
14022 vpshufd $0x4f, RX, RX;
14023 vpxor RX, RR1, RR1;
14024- vpxor 0*16+8(%r12), RL1, RL1;
14025- vpxor 1*16+8(%r12), RR2, RR2;
14026- vpxor 2*16+8(%r12), RL2, RL2;
14027- vpxor 3*16+8(%r12), RR3, RR3;
14028- vpxor 4*16+8(%r12), RL3, RL3;
14029- vpxor 5*16+8(%r12), RR4, RR4;
14030- vpxor 6*16+8(%r12), RL4, RL4;
14031+ vpxor 0*16+8(%r14), RL1, RL1;
14032+ vpxor 1*16+8(%r14), RR2, RR2;
14033+ vpxor 2*16+8(%r14), RL2, RL2;
14034+ vpxor 3*16+8(%r14), RR3, RR3;
14035+ vpxor 4*16+8(%r14), RL3, RL3;
14036+ vpxor 5*16+8(%r14), RR4, RR4;
14037+ vpxor 6*16+8(%r14), RL4, RL4;
14038
14039 vmovdqu RR1, (0*16)(%r11);
14040 vmovdqu RL1, (1*16)(%r11);
14041@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
14042 vmovdqu RR4, (6*16)(%r11);
14043 vmovdqu RL4, (7*16)(%r11);
14044
14045- popq %r12;
14046+ popq %r14;
14047
14048+ pax_force_retaddr
14049 ret;
14050 ENDPROC(cast5_cbc_dec_16way)
14051
14052@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
14053 * %rcx: iv (big endian, 64bit)
14054 */
14055
14056- pushq %r12;
14057+ pushq %r14;
14058
14059 movq %rsi, %r11;
14060- movq %rdx, %r12;
14061+ movq %rdx, %r14;
14062
14063 vpcmpeqd RTMP, RTMP, RTMP;
14064 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
14065@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
14066 call __cast5_enc_blk16;
14067
14068 /* dst = src ^ iv */
14069- vpxor (0*16)(%r12), RR1, RR1;
14070- vpxor (1*16)(%r12), RL1, RL1;
14071- vpxor (2*16)(%r12), RR2, RR2;
14072- vpxor (3*16)(%r12), RL2, RL2;
14073- vpxor (4*16)(%r12), RR3, RR3;
14074- vpxor (5*16)(%r12), RL3, RL3;
14075- vpxor (6*16)(%r12), RR4, RR4;
14076- vpxor (7*16)(%r12), RL4, RL4;
14077+ vpxor (0*16)(%r14), RR1, RR1;
14078+ vpxor (1*16)(%r14), RL1, RL1;
14079+ vpxor (2*16)(%r14), RR2, RR2;
14080+ vpxor (3*16)(%r14), RL2, RL2;
14081+ vpxor (4*16)(%r14), RR3, RR3;
14082+ vpxor (5*16)(%r14), RL3, RL3;
14083+ vpxor (6*16)(%r14), RR4, RR4;
14084+ vpxor (7*16)(%r14), RL4, RL4;
14085 vmovdqu RR1, (0*16)(%r11);
14086 vmovdqu RL1, (1*16)(%r11);
14087 vmovdqu RR2, (2*16)(%r11);
14088@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
14089 vmovdqu RR4, (6*16)(%r11);
14090 vmovdqu RL4, (7*16)(%r11);
14091
14092- popq %r12;
14093+ popq %r14;
14094
14095+ pax_force_retaddr
14096 ret;
14097 ENDPROC(cast5_ctr_16way)
14098diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
14099index e3531f8..e123f35 100644
14100--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
14101+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
14102@@ -24,6 +24,7 @@
14103 */
14104
14105 #include <linux/linkage.h>
14106+#include <asm/alternative-asm.h>
14107 #include "glue_helper-asm-avx.S"
14108
14109 .file "cast6-avx-x86_64-asm_64.S"
14110@@ -295,6 +296,7 @@ __cast6_enc_blk8:
14111 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
14112 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
14113
14114+ pax_force_retaddr
14115 ret;
14116 ENDPROC(__cast6_enc_blk8)
14117
14118@@ -340,6 +342,7 @@ __cast6_dec_blk8:
14119 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
14120 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
14121
14122+ pax_force_retaddr
14123 ret;
14124 ENDPROC(__cast6_dec_blk8)
14125
14126@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
14127
14128 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14129
14130+ pax_force_retaddr
14131 ret;
14132 ENDPROC(cast6_ecb_enc_8way)
14133
14134@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
14135
14136 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14137
14138+ pax_force_retaddr
14139 ret;
14140 ENDPROC(cast6_ecb_dec_8way)
14141
14142@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
14143 * %rdx: src
14144 */
14145
14146- pushq %r12;
14147+ pushq %r14;
14148
14149 movq %rsi, %r11;
14150- movq %rdx, %r12;
14151+ movq %rdx, %r14;
14152
14153 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14154
14155 call __cast6_dec_blk8;
14156
14157- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14158+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14159
14160- popq %r12;
14161+ popq %r14;
14162
14163+ pax_force_retaddr
14164 ret;
14165 ENDPROC(cast6_cbc_dec_8way)
14166
14167@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
14168 * %rcx: iv (little endian, 128bit)
14169 */
14170
14171- pushq %r12;
14172+ pushq %r14;
14173
14174 movq %rsi, %r11;
14175- movq %rdx, %r12;
14176+ movq %rdx, %r14;
14177
14178 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14179 RD2, RX, RKR, RKM);
14180
14181 call __cast6_enc_blk8;
14182
14183- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14184+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14185
14186- popq %r12;
14187+ popq %r14;
14188
14189+ pax_force_retaddr
14190 ret;
14191 ENDPROC(cast6_ctr_8way)
14192
14193@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
14194 /* dst <= regs xor IVs(in dst) */
14195 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14196
14197+ pax_force_retaddr
14198 ret;
14199 ENDPROC(cast6_xts_enc_8way)
14200
14201@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
14202 /* dst <= regs xor IVs(in dst) */
14203 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14204
14205+ pax_force_retaddr
14206 ret;
14207 ENDPROC(cast6_xts_dec_8way)
14208diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14209index dbc4339..de6e120 100644
14210--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14211+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14212@@ -45,6 +45,7 @@
14213
14214 #include <asm/inst.h>
14215 #include <linux/linkage.h>
14216+#include <asm/alternative-asm.h>
14217
14218 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
14219
14220@@ -312,6 +313,7 @@ do_return:
14221 popq %rsi
14222 popq %rdi
14223 popq %rbx
14224+ pax_force_retaddr
14225 ret
14226
14227 ################################################################
14228diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14229index 5d1e007..098cb4f 100644
14230--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
14231+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14232@@ -18,6 +18,7 @@
14233
14234 #include <linux/linkage.h>
14235 #include <asm/inst.h>
14236+#include <asm/alternative-asm.h>
14237
14238 .data
14239
14240@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
14241 psrlq $1, T2
14242 pxor T2, T1
14243 pxor T1, DATA
14244+ pax_force_retaddr
14245 ret
14246 ENDPROC(__clmul_gf128mul_ble)
14247
14248@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
14249 call __clmul_gf128mul_ble
14250 PSHUFB_XMM BSWAP DATA
14251 movups DATA, (%rdi)
14252+ pax_force_retaddr
14253 ret
14254 ENDPROC(clmul_ghash_mul)
14255
14256@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
14257 PSHUFB_XMM BSWAP DATA
14258 movups DATA, (%rdi)
14259 .Lupdate_just_ret:
14260+ pax_force_retaddr
14261 ret
14262 ENDPROC(clmul_ghash_update)
14263diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14264index 9279e0b..c4b3d2c 100644
14265--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
14266+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14267@@ -1,4 +1,5 @@
14268 #include <linux/linkage.h>
14269+#include <asm/alternative-asm.h>
14270
14271 # enter salsa20_encrypt_bytes
14272 ENTRY(salsa20_encrypt_bytes)
14273@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
14274 add %r11,%rsp
14275 mov %rdi,%rax
14276 mov %rsi,%rdx
14277+ pax_force_retaddr
14278 ret
14279 # bytesatleast65:
14280 ._bytesatleast65:
14281@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
14282 add %r11,%rsp
14283 mov %rdi,%rax
14284 mov %rsi,%rdx
14285+ pax_force_retaddr
14286 ret
14287 ENDPROC(salsa20_keysetup)
14288
14289@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
14290 add %r11,%rsp
14291 mov %rdi,%rax
14292 mov %rsi,%rdx
14293+ pax_force_retaddr
14294 ret
14295 ENDPROC(salsa20_ivsetup)
14296diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14297index 2f202f4..d9164d6 100644
14298--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14299+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14300@@ -24,6 +24,7 @@
14301 */
14302
14303 #include <linux/linkage.h>
14304+#include <asm/alternative-asm.h>
14305 #include "glue_helper-asm-avx.S"
14306
14307 .file "serpent-avx-x86_64-asm_64.S"
14308@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
14309 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14310 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14311
14312+ pax_force_retaddr
14313 ret;
14314 ENDPROC(__serpent_enc_blk8_avx)
14315
14316@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
14317 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14318 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14319
14320+ pax_force_retaddr
14321 ret;
14322 ENDPROC(__serpent_dec_blk8_avx)
14323
14324@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
14325
14326 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14327
14328+ pax_force_retaddr
14329 ret;
14330 ENDPROC(serpent_ecb_enc_8way_avx)
14331
14332@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
14333
14334 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14335
14336+ pax_force_retaddr
14337 ret;
14338 ENDPROC(serpent_ecb_dec_8way_avx)
14339
14340@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
14341
14342 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14343
14344+ pax_force_retaddr
14345 ret;
14346 ENDPROC(serpent_cbc_dec_8way_avx)
14347
14348@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
14349
14350 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14351
14352+ pax_force_retaddr
14353 ret;
14354 ENDPROC(serpent_ctr_8way_avx)
14355
14356@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
14357 /* dst <= regs xor IVs(in dst) */
14358 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14359
14360+ pax_force_retaddr
14361 ret;
14362 ENDPROC(serpent_xts_enc_8way_avx)
14363
14364@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
14365 /* dst <= regs xor IVs(in dst) */
14366 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14367
14368+ pax_force_retaddr
14369 ret;
14370 ENDPROC(serpent_xts_dec_8way_avx)
14371diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
14372index b222085..abd483c 100644
14373--- a/arch/x86/crypto/serpent-avx2-asm_64.S
14374+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
14375@@ -15,6 +15,7 @@
14376 */
14377
14378 #include <linux/linkage.h>
14379+#include <asm/alternative-asm.h>
14380 #include "glue_helper-asm-avx2.S"
14381
14382 .file "serpent-avx2-asm_64.S"
14383@@ -610,6 +611,7 @@ __serpent_enc_blk16:
14384 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14385 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14386
14387+ pax_force_retaddr
14388 ret;
14389 ENDPROC(__serpent_enc_blk16)
14390
14391@@ -664,6 +666,7 @@ __serpent_dec_blk16:
14392 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14393 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14394
14395+ pax_force_retaddr
14396 ret;
14397 ENDPROC(__serpent_dec_blk16)
14398
14399@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
14400
14401 vzeroupper;
14402
14403+ pax_force_retaddr
14404 ret;
14405 ENDPROC(serpent_ecb_enc_16way)
14406
14407@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
14408
14409 vzeroupper;
14410
14411+ pax_force_retaddr
14412 ret;
14413 ENDPROC(serpent_ecb_dec_16way)
14414
14415@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
14416
14417 vzeroupper;
14418
14419+ pax_force_retaddr
14420 ret;
14421 ENDPROC(serpent_cbc_dec_16way)
14422
14423@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
14424
14425 vzeroupper;
14426
14427+ pax_force_retaddr
14428 ret;
14429 ENDPROC(serpent_ctr_16way)
14430
14431@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
14432
14433 vzeroupper;
14434
14435+ pax_force_retaddr
14436 ret;
14437 ENDPROC(serpent_xts_enc_16way)
14438
14439@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
14440
14441 vzeroupper;
14442
14443+ pax_force_retaddr
14444 ret;
14445 ENDPROC(serpent_xts_dec_16way)
14446diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14447index acc066c..1559cc4 100644
14448--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14449+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14450@@ -25,6 +25,7 @@
14451 */
14452
14453 #include <linux/linkage.h>
14454+#include <asm/alternative-asm.h>
14455
14456 .file "serpent-sse2-x86_64-asm_64.S"
14457 .text
14458@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
14459 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14460 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14461
14462+ pax_force_retaddr
14463 ret;
14464
14465 .L__enc_xor8:
14466 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14467 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14468
14469+ pax_force_retaddr
14470 ret;
14471 ENDPROC(__serpent_enc_blk_8way)
14472
14473@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
14474 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14475 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14476
14477+ pax_force_retaddr
14478 ret;
14479 ENDPROC(serpent_dec_blk_8way)
14480diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
14481index a410950..9dfe7ad 100644
14482--- a/arch/x86/crypto/sha1_ssse3_asm.S
14483+++ b/arch/x86/crypto/sha1_ssse3_asm.S
14484@@ -29,6 +29,7 @@
14485 */
14486
14487 #include <linux/linkage.h>
14488+#include <asm/alternative-asm.h>
14489
14490 #define CTX %rdi // arg1
14491 #define BUF %rsi // arg2
14492@@ -75,9 +76,9 @@
14493
14494 push %rbx
14495 push %rbp
14496- push %r12
14497+ push %r14
14498
14499- mov %rsp, %r12
14500+ mov %rsp, %r14
14501 sub $64, %rsp # allocate workspace
14502 and $~15, %rsp # align stack
14503
14504@@ -99,11 +100,12 @@
14505 xor %rax, %rax
14506 rep stosq
14507
14508- mov %r12, %rsp # deallocate workspace
14509+ mov %r14, %rsp # deallocate workspace
14510
14511- pop %r12
14512+ pop %r14
14513 pop %rbp
14514 pop %rbx
14515+ pax_force_retaddr
14516 ret
14517
14518 ENDPROC(\name)
14519diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
14520index 642f156..51a513c 100644
14521--- a/arch/x86/crypto/sha256-avx-asm.S
14522+++ b/arch/x86/crypto/sha256-avx-asm.S
14523@@ -49,6 +49,7 @@
14524
14525 #ifdef CONFIG_AS_AVX
14526 #include <linux/linkage.h>
14527+#include <asm/alternative-asm.h>
14528
14529 ## assume buffers not aligned
14530 #define VMOVDQ vmovdqu
14531@@ -460,6 +461,7 @@ done_hash:
14532 popq %r13
14533 popq %rbp
14534 popq %rbx
14535+ pax_force_retaddr
14536 ret
14537 ENDPROC(sha256_transform_avx)
14538
14539diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
14540index 9e86944..3795e6a 100644
14541--- a/arch/x86/crypto/sha256-avx2-asm.S
14542+++ b/arch/x86/crypto/sha256-avx2-asm.S
14543@@ -50,6 +50,7 @@
14544
14545 #ifdef CONFIG_AS_AVX2
14546 #include <linux/linkage.h>
14547+#include <asm/alternative-asm.h>
14548
14549 ## assume buffers not aligned
14550 #define VMOVDQ vmovdqu
14551@@ -720,6 +721,7 @@ done_hash:
14552 popq %r12
14553 popq %rbp
14554 popq %rbx
14555+ pax_force_retaddr
14556 ret
14557 ENDPROC(sha256_transform_rorx)
14558
14559diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
14560index f833b74..8c62a9e 100644
14561--- a/arch/x86/crypto/sha256-ssse3-asm.S
14562+++ b/arch/x86/crypto/sha256-ssse3-asm.S
14563@@ -47,6 +47,7 @@
14564 ########################################################################
14565
14566 #include <linux/linkage.h>
14567+#include <asm/alternative-asm.h>
14568
14569 ## assume buffers not aligned
14570 #define MOVDQ movdqu
14571@@ -471,6 +472,7 @@ done_hash:
14572 popq %rbp
14573 popq %rbx
14574
14575+ pax_force_retaddr
14576 ret
14577 ENDPROC(sha256_transform_ssse3)
14578
14579diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
14580index 974dde9..a823ff9 100644
14581--- a/arch/x86/crypto/sha512-avx-asm.S
14582+++ b/arch/x86/crypto/sha512-avx-asm.S
14583@@ -49,6 +49,7 @@
14584
14585 #ifdef CONFIG_AS_AVX
14586 #include <linux/linkage.h>
14587+#include <asm/alternative-asm.h>
14588
14589 .text
14590
14591@@ -364,6 +365,7 @@ updateblock:
14592 mov frame_RSPSAVE(%rsp), %rsp
14593
14594 nowork:
14595+ pax_force_retaddr
14596 ret
14597 ENDPROC(sha512_transform_avx)
14598
14599diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
14600index 568b961..ed20c37 100644
14601--- a/arch/x86/crypto/sha512-avx2-asm.S
14602+++ b/arch/x86/crypto/sha512-avx2-asm.S
14603@@ -51,6 +51,7 @@
14604
14605 #ifdef CONFIG_AS_AVX2
14606 #include <linux/linkage.h>
14607+#include <asm/alternative-asm.h>
14608
14609 .text
14610
14611@@ -678,6 +679,7 @@ done_hash:
14612
14613 # Restore Stack Pointer
14614 mov frame_RSPSAVE(%rsp), %rsp
14615+ pax_force_retaddr
14616 ret
14617 ENDPROC(sha512_transform_rorx)
14618
14619diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
14620index fb56855..6edd768 100644
14621--- a/arch/x86/crypto/sha512-ssse3-asm.S
14622+++ b/arch/x86/crypto/sha512-ssse3-asm.S
14623@@ -48,6 +48,7 @@
14624 ########################################################################
14625
14626 #include <linux/linkage.h>
14627+#include <asm/alternative-asm.h>
14628
14629 .text
14630
14631@@ -363,6 +364,7 @@ updateblock:
14632 mov frame_RSPSAVE(%rsp), %rsp
14633
14634 nowork:
14635+ pax_force_retaddr
14636 ret
14637 ENDPROC(sha512_transform_ssse3)
14638
14639diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14640index 0505813..b067311 100644
14641--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14642+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14643@@ -24,6 +24,7 @@
14644 */
14645
14646 #include <linux/linkage.h>
14647+#include <asm/alternative-asm.h>
14648 #include "glue_helper-asm-avx.S"
14649
14650 .file "twofish-avx-x86_64-asm_64.S"
14651@@ -284,6 +285,7 @@ __twofish_enc_blk8:
14652 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
14653 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
14654
14655+ pax_force_retaddr
14656 ret;
14657 ENDPROC(__twofish_enc_blk8)
14658
14659@@ -324,6 +326,7 @@ __twofish_dec_blk8:
14660 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
14661 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
14662
14663+ pax_force_retaddr
14664 ret;
14665 ENDPROC(__twofish_dec_blk8)
14666
14667@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
14668
14669 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14670
14671+ pax_force_retaddr
14672 ret;
14673 ENDPROC(twofish_ecb_enc_8way)
14674
14675@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
14676
14677 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14678
14679+ pax_force_retaddr
14680 ret;
14681 ENDPROC(twofish_ecb_dec_8way)
14682
14683@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
14684 * %rdx: src
14685 */
14686
14687- pushq %r12;
14688+ pushq %r14;
14689
14690 movq %rsi, %r11;
14691- movq %rdx, %r12;
14692+ movq %rdx, %r14;
14693
14694 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14695
14696 call __twofish_dec_blk8;
14697
14698- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14699+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14700
14701- popq %r12;
14702+ popq %r14;
14703
14704+ pax_force_retaddr
14705 ret;
14706 ENDPROC(twofish_cbc_dec_8way)
14707
14708@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
14709 * %rcx: iv (little endian, 128bit)
14710 */
14711
14712- pushq %r12;
14713+ pushq %r14;
14714
14715 movq %rsi, %r11;
14716- movq %rdx, %r12;
14717+ movq %rdx, %r14;
14718
14719 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14720 RD2, RX0, RX1, RY0);
14721
14722 call __twofish_enc_blk8;
14723
14724- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14725+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14726
14727- popq %r12;
14728+ popq %r14;
14729
14730+ pax_force_retaddr
14731 ret;
14732 ENDPROC(twofish_ctr_8way)
14733
14734@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
14735 /* dst <= regs xor IVs(in dst) */
14736 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14737
14738+ pax_force_retaddr
14739 ret;
14740 ENDPROC(twofish_xts_enc_8way)
14741
14742@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
14743 /* dst <= regs xor IVs(in dst) */
14744 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14745
14746+ pax_force_retaddr
14747 ret;
14748 ENDPROC(twofish_xts_dec_8way)
14749diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14750index 1c3b7ce..02f578d 100644
14751--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14752+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14753@@ -21,6 +21,7 @@
14754 */
14755
14756 #include <linux/linkage.h>
14757+#include <asm/alternative-asm.h>
14758
14759 .file "twofish-x86_64-asm-3way.S"
14760 .text
14761@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
14762 popq %r13;
14763 popq %r14;
14764 popq %r15;
14765+ pax_force_retaddr
14766 ret;
14767
14768 .L__enc_xor3:
14769@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
14770 popq %r13;
14771 popq %r14;
14772 popq %r15;
14773+ pax_force_retaddr
14774 ret;
14775 ENDPROC(__twofish_enc_blk_3way)
14776
14777@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
14778 popq %r13;
14779 popq %r14;
14780 popq %r15;
14781+ pax_force_retaddr
14782 ret;
14783 ENDPROC(twofish_dec_blk_3way)
14784diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
14785index a039d21..524b8b2 100644
14786--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
14787+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
14788@@ -22,6 +22,7 @@
14789
14790 #include <linux/linkage.h>
14791 #include <asm/asm-offsets.h>
14792+#include <asm/alternative-asm.h>
14793
14794 #define a_offset 0
14795 #define b_offset 4
14796@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
14797
14798 popq R1
14799 movq $1,%rax
14800+ pax_force_retaddr
14801 ret
14802 ENDPROC(twofish_enc_blk)
14803
14804@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
14805
14806 popq R1
14807 movq $1,%rax
14808+ pax_force_retaddr
14809 ret
14810 ENDPROC(twofish_dec_blk)
14811diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
14812index d21ff89..6da8e6e 100644
14813--- a/arch/x86/ia32/ia32_aout.c
14814+++ b/arch/x86/ia32/ia32_aout.c
14815@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
14816 unsigned long dump_start, dump_size;
14817 struct user32 dump;
14818
14819+ memset(&dump, 0, sizeof(dump));
14820+
14821 fs = get_fs();
14822 set_fs(KERNEL_DS);
14823 has_dumped = 1;
14824diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
14825index f9e181a..b0df8b3 100644
14826--- a/arch/x86/ia32/ia32_signal.c
14827+++ b/arch/x86/ia32/ia32_signal.c
14828@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
14829 if (__get_user(set.sig[0], &frame->sc.oldmask)
14830 || (_COMPAT_NSIG_WORDS > 1
14831 && __copy_from_user((((char *) &set.sig) + 4),
14832- &frame->extramask,
14833+ frame->extramask,
14834 sizeof(frame->extramask))))
14835 goto badframe;
14836
14837@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
14838 sp -= frame_size;
14839 /* Align the stack pointer according to the i386 ABI,
14840 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
14841- sp = ((sp + 4) & -16ul) - 4;
14842+ sp = ((sp - 12) & -16ul) - 4;
14843 return (void __user *) sp;
14844 }
14845
14846@@ -386,7 +386,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14847 restorer = current->mm->context.vdso +
14848 selected_vdso32->sym___kernel_sigreturn;
14849 else
14850- restorer = &frame->retcode;
14851+ restorer = frame->retcode;
14852 }
14853
14854 put_user_try {
14855@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14856 * These are actually not used anymore, but left because some
14857 * gdb versions depend on them as a marker.
14858 */
14859- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14860+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14861 } put_user_catch(err);
14862
14863 if (err)
14864@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14865 0xb8,
14866 __NR_ia32_rt_sigreturn,
14867 0x80cd,
14868- 0,
14869+ 0
14870 };
14871
14872 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
14873@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14874
14875 if (ksig->ka.sa.sa_flags & SA_RESTORER)
14876 restorer = ksig->ka.sa.sa_restorer;
14877- else
14878+ else if (current->mm->context.vdso)
14879+ /* Return stub is in 32bit vsyscall page */
14880 restorer = current->mm->context.vdso +
14881 selected_vdso32->sym___kernel_rt_sigreturn;
14882+ else
14883+ restorer = frame->retcode;
14884 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
14885
14886 /*
14887 * Not actually used anymore, but left because some gdb
14888 * versions need it.
14889 */
14890- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14891+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14892 } put_user_catch(err);
14893
14894 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
14895diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
14896index 4299eb0..fefe70e 100644
14897--- a/arch/x86/ia32/ia32entry.S
14898+++ b/arch/x86/ia32/ia32entry.S
14899@@ -15,8 +15,10 @@
14900 #include <asm/irqflags.h>
14901 #include <asm/asm.h>
14902 #include <asm/smap.h>
14903+#include <asm/pgtable.h>
14904 #include <linux/linkage.h>
14905 #include <linux/err.h>
14906+#include <asm/alternative-asm.h>
14907
14908 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14909 #include <linux/elf-em.h>
14910@@ -62,12 +64,12 @@
14911 */
14912 .macro LOAD_ARGS32 offset, _r9=0
14913 .if \_r9
14914- movl \offset+16(%rsp),%r9d
14915+ movl \offset+R9(%rsp),%r9d
14916 .endif
14917- movl \offset+40(%rsp),%ecx
14918- movl \offset+48(%rsp),%edx
14919- movl \offset+56(%rsp),%esi
14920- movl \offset+64(%rsp),%edi
14921+ movl \offset+RCX(%rsp),%ecx
14922+ movl \offset+RDX(%rsp),%edx
14923+ movl \offset+RSI(%rsp),%esi
14924+ movl \offset+RDI(%rsp),%edi
14925 movl %eax,%eax /* zero extension */
14926 .endm
14927
14928@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
14929 ENDPROC(native_irq_enable_sysexit)
14930 #endif
14931
14932+ .macro pax_enter_kernel_user
14933+ pax_set_fptr_mask
14934+#ifdef CONFIG_PAX_MEMORY_UDEREF
14935+ call pax_enter_kernel_user
14936+#endif
14937+ .endm
14938+
14939+ .macro pax_exit_kernel_user
14940+#ifdef CONFIG_PAX_MEMORY_UDEREF
14941+ call pax_exit_kernel_user
14942+#endif
14943+#ifdef CONFIG_PAX_RANDKSTACK
14944+ pushq %rax
14945+ pushq %r11
14946+ call pax_randomize_kstack
14947+ popq %r11
14948+ popq %rax
14949+#endif
14950+ .endm
14951+
14952+ .macro pax_erase_kstack
14953+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14954+ call pax_erase_kstack
14955+#endif
14956+ .endm
14957+
14958 /*
14959 * 32bit SYSENTER instruction entry.
14960 *
14961@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
14962 CFI_REGISTER rsp,rbp
14963 SWAPGS_UNSAFE_STACK
14964 movq PER_CPU_VAR(kernel_stack), %rsp
14965- addq $(KERNEL_STACK_OFFSET),%rsp
14966- /*
14967- * No need to follow this irqs on/off section: the syscall
14968- * disabled irqs, here we enable it straight after entry:
14969- */
14970- ENABLE_INTERRUPTS(CLBR_NONE)
14971 movl %ebp,%ebp /* zero extension */
14972 pushq_cfi $__USER32_DS
14973 /*CFI_REL_OFFSET ss,0*/
14974@@ -135,24 +157,49 @@ ENTRY(ia32_sysenter_target)
14975 CFI_REL_OFFSET rsp,0
14976 pushfq_cfi
14977 /*CFI_REL_OFFSET rflags,0*/
14978- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
14979- CFI_REGISTER rip,r10
14980+ orl $X86_EFLAGS_IF,(%rsp)
14981+ GET_THREAD_INFO(%r11)
14982+ movl TI_sysenter_return(%r11), %r11d
14983+ CFI_REGISTER rip,r11
14984 pushq_cfi $__USER32_CS
14985 /*CFI_REL_OFFSET cs,0*/
14986 movl %eax, %eax
14987- pushq_cfi %r10
14988+ pushq_cfi %r11
14989 CFI_REL_OFFSET rip,0
14990 pushq_cfi %rax
14991 cld
14992 SAVE_ARGS 0,1,0
14993+ pax_enter_kernel_user
14994+
14995+#ifdef CONFIG_PAX_RANDKSTACK
14996+ pax_erase_kstack
14997+#endif
14998+
14999+ /*
15000+ * No need to follow this irqs on/off section: the syscall
15001+ * disabled irqs, here we enable it straight after entry:
15002+ */
15003+ ENABLE_INTERRUPTS(CLBR_NONE)
15004 /* no need to do an access_ok check here because rbp has been
15005 32bit zero extended */
15006+
15007+#ifdef CONFIG_PAX_MEMORY_UDEREF
15008+ addq pax_user_shadow_base,%rbp
15009+ ASM_PAX_OPEN_USERLAND
15010+#endif
15011+
15012 ASM_STAC
15013 1: movl (%rbp),%ebp
15014 _ASM_EXTABLE(1b,ia32_badarg)
15015 ASM_CLAC
15016- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15017- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15018+
15019+#ifdef CONFIG_PAX_MEMORY_UDEREF
15020+ ASM_PAX_CLOSE_USERLAND
15021+#endif
15022+
15023+ GET_THREAD_INFO(%r11)
15024+ orl $TS_COMPAT,TI_status(%r11)
15025+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15026 CFI_REMEMBER_STATE
15027 jnz sysenter_tracesys
15028 cmpq $(IA32_NR_syscalls-1),%rax
15029@@ -162,15 +209,18 @@ sysenter_do_call:
15030 sysenter_dispatch:
15031 call *ia32_sys_call_table(,%rax,8)
15032 movq %rax,RAX-ARGOFFSET(%rsp)
15033+ GET_THREAD_INFO(%r11)
15034 DISABLE_INTERRUPTS(CLBR_NONE)
15035 TRACE_IRQS_OFF
15036- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15037+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
15038 jnz sysexit_audit
15039 sysexit_from_sys_call:
15040- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15041+ pax_exit_kernel_user
15042+ pax_erase_kstack
15043+ andl $~TS_COMPAT,TI_status(%r11)
15044 /* clear IF, that popfq doesn't enable interrupts early */
15045- andl $~0x200,EFLAGS-R11(%rsp)
15046- movl RIP-R11(%rsp),%edx /* User %eip */
15047+ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
15048+ movl RIP(%rsp),%edx /* User %eip */
15049 CFI_REGISTER rip,rdx
15050 RESTORE_ARGS 0,24,0,0,0,0
15051 xorq %r8,%r8
15052@@ -193,6 +243,9 @@ sysexit_from_sys_call:
15053 movl %eax,%esi /* 2nd arg: syscall number */
15054 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
15055 call __audit_syscall_entry
15056+
15057+ pax_erase_kstack
15058+
15059 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
15060 cmpq $(IA32_NR_syscalls-1),%rax
15061 ja ia32_badsys
15062@@ -204,7 +257,7 @@ sysexit_from_sys_call:
15063 .endm
15064
15065 .macro auditsys_exit exit
15066- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15067+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15068 jnz ia32_ret_from_sys_call
15069 TRACE_IRQS_ON
15070 ENABLE_INTERRUPTS(CLBR_NONE)
15071@@ -215,11 +268,12 @@ sysexit_from_sys_call:
15072 1: setbe %al /* 1 if error, 0 if not */
15073 movzbl %al,%edi /* zero-extend that into %edi */
15074 call __audit_syscall_exit
15075+ GET_THREAD_INFO(%r11)
15076 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
15077 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
15078 DISABLE_INTERRUPTS(CLBR_NONE)
15079 TRACE_IRQS_OFF
15080- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15081+ testl %edi,TI_flags(%r11)
15082 jz \exit
15083 CLEAR_RREGS -ARGOFFSET
15084 jmp int_with_check
15085@@ -237,7 +291,7 @@ sysexit_audit:
15086
15087 sysenter_tracesys:
15088 #ifdef CONFIG_AUDITSYSCALL
15089- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15090+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15091 jz sysenter_auditsys
15092 #endif
15093 SAVE_REST
15094@@ -249,6 +303,9 @@ sysenter_tracesys:
15095 RESTORE_REST
15096 cmpq $(IA32_NR_syscalls-1),%rax
15097 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
15098+
15099+ pax_erase_kstack
15100+
15101 jmp sysenter_do_call
15102 CFI_ENDPROC
15103 ENDPROC(ia32_sysenter_target)
15104@@ -276,19 +333,25 @@ ENDPROC(ia32_sysenter_target)
15105 ENTRY(ia32_cstar_target)
15106 CFI_STARTPROC32 simple
15107 CFI_SIGNAL_FRAME
15108- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
15109+ CFI_DEF_CFA rsp,0
15110 CFI_REGISTER rip,rcx
15111 /*CFI_REGISTER rflags,r11*/
15112 SWAPGS_UNSAFE_STACK
15113 movl %esp,%r8d
15114 CFI_REGISTER rsp,r8
15115 movq PER_CPU_VAR(kernel_stack),%rsp
15116+ SAVE_ARGS 8*6,0,0
15117+ pax_enter_kernel_user
15118+
15119+#ifdef CONFIG_PAX_RANDKSTACK
15120+ pax_erase_kstack
15121+#endif
15122+
15123 /*
15124 * No need to follow this irqs on/off section: the syscall
15125 * disabled irqs and here we enable it straight after entry:
15126 */
15127 ENABLE_INTERRUPTS(CLBR_NONE)
15128- SAVE_ARGS 8,0,0
15129 movl %eax,%eax /* zero extension */
15130 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
15131 movq %rcx,RIP-ARGOFFSET(%rsp)
15132@@ -304,12 +367,25 @@ ENTRY(ia32_cstar_target)
15133 /* no need to do an access_ok check here because r8 has been
15134 32bit zero extended */
15135 /* hardware stack frame is complete now */
15136+
15137+#ifdef CONFIG_PAX_MEMORY_UDEREF
15138+ ASM_PAX_OPEN_USERLAND
15139+ movq pax_user_shadow_base,%r8
15140+ addq RSP-ARGOFFSET(%rsp),%r8
15141+#endif
15142+
15143 ASM_STAC
15144 1: movl (%r8),%r9d
15145 _ASM_EXTABLE(1b,ia32_badarg)
15146 ASM_CLAC
15147- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15148- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15149+
15150+#ifdef CONFIG_PAX_MEMORY_UDEREF
15151+ ASM_PAX_CLOSE_USERLAND
15152+#endif
15153+
15154+ GET_THREAD_INFO(%r11)
15155+ orl $TS_COMPAT,TI_status(%r11)
15156+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15157 CFI_REMEMBER_STATE
15158 jnz cstar_tracesys
15159 cmpq $IA32_NR_syscalls-1,%rax
15160@@ -319,13 +395,16 @@ cstar_do_call:
15161 cstar_dispatch:
15162 call *ia32_sys_call_table(,%rax,8)
15163 movq %rax,RAX-ARGOFFSET(%rsp)
15164+ GET_THREAD_INFO(%r11)
15165 DISABLE_INTERRUPTS(CLBR_NONE)
15166 TRACE_IRQS_OFF
15167- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15168+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
15169 jnz sysretl_audit
15170 sysretl_from_sys_call:
15171- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15172- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
15173+ pax_exit_kernel_user
15174+ pax_erase_kstack
15175+ andl $~TS_COMPAT,TI_status(%r11)
15176+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
15177 movl RIP-ARGOFFSET(%rsp),%ecx
15178 CFI_REGISTER rip,rcx
15179 movl EFLAGS-ARGOFFSET(%rsp),%r11d
15180@@ -352,7 +431,7 @@ sysretl_audit:
15181
15182 cstar_tracesys:
15183 #ifdef CONFIG_AUDITSYSCALL
15184- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15185+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15186 jz cstar_auditsys
15187 #endif
15188 xchgl %r9d,%ebp
15189@@ -366,11 +445,19 @@ cstar_tracesys:
15190 xchgl %ebp,%r9d
15191 cmpq $(IA32_NR_syscalls-1),%rax
15192 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
15193+
15194+ pax_erase_kstack
15195+
15196 jmp cstar_do_call
15197 END(ia32_cstar_target)
15198
15199 ia32_badarg:
15200 ASM_CLAC
15201+
15202+#ifdef CONFIG_PAX_MEMORY_UDEREF
15203+ ASM_PAX_CLOSE_USERLAND
15204+#endif
15205+
15206 movq $-EFAULT,%rax
15207 jmp ia32_sysret
15208 CFI_ENDPROC
15209@@ -407,19 +494,26 @@ ENTRY(ia32_syscall)
15210 CFI_REL_OFFSET rip,RIP-RIP
15211 PARAVIRT_ADJUST_EXCEPTION_FRAME
15212 SWAPGS
15213- /*
15214- * No need to follow this irqs on/off section: the syscall
15215- * disabled irqs and here we enable it straight after entry:
15216- */
15217- ENABLE_INTERRUPTS(CLBR_NONE)
15218 movl %eax,%eax
15219 pushq_cfi %rax
15220 cld
15221 /* note the registers are not zero extended to the sf.
15222 this could be a problem. */
15223 SAVE_ARGS 0,1,0
15224- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15225- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15226+ pax_enter_kernel_user
15227+
15228+#ifdef CONFIG_PAX_RANDKSTACK
15229+ pax_erase_kstack
15230+#endif
15231+
15232+ /*
15233+ * No need to follow this irqs on/off section: the syscall
15234+ * disabled irqs and here we enable it straight after entry:
15235+ */
15236+ ENABLE_INTERRUPTS(CLBR_NONE)
15237+ GET_THREAD_INFO(%r11)
15238+ orl $TS_COMPAT,TI_status(%r11)
15239+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15240 jnz ia32_tracesys
15241 cmpq $(IA32_NR_syscalls-1),%rax
15242 ja ia32_badsys
15243@@ -442,6 +536,9 @@ ia32_tracesys:
15244 RESTORE_REST
15245 cmpq $(IA32_NR_syscalls-1),%rax
15246 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
15247+
15248+ pax_erase_kstack
15249+
15250 jmp ia32_do_call
15251 END(ia32_syscall)
15252
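Two PaX patterns dominate the entry-code hunks above: thread_info is now reached through a register (GET_THREAD_INFO(%r11) followed by TI_flags(%r11)) rather than a fixed %rsp-relative offset, which appears necessary once PAX_RANDKSTACK randomizes the kernel stack offset, and under PAX_MEMORY_UDEREF the userland argument pointer is rebased by pax_user_shadow_base before being dereferenced. A minimal C sketch of the rebasing idea; the constant and names here are illustrative stand-ins, not the kernel's:

#include <stdint.h>
#include <stdio.h>

#define SHADOW_BASE_SKETCH 0x10000000000ULL   /* stand-in for pax_user_shadow_base */

/* Kernel code is meant to touch user memory only through the shifted
 * alias, so a raw user pointer reached by accident points at unmapped
 * space and faults -- the "addq pax_user_shadow_base,%rbp" above in C: */
static inline uint64_t uderef_rebase(uint64_t user_va)
{
    return user_va + SHADOW_BASE_SKETCH;
}

int main(void)
{
    printf("%#llx\n", (unsigned long long)uderef_rebase(0x7ffd00001000ULL));
    return 0;
}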
15253diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
15254index 8e0ceec..af13504 100644
15255--- a/arch/x86/ia32/sys_ia32.c
15256+++ b/arch/x86/ia32/sys_ia32.c
15257@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
15258 */
15259 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
15260 {
15261- typeof(ubuf->st_uid) uid = 0;
15262- typeof(ubuf->st_gid) gid = 0;
15263+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
15264+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
15265 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
15266 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
15267 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
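The typeof(((struct stat64 *)0)->st_uid) form substituted above evaluates only the type of the member; no dereference ever happens at run time, the same trick offsetof() uses. Presumably this keeps the patch's static checkers from seeing a (never-executed) dereference of the __user pointer ubuf. Stand-alone illustration with a hypothetical struct:

#include <stdio.h>

struct stat64_sketch { unsigned int st_uid; unsigned int st_gid; };

/* typeof on a NULL-cast pointer dereference is a compile-time-only
 * construct: it names the member's type without touching memory. */
#define member_type(type, member) typeof(((type *)0)->member)

int main(void)
{
    member_type(struct stat64_sketch, st_uid) uid = 0;
    printf("%zu\n", sizeof uid);
    return (int)uid;
}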
15268diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
15269index 372231c..51b537d 100644
15270--- a/arch/x86/include/asm/alternative-asm.h
15271+++ b/arch/x86/include/asm/alternative-asm.h
15272@@ -18,6 +18,45 @@
15273 .endm
15274 #endif
15275
15276+#ifdef KERNEXEC_PLUGIN
15277+ .macro pax_force_retaddr_bts rip=0
15278+ btsq $63,\rip(%rsp)
15279+ .endm
15280+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15281+ .macro pax_force_retaddr rip=0, reload=0
15282+ btsq $63,\rip(%rsp)
15283+ .endm
15284+ .macro pax_force_fptr ptr
15285+ btsq $63,\ptr
15286+ .endm
15287+ .macro pax_set_fptr_mask
15288+ .endm
15289+#endif
15290+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15291+ .macro pax_force_retaddr rip=0, reload=0
15292+ .if \reload
15293+ pax_set_fptr_mask
15294+ .endif
15295+ orq %r12,\rip(%rsp)
15296+ .endm
15297+ .macro pax_force_fptr ptr
15298+ orq %r12,\ptr
15299+ .endm
15300+ .macro pax_set_fptr_mask
15301+ movabs $0x8000000000000000,%r12
15302+ .endm
15303+#endif
15304+#else
15305+ .macro pax_force_retaddr rip=0, reload=0
15306+ .endm
15307+ .macro pax_force_fptr ptr
15308+ .endm
15309+ .macro pax_force_retaddr_bts rip=0
15310+ .endm
15311+ .macro pax_set_fptr_mask
15312+ .endm
15313+#endif
15314+
15315 .macro altinstruction_entry orig alt feature orig_len alt_len
15316 .long \orig - .
15317 .long \alt - .
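The pax_force_retaddr machinery added above ORs bit 63 into saved return addresses (or BTSes it in place). Kernel text already lives at 0xffff... addresses, so this is a no-op for legitimate values, but an attacker-planted userland address (bit 63 clear) becomes non-canonical and faults on return. A C model of the mask, illustrative only:

#include <stdint.h>
#include <stdio.h>

/* "orq %r12,\rip(%rsp)" with %r12 preloaded to 1<<63, modelled in C. */
static inline uint64_t force_retaddr(uint64_t ra)
{
    return ra | (1ULL << 63);
}

int main(void)
{
    printf("kernel  %#llx\n", (unsigned long long)force_retaddr(0xffffffff81000000ULL)); /* unchanged */
    printf("planted %#llx\n", (unsigned long long)force_retaddr(0x00007f0000001000ULL)); /* non-canonical */
    return 0;
}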
15318diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15319index 0a3f9c9..c9d081d 100644
15320--- a/arch/x86/include/asm/alternative.h
15321+++ b/arch/x86/include/asm/alternative.h
15322@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15323 ".pushsection .discard,\"aw\",@progbits\n" \
15324 DISCARD_ENTRY(1) \
15325 ".popsection\n" \
15326- ".pushsection .altinstr_replacement, \"ax\"\n" \
15327+ ".pushsection .altinstr_replacement, \"a\"\n" \
15328 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15329 ".popsection"
15330
15331@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15332 DISCARD_ENTRY(1) \
15333 DISCARD_ENTRY(2) \
15334 ".popsection\n" \
15335- ".pushsection .altinstr_replacement, \"ax\"\n" \
15336+ ".pushsection .altinstr_replacement, \"a\"\n" \
15337 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15338 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15339 ".popsection"
15340diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15341index 19b0eba..12254cd 100644
15342--- a/arch/x86/include/asm/apic.h
15343+++ b/arch/x86/include/asm/apic.h
15344@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15345
15346 #ifdef CONFIG_X86_LOCAL_APIC
15347
15348-extern unsigned int apic_verbosity;
15349+extern int apic_verbosity;
15350 extern int local_apic_timer_c2_ok;
15351
15352 extern int disable_apic;
15353diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15354index 20370c6..a2eb9b0 100644
15355--- a/arch/x86/include/asm/apm.h
15356+++ b/arch/x86/include/asm/apm.h
15357@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15358 __asm__ __volatile__(APM_DO_ZERO_SEGS
15359 "pushl %%edi\n\t"
15360 "pushl %%ebp\n\t"
15361- "lcall *%%cs:apm_bios_entry\n\t"
15362+ "lcall *%%ss:apm_bios_entry\n\t"
15363 "setc %%al\n\t"
15364 "popl %%ebp\n\t"
15365 "popl %%edi\n\t"
15366@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15367 __asm__ __volatile__(APM_DO_ZERO_SEGS
15368 "pushl %%edi\n\t"
15369 "pushl %%ebp\n\t"
15370- "lcall *%%cs:apm_bios_entry\n\t"
15371+ "lcall *%%ss:apm_bios_entry\n\t"
15372 "setc %%bl\n\t"
15373 "popl %%ebp\n\t"
15374 "popl %%edi\n\t"
15375diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15376index 6dd1c7dd..2edd216 100644
15377--- a/arch/x86/include/asm/atomic.h
15378+++ b/arch/x86/include/asm/atomic.h
15379@@ -24,7 +24,18 @@
15380 */
15381 static inline int atomic_read(const atomic_t *v)
15382 {
15383- return (*(volatile int *)&(v)->counter);
15384+ return (*(volatile const int *)&(v)->counter);
15385+}
15386+
15387+/**
15388+ * atomic_read_unchecked - read atomic variable
15389+ * @v: pointer of type atomic_unchecked_t
15390+ *
15391+ * Atomically reads the value of @v.
15392+ */
15393+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15394+{
15395+ return (*(volatile const int *)&(v)->counter);
15396 }
15397
15398 /**
15399@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
15400 }
15401
15402 /**
15403+ * atomic_set_unchecked - set atomic variable
15404+ * @v: pointer of type atomic_unchecked_t
15405+ * @i: required value
15406+ *
15407+ * Atomically sets the value of @v to @i.
15408+ */
15409+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15410+{
15411+ v->counter = i;
15412+}
15413+
15414+/**
15415 * atomic_add - add integer to atomic variable
15416 * @i: integer value to add
15417 * @v: pointer of type atomic_t
15418@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
15419 */
15420 static inline void atomic_add(int i, atomic_t *v)
15421 {
15422- asm volatile(LOCK_PREFIX "addl %1,%0"
15423+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15424+
15425+#ifdef CONFIG_PAX_REFCOUNT
15426+ "jno 0f\n"
15427+ LOCK_PREFIX "subl %1,%0\n"
15428+ "int $4\n0:\n"
15429+ _ASM_EXTABLE(0b, 0b)
15430+#endif
15431+
15432+ : "+m" (v->counter)
15433+ : "ir" (i));
15434+}
15435+
15436+/**
15437+ * atomic_add_unchecked - add integer to atomic variable
15438+ * @i: integer value to add
15439+ * @v: pointer of type atomic_unchecked_t
15440+ *
15441+ * Atomically adds @i to @v.
15442+ */
15443+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15444+{
15445+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15446 : "+m" (v->counter)
15447 : "ir" (i));
15448 }
15449@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
15450 */
15451 static inline void atomic_sub(int i, atomic_t *v)
15452 {
15453- asm volatile(LOCK_PREFIX "subl %1,%0"
15454+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15455+
15456+#ifdef CONFIG_PAX_REFCOUNT
15457+ "jno 0f\n"
15458+ LOCK_PREFIX "addl %1,%0\n"
15459+ "int $4\n0:\n"
15460+ _ASM_EXTABLE(0b, 0b)
15461+#endif
15462+
15463+ : "+m" (v->counter)
15464+ : "ir" (i));
15465+}
15466+
15467+/**
15468+ * atomic_sub_unchecked - subtract integer from atomic variable
15469+ * @i: integer value to subtract
15470+ * @v: pointer of type atomic_unchecked_t
15471+ *
15472+ * Atomically subtracts @i from @v.
15473+ */
15474+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15475+{
15476+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15477 : "+m" (v->counter)
15478 : "ir" (i));
15479 }
15480@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15481 */
15482 static inline int atomic_sub_and_test(int i, atomic_t *v)
15483 {
15484- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15485+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15486 }
15487
15488 /**
15489@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15490 */
15491 static inline void atomic_inc(atomic_t *v)
15492 {
15493- asm volatile(LOCK_PREFIX "incl %0"
15494+ asm volatile(LOCK_PREFIX "incl %0\n"
15495+
15496+#ifdef CONFIG_PAX_REFCOUNT
15497+ "jno 0f\n"
15498+ LOCK_PREFIX "decl %0\n"
15499+ "int $4\n0:\n"
15500+ _ASM_EXTABLE(0b, 0b)
15501+#endif
15502+
15503+ : "+m" (v->counter));
15504+}
15505+
15506+/**
15507+ * atomic_inc_unchecked - increment atomic variable
15508+ * @v: pointer of type atomic_unchecked_t
15509+ *
15510+ * Atomically increments @v by 1.
15511+ */
15512+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
15513+{
15514+ asm volatile(LOCK_PREFIX "incl %0\n"
15515 : "+m" (v->counter));
15516 }
15517
15518@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
15519 */
15520 static inline void atomic_dec(atomic_t *v)
15521 {
15522- asm volatile(LOCK_PREFIX "decl %0"
15523+ asm volatile(LOCK_PREFIX "decl %0\n"
15524+
15525+#ifdef CONFIG_PAX_REFCOUNT
15526+ "jno 0f\n"
15527+ LOCK_PREFIX "incl %0\n"
15528+ "int $4\n0:\n"
15529+ _ASM_EXTABLE(0b, 0b)
15530+#endif
15531+
15532+ : "+m" (v->counter));
15533+}
15534+
15535+/**
15536+ * atomic_dec_unchecked - decrement atomic variable
15537+ * @v: pointer of type atomic_unchecked_t
15538+ *
15539+ * Atomically decrements @v by 1.
15540+ */
15541+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
15542+{
15543+ asm volatile(LOCK_PREFIX "decl %0\n"
15544 : "+m" (v->counter));
15545 }
15546
15547@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
15548 */
15549 static inline int atomic_dec_and_test(atomic_t *v)
15550 {
15551- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
15552+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
15553 }
15554
15555 /**
15556@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
15557 */
15558 static inline int atomic_inc_and_test(atomic_t *v)
15559 {
15560- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
15561+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
15562+}
15563+
15564+/**
15565+ * atomic_inc_and_test_unchecked - increment and test
15566+ * @v: pointer of type atomic_unchecked_t
15567+ *
15568+ * Atomically increments @v by 1
15569+ * and returns true if the result is zero, or false for all
15570+ * other cases.
15571+ */
15572+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
15573+{
15574+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
15575 }
15576
15577 /**
15578@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
15579 */
15580 static inline int atomic_add_negative(int i, atomic_t *v)
15581 {
15582- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
15583+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
15584 }
15585
15586 /**
15587@@ -154,6 +274,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
15588 */
15589 static inline int atomic_add_return(int i, atomic_t *v)
15590 {
15591+ return i + xadd_check_overflow(&v->counter, i);
15592+}
15593+
15594+/**
15595+ * atomic_add_return_unchecked - add integer and return
15596+ * @i: integer value to add
15597+ * @v: pointer of type atomic_unchecked_t
15598+ *
15599+ * Atomically adds @i to @v and returns @i + @v
15600+ */
15601+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
15602+{
15603 return i + xadd(&v->counter, i);
15604 }
15605
15606@@ -170,9 +302,18 @@ static inline int atomic_sub_return(int i, atomic_t *v)
15607 }
15608
15609 #define atomic_inc_return(v) (atomic_add_return(1, v))
15610+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
15611+{
15612+ return atomic_add_return_unchecked(1, v);
15613+}
15614 #define atomic_dec_return(v) (atomic_sub_return(1, v))
15615
15616-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
15617+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
15618+{
15619+ return cmpxchg(&v->counter, old, new);
15620+}
15621+
15622+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
15623 {
15624 return cmpxchg(&v->counter, old, new);
15625 }
15626@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
15627 return xchg(&v->counter, new);
15628 }
15629
15630+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
15631+{
15632+ return xchg(&v->counter, new);
15633+}
15634+
15635 /**
15636 * __atomic_add_unless - add unless the number is already a given value
15637 * @v: pointer of type atomic_t
15638@@ -191,14 +337,27 @@ static inline int atomic_xchg(atomic_t *v, int new)
15639 * Atomically adds @a to @v, so long as @v was not already @u.
15640 * Returns the old value of @v.
15641 */
15642-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15643+static inline int __intentional_overflow(-1) __atomic_add_unless(atomic_t *v, int a, int u)
15644 {
15645- int c, old;
15646+ int c, old, new;
15647 c = atomic_read(v);
15648 for (;;) {
15649- if (unlikely(c == (u)))
15650+ if (unlikely(c == u))
15651 break;
15652- old = atomic_cmpxchg((v), c, c + (a));
15653+
15654+ asm volatile("addl %2,%0\n"
15655+
15656+#ifdef CONFIG_PAX_REFCOUNT
15657+ "jno 0f\n"
15658+ "subl %2,%0\n"
15659+ "int $4\n0:\n"
15660+ _ASM_EXTABLE(0b, 0b)
15661+#endif
15662+
15663+ : "=r" (new)
15664+ : "0" (c), "ir" (a));
15665+
15666+ old = atomic_cmpxchg(v, c, new);
15667 if (likely(old == c))
15668 break;
15669 c = old;
15670@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15671 }
15672
15673 /**
15674+ * atomic_inc_not_zero_hint - increment if not null
15675+ * @v: pointer of type atomic_t
15676+ * @hint: probable value of the atomic before the increment
15677+ *
15678+ * This version of atomic_inc_not_zero() gives a hint of the probable
15679+ * value of the atomic. This helps the processor avoid reading the memory
15680+ * before doing the atomic read/modify/write cycle, lowering the
15681+ * number of bus transactions on some arches.
15682+ *
15683+ * Returns: 0 if increment was not done, 1 otherwise.
15684+ */
15685+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
15686+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
15687+{
15688+ int val, c = hint, new;
15689+
15690+ /* sanity test, should be removed by compiler if hint is a constant */
15691+ if (!hint)
15692+ return __atomic_add_unless(v, 1, 0);
15693+
15694+ do {
15695+ asm volatile("incl %0\n"
15696+
15697+#ifdef CONFIG_PAX_REFCOUNT
15698+ "jno 0f\n"
15699+ "decl %0\n"
15700+ "int $4\n0:\n"
15701+ _ASM_EXTABLE(0b, 0b)
15702+#endif
15703+
15704+ : "=r" (new)
15705+ : "0" (c));
15706+
15707+ val = atomic_cmpxchg(v, c, new);
15708+ if (val == c)
15709+ return 1;
15710+ c = val;
15711+ } while (c);
15712+
15713+ return 0;
15714+}
15715+
15716+/**
15717 * atomic_inc_short - increment of a short integer
15718 * @v: pointer to type int
15719 *
15720@@ -235,14 +437,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
15721 #endif
15722
15723 /* These are x86-specific, used by some header files */
15724-#define atomic_clear_mask(mask, addr) \
15725- asm volatile(LOCK_PREFIX "andl %0,%1" \
15726- : : "r" (~(mask)), "m" (*(addr)) : "memory")
15727+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
15728+{
15729+ asm volatile(LOCK_PREFIX "andl %1,%0"
15730+ : "+m" (v->counter)
15731+ : "r" (~(mask))
15732+ : "memory");
15733+}
15734
15735-#define atomic_set_mask(mask, addr) \
15736- asm volatile(LOCK_PREFIX "orl %0,%1" \
15737- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
15738- : "memory")
15739+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15740+{
15741+ asm volatile(LOCK_PREFIX "andl %1,%0"
15742+ : "+m" (v->counter)
15743+ : "r" (~(mask))
15744+ : "memory");
15745+}
15746+
15747+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
15748+{
15749+ asm volatile(LOCK_PREFIX "orl %1,%0"
15750+ : "+m" (v->counter)
15751+ : "r" (mask)
15752+ : "memory");
15753+}
15754+
15755+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15756+{
15757+ asm volatile(LOCK_PREFIX "orl %1,%0"
15758+ : "+m" (v->counter)
15759+ : "r" (mask)
15760+ : "memory");
15761+}
15762
15763 #ifdef CONFIG_X86_32
15764 # include <asm/atomic64_32.h>
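The PAX_REFCOUNT pattern threaded through the hunks above is: perform the atomic op, and if the signed result wrapped, undo the op and trap ("jno 0f; <inverse op>; int $4", with the extable entry resuming after the trap). A user-space model of the same detect-undo-trap idea; abort() stands in for the #OF exception, and the function name is illustrative:

#include <stdatomic.h>
#include <stdlib.h>

static inline void refcount_add_sketch(_Atomic int *v, int i)
{
    int old = atomic_fetch_add_explicit(v, i, memory_order_relaxed);
    int sum;
    if (__builtin_add_overflow(old, i, &sum)) {          /* signed wrap = refcount bug */
        atomic_fetch_sub_explicit(v, i, memory_order_relaxed); /* undo, like the patched asm */
        abort();                                         /* kernel raises int $4 (#OF) */
    }
}

int main(void)
{
    _Atomic int refs = 1;
    refcount_add_sketch(&refs, 1);   /* no wrap: proceeds normally */
    return 0;
}

The _unchecked variants added alongside are the escape hatch: counters that legitimately wrap (statistics, sequence numbers) use them to skip the trap.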
15765diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
15766index b154de7..bf18a5a 100644
15767--- a/arch/x86/include/asm/atomic64_32.h
15768+++ b/arch/x86/include/asm/atomic64_32.h
15769@@ -12,6 +12,14 @@ typedef struct {
15770 u64 __aligned(8) counter;
15771 } atomic64_t;
15772
15773+#ifdef CONFIG_PAX_REFCOUNT
15774+typedef struct {
15775+ u64 __aligned(8) counter;
15776+} atomic64_unchecked_t;
15777+#else
15778+typedef atomic64_t atomic64_unchecked_t;
15779+#endif
15780+
15781 #define ATOMIC64_INIT(val) { (val) }
15782
15783 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
15784@@ -37,21 +45,31 @@ typedef struct {
15785 ATOMIC64_DECL_ONE(sym##_386)
15786
15787 ATOMIC64_DECL_ONE(add_386);
15788+ATOMIC64_DECL_ONE(add_unchecked_386);
15789 ATOMIC64_DECL_ONE(sub_386);
15790+ATOMIC64_DECL_ONE(sub_unchecked_386);
15791 ATOMIC64_DECL_ONE(inc_386);
15792+ATOMIC64_DECL_ONE(inc_unchecked_386);
15793 ATOMIC64_DECL_ONE(dec_386);
15794+ATOMIC64_DECL_ONE(dec_unchecked_386);
15795 #endif
15796
15797 #define alternative_atomic64(f, out, in...) \
15798 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
15799
15800 ATOMIC64_DECL(read);
15801+ATOMIC64_DECL(read_unchecked);
15802 ATOMIC64_DECL(set);
15803+ATOMIC64_DECL(set_unchecked);
15804 ATOMIC64_DECL(xchg);
15805 ATOMIC64_DECL(add_return);
15806+ATOMIC64_DECL(add_return_unchecked);
15807 ATOMIC64_DECL(sub_return);
15808+ATOMIC64_DECL(sub_return_unchecked);
15809 ATOMIC64_DECL(inc_return);
15810+ATOMIC64_DECL(inc_return_unchecked);
15811 ATOMIC64_DECL(dec_return);
15812+ATOMIC64_DECL(dec_return_unchecked);
15813 ATOMIC64_DECL(dec_if_positive);
15814 ATOMIC64_DECL(inc_not_zero);
15815 ATOMIC64_DECL(add_unless);
15816@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
15817 }
15818
15819 /**
15820+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
15821+ * @p: pointer to type atomic64_unchecked_t
15822+ * @o: expected value
15823+ * @n: new value
15824+ *
15825+ * Atomically sets @v to @n if it was equal to @o and returns
15826+ * the old value.
15827+ */
15828+
15829+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
15830+{
15831+ return cmpxchg64(&v->counter, o, n);
15832+}
15833+
15834+/**
15835 * atomic64_xchg - xchg atomic64 variable
15836 * @v: pointer to type atomic64_t
15837 * @n: value to assign
15838@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
15839 }
15840
15841 /**
15842+ * atomic64_set_unchecked - set atomic64 variable
15843+ * @v: pointer to type atomic64_unchecked_t
15844+ * @n: value to assign
15845+ *
15846+ * Atomically sets the value of @v to @n.
15847+ */
15848+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
15849+{
15850+ unsigned high = (unsigned)(i >> 32);
15851+ unsigned low = (unsigned)i;
15852+ alternative_atomic64(set, /* no output */,
15853+ "S" (v), "b" (low), "c" (high)
15854+ : "eax", "edx", "memory");
15855+}
15856+
15857+/**
15858 * atomic64_read - read atomic64 variable
15859 * @v: pointer to type atomic64_t
15860 *
15861@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
15862 }
15863
15864 /**
15865+ * atomic64_read_unchecked - read atomic64 variable
15866+ * @v: pointer to type atomic64_unchecked_t
15867+ *
15868+ * Atomically reads the value of @v and returns it.
15869+ */
15870+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
15871+{
15872+ long long r;
15873+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
15874+ return r;
15875+ }
15876+
15877+/**
15878 * atomic64_add_return - add and return
15879 * @i: integer value to add
15880 * @v: pointer to type atomic64_t
15881@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
15882 return i;
15883 }
15884
15885+/**
15886+ * atomic64_add_return_unchecked - add and return
15887+ * @i: integer value to add
15888+ * @v: pointer to type atomic64_unchecked_t
15889+ *
15890+ * Atomically adds @i to @v and returns @i + *@v
15891+ */
15892+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
15893+{
15894+ alternative_atomic64(add_return_unchecked,
15895+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15896+ ASM_NO_INPUT_CLOBBER("memory"));
15897+ return i;
15898+}
15899+
15900 /*
15901 * Other variants with different arithmetic operators:
15902 */
15903@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
15904 return a;
15905 }
15906
15907+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15908+{
15909+ long long a;
15910+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
15911+ "S" (v) : "memory", "ecx");
15912+ return a;
15913+}
15914+
15915 static inline long long atomic64_dec_return(atomic64_t *v)
15916 {
15917 long long a;
15918@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
15919 }
15920
15921 /**
15922+ * atomic64_add_unchecked - add integer to atomic64 variable
15923+ * @i: integer value to add
15924+ * @v: pointer to type atomic64_unchecked_t
15925+ *
15926+ * Atomically adds @i to @v.
15927+ */
15928+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
15929+{
15930+ __alternative_atomic64(add_unchecked, add_return_unchecked,
15931+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15932+ ASM_NO_INPUT_CLOBBER("memory"));
15933+ return i;
15934+}
15935+
15936+/**
15937 * atomic64_sub - subtract the atomic64 variable
15938 * @i: integer value to subtract
15939 * @v: pointer to type atomic64_t
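On 32-bit x86 the atomic64 family dispatches to the out-of-line helpers declared above (cmpxchg8b-based, with plain 386 fallbacks); the new _unchecked declarations are the same helpers minus the overflow check. The underlying read-modify-write loop, modelled in portable C11 as an illustrative sketch (the kernel's asm additionally traps on wrap, which plain C cannot express):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static int64_t atomic64_add_return_sketch(_Atomic int64_t *v, int64_t i)
{
    int64_t old = atomic_load(v);
    while (!atomic_compare_exchange_weak(v, &old, old + i))
        ;   /* old is reloaded on each failed cmpxchg8b */
    return old + i;
}

int main(void)
{
    _Atomic int64_t v = 1;
    printf("%lld\n", (long long)atomic64_add_return_sketch(&v, 2));  /* prints 3 */
    return 0;
}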
15940diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
15941index 46e9052..ae45136 100644
15942--- a/arch/x86/include/asm/atomic64_64.h
15943+++ b/arch/x86/include/asm/atomic64_64.h
15944@@ -18,7 +18,19 @@
15945 */
15946 static inline long atomic64_read(const atomic64_t *v)
15947 {
15948- return (*(volatile long *)&(v)->counter);
15949+ return (*(volatile const long *)&(v)->counter);
15950+}
15951+
15952+/**
15953+ * atomic64_read_unchecked - read atomic64 variable
15954+ * @v: pointer of type atomic64_unchecked_t
15955+ *
15956+ * Atomically reads the value of @v.
15957+ * Doesn't imply a read memory barrier.
15958+ */
15959+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
15960+{
15961+ return (*(volatile const long *)&(v)->counter);
15962 }
15963
15964 /**
15965@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
15966 }
15967
15968 /**
15969+ * atomic64_set_unchecked - set atomic64 variable
15970+ * @v: pointer to type atomic64_unchecked_t
15971+ * @i: required value
15972+ *
15973+ * Atomically sets the value of @v to @i.
15974+ */
15975+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
15976+{
15977+ v->counter = i;
15978+}
15979+
15980+/**
15981 * atomic64_add - add integer to atomic64 variable
15982 * @i: integer value to add
15983 * @v: pointer to type atomic64_t
15984@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
15985 */
15986 static inline void atomic64_add(long i, atomic64_t *v)
15987 {
15988+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
15989+
15990+#ifdef CONFIG_PAX_REFCOUNT
15991+ "jno 0f\n"
15992+ LOCK_PREFIX "subq %1,%0\n"
15993+ "int $4\n0:\n"
15994+ _ASM_EXTABLE(0b, 0b)
15995+#endif
15996+
15997+ : "=m" (v->counter)
15998+ : "er" (i), "m" (v->counter));
15999+}
16000+
16001+/**
16002+ * atomic64_add_unchecked - add integer to atomic64 variable
16003+ * @i: integer value to add
16004+ * @v: pointer to type atomic64_unchecked_t
16005+ *
16006+ * Atomically adds @i to @v.
16007+ */
16008+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
16009+{
16010 asm volatile(LOCK_PREFIX "addq %1,%0"
16011 : "=m" (v->counter)
16012 : "er" (i), "m" (v->counter));
16013@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
16014 */
16015 static inline void atomic64_sub(long i, atomic64_t *v)
16016 {
16017- asm volatile(LOCK_PREFIX "subq %1,%0"
16018+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
16019+
16020+#ifdef CONFIG_PAX_REFCOUNT
16021+ "jno 0f\n"
16022+ LOCK_PREFIX "addq %1,%0\n"
16023+ "int $4\n0:\n"
16024+ _ASM_EXTABLE(0b, 0b)
16025+#endif
16026+
16027+ : "=m" (v->counter)
16028+ : "er" (i), "m" (v->counter));
16029+}
16030+
16031+/**
16032+ * atomic64_sub_unchecked - subtract the atomic64 variable
16033+ * @i: integer value to subtract
16034+ * @v: pointer to type atomic64_unchecked_t
16035+ *
16036+ * Atomically subtracts @i from @v.
16037+ */
16038+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
16039+{
16040+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
16041 : "=m" (v->counter)
16042 : "er" (i), "m" (v->counter));
16043 }
16044@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
16045 */
16046 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
16047 {
16048- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
16049+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
16050 }
16051
16052 /**
16053@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
16054 */
16055 static inline void atomic64_inc(atomic64_t *v)
16056 {
16057+ asm volatile(LOCK_PREFIX "incq %0\n"
16058+
16059+#ifdef CONFIG_PAX_REFCOUNT
16060+ "jno 0f\n"
16061+ LOCK_PREFIX "decq %0\n"
16062+ "int $4\n0:\n"
16063+ _ASM_EXTABLE(0b, 0b)
16064+#endif
16065+
16066+ : "=m" (v->counter)
16067+ : "m" (v->counter));
16068+}
16069+
16070+/**
16071+ * atomic64_inc_unchecked - increment atomic64 variable
16072+ * @v: pointer to type atomic64_unchecked_t
16073+ *
16074+ * Atomically increments @v by 1.
16075+ */
16076+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
16077+{
16078 asm volatile(LOCK_PREFIX "incq %0"
16079 : "=m" (v->counter)
16080 : "m" (v->counter));
16081@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
16082 */
16083 static inline void atomic64_dec(atomic64_t *v)
16084 {
16085- asm volatile(LOCK_PREFIX "decq %0"
16086+ asm volatile(LOCK_PREFIX "decq %0\n"
16087+
16088+#ifdef CONFIG_PAX_REFCOUNT
16089+ "jno 0f\n"
16090+ LOCK_PREFIX "incq %0\n"
16091+ "int $4\n0:\n"
16092+ _ASM_EXTABLE(0b, 0b)
16093+#endif
16094+
16095+ : "=m" (v->counter)
16096+ : "m" (v->counter));
16097+}
16098+
16099+/**
16100+ * atomic64_dec_unchecked - decrement atomic64 variable
16101+ * @v: pointer to type atomic64_unchecked_t
16102+ *
16103+ * Atomically decrements @v by 1.
16104+ */
16105+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
16106+{
16107+ asm volatile(LOCK_PREFIX "decq %0\n"
16108 : "=m" (v->counter)
16109 : "m" (v->counter));
16110 }
16111@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
16112 */
16113 static inline int atomic64_dec_and_test(atomic64_t *v)
16114 {
16115- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
16116+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
16117 }
16118
16119 /**
16120@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
16121 */
16122 static inline int atomic64_inc_and_test(atomic64_t *v)
16123 {
16124- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
16125+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
16126 }
16127
16128 /**
16129@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
16130 */
16131 static inline int atomic64_add_negative(long i, atomic64_t *v)
16132 {
16133- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
16134+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
16135 }
16136
16137 /**
16138@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
16139 */
16140 static inline long atomic64_add_return(long i, atomic64_t *v)
16141 {
16142+ return i + xadd_check_overflow(&v->counter, i);
16143+}
16144+
16145+/**
16146+ * atomic64_add_return_unchecked - add and return
16147+ * @i: integer value to add
16148+ * @v: pointer to type atomic64_unchecked_t
16149+ *
16150+ * Atomically adds @i to @v and returns @i + @v
16151+ */
16152+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
16153+{
16154 return i + xadd(&v->counter, i);
16155 }
16156
16157@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
16158 }
16159
16160 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
16161+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
16162+{
16163+ return atomic64_add_return_unchecked(1, v);
16164+}
16165 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
16166
16167 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16168@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16169 return cmpxchg(&v->counter, old, new);
16170 }
16171
16172+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
16173+{
16174+ return cmpxchg(&v->counter, old, new);
16175+}
16176+
16177 static inline long atomic64_xchg(atomic64_t *v, long new)
16178 {
16179 return xchg(&v->counter, new);
16180@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
16181 */
16182 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
16183 {
16184- long c, old;
16185+ long c, old, new;
16186 c = atomic64_read(v);
16187 for (;;) {
16188- if (unlikely(c == (u)))
16189+ if (unlikely(c == u))
16190 break;
16191- old = atomic64_cmpxchg((v), c, c + (a));
16192+
16193+ asm volatile("add %2,%0\n"
16194+
16195+#ifdef CONFIG_PAX_REFCOUNT
16196+ "jno 0f\n"
16197+ "sub %2,%0\n"
16198+ "int $4\n0:\n"
16199+ _ASM_EXTABLE(0b, 0b)
16200+#endif
16201+
16202+ : "=r" (new)
16203+ : "0" (c), "ir" (a));
16204+
16205+ old = atomic64_cmpxchg(v, c, new);
16206 if (likely(old == c))
16207 break;
16208 c = old;
16209 }
16210- return c != (u);
16211+ return c != u;
16212 }
16213
16214 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
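The rewritten atomic64_add_unless() above turns the old "cmpxchg(v, c, c + a)" into a cmpxchg loop whose addition is overflow-checked before being committed. A portable C sketch of the same control flow; abort() again models the int $4 overflow trap:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

static bool add_unless_sketch(_Atomic long *v, long a, long u)
{
    long c = atomic_load(v);
    while (c != u) {
        long new;
        if (__builtin_add_overflow(c, a, &new))
            abort();                        /* PAX_REFCOUNT would trap here */
        if (atomic_compare_exchange_weak(v, &c, new))
            return true;                    /* added; c != u held */
        /* on CAS failure, c has been reloaded with the current value */
    }
    return false;                           /* value was already u: no add */
}

int main(void)
{
    _Atomic long v = 5;
    return add_unless_sketch(&v, 1, 5) ? 1 : 0;   /* returns 0: v == u */
}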
16215diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
16216index 5c7198c..44180b5 100644
16217--- a/arch/x86/include/asm/barrier.h
16218+++ b/arch/x86/include/asm/barrier.h
16219@@ -107,7 +107,7 @@
16220 do { \
16221 compiletime_assert_atomic_type(*p); \
16222 smp_mb(); \
16223- ACCESS_ONCE(*p) = (v); \
16224+ ACCESS_ONCE_RW(*p) = (v); \
16225 } while (0)
16226
16227 #define smp_load_acquire(p) \
16228@@ -124,7 +124,7 @@ do { \
16229 do { \
16230 compiletime_assert_atomic_type(*p); \
16231 barrier(); \
16232- ACCESS_ONCE(*p) = (v); \
16233+ ACCESS_ONCE_RW(*p) = (v); \
16234 } while (0)
16235
16236 #define smp_load_acquire(p) \
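ACCESS_ONCE_RW exists because elsewhere in this patch ACCESS_ONCE() is, to the best of my reading, const-qualified so that stray writes through it fail to compile; stores must opt in via the _RW form, which is what the two barrier.h hunks above do. A minimal model of the pair, an assumption since the real definitions live in compiler.h outside this excerpt:

#define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))   /* reads only */
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))         /* writable */

int main(void)
{
    int p = 0;
    ACCESS_ONCE_RW(p) = 1;     /* ok: explicit write-capable access */
    return ACCESS_ONCE(p);     /* reads are unchanged */
}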
16237diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
16238index afcd35d..141b32d 100644
16239--- a/arch/x86/include/asm/bitops.h
16240+++ b/arch/x86/include/asm/bitops.h
16241@@ -50,7 +50,7 @@
16242 * a mask operation on a byte.
16243 */
16244 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
16245-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
16246+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
16247 #define CONST_MASK(nr) (1 << ((nr) & 7))
16248
16249 /**
16250@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
16251 */
16252 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
16253 {
16254- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16255+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16256 }
16257
16258 /**
16259@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
16260 */
16261 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
16262 {
16263- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16264+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16265 }
16266
16267 /**
16268@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
16269 */
16270 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
16271 {
16272- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16273+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16274 }
16275
16276 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
16277@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
16278 *
16279 * Undefined if no bit exists, so code should check against 0 first.
16280 */
16281-static inline unsigned long __ffs(unsigned long word)
16282+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16283 {
16284 asm("rep; bsf %1,%0"
16285 : "=r" (word)
16286@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
16287 *
16288 * Undefined if no zero exists, so code should check against ~0UL first.
16289 */
16290-static inline unsigned long ffz(unsigned long word)
16291+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16292 {
16293 asm("rep; bsf %1,%0"
16294 : "=r" (word)
16295@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
16296 *
16297 * Undefined if no set bit exists, so code should check against 0 first.
16298 */
16299-static inline unsigned long __fls(unsigned long word)
16300+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16301 {
16302 asm("bsr %1,%0"
16303 : "=r" (word)
16304@@ -434,7 +434,7 @@ static inline int ffs(int x)
16305 * set bit if value is nonzero. The last (most significant) bit is
16306 * at position 32.
16307 */
16308-static inline int fls(int x)
16309+static inline int __intentional_overflow(-1) fls(int x)
16310 {
16311 int r;
16312
16313@@ -476,7 +476,7 @@ static inline int fls(int x)
16314 * at position 64.
16315 */
16316 #ifdef CONFIG_X86_64
16317-static __always_inline int fls64(__u64 x)
16318+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
16319 {
16320 int bitpos = -1;
16321 /*
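The __intentional_overflow(-1) annotation sprinkled over the bit-scan helpers above whitelists a function for the size_overflow gcc plugin: results that are "undefined if no bit exists" legitimately escape the plugin's arithmetic tracking, and -1 marks the return value. Without the plugin the attribute expands to nothing; the stub below models that, with an illustrative ffz-alike:

#include <stdio.h>

#define __intentional_overflow(...)   /* plugin-only attribute; empty here */

static inline unsigned long __intentional_overflow(-1) ffz_sketch(unsigned long word)
{
    return __builtin_ctzl(~word);     /* first zero bit, like ffz() above */
}

int main(void)
{
    printf("%lu\n", ffz_sketch(0xffUL));   /* prints 8 */
    return 0;
}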
16322diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16323index 4fa687a..60f2d39 100644
16324--- a/arch/x86/include/asm/boot.h
16325+++ b/arch/x86/include/asm/boot.h
16326@@ -6,10 +6,15 @@
16327 #include <uapi/asm/boot.h>
16328
16329 /* Physical address where kernel should be loaded. */
16330-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16331+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16332 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16333 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16334
16335+#ifndef __ASSEMBLY__
16336+extern unsigned char __LOAD_PHYSICAL_ADDR[];
16337+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16338+#endif
16339+
16340 /* Minimum kernel alignment, as a power of two */
16341 #ifdef CONFIG_X86_64
16342 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
16343diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16344index 48f99f1..d78ebf9 100644
16345--- a/arch/x86/include/asm/cache.h
16346+++ b/arch/x86/include/asm/cache.h
16347@@ -5,12 +5,13 @@
16348
16349 /* L1 cache line size */
16350 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16351-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16352+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16353
16354 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16355+#define __read_only __attribute__((__section__(".data..read_only")))
16356
16357 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16358-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16359+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16360
16361 #ifdef CONFIG_X86_VSMP
16362 #ifdef CONFIG_SMP
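__read_only, defined above, parks a variable in .data..read_only, which the PaX kernel write-protects after init. User space gets no such protection, but the placement itself can be demonstrated; the variable below is hypothetical:

#include <stdio.h>

#define __read_only __attribute__((__section__(".data..read_only")))

static int hardening_level __read_only = 2;   /* lands in .data..read_only */

int main(void)
{
    printf("%d\n", hardening_level);
    return 0;
}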
16363diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
16364index 9863ee3..4a1f8e1 100644
16365--- a/arch/x86/include/asm/cacheflush.h
16366+++ b/arch/x86/include/asm/cacheflush.h
16367@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
16368 unsigned long pg_flags = pg->flags & _PGMT_MASK;
16369
16370 if (pg_flags == _PGMT_DEFAULT)
16371- return -1;
16372+ return ~0UL;
16373 else if (pg_flags == _PGMT_WC)
16374 return _PAGE_CACHE_WC;
16375 else if (pg_flags == _PGMT_UC_MINUS)
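get_page_memtype() returns unsigned long, so the hunk above spells its "no memtype" sentinel as ~0UL instead of -1: the same bit pattern, but with the type explicit (and, presumably, quiet under the overflow plugin). Compile-time check of the equivalence:

_Static_assert((unsigned long)-1 == ~0UL, "all-ones sentinel");

int main(void) { return 0; }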
16376diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16377index cb4c73b..c473c29 100644
16378--- a/arch/x86/include/asm/calling.h
16379+++ b/arch/x86/include/asm/calling.h
16380@@ -82,103 +82,113 @@ For 32-bit we have the following conventions - kernel is built with
16381 #define RSP 152
16382 #define SS 160
16383
16384-#define ARGOFFSET R11
16385-#define SWFRAME ORIG_RAX
16386+#define ARGOFFSET R15
16387
16388 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
16389- subq $9*8+\addskip, %rsp
16390- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
16391- movq_cfi rdi, 8*8
16392- movq_cfi rsi, 7*8
16393- movq_cfi rdx, 6*8
16394+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
16395+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
16396+ movq_cfi rdi, RDI
16397+ movq_cfi rsi, RSI
16398+ movq_cfi rdx, RDX
16399
16400 .if \save_rcx
16401- movq_cfi rcx, 5*8
16402+ movq_cfi rcx, RCX
16403 .endif
16404
16405- movq_cfi rax, 4*8
16406+ movq_cfi rax, RAX
16407
16408 .if \save_r891011
16409- movq_cfi r8, 3*8
16410- movq_cfi r9, 2*8
16411- movq_cfi r10, 1*8
16412- movq_cfi r11, 0*8
16413+ movq_cfi r8, R8
16414+ movq_cfi r9, R9
16415+ movq_cfi r10, R10
16416+ movq_cfi r11, R11
16417 .endif
16418
16419+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16420+ movq_cfi r12, R12
16421+#endif
16422+
16423 .endm
16424
16425-#define ARG_SKIP (9*8)
16426+#define ARG_SKIP ORIG_RAX
16427
16428 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
16429 rstor_r8910=1, rstor_rdx=1
16430+
16431+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16432+ movq_cfi_restore R12, r12
16433+#endif
16434+
16435 .if \rstor_r11
16436- movq_cfi_restore 0*8, r11
16437+ movq_cfi_restore R11, r11
16438 .endif
16439
16440 .if \rstor_r8910
16441- movq_cfi_restore 1*8, r10
16442- movq_cfi_restore 2*8, r9
16443- movq_cfi_restore 3*8, r8
16444+ movq_cfi_restore R10, r10
16445+ movq_cfi_restore R9, r9
16446+ movq_cfi_restore R8, r8
16447 .endif
16448
16449 .if \rstor_rax
16450- movq_cfi_restore 4*8, rax
16451+ movq_cfi_restore RAX, rax
16452 .endif
16453
16454 .if \rstor_rcx
16455- movq_cfi_restore 5*8, rcx
16456+ movq_cfi_restore RCX, rcx
16457 .endif
16458
16459 .if \rstor_rdx
16460- movq_cfi_restore 6*8, rdx
16461+ movq_cfi_restore RDX, rdx
16462 .endif
16463
16464- movq_cfi_restore 7*8, rsi
16465- movq_cfi_restore 8*8, rdi
16466+ movq_cfi_restore RSI, rsi
16467+ movq_cfi_restore RDI, rdi
16468
16469- .if ARG_SKIP+\addskip > 0
16470- addq $ARG_SKIP+\addskip, %rsp
16471- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
16472+ .if ORIG_RAX+\addskip > 0
16473+ addq $ORIG_RAX+\addskip, %rsp
16474+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
16475 .endif
16476 .endm
16477
16478- .macro LOAD_ARGS offset, skiprax=0
16479- movq \offset(%rsp), %r11
16480- movq \offset+8(%rsp), %r10
16481- movq \offset+16(%rsp), %r9
16482- movq \offset+24(%rsp), %r8
16483- movq \offset+40(%rsp), %rcx
16484- movq \offset+48(%rsp), %rdx
16485- movq \offset+56(%rsp), %rsi
16486- movq \offset+64(%rsp), %rdi
16487+ .macro LOAD_ARGS skiprax=0
16488+ movq R11(%rsp), %r11
16489+ movq R10(%rsp), %r10
16490+ movq R9(%rsp), %r9
16491+ movq R8(%rsp), %r8
16492+ movq RCX(%rsp), %rcx
16493+ movq RDX(%rsp), %rdx
16494+ movq RSI(%rsp), %rsi
16495+ movq RDI(%rsp), %rdi
16496 .if \skiprax
16497 .else
16498- movq \offset+72(%rsp), %rax
16499+ movq RAX(%rsp), %rax
16500 .endif
16501 .endm
16502
16503-#define REST_SKIP (6*8)
16504-
16505 .macro SAVE_REST
16506- subq $REST_SKIP, %rsp
16507- CFI_ADJUST_CFA_OFFSET REST_SKIP
16508- movq_cfi rbx, 5*8
16509- movq_cfi rbp, 4*8
16510- movq_cfi r12, 3*8
16511- movq_cfi r13, 2*8
16512- movq_cfi r14, 1*8
16513- movq_cfi r15, 0*8
16514+ movq_cfi rbx, RBX
16515+ movq_cfi rbp, RBP
16516+
16517+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16518+ movq_cfi r12, R12
16519+#endif
16520+
16521+ movq_cfi r13, R13
16522+ movq_cfi r14, R14
16523+ movq_cfi r15, R15
16524 .endm
16525
16526 .macro RESTORE_REST
16527- movq_cfi_restore 0*8, r15
16528- movq_cfi_restore 1*8, r14
16529- movq_cfi_restore 2*8, r13
16530- movq_cfi_restore 3*8, r12
16531- movq_cfi_restore 4*8, rbp
16532- movq_cfi_restore 5*8, rbx
16533- addq $REST_SKIP, %rsp
16534- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
16535+ movq_cfi_restore R15, r15
16536+ movq_cfi_restore R14, r14
16537+ movq_cfi_restore R13, r13
16538+
16539+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16540+ movq_cfi_restore R12, r12
16541+#endif
16542+
16543+ movq_cfi_restore RBP, rbp
16544+ movq_cfi_restore RBX, rbx
16545 .endm
16546
16547 .macro SAVE_ALL
16548diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
16549index f50de69..2b0a458 100644
16550--- a/arch/x86/include/asm/checksum_32.h
16551+++ b/arch/x86/include/asm/checksum_32.h
16552@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
16553 int len, __wsum sum,
16554 int *src_err_ptr, int *dst_err_ptr);
16555
16556+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
16557+ int len, __wsum sum,
16558+ int *src_err_ptr, int *dst_err_ptr);
16559+
16560+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
16561+ int len, __wsum sum,
16562+ int *src_err_ptr, int *dst_err_ptr);
16563+
16564 /*
16565 * Note: when you get a NULL pointer exception here this means someone
16566 * passed in an incorrect kernel address to one of these functions.
16567@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
16568
16569 might_sleep();
16570 stac();
16571- ret = csum_partial_copy_generic((__force void *)src, dst,
16572+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
16573 len, sum, err_ptr, NULL);
16574 clac();
16575
16576@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
16577 might_sleep();
16578 if (access_ok(VERIFY_WRITE, dst, len)) {
16579 stac();
16580- ret = csum_partial_copy_generic(src, (__force void *)dst,
16581+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
16582 len, sum, NULL, err_ptr);
16583 clac();
16584 return ret;
16585diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
16586index d47786a..2d8883e 100644
16587--- a/arch/x86/include/asm/cmpxchg.h
16588+++ b/arch/x86/include/asm/cmpxchg.h
16589@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
16590 __compiletime_error("Bad argument size for cmpxchg");
16591 extern void __xadd_wrong_size(void)
16592 __compiletime_error("Bad argument size for xadd");
16593+extern void __xadd_check_overflow_wrong_size(void)
16594+ __compiletime_error("Bad argument size for xadd_check_overflow");
16595 extern void __add_wrong_size(void)
16596 __compiletime_error("Bad argument size for add");
16597+extern void __add_check_overflow_wrong_size(void)
16598+ __compiletime_error("Bad argument size for add_check_overflow");
16599
16600 /*
16601 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
16602@@ -67,6 +71,38 @@ extern void __add_wrong_size(void)
16603 __ret; \
16604 })
16605
16606+#ifdef CONFIG_PAX_REFCOUNT
16607+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
16608+ ({ \
16609+ __typeof__ (*(ptr)) __ret = (arg); \
16610+ switch (sizeof(*(ptr))) { \
16611+ case __X86_CASE_L: \
16612+ asm volatile (lock #op "l %0, %1\n" \
16613+ "jno 0f\n" \
16614+ "mov %0,%1\n" \
16615+ "int $4\n0:\n" \
16616+ _ASM_EXTABLE(0b, 0b) \
16617+ : "+r" (__ret), "+m" (*(ptr)) \
16618+ : : "memory", "cc"); \
16619+ break; \
16620+ case __X86_CASE_Q: \
16621+ asm volatile (lock #op "q %q0, %1\n" \
16622+ "jno 0f\n" \
16623+ "mov %0,%1\n" \
16624+ "int $4\n0:\n" \
16625+ _ASM_EXTABLE(0b, 0b) \
16626+ : "+r" (__ret), "+m" (*(ptr)) \
16627+ : : "memory", "cc"); \
16628+ break; \
16629+ default: \
16630+ __ ## op ## _check_overflow_wrong_size(); \
16631+ } \
16632+ __ret; \
16633+ })
16634+#else
16635+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
16636+#endif
16637+
16638 /*
16639 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
16640 * Since this is generally used to protect other memory information, we
16641@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
16642 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
16643 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
16644
16645+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
16646+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
16647+
16648 #define __add(ptr, inc, lock) \
16649 ({ \
16650 __typeof__ (*(ptr)) __ret = (inc); \
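xadd returns the old value, which is why atomic_add_return() earlier in the patch reads "i + xadd_check_overflow(&v->counter, i)": old plus the addend gives the new value. The C11 equivalent of the unchecked path, as an illustrative sketch:

#include <stdatomic.h>
#include <stdio.h>

static inline int add_return_sketch(_Atomic int *v, int i)
{
    return i + atomic_fetch_add(v, i);   /* fetch_add compiles to lock xadd on x86 */
}

int main(void)
{
    _Atomic int v = 40;
    printf("%d\n", add_return_sketch(&v, 2));   /* prints 42 */
    return 0;
}

The __xchg_op_check_overflow macro above wraps the same xadd with the jno/undo/int $4 sequence when PAX_REFCOUNT is enabled, and falls back to the plain __xchg_op otherwise.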
16651diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
16652index 59c6c40..5e0b22c 100644
16653--- a/arch/x86/include/asm/compat.h
16654+++ b/arch/x86/include/asm/compat.h
16655@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
16656 typedef u32 compat_uint_t;
16657 typedef u32 compat_ulong_t;
16658 typedef u64 __attribute__((aligned(4))) compat_u64;
16659-typedef u32 compat_uptr_t;
16660+typedef u32 __user compat_uptr_t;
16661
16662 struct compat_timespec {
16663 compat_time_t tv_sec;
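Tagging compat_uptr_t with __user lets sparse reject direct dereferences of compat user pointers. The tag exists only for the checker; a minimal model of how the kernel defines it (mirroring include/linux/compiler.h from memory, so treat as an assumption):

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user                     /* vanishes in normal builds */
#endif

typedef unsigned int __user compat_uptr_t_sketch;

int main(void)
{
    compat_uptr_t_sketch p = 0;     /* still just a u32 holding an address */
    return (int)p;
}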
16664diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
16665index e265ff9..72c253b 100644
16666--- a/arch/x86/include/asm/cpufeature.h
16667+++ b/arch/x86/include/asm/cpufeature.h
16668@@ -203,7 +203,7 @@
16669 #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
16670 #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
16671 #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
16672-
16673+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
16674
16675 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
16676 #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
16677@@ -211,7 +211,7 @@
16678 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
16679 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
16680 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
16681-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
16682+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
16683 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
16684 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
16685 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
16686@@ -359,6 +359,7 @@ extern const char * const x86_power_flags[32];
16687 #undef cpu_has_centaur_mcr
16688 #define cpu_has_centaur_mcr 0
16689
16690+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
16691 #endif /* CONFIG_X86_64 */
16692
16693 #if __GNUC__ >= 4
16694@@ -411,7 +412,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16695
16696 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
16697 t_warn:
16698- warn_pre_alternatives();
16699+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
16700+ warn_pre_alternatives();
16701 return false;
16702 #endif
16703
16704@@ -431,7 +433,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16705 ".section .discard,\"aw\",@progbits\n"
16706 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16707 ".previous\n"
16708- ".section .altinstr_replacement,\"ax\"\n"
16709+ ".section .altinstr_replacement,\"a\"\n"
16710 "3: movb $1,%0\n"
16711 "4:\n"
16712 ".previous\n"
16713@@ -468,7 +470,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16714 " .byte 2b - 1b\n" /* src len */
16715 " .byte 4f - 3f\n" /* repl len */
16716 ".previous\n"
16717- ".section .altinstr_replacement,\"ax\"\n"
16718+ ".section .altinstr_replacement,\"a\"\n"
16719 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
16720 "4:\n"
16721 ".previous\n"
16722@@ -501,7 +503,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16723 ".section .discard,\"aw\",@progbits\n"
16724 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16725 ".previous\n"
16726- ".section .altinstr_replacement,\"ax\"\n"
16727+ ".section .altinstr_replacement,\"a\"\n"
16728 "3: movb $0,%0\n"
16729 "4:\n"
16730 ".previous\n"
16731@@ -515,7 +517,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16732 ".section .discard,\"aw\",@progbits\n"
16733 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
16734 ".previous\n"
16735- ".section .altinstr_replacement,\"ax\"\n"
16736+ ".section .altinstr_replacement,\"a\"\n"
16737 "5: movb $1,%0\n"
16738 "6:\n"
16739 ".previous\n"
16740diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
16741index 50d033a..37deb26 100644
16742--- a/arch/x86/include/asm/desc.h
16743+++ b/arch/x86/include/asm/desc.h
16744@@ -4,6 +4,7 @@
16745 #include <asm/desc_defs.h>
16746 #include <asm/ldt.h>
16747 #include <asm/mmu.h>
16748+#include <asm/pgtable.h>
16749
16750 #include <linux/smp.h>
16751 #include <linux/percpu.h>
16752@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16753
16754 desc->type = (info->read_exec_only ^ 1) << 1;
16755 desc->type |= info->contents << 2;
16756+ desc->type |= info->seg_not_present ^ 1;
16757
16758 desc->s = 1;
16759 desc->dpl = 0x3;
16760@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16761 }
16762
16763 extern struct desc_ptr idt_descr;
16764-extern gate_desc idt_table[];
16765-extern struct desc_ptr debug_idt_descr;
16766-extern gate_desc debug_idt_table[];
16767-
16768-struct gdt_page {
16769- struct desc_struct gdt[GDT_ENTRIES];
16770-} __attribute__((aligned(PAGE_SIZE)));
16771-
16772-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
16773+extern gate_desc idt_table[IDT_ENTRIES];
16774+extern const struct desc_ptr debug_idt_descr;
16775+extern gate_desc debug_idt_table[IDT_ENTRIES];
16776
16777+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
16778 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
16779 {
16780- return per_cpu(gdt_page, cpu).gdt;
16781+ return cpu_gdt_table[cpu];
16782 }
16783
16784 #ifdef CONFIG_X86_64
16785@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
16786 unsigned long base, unsigned dpl, unsigned flags,
16787 unsigned short seg)
16788 {
16789- gate->a = (seg << 16) | (base & 0xffff);
16790- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
16791+ gate->gate.offset_low = base;
16792+ gate->gate.seg = seg;
16793+ gate->gate.reserved = 0;
16794+ gate->gate.type = type;
16795+ gate->gate.s = 0;
16796+ gate->gate.dpl = dpl;
16797+ gate->gate.p = 1;
16798+ gate->gate.offset_high = base >> 16;
16799 }
16800
16801 #endif
16802@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
16803
16804 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
16805 {
16806+ pax_open_kernel();
16807 memcpy(&idt[entry], gate, sizeof(*gate));
16808+ pax_close_kernel();
16809 }
16810
16811 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
16812 {
16813+ pax_open_kernel();
16814 memcpy(&ldt[entry], desc, 8);
16815+ pax_close_kernel();
16816 }
16817
16818 static inline void
16819@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
16820 default: size = sizeof(*gdt); break;
16821 }
16822
16823+ pax_open_kernel();
16824 memcpy(&gdt[entry], desc, size);
16825+ pax_close_kernel();
16826 }
16827
16828 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
16829@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
16830
16831 static inline void native_load_tr_desc(void)
16832 {
16833+ pax_open_kernel();
16834 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
16835+ pax_close_kernel();
16836 }
16837
16838 static inline void native_load_gdt(const struct desc_ptr *dtr)
16839@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
16840 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
16841 unsigned int i;
16842
16843+ pax_open_kernel();
16844 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
16845 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
16846+ pax_close_kernel();
16847 }
16848
16849 #define _LDT_empty(info) \
16850@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
16851 preempt_enable();
16852 }
16853
16854-static inline unsigned long get_desc_base(const struct desc_struct *desc)
16855+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
16856 {
16857 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
16858 }
16859@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
16860 }
16861
16862 #ifdef CONFIG_X86_64
16863-static inline void set_nmi_gate(int gate, void *addr)
16864+static inline void set_nmi_gate(int gate, const void *addr)
16865 {
16866 gate_desc s;
16867
16868@@ -321,14 +334,14 @@ static inline void set_nmi_gate(int gate, void *addr)
16869 #endif
16870
16871 #ifdef CONFIG_TRACING
16872-extern struct desc_ptr trace_idt_descr;
16873-extern gate_desc trace_idt_table[];
16874+extern const struct desc_ptr trace_idt_descr;
16875+extern gate_desc trace_idt_table[IDT_ENTRIES];
16876 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16877 {
16878 write_idt_entry(trace_idt_table, entry, gate);
16879 }
16880
16881-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
16882+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
16883 unsigned dpl, unsigned ist, unsigned seg)
16884 {
16885 gate_desc s;
16886@@ -348,7 +361,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16887 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
16888 #endif
16889
16890-static inline void _set_gate(int gate, unsigned type, void *addr,
16891+static inline void _set_gate(int gate, unsigned type, const void *addr,
16892 unsigned dpl, unsigned ist, unsigned seg)
16893 {
16894 gate_desc s;
16895@@ -371,9 +384,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
16896 #define set_intr_gate(n, addr) \
16897 do { \
16898 BUG_ON((unsigned)n > 0xFF); \
16899- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
16900+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
16901 __KERNEL_CS); \
16902- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
16903+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
16904 0, 0, __KERNEL_CS); \
16905 } while (0)
16906
16907@@ -401,19 +414,19 @@ static inline void alloc_system_vector(int vector)
16908 /*
16909 * This routine sets up an interrupt gate at directory privilege level 3.
16910 */
16911-static inline void set_system_intr_gate(unsigned int n, void *addr)
16912+static inline void set_system_intr_gate(unsigned int n, const void *addr)
16913 {
16914 BUG_ON((unsigned)n > 0xFF);
16915 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
16916 }
16917
16918-static inline void set_system_trap_gate(unsigned int n, void *addr)
16919+static inline void set_system_trap_gate(unsigned int n, const void *addr)
16920 {
16921 BUG_ON((unsigned)n > 0xFF);
16922 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
16923 }
16924
16925-static inline void set_trap_gate(unsigned int n, void *addr)
16926+static inline void set_trap_gate(unsigned int n, const void *addr)
16927 {
16928 BUG_ON((unsigned)n > 0xFF);
16929 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
16930@@ -422,16 +435,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
16931 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
16932 {
16933 BUG_ON((unsigned)n > 0xFF);
16934- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
16935+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
16936 }
16937
16938-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
16939+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
16940 {
16941 BUG_ON((unsigned)n > 0xFF);
16942 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
16943 }
16944
16945-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
16946+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
16947 {
16948 BUG_ON((unsigned)n > 0xFF);
16949 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
16950@@ -503,4 +516,17 @@ static inline void load_current_idt(void)
16951 else
16952 load_idt((const struct desc_ptr *)&idt_descr);
16953 }
16954+
16955+#ifdef CONFIG_X86_32
16956+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
16957+{
16958+ struct desc_struct d;
16959+
16960+ if (likely(limit))
16961+ limit = (limit - 1UL) >> PAGE_SHIFT;
16962+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
16963+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
16964+}
16965+#endif
16966+
16967 #endif /* _ASM_X86_DESC_H */
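The set_user_cs() helper added above repacks the default user code segment with a per-mm base and limit (the fields the mmu.h hunk later adds); the limit is pre-shifted because with 4K granularity the descriptor counts pages, inclusively. The two magic arguments decode as in this userspace sketch, with the values taken from the hunk:

#include <stdio.h>

int main(void)
{
    unsigned type  = 0xFB; /* P=1, DPL=3, S=1, type=1011b: code, readable, accessed */
    unsigned flags = 0xC;  /* G=1 (limit in 4K pages), D=1 (32-bit default size)    */

    printf("present=%u dpl=%u code_segment=%u readable=%u\n",
           (type >> 7) & 1, (type >> 5) & 3, (type >> 3) & 1, (type >> 1) & 1);
    printf("granularity_4k=%u default_32bit=%u\n",
           (flags >> 3) & 1, (flags >> 2) & 1);
    return 0;
}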
16968diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
16969index 278441f..b95a174 100644
16970--- a/arch/x86/include/asm/desc_defs.h
16971+++ b/arch/x86/include/asm/desc_defs.h
16972@@ -31,6 +31,12 @@ struct desc_struct {
16973 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
16974 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
16975 };
16976+ struct {
16977+ u16 offset_low;
16978+ u16 seg;
16979+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
16980+ unsigned offset_high: 16;
16981+ } gate;
16982 };
16983 } __attribute__((packed));
16984
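The new 'gate' view is bit-for-bit the same eight bytes that the old shift/mask code in pack_gate() built by hand. A userspace check of that equivalence for a 32-bit interrupt gate; layout assumptions are little-endian x86 with GCC's low-to-high bitfield allocation:

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct gate32 {
    uint16_t offset_low;
    uint16_t seg;
    unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
    unsigned offset_high : 16;
} __attribute__((packed));

int main(void)
{
    unsigned long base = 0x12345678, seg = 0x10, type = 0xE, dpl = 0;

    /* the old encoding, as removed from pack_gate() */
    uint32_t a = (seg << 16) | (base & 0xffff);
    uint32_t b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);

    /* the new field-by-field encoding */
    struct gate32 g = {
        .offset_low = base, .seg = seg, .reserved = 0, .type = type,
        .s = 0, .dpl = dpl, .p = 1, .offset_high = base >> 16,
    };
    uint32_t w[2];
    memcpy(w, &g, sizeof(w));
    assert(w[0] == a && w[1] == b);
    return 0;
}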
16985diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
16986index ced283a..ffe04cc 100644
16987--- a/arch/x86/include/asm/div64.h
16988+++ b/arch/x86/include/asm/div64.h
16989@@ -39,7 +39,7 @@
16990 __mod; \
16991 })
16992
16993-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16994+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16995 {
16996 union {
16997 u64 v64;
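div_u64_rem() gets the same __intentional_overflow(-1) marker used throughout this patch. A sketch of what the annotation is, under the assumption that it mirrors this patch's compiler-header definitions: the size_overflow GCC plugin consumes it, with -1 naming the return value as exempt from overflow instrumentation, and it compiles away entirely when the plugin is not active (SIZE_OVERFLOW_PLUGIN below is a stand-in for the real config test):

#ifdef SIZE_OVERFLOW_PLUGIN
# define __intentional_overflow(...) \
         __attribute__((intentional_overflow(__VA_ARGS__)))
#else
# define __intentional_overflow(...)
#endif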
16998diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
16999index 1a055c8..a1701de 100644
17000--- a/arch/x86/include/asm/elf.h
17001+++ b/arch/x86/include/asm/elf.h
17002@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
17003
17004 #include <asm/vdso.h>
17005
17006-#ifdef CONFIG_X86_64
17007-extern unsigned int vdso64_enabled;
17008-#endif
17009 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
17010 extern unsigned int vdso32_enabled;
17011 #endif
17012@@ -248,7 +245,25 @@ extern int force_personality32;
17013 the loader. We need to make sure that it is out of the way of the program
17014 that it will "exec", and that there is sufficient room for the brk. */
17015
17016+#ifdef CONFIG_PAX_SEGMEXEC
17017+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
17018+#else
17019 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
17020+#endif
17021+
17022+#ifdef CONFIG_PAX_ASLR
17023+#ifdef CONFIG_X86_32
17024+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
17025+
17026+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
17027+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
17028+#else
17029+#define PAX_ELF_ET_DYN_BASE 0x400000UL
17030+
17031+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
17032+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
17033+#endif
17034+#endif
17035
17036 /* This yields a mask that user programs can use to figure out what
17037 instruction set this CPU supports. This could be done in user space,
17038@@ -297,17 +312,13 @@ do { \
17039
17040 #define ARCH_DLINFO \
17041 do { \
17042- if (vdso64_enabled) \
17043- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
17044- (unsigned long __force)current->mm->context.vdso); \
17045+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
17046 } while (0)
17047
17048 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
17049 #define ARCH_DLINFO_X32 \
17050 do { \
17051- if (vdso64_enabled) \
17052- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
17053- (unsigned long __force)current->mm->context.vdso); \
17054+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
17055 } while (0)
17056
17057 #define AT_SYSINFO 32
17058@@ -322,10 +333,10 @@ else \
17059
17060 #endif /* !CONFIG_X86_32 */
17061
17062-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
17063+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
17064
17065 #define VDSO_ENTRY \
17066- ((unsigned long)current->mm->context.vdso + \
17067+ (current->mm->context.vdso + \
17068 selected_vdso32->sym___kernel_vsyscall)
17069
17070 struct linux_binprm;
17071@@ -337,9 +348,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
17072 int uses_interp);
17073 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
17074
17075-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
17076-#define arch_randomize_brk arch_randomize_brk
17077-
17078 /*
17079 * True on X86_32 or when emulating IA32 on X86_64
17080 */
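The PAX_DELTA_*_LEN values above are entropy widths in bits: the randomization code draws that many random bits and shifts them by PAGE_SHIFT, so the displacement span follows directly. Illustrative arithmetic for the 32-bit non-SEGMEXEC case from the hunk (16 bits, 4 KiB pages):

#include <stdio.h>

int main(void)
{
    unsigned bits = 16, page_shift = 12;
    unsigned long span = ((1UL << bits) - 1) << page_shift;

    printf("max displacement: %lu bytes (~%lu MiB)\n", span, span >> 20);
    return 0;
}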
17081diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
17082index 77a99ac..39ff7f5 100644
17083--- a/arch/x86/include/asm/emergency-restart.h
17084+++ b/arch/x86/include/asm/emergency-restart.h
17085@@ -1,6 +1,6 @@
17086 #ifndef _ASM_X86_EMERGENCY_RESTART_H
17087 #define _ASM_X86_EMERGENCY_RESTART_H
17088
17089-extern void machine_emergency_restart(void);
17090+extern void machine_emergency_restart(void) __noreturn;
17091
17092 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
17093diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
17094index 1c7eefe..d0e4702 100644
17095--- a/arch/x86/include/asm/floppy.h
17096+++ b/arch/x86/include/asm/floppy.h
17097@@ -229,18 +229,18 @@ static struct fd_routine_l {
17098 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
17099 } fd_routine[] = {
17100 {
17101- request_dma,
17102- free_dma,
17103- get_dma_residue,
17104- dma_mem_alloc,
17105- hard_dma_setup
17106+ ._request_dma = request_dma,
17107+ ._free_dma = free_dma,
17108+ ._get_dma_residue = get_dma_residue,
17109+ ._dma_mem_alloc = dma_mem_alloc,
17110+ ._dma_setup = hard_dma_setup
17111 },
17112 {
17113- vdma_request_dma,
17114- vdma_nop,
17115- vdma_get_dma_residue,
17116- vdma_mem_alloc,
17117- vdma_dma_setup
17118+ ._request_dma = vdma_request_dma,
17119+ ._free_dma = vdma_nop,
17120+ ._get_dma_residue = vdma_get_dma_residue,
17121+ ._dma_mem_alloc = vdma_mem_alloc,
17122+ ._dma_setup = vdma_dma_setup
17123 }
17124 };
17125
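The floppy hunk converts positional initializers to designated ones. That is not cosmetic here: this patch randomizes the layout of function-pointer-only structures elsewhere (RANDSTRUCT), and designated initializers are the only form that survives, or is accepted for, field reordering. A minimal sketch with hypothetical names:

struct ops {
    int  (*open)(void);
    void (*close)(void);
};

static int  demo_open(void)  { return 0; }
static void demo_close(void) { }

/* breaks if the fields of struct ops are ever reordered */
static struct ops positional = { demo_open, demo_close };

/* order-independent; this is the form the hunk switches to */
static struct ops designated = { .open = demo_open, .close = demo_close };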
17126diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
17127index 115e368..76ecf6c 100644
17128--- a/arch/x86/include/asm/fpu-internal.h
17129+++ b/arch/x86/include/asm/fpu-internal.h
17130@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
17131 #define user_insn(insn, output, input...) \
17132 ({ \
17133 int err; \
17134+ pax_open_userland(); \
17135 asm volatile(ASM_STAC "\n" \
17136- "1:" #insn "\n\t" \
17137+ "1:" \
17138+ __copyuser_seg \
17139+ #insn "\n\t" \
17140 "2: " ASM_CLAC "\n" \
17141 ".section .fixup,\"ax\"\n" \
17142 "3: movl $-1,%[err]\n" \
17143@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
17144 _ASM_EXTABLE(1b, 3b) \
17145 : [err] "=r" (err), output \
17146 : "0"(0), input); \
17147+ pax_close_userland(); \
17148 err; \
17149 })
17150
17151@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
17152 "fnclex\n\t"
17153 "emms\n\t"
17154 "fildl %P[addr]" /* set F?P to defined value */
17155- : : [addr] "m" (tsk->thread.fpu.has_fpu));
17156+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
17157 }
17158
17159 return fpu_restore_checking(&tsk->thread.fpu);
17160diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
17161index b4c1f54..e290c08 100644
17162--- a/arch/x86/include/asm/futex.h
17163+++ b/arch/x86/include/asm/futex.h
17164@@ -12,6 +12,7 @@
17165 #include <asm/smap.h>
17166
17167 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
17168+ typecheck(u32 __user *, uaddr); \
17169 asm volatile("\t" ASM_STAC "\n" \
17170 "1:\t" insn "\n" \
17171 "2:\t" ASM_CLAC "\n" \
17172@@ -20,15 +21,16 @@
17173 "\tjmp\t2b\n" \
17174 "\t.previous\n" \
17175 _ASM_EXTABLE(1b, 3b) \
17176- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
17177+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
17178 : "i" (-EFAULT), "0" (oparg), "1" (0))
17179
17180 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
17181+ typecheck(u32 __user *, uaddr); \
17182 asm volatile("\t" ASM_STAC "\n" \
17183 "1:\tmovl %2, %0\n" \
17184 "\tmovl\t%0, %3\n" \
17185 "\t" insn "\n" \
17186- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
17187+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
17188 "\tjnz\t1b\n" \
17189 "3:\t" ASM_CLAC "\n" \
17190 "\t.section .fixup,\"ax\"\n" \
17191@@ -38,7 +40,7 @@
17192 _ASM_EXTABLE(1b, 4b) \
17193 _ASM_EXTABLE(2b, 4b) \
17194 : "=&a" (oldval), "=&r" (ret), \
17195- "+m" (*uaddr), "=&r" (tem) \
17196+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
17197 : "r" (oparg), "i" (-EFAULT), "1" (0))
17198
17199 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17200@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17201
17202 pagefault_disable();
17203
17204+ pax_open_userland();
17205 switch (op) {
17206 case FUTEX_OP_SET:
17207- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
17208+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
17209 break;
17210 case FUTEX_OP_ADD:
17211- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
17212+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
17213 uaddr, oparg);
17214 break;
17215 case FUTEX_OP_OR:
17216@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17217 default:
17218 ret = -ENOSYS;
17219 }
17220+ pax_close_userland();
17221
17222 pagefault_enable();
17223
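Both futex macros now start with typecheck(u32 __user *, uaddr), so a caller passing any other pointer type stops compiling instead of silently bypassing the ____m()/__copyuser_seg user-segment handling. typecheck() is the stock kernel idiom from <linux/typecheck.h>: it evaluates to 1 but provokes a pointer-comparison diagnostic on any type mismatch:

#define typecheck(type, x)                  \
({      type __dummy;                       \
        __typeof__(x) __dummy2;             \
        (void)(&__dummy == &__dummy2);      \
        1;                                  \
})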
17224diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
17225index 4615906..788c817 100644
17226--- a/arch/x86/include/asm/hw_irq.h
17227+++ b/arch/x86/include/asm/hw_irq.h
17228@@ -164,8 +164,8 @@ extern void setup_ioapic_dest(void);
17229 extern void enable_IO_APIC(void);
17230
17231 /* Statistics */
17232-extern atomic_t irq_err_count;
17233-extern atomic_t irq_mis_count;
17234+extern atomic_unchecked_t irq_err_count;
17235+extern atomic_unchecked_t irq_mis_count;
17236
17237 /* EISA */
17238 extern void eisa_set_level_irq(unsigned int irq);
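irq_err_count and irq_mis_count are pure statistics, so wrapping is harmless; converting them to atomic_unchecked_t exempts them from the PAX_REFCOUNT overflow trap that the checked atomic_t operations carry. Conceptually (an x86 sketch; the real type lives in this patch's atomic headers):

typedef struct { int counter; } demo_atomic_unchecked_t;

/* plain wrapping increment: no "jno ... int $4" overflow check */
static inline void demo_atomic_inc_unchecked(demo_atomic_unchecked_t *v)
{
    asm volatile("lock incl %0" : "+m" (v->counter));
}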
17239diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
17240index a203659..9889f1c 100644
17241--- a/arch/x86/include/asm/i8259.h
17242+++ b/arch/x86/include/asm/i8259.h
17243@@ -62,7 +62,7 @@ struct legacy_pic {
17244 void (*init)(int auto_eoi);
17245 int (*irq_pending)(unsigned int irq);
17246 void (*make_irq)(unsigned int irq);
17247-};
17248+} __do_const;
17249
17250 extern struct legacy_pic *legacy_pic;
17251 extern struct legacy_pic null_legacy_pic;
17252diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
17253index b8237d8..3e8864e 100644
17254--- a/arch/x86/include/asm/io.h
17255+++ b/arch/x86/include/asm/io.h
17256@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
17257 "m" (*(volatile type __force *)addr) barrier); }
17258
17259 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
17260-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
17261-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
17262+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
17263+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
17264
17265 build_mmio_read(__readb, "b", unsigned char, "=q", )
17266-build_mmio_read(__readw, "w", unsigned short, "=r", )
17267-build_mmio_read(__readl, "l", unsigned int, "=r", )
17268+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
17269+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
17270
17271 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
17272 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
17273@@ -109,7 +109,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
17274 * this function
17275 */
17276
17277-static inline phys_addr_t virt_to_phys(volatile void *address)
17278+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
17279 {
17280 return __pa(address);
17281 }
17282@@ -185,7 +185,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
17283 return ioremap_nocache(offset, size);
17284 }
17285
17286-extern void iounmap(volatile void __iomem *addr);
17287+extern void iounmap(const volatile void __iomem *addr);
17288
17289 extern void set_iounmap_nonlazy(void);
17290
17291@@ -195,6 +195,17 @@ extern void set_iounmap_nonlazy(void);
17292
17293 #include <linux/vmalloc.h>
17294
17295+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17296+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17297+{
17298+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17299+}
17300+
17301+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17302+{
17303+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17304+}
17305+
17306 /*
17307 * Convert a virtual cached pointer to an uncached pointer
17308 */
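The new valid_phys_addr_range()/valid_mmap_phys_addr_range() reject /dev/mem accesses beyond what the CPU can physically address; the bound is simply 2^(x86_phys_bits - PAGE_SHIFT) page frames. Worked numbers for a 36-bit (PAE-era) machine:

#include <stdio.h>

int main(void)
{
    unsigned phys_bits = 36, page_shift = 12;
    unsigned long long pfns = 1ULL << (phys_bits - page_shift);

    printf("valid pfns: 0 .. %llu (%llu GiB of physical space)\n",
           pfns - 1, (pfns << page_shift) >> 30);
    return 0;
}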
17309diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17310index 0a8b519..80e7d5b 100644
17311--- a/arch/x86/include/asm/irqflags.h
17312+++ b/arch/x86/include/asm/irqflags.h
17313@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17314 sti; \
17315 sysexit
17316
17317+#define GET_CR0_INTO_RDI mov %cr0, %rdi
17318+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17319+#define GET_CR3_INTO_RDI mov %cr3, %rdi
17320+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17321+
17322 #else
17323 #define INTERRUPT_RETURN iret
17324 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17325diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17326index 53cdfb2..d1369e6 100644
17327--- a/arch/x86/include/asm/kprobes.h
17328+++ b/arch/x86/include/asm/kprobes.h
17329@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
17330 #define RELATIVEJUMP_SIZE 5
17331 #define RELATIVECALL_OPCODE 0xe8
17332 #define RELATIVE_ADDR_SIZE 4
17333-#define MAX_STACK_SIZE 64
17334-#define MIN_STACK_SIZE(ADDR) \
17335- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17336- THREAD_SIZE - (unsigned long)(ADDR))) \
17337- ? (MAX_STACK_SIZE) \
17338- : (((unsigned long)current_thread_info()) + \
17339- THREAD_SIZE - (unsigned long)(ADDR)))
17340+#define MAX_STACK_SIZE 64UL
17341+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17342
17343 #define flush_insn_slot(p) do { } while (0)
17344
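The rewritten MIN_STACK_SIZE measures against current->thread.sp0, the top of the kernel stack, instead of current_thread_info() arithmetic, which may no longer describe the stack once GRKERNSEC_KSTACKOVERFLOW moves stacks into vmalloc space. What the new macro computes, in isolation:

/* bytes available between ADDR and the stack top, clamped to 64 */
static inline unsigned long demo_min_stack_size(unsigned long addr,
                                                unsigned long sp0)
{
    unsigned long avail = sp0 - addr;
    return avail < 64UL ? avail : 64UL;
}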
17345diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17346index 4ad6560..75c7bdd 100644
17347--- a/arch/x86/include/asm/local.h
17348+++ b/arch/x86/include/asm/local.h
17349@@ -10,33 +10,97 @@ typedef struct {
17350 atomic_long_t a;
17351 } local_t;
17352
17353+typedef struct {
17354+ atomic_long_unchecked_t a;
17355+} local_unchecked_t;
17356+
17357 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17358
17359 #define local_read(l) atomic_long_read(&(l)->a)
17360+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17361 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17362+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17363
17364 static inline void local_inc(local_t *l)
17365 {
17366- asm volatile(_ASM_INC "%0"
17367+ asm volatile(_ASM_INC "%0\n"
17368+
17369+#ifdef CONFIG_PAX_REFCOUNT
17370+ "jno 0f\n"
17371+ _ASM_DEC "%0\n"
17372+ "int $4\n0:\n"
17373+ _ASM_EXTABLE(0b, 0b)
17374+#endif
17375+
17376+ : "+m" (l->a.counter));
17377+}
17378+
17379+static inline void local_inc_unchecked(local_unchecked_t *l)
17380+{
17381+ asm volatile(_ASM_INC "%0\n"
17382 : "+m" (l->a.counter));
17383 }
17384
17385 static inline void local_dec(local_t *l)
17386 {
17387- asm volatile(_ASM_DEC "%0"
17388+ asm volatile(_ASM_DEC "%0\n"
17389+
17390+#ifdef CONFIG_PAX_REFCOUNT
17391+ "jno 0f\n"
17392+ _ASM_INC "%0\n"
17393+ "int $4\n0:\n"
17394+ _ASM_EXTABLE(0b, 0b)
17395+#endif
17396+
17397+ : "+m" (l->a.counter));
17398+}
17399+
17400+static inline void local_dec_unchecked(local_unchecked_t *l)
17401+{
17402+ asm volatile(_ASM_DEC "%0\n"
17403 : "+m" (l->a.counter));
17404 }
17405
17406 static inline void local_add(long i, local_t *l)
17407 {
17408- asm volatile(_ASM_ADD "%1,%0"
17409+ asm volatile(_ASM_ADD "%1,%0\n"
17410+
17411+#ifdef CONFIG_PAX_REFCOUNT
17412+ "jno 0f\n"
17413+ _ASM_SUB "%1,%0\n"
17414+ "int $4\n0:\n"
17415+ _ASM_EXTABLE(0b, 0b)
17416+#endif
17417+
17418+ : "+m" (l->a.counter)
17419+ : "ir" (i));
17420+}
17421+
17422+static inline void local_add_unchecked(long i, local_unchecked_t *l)
17423+{
17424+ asm volatile(_ASM_ADD "%1,%0\n"
17425 : "+m" (l->a.counter)
17426 : "ir" (i));
17427 }
17428
17429 static inline void local_sub(long i, local_t *l)
17430 {
17431- asm volatile(_ASM_SUB "%1,%0"
17432+ asm volatile(_ASM_SUB "%1,%0\n"
17433+
17434+#ifdef CONFIG_PAX_REFCOUNT
17435+ "jno 0f\n"
17436+ _ASM_ADD "%1,%0\n"
17437+ "int $4\n0:\n"
17438+ _ASM_EXTABLE(0b, 0b)
17439+#endif
17440+
17441+ : "+m" (l->a.counter)
17442+ : "ir" (i));
17443+}
17444+
17445+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17446+{
17447+ asm volatile(_ASM_SUB "%1,%0\n"
17448 : "+m" (l->a.counter)
17449 : "ir" (i));
17450 }
17451@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17452 */
17453 static inline int local_sub_and_test(long i, local_t *l)
17454 {
17455- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17456+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17457 }
17458
17459 /**
17460@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17461 */
17462 static inline int local_dec_and_test(local_t *l)
17463 {
17464- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17465+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17466 }
17467
17468 /**
17469@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17470 */
17471 static inline int local_inc_and_test(local_t *l)
17472 {
17473- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17474+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17475 }
17476
17477 /**
17478@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17479 */
17480 static inline int local_add_negative(long i, local_t *l)
17481 {
17482- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17483+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17484 }
17485
17486 /**
17487@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17488 static inline long local_add_return(long i, local_t *l)
17489 {
17490 long __i = i;
17491+ asm volatile(_ASM_XADD "%0, %1\n"
17492+
17493+#ifdef CONFIG_PAX_REFCOUNT
17494+ "jno 0f\n"
17495+ _ASM_MOV "%0,%1\n"
17496+ "int $4\n0:\n"
17497+ _ASM_EXTABLE(0b, 0b)
17498+#endif
17499+
17500+ : "+r" (i), "+m" (l->a.counter)
17501+ : : "memory");
17502+ return i + __i;
17503+}
17504+
17505+/**
17506+ * local_add_return_unchecked - add and return
17507+ * @i: integer value to add
17508+ * @l: pointer to type local_unchecked_t
17509+ *
17510+ * Atomically adds @i to @l and returns @i + @l
17511+ */
17512+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
17513+{
17514+ long __i = i;
17515 asm volatile(_ASM_XADD "%0, %1;"
17516 : "+r" (i), "+m" (l->a.counter)
17517 : : "memory");
17518@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
17519
17520 #define local_cmpxchg(l, o, n) \
17521 (cmpxchg_local(&((l)->a.counter), (o), (n)))
17522+#define local_cmpxchg_unchecked(l, o, n) \
17523+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
17524 /* Always has a lock prefix */
17525 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
17526
17527diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
17528new file mode 100644
17529index 0000000..2bfd3ba
17530--- /dev/null
17531+++ b/arch/x86/include/asm/mman.h
17532@@ -0,0 +1,15 @@
17533+#ifndef _X86_MMAN_H
17534+#define _X86_MMAN_H
17535+
17536+#include <uapi/asm/mman.h>
17537+
17538+#ifdef __KERNEL__
17539+#ifndef __ASSEMBLY__
17540+#ifdef CONFIG_X86_32
17541+#define arch_mmap_check i386_mmap_check
17542+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
17543+#endif
17544+#endif
17545+#endif
17546+
17547+#endif /* X86_MMAN_H */
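Defining arch_mmap_check gives the architecture a veto in the generic mmap()/mremap() paths before any mapping is created; the real i386_mmap_check policy (SEGMEXEC's halved address space) lives elsewhere in this patch. The shape of such a hook, as an illustrative sketch only:

#include <errno.h>

/* returning a negative errno here makes the mmap() call fail early */
int demo_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
{
    const unsigned long task_size = 0xC0000000UL;   /* illustrative 3G split */

    if (len > task_size || addr > task_size - len)
        return -EINVAL;
    return 0;
}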
17548diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
17549index 876e74e..e20bfb1 100644
17550--- a/arch/x86/include/asm/mmu.h
17551+++ b/arch/x86/include/asm/mmu.h
17552@@ -9,7 +9,7 @@
17553 * we put the segment information here.
17554 */
17555 typedef struct {
17556- void *ldt;
17557+ struct desc_struct *ldt;
17558 int size;
17559
17560 #ifdef CONFIG_X86_64
17561@@ -18,7 +18,19 @@ typedef struct {
17562 #endif
17563
17564 struct mutex lock;
17565- void __user *vdso;
17566+ unsigned long vdso;
17567+
17568+#ifdef CONFIG_X86_32
17569+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17570+ unsigned long user_cs_base;
17571+ unsigned long user_cs_limit;
17572+
17573+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17574+ cpumask_t cpu_user_cs_mask;
17575+#endif
17576+
17577+#endif
17578+#endif
17579 } mm_context_t;
17580
17581 #ifdef CONFIG_SMP
17582diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
17583index be12c53..07fd3ca 100644
17584--- a/arch/x86/include/asm/mmu_context.h
17585+++ b/arch/x86/include/asm/mmu_context.h
17586@@ -24,6 +24,20 @@ void destroy_context(struct mm_struct *mm);
17587
17588 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
17589 {
17590+
17591+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17592+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
17593+ unsigned int i;
17594+ pgd_t *pgd;
17595+
17596+ pax_open_kernel();
17597+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
17598+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
17599+ set_pgd_batched(pgd+i, native_make_pgd(0));
17600+ pax_close_kernel();
17601+ }
17602+#endif
17603+
17604 #ifdef CONFIG_SMP
17605 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
17606 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
17607@@ -34,16 +48,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17608 struct task_struct *tsk)
17609 {
17610 unsigned cpu = smp_processor_id();
17611+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17612+ int tlbstate = TLBSTATE_OK;
17613+#endif
17614
17615 if (likely(prev != next)) {
17616 #ifdef CONFIG_SMP
17617+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17618+ tlbstate = this_cpu_read(cpu_tlbstate.state);
17619+#endif
17620 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17621 this_cpu_write(cpu_tlbstate.active_mm, next);
17622 #endif
17623 cpumask_set_cpu(cpu, mm_cpumask(next));
17624
17625 /* Re-load page tables */
17626+#ifdef CONFIG_PAX_PER_CPU_PGD
17627+ pax_open_kernel();
17628+
17629+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17630+ if (static_cpu_has(X86_FEATURE_PCID))
17631+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17632+ else
17633+#endif
17634+
17635+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17636+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17637+ pax_close_kernel();
17638+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17639+
17640+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17641+ if (static_cpu_has(X86_FEATURE_PCID)) {
17642+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17643+ u64 descriptor[2];
17644+ descriptor[0] = PCID_USER;
17645+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17646+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17647+ descriptor[0] = PCID_KERNEL;
17648+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17649+ }
17650+ } else {
17651+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17652+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17653+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17654+ else
17655+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17656+ }
17657+ } else
17658+#endif
17659+
17660+ load_cr3(get_cpu_pgd(cpu, kernel));
17661+#else
17662 load_cr3(next->pgd);
17663+#endif
17664
17665 /* Stop flush ipis for the previous mm */
17666 cpumask_clear_cpu(cpu, mm_cpumask(prev));
17667@@ -51,9 +108,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17668 /* Load the LDT, if the LDT is different: */
17669 if (unlikely(prev->context.ldt != next->context.ldt))
17670 load_LDT_nolock(&next->context);
17671+
17672+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17673+ if (!(__supported_pte_mask & _PAGE_NX)) {
17674+ smp_mb__before_atomic();
17675+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
17676+ smp_mb__after_atomic();
17677+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17678+ }
17679+#endif
17680+
17681+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17682+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
17683+ prev->context.user_cs_limit != next->context.user_cs_limit))
17684+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17685+#ifdef CONFIG_SMP
17686+ else if (unlikely(tlbstate != TLBSTATE_OK))
17687+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17688+#endif
17689+#endif
17690+
17691 }
17692+ else {
17693+
17694+#ifdef CONFIG_PAX_PER_CPU_PGD
17695+ pax_open_kernel();
17696+
17697+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17698+ if (static_cpu_has(X86_FEATURE_PCID))
17699+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17700+ else
17701+#endif
17702+
17703+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17704+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17705+ pax_close_kernel();
17706+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17707+
17708+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17709+ if (static_cpu_has(X86_FEATURE_PCID)) {
17710+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17711+ u64 descriptor[2];
17712+ descriptor[0] = PCID_USER;
17713+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17714+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17715+ descriptor[0] = PCID_KERNEL;
17716+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17717+ }
17718+ } else {
17719+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17720+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17721+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17722+ else
17723+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17724+ }
17725+ } else
17726+#endif
17727+
17728+ load_cr3(get_cpu_pgd(cpu, kernel));
17729+#endif
17730+
17731 #ifdef CONFIG_SMP
17732- else {
17733 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17734 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
17735
17736@@ -70,11 +185,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17737 * tlb flush IPI delivery. We must reload CR3
17738 * to make sure to use no freed page tables.
17739 */
17740+
17741+#ifndef CONFIG_PAX_PER_CPU_PGD
17742 load_cr3(next->pgd);
17743+#endif
17744+
17745 load_LDT_nolock(&next->context);
17746+
17747+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17748+ if (!(__supported_pte_mask & _PAGE_NX))
17749+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17750+#endif
17751+
17752+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17753+#ifdef CONFIG_PAX_PAGEEXEC
17754+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
17755+#endif
17756+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17757+#endif
17758+
17759 }
17760+#endif
17761 }
17762-#endif
17763 }
17764
17765 #define activate_mm(prev, next) \
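The PCID branches above flush the user address space by PCID instead of reloading CR3: descriptor[0] carries the PCID, and the linear-address half is ignored for the single-context type (the hunk feeds the descriptor through %rdx and the type through %rax into an __ASM_INVPCID macro defined elsewhere in the patch, for assemblers that predate the mnemonic). A sketch of the instruction's shape, assuming INVPCID_SINGLE_CONTEXT == 1 and a current assembler:

struct demo_invpcid_desc {
    unsigned long pcid;
    unsigned long addr;   /* ignored for the single-context type */
};

static inline void demo_invpcid_single_context(unsigned long pcid)
{
    struct demo_invpcid_desc desc = { .pcid = pcid, .addr = 0 };

    /* type 1: drop all TLB entries tagged with 'pcid', keep the rest */
    asm volatile("invpcid %0, %1" : : "m" (desc), "r" (1UL) : "memory");
}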
17766diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
17767index e3b7819..b257c64 100644
17768--- a/arch/x86/include/asm/module.h
17769+++ b/arch/x86/include/asm/module.h
17770@@ -5,6 +5,7 @@
17771
17772 #ifdef CONFIG_X86_64
17773 /* X86_64 does not define MODULE_PROC_FAMILY */
17774+#define MODULE_PROC_FAMILY ""
17775 #elif defined CONFIG_M486
17776 #define MODULE_PROC_FAMILY "486 "
17777 #elif defined CONFIG_M586
17778@@ -57,8 +58,20 @@
17779 #error unknown processor family
17780 #endif
17781
17782-#ifdef CONFIG_X86_32
17783-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
17784+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
17785+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
17786+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
17787+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
17788+#else
17789+#define MODULE_PAX_KERNEXEC ""
17790 #endif
17791
17792+#ifdef CONFIG_PAX_MEMORY_UDEREF
17793+#define MODULE_PAX_UDEREF "UDEREF "
17794+#else
17795+#define MODULE_PAX_UDEREF ""
17796+#endif
17797+
17798+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
17799+
17800 #endif /* _ASM_X86_MODULE_H */
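The net effect of the module.h changes: MODULE_ARCH_VERMAGIC now exists on x86-64 too (via the empty MODULE_PROC_FAMILY) and appends the active PaX method names, so a module built with mismatched KERNEXEC/UDEREF settings is refused by the ordinary vermagic comparison at load time. The string concatenation at work, for one illustrative configuration:

#include <stdio.h>

#define MODULE_PROC_FAMILY  ""                /* x86-64 */
#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "   /* CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS */
#define MODULE_PAX_UDEREF   "UDEREF "         /* CONFIG_PAX_MEMORY_UDEREF */

int main(void)
{
    puts(MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF);
    /* prints: KERNEXEC_BTS UDEREF */
    return 0;
}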
17801diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
17802index 5f2fc44..106caa6 100644
17803--- a/arch/x86/include/asm/nmi.h
17804+++ b/arch/x86/include/asm/nmi.h
17805@@ -36,26 +36,35 @@ enum {
17806
17807 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
17808
17809+struct nmiaction;
17810+
17811+struct nmiwork {
17812+ const struct nmiaction *action;
17813+ u64 max_duration;
17814+ struct irq_work irq_work;
17815+};
17816+
17817 struct nmiaction {
17818 struct list_head list;
17819 nmi_handler_t handler;
17820- u64 max_duration;
17821- struct irq_work irq_work;
17822 unsigned long flags;
17823 const char *name;
17824-};
17825+ struct nmiwork *work;
17826+} __do_const;
17827
17828 #define register_nmi_handler(t, fn, fg, n, init...) \
17829 ({ \
17830- static struct nmiaction init fn##_na = { \
17831+ static struct nmiwork fn##_nw; \
17832+ static const struct nmiaction init fn##_na = { \
17833 .handler = (fn), \
17834 .name = (n), \
17835 .flags = (fg), \
17836+ .work = &fn##_nw, \
17837 }; \
17838 __register_nmi_handler((t), &fn##_na); \
17839 })
17840
17841-int __register_nmi_handler(unsigned int, struct nmiaction *);
17842+int __register_nmi_handler(unsigned int, const struct nmiaction *);
17843
17844 void unregister_nmi_handler(unsigned int, const char *);
17845
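The nmiaction split is the recurring __do_const pattern in this patch: everything written at runtime (max_duration, the irq_work) moves into a separate nmiwork, so the structure holding the function pointer can be const and land in read-only memory, with the two linked through ->work. The pattern in miniature:

/* mutable statistics live apart from the const ops table */
struct demo_work   { unsigned long max_duration; };
struct demo_action {
    int (*handler)(void);
    struct demo_work *work;   /* the only path back to writable state */
};

static struct demo_work demo_nw;
static int demo_handler(void) { demo_nw.max_duration++; return 0; }

static const struct demo_action demo_na = {
    .handler = demo_handler,
    .work    = &demo_nw,
};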
17846diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
17847index 775873d..04cd306 100644
17848--- a/arch/x86/include/asm/page.h
17849+++ b/arch/x86/include/asm/page.h
17850@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17851 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
17852
17853 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
17854+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
17855
17856 #define __boot_va(x) __va(x)
17857 #define __boot_pa(x) __pa(x)
17858@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17859 * virt_to_page(kaddr) returns a valid pointer if and only if
17860 * virt_addr_valid(kaddr) returns true.
17861 */
17862-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17863 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
17864 extern bool __virt_addr_valid(unsigned long kaddr);
17865 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
17866
17867+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
17868+#define virt_to_page(kaddr) \
17869+ ({ \
17870+ const void *__kaddr = (const void *)(kaddr); \
17871+ BUG_ON(!virt_addr_valid(__kaddr)); \
17872+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
17873+ })
17874+#else
17875+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17876+#endif
17877+
17878 #endif /* __ASSEMBLY__ */
17879
17880 #include <asm-generic/memory_model.h>
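Two details of the checked virt_to_page() above: virt_addr_valid() is false for vmalloc addresses, so the BUG_ON catches linear-map arithmetic applied to the vmalloc'd stacks that GRKERNSEC_KSTACKOVERFLOW introduces; and the statement-expression temporary __kaddr guarantees a side-effecting macro argument is evaluated exactly once. The second point as a standalone sketch:

#include <assert.h>

static int calls;
static const void *next(void) { calls++; return (const void *)4096; }

/* the ({ ... }) temporary from the hunk, in isolation */
#define EVAL_ONCE(k) ({ const void *__k = (k); __k; })

int main(void)
{
    (void)EVAL_ONCE(next());
    assert(calls == 1);   /* checking and then using the raw argument */
    return 0;             /* would have called next() twice           */
}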
17881diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
17882index 0f1ddee..e2fc3d1 100644
17883--- a/arch/x86/include/asm/page_64.h
17884+++ b/arch/x86/include/asm/page_64.h
17885@@ -7,9 +7,9 @@
17886
17887 /* duplicated to the one in bootmem.h */
17888 extern unsigned long max_pfn;
17889-extern unsigned long phys_base;
17890+extern const unsigned long phys_base;
17891
17892-static inline unsigned long __phys_addr_nodebug(unsigned long x)
17893+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
17894 {
17895 unsigned long y = x - __START_KERNEL_map;
17896
17897diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
17898index cd6e1610..70f4418 100644
17899--- a/arch/x86/include/asm/paravirt.h
17900+++ b/arch/x86/include/asm/paravirt.h
17901@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
17902 return (pmd_t) { ret };
17903 }
17904
17905-static inline pmdval_t pmd_val(pmd_t pmd)
17906+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
17907 {
17908 pmdval_t ret;
17909
17910@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
17911 val);
17912 }
17913
17914+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17915+{
17916+ pgdval_t val = native_pgd_val(pgd);
17917+
17918+ if (sizeof(pgdval_t) > sizeof(long))
17919+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
17920+ val, (u64)val >> 32);
17921+ else
17922+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
17923+ val);
17924+}
17925+
17926 static inline void pgd_clear(pgd_t *pgdp)
17927 {
17928 set_pgd(pgdp, __pgd(0));
17929@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
17930 pv_mmu_ops.set_fixmap(idx, phys, flags);
17931 }
17932
17933+#ifdef CONFIG_PAX_KERNEXEC
17934+static inline unsigned long pax_open_kernel(void)
17935+{
17936+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
17937+}
17938+
17939+static inline unsigned long pax_close_kernel(void)
17940+{
17941+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
17942+}
17943+#else
17944+static inline unsigned long pax_open_kernel(void) { return 0; }
17945+static inline unsigned long pax_close_kernel(void) { return 0; }
17946+#endif
17947+
17948 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
17949
17950 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
17951@@ -906,7 +933,7 @@ extern void default_banner(void);
17952
17953 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
17954 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
17955-#define PARA_INDIRECT(addr) *%cs:addr
17956+#define PARA_INDIRECT(addr) *%ss:addr
17957 #endif
17958
17959 #define INTERRUPT_RETURN \
17960@@ -981,6 +1008,21 @@ extern void default_banner(void);
17961 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
17962 CLBR_NONE, \
17963 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
17964+
17965+#define GET_CR0_INTO_RDI \
17966+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
17967+ mov %rax,%rdi
17968+
17969+#define SET_RDI_INTO_CR0 \
17970+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17971+
17972+#define GET_CR3_INTO_RDI \
17973+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
17974+ mov %rax,%rdi
17975+
17976+#define SET_RDI_INTO_CR3 \
17977+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
17978+
17979 #endif /* CONFIG_X86_32 */
17980
17981 #endif /* __ASSEMBLY__ */
17982diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
17983index 7549b8b..f0edfda 100644
17984--- a/arch/x86/include/asm/paravirt_types.h
17985+++ b/arch/x86/include/asm/paravirt_types.h
17986@@ -84,7 +84,7 @@ struct pv_init_ops {
17987 */
17988 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
17989 unsigned long addr, unsigned len);
17990-};
17991+} __no_const __no_randomize_layout;
17992
17993
17994 struct pv_lazy_ops {
17995@@ -92,13 +92,13 @@ struct pv_lazy_ops {
17996 void (*enter)(void);
17997 void (*leave)(void);
17998 void (*flush)(void);
17999-};
18000+} __no_randomize_layout;
18001
18002 struct pv_time_ops {
18003 unsigned long long (*sched_clock)(void);
18004 unsigned long long (*steal_clock)(int cpu);
18005 unsigned long (*get_tsc_khz)(void);
18006-};
18007+} __no_const __no_randomize_layout;
18008
18009 struct pv_cpu_ops {
18010 /* hooks for various privileged instructions */
18011@@ -192,7 +192,7 @@ struct pv_cpu_ops {
18012
18013 void (*start_context_switch)(struct task_struct *prev);
18014 void (*end_context_switch)(struct task_struct *next);
18015-};
18016+} __no_const __no_randomize_layout;
18017
18018 struct pv_irq_ops {
18019 /*
18020@@ -215,7 +215,7 @@ struct pv_irq_ops {
18021 #ifdef CONFIG_X86_64
18022 void (*adjust_exception_frame)(void);
18023 #endif
18024-};
18025+} __no_randomize_layout;
18026
18027 struct pv_apic_ops {
18028 #ifdef CONFIG_X86_LOCAL_APIC
18029@@ -223,7 +223,7 @@ struct pv_apic_ops {
18030 unsigned long start_eip,
18031 unsigned long start_esp);
18032 #endif
18033-};
18034+} __no_const __no_randomize_layout;
18035
18036 struct pv_mmu_ops {
18037 unsigned long (*read_cr2)(void);
18038@@ -313,6 +313,7 @@ struct pv_mmu_ops {
18039 struct paravirt_callee_save make_pud;
18040
18041 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
18042+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
18043 #endif /* PAGETABLE_LEVELS == 4 */
18044 #endif /* PAGETABLE_LEVELS >= 3 */
18045
18046@@ -324,7 +325,13 @@ struct pv_mmu_ops {
18047 an mfn. We can tell which is which from the index. */
18048 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
18049 phys_addr_t phys, pgprot_t flags);
18050-};
18051+
18052+#ifdef CONFIG_PAX_KERNEXEC
18053+ unsigned long (*pax_open_kernel)(void);
18054+ unsigned long (*pax_close_kernel)(void);
18055+#endif
18056+
18057+} __no_randomize_layout;
18058
18059 struct arch_spinlock;
18060 #ifdef CONFIG_SMP
18061@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
18062 struct pv_lock_ops {
18063 struct paravirt_callee_save lock_spinning;
18064 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
18065-};
18066+} __no_randomize_layout;
18067
18068 /* This contains all the paravirt structures: we get a convenient
18069 * number for each function using the offset which we use to indicate
18070- * what to patch. */
18071+ * what to patch.
18072+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
18073+ */
18074+
18075 struct paravirt_patch_template {
18076 struct pv_init_ops pv_init_ops;
18077 struct pv_time_ops pv_time_ops;
18078@@ -349,7 +359,7 @@ struct paravirt_patch_template {
18079 struct pv_apic_ops pv_apic_ops;
18080 struct pv_mmu_ops pv_mmu_ops;
18081 struct pv_lock_ops pv_lock_ops;
18082-};
18083+} __no_randomize_layout;
18084
18085 extern struct pv_info pv_info;
18086 extern struct pv_init_ops pv_init_ops;
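The paravirt ops structures collect nothing but function pointers assigned once at boot, hence __no_const (they must stay writable for that one assignment, unlike __do_const tables) and __no_randomize_layout (the patching machinery identifies members by byte offset, the "NEAT TRICK" the amended comment refers to). The offset dependency in miniature:

#include <stddef.h>
#include <stdio.h>

struct demo_pv_ops {
    void (*save_fl)(void);
    void (*restore_fl)(void);
};

int main(void)
{
    /* patch sites are keyed on structure offsets, so randomizing
     * this struct's layout would break the lookup */
    printf("restore_fl lives at offset %zu\n",
           offsetof(struct demo_pv_ops, restore_fl));
    return 0;
}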
18087diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
18088index c4412e9..90e88c5 100644
18089--- a/arch/x86/include/asm/pgalloc.h
18090+++ b/arch/x86/include/asm/pgalloc.h
18091@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
18092 pmd_t *pmd, pte_t *pte)
18093 {
18094 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
18095+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
18096+}
18097+
18098+static inline void pmd_populate_user(struct mm_struct *mm,
18099+ pmd_t *pmd, pte_t *pte)
18100+{
18101+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
18102 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
18103 }
18104
18105@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
18106
18107 #ifdef CONFIG_X86_PAE
18108 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
18109+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
18110+{
18111+ pud_populate(mm, pudp, pmd);
18112+}
18113 #else /* !CONFIG_X86_PAE */
18114 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
18115 {
18116 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
18117 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
18118 }
18119+
18120+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
18121+{
18122+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
18123+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
18124+}
18125 #endif /* CONFIG_X86_PAE */
18126
18127 #if PAGETABLE_LEVELS > 3
18128@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
18129 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
18130 }
18131
18132+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
18133+{
18134+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
18135+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
18136+}
18137+
18138 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
18139 {
18140 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
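The whole pgalloc.h hunk is one idea: page tables covering kernel mappings are populated with _KERNPG_TABLE while user ones keep _PAGE_TABLE, and the only difference between the two constants is the _PAGE_USER bit, which UDEREF/KERNEXEC must never see on kernel entries. The flag values, restated from pgtable_types.h for reference:

/* _PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY */
#define DEMO_KERNPG_TABLE 0x063
#define DEMO_PAGE_USER    0x004
#define DEMO_PAGE_TABLE   (DEMO_KERNPG_TABLE | DEMO_PAGE_USER)   /* == 0x067 */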
18141diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
18142index 206a87f..1623b06 100644
18143--- a/arch/x86/include/asm/pgtable-2level.h
18144+++ b/arch/x86/include/asm/pgtable-2level.h
18145@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
18146
18147 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18148 {
18149+ pax_open_kernel();
18150 *pmdp = pmd;
18151+ pax_close_kernel();
18152 }
18153
18154 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18155diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
18156index 81bb91b..9392125 100644
18157--- a/arch/x86/include/asm/pgtable-3level.h
18158+++ b/arch/x86/include/asm/pgtable-3level.h
18159@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18160
18161 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18162 {
18163+ pax_open_kernel();
18164 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
18165+ pax_close_kernel();
18166 }
18167
18168 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18169 {
18170+ pax_open_kernel();
18171 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
18172+ pax_close_kernel();
18173 }
18174
18175 /*
18176diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
18177index aa97a07..f169e5b 100644
18178--- a/arch/x86/include/asm/pgtable.h
18179+++ b/arch/x86/include/asm/pgtable.h
18180@@ -46,6 +46,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18181
18182 #ifndef __PAGETABLE_PUD_FOLDED
18183 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
18184+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
18185 #define pgd_clear(pgd) native_pgd_clear(pgd)
18186 #endif
18187
18188@@ -83,12 +84,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18189
18190 #define arch_end_context_switch(prev) do {} while(0)
18191
18192+#define pax_open_kernel() native_pax_open_kernel()
18193+#define pax_close_kernel() native_pax_close_kernel()
18194 #endif /* CONFIG_PARAVIRT */
18195
18196+#define __HAVE_ARCH_PAX_OPEN_KERNEL
18197+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
18198+
18199+#ifdef CONFIG_PAX_KERNEXEC
18200+static inline unsigned long native_pax_open_kernel(void)
18201+{
18202+ unsigned long cr0;
18203+
18204+ preempt_disable();
18205+ barrier();
18206+ cr0 = read_cr0() ^ X86_CR0_WP;
18207+ BUG_ON(cr0 & X86_CR0_WP);
18208+ write_cr0(cr0);
18209+ return cr0 ^ X86_CR0_WP;
18210+}
18211+
18212+static inline unsigned long native_pax_close_kernel(void)
18213+{
18214+ unsigned long cr0;
18215+
18216+ cr0 = read_cr0() ^ X86_CR0_WP;
18217+ BUG_ON(!(cr0 & X86_CR0_WP));
18218+ write_cr0(cr0);
18219+ barrier();
18220+ preempt_enable_no_resched();
18221+ return cr0 ^ X86_CR0_WP;
18222+}
18223+#else
18224+static inline unsigned long native_pax_open_kernel(void) { return 0; }
18225+static inline unsigned long native_pax_close_kernel(void) { return 0; }
18226+#endif
18227+
18228 /*
18229 * The following only work if pte_present() is true.
18230 * Undefined behaviour if not..
18231 */
18232+static inline int pte_user(pte_t pte)
18233+{
18234+ return pte_val(pte) & _PAGE_USER;
18235+}
18236+
18237 static inline int pte_dirty(pte_t pte)
18238 {
18239 return pte_flags(pte) & _PAGE_DIRTY;
18240@@ -155,6 +195,11 @@ static inline unsigned long pud_pfn(pud_t pud)
18241 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
18242 }
18243
18244+static inline unsigned long pgd_pfn(pgd_t pgd)
18245+{
18246+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
18247+}
18248+
18249 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
18250
18251 static inline int pmd_large(pmd_t pte)
18252@@ -208,9 +253,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
18253 return pte_clear_flags(pte, _PAGE_RW);
18254 }
18255
18256+static inline pte_t pte_mkread(pte_t pte)
18257+{
18258+ return __pte(pte_val(pte) | _PAGE_USER);
18259+}
18260+
18261 static inline pte_t pte_mkexec(pte_t pte)
18262 {
18263- return pte_clear_flags(pte, _PAGE_NX);
18264+#ifdef CONFIG_X86_PAE
18265+ if (__supported_pte_mask & _PAGE_NX)
18266+ return pte_clear_flags(pte, _PAGE_NX);
18267+ else
18268+#endif
18269+ return pte_set_flags(pte, _PAGE_USER);
18270+}
18271+
18272+static inline pte_t pte_exprotect(pte_t pte)
18273+{
18274+#ifdef CONFIG_X86_PAE
18275+ if (__supported_pte_mask & _PAGE_NX)
18276+ return pte_set_flags(pte, _PAGE_NX);
18277+ else
18278+#endif
18279+ return pte_clear_flags(pte, _PAGE_USER);
18280 }
18281
18282 static inline pte_t pte_mkdirty(pte_t pte)
18283@@ -440,6 +505,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18284 #endif
18285
18286 #ifndef __ASSEMBLY__
18287+
18288+#ifdef CONFIG_PAX_PER_CPU_PGD
18289+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18290+enum cpu_pgd_type {kernel = 0, user = 1};
18291+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18292+{
18293+ return cpu_pgd[cpu][type];
18294+}
18295+#endif
18296+
18297 #include <linux/mm_types.h>
18298 #include <linux/mmdebug.h>
18299 #include <linux/log2.h>
18300@@ -586,7 +661,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18301 * Currently stuck as a macro due to indirect forward reference to
18302 * linux/mmzone.h's __section_mem_map_addr() definition:
18303 */
18304-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18305+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18306
18307 /* Find an entry in the second-level page table.. */
18308 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18309@@ -626,7 +701,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18310 * Currently stuck as a macro due to indirect forward reference to
18311 * linux/mmzone.h's __section_mem_map_addr() definition:
18312 */
18313-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18314+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18315
18316 /* to find an entry in a page-table-directory. */
18317 static inline unsigned long pud_index(unsigned long address)
18318@@ -641,7 +716,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18319
18320 static inline int pgd_bad(pgd_t pgd)
18321 {
18322- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18323+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18324 }
18325
18326 static inline int pgd_none(pgd_t pgd)
18327@@ -664,7 +739,12 @@ static inline int pgd_none(pgd_t pgd)
18328 * pgd_offset() returns a (pgd_t *)
18329 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18330 */
18331-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18332+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18333+
18334+#ifdef CONFIG_PAX_PER_CPU_PGD
18335+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18336+#endif
18337+
18338 /*
18339 * a shortcut which implies the use of the kernel's pgd, instead
18340 * of a process's
18341@@ -675,6 +755,23 @@ static inline int pgd_none(pgd_t pgd)
18342 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18343 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18344
18345+#ifdef CONFIG_X86_32
18346+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18347+#else
18348+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18349+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18350+
18351+#ifdef CONFIG_PAX_MEMORY_UDEREF
18352+#ifdef __ASSEMBLY__
18353+#define pax_user_shadow_base pax_user_shadow_base(%rip)
18354+#else
18355+extern unsigned long pax_user_shadow_base;
18356+extern pgdval_t clone_pgd_mask;
18357+#endif
18358+#endif
18359+
18360+#endif
18361+
18362 #ifndef __ASSEMBLY__
18363
18364 extern int direct_gbpages;
18365@@ -841,11 +938,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18366 * dst and src can be on the same page, but the range must not overlap,
18367 * and must not cross a page boundary.
18368 */
18369-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18370+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18371 {
18372- memcpy(dst, src, count * sizeof(pgd_t));
18373+ pax_open_kernel();
18374+ while (count--)
18375+ *dst++ = *src++;
18376+ pax_close_kernel();
18377 }
18378
18379+#ifdef CONFIG_PAX_PER_CPU_PGD
18380+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18381+#endif
18382+
18383+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18384+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18385+#else
18386+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18387+#endif
18388+
18389 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18390 static inline int page_level_shift(enum pg_level level)
18391 {
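
A note on the clone_pgd_range() rewrite above: under CONFIG_PAX_KERNEXEC the kernel's own page tables can be mapped read-only, so the plain memcpy() is replaced by an element-wise copy bracketed by pax_open_kernel()/pax_close_kernel(). A minimal sketch of the CR0.WP toggle that pair is built around on x86 follows; sketch_open_kernel/sketch_close_kernel are illustrative names, and the real PaX helpers additionally handle UDEREF and PCID state:

	#include <linux/preempt.h>
	#include <asm/special_insns.h>		/* read_cr0()/write_cr0() */
	#include <asm/processor-flags.h>	/* X86_CR0_WP */

	static inline unsigned long sketch_open_kernel(void)
	{
		unsigned long cr0;

		preempt_disable();
		cr0 = read_cr0();
		write_cr0(cr0 & ~X86_CR0_WP);	/* permit writes to read-only kernel pages */
		return cr0;
	}

	static inline void sketch_close_kernel(unsigned long cr0)
	{
		write_cr0(cr0 | X86_CR0_WP);	/* re-arm supervisor write protection */
		preempt_enable();
	}
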
18392diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18393index 9ee3221..b979c6b 100644
18394--- a/arch/x86/include/asm/pgtable_32.h
18395+++ b/arch/x86/include/asm/pgtable_32.h
18396@@ -25,9 +25,6 @@
18397 struct mm_struct;
18398 struct vm_area_struct;
18399
18400-extern pgd_t swapper_pg_dir[1024];
18401-extern pgd_t initial_page_table[1024];
18402-
18403 static inline void pgtable_cache_init(void) { }
18404 static inline void check_pgt_cache(void) { }
18405 void paging_init(void);
18406@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
18407 # include <asm/pgtable-2level.h>
18408 #endif
18409
18410+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18411+extern pgd_t initial_page_table[PTRS_PER_PGD];
18412+#ifdef CONFIG_X86_PAE
18413+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18414+#endif
18415+
18416 #if defined(CONFIG_HIGHPTE)
18417 #define pte_offset_map(dir, address) \
18418 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18419@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
18420 /* Clear a kernel PTE and flush it from the TLB */
18421 #define kpte_clear_flush(ptep, vaddr) \
18422 do { \
18423+ pax_open_kernel(); \
18424 pte_clear(&init_mm, (vaddr), (ptep)); \
18425+ pax_close_kernel(); \
18426 __flush_tlb_one((vaddr)); \
18427 } while (0)
18428
18429 #endif /* !__ASSEMBLY__ */
18430
18431+#define HAVE_ARCH_UNMAPPED_AREA
18432+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18433+
18434 /*
18435 * kern_addr_valid() is (1) for FLATMEM and (0) for
18436 * SPARSEMEM and DISCONTIGMEM
18437diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18438index ed5903b..c7fe163 100644
18439--- a/arch/x86/include/asm/pgtable_32_types.h
18440+++ b/arch/x86/include/asm/pgtable_32_types.h
18441@@ -8,7 +8,7 @@
18442 */
18443 #ifdef CONFIG_X86_PAE
18444 # include <asm/pgtable-3level_types.h>
18445-# define PMD_SIZE (1UL << PMD_SHIFT)
18446+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18447 # define PMD_MASK (~(PMD_SIZE - 1))
18448 #else
18449 # include <asm/pgtable-2level_types.h>
18450@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18451 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18452 #endif
18453
18454+#ifdef CONFIG_PAX_KERNEXEC
18455+#ifndef __ASSEMBLY__
18456+extern unsigned char MODULES_EXEC_VADDR[];
18457+extern unsigned char MODULES_EXEC_END[];
18458+#endif
18459+#include <asm/boot.h>
18460+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18461+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18462+#else
18463+#define ktla_ktva(addr) (addr)
18464+#define ktva_ktla(addr) (addr)
18465+#endif
18466+
18467 #define MODULES_VADDR VMALLOC_START
18468 #define MODULES_END VMALLOC_END
18469 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
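
The ktla_ktva()/ktva_ktla() pair introduced above converts kernel-text addresses between the two views KERNEXEC keeps on i386. A small, runnable illustration of the arithmetic, assuming the usual defaults PAGE_OFFSET = 0xc0000000 and LOAD_PHYSICAL_ADDR = 0x1000000 (both are config-dependent):

	#include <stdio.h>

	#define PAGE_OFFSET        0xc0000000UL
	#define LOAD_PHYSICAL_ADDR 0x1000000UL
	#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
	#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

	int main(void)
	{
		unsigned long ktla = 0x00100000UL;	/* example kernel-text linear address */
		unsigned long ktva = ktla_ktva(ktla);	/* 0xc1100000 */

		printf("ktva=%#lx round-trip=%#lx\n", ktva, ktva_ktla(ktva));
		return 0;
	}
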
18470diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18471index 5be9063..d62185b 100644
18472--- a/arch/x86/include/asm/pgtable_64.h
18473+++ b/arch/x86/include/asm/pgtable_64.h
18474@@ -16,10 +16,14 @@
18475
18476 extern pud_t level3_kernel_pgt[512];
18477 extern pud_t level3_ident_pgt[512];
18478+extern pud_t level3_vmalloc_start_pgt[512];
18479+extern pud_t level3_vmalloc_end_pgt[512];
18480+extern pud_t level3_vmemmap_pgt[512];
18481+extern pud_t level2_vmemmap_pgt[512];
18482 extern pmd_t level2_kernel_pgt[512];
18483 extern pmd_t level2_fixmap_pgt[512];
18484-extern pmd_t level2_ident_pgt[512];
18485-extern pgd_t init_level4_pgt[];
18486+extern pmd_t level2_ident_pgt[512*2];
18487+extern pgd_t init_level4_pgt[512];
18488
18489 #define swapper_pg_dir init_level4_pgt
18490
18491@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18492
18493 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18494 {
18495+ pax_open_kernel();
18496 *pmdp = pmd;
18497+ pax_close_kernel();
18498 }
18499
18500 static inline void native_pmd_clear(pmd_t *pmd)
18501@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
18502
18503 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18504 {
18505+ pax_open_kernel();
18506 *pudp = pud;
18507+ pax_close_kernel();
18508 }
18509
18510 static inline void native_pud_clear(pud_t *pud)
18511@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
18512
18513 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
18514 {
18515+ pax_open_kernel();
18516+ *pgdp = pgd;
18517+ pax_close_kernel();
18518+}
18519+
18520+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18521+{
18522 *pgdp = pgd;
18523 }
18524
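
The native_set_pgd()/native_set_pgd_batched() split above lets paths that update many PGD entries pay the pax_open_kernel()/pax_close_kernel() cost once instead of per store. A plausible caller shape (sketch; sketch_copy_pgds is not a function from the patch):

	static void sketch_copy_pgds(pgd_t *dst, const pgd_t *src, int n)
	{
		int i;

		pax_open_kernel();		/* one WP toggle for the whole batch */
		for (i = 0; i < n; i++)
			native_set_pgd_batched(dst + i, src[i]);
		pax_close_kernel();
	}
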
18525diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
18526index 7166e25..baaa6fe 100644
18527--- a/arch/x86/include/asm/pgtable_64_types.h
18528+++ b/arch/x86/include/asm/pgtable_64_types.h
18529@@ -61,9 +61,14 @@ typedef struct { pteval_t pte; } pte_t;
18530 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
18531 #define MODULES_END _AC(0xffffffffff000000, UL)
18532 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
18533+#define MODULES_EXEC_VADDR MODULES_VADDR
18534+#define MODULES_EXEC_END MODULES_END
18535 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
18536 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
18537
18538+#define ktla_ktva(addr) (addr)
18539+#define ktva_ktla(addr) (addr)
18540+
18541 #define EARLY_DYNAMIC_PAGE_TABLES 64
18542
18543 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
18544diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
18545index f216963..6bd7c21 100644
18546--- a/arch/x86/include/asm/pgtable_types.h
18547+++ b/arch/x86/include/asm/pgtable_types.h
18548@@ -111,8 +111,10 @@
18549
18550 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
18551 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
18552-#else
18553+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
18554 #define _PAGE_NX (_AT(pteval_t, 0))
18555+#else
18556+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
18557 #endif
18558
18559 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
18560@@ -151,6 +153,9 @@
18561 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
18562 _PAGE_ACCESSED)
18563
18564+#define PAGE_READONLY_NOEXEC PAGE_READONLY
18565+#define PAGE_SHARED_NOEXEC PAGE_SHARED
18566+
18567 #define __PAGE_KERNEL_EXEC \
18568 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
18569 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
18570@@ -161,7 +166,7 @@
18571 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
18572 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
18573 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
18574-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
18575+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
18576 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
18577 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
18578 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
18579@@ -218,7 +223,7 @@
18580 #ifdef CONFIG_X86_64
18581 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
18582 #else
18583-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
18584+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18585 #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18586 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
18587 #endif
18588@@ -257,7 +262,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
18589 {
18590 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
18591 }
18592+#endif
18593
18594+#if PAGETABLE_LEVELS == 3
18595+#include <asm-generic/pgtable-nopud.h>
18596+#endif
18597+
18598+#if PAGETABLE_LEVELS == 2
18599+#include <asm-generic/pgtable-nopmd.h>
18600+#endif
18601+
18602+#ifndef __ASSEMBLY__
18603 #if PAGETABLE_LEVELS > 3
18604 typedef struct { pudval_t pud; } pud_t;
18605
18606@@ -271,8 +286,6 @@ static inline pudval_t native_pud_val(pud_t pud)
18607 return pud.pud;
18608 }
18609 #else
18610-#include <asm-generic/pgtable-nopud.h>
18611-
18612 static inline pudval_t native_pud_val(pud_t pud)
18613 {
18614 return native_pgd_val(pud.pgd);
18615@@ -292,8 +305,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
18616 return pmd.pmd;
18617 }
18618 #else
18619-#include <asm-generic/pgtable-nopmd.h>
18620-
18621 static inline pmdval_t native_pmd_val(pmd_t pmd)
18622 {
18623 return native_pgd_val(pmd.pud.pgd);
18624@@ -333,7 +344,6 @@ typedef struct page *pgtable_t;
18625
18626 extern pteval_t __supported_pte_mask;
18627 extern void set_nx(void);
18628-extern int nx_enabled;
18629
18630 #define pgprot_writecombine pgprot_writecombine
18631 extern pgprot_t pgprot_writecombine(pgprot_t prot);
18632diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
18633index 7024c12..71c46b9 100644
18634--- a/arch/x86/include/asm/preempt.h
18635+++ b/arch/x86/include/asm/preempt.h
18636@@ -87,7 +87,7 @@ static __always_inline void __preempt_count_sub(int val)
18637 */
18638 static __always_inline bool __preempt_count_dec_and_test(void)
18639 {
18640- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
18641+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
18642 }
18643
18644 /*
18645diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18646index a4ea023..33aa874 100644
18647--- a/arch/x86/include/asm/processor.h
18648+++ b/arch/x86/include/asm/processor.h
18649@@ -128,7 +128,7 @@ struct cpuinfo_x86 {
18650 /* Index into per_cpu list: */
18651 u16 cpu_index;
18652 u32 microcode;
18653-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
18654+} __attribute__((__aligned__(SMP_CACHE_BYTES))) __randomize_layout;
18655
18656 #define X86_VENDOR_INTEL 0
18657 #define X86_VENDOR_CYRIX 1
18658@@ -199,9 +199,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
18659 : "memory");
18660 }
18661
18662+/* invpcid (%rdx),%rax */
18663+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
18664+
18665+#define INVPCID_SINGLE_ADDRESS 0UL
18666+#define INVPCID_SINGLE_CONTEXT 1UL
18667+#define INVPCID_ALL_GLOBAL 2UL
18668+#define INVPCID_ALL_MONGLOBAL 3UL
18669+
18670+#define PCID_KERNEL 0UL
18671+#define PCID_USER 1UL
18672+#define PCID_NOFLUSH (1UL << 63)
18673+
18674 static inline void load_cr3(pgd_t *pgdir)
18675 {
18676- write_cr3(__pa(pgdir));
18677+ write_cr3(__pa(pgdir) | PCID_KERNEL);
18678 }
18679
18680 #ifdef CONFIG_X86_32
18681@@ -283,7 +295,7 @@ struct tss_struct {
18682
18683 } ____cacheline_aligned;
18684
18685-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
18686+extern struct tss_struct init_tss[NR_CPUS];
18687
18688 /*
18689 * Save the original ist values for checking stack pointers during debugging
18690@@ -479,6 +491,7 @@ struct thread_struct {
18691 unsigned short ds;
18692 unsigned short fsindex;
18693 unsigned short gsindex;
18694+ unsigned short ss;
18695 #endif
18696 #ifdef CONFIG_X86_32
18697 unsigned long ip;
18698@@ -588,29 +601,8 @@ static inline void load_sp0(struct tss_struct *tss,
18699 extern unsigned long mmu_cr4_features;
18700 extern u32 *trampoline_cr4_features;
18701
18702-static inline void set_in_cr4(unsigned long mask)
18703-{
18704- unsigned long cr4;
18705-
18706- mmu_cr4_features |= mask;
18707- if (trampoline_cr4_features)
18708- *trampoline_cr4_features = mmu_cr4_features;
18709- cr4 = read_cr4();
18710- cr4 |= mask;
18711- write_cr4(cr4);
18712-}
18713-
18714-static inline void clear_in_cr4(unsigned long mask)
18715-{
18716- unsigned long cr4;
18717-
18718- mmu_cr4_features &= ~mask;
18719- if (trampoline_cr4_features)
18720- *trampoline_cr4_features = mmu_cr4_features;
18721- cr4 = read_cr4();
18722- cr4 &= ~mask;
18723- write_cr4(cr4);
18724-}
18725+extern void set_in_cr4(unsigned long mask);
18726+extern void clear_in_cr4(unsigned long mask);
18727
18728 typedef struct {
18729 unsigned long seg;
18730@@ -836,11 +828,18 @@ static inline void spin_lock_prefetch(const void *x)
18731 */
18732 #define TASK_SIZE PAGE_OFFSET
18733 #define TASK_SIZE_MAX TASK_SIZE
18734+
18735+#ifdef CONFIG_PAX_SEGMEXEC
18736+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
18737+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
18738+#else
18739 #define STACK_TOP TASK_SIZE
18740-#define STACK_TOP_MAX STACK_TOP
18741+#endif
18742+
18743+#define STACK_TOP_MAX TASK_SIZE
18744
18745 #define INIT_THREAD { \
18746- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18747+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18748 .vm86_info = NULL, \
18749 .sysenter_cs = __KERNEL_CS, \
18750 .io_bitmap_ptr = NULL, \
18751@@ -854,7 +853,7 @@ static inline void spin_lock_prefetch(const void *x)
18752 */
18753 #define INIT_TSS { \
18754 .x86_tss = { \
18755- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18756+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18757 .ss0 = __KERNEL_DS, \
18758 .ss1 = __KERNEL_CS, \
18759 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
18760@@ -865,11 +864,7 @@ static inline void spin_lock_prefetch(const void *x)
18761 extern unsigned long thread_saved_pc(struct task_struct *tsk);
18762
18763 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
18764-#define KSTK_TOP(info) \
18765-({ \
18766- unsigned long *__ptr = (unsigned long *)(info); \
18767- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
18768-})
18769+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
18770
18771 /*
18772 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
18773@@ -884,7 +879,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18774 #define task_pt_regs(task) \
18775 ({ \
18776 struct pt_regs *__regs__; \
18777- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
18778+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
18779 __regs__ - 1; \
18780 })
18781
18782@@ -894,13 +889,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18783 /*
18784 * User space process size. 47 bits minus one guard page.
18785 */
18786-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
18787+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
18788
18789 /* This decides where the kernel will search for a free chunk of vm
18790 * space during mmap's.
18791 */
18792 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
18793- 0xc0000000 : 0xFFFFe000)
18794+ 0xc0000000 : 0xFFFFf000)
18795
18796 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
18797 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
18798@@ -911,11 +906,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18799 #define STACK_TOP_MAX TASK_SIZE_MAX
18800
18801 #define INIT_THREAD { \
18802- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18803+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18804 }
18805
18806 #define INIT_TSS { \
18807- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18808+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18809 }
18810
18811 /*
18812@@ -943,6 +938,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
18813 */
18814 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
18815
18816+#ifdef CONFIG_PAX_SEGMEXEC
18817+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
18818+#endif
18819+
18820 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
18821
18822 /* Get/set a process' ability to use the timestamp counter instruction */
18823@@ -969,7 +968,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18824 return 0;
18825 }
18826
18827-extern unsigned long arch_align_stack(unsigned long sp);
18828+#define arch_align_stack(x) ((x) & ~0xfUL)
18829 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
18830
18831 void default_idle(void);
18832@@ -979,6 +978,6 @@ bool xen_set_default_idle(void);
18833 #define xen_set_default_idle 0
18834 #endif
18835
18836-void stop_this_cpu(void *dummy);
18837+void stop_this_cpu(void *dummy) __noreturn;
18838 void df_debug(struct pt_regs *regs, long error_code);
18839 #endif /* _ASM_X86_PROCESSOR_H */
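
On the INVPCID plumbing added above: __ASM_INVPCID spells out the raw opcode bytes for `invpcid (%rdx),%rax` so the code assembles with binutils that predate the mnemonic. The instruction takes a 16-byte memory descriptor (word 0: PCID, word 1: linear address) plus a type code in a register. A sketch of a wrapper matching how the tlbflush.h hunks below invoke it (sketch_invpcid is an illustrative name):

	static inline void sketch_invpcid(unsigned long type, unsigned long pcid,
					  unsigned long addr)
	{
		u64 descriptor[2] = { pcid, addr };

		asm volatile(__ASM_INVPCID
			     : : "d" (&descriptor), "a" (type) : "memory");
	}

	/* e.g. sketch_invpcid(INVPCID_SINGLE_ADDRESS, PCID_KERNEL, addr); */
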
18840diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
18841index 6205f0c..688a3a9 100644
18842--- a/arch/x86/include/asm/ptrace.h
18843+++ b/arch/x86/include/asm/ptrace.h
18844@@ -84,28 +84,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
18845 }
18846
18847 /*
18848- * user_mode_vm(regs) determines whether a register set came from user mode.
18849+ * user_mode(regs) determines whether a register set came from user mode.
18850 * This is true if V8086 mode was enabled OR if the register set was from
18851 * protected mode with RPL-3 CS value. This tricky test checks that with
18852 * one comparison. Many places in the kernel can bypass this full check
18853- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
18854+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
18855+ * be used.
18856 */
18857-static inline int user_mode(struct pt_regs *regs)
18858+static inline int user_mode_novm(struct pt_regs *regs)
18859 {
18860 #ifdef CONFIG_X86_32
18861 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
18862 #else
18863- return !!(regs->cs & 3);
18864+ return !!(regs->cs & SEGMENT_RPL_MASK);
18865 #endif
18866 }
18867
18868-static inline int user_mode_vm(struct pt_regs *regs)
18869+static inline int user_mode(struct pt_regs *regs)
18870 {
18871 #ifdef CONFIG_X86_32
18872 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
18873 USER_RPL;
18874 #else
18875- return user_mode(regs);
18876+ return user_mode_novm(regs);
18877 #endif
18878 }
18879
18880@@ -121,15 +122,16 @@ static inline int v8086_mode(struct pt_regs *regs)
18881 #ifdef CONFIG_X86_64
18882 static inline bool user_64bit_mode(struct pt_regs *regs)
18883 {
18884+ unsigned long cs = regs->cs & 0xffff;
18885 #ifndef CONFIG_PARAVIRT
18886 /*
18887 * On non-paravirt systems, this is the only long mode CPL 3
18888 * selector. We do not allow long mode selectors in the LDT.
18889 */
18890- return regs->cs == __USER_CS;
18891+ return cs == __USER_CS;
18892 #else
18893 /* Headers are too twisted for this to go in paravirt.h. */
18894- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
18895+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
18896 #endif
18897 }
18898
18899@@ -180,9 +182,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
18900 * Traps from the kernel do not save sp and ss.
18901 * Use the helper function to retrieve sp.
18902 */
18903- if (offset == offsetof(struct pt_regs, sp) &&
18904- regs->cs == __KERNEL_CS)
18905- return kernel_stack_pointer(regs);
18906+ if (offset == offsetof(struct pt_regs, sp)) {
18907+ unsigned long cs = regs->cs & 0xffff;
18908+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
18909+ return kernel_stack_pointer(regs);
18910+ }
18911 #endif
18912 return *(unsigned long *)((unsigned long)regs + offset);
18913 }
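
Alongside the user_mode()/user_mode_novm() swap above, user_64bit_mode() and regs_get_register() now mask the saved %cs down to its low 16 bits before comparing, presumably so stray upper bytes in the pt_regs slot cannot defeat the test. For reference, a runnable decode of the selector fields the RPL check relies on (0x73 is the usual i386 __USER_CS; the constants are copied here for illustration):

	#include <stdio.h>

	#define SEGMENT_RPL_MASK 0x3	/* requested privilege level */
	#define USER_RPL         0x3

	int main(void)
	{
		unsigned int cs = 0x73;	/* GDT index 14, TI=0, RPL=3 */

		printf("index=%u ti=%u rpl=%u user=%d\n",
		       cs >> 3, (cs >> 2) & 1, cs & SEGMENT_RPL_MASK,
		       (cs & SEGMENT_RPL_MASK) == USER_RPL);
		return 0;
	}
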
18914diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
18915index 70f46f0..adfbdb4 100644
18916--- a/arch/x86/include/asm/qrwlock.h
18917+++ b/arch/x86/include/asm/qrwlock.h
18918@@ -7,8 +7,8 @@
18919 #define queue_write_unlock queue_write_unlock
18920 static inline void queue_write_unlock(struct qrwlock *lock)
18921 {
18922- barrier();
18923- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
18924+ barrier();
18925+ ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
18926 }
18927 #endif
18928
18929diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
18930index 9c6b890..5305f53 100644
18931--- a/arch/x86/include/asm/realmode.h
18932+++ b/arch/x86/include/asm/realmode.h
18933@@ -22,16 +22,14 @@ struct real_mode_header {
18934 #endif
18935 /* APM/BIOS reboot */
18936 u32 machine_real_restart_asm;
18937-#ifdef CONFIG_X86_64
18938 u32 machine_real_restart_seg;
18939-#endif
18940 };
18941
18942 /* This must match data at trampoline_32/64.S */
18943 struct trampoline_header {
18944 #ifdef CONFIG_X86_32
18945 u32 start;
18946- u16 gdt_pad;
18947+ u16 boot_cs;
18948 u16 gdt_limit;
18949 u32 gdt_base;
18950 #else
18951diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
18952index a82c4f1..ac45053 100644
18953--- a/arch/x86/include/asm/reboot.h
18954+++ b/arch/x86/include/asm/reboot.h
18955@@ -6,13 +6,13 @@
18956 struct pt_regs;
18957
18958 struct machine_ops {
18959- void (*restart)(char *cmd);
18960- void (*halt)(void);
18961- void (*power_off)(void);
18962+ void (* __noreturn restart)(char *cmd);
18963+ void (* __noreturn halt)(void);
18964+ void (* __noreturn power_off)(void);
18965 void (*shutdown)(void);
18966 void (*crash_shutdown)(struct pt_regs *);
18967- void (*emergency_restart)(void);
18968-};
18969+ void (* __noreturn emergency_restart)(void);
18970+} __no_const;
18971
18972 extern struct machine_ops machine_ops;
18973
18974diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
18975index 8f7866a..e442f20 100644
18976--- a/arch/x86/include/asm/rmwcc.h
18977+++ b/arch/x86/include/asm/rmwcc.h
18978@@ -3,7 +3,34 @@
18979
18980 #ifdef CC_HAVE_ASM_GOTO
18981
18982-#define __GEN_RMWcc(fullop, var, cc, ...) \
18983+#ifdef CONFIG_PAX_REFCOUNT
18984+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18985+do { \
18986+ asm_volatile_goto (fullop \
18987+ ";jno 0f\n" \
18988+ fullantiop \
18989+ ";int $4\n0:\n" \
18990+ _ASM_EXTABLE(0b, 0b) \
18991+ ";j" cc " %l[cc_label]" \
18992+ : : "m" (var), ## __VA_ARGS__ \
18993+ : "memory" : cc_label); \
18994+ return 0; \
18995+cc_label: \
18996+ return 1; \
18997+} while (0)
18998+#else
18999+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19000+do { \
19001+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
19002+ : : "m" (var), ## __VA_ARGS__ \
19003+ : "memory" : cc_label); \
19004+ return 0; \
19005+cc_label: \
19006+ return 1; \
19007+} while (0)
19008+#endif
19009+
19010+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
19011 do { \
19012 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
19013 : : "m" (var), ## __VA_ARGS__ \
19014@@ -13,15 +40,46 @@ cc_label: \
19015 return 1; \
19016 } while (0)
19017
19018-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
19019- __GEN_RMWcc(op " " arg0, var, cc)
19020+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
19021+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
19022
19023-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
19024- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
19025+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
19026+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
19027+
19028+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
19029+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
19030+
19031+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
19032+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
19033
19034 #else /* !CC_HAVE_ASM_GOTO */
19035
19036-#define __GEN_RMWcc(fullop, var, cc, ...) \
19037+#ifdef CONFIG_PAX_REFCOUNT
19038+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19039+do { \
19040+ char c; \
19041+ asm volatile (fullop \
19042+ ";jno 0f\n" \
19043+ fullantiop \
19044+ ";int $4\n0:\n" \
19045+ _ASM_EXTABLE(0b, 0b) \
19046+ "; set" cc " %1" \
19047+ : "+m" (var), "=qm" (c) \
19048+ : __VA_ARGS__ : "memory"); \
19049+ return c != 0; \
19050+} while (0)
19051+#else
19052+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19053+do { \
19054+ char c; \
19055+ asm volatile (fullop "; set" cc " %1" \
19056+ : "+m" (var), "=qm" (c) \
19057+ : __VA_ARGS__ : "memory"); \
19058+ return c != 0; \
19059+} while (0)
19060+#endif
19061+
19062+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
19063 do { \
19064 char c; \
19065 asm volatile (fullop "; set" cc " %1" \
19066@@ -30,11 +88,17 @@ do { \
19067 return c != 0; \
19068 } while (0)
19069
19070-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
19071- __GEN_RMWcc(op " " arg0, var, cc)
19072+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
19073+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
19074+
19075+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
19076+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
19077+
19078+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
19079+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
19080
19081-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
19082- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
19083+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
19084+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
19085
19086 #endif /* CC_HAVE_ASM_GOTO */
19087
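
The rmwcc.h rework above is the core of PAX_REFCOUNT: every checked read-modify-write gains an "antiop" that undoes the operation when the CPU's overflow flag fires, then traps with int $4 so the #OF handler can log and kill the offender (the _ASM_EXTABLE entry gives the handler its resume point). A standalone demonstration of the generated shape, as a plain function rather than a macro; in userspace the trap would surface as a signal, and flags clobbered on the overflow path don't matter because the handler intervenes:

	static int checked_dec_and_test(int *v)
	{
		unsigned char z;

		asm volatile("decl %0\n\t"
			     "jno 0f\n\t"	/* no signed overflow: continue */
			     "incl %0\n\t"	/* roll the decrement back */
			     "int $4\n"		/* raise #OF for the handler */
			     "0:\n\t"
			     "sete %1"		/* original "e" condition */
			     : "+m" (*v), "=qm" (z)
			     :
			     : "memory", "cc");
		return z;
	}
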
19088diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
19089index cad82c9..2e5c5c1 100644
19090--- a/arch/x86/include/asm/rwsem.h
19091+++ b/arch/x86/include/asm/rwsem.h
19092@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
19093 {
19094 asm volatile("# beginning down_read\n\t"
19095 LOCK_PREFIX _ASM_INC "(%1)\n\t"
19096+
19097+#ifdef CONFIG_PAX_REFCOUNT
19098+ "jno 0f\n"
19099+ LOCK_PREFIX _ASM_DEC "(%1)\n"
19100+ "int $4\n0:\n"
19101+ _ASM_EXTABLE(0b, 0b)
19102+#endif
19103+
19104 /* adds 0x00000001 */
19105 " jns 1f\n"
19106 " call call_rwsem_down_read_failed\n"
19107@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
19108 "1:\n\t"
19109 " mov %1,%2\n\t"
19110 " add %3,%2\n\t"
19111+
19112+#ifdef CONFIG_PAX_REFCOUNT
19113+ "jno 0f\n"
19114+ "sub %3,%2\n"
19115+ "int $4\n0:\n"
19116+ _ASM_EXTABLE(0b, 0b)
19117+#endif
19118+
19119 " jle 2f\n\t"
19120 LOCK_PREFIX " cmpxchg %2,%0\n\t"
19121 " jnz 1b\n\t"
19122@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
19123 long tmp;
19124 asm volatile("# beginning down_write\n\t"
19125 LOCK_PREFIX " xadd %1,(%2)\n\t"
19126+
19127+#ifdef CONFIG_PAX_REFCOUNT
19128+ "jno 0f\n"
19129+ "mov %1,(%2)\n"
19130+ "int $4\n0:\n"
19131+ _ASM_EXTABLE(0b, 0b)
19132+#endif
19133+
19134 /* adds 0xffff0001, returns the old value */
19135 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
19136 /* was the active mask 0 before? */
19137@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
19138 long tmp;
19139 asm volatile("# beginning __up_read\n\t"
19140 LOCK_PREFIX " xadd %1,(%2)\n\t"
19141+
19142+#ifdef CONFIG_PAX_REFCOUNT
19143+ "jno 0f\n"
19144+ "mov %1,(%2)\n"
19145+ "int $4\n0:\n"
19146+ _ASM_EXTABLE(0b, 0b)
19147+#endif
19148+
19149 /* subtracts 1, returns the old value */
19150 " jns 1f\n\t"
19151 " call call_rwsem_wake\n" /* expects old value in %edx */
19152@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
19153 long tmp;
19154 asm volatile("# beginning __up_write\n\t"
19155 LOCK_PREFIX " xadd %1,(%2)\n\t"
19156+
19157+#ifdef CONFIG_PAX_REFCOUNT
19158+ "jno 0f\n"
19159+ "mov %1,(%2)\n"
19160+ "int $4\n0:\n"
19161+ _ASM_EXTABLE(0b, 0b)
19162+#endif
19163+
19164 /* subtracts 0xffff0001, returns the old value */
19165 " jns 1f\n\t"
19166 " call call_rwsem_wake\n" /* expects old value in %edx */
19167@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
19168 {
19169 asm volatile("# beginning __downgrade_write\n\t"
19170 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
19171+
19172+#ifdef CONFIG_PAX_REFCOUNT
19173+ "jno 0f\n"
19174+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
19175+ "int $4\n0:\n"
19176+ _ASM_EXTABLE(0b, 0b)
19177+#endif
19178+
19179 /*
19180 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
19181 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
19182@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
19183 */
19184 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19185 {
19186- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
19187+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
19188+
19189+#ifdef CONFIG_PAX_REFCOUNT
19190+ "jno 0f\n"
19191+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
19192+ "int $4\n0:\n"
19193+ _ASM_EXTABLE(0b, 0b)
19194+#endif
19195+
19196 : "+m" (sem->count)
19197 : "er" (delta));
19198 }
19199@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19200 */
19201 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
19202 {
19203- return delta + xadd(&sem->count, delta);
19204+ return delta + xadd_check_overflow(&sem->count, delta);
19205 }
19206
19207 #endif /* __KERNEL__ */
19208diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
19209index 6f1c3a8..7744f19 100644
19210--- a/arch/x86/include/asm/segment.h
19211+++ b/arch/x86/include/asm/segment.h
19212@@ -64,10 +64,15 @@
19213 * 26 - ESPFIX small SS
19214 * 27 - per-cpu [ offset to per-cpu data area ]
19215 * 28 - stack_canary-20 [ for stack protector ]
19216- * 29 - unused
19217- * 30 - unused
19218+ * 29 - PCI BIOS CS
19219+ * 30 - PCI BIOS DS
19220 * 31 - TSS for double fault handler
19221 */
19222+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
19223+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
19224+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
19225+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
19226+
19227 #define GDT_ENTRY_TLS_MIN 6
19228 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
19229
19230@@ -79,6 +84,8 @@
19231
19232 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
19233
19234+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
19235+
19236 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
19237
19238 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
19239@@ -104,6 +111,12 @@
19240 #define __KERNEL_STACK_CANARY 0
19241 #endif
19242
19243+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
19244+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
19245+
19246+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
19247+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
19248+
19249 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
19250
19251 /*
19252@@ -141,7 +154,7 @@
19253 */
19254
19255 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
19256-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
19257+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
19258
19259
19260 #else
19261@@ -165,6 +178,8 @@
19262 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
19263 #define __USER32_DS __USER_DS
19264
19265+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
19266+
19267 #define GDT_ENTRY_TSS 8 /* needs two entries */
19268 #define GDT_ENTRY_LDT 10 /* needs two entries */
19269 #define GDT_ENTRY_TLS_MIN 12
19270@@ -173,6 +188,8 @@
19271 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
19272 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
19273
19274+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19275+
19276 /* TLS indexes for 64bit - hardcoded in arch_prctl */
19277 #define FS_TLS 0
19278 #define GS_TLS 1
19279@@ -180,12 +197,14 @@
19280 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
19281 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
19282
19283-#define GDT_ENTRIES 16
19284+#define GDT_ENTRIES 17
19285
19286 #endif
19287
19288 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19289+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19290 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19291+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19292 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
19293 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
19294 #ifndef CONFIG_PARAVIRT
19295@@ -268,7 +287,7 @@ static inline unsigned long get_limit(unsigned long segment)
19296 {
19297 unsigned long __limit;
19298 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
19299- return __limit + 1;
19300+ return __limit;
19301 }
19302
19303 #endif /* !__ASSEMBLY__ */
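
Selector bookkeeping for the GDT slots added above (selector = index * 8, plus 3 for RPL 3); the trailing underscores mark these as illustrative copies, not patch identifiers. On i386, GDT_ENTRY_KERNEL_BASE is 12, which is how the PCI BIOS entries land on the previously unused slots 29 and 30; on amd64, the new slot 16 is why GDT_ENTRIES grows from 16 to 17:

	#define SEL(idx)	((idx) * 8)

	/* i386 */
	#define __KERNEXEC_KERNEL_CS_	SEL(4)		/* 0x20 */
	#define __PCIBIOS_CS_		SEL(12 + 17)	/* 0xe8, GDT slot 29 */
	#define __PCIBIOS_DS_		SEL(12 + 18)	/* 0xf0, GDT slot 30 */

	/* x86_64 */
	#define __KERNEXEC_KERNEL_CS64_	SEL(7)		/* 0x38 */
	#define __UDEREF_KERNEL_DS_	SEL(16)		/* 0x80 */
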
19304diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19305index 8d3120f..352b440 100644
19306--- a/arch/x86/include/asm/smap.h
19307+++ b/arch/x86/include/asm/smap.h
19308@@ -25,11 +25,40 @@
19309
19310 #include <asm/alternative-asm.h>
19311
19312+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19313+#define ASM_PAX_OPEN_USERLAND \
19314+ 661: jmp 663f; \
19315+ .pushsection .altinstr_replacement, "a" ; \
19316+ 662: pushq %rax; nop; \
19317+ .popsection ; \
19318+ .pushsection .altinstructions, "a" ; \
19319+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19320+ .popsection ; \
19321+ call __pax_open_userland; \
19322+ popq %rax; \
19323+ 663:
19324+
19325+#define ASM_PAX_CLOSE_USERLAND \
19326+ 661: jmp 663f; \
19327+ .pushsection .altinstr_replacement, "a" ; \
19328+ 662: pushq %rax; nop; \
19329+ .popsection; \
19330+ .pushsection .altinstructions, "a" ; \
19331+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19332+ .popsection; \
19333+ call __pax_close_userland; \
19334+ popq %rax; \
19335+ 663:
19336+#else
19337+#define ASM_PAX_OPEN_USERLAND
19338+#define ASM_PAX_CLOSE_USERLAND
19339+#endif
19340+
19341 #ifdef CONFIG_X86_SMAP
19342
19343 #define ASM_CLAC \
19344 661: ASM_NOP3 ; \
19345- .pushsection .altinstr_replacement, "ax" ; \
19346+ .pushsection .altinstr_replacement, "a" ; \
19347 662: __ASM_CLAC ; \
19348 .popsection ; \
19349 .pushsection .altinstructions, "a" ; \
19350@@ -38,7 +67,7 @@
19351
19352 #define ASM_STAC \
19353 661: ASM_NOP3 ; \
19354- .pushsection .altinstr_replacement, "ax" ; \
19355+ .pushsection .altinstr_replacement, "a" ; \
19356 662: __ASM_STAC ; \
19357 .popsection ; \
19358 .pushsection .altinstructions, "a" ; \
19359@@ -56,6 +85,37 @@
19360
19361 #include <asm/alternative.h>
19362
19363+#define __HAVE_ARCH_PAX_OPEN_USERLAND
19364+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19365+
19366+extern void __pax_open_userland(void);
19367+static __always_inline unsigned long pax_open_userland(void)
19368+{
19369+
19370+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19371+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
19372+ :
19373+ : [open] "i" (__pax_open_userland)
19374+ : "memory", "rax");
19375+#endif
19376+
19377+ return 0;
19378+}
19379+
19380+extern void __pax_close_userland(void);
19381+static __always_inline unsigned long pax_close_userland(void)
19382+{
19383+
19384+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19385+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
19386+ :
19387+ : [close] "i" (__pax_close_userland)
19388+ : "memory", "rax");
19389+#endif
19390+
19391+ return 0;
19392+}
19393+
19394 #ifdef CONFIG_X86_SMAP
19395
19396 static __always_inline void clac(void)
19397diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19398index 8cd27e0..7f05ec8 100644
19399--- a/arch/x86/include/asm/smp.h
19400+++ b/arch/x86/include/asm/smp.h
19401@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19402 /* cpus sharing the last level cache: */
19403 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19404 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19405-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19406+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19407
19408 static inline struct cpumask *cpu_sibling_mask(int cpu)
19409 {
19410@@ -78,7 +78,7 @@ struct smp_ops {
19411
19412 void (*send_call_func_ipi)(const struct cpumask *mask);
19413 void (*send_call_func_single_ipi)(int cpu);
19414-};
19415+} __no_const;
19416
19417 /* Globals due to paravirt */
19418 extern void set_cpu_sibling_map(int cpu);
19419@@ -190,14 +190,8 @@ extern unsigned disabled_cpus;
19420 extern int safe_smp_processor_id(void);
19421
19422 #elif defined(CONFIG_X86_64_SMP)
19423-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19424-
19425-#define stack_smp_processor_id() \
19426-({ \
19427- struct thread_info *ti; \
19428- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19429- ti->cpu; \
19430-})
19431+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19432+#define stack_smp_processor_id() raw_smp_processor_id()
19433 #define safe_smp_processor_id() smp_processor_id()
19434
19435 #endif
19436diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
19437index 54f1c80..39362a5 100644
19438--- a/arch/x86/include/asm/spinlock.h
19439+++ b/arch/x86/include/asm/spinlock.h
19440@@ -223,6 +223,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
19441 static inline void arch_read_lock(arch_rwlock_t *rw)
19442 {
19443 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
19444+
19445+#ifdef CONFIG_PAX_REFCOUNT
19446+ "jno 0f\n"
19447+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
19448+ "int $4\n0:\n"
19449+ _ASM_EXTABLE(0b, 0b)
19450+#endif
19451+
19452 "jns 1f\n"
19453 "call __read_lock_failed\n\t"
19454 "1:\n"
19455@@ -232,6 +240,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
19456 static inline void arch_write_lock(arch_rwlock_t *rw)
19457 {
19458 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
19459+
19460+#ifdef CONFIG_PAX_REFCOUNT
19461+ "jno 0f\n"
19462+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
19463+ "int $4\n0:\n"
19464+ _ASM_EXTABLE(0b, 0b)
19465+#endif
19466+
19467 "jz 1f\n"
19468 "call __write_lock_failed\n\t"
19469 "1:\n"
19470@@ -261,13 +277,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
19471
19472 static inline void arch_read_unlock(arch_rwlock_t *rw)
19473 {
19474- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
19475+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
19476+
19477+#ifdef CONFIG_PAX_REFCOUNT
19478+ "jno 0f\n"
19479+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
19480+ "int $4\n0:\n"
19481+ _ASM_EXTABLE(0b, 0b)
19482+#endif
19483+
19484 :"+m" (rw->lock) : : "memory");
19485 }
19486
19487 static inline void arch_write_unlock(arch_rwlock_t *rw)
19488 {
19489- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
19490+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
19491+
19492+#ifdef CONFIG_PAX_REFCOUNT
19493+ "jno 0f\n"
19494+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
19495+ "int $4\n0:\n"
19496+ _ASM_EXTABLE(0b, 0b)
19497+#endif
19498+
19499 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
19500 }
19501 #else
19502diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
19503index 6a99859..03cb807 100644
19504--- a/arch/x86/include/asm/stackprotector.h
19505+++ b/arch/x86/include/asm/stackprotector.h
19506@@ -47,7 +47,7 @@
19507 * head_32 for boot CPU and setup_per_cpu_areas() for others.
19508 */
19509 #define GDT_STACK_CANARY_INIT \
19510- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
19511+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
19512
19513 /*
19514 * Initialize the stackprotector canary value.
19515@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
19516
19517 static inline void load_stack_canary_segment(void)
19518 {
19519-#ifdef CONFIG_X86_32
19520+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19521 asm volatile ("mov %0, %%gs" : : "r" (0));
19522 #endif
19523 }
19524diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
19525index 70bbe39..4ae2bd4 100644
19526--- a/arch/x86/include/asm/stacktrace.h
19527+++ b/arch/x86/include/asm/stacktrace.h
19528@@ -11,28 +11,20 @@
19529
19530 extern int kstack_depth_to_print;
19531
19532-struct thread_info;
19533+struct task_struct;
19534 struct stacktrace_ops;
19535
19536-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
19537- unsigned long *stack,
19538- unsigned long bp,
19539- const struct stacktrace_ops *ops,
19540- void *data,
19541- unsigned long *end,
19542- int *graph);
19543+typedef unsigned long walk_stack_t(struct task_struct *task,
19544+ void *stack_start,
19545+ unsigned long *stack,
19546+ unsigned long bp,
19547+ const struct stacktrace_ops *ops,
19548+ void *data,
19549+ unsigned long *end,
19550+ int *graph);
19551
19552-extern unsigned long
19553-print_context_stack(struct thread_info *tinfo,
19554- unsigned long *stack, unsigned long bp,
19555- const struct stacktrace_ops *ops, void *data,
19556- unsigned long *end, int *graph);
19557-
19558-extern unsigned long
19559-print_context_stack_bp(struct thread_info *tinfo,
19560- unsigned long *stack, unsigned long bp,
19561- const struct stacktrace_ops *ops, void *data,
19562- unsigned long *end, int *graph);
19563+extern walk_stack_t print_context_stack;
19564+extern walk_stack_t print_context_stack_bp;
19565
19566 /* Generic stack tracer with callbacks */
19567
19568@@ -40,7 +32,7 @@ struct stacktrace_ops {
19569 void (*address)(void *data, unsigned long address, int reliable);
19570 /* On negative return stop dumping */
19571 int (*stack)(void *data, char *name);
19572- walk_stack_t walk_stack;
19573+ walk_stack_t *walk_stack;
19574 };
19575
19576 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
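
The stacktrace.h change above switches walk_stack_t from a pointer typedef to a function typedef, which lets the one typedef both declare the two implementations (print_context_stack, print_context_stack_bp) and type the ops member as walk_stack_t *. The idiom in miniature (illustrative names):

	typedef unsigned long walk_fn(void *data);	/* a function type...     */

	extern walk_fn my_walker;			/* ...declares functions  */
	struct ops { walk_fn *walk; };			/* ...and types pointers  */

	unsigned long my_walker(void *data)		/* must match exactly     */
	{
		return (unsigned long)data;
	}
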
19577diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
19578index d7f3b3b..3cc39f1 100644
19579--- a/arch/x86/include/asm/switch_to.h
19580+++ b/arch/x86/include/asm/switch_to.h
19581@@ -108,7 +108,7 @@ do { \
19582 "call __switch_to\n\t" \
19583 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
19584 __switch_canary \
19585- "movq %P[thread_info](%%rsi),%%r8\n\t" \
19586+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
19587 "movq %%rax,%%rdi\n\t" \
19588 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
19589 "jnz ret_from_fork\n\t" \
19590@@ -119,7 +119,7 @@ do { \
19591 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
19592 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
19593 [_tif_fork] "i" (_TIF_FORK), \
19594- [thread_info] "i" (offsetof(struct task_struct, stack)), \
19595+ [thread_info] "m" (current_tinfo), \
19596 [current_task] "m" (current_task) \
19597 __switch_canary_iparam \
19598 : "memory", "cc" __EXTRA_CLOBBER)
19599diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
19600index 8540538..4b0b5e9 100644
19601--- a/arch/x86/include/asm/thread_info.h
19602+++ b/arch/x86/include/asm/thread_info.h
19603@@ -24,7 +24,6 @@ struct exec_domain;
19604 #include <linux/atomic.h>
19605
19606 struct thread_info {
19607- struct task_struct *task; /* main task structure */
19608 struct exec_domain *exec_domain; /* execution domain */
19609 __u32 flags; /* low level flags */
19610 __u32 status; /* thread synchronous flags */
19611@@ -33,13 +32,13 @@ struct thread_info {
19612 mm_segment_t addr_limit;
19613 struct restart_block restart_block;
19614 void __user *sysenter_return;
19615+ unsigned long lowest_stack;
19616 unsigned int sig_on_uaccess_error:1;
19617 unsigned int uaccess_err:1; /* uaccess failed */
19618 };
19619
19620-#define INIT_THREAD_INFO(tsk) \
19621+#define INIT_THREAD_INFO \
19622 { \
19623- .task = &tsk, \
19624 .exec_domain = &default_exec_domain, \
19625 .flags = 0, \
19626 .cpu = 0, \
19627@@ -50,7 +49,7 @@ struct thread_info {
19628 }, \
19629 }
19630
19631-#define init_thread_info (init_thread_union.thread_info)
19632+#define init_thread_info (init_thread_union.stack)
19633 #define init_stack (init_thread_union.stack)
19634
19635 #else /* !__ASSEMBLY__ */
19636@@ -91,6 +90,7 @@ struct thread_info {
19637 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
19638 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
19639 #define TIF_X32 30 /* 32-bit native x86-64 binary */
19640+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
19641
19642 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
19643 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
19644@@ -115,17 +115,18 @@ struct thread_info {
19645 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
19646 #define _TIF_ADDR32 (1 << TIF_ADDR32)
19647 #define _TIF_X32 (1 << TIF_X32)
19648+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
19649
19650 /* work to do in syscall_trace_enter() */
19651 #define _TIF_WORK_SYSCALL_ENTRY \
19652 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
19653 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
19654- _TIF_NOHZ)
19655+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19656
19657 /* work to do in syscall_trace_leave() */
19658 #define _TIF_WORK_SYSCALL_EXIT \
19659 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
19660- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
19661+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
19662
19663 /* work to do on interrupt/exception return */
19664 #define _TIF_WORK_MASK \
19665@@ -136,7 +137,7 @@ struct thread_info {
19666 /* work to do on any return to user space */
19667 #define _TIF_ALLWORK_MASK \
19668 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
19669- _TIF_NOHZ)
19670+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19671
19672 /* Only used for 64 bit */
19673 #define _TIF_DO_NOTIFY_MASK \
19674@@ -151,7 +152,6 @@ struct thread_info {
19675 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
19676
19677 #define STACK_WARN (THREAD_SIZE/8)
19678-#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
19679
19680 /*
19681 * macros/functions for gaining access to the thread information structure
19682@@ -162,26 +162,18 @@ struct thread_info {
19683
19684 DECLARE_PER_CPU(unsigned long, kernel_stack);
19685
19686+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
19687+
19688 static inline struct thread_info *current_thread_info(void)
19689 {
19690- struct thread_info *ti;
19691- ti = (void *)(this_cpu_read_stable(kernel_stack) +
19692- KERNEL_STACK_OFFSET - THREAD_SIZE);
19693- return ti;
19694+ return this_cpu_read_stable(current_tinfo);
19695 }
19696
19697 #else /* !__ASSEMBLY__ */
19698
19699 /* how to get the thread information struct from ASM */
19700 #define GET_THREAD_INFO(reg) \
19701- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
19702- _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
19703-
19704-/*
19705- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
19706- * a certain register (to be used in assembler memory operands).
19707- */
19708-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
19709+ _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
19710
19711 #endif
19712
19713@@ -237,5 +229,12 @@ static inline bool is_ia32_task(void)
19714 extern void arch_task_cache_init(void);
19715 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
19716 extern void arch_release_task_struct(struct task_struct *tsk);
19717+
19718+#define __HAVE_THREAD_FUNCTIONS
19719+#define task_thread_info(task) (&(task)->tinfo)
19720+#define task_stack_page(task) ((task)->stack)
19721+#define setup_thread_stack(p, org) do {} while (0)
19722+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
19723+
19724 #endif
19725 #endif /* _ASM_X86_THREAD_INFO_H */
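
With __HAVE_THREAD_FUNCTIONS defined above, the core kernel stops assuming thread_info lives at the base of the stack: it is embedded in task_struct instead, and end_of_stack() degenerates to one slot past the base of the bare stack page. The resulting layout as a compilable sketch with illustrative names:

	struct sketch_thread_info { unsigned long flags; /* ... */ };

	struct sketch_task_struct {
		/* ... */
		struct sketch_thread_info tinfo;	/* was at the stack base */
		void *stack;				/* THREAD_SIZE kernel stack */
	};

	#define sketch_task_thread_info(t)	(&(t)->tinfo)
	#define sketch_end_of_stack(t)		((unsigned long *)(t)->stack + 1)
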
19726diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
19727index 04905bf..49203ca 100644
19728--- a/arch/x86/include/asm/tlbflush.h
19729+++ b/arch/x86/include/asm/tlbflush.h
19730@@ -17,18 +17,44 @@
19731
19732 static inline void __native_flush_tlb(void)
19733 {
19734+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19735+ u64 descriptor[2];
19736+
19737+ descriptor[0] = PCID_KERNEL;
19738+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_MONGLOBAL) : "memory");
19739+ return;
19740+ }
19741+
19742+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19743+ if (static_cpu_has(X86_FEATURE_PCID)) {
19744+ unsigned int cpu = raw_get_cpu();
19745+
19746+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
19747+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
19748+ raw_put_cpu_no_resched();
19749+ return;
19750+ }
19751+#endif
19752+
19753 native_write_cr3(native_read_cr3());
19754 }
19755
19756 static inline void __native_flush_tlb_global_irq_disabled(void)
19757 {
19758- unsigned long cr4;
19759+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19760+ u64 descriptor[2];
19761
19762- cr4 = native_read_cr4();
19763- /* clear PGE */
19764- native_write_cr4(cr4 & ~X86_CR4_PGE);
19765- /* write old PGE again and flush TLBs */
19766- native_write_cr4(cr4);
19767+ descriptor[0] = PCID_KERNEL;
19768+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
19769+ } else {
19770+ unsigned long cr4;
19771+
19772+ cr4 = native_read_cr4();
19773+ /* clear PGE */
19774+ native_write_cr4(cr4 & ~X86_CR4_PGE);
19775+ /* write old PGE again and flush TLBs */
19776+ native_write_cr4(cr4);
19777+ }
19778 }
19779
19780 static inline void __native_flush_tlb_global(void)
19781@@ -49,6 +75,41 @@ static inline void __native_flush_tlb_global(void)
19782
19783 static inline void __native_flush_tlb_single(unsigned long addr)
19784 {
19785+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19786+ u64 descriptor[2];
19787+
19788+ descriptor[0] = PCID_KERNEL;
19789+ descriptor[1] = addr;
19790+
19791+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19792+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
19793+ if (addr < TASK_SIZE_MAX)
19794+ descriptor[1] += pax_user_shadow_base;
19795+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19796+ }
19797+
19798+ descriptor[0] = PCID_USER;
19799+ descriptor[1] = addr;
19800+#endif
19801+
19802+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19803+ return;
19804+ }
19805+
19806+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19807+ if (static_cpu_has(X86_FEATURE_PCID)) {
19808+ unsigned int cpu = raw_get_cpu();
19809+
19810+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
19811+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19812+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
19813+ raw_put_cpu_no_resched();
19814+
19815+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
19816+ addr += pax_user_shadow_base;
19817+ }
19818+#endif
19819+
19820 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19821 }
19822
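
The non-INVPCID fallback above exploits PCID_NOFLUSH (CR3 bit 63, defined in processor.h earlier): setting it makes a mov-to-CR3 a pure context switch with no implicit TLB flush, so the kernel can hop into the per-CPU user PGD just to invlpg there and hop back. The dance, isolated (sketch; the real code also flushes the pax_user_shadow_base alias when needed):

	static inline void sketch_flush_user_and_kernel(unsigned int cpu,
							unsigned long addr)
	{
		native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
		asm volatile("invlpg (%0)" : : "r" (addr) : "memory");
		native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
		asm volatile("invlpg (%0)" : : "r" (addr) : "memory");
	}
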
19823diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
19824index 0d592e0..526f797 100644
19825--- a/arch/x86/include/asm/uaccess.h
19826+++ b/arch/x86/include/asm/uaccess.h
19827@@ -7,6 +7,7 @@
19828 #include <linux/compiler.h>
19829 #include <linux/thread_info.h>
19830 #include <linux/string.h>
19831+#include <linux/spinlock.h>
19832 #include <asm/asm.h>
19833 #include <asm/page.h>
19834 #include <asm/smap.h>
19835@@ -29,7 +30,12 @@
19836
19837 #define get_ds() (KERNEL_DS)
19838 #define get_fs() (current_thread_info()->addr_limit)
19839+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19840+void __set_fs(mm_segment_t x);
19841+void set_fs(mm_segment_t x);
19842+#else
19843 #define set_fs(x) (current_thread_info()->addr_limit = (x))
19844+#endif
19845
19846 #define segment_eq(a, b) ((a).seg == (b).seg)
19847
19848@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
19849 * checks that the pointer is in the user space range - after calling
19850 * this function, memory access functions may still return -EFAULT.
19851 */
19852-#define access_ok(type, addr, size) \
19853- likely(!__range_not_ok(addr, size, user_addr_max()))
19854+extern int _cond_resched(void);
19855+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
19856+#define access_ok(type, addr, size) \
19857+({ \
19858+ unsigned long __size = size; \
19859+ unsigned long __addr = (unsigned long)addr; \
19860+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
19861+ if (__ret_ao && __size) { \
19862+ unsigned long __addr_ao = __addr & PAGE_MASK; \
19863+ unsigned long __end_ao = __addr + __size - 1; \
19864+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
19865+ while (__addr_ao <= __end_ao) { \
19866+ char __c_ao; \
19867+ __addr_ao += PAGE_SIZE; \
19868+ if (__size > PAGE_SIZE) \
19869+ _cond_resched(); \
19870+ if (__get_user(__c_ao, (char __user *)__addr)) \
19871+ break; \
19872+ if (type != VERIFY_WRITE) { \
19873+ __addr = __addr_ao; \
19874+ continue; \
19875+ } \
19876+ if (__put_user(__c_ao, (char __user *)__addr)) \
19877+ break; \
19878+ __addr = __addr_ao; \
19879+ } \
19880+ } \
19881+ } \
19882+ __ret_ao; \
19883+})
19884
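
The access_ok() rewrite above does more than a range check: when the range crosses a page boundary it walks the buffer page by page, faulting each page in with __get_user() (and rewriting the byte via __put_user() for VERIFY_WRITE so write faults surface too), with _cond_resched() keeping long walks preemptible. The read side of that walk as a plain function (sketch):

	/* 1 if every page overlapping [addr, addr+size) is readable */
	static int sketch_prefault_readable(unsigned long addr, unsigned long size)
	{
		unsigned long end = addr + size - 1;
		char c;

		do {
			if (__get_user(c, (const char __user *)addr))
				return 0;		/* this page would fault */
			addr = (addr & PAGE_MASK) + PAGE_SIZE;
		} while (addr && addr <= end);		/* "addr &&" guards wraparound */
		return 1;
	}
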
19885 /*
19886 * The exception table consists of pairs of addresses relative to the
19887@@ -176,10 +210,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19888 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
19889 __chk_user_ptr(ptr); \
19890 might_fault(); \
19891+ pax_open_userland(); \
19892 asm volatile("call __get_user_%P3" \
19893 : "=a" (__ret_gu), "=r" (__val_gu) \
19894 : "0" (ptr), "i" (sizeof(*(ptr)))); \
19895 (x) = (__typeof__(*(ptr))) __val_gu; \
19896+ pax_close_userland(); \
19897 __ret_gu; \
19898 })
19899
19900@@ -187,13 +223,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19901 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
19902 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
19903
19904-
19905+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19906+#define __copyuser_seg "gs;"
19907+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
19908+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
19909+#else
19910+#define __copyuser_seg
19911+#define __COPYUSER_SET_ES
19912+#define __COPYUSER_RESTORE_ES
19913+#endif
19914
19915 #ifdef CONFIG_X86_32
19916 #define __put_user_asm_u64(x, addr, err, errret) \
19917 asm volatile(ASM_STAC "\n" \
19918- "1: movl %%eax,0(%2)\n" \
19919- "2: movl %%edx,4(%2)\n" \
19920+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
19921+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
19922 "3: " ASM_CLAC "\n" \
19923 ".section .fixup,\"ax\"\n" \
19924 "4: movl %3,%0\n" \
19925@@ -206,8 +250,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19926
19927 #define __put_user_asm_ex_u64(x, addr) \
19928 asm volatile(ASM_STAC "\n" \
19929- "1: movl %%eax,0(%1)\n" \
19930- "2: movl %%edx,4(%1)\n" \
19931+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
19932+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
19933 "3: " ASM_CLAC "\n" \
19934 _ASM_EXTABLE_EX(1b, 2b) \
19935 _ASM_EXTABLE_EX(2b, 3b) \
19936@@ -257,7 +301,8 @@ extern void __put_user_8(void);
19937 __typeof__(*(ptr)) __pu_val; \
19938 __chk_user_ptr(ptr); \
19939 might_fault(); \
19940- __pu_val = x; \
19941+ __pu_val = (x); \
19942+ pax_open_userland(); \
19943 switch (sizeof(*(ptr))) { \
19944 case 1: \
19945 __put_user_x(1, __pu_val, ptr, __ret_pu); \
19946@@ -275,6 +320,7 @@ extern void __put_user_8(void);
19947 __put_user_x(X, __pu_val, ptr, __ret_pu); \
19948 break; \
19949 } \
19950+ pax_close_userland(); \
19951 __ret_pu; \
19952 })
19953
19954@@ -355,8 +401,10 @@ do { \
19955 } while (0)
19956
19957 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19958+do { \
19959+ pax_open_userland(); \
19960 asm volatile(ASM_STAC "\n" \
19961- "1: mov"itype" %2,%"rtype"1\n" \
19962+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
19963 "2: " ASM_CLAC "\n" \
19964 ".section .fixup,\"ax\"\n" \
19965 "3: mov %3,%0\n" \
19966@@ -364,8 +412,10 @@ do { \
19967 " jmp 2b\n" \
19968 ".previous\n" \
19969 _ASM_EXTABLE(1b, 3b) \
19970- : "=r" (err), ltype(x) \
19971- : "m" (__m(addr)), "i" (errret), "0" (err))
19972+ : "=r" (err), ltype (x) \
19973+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
19974+ pax_close_userland(); \
19975+} while (0)
19976
19977 #define __get_user_size_ex(x, ptr, size) \
19978 do { \
19979@@ -389,7 +439,7 @@ do { \
19980 } while (0)
19981
19982 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
19983- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
19984+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
19985 "2:\n" \
19986 _ASM_EXTABLE_EX(1b, 2b) \
19987 : ltype(x) : "m" (__m(addr)))
19988@@ -406,13 +456,24 @@ do { \
19989 int __gu_err; \
19990 unsigned long __gu_val; \
19991 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
19992- (x) = (__force __typeof__(*(ptr)))__gu_val; \
19993+ (x) = (__typeof__(*(ptr)))__gu_val; \
19994 __gu_err; \
19995 })
19996
19997 /* FIXME: this hack is definitely wrong -AK */
19998 struct __large_struct { unsigned long buf[100]; };
19999-#define __m(x) (*(struct __large_struct __user *)(x))
20000+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20001+#define ____m(x) \
20002+({ \
20003+ unsigned long ____x = (unsigned long)(x); \
20004+ if (____x < pax_user_shadow_base) \
20005+ ____x += pax_user_shadow_base; \
20006+ (typeof(x))____x; \
20007+})
20008+#else
20009+#define ____m(x) (x)
20010+#endif
20011+#define __m(x) (*(struct __large_struct __user *)____m(x))
20012
20013 /*
20014 * Tell gcc we read from memory instead of writing: this is because
20015@@ -420,8 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
20016 * aliasing issues.
20017 */
20018 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
20019+do { \
20020+ pax_open_userland(); \
20021 asm volatile(ASM_STAC "\n" \
20022- "1: mov"itype" %"rtype"1,%2\n" \
20023+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
20024 "2: " ASM_CLAC "\n" \
20025 ".section .fixup,\"ax\"\n" \
20026 "3: mov %3,%0\n" \
20027@@ -429,10 +492,12 @@ struct __large_struct { unsigned long buf[100]; };
20028 ".previous\n" \
20029 _ASM_EXTABLE(1b, 3b) \
20030 : "=r"(err) \
20031- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
20032+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
20033+ pax_close_userland(); \
20034+} while (0)
20035
20036 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
20037- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
20038+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
20039 "2:\n" \
20040 _ASM_EXTABLE_EX(1b, 2b) \
20041 : : ltype(x), "m" (__m(addr)))
20042@@ -442,11 +507,13 @@ struct __large_struct { unsigned long buf[100]; };
20043 */
20044 #define uaccess_try do { \
20045 current_thread_info()->uaccess_err = 0; \
20046+ pax_open_userland(); \
20047 stac(); \
20048 barrier();
20049
20050 #define uaccess_catch(err) \
20051 clac(); \
20052+ pax_close_userland(); \
20053 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
20054 } while (0)
20055
20056@@ -471,8 +538,12 @@ struct __large_struct { unsigned long buf[100]; };
20057 * On error, the variable @x is set to zero.
20058 */
20059
20060+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20061+#define __get_user(x, ptr) get_user((x), (ptr))
20062+#else
20063 #define __get_user(x, ptr) \
20064 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
20065+#endif
20066
20067 /**
20068 * __put_user: - Write a simple value into user space, with less checking.
20069@@ -494,8 +565,12 @@ struct __large_struct { unsigned long buf[100]; };
20070 * Returns zero on success, or -EFAULT on error.
20071 */
20072
20073+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20074+#define __put_user(x, ptr) put_user((x), (ptr))
20075+#else
20076 #define __put_user(x, ptr) \
20077 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
20078+#endif
20079
20080 #define __get_user_unaligned __get_user
20081 #define __put_user_unaligned __put_user
20082@@ -513,7 +588,7 @@ struct __large_struct { unsigned long buf[100]; };
20083 #define get_user_ex(x, ptr) do { \
20084 unsigned long __gue_val; \
20085 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
20086- (x) = (__force __typeof__(*(ptr)))__gue_val; \
20087+ (x) = (__typeof__(*(ptr)))__gue_val; \
20088 } while (0)
20089
20090 #define put_user_try uaccess_try
20091@@ -542,18 +617,19 @@ extern void __cmpxchg_wrong_size(void)
20092 __typeof__(ptr) __uval = (uval); \
20093 __typeof__(*(ptr)) __old = (old); \
20094 __typeof__(*(ptr)) __new = (new); \
20095+ pax_open_userland(); \
20096 switch (size) { \
20097 case 1: \
20098 { \
20099 asm volatile("\t" ASM_STAC "\n" \
20100- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
20101+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
20102 "2:\t" ASM_CLAC "\n" \
20103 "\t.section .fixup, \"ax\"\n" \
20104 "3:\tmov %3, %0\n" \
20105 "\tjmp 2b\n" \
20106 "\t.previous\n" \
20107 _ASM_EXTABLE(1b, 3b) \
20108- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20109+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20110 : "i" (-EFAULT), "q" (__new), "1" (__old) \
20111 : "memory" \
20112 ); \
20113@@ -562,14 +638,14 @@ extern void __cmpxchg_wrong_size(void)
20114 case 2: \
20115 { \
20116 asm volatile("\t" ASM_STAC "\n" \
20117- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
20118+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
20119 "2:\t" ASM_CLAC "\n" \
20120 "\t.section .fixup, \"ax\"\n" \
20121 "3:\tmov %3, %0\n" \
20122 "\tjmp 2b\n" \
20123 "\t.previous\n" \
20124 _ASM_EXTABLE(1b, 3b) \
20125- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20126+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20127 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20128 : "memory" \
20129 ); \
20130@@ -578,14 +654,14 @@ extern void __cmpxchg_wrong_size(void)
20131 case 4: \
20132 { \
20133 asm volatile("\t" ASM_STAC "\n" \
20134- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
20135+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
20136 "2:\t" ASM_CLAC "\n" \
20137 "\t.section .fixup, \"ax\"\n" \
20138 "3:\tmov %3, %0\n" \
20139 "\tjmp 2b\n" \
20140 "\t.previous\n" \
20141 _ASM_EXTABLE(1b, 3b) \
20142- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20143+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20144 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20145 : "memory" \
20146 ); \
20147@@ -597,14 +673,14 @@ extern void __cmpxchg_wrong_size(void)
20148 __cmpxchg_wrong_size(); \
20149 \
20150 asm volatile("\t" ASM_STAC "\n" \
20151- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
20152+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
20153 "2:\t" ASM_CLAC "\n" \
20154 "\t.section .fixup, \"ax\"\n" \
20155 "3:\tmov %3, %0\n" \
20156 "\tjmp 2b\n" \
20157 "\t.previous\n" \
20158 _ASM_EXTABLE(1b, 3b) \
20159- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20160+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20161 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20162 : "memory" \
20163 ); \
20164@@ -613,6 +689,7 @@ extern void __cmpxchg_wrong_size(void)
20165 default: \
20166 __cmpxchg_wrong_size(); \
20167 } \
20168+ pax_close_userland(); \
20169 *__uval = __old; \
20170 __ret; \
20171 })
20172@@ -636,17 +713,6 @@ extern struct movsl_mask {
20173
20174 #define ARCH_HAS_NOCACHE_UACCESS 1
20175
20176-#ifdef CONFIG_X86_32
20177-# include <asm/uaccess_32.h>
20178-#else
20179-# include <asm/uaccess_64.h>
20180-#endif
20181-
20182-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
20183- unsigned n);
20184-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
20185- unsigned n);
20186-
20187 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
20188 # define copy_user_diag __compiletime_error
20189 #else
20190@@ -656,7 +722,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
20191 extern void copy_user_diag("copy_from_user() buffer size is too small")
20192 copy_from_user_overflow(void);
20193 extern void copy_user_diag("copy_to_user() buffer size is too small")
20194-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
20195+copy_to_user_overflow(void);
20196
20197 #undef copy_user_diag
20198
20199@@ -669,7 +735,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
20200
20201 extern void
20202 __compiletime_warning("copy_to_user() buffer size is not provably correct")
20203-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
20204+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
20205 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
20206
20207 #else
20208@@ -684,10 +750,16 @@ __copy_from_user_overflow(int size, unsigned long count)
20209
20210 #endif
20211
20212+#ifdef CONFIG_X86_32
20213+# include <asm/uaccess_32.h>
20214+#else
20215+# include <asm/uaccess_64.h>
20216+#endif
20217+
20218 static inline unsigned long __must_check
20219 copy_from_user(void *to, const void __user *from, unsigned long n)
20220 {
20221- int sz = __compiletime_object_size(to);
20222+ size_t sz = __compiletime_object_size(to);
20223
20224 might_fault();
20225
20226@@ -709,12 +781,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
20227 * case, and do only runtime checking for non-constant sizes.
20228 */
20229
20230- if (likely(sz < 0 || sz >= n))
20231- n = _copy_from_user(to, from, n);
20232- else if(__builtin_constant_p(n))
20233- copy_from_user_overflow();
20234- else
20235- __copy_from_user_overflow(sz, n);
20236+ if (likely(sz != (size_t)-1 && sz < n)) {
20237+ if(__builtin_constant_p(n))
20238+ copy_from_user_overflow();
20239+ else
20240+ __copy_from_user_overflow(sz, n);
20241+ } else if (access_ok(VERIFY_READ, from, n))
20242+ n = __copy_from_user(to, from, n);
20243+ else if ((long)n > 0)
20244+ memset(to, 0, n);
20245
20246 return n;
20247 }
20248@@ -722,17 +797,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
20249 static inline unsigned long __must_check
20250 copy_to_user(void __user *to, const void *from, unsigned long n)
20251 {
20252- int sz = __compiletime_object_size(from);
20253+ size_t sz = __compiletime_object_size(from);
20254
20255 might_fault();
20256
20257 /* See the comment in copy_from_user() above. */
20258- if (likely(sz < 0 || sz >= n))
20259- n = _copy_to_user(to, from, n);
20260- else if(__builtin_constant_p(n))
20261- copy_to_user_overflow();
20262- else
20263- __copy_to_user_overflow(sz, n);
20264+ if (likely(sz != (size_t)-1 && sz < n)) {
20265+ if(__builtin_constant_p(n))
20266+ copy_to_user_overflow();
20267+ else
20268+ __copy_to_user_overflow(sz, n);
20269+ } else if (access_ok(VERIFY_WRITE, to, n))
20270+ n = __copy_to_user(to, from, n);
20271
20272 return n;
20273 }
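
The rewritten copy_from_user()/copy_to_user() above invert the old flow: a provable compile-time overflow (sz known and smaller than n) now diagnoses and refuses the copy outright, and a failed access_ok() on the read side scrubs the destination so uninitialized kernel memory cannot leak to the caller. A minimal user-space model of that shape, where object_size(), range_ok() and raw_copy() are hypothetical stand-ins for __compiletime_object_size(), access_ok() and __copy_from_user():

    #include <stddef.h>
    #include <string.h>

    static size_t object_size(const void *p) { (void)p; return (size_t)-1; } /* -1: unknown */
    static int range_ok(const void *p, size_t n) { (void)p; (void)n; return 1; }
    static size_t raw_copy(void *to, const void *from, size_t n)
    {
            memcpy(to, from, n);
            return 0;                          /* bytes NOT copied */
    }

    static size_t checked_copy_from(void *to, const void *from, size_t n)
    {
            size_t sz = object_size(to);

            if (sz != (size_t)-1 && sz < n)    /* provable overflow: refuse */
                    return n;
            if (range_ok(from, n))
                    return raw_copy(to, from, n);
            if ((long)n > 0)                   /* bad range: scrub, don't leak */
                    memset(to, 0, n);
            return n;
    }
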
20274diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
20275index 3c03a5d..1071638 100644
20276--- a/arch/x86/include/asm/uaccess_32.h
20277+++ b/arch/x86/include/asm/uaccess_32.h
20278@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
20279 static __always_inline unsigned long __must_check
20280 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
20281 {
20282+ if ((long)n < 0)
20283+ return n;
20284+
20285+ check_object_size(from, n, true);
20286+
20287 if (__builtin_constant_p(n)) {
20288 unsigned long ret;
20289
20290@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
20291 __copy_to_user(void __user *to, const void *from, unsigned long n)
20292 {
20293 might_fault();
20294+
20295 return __copy_to_user_inatomic(to, from, n);
20296 }
20297
20298 static __always_inline unsigned long
20299 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
20300 {
20301+ if ((long)n < 0)
20302+ return n;
20303+
20304 	/* Avoid zeroing the tail if the copy fails.
20305 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
20306 * but as the zeroing behaviour is only significant when n is not
20307@@ -137,6 +146,12 @@ static __always_inline unsigned long
20308 __copy_from_user(void *to, const void __user *from, unsigned long n)
20309 {
20310 might_fault();
20311+
20312+ if ((long)n < 0)
20313+ return n;
20314+
20315+ check_object_size(to, n, false);
20316+
20317 if (__builtin_constant_p(n)) {
20318 unsigned long ret;
20319
20320@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
20321 const void __user *from, unsigned long n)
20322 {
20323 might_fault();
20324+
20325+ if ((long)n < 0)
20326+ return n;
20327+
20328 if (__builtin_constant_p(n)) {
20329 unsigned long ret;
20330
20331@@ -181,7 +200,10 @@ static __always_inline unsigned long
20332 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20333 unsigned long n)
20334 {
20335- return __copy_from_user_ll_nocache_nozero(to, from, n);
20336+ if ((long)n < 0)
20337+ return n;
20338+
20339+ return __copy_from_user_ll_nocache_nozero(to, from, n);
20340 }
20341
20342 #endif /* _ASM_X86_UACCESS_32_H */
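
Each 32-bit helper above gains the same early (long)n < 0 test: a length that underflowed (say, len - hdr going negative and converting to a huge unsigned value) fails fast instead of driving the copy loop across the address space. Roughly, as a hypothetical wrapper:

    /* Hypothetical wrapper showing the sign-check idiom. */
    static unsigned long guarded_copy(void *to, const void *from, unsigned long n)
    {
            if ((long)n < 0)        /* > LONG_MAX: almost certainly an underflow */
                    return n;       /* report the whole length as uncopied */
            (void)to; (void)from;
            /* ... the real __copy_*_user body would run here ... */
            return 0;
    }
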
20343diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20344index 12a26b9..206c200 100644
20345--- a/arch/x86/include/asm/uaccess_64.h
20346+++ b/arch/x86/include/asm/uaccess_64.h
20347@@ -10,6 +10,9 @@
20348 #include <asm/alternative.h>
20349 #include <asm/cpufeature.h>
20350 #include <asm/page.h>
20351+#include <asm/pgtable.h>
20352+
20353+#define set_fs(x) (current_thread_info()->addr_limit = (x))
20354
20355 /*
20356 * Copy To/From Userspace
20357@@ -17,14 +20,14 @@
20358
20359 /* Handles exceptions in both to and from, but doesn't do access_ok */
20360 __must_check unsigned long
20361-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
20362+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
20363 __must_check unsigned long
20364-copy_user_generic_string(void *to, const void *from, unsigned len);
20365+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
20366 __must_check unsigned long
20367-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20368+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
20369
20370 static __always_inline __must_check unsigned long
20371-copy_user_generic(void *to, const void *from, unsigned len)
20372+copy_user_generic(void *to, const void *from, unsigned long len)
20373 {
20374 unsigned ret;
20375
20376@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20377 }
20378
20379 __must_check unsigned long
20380-copy_in_user(void __user *to, const void __user *from, unsigned len);
20381+copy_in_user(void __user *to, const void __user *from, unsigned long len);
20382
20383 static __always_inline __must_check
20384-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20385+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20386 {
20387- int ret = 0;
20388+ size_t sz = __compiletime_object_size(dst);
20389+ unsigned ret = 0;
20390+
20391+ if (size > INT_MAX)
20392+ return size;
20393+
20394+ check_object_size(dst, size, false);
20395+
20396+#ifdef CONFIG_PAX_MEMORY_UDEREF
20397+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20398+ return size;
20399+#endif
20400+
20401+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20402+ if(__builtin_constant_p(size))
20403+ copy_from_user_overflow();
20404+ else
20405+ __copy_from_user_overflow(sz, size);
20406+ return size;
20407+ }
20408
20409 if (!__builtin_constant_p(size))
20410- return copy_user_generic(dst, (__force void *)src, size);
20411+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20412 switch (size) {
20413- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20414+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20415 ret, "b", "b", "=q", 1);
20416 return ret;
20417- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20418+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20419 ret, "w", "w", "=r", 2);
20420 return ret;
20421- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20422+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20423 ret, "l", "k", "=r", 4);
20424 return ret;
20425- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20426+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20427 ret, "q", "", "=r", 8);
20428 return ret;
20429 case 10:
20430- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20431+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20432 ret, "q", "", "=r", 10);
20433 if (unlikely(ret))
20434 return ret;
20435 __get_user_asm(*(u16 *)(8 + (char *)dst),
20436- (u16 __user *)(8 + (char __user *)src),
20437+ (const u16 __user *)(8 + (const char __user *)src),
20438 ret, "w", "w", "=r", 2);
20439 return ret;
20440 case 16:
20441- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20442+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20443 ret, "q", "", "=r", 16);
20444 if (unlikely(ret))
20445 return ret;
20446 __get_user_asm(*(u64 *)(8 + (char *)dst),
20447- (u64 __user *)(8 + (char __user *)src),
20448+ (const u64 __user *)(8 + (const char __user *)src),
20449 ret, "q", "", "=r", 8);
20450 return ret;
20451 default:
20452- return copy_user_generic(dst, (__force void *)src, size);
20453+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20454 }
20455 }
20456
20457 static __always_inline __must_check
20458-int __copy_from_user(void *dst, const void __user *src, unsigned size)
20459+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20460 {
20461 might_fault();
20462 return __copy_from_user_nocheck(dst, src, size);
20463 }
20464
20465 static __always_inline __must_check
20466-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20467+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20468 {
20469- int ret = 0;
20470+ size_t sz = __compiletime_object_size(src);
20471+ unsigned ret = 0;
20472+
20473+ if (size > INT_MAX)
20474+ return size;
20475+
20476+ check_object_size(src, size, true);
20477+
20478+#ifdef CONFIG_PAX_MEMORY_UDEREF
20479+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20480+ return size;
20481+#endif
20482+
20483+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20484+ if(__builtin_constant_p(size))
20485+ copy_to_user_overflow();
20486+ else
20487+ __copy_to_user_overflow(sz, size);
20488+ return size;
20489+ }
20490
20491 if (!__builtin_constant_p(size))
20492- return copy_user_generic((__force void *)dst, src, size);
20493+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20494 switch (size) {
20495- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
20496+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
20497 ret, "b", "b", "iq", 1);
20498 return ret;
20499- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
20500+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
20501 ret, "w", "w", "ir", 2);
20502 return ret;
20503- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
20504+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
20505 ret, "l", "k", "ir", 4);
20506 return ret;
20507- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
20508+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20509 ret, "q", "", "er", 8);
20510 return ret;
20511 case 10:
20512- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20513+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20514 ret, "q", "", "er", 10);
20515 if (unlikely(ret))
20516 return ret;
20517 asm("":::"memory");
20518- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
20519+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
20520 ret, "w", "w", "ir", 2);
20521 return ret;
20522 case 16:
20523- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20524+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20525 ret, "q", "", "er", 16);
20526 if (unlikely(ret))
20527 return ret;
20528 asm("":::"memory");
20529- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
20530+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
20531 ret, "q", "", "er", 8);
20532 return ret;
20533 default:
20534- return copy_user_generic((__force void *)dst, src, size);
20535+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20536 }
20537 }
20538
20539 static __always_inline __must_check
20540-int __copy_to_user(void __user *dst, const void *src, unsigned size)
20541+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
20542 {
20543 might_fault();
20544 return __copy_to_user_nocheck(dst, src, size);
20545 }
20546
20547 static __always_inline __must_check
20548-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20549+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20550 {
20551- int ret = 0;
20552+ unsigned ret = 0;
20553
20554 might_fault();
20555+
20556+ if (size > INT_MAX)
20557+ return size;
20558+
20559+#ifdef CONFIG_PAX_MEMORY_UDEREF
20560+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20561+ return size;
20562+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20563+ return size;
20564+#endif
20565+
20566 if (!__builtin_constant_p(size))
20567- return copy_user_generic((__force void *)dst,
20568- (__force void *)src, size);
20569+ return copy_user_generic((__force_kernel void *)____m(dst),
20570+ (__force_kernel const void *)____m(src), size);
20571 switch (size) {
20572 case 1: {
20573 u8 tmp;
20574- __get_user_asm(tmp, (u8 __user *)src,
20575+ __get_user_asm(tmp, (const u8 __user *)src,
20576 ret, "b", "b", "=q", 1);
20577 if (likely(!ret))
20578 __put_user_asm(tmp, (u8 __user *)dst,
20579@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20580 }
20581 case 2: {
20582 u16 tmp;
20583- __get_user_asm(tmp, (u16 __user *)src,
20584+ __get_user_asm(tmp, (const u16 __user *)src,
20585 ret, "w", "w", "=r", 2);
20586 if (likely(!ret))
20587 __put_user_asm(tmp, (u16 __user *)dst,
20588@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20589
20590 case 4: {
20591 u32 tmp;
20592- __get_user_asm(tmp, (u32 __user *)src,
20593+ __get_user_asm(tmp, (const u32 __user *)src,
20594 ret, "l", "k", "=r", 4);
20595 if (likely(!ret))
20596 __put_user_asm(tmp, (u32 __user *)dst,
20597@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20598 }
20599 case 8: {
20600 u64 tmp;
20601- __get_user_asm(tmp, (u64 __user *)src,
20602+ __get_user_asm(tmp, (const u64 __user *)src,
20603 ret, "q", "", "=r", 8);
20604 if (likely(!ret))
20605 __put_user_asm(tmp, (u64 __user *)dst,
20606@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20607 return ret;
20608 }
20609 default:
20610- return copy_user_generic((__force void *)dst,
20611- (__force void *)src, size);
20612+ return copy_user_generic((__force_kernel void *)____m(dst),
20613+ (__force_kernel const void *)____m(src), size);
20614 }
20615 }
20616
20617-static __must_check __always_inline int
20618-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
20619+static __must_check __always_inline unsigned long
20620+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
20621 {
20622 return __copy_from_user_nocheck(dst, src, size);
20623 }
20624
20625-static __must_check __always_inline int
20626-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
20627+static __must_check __always_inline unsigned long
20628+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
20629 {
20630 return __copy_to_user_nocheck(dst, src, size);
20631 }
20632
20633-extern long __copy_user_nocache(void *dst, const void __user *src,
20634- unsigned size, int zerorest);
20635+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
20636+ unsigned long size, int zerorest);
20637
20638-static inline int
20639-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
20640+static inline unsigned long
20641+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
20642 {
20643 might_fault();
20644+
20645+ if (size > INT_MAX)
20646+ return size;
20647+
20648+#ifdef CONFIG_PAX_MEMORY_UDEREF
20649+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20650+ return size;
20651+#endif
20652+
20653 return __copy_user_nocache(dst, src, size, 1);
20654 }
20655
20656-static inline int
20657+static inline unsigned long
20658 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
20659- unsigned size)
20660+ unsigned long size)
20661 {
20662+ if (size > INT_MAX)
20663+ return size;
20664+
20665+#ifdef CONFIG_PAX_MEMORY_UDEREF
20666+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20667+ return size;
20668+#endif
20669+
20670 return __copy_user_nocache(dst, src, size, 0);
20671 }
20672
20673 unsigned long
20674-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
20675+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
20676
20677 #endif /* _ASM_X86_UACCESS_64_H */
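
The ____m() helper introduced in uaccess.h, now threaded through every copy_user_generic() call above, rebases a raw user address into the kernel-visible shadow mapping when 64-bit UDEREF is active. A toy model, with shadow_base as a made-up constant standing in for pax_user_shadow_base:

    static const unsigned long shadow_base = 0x100000UL;   /* hypothetical */

    static void *rebase_user_ptr(void *p)
    {
            unsigned long x = (unsigned long)p;

            if (x < shadow_base)            /* raw userland address */
                    x += shadow_base;       /* redirect into the kernel alias */
            return (void *)x;
    }
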
20678diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
20679index 5b238981..77fdd78 100644
20680--- a/arch/x86/include/asm/word-at-a-time.h
20681+++ b/arch/x86/include/asm/word-at-a-time.h
20682@@ -11,7 +11,7 @@
20683 * and shift, for example.
20684 */
20685 struct word_at_a_time {
20686- const unsigned long one_bits, high_bits;
20687+ unsigned long one_bits, high_bits;
20688 };
20689
20690 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
20691diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
20692index e45e4da..44e8572 100644
20693--- a/arch/x86/include/asm/x86_init.h
20694+++ b/arch/x86/include/asm/x86_init.h
20695@@ -129,7 +129,7 @@ struct x86_init_ops {
20696 struct x86_init_timers timers;
20697 struct x86_init_iommu iommu;
20698 struct x86_init_pci pci;
20699-};
20700+} __no_const;
20701
20702 /**
20703 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
20704@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
20705 void (*setup_percpu_clockev)(void);
20706 void (*early_percpu_clock_init)(void);
20707 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
20708-};
20709+} __no_const;
20710
20711 struct timespec;
20712
20713@@ -168,7 +168,7 @@ struct x86_platform_ops {
20714 void (*save_sched_clock_state)(void);
20715 void (*restore_sched_clock_state)(void);
20716 void (*apic_post_init)(void);
20717-};
20718+} __no_const;
20719
20720 struct pci_dev;
20721 struct msi_msg;
20722@@ -185,7 +185,7 @@ struct x86_msi_ops {
20723 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
20724 u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag);
20725 u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag);
20726-};
20727+} __no_const;
20728
20729 struct IO_APIC_route_entry;
20730 struct io_apic_irq_attr;
20731@@ -206,7 +206,7 @@ struct x86_io_apic_ops {
20732 unsigned int destination, int vector,
20733 struct io_apic_irq_attr *attr);
20734 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
20735-};
20736+} __no_const;
20737
20738 extern struct x86_init_ops x86_init;
20739 extern struct x86_cpuinit_ops x86_cpuinit;
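
Tagging the x86_*_ops tables __no_const above is the flip side of constification: the grsecurity GCC plugin would otherwise mark these function-pointer structures const, but they are legitimately rewritten during boot, so the annotation opts them out (the apic templates further down take the opposite route via __read_only placement). The underlying idea in plain C:

    struct ops {
            void (*setup)(void);
    };

    static void noop(void) { }

    /* Constified table: any runtime write is a compile- or fault-time error. */
    static const struct ops fixed_ops = { .setup = noop };

    /* Opted-out table: stays mutable because boot code must patch it. */
    static struct ops boot_patched_ops = { .setup = noop };
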
20740diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
20741index c949923..c22bfa4 100644
20742--- a/arch/x86/include/asm/xen/page.h
20743+++ b/arch/x86/include/asm/xen/page.h
20744@@ -63,7 +63,7 @@ extern int m2p_remove_override(struct page *page,
20745 extern struct page *m2p_find_override(unsigned long mfn);
20746 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
20747
20748-static inline unsigned long pfn_to_mfn(unsigned long pfn)
20749+static inline unsigned long __intentional_overflow(-1) pfn_to_mfn(unsigned long pfn)
20750 {
20751 unsigned long mfn;
20752
20753diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
20754index d949ef2..479b9d1 100644
20755--- a/arch/x86/include/asm/xsave.h
20756+++ b/arch/x86/include/asm/xsave.h
20757@@ -82,8 +82,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20758 if (unlikely(err))
20759 return -EFAULT;
20760
20761+ pax_open_userland();
20762 __asm__ __volatile__(ASM_STAC "\n"
20763- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
20764+ "1:"
20765+ __copyuser_seg
20766+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
20767 "2: " ASM_CLAC "\n"
20768 ".section .fixup,\"ax\"\n"
20769 "3: movl $-1,%[err]\n"
20770@@ -93,18 +96,22 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20771 : [err] "=r" (err)
20772 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
20773 : "memory");
20774+ pax_close_userland();
20775 return err;
20776 }
20777
20778 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20779 {
20780 int err;
20781- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
20782+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
20783 u32 lmask = mask;
20784 u32 hmask = mask >> 32;
20785
20786+ pax_open_userland();
20787 __asm__ __volatile__(ASM_STAC "\n"
20788- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
20789+ "1:"
20790+ __copyuser_seg
20791+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
20792 "2: " ASM_CLAC "\n"
20793 ".section .fixup,\"ax\"\n"
20794 "3: movl $-1,%[err]\n"
20795@@ -114,6 +121,7 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20796 : [err] "=r" (err)
20797 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
20798 : "memory"); /* memory required? */
20799+ pax_close_userland();
20800 return err;
20801 }
20802
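
xsave_user()/xrestore_user() above show the recurring UDEREF bracketing: pax_open_userland()/pax_close_userland() wrap the one instruction that dereferences a user pointer, much as the existing STAC/CLAC pair does for SMAP. Schematically, with placeholder begin/end helpers standing in for the real primitives:

    #define userland_access_begin()  do { /* e.g. reload %gs / flip %cr3 */ } while (0)
    #define userland_access_end()    do { /* restore the restriction     */ } while (0)

    static int read_user_byte(const unsigned char *uptr, unsigned char *out)
    {
            userland_access_begin();
            *out = *uptr;           /* the only point a user pointer is live */
            userland_access_end();
            return 0;
    }
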
20803diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
20804index bbae024..e1528f9 100644
20805--- a/arch/x86/include/uapi/asm/e820.h
20806+++ b/arch/x86/include/uapi/asm/e820.h
20807@@ -63,7 +63,7 @@ struct e820map {
20808 #define ISA_START_ADDRESS 0xa0000
20809 #define ISA_END_ADDRESS 0x100000
20810
20811-#define BIOS_BEGIN 0x000a0000
20812+#define BIOS_BEGIN 0x000c0000
20813 #define BIOS_END 0x00100000
20814
20815 #define BIOS_ROM_BASE 0xffe00000
20816diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
20817index 7b0a55a..ad115bf 100644
20818--- a/arch/x86/include/uapi/asm/ptrace-abi.h
20819+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
20820@@ -49,7 +49,6 @@
20821 #define EFLAGS 144
20822 #define RSP 152
20823 #define SS 160
20824-#define ARGOFFSET R11
20825 #endif /* __ASSEMBLY__ */
20826
20827 /* top of stack page */
20828diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
20829index 047f9ff..4ba5ea6 100644
20830--- a/arch/x86/kernel/Makefile
20831+++ b/arch/x86/kernel/Makefile
20832@@ -24,7 +24,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
20833 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
20834 obj-$(CONFIG_IRQ_WORK) += irq_work.o
20835 obj-y += probe_roms.o
20836-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
20837+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
20838 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
20839 obj-$(CONFIG_X86_64) += mcount_64.o
20840 obj-y += syscall_$(BITS).o vsyscall_gtod.o
20841diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
20842index 86281ff..e046fc2 100644
20843--- a/arch/x86/kernel/acpi/boot.c
20844+++ b/arch/x86/kernel/acpi/boot.c
20845@@ -1296,7 +1296,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
20846 * If your system is blacklisted here, but you find that acpi=force
20847 * works for you, please contact linux-acpi@vger.kernel.org
20848 */
20849-static struct dmi_system_id __initdata acpi_dmi_table[] = {
20850+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
20851 /*
20852 * Boxes that need ACPI disabled
20853 */
20854@@ -1371,7 +1371,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
20855 };
20856
20857 /* second table for DMI checks that should run after early-quirks */
20858-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
20859+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
20860 /*
20861 * HP laptops which use a DSDT reporting as HP/SB400/10000,
20862 * which includes some code which overrides all temperature
20863diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
20864index 3136820..e2c6577 100644
20865--- a/arch/x86/kernel/acpi/sleep.c
20866+++ b/arch/x86/kernel/acpi/sleep.c
20867@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
20868 #else /* CONFIG_64BIT */
20869 #ifdef CONFIG_SMP
20870 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
20871+
20872+ pax_open_kernel();
20873 early_gdt_descr.address =
20874 (unsigned long)get_cpu_gdt_table(smp_processor_id());
20875+ pax_close_kernel();
20876+
20877 initial_gs = per_cpu_offset(smp_processor_id());
20878 #endif
20879 initial_code = (unsigned long)wakeup_long64;
20880diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
20881index 665c6b7..eae4d56 100644
20882--- a/arch/x86/kernel/acpi/wakeup_32.S
20883+++ b/arch/x86/kernel/acpi/wakeup_32.S
20884@@ -29,13 +29,11 @@ wakeup_pmode_return:
20885 # and restore the stack ... but you need gdt for this to work
20886 movl saved_context_esp, %esp
20887
20888- movl %cs:saved_magic, %eax
20889- cmpl $0x12345678, %eax
20890+ cmpl $0x12345678, saved_magic
20891 jne bogus_magic
20892
20893 # jump to place where we left off
20894- movl saved_eip, %eax
20895- jmp *%eax
20896+ jmp *(saved_eip)
20897
20898 bogus_magic:
20899 jmp bogus_magic
20900diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
20901index 703130f..27a155d 100644
20902--- a/arch/x86/kernel/alternative.c
20903+++ b/arch/x86/kernel/alternative.c
20904@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20905 */
20906 for (a = start; a < end; a++) {
20907 instr = (u8 *)&a->instr_offset + a->instr_offset;
20908+
20909+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20910+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20911+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20912+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20913+#endif
20914+
20915 replacement = (u8 *)&a->repl_offset + a->repl_offset;
20916 BUG_ON(a->replacementlen > a->instrlen);
20917 BUG_ON(a->instrlen > sizeof(insnbuf));
20918@@ -284,6 +291,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20919 add_nops(insnbuf + a->replacementlen,
20920 a->instrlen - a->replacementlen);
20921
20922+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20923+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20924+ instr = ktva_ktla(instr);
20925+#endif
20926+
20927 text_poke_early(instr, insnbuf, a->instrlen);
20928 }
20929 }
20930@@ -299,10 +311,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
20931 for (poff = start; poff < end; poff++) {
20932 u8 *ptr = (u8 *)poff + *poff;
20933
20934+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20935+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20936+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20937+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20938+#endif
20939+
20940 if (!*poff || ptr < text || ptr >= text_end)
20941 continue;
20942 /* turn DS segment override prefix into lock prefix */
20943- if (*ptr == 0x3e)
20944+ if (*ktla_ktva(ptr) == 0x3e)
20945 text_poke(ptr, ((unsigned char []){0xf0}), 1);
20946 }
20947 mutex_unlock(&text_mutex);
20948@@ -317,10 +335,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
20949 for (poff = start; poff < end; poff++) {
20950 u8 *ptr = (u8 *)poff + *poff;
20951
20952+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20953+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20954+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20955+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20956+#endif
20957+
20958 if (!*poff || ptr < text || ptr >= text_end)
20959 continue;
20960 /* turn lock prefix into DS segment override prefix */
20961- if (*ptr == 0xf0)
20962+ if (*ktla_ktva(ptr) == 0xf0)
20963 text_poke(ptr, ((unsigned char []){0x3E}), 1);
20964 }
20965 mutex_unlock(&text_mutex);
20966@@ -457,7 +481,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
20967
20968 BUG_ON(p->len > MAX_PATCH_LEN);
20969 /* prep the buffer with the original instructions */
20970- memcpy(insnbuf, p->instr, p->len);
20971+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
20972 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
20973 (unsigned long)p->instr, p->len);
20974
20975@@ -504,7 +528,7 @@ void __init alternative_instructions(void)
20976 if (!uniproc_patched || num_possible_cpus() == 1)
20977 free_init_pages("SMP alternatives",
20978 (unsigned long)__smp_locks,
20979- (unsigned long)__smp_locks_end);
20980+ PAGE_ALIGN((unsigned long)__smp_locks_end));
20981 #endif
20982
20983 apply_paravirt(__parainstructions, __parainstructions_end);
20984@@ -524,13 +548,17 @@ void __init alternative_instructions(void)
20985 * instructions. And on the local CPU you need to be protected again NMI or MCE
20986 * handlers seeing an inconsistent instruction while you patch.
20987 */
20988-void *__init_or_module text_poke_early(void *addr, const void *opcode,
20989+void *__kprobes text_poke_early(void *addr, const void *opcode,
20990 size_t len)
20991 {
20992 unsigned long flags;
20993 local_irq_save(flags);
20994- memcpy(addr, opcode, len);
20995+
20996+ pax_open_kernel();
20997+ memcpy(ktla_ktva(addr), opcode, len);
20998 sync_core();
20999+ pax_close_kernel();
21000+
21001 local_irq_restore(flags);
21002 /* Could also do a CLFLUSH here to speed up CPU recovery; but
21003 that causes hangs on some VIA CPUs. */
21004@@ -552,36 +580,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
21005 */
21006 void *text_poke(void *addr, const void *opcode, size_t len)
21007 {
21008- unsigned long flags;
21009- char *vaddr;
21010+ unsigned char *vaddr = ktla_ktva(addr);
21011 struct page *pages[2];
21012- int i;
21013+ size_t i;
21014
21015 if (!core_kernel_text((unsigned long)addr)) {
21016- pages[0] = vmalloc_to_page(addr);
21017- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
21018+ pages[0] = vmalloc_to_page(vaddr);
21019+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
21020 } else {
21021- pages[0] = virt_to_page(addr);
21022+ pages[0] = virt_to_page(vaddr);
21023 WARN_ON(!PageReserved(pages[0]));
21024- pages[1] = virt_to_page(addr + PAGE_SIZE);
21025+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
21026 }
21027 BUG_ON(!pages[0]);
21028- local_irq_save(flags);
21029- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
21030- if (pages[1])
21031- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
21032- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
21033- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
21034- clear_fixmap(FIX_TEXT_POKE0);
21035- if (pages[1])
21036- clear_fixmap(FIX_TEXT_POKE1);
21037- local_flush_tlb();
21038- sync_core();
21039- /* Could also do a CLFLUSH here to speed up CPU recovery; but
21040- that causes hangs on some VIA CPUs. */
21041+ text_poke_early(addr, opcode, len);
21042 for (i = 0; i < len; i++)
21043- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
21044- local_irq_restore(flags);
21045+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
21046 return addr;
21047 }
21048
21049@@ -601,7 +615,7 @@ int poke_int3_handler(struct pt_regs *regs)
21050 if (likely(!bp_patching_in_progress))
21051 return 0;
21052
21053- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
21054+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
21055 return 0;
21056
21057 /* set up the specified breakpoint handler */
21058@@ -635,7 +649,7 @@ int poke_int3_handler(struct pt_regs *regs)
21059 */
21060 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
21061 {
21062- unsigned char int3 = 0xcc;
21063+ const unsigned char int3 = 0xcc;
21064
21065 bp_int3_handler = handler;
21066 bp_int3_addr = (u8 *)addr + sizeof(int3);
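
Under 32-bit KERNEXEC the kernel image is reachable at two addresses, a read-execute view and a read-write alias; ktla_ktva() converts the former into the latter, which is why text_poke() above collapses into text_poke_early() writing through the alias instead of the old fixmap remapping. A toy model, with ALIAS_DELTA as an invented offset between the two views:

    #include <string.h>

    #define ALIAS_DELTA 0x1000UL            /* hypothetical view offset */

    static unsigned char *exec_to_write(unsigned char *exec_addr)
    {
            return exec_addr + ALIAS_DELTA; /* ktla_ktva() analogue */
    }

    static void poke(unsigned char *exec_addr, const void *op, size_t len)
    {
            memcpy(exec_to_write(exec_addr), op, len);  /* patch via RW alias */
    }
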
21067diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
21068index ad28db7..c538b2c 100644
21069--- a/arch/x86/kernel/apic/apic.c
21070+++ b/arch/x86/kernel/apic/apic.c
21071@@ -201,7 +201,7 @@ int first_system_vector = 0xfe;
21072 /*
21073 * Debug level, exported for io_apic.c
21074 */
21075-unsigned int apic_verbosity;
21076+int apic_verbosity;
21077
21078 int pic_mode;
21079
21080@@ -2000,7 +2000,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
21081 apic_write(APIC_ESR, 0);
21082 v = apic_read(APIC_ESR);
21083 ack_APIC_irq();
21084- atomic_inc(&irq_err_count);
21085+ atomic_inc_unchecked(&irq_err_count);
21086
21087 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
21088 smp_processor_id(), v);
21089diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
21090index 7c1b294..e71d27f 100644
21091--- a/arch/x86/kernel/apic/apic_flat_64.c
21092+++ b/arch/x86/kernel/apic/apic_flat_64.c
21093@@ -154,7 +154,7 @@ static int flat_probe(void)
21094 return 1;
21095 }
21096
21097-static struct apic apic_flat = {
21098+static struct apic apic_flat __read_only = {
21099 .name = "flat",
21100 .probe = flat_probe,
21101 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
21102@@ -268,7 +268,7 @@ static int physflat_probe(void)
21103 return 0;
21104 }
21105
21106-static struct apic apic_physflat = {
21107+static struct apic apic_physflat __read_only = {
21108
21109 .name = "physical flat",
21110 .probe = physflat_probe,
21111diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
21112index 8c7c982..a225910 100644
21113--- a/arch/x86/kernel/apic/apic_noop.c
21114+++ b/arch/x86/kernel/apic/apic_noop.c
21115@@ -118,7 +118,7 @@ static void noop_apic_write(u32 reg, u32 v)
21116 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
21117 }
21118
21119-struct apic apic_noop = {
21120+struct apic apic_noop __read_only = {
21121 .name = "noop",
21122 .probe = noop_probe,
21123 .acpi_madt_oem_check = NULL,
21124diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
21125index e4840aa..e7d9dac 100644
21126--- a/arch/x86/kernel/apic/bigsmp_32.c
21127+++ b/arch/x86/kernel/apic/bigsmp_32.c
21128@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
21129 return dmi_bigsmp;
21130 }
21131
21132-static struct apic apic_bigsmp = {
21133+static struct apic apic_bigsmp __read_only = {
21134
21135 .name = "bigsmp",
21136 .probe = probe_bigsmp,
21137diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
21138index 81e08ef..abc77e5 100644
21139--- a/arch/x86/kernel/apic/io_apic.c
21140+++ b/arch/x86/kernel/apic/io_apic.c
21141@@ -1042,7 +1042,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
21142 }
21143 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
21144
21145-void lock_vector_lock(void)
21146+void lock_vector_lock(void) __acquires(vector_lock)
21147 {
21148 	/* Used to ensure the online set of cpus does not change
21149 * during assign_irq_vector.
21150@@ -1050,7 +1050,7 @@ void lock_vector_lock(void)
21151 raw_spin_lock(&vector_lock);
21152 }
21153
21154-void unlock_vector_lock(void)
21155+void unlock_vector_lock(void) __releases(vector_lock)
21156 {
21157 raw_spin_unlock(&vector_lock);
21158 }
21159@@ -2349,7 +2349,7 @@ static void ack_apic_edge(struct irq_data *data)
21160 ack_APIC_irq();
21161 }
21162
21163-atomic_t irq_mis_count;
21164+atomic_unchecked_t irq_mis_count;
21165
21166 #ifdef CONFIG_GENERIC_PENDING_IRQ
21167 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
21168@@ -2490,7 +2490,7 @@ static void ack_apic_level(struct irq_data *data)
21169 * at the cpu.
21170 */
21171 if (!(v & (1 << (i & 0x1f)))) {
21172- atomic_inc(&irq_mis_count);
21173+ atomic_inc_unchecked(&irq_mis_count);
21174
21175 eoi_ioapic_irq(irq, cfg);
21176 }
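
irq_err_count and irq_mis_count above become atomic_unchecked_t because, under PAX_REFCOUNT, a plain atomic_inc() traps on signed overflow; pure statistics counters take the *_unchecked variant, where wrapping is harmless. A sketch of the distinction (the type layout is reconstructed, not quoted from the patch):

    typedef struct { volatile int counter; } atomic_unchecked_t;

    static inline void atomic_inc_unchecked(atomic_unchecked_t *a)
    {
            /* plain wrapping add: no overflow trap wanted for statistics */
            __atomic_fetch_add(&a->counter, 1, __ATOMIC_RELAXED);
    }
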
21177diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
21178index cceb352..a635fd8 100644
21179--- a/arch/x86/kernel/apic/probe_32.c
21180+++ b/arch/x86/kernel/apic/probe_32.c
21181@@ -72,7 +72,7 @@ static int probe_default(void)
21182 return 1;
21183 }
21184
21185-static struct apic apic_default = {
21186+static struct apic apic_default __read_only = {
21187
21188 .name = "default",
21189 .probe = probe_default,
21190diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
21191index e66766b..1c008ba 100644
21192--- a/arch/x86/kernel/apic/x2apic_cluster.c
21193+++ b/arch/x86/kernel/apic/x2apic_cluster.c
21194@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
21195 return notifier_from_errno(err);
21196 }
21197
21198-static struct notifier_block __refdata x2apic_cpu_notifier = {
21199+static struct notifier_block x2apic_cpu_notifier = {
21200 .notifier_call = update_clusterinfo,
21201 };
21202
21203@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
21204 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
21205 }
21206
21207-static struct apic apic_x2apic_cluster = {
21208+static struct apic apic_x2apic_cluster __read_only = {
21209
21210 .name = "cluster x2apic",
21211 .probe = x2apic_cluster_probe,
21212diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
21213index 6d600eb..0300c00 100644
21214--- a/arch/x86/kernel/apic/x2apic_phys.c
21215+++ b/arch/x86/kernel/apic/x2apic_phys.c
21216@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
21217 return apic == &apic_x2apic_phys;
21218 }
21219
21220-static struct apic apic_x2apic_phys = {
21221+static struct apic apic_x2apic_phys __read_only = {
21222
21223 .name = "physical x2apic",
21224 .probe = x2apic_phys_probe,
21225diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
21226index 293b41d..4df25fd 100644
21227--- a/arch/x86/kernel/apic/x2apic_uv_x.c
21228+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
21229@@ -350,7 +350,7 @@ static int uv_probe(void)
21230 return apic == &apic_x2apic_uv_x;
21231 }
21232
21233-static struct apic __refdata apic_x2apic_uv_x = {
21234+static struct apic apic_x2apic_uv_x __read_only = {
21235
21236 .name = "UV large system",
21237 .probe = uv_probe,
21238diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
21239index 5848744..56cb598 100644
21240--- a/arch/x86/kernel/apm_32.c
21241+++ b/arch/x86/kernel/apm_32.c
21242@@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex);
21243 * This is for buggy BIOS's that refer to (real mode) segment 0x40
21244 * even though they are called in protected mode.
21245 */
21246-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
21247+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
21248 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
21249
21250 static const char driver_version[] = "1.16ac"; /* no spaces */
21251@@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call)
21252 BUG_ON(cpu != 0);
21253 gdt = get_cpu_gdt_table(cpu);
21254 save_desc_40 = gdt[0x40 / 8];
21255+
21256+ pax_open_kernel();
21257 gdt[0x40 / 8] = bad_bios_desc;
21258+ pax_close_kernel();
21259
21260 apm_irq_save(flags);
21261 APM_DO_SAVE_SEGS;
21262@@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call)
21263 &call->esi);
21264 APM_DO_RESTORE_SEGS;
21265 apm_irq_restore(flags);
21266+
21267+ pax_open_kernel();
21268 gdt[0x40 / 8] = save_desc_40;
21269+ pax_close_kernel();
21270+
21271 put_cpu();
21272
21273 return call->eax & 0xff;
21274@@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void *_call)
21275 BUG_ON(cpu != 0);
21276 gdt = get_cpu_gdt_table(cpu);
21277 save_desc_40 = gdt[0x40 / 8];
21278+
21279+ pax_open_kernel();
21280 gdt[0x40 / 8] = bad_bios_desc;
21281+ pax_close_kernel();
21282
21283 apm_irq_save(flags);
21284 APM_DO_SAVE_SEGS;
21285@@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void *_call)
21286 &call->eax);
21287 APM_DO_RESTORE_SEGS;
21288 apm_irq_restore(flags);
21289+
21290+ pax_open_kernel();
21291 gdt[0x40 / 8] = save_desc_40;
21292+ pax_close_kernel();
21293+
21294 put_cpu();
21295 return error;
21296 }
21297@@ -2350,12 +2364,15 @@ static int __init apm_init(void)
21298 * code to that CPU.
21299 */
21300 gdt = get_cpu_gdt_table(0);
21301+
21302+ pax_open_kernel();
21303 set_desc_base(&gdt[APM_CS >> 3],
21304 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
21305 set_desc_base(&gdt[APM_CS_16 >> 3],
21306 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
21307 set_desc_base(&gdt[APM_DS >> 3],
21308 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
21309+ pax_close_kernel();
21310
21311 proc_create("apm", 0, NULL, &apm_file_ops);
21312
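
The APM BIOS workaround above has to scribble on GDT entries that KERNEXEC keeps read-only, so every such write is bracketed by pax_open_kernel()/pax_close_kernel(), which, as a working assumption, toggle CR0.WP or its paravirt equivalent. The pattern:

    static void write_readonly_slot(unsigned long *slot, unsigned long val)
    {
            /* pax_open_kernel() analogue: e.g. clear CR0.WP  */
            *slot = val;            /* the single sanctioned write */
            /* pax_close_kernel() analogue: restore CR0.WP    */
    }
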
21313diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
21314index 9f6b934..cf5ffb3 100644
21315--- a/arch/x86/kernel/asm-offsets.c
21316+++ b/arch/x86/kernel/asm-offsets.c
21317@@ -32,6 +32,8 @@ void common(void) {
21318 OFFSET(TI_flags, thread_info, flags);
21319 OFFSET(TI_status, thread_info, status);
21320 OFFSET(TI_addr_limit, thread_info, addr_limit);
21321+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
21322+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
21323
21324 BLANK();
21325 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
21326@@ -52,8 +54,26 @@ void common(void) {
21327 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
21328 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
21329 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
21330+
21331+#ifdef CONFIG_PAX_KERNEXEC
21332+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21333 #endif
21334
21335+#ifdef CONFIG_PAX_MEMORY_UDEREF
21336+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21337+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21338+#ifdef CONFIG_X86_64
21339+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21340+#endif
21341+#endif
21342+
21343+#endif
21344+
21345+ BLANK();
21346+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21347+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21348+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21349+
21350 #ifdef CONFIG_XEN
21351 BLANK();
21352 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
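
The new TI_lowest_stack and PAGE_SIZE_asm constants above become visible to assembly because asm-offsets.c is compiled only to an assembly listing: its DEFINE()/OFFSET() macros emit "->NAME value" markers that the build scripts turn into #defines for .S files. Reconstructed from include/linux/kbuild.h of that era (treat the exact spelling as an assumption):

    #include <stddef.h>

    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    #define OFFSET(sym, str, mem) \
            DEFINE(sym, offsetof(struct str, mem))

    #define BLANK() \
            asm volatile("\n->" : : )
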
21353diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21354index e7c798b..2b2019b 100644
21355--- a/arch/x86/kernel/asm-offsets_64.c
21356+++ b/arch/x86/kernel/asm-offsets_64.c
21357@@ -77,6 +77,7 @@ int main(void)
21358 BLANK();
21359 #undef ENTRY
21360
21361+ DEFINE(TSS_size, sizeof(struct tss_struct));
21362 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21363 BLANK();
21364
21365diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21366index 7fd54f0..0691410 100644
21367--- a/arch/x86/kernel/cpu/Makefile
21368+++ b/arch/x86/kernel/cpu/Makefile
21369@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21370 CFLAGS_REMOVE_perf_event.o = -pg
21371 endif
21372
21373-# Make sure load_percpu_segment has no stackprotector
21374-nostackp := $(call cc-option, -fno-stack-protector)
21375-CFLAGS_common.o := $(nostackp)
21376-
21377 obj-y := intel_cacheinfo.o scattered.o topology.o
21378 obj-y += proc.o capflags.o powerflags.o common.o
21379 obj-y += rdrand.o
21380diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21381index ce8b8ff..d7d8851 100644
21382--- a/arch/x86/kernel/cpu/amd.c
21383+++ b/arch/x86/kernel/cpu/amd.c
21384@@ -728,7 +728,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21385 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21386 {
21387 /* AMD errata T13 (order #21922) */
21388- if ((c->x86 == 6)) {
21389+ if (c->x86 == 6) {
21390 /* Duron Rev A0 */
21391 if (c->x86_model == 3 && c->x86_mask == 0)
21392 size = 64;
21393diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21394index ef1b93f..150db65 100644
21395--- a/arch/x86/kernel/cpu/common.c
21396+++ b/arch/x86/kernel/cpu/common.c
21397@@ -90,60 +90,6 @@ static const struct cpu_dev default_cpu = {
21398
21399 static const struct cpu_dev *this_cpu = &default_cpu;
21400
21401-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21402-#ifdef CONFIG_X86_64
21403- /*
21404- * We need valid kernel segments for data and code in long mode too
21405- * IRET will check the segment types kkeil 2000/10/28
21406- * Also sysret mandates a special GDT layout
21407- *
21408- * TLS descriptors are currently at a different place compared to i386.
21409- * Hopefully nobody expects them at a fixed place (Wine?)
21410- */
21411- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21412- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21413- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21414- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21415- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21416- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21417-#else
21418- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21419- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21420- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21421- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21422- /*
21423- * Segments used for calling PnP BIOS have byte granularity.
21424- * They code segments and data segments have fixed 64k limits,
21425- * the transfer segment sizes are set at run time.
21426- */
21427- /* 32-bit code */
21428- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21429- /* 16-bit code */
21430- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21431- /* 16-bit data */
21432- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21433- /* 16-bit data */
21434- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21435- /* 16-bit data */
21436- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21437- /*
21438- * The APM segments have byte granularity and their bases
21439- * are set at run time. All have 64k limits.
21440- */
21441- /* 32-bit code */
21442- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21443- /* 16-bit code */
21444- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21445- /* data */
21446- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21447-
21448- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21449- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21450- GDT_STACK_CANARY_INIT
21451-#endif
21452-} };
21453-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21454-
21455 static int __init x86_xsave_setup(char *s)
21456 {
21457 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
21458@@ -295,6 +241,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21459 }
21460 }
21461
21462+#ifdef CONFIG_X86_64
21463+static __init int setup_disable_pcid(char *arg)
21464+{
21465+ setup_clear_cpu_cap(X86_FEATURE_PCID);
21466+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21467+
21468+#ifdef CONFIG_PAX_MEMORY_UDEREF
21469+ if (clone_pgd_mask != ~(pgdval_t)0UL)
21470+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21471+#endif
21472+
21473+ return 1;
21474+}
21475+__setup("nopcid", setup_disable_pcid);
21476+
21477+static void setup_pcid(struct cpuinfo_x86 *c)
21478+{
21479+ if (!cpu_has(c, X86_FEATURE_PCID)) {
21480+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
21481+
21482+#ifdef CONFIG_PAX_MEMORY_UDEREF
21483+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
21484+ pax_open_kernel();
21485+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21486+ pax_close_kernel();
21487+ printk("PAX: slow and weak UDEREF enabled\n");
21488+ } else
21489+ printk("PAX: UDEREF disabled\n");
21490+#endif
21491+
21492+ return;
21493+ }
21494+
21495+ printk("PAX: PCID detected\n");
21496+ set_in_cr4(X86_CR4_PCIDE);
21497+
21498+#ifdef CONFIG_PAX_MEMORY_UDEREF
21499+ pax_open_kernel();
21500+ clone_pgd_mask = ~(pgdval_t)0UL;
21501+ pax_close_kernel();
21502+ if (pax_user_shadow_base)
21503+ printk("PAX: weak UDEREF enabled\n");
21504+ else {
21505+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
21506+ printk("PAX: strong UDEREF enabled\n");
21507+ }
21508+#endif
21509+
21510+ if (cpu_has(c, X86_FEATURE_INVPCID))
21511+ printk("PAX: INVPCID detected\n");
21512+}
21513+#endif
21514+
21515 /*
21516 * Some CPU features depend on higher CPUID levels, which may not always
21517 * be available due to CPUID level capping or broken virtualization
21518@@ -395,7 +394,7 @@ void switch_to_new_gdt(int cpu)
21519 {
21520 struct desc_ptr gdt_descr;
21521
21522- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
21523+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
21524 gdt_descr.size = GDT_SIZE - 1;
21525 load_gdt(&gdt_descr);
21526 /* Reload the per-cpu base */
21527@@ -885,6 +884,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21528 setup_smep(c);
21529 setup_smap(c);
21530
21531+#ifdef CONFIG_X86_64
21532+ setup_pcid(c);
21533+#endif
21534+
21535 /*
21536 * The vendor-specific functions might have changed features.
21537 * Now we do "generic changes."
21538@@ -893,6 +896,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21539 /* Filter out anything that depends on CPUID levels we don't have */
21540 filter_cpuid_features(c, true);
21541
21542+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
21543+ setup_clear_cpu_cap(X86_FEATURE_SEP);
21544+#endif
21545+
21546 /* If the model name is still unset, do table lookup. */
21547 if (!c->x86_model_id[0]) {
21548 const char *p;
21549@@ -973,7 +980,7 @@ static void syscall32_cpu_init(void)
21550 void enable_sep_cpu(void)
21551 {
21552 int cpu = get_cpu();
21553- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21554+ struct tss_struct *tss = init_tss + cpu;
21555
21556 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21557 put_cpu();
21558@@ -1113,14 +1120,16 @@ static __init int setup_disablecpuid(char *arg)
21559 }
21560 __setup("clearcpuid=", setup_disablecpuid);
21561
21562+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
21563+EXPORT_PER_CPU_SYMBOL(current_tinfo);
21564+
21565 DEFINE_PER_CPU(unsigned long, kernel_stack) =
21566- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
21567+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
21568 EXPORT_PER_CPU_SYMBOL(kernel_stack);
21569
21570 #ifdef CONFIG_X86_64
21571-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21572-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
21573- (unsigned long) debug_idt_table };
21574+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21575+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
21576
21577 DEFINE_PER_CPU_FIRST(union irq_stack_union,
21578 irq_stack_union) __aligned(PAGE_SIZE) __visible;
21579@@ -1283,7 +1292,7 @@ void cpu_init(void)
21580 load_ucode_ap();
21581
21582 cpu = stack_smp_processor_id();
21583- t = &per_cpu(init_tss, cpu);
21584+ t = init_tss + cpu;
21585 oist = &per_cpu(orig_ist, cpu);
21586
21587 #ifdef CONFIG_NUMA
21588@@ -1318,7 +1327,6 @@ void cpu_init(void)
21589 wrmsrl(MSR_KERNEL_GS_BASE, 0);
21590 barrier();
21591
21592- x86_configure_nx();
21593 enable_x2apic();
21594
21595 /*
21596@@ -1370,7 +1378,7 @@ void cpu_init(void)
21597 {
21598 int cpu = smp_processor_id();
21599 struct task_struct *curr = current;
21600- struct tss_struct *t = &per_cpu(init_tss, cpu);
21601+ struct tss_struct *t = init_tss + cpu;
21602 struct thread_struct *thread = &curr->thread;
21603
21604 show_ucode_info_early();
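The setup_pcid() logic added above pairs with a "nopcid" command-line override built on the kernel's standard __setup() early-parameter hook. A minimal sketch of that pattern, with a hypothetical option name and flag (only the API itself is from the source):

#include <linux/init.h>
#include <linux/types.h>

static bool example_disabled __initdata;        /* hypothetical flag */

/* Invoked during early boot if "noexample" appears on the kernel
 * command line; returning 1 tells the parser the option was consumed. */
static int __init setup_disable_example(char *arg)
{
        example_disabled = true;
        return 1;
}
__setup("noexample", setup_disable_example);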
21605diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
21606index 9c8f739..902a9c5 100644
21607--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
21608+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
21609@@ -1026,6 +1026,22 @@ static struct attribute *default_attrs[] = {
21610 };
21611
21612 #ifdef CONFIG_AMD_NB
21613+static struct attribute *default_attrs_amd_nb[] = {
21614+ &type.attr,
21615+ &level.attr,
21616+ &coherency_line_size.attr,
21617+ &physical_line_partition.attr,
21618+ &ways_of_associativity.attr,
21619+ &number_of_sets.attr,
21620+ &size.attr,
21621+ &shared_cpu_map.attr,
21622+ &shared_cpu_list.attr,
21623+ NULL,
21624+ NULL,
21625+ NULL,
21626+ NULL
21627+};
21628+
21629 static struct attribute **amd_l3_attrs(void)
21630 {
21631 static struct attribute **attrs;
21632@@ -1036,18 +1052,7 @@ static struct attribute **amd_l3_attrs(void)
21633
21634 n = ARRAY_SIZE(default_attrs);
21635
21636- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
21637- n += 2;
21638-
21639- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
21640- n += 1;
21641-
21642- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
21643- if (attrs == NULL)
21644- return attrs = default_attrs;
21645-
21646- for (n = 0; default_attrs[n]; n++)
21647- attrs[n] = default_attrs[n];
21648+ attrs = default_attrs_amd_nb;
21649
21650 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
21651 attrs[n++] = &cache_disable_0.attr;
21652@@ -1098,6 +1103,13 @@ static struct kobj_type ktype_cache = {
21653 .default_attrs = default_attrs,
21654 };
21655
21656+#ifdef CONFIG_AMD_NB
21657+static struct kobj_type ktype_cache_amd_nb = {
21658+ .sysfs_ops = &sysfs_ops,
21659+ .default_attrs = default_attrs_amd_nb,
21660+};
21661+#endif
21662+
21663 static struct kobj_type ktype_percpu_entry = {
21664 .sysfs_ops = &sysfs_ops,
21665 };
21666@@ -1163,20 +1175,26 @@ static int cache_add_dev(struct device *dev)
21667 return retval;
21668 }
21669
21670+#ifdef CONFIG_AMD_NB
21671+ amd_l3_attrs();
21672+#endif
21673+
21674 for (i = 0; i < num_cache_leaves; i++) {
21675+ struct kobj_type *ktype;
21676+
21677 this_object = INDEX_KOBJECT_PTR(cpu, i);
21678 this_object->cpu = cpu;
21679 this_object->index = i;
21680
21681 this_leaf = CPUID4_INFO_IDX(cpu, i);
21682
21683- ktype_cache.default_attrs = default_attrs;
21684+ ktype = &ktype_cache;
21685 #ifdef CONFIG_AMD_NB
21686 if (this_leaf->base.nb)
21687- ktype_cache.default_attrs = amd_l3_attrs();
21688+ ktype = &ktype_cache_amd_nb;
21689 #endif
21690 retval = kobject_init_and_add(&(this_object->kobj),
21691- &ktype_cache,
21692+ ktype,
21693 per_cpu(ici_cache_kobject, cpu),
21694 "index%1lu", i);
21695 if (unlikely(retval)) {
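The intel_cacheinfo.c rewrite above trades a kzalloc()-built attribute array, plus in-place mutation of the shared ktype_cache.default_attrs, for a second static table and a second kobj_type chosen at registration time. A compressed sketch of that selection pattern, using illustrative names and the 3.16-era .default_attrs field:

#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/types.h>

static struct attribute *example_attrs[] = { NULL };          /* illustrative */
static struct attribute *example_attrs_extended[] = { NULL };
static const struct sysfs_ops example_sysfs_ops;

/* Two fixed kobj_types; which attributes a kobject exposes is decided
 * once, up front, instead of by rewriting a shared ktype (which would
 * race on CPU hotplug). */
static struct kobj_type ktype_plain = {
        .sysfs_ops      = &example_sysfs_ops,
        .default_attrs  = example_attrs,
};

static struct kobj_type ktype_extended = {
        .sysfs_ops      = &example_sysfs_ops,
        .default_attrs  = example_attrs_extended,
};

static int register_leaf(struct kobject *kobj, struct kobject *parent,
                         bool extended, unsigned long idx)
{
        struct kobj_type *ktype = extended ? &ktype_extended : &ktype_plain;

        return kobject_init_and_add(kobj, ktype, parent, "index%lu", idx);
}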
21696diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
21697index 9a79c8d..158c2f1 100644
21698--- a/arch/x86/kernel/cpu/mcheck/mce.c
21699+++ b/arch/x86/kernel/cpu/mcheck/mce.c
21700@@ -45,6 +45,7 @@
21701 #include <asm/processor.h>
21702 #include <asm/mce.h>
21703 #include <asm/msr.h>
21704+#include <asm/local.h>
21705
21706 #include "mce-internal.h"
21707
21708@@ -259,7 +260,7 @@ static void print_mce(struct mce *m)
21709 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
21710 m->cs, m->ip);
21711
21712- if (m->cs == __KERNEL_CS)
21713+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
21714 print_symbol("{%s}", m->ip);
21715 pr_cont("\n");
21716 }
21717@@ -292,10 +293,10 @@ static void print_mce(struct mce *m)
21718
21719 #define PANIC_TIMEOUT 5 /* 5 seconds */
21720
21721-static atomic_t mce_paniced;
21722+static atomic_unchecked_t mce_paniced;
21723
21724 static int fake_panic;
21725-static atomic_t mce_fake_paniced;
21726+static atomic_unchecked_t mce_fake_paniced;
21727
21728 /* Panic in progress. Enable interrupts and wait for final IPI */
21729 static void wait_for_panic(void)
21730@@ -319,7 +320,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21731 /*
21732 * Make sure only one CPU runs in machine check panic
21733 */
21734- if (atomic_inc_return(&mce_paniced) > 1)
21735+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
21736 wait_for_panic();
21737 barrier();
21738
21739@@ -327,7 +328,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21740 console_verbose();
21741 } else {
21742 /* Don't log too much for fake panic */
21743- if (atomic_inc_return(&mce_fake_paniced) > 1)
21744+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
21745 return;
21746 }
21747 /* First print corrected ones that are still unlogged */
21748@@ -366,7 +367,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21749 if (!fake_panic) {
21750 if (panic_timeout == 0)
21751 panic_timeout = mca_cfg.panic_timeout;
21752- panic(msg);
21753+ panic("%s", msg);
21754 } else
21755 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
21756 }
21757@@ -697,7 +698,7 @@ static int mce_timed_out(u64 *t)
21758 * might have been modified by someone else.
21759 */
21760 rmb();
21761- if (atomic_read(&mce_paniced))
21762+ if (atomic_read_unchecked(&mce_paniced))
21763 wait_for_panic();
21764 if (!mca_cfg.monarch_timeout)
21765 goto out;
21766@@ -1674,7 +1675,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
21767 }
21768
21769 /* Call the installed machine check handler for this CPU setup. */
21770-void (*machine_check_vector)(struct pt_regs *, long error_code) =
21771+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
21772 unexpected_machine_check;
21773
21774 /*
21775@@ -1697,7 +1698,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21776 return;
21777 }
21778
21779+ pax_open_kernel();
21780 machine_check_vector = do_machine_check;
21781+ pax_close_kernel();
21782
21783 __mcheck_cpu_init_generic();
21784 __mcheck_cpu_init_vendor(c);
21785@@ -1711,7 +1714,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21786 */
21787
21788 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
21789-static int mce_chrdev_open_count; /* #times opened */
21790+static local_t mce_chrdev_open_count; /* #times opened */
21791 static int mce_chrdev_open_exclu; /* already open exclusive? */
21792
21793 static int mce_chrdev_open(struct inode *inode, struct file *file)
21794@@ -1719,7 +1722,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21795 spin_lock(&mce_chrdev_state_lock);
21796
21797 if (mce_chrdev_open_exclu ||
21798- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
21799+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
21800 spin_unlock(&mce_chrdev_state_lock);
21801
21802 return -EBUSY;
21803@@ -1727,7 +1730,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21804
21805 if (file->f_flags & O_EXCL)
21806 mce_chrdev_open_exclu = 1;
21807- mce_chrdev_open_count++;
21808+ local_inc(&mce_chrdev_open_count);
21809
21810 spin_unlock(&mce_chrdev_state_lock);
21811
21812@@ -1738,7 +1741,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
21813 {
21814 spin_lock(&mce_chrdev_state_lock);
21815
21816- mce_chrdev_open_count--;
21817+ local_dec(&mce_chrdev_open_count);
21818 mce_chrdev_open_exclu = 0;
21819
21820 spin_unlock(&mce_chrdev_state_lock);
21821@@ -2414,7 +2417,7 @@ static __init void mce_init_banks(void)
21822
21823 for (i = 0; i < mca_cfg.banks; i++) {
21824 struct mce_bank *b = &mce_banks[i];
21825- struct device_attribute *a = &b->attr;
21826+ device_attribute_no_const *a = &b->attr;
21827
21828 sysfs_attr_init(&a->attr);
21829 a->attr.name = b->attrname;
21830@@ -2521,7 +2524,7 @@ struct dentry *mce_get_debugfs_dir(void)
21831 static void mce_reset(void)
21832 {
21833 cpu_missing = 0;
21834- atomic_set(&mce_fake_paniced, 0);
21835+ atomic_set_unchecked(&mce_fake_paniced, 0);
21836 atomic_set(&mce_executing, 0);
21837 atomic_set(&mce_callin, 0);
21838 atomic_set(&global_nwo, 0);
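Two conversions in the mce.c hunks are worth separating: the panic counters become atomic_unchecked_t, PaX's wrap-permitted atomic that exists only in these kernels, while the chrdev open count becomes a local_t, which is plain upstream API. A minimal sketch of the local_t usage (names are illustrative):

#include <asm/local.h>

static local_t open_count = LOCAL_INIT(0);

/* local_t is an atomic type optimized for updates from the local CPU;
 * the driver above still takes its spinlock for cross-CPU ordering, so
 * the counter only has to be torn-read safe, which local_read() is. */
static void on_open(void)
{
        local_inc(&open_count);
}

static void on_release(void)
{
        local_dec(&open_count);
}

static long opens_outstanding(void)
{
        return local_read(&open_count);
}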
21839diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
21840index a304298..49b6d06 100644
21841--- a/arch/x86/kernel/cpu/mcheck/p5.c
21842+++ b/arch/x86/kernel/cpu/mcheck/p5.c
21843@@ -10,6 +10,7 @@
21844 #include <asm/processor.h>
21845 #include <asm/mce.h>
21846 #include <asm/msr.h>
21847+#include <asm/pgtable.h>
21848
21849 /* By default disabled */
21850 int mce_p5_enabled __read_mostly;
21851@@ -48,7 +49,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
21852 if (!cpu_has(c, X86_FEATURE_MCE))
21853 return;
21854
21855+ pax_open_kernel();
21856 machine_check_vector = pentium_machine_check;
21857+ pax_close_kernel();
21858 /* Make sure the vector pointer is visible before we enable MCEs: */
21859 wmb();
21860
21861diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
21862index 7dc5564..1273569 100644
21863--- a/arch/x86/kernel/cpu/mcheck/winchip.c
21864+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
21865@@ -9,6 +9,7 @@
21866 #include <asm/processor.h>
21867 #include <asm/mce.h>
21868 #include <asm/msr.h>
21869+#include <asm/pgtable.h>
21870
21871 /* Machine check handler for WinChip C6: */
21872 static void winchip_machine_check(struct pt_regs *regs, long error_code)
21873@@ -22,7 +23,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
21874 {
21875 u32 lo, hi;
21876
21877+ pax_open_kernel();
21878 machine_check_vector = winchip_machine_check;
21879+ pax_close_kernel();
21880 /* Make sure the vector pointer is visible before we enable MCEs: */
21881 wmb();
21882
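p5.c and winchip.c repeat one pattern: machine_check_vector is made __read_only elsewhere in the patch, so the one legitimate writer opens a temporary write window around the store. A sketch of the pattern with an illustrative pointer; pax_open_kernel(), pax_close_kernel(), and __read_only exist only in PaX/grsecurity kernels:

static void (*hook)(int) __read_only;   /* illustrative read-only pointer */

static void install_hook(void (*fn)(int))
{
        pax_open_kernel();      /* briefly make __read_only data writable */
        hook = fn;
        pax_close_kernel();     /* restore protection before returning */
}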
21883diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
21884index dd9d619..86e1d81 100644
21885--- a/arch/x86/kernel/cpu/microcode/core.c
21886+++ b/arch/x86/kernel/cpu/microcode/core.c
21887@@ -516,7 +516,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21888 return NOTIFY_OK;
21889 }
21890
21891-static struct notifier_block __refdata mc_cpu_notifier = {
21892+static struct notifier_block mc_cpu_notifier = {
21893 .notifier_call = mc_cpu_callback,
21894 };
21895
21896diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
21897index a276fa7..e66810f 100644
21898--- a/arch/x86/kernel/cpu/microcode/intel.c
21899+++ b/arch/x86/kernel/cpu/microcode/intel.c
21900@@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21901
21902 static int get_ucode_user(void *to, const void *from, size_t n)
21903 {
21904- return copy_from_user(to, from, n);
21905+ return copy_from_user(to, (const void __force_user *)from, n);
21906 }
21907
21908 static enum ucode_state
21909 request_microcode_user(int cpu, const void __user *buf, size_t size)
21910 {
21911- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21912+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21913 }
21914
21915 static void microcode_fini_cpu(int cpu)
21916diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
21917index f961de9..8a9d332 100644
21918--- a/arch/x86/kernel/cpu/mtrr/main.c
21919+++ b/arch/x86/kernel/cpu/mtrr/main.c
21920@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
21921 u64 size_or_mask, size_and_mask;
21922 static bool mtrr_aps_delayed_init;
21923
21924-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
21925+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
21926
21927 const struct mtrr_ops *mtrr_if;
21928
21929diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
21930index df5e41f..816c719 100644
21931--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
21932+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
21933@@ -25,7 +25,7 @@ struct mtrr_ops {
21934 int (*validate_add_page)(unsigned long base, unsigned long size,
21935 unsigned int type);
21936 int (*have_wrcomb)(void);
21937-};
21938+} __do_const;
21939
21940 extern int generic_get_free_region(unsigned long base, unsigned long size,
21941 int replace_reg);
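Tagging struct mtrr_ops with __do_const is a PaX gcc-plugin attribute that forces every instance of the structure to be treated as const, pushing the function-pointer table into read-only memory. In plain C the equivalent discipline is declaring the ops instances const, as in this illustrative sketch:

struct example_ops {                    /* illustrative ops table */
        int  (*probe)(void);
        void (*shutdown)(void);
};

static int noop_probe(void)
{
        return 0;
}

static void noop_shutdown(void)
{
}

/* const places the instance in .rodata, so the function pointers
 * cannot be redirected by a runtime memory-corruption bug. */
static const struct example_ops noop_ops = {
        .probe          = noop_probe,
        .shutdown       = noop_shutdown,
};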
21942diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
21943index 2879ecd..bb8c80b 100644
21944--- a/arch/x86/kernel/cpu/perf_event.c
21945+++ b/arch/x86/kernel/cpu/perf_event.c
21946@@ -1372,7 +1372,7 @@ static void __init pmu_check_apic(void)
21947
21948 }
21949
21950-static struct attribute_group x86_pmu_format_group = {
21951+static attribute_group_no_const x86_pmu_format_group = {
21952 .name = "format",
21953 .attrs = NULL,
21954 };
21955@@ -1471,7 +1471,7 @@ static struct attribute *events_attr[] = {
21956 NULL,
21957 };
21958
21959-static struct attribute_group x86_pmu_events_group = {
21960+static attribute_group_no_const x86_pmu_events_group = {
21961 .name = "events",
21962 .attrs = events_attr,
21963 };
21964@@ -1995,7 +1995,7 @@ static unsigned long get_segment_base(unsigned int segment)
21965 if (idx > GDT_ENTRIES)
21966 return 0;
21967
21968- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
21969+ desc = get_cpu_gdt_table(smp_processor_id());
21970 }
21971
21972 return get_desc_base(desc + idx);
21973@@ -2085,7 +2085,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
21974 break;
21975
21976 perf_callchain_store(entry, frame.return_address);
21977- fp = frame.next_frame;
21978+ fp = (const void __force_user *)frame.next_frame;
21979 }
21980 }
21981
21982diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21983index 639d128..e92d7e5 100644
21984--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21985+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21986@@ -405,7 +405,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
21987 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
21988 {
21989 struct attribute **attrs;
21990- struct attribute_group *attr_group;
21991+ attribute_group_no_const *attr_group;
21992 int i = 0, j;
21993
21994 while (amd_iommu_v2_event_descs[i].attr.attr.name)
21995diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
21996index 2502d0d..e5cc05c 100644
21997--- a/arch/x86/kernel/cpu/perf_event_intel.c
21998+++ b/arch/x86/kernel/cpu/perf_event_intel.c
21999@@ -2353,10 +2353,10 @@ __init int intel_pmu_init(void)
22000 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
22001
22002 if (boot_cpu_has(X86_FEATURE_PDCM)) {
22003- u64 capabilities;
22004+ u64 capabilities = x86_pmu.intel_cap.capabilities;
22005
22006- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
22007- x86_pmu.intel_cap.capabilities = capabilities;
22008+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
22009+ x86_pmu.intel_cap.capabilities = capabilities;
22010 }
22011
22012 intel_ds_init();
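The perf_event_intel.c change above swaps an unconditional rdmsrl() for rdmsrl_safe(), which survives the #GP a hypervisor may raise for an unimplemented MSR and lets the caller keep its precomputed default. The fallback pattern, sketched (the wrapper is illustrative; the MSR constant is the one from the hunk):

#include <linux/types.h>
#include <asm/msr.h>

static u64 read_perf_caps(u64 fallback)
{
        u64 caps;

        /* rdmsrl_safe() returns 0 on success and non-zero if the RDMSR
         * faulted; on a fault, fall back to the caller's default. */
        if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &caps))
                caps = fallback;
        return caps;
}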
22013diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
22014index 619f769..d510008 100644
22015--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
22016+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
22017@@ -449,7 +449,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
22018 NULL,
22019 };
22020
22021-static struct attribute_group rapl_pmu_events_group = {
22022+static attribute_group_no_const rapl_pmu_events_group __read_only = {
22023 .name = "events",
22024 .attrs = NULL, /* patched at runtime */
22025 };
22026diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
22027index ae6552a..b5be2d3 100644
22028--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
22029+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
22030@@ -3694,7 +3694,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
22031 static int __init uncore_type_init(struct intel_uncore_type *type)
22032 {
22033 struct intel_uncore_pmu *pmus;
22034- struct attribute_group *attr_group;
22035+ attribute_group_no_const *attr_group;
22036 struct attribute **attrs;
22037 int i, j;
22038
22039diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
22040index 90236f0..54cb20d 100644
22041--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
22042+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
22043@@ -503,7 +503,7 @@ struct intel_uncore_box {
22044 struct uncore_event_desc {
22045 struct kobj_attribute attr;
22046 const char *config;
22047-};
22048+} __do_const;
22049
22050 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
22051 { \
22052diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
22053index 3225ae6c..ee3c6db 100644
22054--- a/arch/x86/kernel/cpuid.c
22055+++ b/arch/x86/kernel/cpuid.c
22056@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
22057 return notifier_from_errno(err);
22058 }
22059
22060-static struct notifier_block __refdata cpuid_class_cpu_notifier =
22061+static struct notifier_block cpuid_class_cpu_notifier =
22062 {
22063 .notifier_call = cpuid_class_cpu_callback,
22064 };
22065diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
22066index 507de80..ebaae2a 100644
22067--- a/arch/x86/kernel/crash.c
22068+++ b/arch/x86/kernel/crash.c
22069@@ -58,7 +58,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
22070 #ifdef CONFIG_X86_32
22071 struct pt_regs fixed_regs;
22072
22073- if (!user_mode_vm(regs)) {
22074+ if (!user_mode(regs)) {
22075 crash_fixup_ss_esp(&fixed_regs, regs);
22076 regs = &fixed_regs;
22077 }
22078diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
22079index afa64ad..dce67dd 100644
22080--- a/arch/x86/kernel/crash_dump_64.c
22081+++ b/arch/x86/kernel/crash_dump_64.c
22082@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
22083 return -ENOMEM;
22084
22085 if (userbuf) {
22086- if (copy_to_user(buf, vaddr + offset, csize)) {
22087+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
22088 iounmap(vaddr);
22089 return -EFAULT;
22090 }
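crash_dump_64.c keeps the standard copy_to_user() error contract; the patch only adds the __force_user sparse annotation so PaX's separated user/kernel address spaces typecheck. The contract itself, in a standalone sketch:

#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

/* copy_to_user() returns the number of bytes it could NOT copy, so
 * any non-zero result is reported to the caller as -EFAULT. */
static ssize_t give_to_user(char __user *dst, const char *src, size_t n)
{
        if (copy_to_user(dst, src, n))
                return -EFAULT;
        return n;
}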
22091diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
22092index f6dfd93..892ade4 100644
22093--- a/arch/x86/kernel/doublefault.c
22094+++ b/arch/x86/kernel/doublefault.c
22095@@ -12,7 +12,7 @@
22096
22097 #define DOUBLEFAULT_STACKSIZE (1024)
22098 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
22099-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
22100+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
22101
22102 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
22103
22104@@ -22,7 +22,7 @@ static void doublefault_fn(void)
22105 unsigned long gdt, tss;
22106
22107 native_store_gdt(&gdt_desc);
22108- gdt = gdt_desc.address;
22109+ gdt = (unsigned long)gdt_desc.address;
22110
22111 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
22112
22113@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
22114 /* 0x2 bit is always set */
22115 .flags = X86_EFLAGS_SF | 0x2,
22116 .sp = STACK_START,
22117- .es = __USER_DS,
22118+ .es = __KERNEL_DS,
22119 .cs = __KERNEL_CS,
22120 .ss = __KERNEL_DS,
22121- .ds = __USER_DS,
22122+ .ds = __KERNEL_DS,
22123 .fs = __KERNEL_PERCPU,
22124
22125 .__cr3 = __pa_nodebug(swapper_pg_dir),
22126diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
22127index b74ebc7..6dbb0c5 100644
22128--- a/arch/x86/kernel/dumpstack.c
22129+++ b/arch/x86/kernel/dumpstack.c
22130@@ -2,6 +2,9 @@
22131 * Copyright (C) 1991, 1992 Linus Torvalds
22132 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
22133 */
22134+#ifdef CONFIG_GRKERNSEC_HIDESYM
22135+#define __INCLUDED_BY_HIDESYM 1
22136+#endif
22137 #include <linux/kallsyms.h>
22138 #include <linux/kprobes.h>
22139 #include <linux/uaccess.h>
22140@@ -40,16 +43,14 @@ void printk_address(unsigned long address)
22141 static void
22142 print_ftrace_graph_addr(unsigned long addr, void *data,
22143 const struct stacktrace_ops *ops,
22144- struct thread_info *tinfo, int *graph)
22145+ struct task_struct *task, int *graph)
22146 {
22147- struct task_struct *task;
22148 unsigned long ret_addr;
22149 int index;
22150
22151 if (addr != (unsigned long)return_to_handler)
22152 return;
22153
22154- task = tinfo->task;
22155 index = task->curr_ret_stack;
22156
22157 if (!task->ret_stack || index < *graph)
22158@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
22159 static inline void
22160 print_ftrace_graph_addr(unsigned long addr, void *data,
22161 const struct stacktrace_ops *ops,
22162- struct thread_info *tinfo, int *graph)
22163+ struct task_struct *task, int *graph)
22164 { }
22165 #endif
22166
22167@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
22168 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
22169 */
22170
22171-static inline int valid_stack_ptr(struct thread_info *tinfo,
22172- void *p, unsigned int size, void *end)
22173+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
22174 {
22175- void *t = tinfo;
22176 if (end) {
22177 if (p < end && p >= (end-THREAD_SIZE))
22178 return 1;
22179@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
22180 }
22181
22182 unsigned long
22183-print_context_stack(struct thread_info *tinfo,
22184+print_context_stack(struct task_struct *task, void *stack_start,
22185 unsigned long *stack, unsigned long bp,
22186 const struct stacktrace_ops *ops, void *data,
22187 unsigned long *end, int *graph)
22188 {
22189 struct stack_frame *frame = (struct stack_frame *)bp;
22190
22191- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
22192+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
22193 unsigned long addr;
22194
22195 addr = *stack;
22196@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
22197 } else {
22198 ops->address(data, addr, 0);
22199 }
22200- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22201+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22202 }
22203 stack++;
22204 }
22205@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
22206 EXPORT_SYMBOL_GPL(print_context_stack);
22207
22208 unsigned long
22209-print_context_stack_bp(struct thread_info *tinfo,
22210+print_context_stack_bp(struct task_struct *task, void *stack_start,
22211 unsigned long *stack, unsigned long bp,
22212 const struct stacktrace_ops *ops, void *data,
22213 unsigned long *end, int *graph)
22214@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22215 struct stack_frame *frame = (struct stack_frame *)bp;
22216 unsigned long *ret_addr = &frame->return_address;
22217
22218- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
22219+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
22220 unsigned long addr = *ret_addr;
22221
22222 if (!__kernel_text_address(addr))
22223@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22224 ops->address(data, addr, 1);
22225 frame = frame->next_frame;
22226 ret_addr = &frame->return_address;
22227- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22228+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22229 }
22230
22231 return (unsigned long)frame;
22232@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
22233 static void print_trace_address(void *data, unsigned long addr, int reliable)
22234 {
22235 touch_nmi_watchdog();
22236- printk(data);
22237+ printk("%s", (char *)data);
22238 printk_stack_address(addr, reliable);
22239 }
22240
22241@@ -225,6 +224,8 @@ unsigned long oops_begin(void)
22242 EXPORT_SYMBOL_GPL(oops_begin);
22243 NOKPROBE_SYMBOL(oops_begin);
22244
22245+extern void gr_handle_kernel_exploit(void);
22246+
22247 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22248 {
22249 if (regs && kexec_should_crash(current))
22250@@ -246,7 +247,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22251 panic("Fatal exception in interrupt");
22252 if (panic_on_oops)
22253 panic("Fatal exception");
22254- do_exit(signr);
22255+
22256+ gr_handle_kernel_exploit();
22257+
22258+ do_group_exit(signr);
22259 }
22260 NOKPROBE_SYMBOL(oops_end);
22261
22262@@ -275,7 +279,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
22263 print_modules();
22264 show_regs(regs);
22265 #ifdef CONFIG_X86_32
22266- if (user_mode_vm(regs)) {
22267+ if (user_mode(regs)) {
22268 sp = regs->sp;
22269 ss = regs->ss & 0xffff;
22270 } else {
22271@@ -304,7 +308,7 @@ void die(const char *str, struct pt_regs *regs, long err)
22272 unsigned long flags = oops_begin();
22273 int sig = SIGSEGV;
22274
22275- if (!user_mode_vm(regs))
22276+ if (!user_mode(regs))
22277 report_bug(regs->ip, regs);
22278
22279 if (__die(str, regs, err))
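The dumpstack.c rework drops the thread_info pointer from the walkers and instead passes the stack's base address, so valid_stack_ptr() bounds every dereference against the actual stack region. The core loop, reduced to a standalone sketch (x86 frame layout assumed: saved frame pointer, then return address):

#include <stdint.h>
#include <stdio.h>

struct stack_frame {
        struct stack_frame *next_frame;
        unsigned long return_address;
};

/* Walk saved frame pointers, refusing to dereference anything that
 * falls outside [start, end), the bounds of the stack region. */
static void walk_frames(struct stack_frame *frame, void *start, void *end)
{
        while ((void *)frame >= start && (void *)(frame + 1) <= end) {
                printf("ret=%#lx\n", frame->return_address);
                if (frame->next_frame <= frame) /* guard against loops */
                        break;
                frame = frame->next_frame;
        }
}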
22280diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
22281index 5abd4cd..c65733b 100644
22282--- a/arch/x86/kernel/dumpstack_32.c
22283+++ b/arch/x86/kernel/dumpstack_32.c
22284@@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22285 bp = stack_frame(task, regs);
22286
22287 for (;;) {
22288- struct thread_info *context;
22289+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22290 void *end_stack;
22291
22292 end_stack = is_hardirq_stack(stack, cpu);
22293 if (!end_stack)
22294 end_stack = is_softirq_stack(stack, cpu);
22295
22296- context = task_thread_info(task);
22297- bp = ops->walk_stack(context, stack, bp, ops, data,
22298+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
22299 end_stack, &graph);
22300
22301 /* Stop if not on irq stack */
22302@@ -123,27 +122,28 @@ void show_regs(struct pt_regs *regs)
22303 int i;
22304
22305 show_regs_print_info(KERN_EMERG);
22306- __show_regs(regs, !user_mode_vm(regs));
22307+ __show_regs(regs, !user_mode(regs));
22308
22309 /*
22310 * When in-kernel, we also print out the stack and code at the
22311 * time of the fault..
22312 */
22313- if (!user_mode_vm(regs)) {
22314+ if (!user_mode(regs)) {
22315 unsigned int code_prologue = code_bytes * 43 / 64;
22316 unsigned int code_len = code_bytes;
22317 unsigned char c;
22318 u8 *ip;
22319+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22320
22321 pr_emerg("Stack:\n");
22322 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
22323
22324 pr_emerg("Code:");
22325
22326- ip = (u8 *)regs->ip - code_prologue;
22327+ ip = (u8 *)regs->ip - code_prologue + cs_base;
22328 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22329 /* try starting at IP */
22330- ip = (u8 *)regs->ip;
22331+ ip = (u8 *)regs->ip + cs_base;
22332 code_len = code_len - code_prologue + 1;
22333 }
22334 for (i = 0; i < code_len; i++, ip++) {
22335@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
22336 pr_cont(" Bad EIP value.");
22337 break;
22338 }
22339- if (ip == (u8 *)regs->ip)
22340+ if (ip == (u8 *)regs->ip + cs_base)
22341 pr_cont(" <%02x>", c);
22342 else
22343 pr_cont(" %02x", c);
22344@@ -165,6 +165,7 @@ int is_valid_bugaddr(unsigned long ip)
22345 {
22346 unsigned short ud2;
22347
22348+ ip = ktla_ktva(ip);
22349 if (ip < PAGE_OFFSET)
22350 return 0;
22351 if (probe_kernel_address((unsigned short *)ip, ud2))
22352@@ -172,3 +173,15 @@ int is_valid_bugaddr(unsigned long ip)
22353
22354 return ud2 == 0x0b0f;
22355 }
22356+
22357+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22358+void pax_check_alloca(unsigned long size)
22359+{
22360+ unsigned long sp = (unsigned long)&sp, stack_left;
22361+
22362+ /* all kernel stacks are of the same size */
22363+ stack_left = sp & (THREAD_SIZE - 1);
22364+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22365+}
22366+EXPORT_SYMBOL(pax_check_alloca);
22367+#endif
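pax_check_alloca() above leans on the fact that 32-bit kernel stacks are THREAD_SIZE-sized and THREAD_SIZE-aligned, so masking the stack pointer gives the bytes remaining. The arithmetic in isolation (the THREAD_SIZE value is illustrative):

#define THREAD_SIZE 8192UL      /* illustrative; must be a power of two */

/* For a stack aligned to its own power-of-two size, the low bits of
 * the stack pointer are exactly its distance from the stack's low
 * end, i.e. the space still available to a downward-growing stack. */
static unsigned long stack_left(unsigned long sp)
{
        return sp & (THREAD_SIZE - 1);
}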
22368diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22369index 1abcb50..6c8d702 100644
22370--- a/arch/x86/kernel/dumpstack_64.c
22371+++ b/arch/x86/kernel/dumpstack_64.c
22372@@ -154,12 +154,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22373 const struct stacktrace_ops *ops, void *data)
22374 {
22375 const unsigned cpu = get_cpu();
22376- struct thread_info *tinfo;
22377 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22378 unsigned long dummy;
22379 unsigned used = 0;
22380 int graph = 0;
22381 int done = 0;
22382+ void *stack_start;
22383
22384 if (!task)
22385 task = current;
22386@@ -180,7 +180,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22387 * current stack address. If the stacks consist of nested
22388 * exceptions
22389 */
22390- tinfo = task_thread_info(task);
22391 while (!done) {
22392 unsigned long *stack_end;
22393 enum stack_type stype;
22394@@ -203,7 +202,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22395 if (ops->stack(data, id) < 0)
22396 break;
22397
22398- bp = ops->walk_stack(tinfo, stack, bp, ops,
22399+ bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
22400 data, stack_end, &graph);
22401 ops->stack(data, "<EOE>");
22402 /*
22403@@ -211,6 +210,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22404 * second-to-last pointer (index -2 to end) in the
22405 * exception stack:
22406 */
22407+ if ((u16)stack_end[-1] != __KERNEL_DS)
22408+ goto out;
22409 stack = (unsigned long *) stack_end[-2];
22410 done = 0;
22411 break;
22412@@ -219,7 +220,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22413
22414 if (ops->stack(data, "IRQ") < 0)
22415 break;
22416- bp = ops->walk_stack(tinfo, stack, bp,
22417+ bp = ops->walk_stack(task, irq_stack, stack, bp,
22418 ops, data, stack_end, &graph);
22419 /*
22420 * We link to the next stack (which would be
22421@@ -241,7 +242,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22422 /*
22423 * This handles the process stack:
22424 */
22425- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22426+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22427+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22428+out:
22429 put_cpu();
22430 }
22431 EXPORT_SYMBOL(dump_trace);
22432@@ -350,3 +353,50 @@ int is_valid_bugaddr(unsigned long ip)
22433
22434 return ud2 == 0x0b0f;
22435 }
22436+
22437+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22438+void pax_check_alloca(unsigned long size)
22439+{
22440+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22441+ unsigned cpu, used;
22442+ char *id;
22443+
22444+ /* check the process stack first */
22445+ stack_start = (unsigned long)task_stack_page(current);
22446+ stack_end = stack_start + THREAD_SIZE;
22447+ if (likely(stack_start <= sp && sp < stack_end)) {
22448+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
22449+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22450+ return;
22451+ }
22452+
22453+ cpu = get_cpu();
22454+
22455+ /* check the irq stacks */
22456+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22457+ stack_start = stack_end - IRQ_STACK_SIZE;
22458+ if (stack_start <= sp && sp < stack_end) {
22459+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22460+ put_cpu();
22461+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22462+ return;
22463+ }
22464+
22465+ /* check the exception stacks */
22466+ used = 0;
22467+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22468+ stack_start = stack_end - EXCEPTION_STKSZ;
22469+ if (stack_end && stack_start <= sp && sp < stack_end) {
22470+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22471+ put_cpu();
22472+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22473+ return;
22474+ }
22475+
22476+ put_cpu();
22477+
22478+ /* unknown stack */
22479+ BUG();
22480+}
22481+EXPORT_SYMBOL(pax_check_alloca);
22482+#endif
22483diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
22484index 988c00a..4f673b6 100644
22485--- a/arch/x86/kernel/e820.c
22486+++ b/arch/x86/kernel/e820.c
22487@@ -803,8 +803,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
22488
22489 static void early_panic(char *msg)
22490 {
22491- early_printk(msg);
22492- panic(msg);
22493+ early_printk("%s", msg);
22494+ panic("%s", msg);
22495 }
22496
22497 static int userdef __initdata;
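early_panic() is changed to pass the message as data ("%s") rather than as the format string itself, closing a format-string hazard when the message contains a % sequence. The difference, in a standalone sketch:

#include <stdio.h>

void report(const char *msg)
{
        /* printf(msg) would be unsafe: msg = "loaded 100% done" makes
         * printf consume a nonexistent vararg for the "% d" conversion. */
        printf("%s", msg);      /* msg is treated strictly as data */
}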
22498diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
22499index 01d1c18..8073693 100644
22500--- a/arch/x86/kernel/early_printk.c
22501+++ b/arch/x86/kernel/early_printk.c
22502@@ -7,6 +7,7 @@
22503 #include <linux/pci_regs.h>
22504 #include <linux/pci_ids.h>
22505 #include <linux/errno.h>
22506+#include <linux/sched.h>
22507 #include <asm/io.h>
22508 #include <asm/processor.h>
22509 #include <asm/fcntl.h>
22510diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
22511index 0d0c9d4..f65b4f6 100644
22512--- a/arch/x86/kernel/entry_32.S
22513+++ b/arch/x86/kernel/entry_32.S
22514@@ -177,13 +177,153 @@
22515 /*CFI_REL_OFFSET gs, PT_GS*/
22516 .endm
22517 .macro SET_KERNEL_GS reg
22518+
22519+#ifdef CONFIG_CC_STACKPROTECTOR
22520 movl $(__KERNEL_STACK_CANARY), \reg
22521+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22522+ movl $(__USER_DS), \reg
22523+#else
22524+ xorl \reg, \reg
22525+#endif
22526+
22527 movl \reg, %gs
22528 .endm
22529
22530 #endif /* CONFIG_X86_32_LAZY_GS */
22531
22532-.macro SAVE_ALL
22533+.macro pax_enter_kernel
22534+#ifdef CONFIG_PAX_KERNEXEC
22535+ call pax_enter_kernel
22536+#endif
22537+.endm
22538+
22539+.macro pax_exit_kernel
22540+#ifdef CONFIG_PAX_KERNEXEC
22541+ call pax_exit_kernel
22542+#endif
22543+.endm
22544+
22545+#ifdef CONFIG_PAX_KERNEXEC
22546+ENTRY(pax_enter_kernel)
22547+#ifdef CONFIG_PARAVIRT
22548+ pushl %eax
22549+ pushl %ecx
22550+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
22551+ mov %eax, %esi
22552+#else
22553+ mov %cr0, %esi
22554+#endif
22555+ bts $16, %esi
22556+ jnc 1f
22557+ mov %cs, %esi
22558+ cmp $__KERNEL_CS, %esi
22559+ jz 3f
22560+ ljmp $__KERNEL_CS, $3f
22561+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
22562+2:
22563+#ifdef CONFIG_PARAVIRT
22564+ mov %esi, %eax
22565+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
22566+#else
22567+ mov %esi, %cr0
22568+#endif
22569+3:
22570+#ifdef CONFIG_PARAVIRT
22571+ popl %ecx
22572+ popl %eax
22573+#endif
22574+ ret
22575+ENDPROC(pax_enter_kernel)
22576+
22577+ENTRY(pax_exit_kernel)
22578+#ifdef CONFIG_PARAVIRT
22579+ pushl %eax
22580+ pushl %ecx
22581+#endif
22582+ mov %cs, %esi
22583+ cmp $__KERNEXEC_KERNEL_CS, %esi
22584+ jnz 2f
22585+#ifdef CONFIG_PARAVIRT
22586+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
22587+ mov %eax, %esi
22588+#else
22589+ mov %cr0, %esi
22590+#endif
22591+ btr $16, %esi
22592+ ljmp $__KERNEL_CS, $1f
22593+1:
22594+#ifdef CONFIG_PARAVIRT
22595+ mov %esi, %eax
22596+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
22597+#else
22598+ mov %esi, %cr0
22599+#endif
22600+2:
22601+#ifdef CONFIG_PARAVIRT
22602+ popl %ecx
22603+ popl %eax
22604+#endif
22605+ ret
22606+ENDPROC(pax_exit_kernel)
22607+#endif
22608+
22609+ .macro pax_erase_kstack
22610+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22611+ call pax_erase_kstack
22612+#endif
22613+ .endm
22614+
22615+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22616+/*
22617+ * ebp: thread_info
22618+ */
22619+ENTRY(pax_erase_kstack)
22620+ pushl %edi
22621+ pushl %ecx
22622+ pushl %eax
22623+
22624+ mov TI_lowest_stack(%ebp), %edi
22625+ mov $-0xBEEF, %eax
22626+ std
22627+
22628+1: mov %edi, %ecx
22629+ and $THREAD_SIZE_asm - 1, %ecx
22630+ shr $2, %ecx
22631+ repne scasl
22632+ jecxz 2f
22633+
22634+ cmp $2*16, %ecx
22635+ jc 2f
22636+
22637+ mov $2*16, %ecx
22638+ repe scasl
22639+ jecxz 2f
22640+ jne 1b
22641+
22642+2: cld
22643+ mov %esp, %ecx
22644+ sub %edi, %ecx
22645+
22646+ cmp $THREAD_SIZE_asm, %ecx
22647+ jb 3f
22648+ ud2
22649+3:
22650+
22651+ shr $2, %ecx
22652+ rep stosl
22653+
22654+ mov TI_task_thread_sp0(%ebp), %edi
22655+ sub $128, %edi
22656+ mov %edi, TI_lowest_stack(%ebp)
22657+
22658+ popl %eax
22659+ popl %ecx
22660+ popl %edi
22661+ ret
22662+ENDPROC(pax_erase_kstack)
22663+#endif
22664+
22665+.macro __SAVE_ALL _DS
22666 cld
22667 PUSH_GS
22668 pushl_cfi %fs
22669@@ -206,7 +346,7 @@
22670 CFI_REL_OFFSET ecx, 0
22671 pushl_cfi %ebx
22672 CFI_REL_OFFSET ebx, 0
22673- movl $(__USER_DS), %edx
22674+ movl $\_DS, %edx
22675 movl %edx, %ds
22676 movl %edx, %es
22677 movl $(__KERNEL_PERCPU), %edx
22678@@ -214,6 +354,15 @@
22679 SET_KERNEL_GS %edx
22680 .endm
22681
22682+.macro SAVE_ALL
22683+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22684+ __SAVE_ALL __KERNEL_DS
22685+ pax_enter_kernel
22686+#else
22687+ __SAVE_ALL __USER_DS
22688+#endif
22689+.endm
22690+
22691 .macro RESTORE_INT_REGS
22692 popl_cfi %ebx
22693 CFI_RESTORE ebx
22694@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
22695 popfl_cfi
22696 jmp syscall_exit
22697 CFI_ENDPROC
22698-END(ret_from_fork)
22699+ENDPROC(ret_from_fork)
22700
22701 ENTRY(ret_from_kernel_thread)
22702 CFI_STARTPROC
22703@@ -340,7 +489,15 @@ ret_from_intr:
22704 andl $SEGMENT_RPL_MASK, %eax
22705 #endif
22706 cmpl $USER_RPL, %eax
22707+
22708+#ifdef CONFIG_PAX_KERNEXEC
22709+ jae resume_userspace
22710+
22711+ pax_exit_kernel
22712+ jmp resume_kernel
22713+#else
22714 jb resume_kernel # not returning to v8086 or userspace
22715+#endif
22716
22717 ENTRY(resume_userspace)
22718 LOCKDEP_SYS_EXIT
22719@@ -352,8 +509,8 @@ ENTRY(resume_userspace)
22720 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
22721 # int/exception return?
22722 jne work_pending
22723- jmp restore_all
22724-END(ret_from_exception)
22725+ jmp restore_all_pax
22726+ENDPROC(ret_from_exception)
22727
22728 #ifdef CONFIG_PREEMPT
22729 ENTRY(resume_kernel)
22730@@ -365,7 +522,7 @@ need_resched:
22731 jz restore_all
22732 call preempt_schedule_irq
22733 jmp need_resched
22734-END(resume_kernel)
22735+ENDPROC(resume_kernel)
22736 #endif
22737 CFI_ENDPROC
22738
22739@@ -395,30 +552,45 @@ sysenter_past_esp:
22740 /*CFI_REL_OFFSET cs, 0*/
22741 /*
22742 * Push current_thread_info()->sysenter_return to the stack.
22743- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
22744- * pushed above; +8 corresponds to copy_thread's esp0 setting.
22745 */
22746- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
22747+ pushl_cfi $0
22748 CFI_REL_OFFSET eip, 0
22749
22750 pushl_cfi %eax
22751 SAVE_ALL
22752+ GET_THREAD_INFO(%ebp)
22753+ movl TI_sysenter_return(%ebp),%ebp
22754+ movl %ebp,PT_EIP(%esp)
22755 ENABLE_INTERRUPTS(CLBR_NONE)
22756
22757 /*
22758 * Load the potential sixth argument from user stack.
22759 * Careful about security.
22760 */
22761+ movl PT_OLDESP(%esp),%ebp
22762+
22763+#ifdef CONFIG_PAX_MEMORY_UDEREF
22764+ mov PT_OLDSS(%esp),%ds
22765+1: movl %ds:(%ebp),%ebp
22766+ push %ss
22767+ pop %ds
22768+#else
22769 cmpl $__PAGE_OFFSET-3,%ebp
22770 jae syscall_fault
22771 ASM_STAC
22772 1: movl (%ebp),%ebp
22773 ASM_CLAC
22774+#endif
22775+
22776 movl %ebp,PT_EBP(%esp)
22777 _ASM_EXTABLE(1b,syscall_fault)
22778
22779 GET_THREAD_INFO(%ebp)
22780
22781+#ifdef CONFIG_PAX_RANDKSTACK
22782+ pax_erase_kstack
22783+#endif
22784+
22785 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22786 jnz sysenter_audit
22787 sysenter_do_call:
22788@@ -434,12 +606,24 @@ sysenter_after_call:
22789 testl $_TIF_ALLWORK_MASK, %ecx
22790 jne sysexit_audit
22791 sysenter_exit:
22792+
22793+#ifdef CONFIG_PAX_RANDKSTACK
22794+ pushl_cfi %eax
22795+ movl %esp, %eax
22796+ call pax_randomize_kstack
22797+ popl_cfi %eax
22798+#endif
22799+
22800+ pax_erase_kstack
22801+
22802 /* if something modifies registers it must also disable sysexit */
22803 movl PT_EIP(%esp), %edx
22804 movl PT_OLDESP(%esp), %ecx
22805 xorl %ebp,%ebp
22806 TRACE_IRQS_ON
22807 1: mov PT_FS(%esp), %fs
22808+2: mov PT_DS(%esp), %ds
22809+3: mov PT_ES(%esp), %es
22810 PTGS_TO_GS
22811 ENABLE_INTERRUPTS_SYSEXIT
22812
22813@@ -456,6 +640,9 @@ sysenter_audit:
22814 movl %eax,%edx /* 2nd arg: syscall number */
22815 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
22816 call __audit_syscall_entry
22817+
22818+ pax_erase_kstack
22819+
22820 pushl_cfi %ebx
22821 movl PT_EAX(%esp),%eax /* reload syscall number */
22822 jmp sysenter_do_call
22823@@ -481,10 +668,16 @@ sysexit_audit:
22824
22825 CFI_ENDPROC
22826 .pushsection .fixup,"ax"
22827-2: movl $0,PT_FS(%esp)
22828+4: movl $0,PT_FS(%esp)
22829+ jmp 1b
22830+5: movl $0,PT_DS(%esp)
22831+ jmp 1b
22832+6: movl $0,PT_ES(%esp)
22833 jmp 1b
22834 .popsection
22835- _ASM_EXTABLE(1b,2b)
22836+ _ASM_EXTABLE(1b,4b)
22837+ _ASM_EXTABLE(2b,5b)
22838+ _ASM_EXTABLE(3b,6b)
22839 PTGS_TO_GS_EX
22840 ENDPROC(ia32_sysenter_target)
22841
22842@@ -495,6 +688,11 @@ ENTRY(system_call)
22843 pushl_cfi %eax # save orig_eax
22844 SAVE_ALL
22845 GET_THREAD_INFO(%ebp)
22846+
22847+#ifdef CONFIG_PAX_RANDKSTACK
22848+ pax_erase_kstack
22849+#endif
22850+
22851 # system call tracing in operation / emulation
22852 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22853 jnz syscall_trace_entry
22854@@ -514,6 +712,15 @@ syscall_exit:
22855 testl $_TIF_ALLWORK_MASK, %ecx # current->work
22856 jne syscall_exit_work
22857
22858+restore_all_pax:
22859+
22860+#ifdef CONFIG_PAX_RANDKSTACK
22861+ movl %esp, %eax
22862+ call pax_randomize_kstack
22863+#endif
22864+
22865+ pax_erase_kstack
22866+
22867 restore_all:
22868 TRACE_IRQS_IRET
22869 restore_all_notrace:
22870@@ -568,14 +775,34 @@ ldt_ss:
22871 * compensating for the offset by changing to the ESPFIX segment with
22872 * a base address that matches for the difference.
22873 */
22874-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
22875+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
22876 mov %esp, %edx /* load kernel esp */
22877 mov PT_OLDESP(%esp), %eax /* load userspace esp */
22878 mov %dx, %ax /* eax: new kernel esp */
22879 sub %eax, %edx /* offset (low word is 0) */
22880+#ifdef CONFIG_SMP
22881+ movl PER_CPU_VAR(cpu_number), %ebx
22882+ shll $PAGE_SHIFT_asm, %ebx
22883+ addl $cpu_gdt_table, %ebx
22884+#else
22885+ movl $cpu_gdt_table, %ebx
22886+#endif
22887 shr $16, %edx
22888- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
22889- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
22890+
22891+#ifdef CONFIG_PAX_KERNEXEC
22892+ mov %cr0, %esi
22893+ btr $16, %esi
22894+ mov %esi, %cr0
22895+#endif
22896+
22897+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
22898+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
22899+
22900+#ifdef CONFIG_PAX_KERNEXEC
22901+ bts $16, %esi
22902+ mov %esi, %cr0
22903+#endif
22904+
22905 pushl_cfi $__ESPFIX_SS
22906 pushl_cfi %eax /* new kernel esp */
22907 /* Disable interrupts, but do not irqtrace this section: we
22908@@ -605,20 +832,18 @@ work_resched:
22909 movl TI_flags(%ebp), %ecx
22910 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
22911 # than syscall tracing?
22912- jz restore_all
22913+ jz restore_all_pax
22914 testb $_TIF_NEED_RESCHED, %cl
22915 jnz work_resched
22916
22917 work_notifysig: # deal with pending signals and
22918 # notify-resume requests
22919+ movl %esp, %eax
22920 #ifdef CONFIG_VM86
22921 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
22922- movl %esp, %eax
22923 jne work_notifysig_v86 # returning to kernel-space or
22924 # vm86-space
22925 1:
22926-#else
22927- movl %esp, %eax
22928 #endif
22929 TRACE_IRQS_ON
22930 ENABLE_INTERRUPTS(CLBR_NONE)
22931@@ -639,7 +864,7 @@ work_notifysig_v86:
22932 movl %eax, %esp
22933 jmp 1b
22934 #endif
22935-END(work_pending)
22936+ENDPROC(work_pending)
22937
22938 # perform syscall exit tracing
22939 ALIGN
22940@@ -647,11 +872,14 @@ syscall_trace_entry:
22941 movl $-ENOSYS,PT_EAX(%esp)
22942 movl %esp, %eax
22943 call syscall_trace_enter
22944+
22945+ pax_erase_kstack
22946+
22947 /* What it returned is what we'll actually use. */
22948 cmpl $(NR_syscalls), %eax
22949 jnae syscall_call
22950 jmp syscall_exit
22951-END(syscall_trace_entry)
22952+ENDPROC(syscall_trace_entry)
22953
22954 # perform syscall exit tracing
22955 ALIGN
22956@@ -664,26 +892,30 @@ syscall_exit_work:
22957 movl %esp, %eax
22958 call syscall_trace_leave
22959 jmp resume_userspace
22960-END(syscall_exit_work)
22961+ENDPROC(syscall_exit_work)
22962 CFI_ENDPROC
22963
22964 RING0_INT_FRAME # can't unwind into user space anyway
22965 syscall_fault:
22966+#ifdef CONFIG_PAX_MEMORY_UDEREF
22967+ push %ss
22968+ pop %ds
22969+#endif
22970 ASM_CLAC
22971 GET_THREAD_INFO(%ebp)
22972 movl $-EFAULT,PT_EAX(%esp)
22973 jmp resume_userspace
22974-END(syscall_fault)
22975+ENDPROC(syscall_fault)
22976
22977 syscall_badsys:
22978 movl $-ENOSYS,%eax
22979 jmp syscall_after_call
22980-END(syscall_badsys)
22981+ENDPROC(syscall_badsys)
22982
22983 sysenter_badsys:
22984 movl $-ENOSYS,%eax
22985 jmp sysenter_after_call
22986-END(syscall_badsys)
22987+ENDPROC(sysenter_badsys)
22988 CFI_ENDPROC
22989
22990 .macro FIXUP_ESPFIX_STACK
22991@@ -696,8 +928,15 @@ END(syscall_badsys)
22992 */
22993 #ifdef CONFIG_X86_ESPFIX32
22994 /* fixup the stack */
22995- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
22996- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
22997+#ifdef CONFIG_SMP
22998+ movl PER_CPU_VAR(cpu_number), %ebx
22999+ shll $PAGE_SHIFT_asm, %ebx
23000+ addl $cpu_gdt_table, %ebx
23001+#else
23002+ movl $cpu_gdt_table, %ebx
23003+#endif
23004+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
23005+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
23006 shl $16, %eax
23007 addl %esp, %eax /* the adjusted stack pointer */
23008 pushl_cfi $__KERNEL_DS
23009@@ -753,7 +992,7 @@ vector=vector+1
23010 .endr
23011 2: jmp common_interrupt
23012 .endr
23013-END(irq_entries_start)
23014+ENDPROC(irq_entries_start)
23015
23016 .previous
23017 END(interrupt)
23018@@ -810,7 +1049,7 @@ ENTRY(coprocessor_error)
23019 pushl_cfi $do_coprocessor_error
23020 jmp error_code
23021 CFI_ENDPROC
23022-END(coprocessor_error)
23023+ENDPROC(coprocessor_error)
23024
23025 ENTRY(simd_coprocessor_error)
23026 RING0_INT_FRAME
23027@@ -823,7 +1062,7 @@ ENTRY(simd_coprocessor_error)
23028 .section .altinstructions,"a"
23029 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
23030 .previous
23031-.section .altinstr_replacement,"ax"
23032+.section .altinstr_replacement,"a"
23033 663: pushl $do_simd_coprocessor_error
23034 664:
23035 .previous
23036@@ -832,7 +1071,7 @@ ENTRY(simd_coprocessor_error)
23037 #endif
23038 jmp error_code
23039 CFI_ENDPROC
23040-END(simd_coprocessor_error)
23041+ENDPROC(simd_coprocessor_error)
23042
23043 ENTRY(device_not_available)
23044 RING0_INT_FRAME
23045@@ -841,18 +1080,18 @@ ENTRY(device_not_available)
23046 pushl_cfi $do_device_not_available
23047 jmp error_code
23048 CFI_ENDPROC
23049-END(device_not_available)
23050+ENDPROC(device_not_available)
23051
23052 #ifdef CONFIG_PARAVIRT
23053 ENTRY(native_iret)
23054 iret
23055 _ASM_EXTABLE(native_iret, iret_exc)
23056-END(native_iret)
23057+ENDPROC(native_iret)
23058
23059 ENTRY(native_irq_enable_sysexit)
23060 sti
23061 sysexit
23062-END(native_irq_enable_sysexit)
23063+ENDPROC(native_irq_enable_sysexit)
23064 #endif
23065
23066 ENTRY(overflow)
23067@@ -862,7 +1101,7 @@ ENTRY(overflow)
23068 pushl_cfi $do_overflow
23069 jmp error_code
23070 CFI_ENDPROC
23071-END(overflow)
23072+ENDPROC(overflow)
23073
23074 ENTRY(bounds)
23075 RING0_INT_FRAME
23076@@ -871,7 +1110,7 @@ ENTRY(bounds)
23077 pushl_cfi $do_bounds
23078 jmp error_code
23079 CFI_ENDPROC
23080-END(bounds)
23081+ENDPROC(bounds)
23082
23083 ENTRY(invalid_op)
23084 RING0_INT_FRAME
23085@@ -880,7 +1119,7 @@ ENTRY(invalid_op)
23086 pushl_cfi $do_invalid_op
23087 jmp error_code
23088 CFI_ENDPROC
23089-END(invalid_op)
23090+ENDPROC(invalid_op)
23091
23092 ENTRY(coprocessor_segment_overrun)
23093 RING0_INT_FRAME
23094@@ -889,7 +1128,7 @@ ENTRY(coprocessor_segment_overrun)
23095 pushl_cfi $do_coprocessor_segment_overrun
23096 jmp error_code
23097 CFI_ENDPROC
23098-END(coprocessor_segment_overrun)
23099+ENDPROC(coprocessor_segment_overrun)
23100
23101 ENTRY(invalid_TSS)
23102 RING0_EC_FRAME
23103@@ -897,7 +1136,7 @@ ENTRY(invalid_TSS)
23104 pushl_cfi $do_invalid_TSS
23105 jmp error_code
23106 CFI_ENDPROC
23107-END(invalid_TSS)
23108+ENDPROC(invalid_TSS)
23109
23110 ENTRY(segment_not_present)
23111 RING0_EC_FRAME
23112@@ -905,7 +1144,7 @@ ENTRY(segment_not_present)
23113 pushl_cfi $do_segment_not_present
23114 jmp error_code
23115 CFI_ENDPROC
23116-END(segment_not_present)
23117+ENDPROC(segment_not_present)
23118
23119 ENTRY(stack_segment)
23120 RING0_EC_FRAME
23121@@ -913,7 +1152,7 @@ ENTRY(stack_segment)
23122 pushl_cfi $do_stack_segment
23123 jmp error_code
23124 CFI_ENDPROC
23125-END(stack_segment)
23126+ENDPROC(stack_segment)
23127
23128 ENTRY(alignment_check)
23129 RING0_EC_FRAME
23130@@ -921,7 +1160,7 @@ ENTRY(alignment_check)
23131 pushl_cfi $do_alignment_check
23132 jmp error_code
23133 CFI_ENDPROC
23134-END(alignment_check)
23135+ENDPROC(alignment_check)
23136
23137 ENTRY(divide_error)
23138 RING0_INT_FRAME
23139@@ -930,7 +1169,7 @@ ENTRY(divide_error)
23140 pushl_cfi $do_divide_error
23141 jmp error_code
23142 CFI_ENDPROC
23143-END(divide_error)
23144+ENDPROC(divide_error)
23145
23146 #ifdef CONFIG_X86_MCE
23147 ENTRY(machine_check)
23148@@ -940,7 +1179,7 @@ ENTRY(machine_check)
23149 pushl_cfi machine_check_vector
23150 jmp error_code
23151 CFI_ENDPROC
23152-END(machine_check)
23153+ENDPROC(machine_check)
23154 #endif
23155
23156 ENTRY(spurious_interrupt_bug)
23157@@ -950,7 +1189,7 @@ ENTRY(spurious_interrupt_bug)
23158 pushl_cfi $do_spurious_interrupt_bug
23159 jmp error_code
23160 CFI_ENDPROC
23161-END(spurious_interrupt_bug)
23162+ENDPROC(spurious_interrupt_bug)
23163
23164 #ifdef CONFIG_XEN
23165 /* Xen doesn't set %esp to be precisely what the normal sysenter
23166@@ -1056,7 +1295,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
23167
23168 ENTRY(mcount)
23169 ret
23170-END(mcount)
23171+ENDPROC(mcount)
23172
23173 ENTRY(ftrace_caller)
23174 cmpl $0, function_trace_stop
23175@@ -1089,7 +1328,7 @@ ftrace_graph_call:
23176 .globl ftrace_stub
23177 ftrace_stub:
23178 ret
23179-END(ftrace_caller)
23180+ENDPROC(ftrace_caller)
23181
23182 ENTRY(ftrace_regs_caller)
23183 pushf /* push flags before compare (in cs location) */
23184@@ -1193,7 +1432,7 @@ trace:
23185 popl %ecx
23186 popl %eax
23187 jmp ftrace_stub
23188-END(mcount)
23189+ENDPROC(mcount)
23190 #endif /* CONFIG_DYNAMIC_FTRACE */
23191 #endif /* CONFIG_FUNCTION_TRACER */
23192
23193@@ -1211,7 +1450,7 @@ ENTRY(ftrace_graph_caller)
23194 popl %ecx
23195 popl %eax
23196 ret
23197-END(ftrace_graph_caller)
23198+ENDPROC(ftrace_graph_caller)
23199
23200 .globl return_to_handler
23201 return_to_handler:
23202@@ -1272,15 +1511,18 @@ error_code:
23203 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
23204 REG_TO_PTGS %ecx
23205 SET_KERNEL_GS %ecx
23206- movl $(__USER_DS), %ecx
23207+ movl $(__KERNEL_DS), %ecx
23208 movl %ecx, %ds
23209 movl %ecx, %es
23210+
23211+ pax_enter_kernel
23212+
23213 TRACE_IRQS_OFF
23214 movl %esp,%eax # pt_regs pointer
23215 call *%edi
23216 jmp ret_from_exception
23217 CFI_ENDPROC
23218-END(page_fault)
23219+ENDPROC(page_fault)
23220
23221 /*
23222 * Debug traps and NMI can happen at the one SYSENTER instruction
23223@@ -1323,7 +1565,7 @@ debug_stack_correct:
23224 call do_debug
23225 jmp ret_from_exception
23226 CFI_ENDPROC
23227-END(debug)
23228+ENDPROC(debug)
23229
23230 /*
23231 * NMI is doubly nasty. It can happen _while_ we're handling
23232@@ -1363,6 +1605,9 @@ nmi_stack_correct:
23233 xorl %edx,%edx # zero error code
23234 movl %esp,%eax # pt_regs pointer
23235 call do_nmi
23236+
23237+ pax_exit_kernel
23238+
23239 jmp restore_all_notrace
23240 CFI_ENDPROC
23241
23242@@ -1400,13 +1645,16 @@ nmi_espfix_stack:
23243 FIXUP_ESPFIX_STACK # %eax == %esp
23244 xorl %edx,%edx # zero error code
23245 call do_nmi
23246+
23247+ pax_exit_kernel
23248+
23249 RESTORE_REGS
23250 lss 12+4(%esp), %esp # back to espfix stack
23251 CFI_ADJUST_CFA_OFFSET -24
23252 jmp irq_return
23253 #endif
23254 CFI_ENDPROC
23255-END(nmi)
23256+ENDPROC(nmi)
23257
23258 ENTRY(int3)
23259 RING0_INT_FRAME
23260@@ -1419,14 +1667,14 @@ ENTRY(int3)
23261 call do_int3
23262 jmp ret_from_exception
23263 CFI_ENDPROC
23264-END(int3)
23265+ENDPROC(int3)
23266
23267 ENTRY(general_protection)
23268 RING0_EC_FRAME
23269 pushl_cfi $do_general_protection
23270 jmp error_code
23271 CFI_ENDPROC
23272-END(general_protection)
23273+ENDPROC(general_protection)
23274
23275 #ifdef CONFIG_KVM_GUEST
23276 ENTRY(async_page_fault)
23277@@ -1435,6 +1683,6 @@ ENTRY(async_page_fault)
23278 pushl_cfi $do_async_page_fault
23279 jmp error_code
23280 CFI_ENDPROC
23281-END(async_page_fault)
23282+ENDPROC(async_page_fault)
23283 #endif
23284
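The largest entry_32.S addition is pax_erase_kstack, which runs on the way out of the kernel: it locates how deep the stack got (tracked in TI_lowest_stack) and refills the used region with the -0xBEEF poison word, so stale kernel data cannot leak through later stack reads. A much-simplified C rendering of the refill step (the real code is assembly and additionally rescans for intact poison runs):

#include <stdint.h>

#define KSTACK_POISON   ((uint32_t)-0xBEEF)     /* same constant as the asm */

/* Refill every 32-bit slot between the deepest point the stack
 * reached (lowest) and the current stack pointer (sp, the higher
 * address on a downward-growing stack) with the poison word. */
static void erase_kstack(uint32_t *lowest, uint32_t *sp)
{
        uint32_t *p;

        for (p = lowest; p < sp; p++)
                *p = KSTACK_POISON;
}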
23285diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
23286index c844f08..966a50e 100644
23287--- a/arch/x86/kernel/entry_64.S
23288+++ b/arch/x86/kernel/entry_64.S
23289@@ -59,6 +59,8 @@
23290 #include <asm/smap.h>
23291 #include <asm/pgtable_types.h>
23292 #include <linux/err.h>
23293+#include <asm/pgtable.h>
23294+#include <asm/alternative-asm.h>
23295
23296 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23297 #include <linux/elf-em.h>
23298@@ -81,6 +83,430 @@ ENTRY(native_usergs_sysret64)
23299 ENDPROC(native_usergs_sysret64)
23300 #endif /* CONFIG_PARAVIRT */
23301
23302+ .macro ljmpq sel, off
23303+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23304+ .byte 0x48; ljmp *1234f(%rip)
23305+ .pushsection .rodata
23306+ .align 16
23307+ 1234: .quad \off; .word \sel
23308+ .popsection
23309+#else
23310+ pushq $\sel
23311+ pushq $\off
23312+ lretq
23313+#endif
23314+ .endm
23315+
23316+ .macro pax_enter_kernel
23317+ pax_set_fptr_mask
23318+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23319+ call pax_enter_kernel
23320+#endif
23321+ .endm
23322+
23323+ .macro pax_exit_kernel
23324+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23325+ call pax_exit_kernel
23326+#endif
23327+
23328+ .endm
23329+
23330+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23331+ENTRY(pax_enter_kernel)
23332+ pushq %rdi
23333+
23334+#ifdef CONFIG_PARAVIRT
23335+ PV_SAVE_REGS(CLBR_RDI)
23336+#endif
23337+
23338+#ifdef CONFIG_PAX_KERNEXEC
23339+ GET_CR0_INTO_RDI
23340+ bts $16,%rdi
23341+ jnc 3f
23342+ mov %cs,%edi
23343+ cmp $__KERNEL_CS,%edi
23344+ jnz 2f
23345+1:
23346+#endif
23347+
23348+#ifdef CONFIG_PAX_MEMORY_UDEREF
23349+ 661: jmp 111f
23350+ .pushsection .altinstr_replacement, "a"
23351+ 662: ASM_NOP2
23352+ .popsection
23353+ .pushsection .altinstructions, "a"
23354+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23355+ .popsection
23356+ GET_CR3_INTO_RDI
23357+ cmp $0,%dil
23358+ jnz 112f
23359+ mov $__KERNEL_DS,%edi
23360+ mov %edi,%ss
23361+ jmp 111f
23362+112: cmp $1,%dil
23363+ jz 113f
23364+ ud2
23365+113: sub $4097,%rdi
23366+ bts $63,%rdi
23367+ SET_RDI_INTO_CR3
23368+ mov $__UDEREF_KERNEL_DS,%edi
23369+ mov %edi,%ss
23370+111:
23371+#endif
23372+
23373+#ifdef CONFIG_PARAVIRT
23374+ PV_RESTORE_REGS(CLBR_RDI)
23375+#endif
23376+
23377+ popq %rdi
23378+ pax_force_retaddr
23379+ retq
23380+
23381+#ifdef CONFIG_PAX_KERNEXEC
23382+2: ljmpq __KERNEL_CS,1b
23383+3: ljmpq __KERNEXEC_KERNEL_CS,4f
23384+4: SET_RDI_INTO_CR0
23385+ jmp 1b
23386+#endif
23387+ENDPROC(pax_enter_kernel)
23388+
23389+ENTRY(pax_exit_kernel)
23390+ pushq %rdi
23391+
23392+#ifdef CONFIG_PARAVIRT
23393+ PV_SAVE_REGS(CLBR_RDI)
23394+#endif
23395+
23396+#ifdef CONFIG_PAX_KERNEXEC
23397+ mov %cs,%rdi
23398+ cmp $__KERNEXEC_KERNEL_CS,%edi
23399+ jz 2f
23400+ GET_CR0_INTO_RDI
23401+ bts $16,%rdi
23402+ jnc 4f
23403+1:
23404+#endif
23405+
23406+#ifdef CONFIG_PAX_MEMORY_UDEREF
23407+ 661: jmp 111f
23408+ .pushsection .altinstr_replacement, "a"
23409+ 662: ASM_NOP2
23410+ .popsection
23411+ .pushsection .altinstructions, "a"
23412+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23413+ .popsection
23414+ mov %ss,%edi
23415+ cmp $__UDEREF_KERNEL_DS,%edi
23416+ jnz 111f
23417+ GET_CR3_INTO_RDI
23418+ cmp $0,%dil
23419+ jz 112f
23420+ ud2
23421+112: add $4097,%rdi
23422+ bts $63,%rdi
23423+ SET_RDI_INTO_CR3
23424+ mov $__KERNEL_DS,%edi
23425+ mov %edi,%ss
23426+111:
23427+#endif
23428+
23429+#ifdef CONFIG_PARAVIRT
23430+ PV_RESTORE_REGS(CLBR_RDI);
23431+#endif
23432+
23433+ popq %rdi
23434+ pax_force_retaddr
23435+ retq
23436+
23437+#ifdef CONFIG_PAX_KERNEXEC
23438+2: GET_CR0_INTO_RDI
23439+ btr $16,%rdi
23440+ jnc 4f
23441+ ljmpq __KERNEL_CS,3f
23442+3: SET_RDI_INTO_CR0
23443+ jmp 1b
23444+4: ud2
23445+ jmp 4b
23446+#endif
23447+ENDPROC(pax_exit_kernel)
23448+#endif
23449+
23450+ .macro pax_enter_kernel_user
23451+ pax_set_fptr_mask
23452+#ifdef CONFIG_PAX_MEMORY_UDEREF
23453+ call pax_enter_kernel_user
23454+#endif
23455+ .endm
23456+
23457+ .macro pax_exit_kernel_user
23458+#ifdef CONFIG_PAX_MEMORY_UDEREF
23459+ call pax_exit_kernel_user
23460+#endif
23461+#ifdef CONFIG_PAX_RANDKSTACK
23462+ pushq %rax
23463+ pushq %r11
23464+ call pax_randomize_kstack
23465+ popq %r11
23466+ popq %rax
23467+#endif
23468+ .endm
23469+
23470+#ifdef CONFIG_PAX_MEMORY_UDEREF
23471+ENTRY(pax_enter_kernel_user)
23472+ pushq %rdi
23473+ pushq %rbx
23474+
23475+#ifdef CONFIG_PARAVIRT
23476+ PV_SAVE_REGS(CLBR_RDI)
23477+#endif
23478+
23479+ 661: jmp 111f
23480+ .pushsection .altinstr_replacement, "a"
23481+ 662: ASM_NOP2
23482+ .popsection
23483+ .pushsection .altinstructions, "a"
23484+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23485+ .popsection
23486+ GET_CR3_INTO_RDI
23487+ cmp $1,%dil
23488+ jnz 4f
23489+ sub $4097,%rdi
23490+ bts $63,%rdi
23491+ SET_RDI_INTO_CR3
23492+ jmp 3f
23493+111:
23494+
23495+ GET_CR3_INTO_RDI
23496+ mov %rdi,%rbx
23497+ add $__START_KERNEL_map,%rbx
23498+ sub phys_base(%rip),%rbx
23499+
23500+#ifdef CONFIG_PARAVIRT
23501+ cmpl $0, pv_info+PARAVIRT_enabled
23502+ jz 1f
23503+ pushq %rdi
23504+ i = 0
23505+ .rept USER_PGD_PTRS
23506+ mov i*8(%rbx),%rsi
23507+ mov $0,%sil
23508+ lea i*8(%rbx),%rdi
23509+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23510+ i = i + 1
23511+ .endr
23512+ popq %rdi
23513+ jmp 2f
23514+1:
23515+#endif
23516+
23517+ i = 0
23518+ .rept USER_PGD_PTRS
23519+ movb $0,i*8(%rbx)
23520+ i = i + 1
23521+ .endr
23522+
23523+2: SET_RDI_INTO_CR3
23524+
23525+#ifdef CONFIG_PAX_KERNEXEC
23526+ GET_CR0_INTO_RDI
23527+ bts $16,%rdi
23528+ SET_RDI_INTO_CR0
23529+#endif
23530+
23531+3:
23532+
23533+#ifdef CONFIG_PARAVIRT
23534+ PV_RESTORE_REGS(CLBR_RDI)
23535+#endif
23536+
23537+ popq %rbx
23538+ popq %rdi
23539+ pax_force_retaddr
23540+ retq
23541+4: ud2
23542+ENDPROC(pax_enter_kernel_user)
23543+
23544+ENTRY(pax_exit_kernel_user)
23545+ pushq %rdi
23546+ pushq %rbx
23547+
23548+#ifdef CONFIG_PARAVIRT
23549+ PV_SAVE_REGS(CLBR_RDI)
23550+#endif
23551+
23552+ GET_CR3_INTO_RDI
23553+ 661: jmp 1f
23554+ .pushsection .altinstr_replacement, "a"
23555+ 662: ASM_NOP2
23556+ .popsection
23557+ .pushsection .altinstructions, "a"
23558+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23559+ .popsection
23560+ cmp $0,%dil
23561+ jnz 3f
23562+ add $4097,%rdi
23563+ bts $63,%rdi
23564+ SET_RDI_INTO_CR3
23565+ jmp 2f
23566+1:
23567+
23568+ mov %rdi,%rbx
23569+
23570+#ifdef CONFIG_PAX_KERNEXEC
23571+ GET_CR0_INTO_RDI
23572+ btr $16,%rdi
23573+ jnc 3f
23574+ SET_RDI_INTO_CR0
23575+#endif
23576+
23577+ add $__START_KERNEL_map,%rbx
23578+ sub phys_base(%rip),%rbx
23579+
23580+#ifdef CONFIG_PARAVIRT
23581+ cmpl $0, pv_info+PARAVIRT_enabled
23582+ jz 1f
23583+ i = 0
23584+ .rept USER_PGD_PTRS
23585+ mov i*8(%rbx),%rsi
23586+ mov $0x67,%sil
23587+ lea i*8(%rbx),%rdi
23588+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23589+ i = i + 1
23590+ .endr
23591+ jmp 2f
23592+1:
23593+#endif
23594+
23595+ i = 0
23596+ .rept USER_PGD_PTRS
23597+ movb $0x67,i*8(%rbx)
23598+ i = i + 1
23599+ .endr
23600+2:
23601+
23602+#ifdef CONFIG_PARAVIRT
23603+ PV_RESTORE_REGS(CLBR_RDI)
23604+#endif
23605+
23606+ popq %rbx
23607+ popq %rdi
23608+ pax_force_retaddr
23609+ retq
23610+3: ud2
23611+ENDPROC(pax_exit_kernel_user)
23612+#endif
23613+
23614+ .macro pax_enter_kernel_nmi
23615+ pax_set_fptr_mask
23616+
23617+#ifdef CONFIG_PAX_KERNEXEC
23618+ GET_CR0_INTO_RDI
23619+ bts $16,%rdi
23620+ jc 110f
23621+ SET_RDI_INTO_CR0
23622+ or $2,%ebx
23623+110:
23624+#endif
23625+
23626+#ifdef CONFIG_PAX_MEMORY_UDEREF
23627+ 661: jmp 111f
23628+ .pushsection .altinstr_replacement, "a"
23629+ 662: ASM_NOP2
23630+ .popsection
23631+ .pushsection .altinstructions, "a"
23632+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23633+ .popsection
23634+ GET_CR3_INTO_RDI
23635+ cmp $0,%dil
23636+ jz 111f
23637+ sub $4097,%rdi
23638+ or $4,%ebx
23639+ bts $63,%rdi
23640+ SET_RDI_INTO_CR3
23641+ mov $__UDEREF_KERNEL_DS,%edi
23642+ mov %edi,%ss
23643+111:
23644+#endif
23645+ .endm
23646+
23647+ .macro pax_exit_kernel_nmi
23648+#ifdef CONFIG_PAX_KERNEXEC
23649+ btr $1,%ebx
23650+ jnc 110f
23651+ GET_CR0_INTO_RDI
23652+ btr $16,%rdi
23653+ SET_RDI_INTO_CR0
23654+110:
23655+#endif
23656+
23657+#ifdef CONFIG_PAX_MEMORY_UDEREF
23658+ btr $2,%ebx
23659+ jnc 111f
23660+ GET_CR3_INTO_RDI
23661+ add $4097,%rdi
23662+ bts $63,%rdi
23663+ SET_RDI_INTO_CR3
23664+ mov $__KERNEL_DS,%edi
23665+ mov %edi,%ss
23666+111:
23667+#endif
23668+ .endm
23669+
23670+ .macro pax_erase_kstack
23671+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23672+ call pax_erase_kstack
23673+#endif
23674+ .endm
23675+
23676+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23677+ENTRY(pax_erase_kstack)
23678+ pushq %rdi
23679+ pushq %rcx
23680+ pushq %rax
23681+ pushq %r11
23682+
23683+ GET_THREAD_INFO(%r11)
23684+ mov TI_lowest_stack(%r11), %rdi
23685+ mov $-0xBEEF, %rax
23686+ std
23687+
23688+1: mov %edi, %ecx
23689+ and $THREAD_SIZE_asm - 1, %ecx
23690+ shr $3, %ecx
23691+ repne scasq
23692+ jecxz 2f
23693+
23694+ cmp $2*8, %ecx
23695+ jc 2f
23696+
23697+ mov $2*8, %ecx
23698+ repe scasq
23699+ jecxz 2f
23700+ jne 1b
23701+
23702+2: cld
23703+ mov %esp, %ecx
23704+ sub %edi, %ecx
23705+
23706+ cmp $THREAD_SIZE_asm, %rcx
23707+ jb 3f
23708+ ud2
23709+3:
23710+
23711+ shr $3, %ecx
23712+ rep stosq
23713+
23714+ mov TI_task_thread_sp0(%r11), %rdi
23715+ sub $256, %rdi
23716+ mov %rdi, TI_lowest_stack(%r11)
23717+
23718+ popq %r11
23719+ popq %rax
23720+ popq %rcx
23721+ popq %rdi
23722+ pax_force_retaddr
23723+ ret
23724+ENDPROC(pax_erase_kstack)
23725+#endif
23726
23727 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
23728 #ifdef CONFIG_TRACE_IRQFLAGS
23729@@ -117,7 +543,7 @@ ENDPROC(native_usergs_sysret64)
23730 .endm
23731
23732 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
23733- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
23734+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
23735 jnc 1f
23736 TRACE_IRQS_ON_DEBUG
23737 1:
23738@@ -155,27 +581,6 @@ ENDPROC(native_usergs_sysret64)
23739 movq \tmp,R11+\offset(%rsp)
23740 .endm
23741
23742- .macro FAKE_STACK_FRAME child_rip
23743- /* push in order ss, rsp, eflags, cs, rip */
23744- xorl %eax, %eax
23745- pushq_cfi $__KERNEL_DS /* ss */
23746- /*CFI_REL_OFFSET ss,0*/
23747- pushq_cfi %rax /* rsp */
23748- CFI_REL_OFFSET rsp,0
23749- pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
23750- /*CFI_REL_OFFSET rflags,0*/
23751- pushq_cfi $__KERNEL_CS /* cs */
23752- /*CFI_REL_OFFSET cs,0*/
23753- pushq_cfi \child_rip /* rip */
23754- CFI_REL_OFFSET rip,0
23755- pushq_cfi %rax /* orig rax */
23756- .endm
23757-
23758- .macro UNFAKE_STACK_FRAME
23759- addq $8*6, %rsp
23760- CFI_ADJUST_CFA_OFFSET -(6*8)
23761- .endm
23762-
23763 /*
23764 * initial frame state for interrupts (and exceptions without error code)
23765 */
23766@@ -242,25 +647,26 @@ ENDPROC(native_usergs_sysret64)
23767 /* save partial stack frame */
23768 .macro SAVE_ARGS_IRQ
23769 cld
23770- /* start from rbp in pt_regs and jump over */
23771- movq_cfi rdi, (RDI-RBP)
23772- movq_cfi rsi, (RSI-RBP)
23773- movq_cfi rdx, (RDX-RBP)
23774- movq_cfi rcx, (RCX-RBP)
23775- movq_cfi rax, (RAX-RBP)
23776- movq_cfi r8, (R8-RBP)
23777- movq_cfi r9, (R9-RBP)
23778- movq_cfi r10, (R10-RBP)
23779- movq_cfi r11, (R11-RBP)
23780+ /* start from r15 in pt_regs and jump over */
23781+ movq_cfi rdi, RDI
23782+ movq_cfi rsi, RSI
23783+ movq_cfi rdx, RDX
23784+ movq_cfi rcx, RCX
23785+ movq_cfi rax, RAX
23786+ movq_cfi r8, R8
23787+ movq_cfi r9, R9
23788+ movq_cfi r10, R10
23789+ movq_cfi r11, R11
23790+ movq_cfi r12, R12
23791
23792 /* Save rbp so that we can unwind from get_irq_regs() */
23793- movq_cfi rbp, 0
23794+ movq_cfi rbp, RBP
23795
23796 /* Save previous stack value */
23797 movq %rsp, %rsi
23798
23799- leaq -RBP(%rsp),%rdi /* arg1 for handler */
23800- testl $3, CS-RBP(%rsi)
23801+ movq %rsp,%rdi /* arg1 for handler */
23802+ testb $3, CS(%rsi)
23803 je 1f
23804 SWAPGS
23805 /*
23806@@ -280,6 +686,18 @@ ENDPROC(native_usergs_sysret64)
23807 0x06 /* DW_OP_deref */, \
23808 0x08 /* DW_OP_const1u */, SS+8-RBP, \
23809 0x22 /* DW_OP_plus */
23810+
23811+#ifdef CONFIG_PAX_MEMORY_UDEREF
23812+ testb $3, CS(%rdi)
23813+ jnz 1f
23814+ pax_enter_kernel
23815+ jmp 2f
23816+1: pax_enter_kernel_user
23817+2:
23818+#else
23819+ pax_enter_kernel
23820+#endif
23821+
23822 /* We entered an interrupt context - irqs are off: */
23823 TRACE_IRQS_OFF
23824 .endm
23825@@ -309,9 +727,52 @@ ENTRY(save_paranoid)
23826 js 1f /* negative -> in kernel */
23827 SWAPGS
23828 xorl %ebx,%ebx
23829-1: ret
23830+1:
23831+#ifdef CONFIG_PAX_MEMORY_UDEREF
23832+ testb $3, CS+8(%rsp)
23833+ jnz 1f
23834+ pax_enter_kernel
23835+ jmp 2f
23836+1: pax_enter_kernel_user
23837+2:
23838+#else
23839+ pax_enter_kernel
23840+#endif
23841+ pax_force_retaddr
23842+ ret
23843 CFI_ENDPROC
23844-END(save_paranoid)
23845+ENDPROC(save_paranoid)
23846+
23847+ENTRY(save_paranoid_nmi)
23848+ XCPT_FRAME 1 RDI+8
23849+ cld
23850+ movq_cfi rdi, RDI+8
23851+ movq_cfi rsi, RSI+8
23852+ movq_cfi rdx, RDX+8
23853+ movq_cfi rcx, RCX+8
23854+ movq_cfi rax, RAX+8
23855+ movq_cfi r8, R8+8
23856+ movq_cfi r9, R9+8
23857+ movq_cfi r10, R10+8
23858+ movq_cfi r11, R11+8
23859+ movq_cfi rbx, RBX+8
23860+ movq_cfi rbp, RBP+8
23861+ movq_cfi r12, R12+8
23862+ movq_cfi r13, R13+8
23863+ movq_cfi r14, R14+8
23864+ movq_cfi r15, R15+8
23865+ movl $1,%ebx
23866+ movl $MSR_GS_BASE,%ecx
23867+ rdmsr
23868+ testl %edx,%edx
23869+ js 1f /* negative -> in kernel */
23870+ SWAPGS
23871+ xorl %ebx,%ebx
23872+1: pax_enter_kernel_nmi
23873+ pax_force_retaddr
23874+ ret
23875+ CFI_ENDPROC
23876+ENDPROC(save_paranoid_nmi)
23877
23878 /*
23879 * A newly forked process directly context switches into this address.
23880@@ -332,7 +793,7 @@ ENTRY(ret_from_fork)
23881
23882 RESTORE_REST
23883
23884- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23885+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23886 jz 1f
23887
23888 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
23889@@ -342,15 +803,13 @@ ENTRY(ret_from_fork)
23890 jmp ret_from_sys_call # go to the SYSRET fastpath
23891
23892 1:
23893- subq $REST_SKIP, %rsp # leave space for volatiles
23894- CFI_ADJUST_CFA_OFFSET REST_SKIP
23895 movq %rbp, %rdi
23896 call *%rbx
23897 movl $0, RAX(%rsp)
23898 RESTORE_REST
23899 jmp int_ret_from_sys_call
23900 CFI_ENDPROC
23901-END(ret_from_fork)
23902+ENDPROC(ret_from_fork)
23903
23904 /*
23905 * System call entry. Up to 6 arguments in registers are supported.
23906@@ -387,7 +846,7 @@ END(ret_from_fork)
23907 ENTRY(system_call)
23908 CFI_STARTPROC simple
23909 CFI_SIGNAL_FRAME
23910- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
23911+ CFI_DEF_CFA rsp,0
23912 CFI_REGISTER rip,rcx
23913 /*CFI_REGISTER rflags,r11*/
23914 SWAPGS_UNSAFE_STACK
23915@@ -400,16 +859,23 @@ GLOBAL(system_call_after_swapgs)
23916
23917 movq %rsp,PER_CPU_VAR(old_rsp)
23918 movq PER_CPU_VAR(kernel_stack),%rsp
23919+ SAVE_ARGS 8*6,0
23920+ pax_enter_kernel_user
23921+
23922+#ifdef CONFIG_PAX_RANDKSTACK
23923+ pax_erase_kstack
23924+#endif
23925+
23926 /*
23927 * No need to follow this irqs off/on section - it's straight
23928 * and short:
23929 */
23930 ENABLE_INTERRUPTS(CLBR_NONE)
23931- SAVE_ARGS 8,0
23932 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
23933 movq %rcx,RIP-ARGOFFSET(%rsp)
23934 CFI_REL_OFFSET rip,RIP-ARGOFFSET
23935- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23936+ GET_THREAD_INFO(%rcx)
23937+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
23938 jnz tracesys
23939 system_call_fastpath:
23940 #if __SYSCALL_MASK == ~0
23941@@ -433,10 +899,13 @@ sysret_check:
23942 LOCKDEP_SYS_EXIT
23943 DISABLE_INTERRUPTS(CLBR_NONE)
23944 TRACE_IRQS_OFF
23945- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
23946+ GET_THREAD_INFO(%rcx)
23947+ movl TI_flags(%rcx),%edx
23948 andl %edi,%edx
23949 jnz sysret_careful
23950 CFI_REMEMBER_STATE
23951+ pax_exit_kernel_user
23952+ pax_erase_kstack
23953 /*
23954 * sysretq will re-enable interrupts:
23955 */
23956@@ -495,6 +964,9 @@ auditsys:
23957 movq %rax,%rsi /* 2nd arg: syscall number */
23958 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
23959 call __audit_syscall_entry
23960+
23961+ pax_erase_kstack
23962+
23963 LOAD_ARGS 0 /* reload call-clobbered registers */
23964 jmp system_call_fastpath
23965
23966@@ -516,7 +988,7 @@ sysret_audit:
23967 /* Do syscall tracing */
23968 tracesys:
23969 #ifdef CONFIG_AUDITSYSCALL
23970- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23971+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
23972 jz auditsys
23973 #endif
23974 SAVE_REST
23975@@ -524,12 +996,15 @@ tracesys:
23976 FIXUP_TOP_OF_STACK %rdi
23977 movq %rsp,%rdi
23978 call syscall_trace_enter
23979+
23980+ pax_erase_kstack
23981+
23982 /*
23983 * Reload arg registers from stack in case ptrace changed them.
23984 * We don't reload %rax because syscall_trace_enter() returned
23985 * the value it wants us to use in the table lookup.
23986 */
23987- LOAD_ARGS ARGOFFSET, 1
23988+ LOAD_ARGS 1
23989 RESTORE_REST
23990 #if __SYSCALL_MASK == ~0
23991 cmpq $__NR_syscall_max,%rax
23992@@ -559,7 +1034,9 @@ GLOBAL(int_with_check)
23993 andl %edi,%edx
23994 jnz int_careful
23995 andl $~TS_COMPAT,TI_status(%rcx)
23996- jmp retint_swapgs
23997+ pax_exit_kernel_user
23998+ pax_erase_kstack
23999+ jmp retint_swapgs_pax
24000
24001 /* Either reschedule or signal or syscall exit tracking needed. */
24002 /* First do a reschedule test. */
24003@@ -605,7 +1082,7 @@ int_restore_rest:
24004 TRACE_IRQS_OFF
24005 jmp int_with_check
24006 CFI_ENDPROC
24007-END(system_call)
24008+ENDPROC(system_call)
24009
24010 .macro FORK_LIKE func
24011 ENTRY(stub_\func)
24012@@ -618,9 +1095,10 @@ ENTRY(stub_\func)
24013 DEFAULT_FRAME 0 8 /* offset 8: return address */
24014 call sys_\func
24015 RESTORE_TOP_OF_STACK %r11, 8
24016- ret $REST_SKIP /* pop extended registers */
24017+ pax_force_retaddr
24018+ ret
24019 CFI_ENDPROC
24020-END(stub_\func)
24021+ENDPROC(stub_\func)
24022 .endm
24023
24024 .macro FIXED_FRAME label,func
24025@@ -630,9 +1108,10 @@ ENTRY(\label)
24026 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
24027 call \func
24028 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
24029+ pax_force_retaddr
24030 ret
24031 CFI_ENDPROC
24032-END(\label)
24033+ENDPROC(\label)
24034 .endm
24035
24036 FORK_LIKE clone
24037@@ -640,19 +1119,6 @@ END(\label)
24038 FORK_LIKE vfork
24039 FIXED_FRAME stub_iopl, sys_iopl
24040
24041-ENTRY(ptregscall_common)
24042- DEFAULT_FRAME 1 8 /* offset 8: return address */
24043- RESTORE_TOP_OF_STACK %r11, 8
24044- movq_cfi_restore R15+8, r15
24045- movq_cfi_restore R14+8, r14
24046- movq_cfi_restore R13+8, r13
24047- movq_cfi_restore R12+8, r12
24048- movq_cfi_restore RBP+8, rbp
24049- movq_cfi_restore RBX+8, rbx
24050- ret $REST_SKIP /* pop extended registers */
24051- CFI_ENDPROC
24052-END(ptregscall_common)
24053-
24054 ENTRY(stub_execve)
24055 CFI_STARTPROC
24056 addq $8, %rsp
24057@@ -664,7 +1130,7 @@ ENTRY(stub_execve)
24058 RESTORE_REST
24059 jmp int_ret_from_sys_call
24060 CFI_ENDPROC
24061-END(stub_execve)
24062+ENDPROC(stub_execve)
24063
24064 /*
24065 * sigreturn is special because it needs to restore all registers on return.
24066@@ -681,7 +1147,7 @@ ENTRY(stub_rt_sigreturn)
24067 RESTORE_REST
24068 jmp int_ret_from_sys_call
24069 CFI_ENDPROC
24070-END(stub_rt_sigreturn)
24071+ENDPROC(stub_rt_sigreturn)
24072
24073 #ifdef CONFIG_X86_X32_ABI
24074 ENTRY(stub_x32_rt_sigreturn)
24075@@ -695,7 +1161,7 @@ ENTRY(stub_x32_rt_sigreturn)
24076 RESTORE_REST
24077 jmp int_ret_from_sys_call
24078 CFI_ENDPROC
24079-END(stub_x32_rt_sigreturn)
24080+ENDPROC(stub_x32_rt_sigreturn)
24081
24082 ENTRY(stub_x32_execve)
24083 CFI_STARTPROC
24084@@ -709,7 +1175,7 @@ ENTRY(stub_x32_execve)
24085 RESTORE_REST
24086 jmp int_ret_from_sys_call
24087 CFI_ENDPROC
24088-END(stub_x32_execve)
24089+ENDPROC(stub_x32_execve)
24090
24091 #endif
24092
24093@@ -746,7 +1212,7 @@ vector=vector+1
24094 2: jmp common_interrupt
24095 .endr
24096 CFI_ENDPROC
24097-END(irq_entries_start)
24098+ENDPROC(irq_entries_start)
24099
24100 .previous
24101 END(interrupt)
24102@@ -763,8 +1229,8 @@ END(interrupt)
24103 /* 0(%rsp): ~(interrupt number) */
24104 .macro interrupt func
24105 /* reserve pt_regs for scratch regs and rbp */
24106- subq $ORIG_RAX-RBP, %rsp
24107- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
24108+ subq $ORIG_RAX, %rsp
24109+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
24110 SAVE_ARGS_IRQ
24111 call \func
24112 .endm
24113@@ -787,14 +1253,14 @@ ret_from_intr:
24114
24115 /* Restore saved previous stack */
24116 popq %rsi
24117- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
24118- leaq ARGOFFSET-RBP(%rsi), %rsp
24119+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
24120+ movq %rsi, %rsp
24121 CFI_DEF_CFA_REGISTER rsp
24122- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
24123+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
24124
24125 exit_intr:
24126 GET_THREAD_INFO(%rcx)
24127- testl $3,CS-ARGOFFSET(%rsp)
24128+ testb $3,CS-ARGOFFSET(%rsp)
24129 je retint_kernel
24130
24131 /* Interrupt came from user space */
24132@@ -816,12 +1282,35 @@ retint_swapgs: /* return to user-space */
24133 * The iretq could re-enable interrupts:
24134 */
24135 DISABLE_INTERRUPTS(CLBR_ANY)
24136+ pax_exit_kernel_user
24137+retint_swapgs_pax:
24138 TRACE_IRQS_IRETQ
24139 SWAPGS
24140 jmp restore_args
24141
24142 retint_restore_args: /* return to kernel space */
24143 DISABLE_INTERRUPTS(CLBR_ANY)
24144+ pax_exit_kernel
24145+
24146+#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
24147+ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
24148+	 * namely calling EFI runtime services with a phys mapping. We start
24149+	 * off with NOPs and patch in the real instrumentation (BTS/OR)
24150+	 * before starting any userland process, even before bringing up
24151+	 * the APs.
24152+ */
24153+ .pushsection .altinstr_replacement, "a"
24154+ 601: pax_force_retaddr (RIP-ARGOFFSET)
24155+ 602:
24156+ .popsection
24157+ 603: .fill 602b-601b, 1, 0x90
24158+ .pushsection .altinstructions, "a"
24159+ altinstruction_entry 603b, 601b, X86_FEATURE_ALWAYS, 602b-601b, 602b-601b
24160+ .popsection
24161+#else
24162+ pax_force_retaddr (RIP-ARGOFFSET)
24163+#endif
24164+
24165 /*
24166 * The iretq could re-enable interrupts:
24167 */
24168@@ -934,7 +1423,7 @@ ENTRY(retint_kernel)
24169 jmp exit_intr
24170 #endif
24171 CFI_ENDPROC
24172-END(common_interrupt)
24173+ENDPROC(common_interrupt)
24174
24175 /*
24176 * If IRET takes a fault on the espfix stack, then we
24177@@ -956,13 +1445,13 @@ __do_double_fault:
24178 cmpq $native_irq_return_iret,%rax
24179 jne do_double_fault /* This shouldn't happen... */
24180 movq PER_CPU_VAR(kernel_stack),%rax
24181- subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */
24182+ subq $(6*8),%rax /* Reset to original stack */
24183 movq %rax,RSP(%rdi)
24184 movq $0,(%rax) /* Missing (lost) #GP error code */
24185 movq $general_protection,RIP(%rdi)
24186 retq
24187 CFI_ENDPROC
24188-END(__do_double_fault)
24189+ENDPROC(__do_double_fault)
24190 #else
24191 # define __do_double_fault do_double_fault
24192 #endif
24193@@ -979,7 +1468,7 @@ ENTRY(\sym)
24194 interrupt \do_sym
24195 jmp ret_from_intr
24196 CFI_ENDPROC
24197-END(\sym)
24198+ENDPROC(\sym)
24199 .endm
24200
24201 #ifdef CONFIG_TRACING
24202@@ -1052,7 +1541,7 @@ apicinterrupt IRQ_WORK_VECTOR \
24203 /*
24204 * Exception entry points.
24205 */
24206-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
24207+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
24208
24209 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
24210 ENTRY(\sym)
24211@@ -1103,6 +1592,12 @@ ENTRY(\sym)
24212 .endif
24213
24214 .if \shift_ist != -1
24215+#ifdef CONFIG_SMP
24216+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
24217+ lea init_tss(%r13), %r13
24218+#else
24219+ lea init_tss(%rip), %r13
24220+#endif
24221 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
24222 .endif
24223
24224@@ -1119,7 +1614,7 @@ ENTRY(\sym)
24225 .endif
24226
24227 CFI_ENDPROC
24228-END(\sym)
24229+ENDPROC(\sym)
24230 .endm
24231
24232 #ifdef CONFIG_TRACING
24233@@ -1160,9 +1655,10 @@ gs_change:
24234 2: mfence /* workaround */
24235 SWAPGS
24236 popfq_cfi
24237+ pax_force_retaddr
24238 ret
24239 CFI_ENDPROC
24240-END(native_load_gs_index)
24241+ENDPROC(native_load_gs_index)
24242
24243 _ASM_EXTABLE(gs_change,bad_gs)
24244 .section .fixup,"ax"
24245@@ -1190,9 +1686,10 @@ ENTRY(do_softirq_own_stack)
24246 CFI_DEF_CFA_REGISTER rsp
24247 CFI_ADJUST_CFA_OFFSET -8
24248 decl PER_CPU_VAR(irq_count)
24249+ pax_force_retaddr
24250 ret
24251 CFI_ENDPROC
24252-END(do_softirq_own_stack)
24253+ENDPROC(do_softirq_own_stack)
24254
24255 #ifdef CONFIG_XEN
24256 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
24257@@ -1230,7 +1727,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
24258 decl PER_CPU_VAR(irq_count)
24259 jmp error_exit
24260 CFI_ENDPROC
24261-END(xen_do_hypervisor_callback)
24262+ENDPROC(xen_do_hypervisor_callback)
24263
24264 /*
24265 * Hypervisor uses this for application faults while it executes.
24266@@ -1289,7 +1786,7 @@ ENTRY(xen_failsafe_callback)
24267 SAVE_ALL
24268 jmp error_exit
24269 CFI_ENDPROC
24270-END(xen_failsafe_callback)
24271+ENDPROC(xen_failsafe_callback)
24272
24273 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
24274 xen_hvm_callback_vector xen_evtchn_do_upcall
24275@@ -1336,18 +1833,33 @@ ENTRY(paranoid_exit)
24276 DEFAULT_FRAME
24277 DISABLE_INTERRUPTS(CLBR_NONE)
24278 TRACE_IRQS_OFF_DEBUG
24279- testl %ebx,%ebx /* swapgs needed? */
24280+ testl $1,%ebx /* swapgs needed? */
24281 jnz paranoid_restore
24282- testl $3,CS(%rsp)
24283+ testb $3,CS(%rsp)
24284 jnz paranoid_userspace
24285+#ifdef CONFIG_PAX_MEMORY_UDEREF
24286+ pax_exit_kernel
24287+ TRACE_IRQS_IRETQ 0
24288+ SWAPGS_UNSAFE_STACK
24289+ RESTORE_ALL 8
24290+ pax_force_retaddr_bts
24291+ jmp irq_return
24292+#endif
24293 paranoid_swapgs:
24294+#ifdef CONFIG_PAX_MEMORY_UDEREF
24295+ pax_exit_kernel_user
24296+#else
24297+ pax_exit_kernel
24298+#endif
24299 TRACE_IRQS_IRETQ 0
24300 SWAPGS_UNSAFE_STACK
24301 RESTORE_ALL 8
24302 jmp irq_return
24303 paranoid_restore:
24304+ pax_exit_kernel
24305 TRACE_IRQS_IRETQ_DEBUG 0
24306 RESTORE_ALL 8
24307+ pax_force_retaddr_bts
24308 jmp irq_return
24309 paranoid_userspace:
24310 GET_THREAD_INFO(%rcx)
24311@@ -1376,7 +1888,7 @@ paranoid_schedule:
24312 TRACE_IRQS_OFF
24313 jmp paranoid_userspace
24314 CFI_ENDPROC
24315-END(paranoid_exit)
24316+ENDPROC(paranoid_exit)
24317
24318 /*
24319 * Exception entry point. This expects an error code/orig_rax on the stack.
24320@@ -1403,12 +1915,23 @@ ENTRY(error_entry)
24321 movq_cfi r14, R14+8
24322 movq_cfi r15, R15+8
24323 xorl %ebx,%ebx
24324- testl $3,CS+8(%rsp)
24325+ testb $3,CS+8(%rsp)
24326 je error_kernelspace
24327 error_swapgs:
24328 SWAPGS
24329 error_sti:
24330+#ifdef CONFIG_PAX_MEMORY_UDEREF
24331+ testb $3, CS+8(%rsp)
24332+ jnz 1f
24333+ pax_enter_kernel
24334+ jmp 2f
24335+1: pax_enter_kernel_user
24336+2:
24337+#else
24338+ pax_enter_kernel
24339+#endif
24340 TRACE_IRQS_OFF
24341+ pax_force_retaddr
24342 ret
24343
24344 /*
24345@@ -1435,7 +1958,7 @@ bstep_iret:
24346 movq %rcx,RIP+8(%rsp)
24347 jmp error_swapgs
24348 CFI_ENDPROC
24349-END(error_entry)
24350+ENDPROC(error_entry)
24351
24352
24353 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
24354@@ -1446,7 +1969,7 @@ ENTRY(error_exit)
24355 DISABLE_INTERRUPTS(CLBR_NONE)
24356 TRACE_IRQS_OFF
24357 GET_THREAD_INFO(%rcx)
24358- testl %eax,%eax
24359+ testl $1,%eax
24360 jne retint_kernel
24361 LOCKDEP_SYS_EXIT_IRQ
24362 movl TI_flags(%rcx),%edx
24363@@ -1455,7 +1978,7 @@ ENTRY(error_exit)
24364 jnz retint_careful
24365 jmp retint_swapgs
24366 CFI_ENDPROC
24367-END(error_exit)
24368+ENDPROC(error_exit)
24369
24370 /*
24371 * Test if a given stack is an NMI stack or not.
24372@@ -1513,9 +2036,11 @@ ENTRY(nmi)
24373 * If %cs was not the kernel segment, then the NMI triggered in user
24374 * space, which means it is definitely not nested.
24375 */
24376+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
24377+ je 1f
24378 cmpl $__KERNEL_CS, 16(%rsp)
24379 jne first_nmi
24380-
24381+1:
24382 /*
24383 * Check the special variable on the stack to see if NMIs are
24384 * executing.
24385@@ -1549,8 +2074,7 @@ nested_nmi:
24386
24387 1:
24388 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
24389- leaq -1*8(%rsp), %rdx
24390- movq %rdx, %rsp
24391+ subq $8, %rsp
24392 CFI_ADJUST_CFA_OFFSET 1*8
24393 leaq -10*8(%rsp), %rdx
24394 pushq_cfi $__KERNEL_DS
24395@@ -1568,6 +2092,7 @@ nested_nmi_out:
24396 CFI_RESTORE rdx
24397
24398 /* No need to check faults here */
24399+# pax_force_retaddr_bts
24400 INTERRUPT_RETURN
24401
24402 CFI_RESTORE_STATE
24403@@ -1664,13 +2189,13 @@ end_repeat_nmi:
24404 subq $ORIG_RAX-R15, %rsp
24405 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
24406 /*
24407- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
24408+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
24409 * as we should not be calling schedule in NMI context.
24410 * Even with normal interrupts enabled. An NMI should not be
24411 * setting NEED_RESCHED or anything that normal interrupts and
24412 * exceptions might do.
24413 */
24414- call save_paranoid
24415+ call save_paranoid_nmi
24416 DEFAULT_FRAME 0
24417
24418 /*
24419@@ -1680,9 +2205,9 @@ end_repeat_nmi:
24420 * NMI itself takes a page fault, the page fault that was preempted
24421 * will read the information from the NMI page fault and not the
24422 * origin fault. Save it off and restore it if it changes.
24423- * Use the r12 callee-saved register.
24424+ * Use the r13 callee-saved register.
24425 */
24426- movq %cr2, %r12
24427+ movq %cr2, %r13
24428
24429 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24430 movq %rsp,%rdi
24431@@ -1691,29 +2216,34 @@ end_repeat_nmi:
24432
24433 /* Did the NMI take a page fault? Restore cr2 if it did */
24434 movq %cr2, %rcx
24435- cmpq %rcx, %r12
24436+ cmpq %rcx, %r13
24437 je 1f
24438- movq %r12, %cr2
24439+ movq %r13, %cr2
24440 1:
24441
24442- testl %ebx,%ebx /* swapgs needed? */
24443+ testl $1,%ebx /* swapgs needed? */
24444 jnz nmi_restore
24445 nmi_swapgs:
24446 SWAPGS_UNSAFE_STACK
24447 nmi_restore:
24448+ pax_exit_kernel_nmi
24449 /* Pop the extra iret frame at once */
24450 RESTORE_ALL 6*8
24451+ testb $3, 8(%rsp)
24452+ jnz 1f
24453+ pax_force_retaddr_bts
24454+1:
24455
24456 /* Clear the NMI executing stack variable */
24457 movq $0, 5*8(%rsp)
24458 jmp irq_return
24459 CFI_ENDPROC
24460-END(nmi)
24461+ENDPROC(nmi)
24462
24463 ENTRY(ignore_sysret)
24464 CFI_STARTPROC
24465 mov $-ENOSYS,%eax
24466 sysret
24467 CFI_ENDPROC
24468-END(ignore_sysret)
24469+ENDPROC(ignore_sysret)
24470
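The core of the entry_64.S additions is the pax_enter_kernel*/pax_exit_kernel* family: under KERNEXEC they toggle CR0.WP (bit 16, the bts/btr $16 pairs) and hop between __KERNEL_CS and __KERNEXEC_KERNEL_CS, and under UDEREF they swap between paired kernel/user top-level page tables. On PCID hardware the sub/add $4097 plus bts $63 sequences read as: move one page between the paired PGDs, switch between PCID 1 (user) and PCID 0 (kernel), and set the architectural CR3 no-flush bit. A userspace model of just that arithmetic, with the PGD pairing and PCID numbering inferred from the assembly above (nothing here touches a real CR3):

#include <stdint.h>
#include <stdio.h>

#define CR3_NOFLUSH (1ULL << 63)        /* PCID: don't flush on load */

/* Model: the user PGD sits one page above the kernel PGD and runs as
 * PCID 1; the kernel PGD runs as PCID 0 (both read off the +/-4097
 * arithmetic in pax_enter_kernel_user/pax_exit_kernel_user). */
static uint64_t to_kernel_cr3(uint64_t cr3)
{
        return ((cr3 & ~CR3_NOFLUSH) - 4096 - 1) | CR3_NOFLUSH;
}

static uint64_t to_user_cr3(uint64_t cr3)
{
        return ((cr3 & ~CR3_NOFLUSH) + 4096 + 1) | CR3_NOFLUSH;
}

int main(void)
{
        uint64_t user = 0x12346000ULL + 1;   /* page-aligned PGD, PCID 1 */
        uint64_t kern = to_kernel_cr3(user);
        printf("kernel cr3 %#llx -> user cr3 %#llx\n",
               (unsigned long long)kern,
               (unsigned long long)to_user_cr3(kern));
        return 0;
}
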
24471diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
24472index 94d857f..bf1f0bf 100644
24473--- a/arch/x86/kernel/espfix_64.c
24474+++ b/arch/x86/kernel/espfix_64.c
24475@@ -197,7 +197,7 @@ void init_espfix_ap(void)
24476 set_pte(&pte_p[n*PTE_STRIDE], pte);
24477
24478 /* Job is done for this CPU and any CPU which shares this page */
24479- ACCESS_ONCE(espfix_pages[page]) = stack_page;
24480+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
24481
24482 unlock_done:
24483 mutex_unlock(&espfix_init_mutex);
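
The espfix hunk is a constification fix-up: espfix_pages sits in a region grsecurity makes read-only, so the store must go through ACCESS_ONCE_RW. The mechanism underneath is the stock ACCESS_ONCE volatile cast; a sketch of that macro (the _RW variant differs only in how it defeats the const qualification):

/* Force exactly one load/store through a volatile lvalue so the
 * compiler can neither cache nor tear the access. This mirrors the
 * kernel's own ACCESS_ONCE(); usage: ACCESS_ONCE(flag) = 1; */
#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))
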
24484diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
24485index cbc4a91..b38ee45 100644
24486--- a/arch/x86/kernel/ftrace.c
24487+++ b/arch/x86/kernel/ftrace.c
24488@@ -88,7 +88,7 @@ static unsigned long text_ip_addr(unsigned long ip)
24489 * kernel identity mapping to modify code.
24490 */
24491 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
24492- ip = (unsigned long)__va(__pa_symbol(ip));
24493+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
24494
24495 return ip;
24496 }
24497@@ -104,6 +104,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
24498 {
24499 unsigned char replaced[MCOUNT_INSN_SIZE];
24500
24501+ ip = ktla_ktva(ip);
24502+
24503 /*
24504 * Note: Due to modules and __init, code can
24505 * disappear and change, we need to protect against faulting
24506@@ -229,7 +231,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
24507 unsigned char old[MCOUNT_INSN_SIZE];
24508 int ret;
24509
24510- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
24511+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
24512
24513 ftrace_update_func = ip;
24514 /* Make sure the breakpoints see the ftrace_update_func update */
24515@@ -310,7 +312,7 @@ static int add_break(unsigned long ip, const char *old)
24516 unsigned char replaced[MCOUNT_INSN_SIZE];
24517 unsigned char brk = BREAKPOINT_INSTRUCTION;
24518
24519- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
24520+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
24521 return -EFAULT;
24522
24523 /* Make sure it is what we expect it to be */
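
Each ftrace hunk wraps a read of kernel text in ktla_ktva(): under KERNEXEC the linear address ftrace computes and the virtual address the text is actually readable at differ by a constant, so the translation has to happen before probe_kernel_read()/memcpy(). A sketch of the helper pair, with kernexec_delta as a hypothetical stand-in for the arch-specific offset:

/* With KERNEXEC off the delta is zero and both helpers collapse to
 * the identity; 'kernexec_delta' is an illustrative name, not the
 * kernel's. */
static const unsigned long kernexec_delta = 0;

static inline unsigned long ktla_ktva(unsigned long addr) /* lin -> virt */
{
        return addr + kernexec_delta;
}

static inline unsigned long ktva_ktla(unsigned long addr) /* virt -> lin */
{
        return addr - kernexec_delta;
}
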
24524diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
24525index eda1a86..8f6df48 100644
24526--- a/arch/x86/kernel/head64.c
24527+++ b/arch/x86/kernel/head64.c
24528@@ -67,12 +67,12 @@ again:
24529 pgd = *pgd_p;
24530
24531 /*
24532- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
24533- * critical -- __PAGE_OFFSET would point us back into the dynamic
24534+ * The use of __early_va rather than __va here is critical:
24535+ * __va would point us back into the dynamic
24536 * range and we might end up looping forever...
24537 */
24538 if (pgd)
24539- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24540+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
24541 else {
24542 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24543 reset_early_page_tables();
24544@@ -82,13 +82,13 @@ again:
24545 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
24546 for (i = 0; i < PTRS_PER_PUD; i++)
24547 pud_p[i] = 0;
24548- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24549+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
24550 }
24551 pud_p += pud_index(address);
24552 pud = *pud_p;
24553
24554 if (pud)
24555- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24556+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
24557 else {
24558 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24559 reset_early_page_tables();
24560@@ -98,7 +98,7 @@ again:
24561 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
24562 for (i = 0; i < PTRS_PER_PMD; i++)
24563 pmd_p[i] = 0;
24564- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24565+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
24566 }
24567 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
24568 pmd_p[pmd_index(address)] = pmd;
24569@@ -175,7 +175,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
24570 if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
24571 early_printk("Kernel alive\n");
24572
24573- clear_page(init_level4_pgt);
24574 /* set init_level4_pgt kernel high mapping*/
24575 init_level4_pgt[511] = early_level4_pgt[511];
24576
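The head64.c hunks replace the open-coded `(pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base` with __early_va()/__pa(): this early in boot only the kernel-image mapping is usable, not the direct map, hence the rewritten comment. The arithmetic itself, as a sketch (the map base is the real x86-64 constant; phys_base is whatever boot relocation produced):

#include <stdint.h>

#define __START_KERNEL_map 0xffffffff80000000UL

static unsigned long phys_base;         /* filled in by boot relocation */

static inline void *early_va(unsigned long paddr)        /* phys -> virt */
{
        return (void *)(paddr + __START_KERNEL_map - phys_base);
}

static inline unsigned long early_pa(const void *vaddr)  /* virt -> phys */
{
        return (unsigned long)vaddr - __START_KERNEL_map + phys_base;
}
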
24577diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
24578index f36bd42..0ab4474 100644
24579--- a/arch/x86/kernel/head_32.S
24580+++ b/arch/x86/kernel/head_32.S
24581@@ -26,6 +26,12 @@
24582 /* Physical address */
24583 #define pa(X) ((X) - __PAGE_OFFSET)
24584
24585+#ifdef CONFIG_PAX_KERNEXEC
24586+#define ta(X) (X)
24587+#else
24588+#define ta(X) ((X) - __PAGE_OFFSET)
24589+#endif
24590+
24591 /*
24592 * References to members of the new_cpu_data structure.
24593 */
24594@@ -55,11 +61,7 @@
24595 * and small than max_low_pfn, otherwise will waste some page table entries
24596 */
24597
24598-#if PTRS_PER_PMD > 1
24599-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
24600-#else
24601-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
24602-#endif
24603+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
24604
24605 /* Number of possible pages in the lowmem region */
24606 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
24607@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
24608 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24609
24610 /*
24611+ * Real beginning of normal "text" segment
24612+ */
24613+ENTRY(stext)
24614+ENTRY(_stext)
24615+
24616+/*
24617 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
24618 * %esi points to the real-mode code as a 32-bit pointer.
24619 * CS and DS must be 4 GB flat segments, but we don't depend on
24620@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24621 * can.
24622 */
24623 __HEAD
24624+
24625+#ifdef CONFIG_PAX_KERNEXEC
24626+ jmp startup_32
24627+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
24628+.fill PAGE_SIZE-5,1,0xcc
24629+#endif
24630+
24631 ENTRY(startup_32)
24632 movl pa(stack_start),%ecx
24633
24634@@ -106,6 +121,59 @@ ENTRY(startup_32)
24635 2:
24636 leal -__PAGE_OFFSET(%ecx),%esp
24637
24638+#ifdef CONFIG_SMP
24639+ movl $pa(cpu_gdt_table),%edi
24640+ movl $__per_cpu_load,%eax
24641+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
24642+ rorl $16,%eax
24643+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
24644+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
24645+ movl $__per_cpu_end - 1,%eax
24646+ subl $__per_cpu_start,%eax
24647+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
24648+#endif
24649+
24650+#ifdef CONFIG_PAX_MEMORY_UDEREF
24651+ movl $NR_CPUS,%ecx
24652+ movl $pa(cpu_gdt_table),%edi
24653+1:
24654+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
24655+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
24656+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
24657+ addl $PAGE_SIZE_asm,%edi
24658+ loop 1b
24659+#endif
24660+
24661+#ifdef CONFIG_PAX_KERNEXEC
24662+ movl $pa(boot_gdt),%edi
24663+ movl $__LOAD_PHYSICAL_ADDR,%eax
24664+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
24665+ rorl $16,%eax
24666+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
24667+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
24668+ rorl $16,%eax
24669+
24670+ ljmp $(__BOOT_CS),$1f
24671+1:
24672+
24673+ movl $NR_CPUS,%ecx
24674+ movl $pa(cpu_gdt_table),%edi
24675+ addl $__PAGE_OFFSET,%eax
24676+1:
24677+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
24678+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
24679+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
24680+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
24681+ rorl $16,%eax
24682+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
24683+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
24684+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
24685+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
24686+ rorl $16,%eax
24687+ addl $PAGE_SIZE_asm,%edi
24688+ loop 1b
24689+#endif
24690+
24691 /*
24692 * Clear BSS first so that there are no surprises...
24693 */
24694@@ -201,8 +269,11 @@ ENTRY(startup_32)
24695 movl %eax, pa(max_pfn_mapped)
24696
24697 /* Do early initialization of the fixmap area */
24698- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24699- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
24700+#ifdef CONFIG_COMPAT_VDSO
24701+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
24702+#else
24703+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
24704+#endif
24705 #else /* Not PAE */
24706
24707 page_pde_offset = (__PAGE_OFFSET >> 20);
24708@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24709 movl %eax, pa(max_pfn_mapped)
24710
24711 /* Do early initialization of the fixmap area */
24712- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24713- movl %eax,pa(initial_page_table+0xffc)
24714+#ifdef CONFIG_COMPAT_VDSO
24715+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
24716+#else
24717+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
24718+#endif
24719 #endif
24720
24721 #ifdef CONFIG_PARAVIRT
24722@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24723 cmpl $num_subarch_entries, %eax
24724 jae bad_subarch
24725
24726- movl pa(subarch_entries)(,%eax,4), %eax
24727- subl $__PAGE_OFFSET, %eax
24728- jmp *%eax
24729+ jmp *pa(subarch_entries)(,%eax,4)
24730
24731 bad_subarch:
24732 WEAK(lguest_entry)
24733@@ -261,10 +333,10 @@ WEAK(xen_entry)
24734 __INITDATA
24735
24736 subarch_entries:
24737- .long default_entry /* normal x86/PC */
24738- .long lguest_entry /* lguest hypervisor */
24739- .long xen_entry /* Xen hypervisor */
24740- .long default_entry /* Moorestown MID */
24741+ .long ta(default_entry) /* normal x86/PC */
24742+ .long ta(lguest_entry) /* lguest hypervisor */
24743+ .long ta(xen_entry) /* Xen hypervisor */
24744+ .long ta(default_entry) /* Moorestown MID */
24745 num_subarch_entries = (. - subarch_entries) / 4
24746 .previous
24747 #else
24748@@ -354,6 +426,7 @@ default_entry:
24749 movl pa(mmu_cr4_features),%eax
24750 movl %eax,%cr4
24751
24752+#ifdef CONFIG_X86_PAE
24753 testb $X86_CR4_PAE, %al # check if PAE is enabled
24754 jz enable_paging
24755
24756@@ -382,6 +455,9 @@ default_entry:
24757 /* Make changes effective */
24758 wrmsr
24759
24760+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
24761+#endif
24762+
24763 enable_paging:
24764
24765 /*
24766@@ -449,14 +525,20 @@ is486:
24767 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
24768 movl %eax,%ss # after changing gdt.
24769
24770- movl $(__USER_DS),%eax # DS/ES contains default USER segment
24771+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
24772 movl %eax,%ds
24773 movl %eax,%es
24774
24775 movl $(__KERNEL_PERCPU), %eax
24776 movl %eax,%fs # set this cpu's percpu
24777
24778+#ifdef CONFIG_CC_STACKPROTECTOR
24779 movl $(__KERNEL_STACK_CANARY),%eax
24780+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
24781+ movl $(__USER_DS),%eax
24782+#else
24783+ xorl %eax,%eax
24784+#endif
24785 movl %eax,%gs
24786
24787 xorl %eax,%eax # Clear LDT
24788@@ -512,8 +594,11 @@ setup_once:
24789 * relocation. Manually set base address in stack canary
24790 * segment descriptor.
24791 */
24792- movl $gdt_page,%eax
24793+ movl $cpu_gdt_table,%eax
24794 movl $stack_canary,%ecx
24795+#ifdef CONFIG_SMP
24796+ addl $__per_cpu_load,%ecx
24797+#endif
24798 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
24799 shrl $16, %ecx
24800 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
24801@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
24802 cmpl $2,(%esp) # X86_TRAP_NMI
24803 je is_nmi # Ignore NMI
24804
24805- cmpl $2,%ss:early_recursion_flag
24806+ cmpl $1,%ss:early_recursion_flag
24807 je hlt_loop
24808 incl %ss:early_recursion_flag
24809
24810@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
24811 pushl (20+6*4)(%esp) /* trapno */
24812 pushl $fault_msg
24813 call printk
24814-#endif
24815 call dump_stack
24816+#endif
24817 hlt_loop:
24818 hlt
24819 jmp hlt_loop
24820@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
24821 /* This is the default interrupt "handler" :-) */
24822 ALIGN
24823 ignore_int:
24824- cld
24825 #ifdef CONFIG_PRINTK
24826+ cmpl $2,%ss:early_recursion_flag
24827+ je hlt_loop
24828+ incl %ss:early_recursion_flag
24829+ cld
24830 pushl %eax
24831 pushl %ecx
24832 pushl %edx
24833@@ -617,9 +705,6 @@ ignore_int:
24834 movl $(__KERNEL_DS),%eax
24835 movl %eax,%ds
24836 movl %eax,%es
24837- cmpl $2,early_recursion_flag
24838- je hlt_loop
24839- incl early_recursion_flag
24840 pushl 16(%esp)
24841 pushl 24(%esp)
24842 pushl 32(%esp)
24843@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
24844 /*
24845 * BSS section
24846 */
24847-__PAGE_ALIGNED_BSS
24848- .align PAGE_SIZE
24849 #ifdef CONFIG_X86_PAE
24850+.section .initial_pg_pmd,"a",@progbits
24851 initial_pg_pmd:
24852 .fill 1024*KPMDS,4,0
24853 #else
24854+.section .initial_page_table,"a",@progbits
24855 ENTRY(initial_page_table)
24856 .fill 1024,4,0
24857 #endif
24858+.section .initial_pg_fixmap,"a",@progbits
24859 initial_pg_fixmap:
24860 .fill 1024,4,0
24861+.section .empty_zero_page,"a",@progbits
24862 ENTRY(empty_zero_page)
24863 .fill 4096,1,0
24864+.section .swapper_pg_dir,"a",@progbits
24865 ENTRY(swapper_pg_dir)
24866+#ifdef CONFIG_X86_PAE
24867+ .fill 4,8,0
24868+#else
24869 .fill 1024,4,0
24870+#endif
24871
24872 /*
24873 * This starts the data section.
24874 */
24875 #ifdef CONFIG_X86_PAE
24876-__PAGE_ALIGNED_DATA
24877- /* Page-aligned for the benefit of paravirt? */
24878- .align PAGE_SIZE
24879+.section .initial_page_table,"a",@progbits
24880 ENTRY(initial_page_table)
24881 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
24882 # if KPMDS == 3
24883@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
24884 # error "Kernel PMDs should be 1, 2 or 3"
24885 # endif
24886 .align PAGE_SIZE /* needs to be page-sized too */
24887+
24888+#ifdef CONFIG_PAX_PER_CPU_PGD
24889+ENTRY(cpu_pgd)
24890+ .rept 2*NR_CPUS
24891+ .fill 4,8,0
24892+ .endr
24893+#endif
24894+
24895 #endif
24896
24897 .data
24898 .balign 4
24899 ENTRY(stack_start)
24900- .long init_thread_union+THREAD_SIZE
24901+ .long init_thread_union+THREAD_SIZE-8
24902
24903 __INITRODATA
24904 int_msg:
24905@@ -727,7 +825,7 @@ fault_msg:
24906 * segment size, and 32-bit linear address value:
24907 */
24908
24909- .data
24910+.section .rodata,"a",@progbits
24911 .globl boot_gdt_descr
24912 .globl idt_descr
24913
24914@@ -736,7 +834,7 @@ fault_msg:
24915 .word 0 # 32 bit align gdt_desc.address
24916 boot_gdt_descr:
24917 .word __BOOT_DS+7
24918- .long boot_gdt - __PAGE_OFFSET
24919+ .long pa(boot_gdt)
24920
24921 .word 0 # 32-bit align idt_desc.address
24922 idt_descr:
24923@@ -747,7 +845,7 @@ idt_descr:
24924 .word 0 # 32 bit align gdt_desc.address
24925 ENTRY(early_gdt_descr)
24926 .word GDT_ENTRIES*8-1
24927- .long gdt_page /* Overwritten for secondary CPUs */
24928+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
24929
24930 /*
24931 * The boot_gdt must mirror the equivalent in setup.S and is
24932@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
24933 .align L1_CACHE_BYTES
24934 ENTRY(boot_gdt)
24935 .fill GDT_ENTRY_BOOT_CS,8,0
24936- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
24937- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
24938+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
24939+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
24940+
24941+ .align PAGE_SIZE_asm
24942+ENTRY(cpu_gdt_table)
24943+ .rept NR_CPUS
24944+ .quad 0x0000000000000000 /* NULL descriptor */
24945+ .quad 0x0000000000000000 /* 0x0b reserved */
24946+ .quad 0x0000000000000000 /* 0x13 reserved */
24947+ .quad 0x0000000000000000 /* 0x1b reserved */
24948+
24949+#ifdef CONFIG_PAX_KERNEXEC
24950+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
24951+#else
24952+ .quad 0x0000000000000000 /* 0x20 unused */
24953+#endif
24954+
24955+ .quad 0x0000000000000000 /* 0x28 unused */
24956+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
24957+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
24958+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
24959+ .quad 0x0000000000000000 /* 0x4b reserved */
24960+ .quad 0x0000000000000000 /* 0x53 reserved */
24961+ .quad 0x0000000000000000 /* 0x5b reserved */
24962+
24963+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
24964+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
24965+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
24966+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
24967+
24968+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
24969+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
24970+
24971+ /*
24972+ * Segments used for calling PnP BIOS have byte granularity.
24973+ * The code segments and data segments have fixed 64k limits,
24974+ * the transfer segment sizes are set at run time.
24975+ */
24976+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
24977+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
24978+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
24979+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
24980+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
24981+
24982+ /*
24983+ * The APM segments have byte granularity and their bases
24984+ * are set at run time. All have 64k limits.
24985+ */
24986+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
24987+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
24988+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
24989+
24990+ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
24991+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
24992+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
24993+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
24994+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
24995+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
24996+
24997+ /* Be sure this is zeroed to avoid false validations in Xen */
24998+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
24999+ .endr
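
head_32.S now carries an explicit per-CPU cpu_gdt_table, including the KERNEXEC alternate code segment and the UDEREF entries whose limits get patched at boot. Decoding those 64-bit descriptor quads is mechanical; a sketch of the architectural field layout (Intel SDM vol. 3), fed two of the entries above:

#include <stdint.h>
#include <stdio.h>

static void decode_gdt(uint64_t d)
{
        uint32_t limit = (d & 0xffff) | ((d >> 32) & 0xf0000);
        uint32_t base  = ((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24);
        unsigned type  = (d >> 40) & 0xf;       /* code/data, R/W, accessed */
        unsigned dpl   = (d >> 45) & 0x3;       /* descriptor privilege    */
        unsigned g     = (d >> 55) & 0x1;       /* granularity: 1 => 4KiB  */
        printf("base=%#x limit=%#x type=%#x dpl=%u gran=%u\n",
               base, limit, type, dpl, g);
}

int main(void)
{
        decode_gdt(0x00cf9b000000ffffULL);  /* kernel 4GB code at 0 */
        decode_gdt(0x00cff3000000ffffULL);  /* user 4GB data at 0   */
        return 0;
}
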
25000diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
25001index a468c0a..c7dec74 100644
25002--- a/arch/x86/kernel/head_64.S
25003+++ b/arch/x86/kernel/head_64.S
25004@@ -20,6 +20,8 @@
25005 #include <asm/processor-flags.h>
25006 #include <asm/percpu.h>
25007 #include <asm/nops.h>
25008+#include <asm/cpufeature.h>
25009+#include <asm/alternative-asm.h>
25010
25011 #ifdef CONFIG_PARAVIRT
25012 #include <asm/asm-offsets.h>
25013@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
25014 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
25015 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
25016 L3_START_KERNEL = pud_index(__START_KERNEL_map)
25017+L4_VMALLOC_START = pgd_index(VMALLOC_START)
25018+L3_VMALLOC_START = pud_index(VMALLOC_START)
25019+L4_VMALLOC_END = pgd_index(VMALLOC_END)
25020+L3_VMALLOC_END = pud_index(VMALLOC_END)
25021+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
25022+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
25023
25024 .text
25025 __HEAD
25026@@ -89,11 +97,24 @@ startup_64:
25027 * Fixup the physical addresses in the page table
25028 */
25029 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
25030+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
25031+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
25032+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
25033+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
25034+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
25035
25036- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
25037- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
25038+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
25039+#ifndef CONFIG_XEN
25040+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
25041+#endif
25042+
25043+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
25044+
25045+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
25046+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
25047
25048 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
25049+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
25050
25051 /*
25052 * Set up the identity mapping for the switchover. These
25053@@ -177,8 +198,8 @@ ENTRY(secondary_startup_64)
25054 movq $(init_level4_pgt - __START_KERNEL_map), %rax
25055 1:
25056
25057- /* Enable PAE mode and PGE */
25058- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
25059+ /* Enable PAE mode and PSE/PGE */
25060+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
25061 movq %rcx, %cr4
25062
25063 /* Setup early boot stage 4 level pagetables. */
25064@@ -199,10 +220,19 @@ ENTRY(secondary_startup_64)
25065 movl $MSR_EFER, %ecx
25066 rdmsr
25067 btsl $_EFER_SCE, %eax /* Enable System Call */
25068- btl $20,%edi /* No Execute supported? */
25069+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
25070 jnc 1f
25071 btsl $_EFER_NX, %eax
25072 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
25073+#ifndef CONFIG_EFI
25074+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
25075+#endif
25076+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
25077+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
25078+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
25079+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
25080+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
25081+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
25082 1: wrmsr /* Make changes effective */
25083
25084 /* Setup cr0 */
25085@@ -282,6 +312,7 @@ ENTRY(secondary_startup_64)
25086 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
25087 * address given in m16:64.
25088 */
25089+ pax_set_fptr_mask
25090 movq initial_code(%rip),%rax
25091 pushq $0 # fake return address to stop unwinder
25092 pushq $__KERNEL_CS # set correct cs
25093@@ -313,7 +344,7 @@ ENDPROC(start_cpu0)
25094 .quad INIT_PER_CPU_VAR(irq_stack_union)
25095
25096 GLOBAL(stack_start)
25097- .quad init_thread_union+THREAD_SIZE-8
25098+ .quad init_thread_union+THREAD_SIZE-16
25099 .word 0
25100 __FINITDATA
25101
25102@@ -391,7 +422,7 @@ ENTRY(early_idt_handler)
25103 call dump_stack
25104 #ifdef CONFIG_KALLSYMS
25105 leaq early_idt_ripmsg(%rip),%rdi
25106- movq 40(%rsp),%rsi # %rip again
25107+ movq 88(%rsp),%rsi # %rip again
25108 call __print_symbol
25109 #endif
25110 #endif /* EARLY_PRINTK */
25111@@ -420,6 +451,7 @@ ENDPROC(early_idt_handler)
25112 early_recursion_flag:
25113 .long 0
25114
25115+ .section .rodata,"a",@progbits
25116 #ifdef CONFIG_EARLY_PRINTK
25117 early_idt_msg:
25118 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
25119@@ -447,29 +479,52 @@ NEXT_PAGE(early_level4_pgt)
25120 NEXT_PAGE(early_dynamic_pgts)
25121 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
25122
25123- .data
25124+ .section .rodata,"a",@progbits
25125
25126-#ifndef CONFIG_XEN
25127 NEXT_PAGE(init_level4_pgt)
25128- .fill 512,8,0
25129-#else
25130-NEXT_PAGE(init_level4_pgt)
25131- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25132 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
25133 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25134+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
25135+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
25136+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
25137+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
25138+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
25139+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25140 .org init_level4_pgt + L4_START_KERNEL*8, 0
25141 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
25142 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
25143
25144+#ifdef CONFIG_PAX_PER_CPU_PGD
25145+NEXT_PAGE(cpu_pgd)
25146+ .rept 2*NR_CPUS
25147+ .fill 512,8,0
25148+ .endr
25149+#endif
25150+
25151 NEXT_PAGE(level3_ident_pgt)
25152 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25153+#ifdef CONFIG_XEN
25154 .fill 511, 8, 0
25155+#else
25156+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
25157+ .fill 510,8,0
25158+#endif
25159+
25160+NEXT_PAGE(level3_vmalloc_start_pgt)
25161+ .fill 512,8,0
25162+
25163+NEXT_PAGE(level3_vmalloc_end_pgt)
25164+ .fill 512,8,0
25165+
25166+NEXT_PAGE(level3_vmemmap_pgt)
25167+ .fill L3_VMEMMAP_START,8,0
25168+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25169+
25170 NEXT_PAGE(level2_ident_pgt)
25171- /* Since I easily can, map the first 1G.
25172+ /* Since I easily can, map the first 2G.
25173 * Don't set NX because code runs from these pages.
25174 */
25175- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
25176-#endif
25177+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
25178
25179 NEXT_PAGE(level3_kernel_pgt)
25180 .fill L3_START_KERNEL,8,0
25181@@ -477,6 +532,9 @@ NEXT_PAGE(level3_kernel_pgt)
25182 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
25183 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25184
25185+NEXT_PAGE(level2_vmemmap_pgt)
25186+ .fill 512,8,0
25187+
25188 NEXT_PAGE(level2_kernel_pgt)
25189 /*
25190 * 512 MB kernel mapping. We spend a full page on this pagetable
25191@@ -494,28 +552,64 @@ NEXT_PAGE(level2_kernel_pgt)
25192 NEXT_PAGE(level2_fixmap_pgt)
25193 .fill 506,8,0
25194 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25195- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
25196- .fill 5,8,0
25197+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
25198+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
25199+ .fill 4,8,0
25200
25201 NEXT_PAGE(level1_fixmap_pgt)
25202 .fill 512,8,0
25203
25204+NEXT_PAGE(level1_vsyscall_pgt)
25205+ .fill 512,8,0
25206+
25207 #undef PMDS
25208
25209- .data
25210+ .align PAGE_SIZE
25211+ENTRY(cpu_gdt_table)
25212+ .rept NR_CPUS
25213+ .quad 0x0000000000000000 /* NULL descriptor */
25214+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
25215+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
25216+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
25217+ .quad 0x00cffb000000ffff /* __USER32_CS */
25218+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
25219+ .quad 0x00affb000000ffff /* __USER_CS */
25220+
25221+#ifdef CONFIG_PAX_KERNEXEC
25222+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
25223+#else
25224+ .quad 0x0 /* unused */
25225+#endif
25226+
25227+ .quad 0,0 /* TSS */
25228+ .quad 0,0 /* LDT */
25229+ .quad 0,0,0 /* three TLS descriptors */
25230+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
25231+ /* asm/segment.h:GDT_ENTRIES must match this */
25232+
25233+#ifdef CONFIG_PAX_MEMORY_UDEREF
25234+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
25235+#else
25236+ .quad 0x0 /* unused */
25237+#endif
25238+
25239+ /* zero the remaining page */
25240+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
25241+ .endr
25242+
25243 .align 16
25244 .globl early_gdt_descr
25245 early_gdt_descr:
25246 .word GDT_ENTRIES*8-1
25247 early_gdt_descr_base:
25248- .quad INIT_PER_CPU_VAR(gdt_page)
25249+ .quad cpu_gdt_table
25250
25251 ENTRY(phys_base)
25252 /* This must match the first entry in level2_kernel_pgt */
25253 .quad 0x0000000000000000
25254
25255 #include "../../x86/xen/xen-head.S"
25256-
25257- __PAGE_ALIGNED_BSS
25258+
25259+ .section .rodata,"a",@progbits
25260 NEXT_PAGE(empty_zero_page)
25261 .skip PAGE_SIZE
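The cpu_pgd block reserved above gives each CPU its own pair of page-global directories under PAX_PER_CPU_PGD (2*NR_CPUS pages of 512 eight-byte entries), so the kernel can run on a PGD whose user half is empty and switch to the user PGD only on return to userland. A minimal sketch of one plausible way to index that reservation; the helper name and slot order are assumptions, not the patch's actual accessors:

#include <linux/threads.h>
#include <asm/pgtable.h>

extern pgd_t cpu_pgd[2 * NR_CPUS][PTRS_PER_PGD];

static inline pgd_t *cpu_pgd_sketch(unsigned int cpu, int kernel)
{
	/* assumed layout: even slot = user PGD, odd slot = kernel PGD */
	return cpu_pgd[2 * cpu + (kernel ? 1 : 0)];
}

The cpu_gdt_table above serves a related purpose: a statically sized, page-aligned GDT per CPU in a known location, which is why early_gdt_descr_base is repointed from the per-CPU gdt_page to cpu_gdt_table.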
25262diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25263index 05fd74f..c3548b1 100644
25264--- a/arch/x86/kernel/i386_ksyms_32.c
25265+++ b/arch/x86/kernel/i386_ksyms_32.c
25266@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25267 EXPORT_SYMBOL(cmpxchg8b_emu);
25268 #endif
25269
25270+EXPORT_SYMBOL_GPL(cpu_gdt_table);
25271+
25272 /* Networking helper routines. */
25273 EXPORT_SYMBOL(csum_partial_copy_generic);
25274+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25275+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25276
25277 EXPORT_SYMBOL(__get_user_1);
25278 EXPORT_SYMBOL(__get_user_2);
25279@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25280 EXPORT_SYMBOL(___preempt_schedule_context);
25281 #endif
25282 #endif
25283+
25284+#ifdef CONFIG_PAX_KERNEXEC
25285+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25286+#endif
25287+
25288+#ifdef CONFIG_PAX_PER_CPU_PGD
25289+EXPORT_SYMBOL(cpu_pgd);
25290+#endif
25291diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
25292index d5dd808..b6432cf 100644
25293--- a/arch/x86/kernel/i387.c
25294+++ b/arch/x86/kernel/i387.c
25295@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
25296 static inline bool interrupted_user_mode(void)
25297 {
25298 struct pt_regs *regs = get_irq_regs();
25299- return regs && user_mode_vm(regs);
25300+ return regs && user_mode(regs);
25301 }
25302
25303 /*
25304diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25305index 8af8171..f8c1169 100644
25306--- a/arch/x86/kernel/i8259.c
25307+++ b/arch/x86/kernel/i8259.c
25308@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25309 static void make_8259A_irq(unsigned int irq)
25310 {
25311 disable_irq_nosync(irq);
25312- io_apic_irqs &= ~(1<<irq);
25313+ io_apic_irqs &= ~(1UL<<irq);
25314 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
25315 i8259A_chip.name);
25316 enable_irq(irq);
25317@@ -209,7 +209,7 @@ spurious_8259A_irq:
25318 "spurious 8259A interrupt: IRQ%d.\n", irq);
25319 spurious_irq_mask |= irqmask;
25320 }
25321- atomic_inc(&irq_err_count);
25322+ atomic_inc_unchecked(&irq_err_count);
25323 /*
25324 * Theoretically we do not have to handle this IRQ,
25325 * but in Linux this does not cause problems and is
25326@@ -350,14 +350,16 @@ static void init_8259A(int auto_eoi)
25327 /* (slave's support for AEOI in flat mode is to be investigated) */
25328 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25329
25330+ pax_open_kernel();
25331 if (auto_eoi)
25332 /*
25333 * In AEOI mode we just have to mask the interrupt
25334 * when acking.
25335 */
25336- i8259A_chip.irq_mask_ack = disable_8259A_irq;
25337+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25338 else
25339- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25340+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25341+ pax_close_kernel();
25342
25343 udelay(100); /* wait for 8259A to initialize */
25344
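The pax_open_kernel()/pax_close_kernel() pair added around the irq_mask_ack assignments marks a short window in which otherwise read-only kernel data may be written; the *(void **)& casts exist because the chip structure is constified elsewhere in the patch. A hedged sketch of how such a window can be built on x86 by toggling CR0.WP, the bit that makes ring-0 writes honor read-only page permissions (kernel-context code; names are assumptions modeled on the native_* variants):

#include <linux/bug.h>
#include <linux/preempt.h>
#include <asm/processor.h>

static inline unsigned long sketch_pax_open_kernel(void)
{
	unsigned long cr0;

	preempt_disable();		/* the window must stay on this CPU */
	barrier();
	cr0 = read_cr0() ^ X86_CR0_WP;	/* clear WP: RO pages become writable */
	BUG_ON(cr0 & X86_CR0_WP);
	write_cr0(cr0);
	barrier();
	return cr0 ^ X86_CR0_WP;
}

static inline unsigned long sketch_pax_close_kernel(void)
{
	unsigned long cr0;

	barrier();
	cr0 = read_cr0() ^ X86_CR0_WP;	/* set WP again */
	BUG_ON(!(cr0 & X86_CR0_WP));
	write_cr0(cr0);
	barrier();
	preempt_enable_no_resched();
	return cr0 ^ X86_CR0_WP;
}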
25345diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25346index a979b5b..1d6db75 100644
25347--- a/arch/x86/kernel/io_delay.c
25348+++ b/arch/x86/kernel/io_delay.c
25349@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25350 * Quirk table for systems that misbehave (lock up, etc.) if port
25351 * 0x80 is used:
25352 */
25353-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25354+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25355 {
25356 .callback = dmi_io_delay_0xed_port,
25357 .ident = "Compaq Presario V6000",
25358diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25359index 4ddaf66..49d5c18 100644
25360--- a/arch/x86/kernel/ioport.c
25361+++ b/arch/x86/kernel/ioport.c
25362@@ -6,6 +6,7 @@
25363 #include <linux/sched.h>
25364 #include <linux/kernel.h>
25365 #include <linux/capability.h>
25366+#include <linux/security.h>
25367 #include <linux/errno.h>
25368 #include <linux/types.h>
25369 #include <linux/ioport.h>
25370@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25371 return -EINVAL;
25372 if (turn_on && !capable(CAP_SYS_RAWIO))
25373 return -EPERM;
25374+#ifdef CONFIG_GRKERNSEC_IO
25375+ if (turn_on && grsec_disable_privio) {
25376+ gr_handle_ioperm();
25377+ return -ENODEV;
25378+ }
25379+#endif
25380
25381 /*
25382 * If it's the first ioperm() call in this thread's lifetime, set the
25383@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25384 * because the ->io_bitmap_max value must match the bitmap
25385 * contents:
25386 */
25387- tss = &per_cpu(init_tss, get_cpu());
25388+ tss = init_tss + get_cpu();
25389
25390 if (turn_on)
25391 bitmap_clear(t->io_bitmap_ptr, from, num);
25392@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25393 if (level > old) {
25394 if (!capable(CAP_SYS_RAWIO))
25395 return -EPERM;
25396+#ifdef CONFIG_GRKERNSEC_IO
25397+ if (grsec_disable_privio) {
25398+ gr_handle_iopl();
25399+ return -ENODEV;
25400+ }
25401+#endif
25402 }
25403 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25404 t->iopl = level << 12;
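With CONFIG_GRKERNSEC_IO and grsec_disable_privio active, the two hunks above deny raw port access even to CAP_SYS_RAWIO holders, returning -ENODEV and logging the attempt through gr_handle_ioperm()/gr_handle_iopl(). A small userland probe showing the observable effect (port number is illustrative; on an unpatched kernel both calls succeed for root):

#include <stdio.h>
#include <sys/io.h>

int main(void)
{
	if (iopl(3) < 0)
		perror("iopl");		/* ENODEV under GRKERNSEC_IO */
	if (ioperm(0x80, 1, 1) < 0)
		perror("ioperm");	/* likewise */
	return 0;
}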
25405diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25406index 922d285..6d20692 100644
25407--- a/arch/x86/kernel/irq.c
25408+++ b/arch/x86/kernel/irq.c
25409@@ -22,7 +22,7 @@
25410 #define CREATE_TRACE_POINTS
25411 #include <asm/trace/irq_vectors.h>
25412
25413-atomic_t irq_err_count;
25414+atomic_unchecked_t irq_err_count;
25415
25416 /* Function pointer for generic interrupt vector handling */
25417 void (*x86_platform_ipi_callback)(void) = NULL;
25418@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25419 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
25420 seq_printf(p, " Hypervisor callback interrupts\n");
25421 #endif
25422- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25423+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25424 #if defined(CONFIG_X86_IO_APIC)
25425- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25426+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25427 #endif
25428 return 0;
25429 }
25430@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25431
25432 u64 arch_irq_stat(void)
25433 {
25434- u64 sum = atomic_read(&irq_err_count);
25435+ u64 sum = atomic_read_unchecked(&irq_err_count);
25436 return sum;
25437 }
25438
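irq_err_count is a pure statistic, so wrapping it is harmless; converting it to atomic_unchecked_t opts it out of the overflow detection PAX_REFCOUNT adds to plain atomic_t, which is reserved for reference counts where a wrap is exploitable. Simplified, assumed definitions of the unchecked variant, modeled on the stock x86 atomics rather than copied from the patch:

typedef struct {
	int counter;
} atomic_unchecked_t;

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return *(const volatile int *)&v->counter;
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* plain locked increment: no overflow trap is inserted here */
	asm volatile("lock; incl %0" : "+m" (v->counter));
}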
25439diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25440index 63ce838..2ea3e06 100644
25441--- a/arch/x86/kernel/irq_32.c
25442+++ b/arch/x86/kernel/irq_32.c
25443@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25444
25445 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25446
25447+extern void gr_handle_kernel_exploit(void);
25448+
25449 int sysctl_panic_on_stackoverflow __read_mostly;
25450
25451 /* Debugging check for stack overflow: is there less than 1KB free? */
25452@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25453 __asm__ __volatile__("andl %%esp,%0" :
25454 "=r" (sp) : "0" (THREAD_SIZE - 1));
25455
25456- return sp < (sizeof(struct thread_info) + STACK_WARN);
25457+ return sp < STACK_WARN;
25458 }
25459
25460 static void print_stack_overflow(void)
25461 {
25462 printk(KERN_WARNING "low stack detected by irq handler\n");
25463 dump_stack();
25464+ gr_handle_kernel_exploit();
25465 if (sysctl_panic_on_stackoverflow)
25466 panic("low stack detected by irq handler - check messages\n");
25467 }
25468@@ -84,10 +87,9 @@ static inline void *current_stack(void)
25469 static inline int
25470 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25471 {
25472- struct irq_stack *curstk, *irqstk;
25473+ struct irq_stack *irqstk;
25474 u32 *isp, *prev_esp, arg1, arg2;
25475
25476- curstk = (struct irq_stack *) current_stack();
25477 irqstk = __this_cpu_read(hardirq_stack);
25478
25479 /*
25480@@ -96,15 +98,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25481 * handler) we can't do that and just have to keep using the
25482 * current stack (which is the irq stack already after all)
25483 */
25484- if (unlikely(curstk == irqstk))
25485+ if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
25486 return 0;
25487
25488- isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
25489+ isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
25490
25491 /* Save the next esp at the bottom of the stack */
25492 prev_esp = (u32 *)irqstk;
25493 *prev_esp = current_stack_pointer;
25494
25495+#ifdef CONFIG_PAX_MEMORY_UDEREF
25496+ __set_fs(MAKE_MM_SEG(0));
25497+#endif
25498+
25499 if (unlikely(overflow))
25500 call_on_stack(print_stack_overflow, isp);
25501
25502@@ -115,6 +121,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25503 : "0" (irq), "1" (desc), "2" (isp),
25504 "D" (desc->handle_irq)
25505 : "memory", "cc", "ecx");
25506+
25507+#ifdef CONFIG_PAX_MEMORY_UDEREF
25508+ __set_fs(current_thread_info()->addr_limit);
25509+#endif
25510+
25511 return 1;
25512 }
25513
25514@@ -123,32 +134,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25515 */
25516 void irq_ctx_init(int cpu)
25517 {
25518- struct irq_stack *irqstk;
25519-
25520 if (per_cpu(hardirq_stack, cpu))
25521 return;
25522
25523- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25524- THREADINFO_GFP,
25525- THREAD_SIZE_ORDER));
25526- per_cpu(hardirq_stack, cpu) = irqstk;
25527-
25528- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25529- THREADINFO_GFP,
25530- THREAD_SIZE_ORDER));
25531- per_cpu(softirq_stack, cpu) = irqstk;
25532-
25533- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
25534- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
25535+ per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25536+ per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25537 }
25538
25539 void do_softirq_own_stack(void)
25540 {
25541- struct thread_info *curstk;
25542 struct irq_stack *irqstk;
25543 u32 *isp, *prev_esp;
25544
25545- curstk = current_stack();
25546 irqstk = __this_cpu_read(softirq_stack);
25547
25548 /* build the stack frame on the softirq stack */
25549@@ -158,7 +155,16 @@ void do_softirq_own_stack(void)
25550 prev_esp = (u32 *)irqstk;
25551 *prev_esp = current_stack_pointer;
25552
25553+#ifdef CONFIG_PAX_MEMORY_UDEREF
25554+ __set_fs(MAKE_MM_SEG(0));
25555+#endif
25556+
25557 call_on_stack(__do_softirq, isp);
25558+
25559+#ifdef CONFIG_PAX_MEMORY_UDEREF
25560+ __set_fs(current_thread_info()->addr_limit);
25561+#endif
25562+
25563 }
25564
25565 bool handle_irq(unsigned irq, struct pt_regs *regs)
25566@@ -172,7 +178,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
25567 if (unlikely(!desc))
25568 return false;
25569
25570- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25571+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25572 if (unlikely(overflow))
25573 print_stack_overflow();
25574 desc->handle_irq(irq, desc);
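Both the hard-IRQ and soft-IRQ paths above shrink the address limit to MAKE_MM_SEG(0) for the duration of the stack switch and restore the interrupted task's saved limit afterwards: under PAX_MEMORY_UDEREF a stray userland dereference from interrupt context then faults instead of quietly succeeding. The discipline, reduced to a hedged kernel-context sketch using the same names as the hunks:

static void run_on_irq_stack_sketch(void (*fn)(void))
{
	__set_fs(MAKE_MM_SEG(0));	/* no userland window on the IRQ stack */
	fn();
	/* back to whatever limit the interrupted context had */
	__set_fs(current_thread_info()->addr_limit);
}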
25575diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
25576index 4d1c746..55a22d6 100644
25577--- a/arch/x86/kernel/irq_64.c
25578+++ b/arch/x86/kernel/irq_64.c
25579@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
25580 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
25581 EXPORT_PER_CPU_SYMBOL(irq_regs);
25582
25583+extern void gr_handle_kernel_exploit(void);
25584+
25585 int sysctl_panic_on_stackoverflow;
25586
25587 /*
25588@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25589 u64 estack_top, estack_bottom;
25590 u64 curbase = (u64)task_stack_page(current);
25591
25592- if (user_mode_vm(regs))
25593+ if (user_mode(regs))
25594 return;
25595
25596 if (regs->sp >= curbase + sizeof(struct thread_info) +
25597@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25598 irq_stack_top, irq_stack_bottom,
25599 estack_top, estack_bottom);
25600
25601+ gr_handle_kernel_exploit();
25602+
25603 if (sysctl_panic_on_stackoverflow)
25604 panic("low stack detected by irq handler - check messages\n");
25605 #endif
25606diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
25607index 26d5a55..a01160a 100644
25608--- a/arch/x86/kernel/jump_label.c
25609+++ b/arch/x86/kernel/jump_label.c
25610@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25611 * Jump label is enabled for the first time.
25612 * So we expect a default_nop...
25613 */
25614- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
25615+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
25616 != 0))
25617 bug_at((void *)entry->code, __LINE__);
25618 } else {
25619@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25620 * ...otherwise expect an ideal_nop. Otherwise
25621 * something went horribly wrong.
25622 */
25623- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
25624+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
25625 != 0))
25626 bug_at((void *)entry->code, __LINE__);
25627 }
25628@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
25629 * are converting the default nop to the ideal nop.
25630 */
25631 if (init) {
25632- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
25633+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
25634 bug_at((void *)entry->code, __LINE__);
25635 } else {
25636 code.jump = 0xe9;
25637 code.offset = entry->target -
25638 (entry->code + JUMP_LABEL_NOP_SIZE);
25639- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
25640+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
25641 bug_at((void *)entry->code, __LINE__);
25642 }
25643 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
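Every memcmp() in the jump-label hunks is routed through ktla_ktva() because, with KERNEXEC on i386, kernel text executes at one linear address but is read and patched through a different alias. An assumed form of the translation pair (identity when KERNEXEC is off; the offsets shown are illustrative of the i386 layout, not authoritative):

#ifdef CONFIG_PAX_KERNEXEC
#define ktla_ktva(addr)	((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)	/* text linear -> accessible alias */
#define ktva_ktla(addr)	((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)	/* and back */
#else
#define ktla_ktva(addr)	(addr)
#define ktva_ktla(addr)	(addr)
#endif

The kgdb and kprobes hunks that follow apply the same translation wherever a breakpoint or probed instruction has to be read or written through that alias.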
25644diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
25645index 7ec1d5f..5a7d130 100644
25646--- a/arch/x86/kernel/kgdb.c
25647+++ b/arch/x86/kernel/kgdb.c
25648@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
25649 #ifdef CONFIG_X86_32
25650 switch (regno) {
25651 case GDB_SS:
25652- if (!user_mode_vm(regs))
25653+ if (!user_mode(regs))
25654 *(unsigned long *)mem = __KERNEL_DS;
25655 break;
25656 case GDB_SP:
25657- if (!user_mode_vm(regs))
25658+ if (!user_mode(regs))
25659 *(unsigned long *)mem = kernel_stack_pointer(regs);
25660 break;
25661 case GDB_GS:
25662@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
25663 bp->attr.bp_addr = breakinfo[breakno].addr;
25664 bp->attr.bp_len = breakinfo[breakno].len;
25665 bp->attr.bp_type = breakinfo[breakno].type;
25666- info->address = breakinfo[breakno].addr;
25667+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
25668+ info->address = ktla_ktva(breakinfo[breakno].addr);
25669+ else
25670+ info->address = breakinfo[breakno].addr;
25671 info->len = breakinfo[breakno].len;
25672 info->type = breakinfo[breakno].type;
25673 val = arch_install_hw_breakpoint(bp);
25674@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
25675 case 'k':
25676 /* clear the trace bit */
25677 linux_regs->flags &= ~X86_EFLAGS_TF;
25678- atomic_set(&kgdb_cpu_doing_single_step, -1);
25679+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
25680
25681 /* set the trace bit if we're stepping */
25682 if (remcomInBuffer[0] == 's') {
25683 linux_regs->flags |= X86_EFLAGS_TF;
25684- atomic_set(&kgdb_cpu_doing_single_step,
25685+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
25686 raw_smp_processor_id());
25687 }
25688
25689@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
25690
25691 switch (cmd) {
25692 case DIE_DEBUG:
25693- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
25694+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
25695 if (user_mode(regs))
25696 return single_step_cont(regs, args);
25697 break;
25698@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25699 #endif /* CONFIG_DEBUG_RODATA */
25700
25701 bpt->type = BP_BREAKPOINT;
25702- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
25703+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
25704 BREAK_INSTR_SIZE);
25705 if (err)
25706 return err;
25707- err = probe_kernel_write((char *)bpt->bpt_addr,
25708+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25709 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
25710 #ifdef CONFIG_DEBUG_RODATA
25711 if (!err)
25712@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25713 return -EBUSY;
25714 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
25715 BREAK_INSTR_SIZE);
25716- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25717+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25718 if (err)
25719 return err;
25720 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
25721@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
25722 if (mutex_is_locked(&text_mutex))
25723 goto knl_write;
25724 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
25725- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25726+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25727 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
25728 goto knl_write;
25729 return err;
25730 knl_write:
25731 #endif /* CONFIG_DEBUG_RODATA */
25732- return probe_kernel_write((char *)bpt->bpt_addr,
25733+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25734 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
25735 }
25736
25737diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
25738index 67e6d19..731ed28 100644
25739--- a/arch/x86/kernel/kprobes/core.c
25740+++ b/arch/x86/kernel/kprobes/core.c
25741@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
25742 s32 raddr;
25743 } __packed *insn;
25744
25745- insn = (struct __arch_relative_insn *)from;
25746+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
25747+
25748+ pax_open_kernel();
25749 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
25750 insn->op = op;
25751+ pax_close_kernel();
25752 }
25753
25754 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
25755@@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
25756 kprobe_opcode_t opcode;
25757 kprobe_opcode_t *orig_opcodes = opcodes;
25758
25759- if (search_exception_tables((unsigned long)opcodes))
25760+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
25761 return 0; /* Page fault may occur on this address. */
25762
25763 retry:
25764@@ -242,9 +245,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
25765 * for the first byte, we can recover the original instruction
25766 * from it and kp->opcode.
25767 */
25768- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25769+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25770 buf[0] = kp->opcode;
25771- return (unsigned long)buf;
25772+ return ktva_ktla((unsigned long)buf);
25773 }
25774
25775 /*
25776@@ -336,7 +339,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25777 /* Another subsystem puts a breakpoint, failed to recover */
25778 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
25779 return 0;
25780+ pax_open_kernel();
25781 memcpy(dest, insn.kaddr, insn.length);
25782+ pax_close_kernel();
25783
25784 #ifdef CONFIG_X86_64
25785 if (insn_rip_relative(&insn)) {
25786@@ -363,7 +368,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25787 return 0;
25788 }
25789 disp = (u8 *) dest + insn_offset_displacement(&insn);
25790+ pax_open_kernel();
25791 *(s32 *) disp = (s32) newdisp;
25792+ pax_close_kernel();
25793 }
25794 #endif
25795 return insn.length;
25796@@ -505,7 +512,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25797 * nor set current_kprobe, because it doesn't use single
25798 * stepping.
25799 */
25800- regs->ip = (unsigned long)p->ainsn.insn;
25801+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25802 preempt_enable_no_resched();
25803 return;
25804 }
25805@@ -522,9 +529,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25806 regs->flags &= ~X86_EFLAGS_IF;
25807 /* single step inline if the instruction is an int3 */
25808 if (p->opcode == BREAKPOINT_INSTRUCTION)
25809- regs->ip = (unsigned long)p->addr;
25810+ regs->ip = ktla_ktva((unsigned long)p->addr);
25811 else
25812- regs->ip = (unsigned long)p->ainsn.insn;
25813+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25814 }
25815 NOKPROBE_SYMBOL(setup_singlestep);
25816
25817@@ -574,7 +581,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25818 struct kprobe *p;
25819 struct kprobe_ctlblk *kcb;
25820
25821- if (user_mode_vm(regs))
25822+ if (user_mode(regs))
25823 return 0;
25824
25825 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
25826@@ -609,7 +616,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25827 setup_singlestep(p, regs, kcb, 0);
25828 return 1;
25829 }
25830- } else if (*addr != BREAKPOINT_INSTRUCTION) {
25831+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
25832 /*
25833 * The breakpoint instruction was removed right
25834 * after we hit it. Another cpu has removed
25835@@ -656,6 +663,9 @@ static void __used kretprobe_trampoline_holder(void)
25836 " movq %rax, 152(%rsp)\n"
25837 RESTORE_REGS_STRING
25838 " popfq\n"
25839+#ifdef KERNEXEC_PLUGIN
25840+ " btsq $63,(%rsp)\n"
25841+#endif
25842 #else
25843 " pushf\n"
25844 SAVE_REGS_STRING
25845@@ -796,7 +806,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
25846 struct kprobe_ctlblk *kcb)
25847 {
25848 unsigned long *tos = stack_addr(regs);
25849- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
25850+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
25851 unsigned long orig_ip = (unsigned long)p->addr;
25852 kprobe_opcode_t *insn = p->ainsn.insn;
25853
25854@@ -979,7 +989,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
25855 struct die_args *args = data;
25856 int ret = NOTIFY_DONE;
25857
25858- if (args->regs && user_mode_vm(args->regs))
25859+ if (args->regs && user_mode(args->regs))
25860 return ret;
25861
25862 if (val == DIE_GPF) {
25863diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
25864index f304773..551e63c 100644
25865--- a/arch/x86/kernel/kprobes/opt.c
25866+++ b/arch/x86/kernel/kprobes/opt.c
25867@@ -79,6 +79,7 @@ found:
25868 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
25869 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25870 {
25871+ pax_open_kernel();
25872 #ifdef CONFIG_X86_64
25873 *addr++ = 0x48;
25874 *addr++ = 0xbf;
25875@@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25876 *addr++ = 0xb8;
25877 #endif
25878 *(unsigned long *)addr = val;
25879+ pax_close_kernel();
25880 }
25881
25882 asm (
25883@@ -337,7 +339,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25884 * Verify if the address gap is in 2GB range, because this uses
25885 * a relative jump.
25886 */
25887- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
25888+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
25889 if (abs(rel) > 0x7fffffff)
25890 return -ERANGE;
25891
25892@@ -352,16 +354,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25893 op->optinsn.size = ret;
25894
25895 /* Copy arch-dep-instance from template */
25896- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
25897+ pax_open_kernel();
25898+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
25899+ pax_close_kernel();
25900
25901 /* Set probe information */
25902 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
25903
25904 /* Set probe function call */
25905- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
25906+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
25907
25908 /* Set returning jmp instruction at the tail of out-of-line buffer */
25909- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
25910+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
25911 (u8 *)op->kp.addr + op->optinsn.size);
25912
25913 flush_icache_range((unsigned long) buf,
25914@@ -386,7 +390,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
25915 WARN_ON(kprobe_disabled(&op->kp));
25916
25917 /* Backup instructions which will be replaced by jump address */
25918- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
25919+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
25920 RELATIVE_ADDR_SIZE);
25921
25922 insn_buf[0] = RELATIVEJUMP_OPCODE;
25923@@ -434,7 +438,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
25924 /* This kprobe is really able to run optimized path. */
25925 op = container_of(p, struct optimized_kprobe, kp);
25926 /* Detour through copied instructions */
25927- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
25928+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
25929 if (!reenter)
25930 reset_current_kprobe();
25931 preempt_enable_no_resched();
25932diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
25933index c2bedae..25e7ab60 100644
25934--- a/arch/x86/kernel/ksysfs.c
25935+++ b/arch/x86/kernel/ksysfs.c
25936@@ -184,7 +184,7 @@ out:
25937
25938 static struct kobj_attribute type_attr = __ATTR_RO(type);
25939
25940-static struct bin_attribute data_attr = {
25941+static bin_attribute_no_const data_attr __read_only = {
25942 .attr = {
25943 .name = "data",
25944 .mode = S_IRUGO,
25945diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
25946index c37886d..d851d32 100644
25947--- a/arch/x86/kernel/ldt.c
25948+++ b/arch/x86/kernel/ldt.c
25949@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
25950 if (reload) {
25951 #ifdef CONFIG_SMP
25952 preempt_disable();
25953- load_LDT(pc);
25954+ load_LDT_nolock(pc);
25955 if (!cpumask_equal(mm_cpumask(current->mm),
25956 cpumask_of(smp_processor_id())))
25957 smp_call_function(flush_ldt, current->mm, 1);
25958 preempt_enable();
25959 #else
25960- load_LDT(pc);
25961+ load_LDT_nolock(pc);
25962 #endif
25963 }
25964 if (oldsize) {
25965@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
25966 return err;
25967
25968 for (i = 0; i < old->size; i++)
25969- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
25970+ write_ldt_entry(new->ldt, i, old->ldt + i);
25971 return 0;
25972 }
25973
25974@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
25975 retval = copy_ldt(&mm->context, &old_mm->context);
25976 mutex_unlock(&old_mm->context.lock);
25977 }
25978+
25979+ if (tsk == current) {
25980+ mm->context.vdso = 0;
25981+
25982+#ifdef CONFIG_X86_32
25983+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25984+ mm->context.user_cs_base = 0UL;
25985+ mm->context.user_cs_limit = ~0UL;
25986+
25987+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
25988+ cpus_clear(mm->context.cpu_user_cs_mask);
25989+#endif
25990+
25991+#endif
25992+#endif
25993+
25994+ }
25995+
25996 return retval;
25997 }
25998
25999@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
26000 }
26001 }
26002
26003+#ifdef CONFIG_PAX_SEGMEXEC
26004+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
26005+ error = -EINVAL;
26006+ goto out_unlock;
26007+ }
26008+#endif
26009+
26010 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
26011 error = -EINVAL;
26012 goto out_unlock;
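The new SEGMEXEC check in write_ldt() refuses to install any LDT code segment, since a user-supplied code descriptor could bypass the segmentation split SEGMEXEC relies on. A userland probe (descriptor values are illustrative) that the hunk turns into -EINVAL on affected tasks:

#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/ldt.h>

int main(void)
{
	struct user_desc d;

	memset(&d, 0, sizeof(d));
	d.limit = 0xfffff;
	d.seg_32bit = 1;
	d.contents = MODIFY_LDT_CONTENTS_CODE;	/* code segment: rejected */
	d.limit_in_pages = 1;

	if (syscall(SYS_modify_ldt, 1, &d, sizeof(d)) < 0)
		perror("modify_ldt");	/* EINVAL when SEGMEXEC is active */
	return 0;
}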
26013diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
26014index 1667b1d..16492c5 100644
26015--- a/arch/x86/kernel/machine_kexec_32.c
26016+++ b/arch/x86/kernel/machine_kexec_32.c
26017@@ -25,7 +25,7 @@
26018 #include <asm/cacheflush.h>
26019 #include <asm/debugreg.h>
26020
26021-static void set_idt(void *newidt, __u16 limit)
26022+static void set_idt(struct desc_struct *newidt, __u16 limit)
26023 {
26024 struct desc_ptr curidt;
26025
26026@@ -37,7 +37,7 @@ static void set_idt(void *newidt, __u16 limit)
26027 }
26028
26029
26030-static void set_gdt(void *newgdt, __u16 limit)
26031+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
26032 {
26033 struct desc_ptr curgdt;
26034
26035@@ -215,7 +215,7 @@ void machine_kexec(struct kimage *image)
26036 }
26037
26038 control_page = page_address(image->control_code_page);
26039- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
26040+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
26041
26042 relocate_kernel_ptr = control_page;
26043 page_list[PA_CONTROL_PAGE] = __pa(control_page);
26044diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
26045index c050a01..5774072 100644
26046--- a/arch/x86/kernel/mcount_64.S
26047+++ b/arch/x86/kernel/mcount_64.S
26048@@ -7,7 +7,7 @@
26049 #include <linux/linkage.h>
26050 #include <asm/ptrace.h>
26051 #include <asm/ftrace.h>
26052-
26053+#include <asm/alternative-asm.h>
26054
26055 .code64
26056 .section .entry.text, "ax"
26057@@ -24,8 +24,9 @@
26058 #ifdef CONFIG_DYNAMIC_FTRACE
26059
26060 ENTRY(function_hook)
26061+ pax_force_retaddr
26062 retq
26063-END(function_hook)
26064+ENDPROC(function_hook)
26065
26066 /* skip is set if stack has been adjusted */
26067 .macro ftrace_caller_setup skip=0
26068@@ -66,8 +67,9 @@ GLOBAL(ftrace_graph_call)
26069 #endif
26070
26071 GLOBAL(ftrace_stub)
26072+ pax_force_retaddr
26073 retq
26074-END(ftrace_caller)
26075+ENDPROC(ftrace_caller)
26076
26077 ENTRY(ftrace_regs_caller)
26078 /* Save the current flags before compare (in SS location)*/
26079@@ -135,7 +137,7 @@ ftrace_restore_flags:
26080 popfq
26081 jmp ftrace_stub
26082
26083-END(ftrace_regs_caller)
26084+ENDPROC(ftrace_regs_caller)
26085
26086
26087 #else /* ! CONFIG_DYNAMIC_FTRACE */
26088@@ -156,6 +158,7 @@ ENTRY(function_hook)
26089 #endif
26090
26091 GLOBAL(ftrace_stub)
26092+ pax_force_retaddr
26093 retq
26094
26095 trace:
26096@@ -169,12 +172,13 @@ trace:
26097 #endif
26098 subq $MCOUNT_INSN_SIZE, %rdi
26099
26100+ pax_force_fptr ftrace_trace_function
26101 call *ftrace_trace_function
26102
26103 MCOUNT_RESTORE_FRAME
26104
26105 jmp ftrace_stub
26106-END(function_hook)
26107+ENDPROC(function_hook)
26108 #endif /* CONFIG_DYNAMIC_FTRACE */
26109 #endif /* CONFIG_FUNCTION_TRACER */
26110
26111@@ -196,8 +200,9 @@ ENTRY(ftrace_graph_caller)
26112
26113 MCOUNT_RESTORE_FRAME
26114
26115+ pax_force_retaddr
26116 retq
26117-END(ftrace_graph_caller)
26118+ENDPROC(ftrace_graph_caller)
26119
26120 GLOBAL(return_to_handler)
26121 subq $24, %rsp
26122@@ -213,5 +218,7 @@ GLOBAL(return_to_handler)
26123 movq 8(%rsp), %rdx
26124 movq (%rsp), %rax
26125 addq $24, %rsp
26126+ pax_force_fptr %rdi
26127 jmp *%rdi
26128+ENDPROC(return_to_handler)
26129 #endif
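pax_force_retaddr, added before each ret above (and visible as btsq $63,(%rsp) in the kretprobe trampoline earlier), sets the top bit of the return address: kernel addresses already have bit 63 set, while a corrupted address aimed at userland becomes non-canonical and faults instead of executing. pax_force_fptr does the same for indirect-call targets. A userland illustration of the bit trick itself:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = (uint64_t)(uintptr_t)&main;	/* a userland code address */

	addr |= (uint64_t)1 << 63;	/* what btsq $63 does to a return address */
	printf("%p -> %#llx (non-canonical, jumping here faults)\n",
	       (void *)&main, (unsigned long long)addr);
	return 0;
}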
26130diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
26131index e69f988..da078ea 100644
26132--- a/arch/x86/kernel/module.c
26133+++ b/arch/x86/kernel/module.c
26134@@ -81,17 +81,62 @@ static unsigned long int get_module_load_offset(void)
26135 }
26136 #endif
26137
26138-void *module_alloc(unsigned long size)
26139+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
26140 {
26141- if (PAGE_ALIGN(size) > MODULES_LEN)
26142+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
26143 return NULL;
26144 return __vmalloc_node_range(size, 1,
26145 MODULES_VADDR + get_module_load_offset(),
26146- MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
26147- PAGE_KERNEL_EXEC, NUMA_NO_NODE,
26148+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
26149+ prot, NUMA_NO_NODE,
26150 __builtin_return_address(0));
26151 }
26152
26153+void *module_alloc(unsigned long size)
26154+{
26155+
26156+#ifdef CONFIG_PAX_KERNEXEC
26157+ return __module_alloc(size, PAGE_KERNEL);
26158+#else
26159+ return __module_alloc(size, PAGE_KERNEL_EXEC);
26160+#endif
26161+
26162+}
26163+
26164+#ifdef CONFIG_PAX_KERNEXEC
26165+#ifdef CONFIG_X86_32
26166+void *module_alloc_exec(unsigned long size)
26167+{
26168+ struct vm_struct *area;
26169+
26170+ if (size == 0)
26171+ return NULL;
26172+
26173+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
26174+ return area ? area->addr : NULL;
26175+}
26176+EXPORT_SYMBOL(module_alloc_exec);
26177+
26178+void module_free_exec(struct module *mod, void *module_region)
26179+{
26180+ vunmap(module_region);
26181+}
26182+EXPORT_SYMBOL(module_free_exec);
26183+#else
26184+void module_free_exec(struct module *mod, void *module_region)
26185+{
26186+ module_free(mod, module_region);
26187+}
26188+EXPORT_SYMBOL(module_free_exec);
26189+
26190+void *module_alloc_exec(unsigned long size)
26191+{
26192+ return __module_alloc(size, PAGE_KERNEL_RX);
26193+}
26194+EXPORT_SYMBOL(module_alloc_exec);
26195+#endif
26196+#endif
26197+
26198 #ifdef CONFIG_X86_32
26199 int apply_relocate(Elf32_Shdr *sechdrs,
26200 const char *strtab,
26201@@ -102,14 +147,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26202 unsigned int i;
26203 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
26204 Elf32_Sym *sym;
26205- uint32_t *location;
26206+ uint32_t *plocation, location;
26207
26208 DEBUGP("Applying relocate section %u to %u\n",
26209 relsec, sechdrs[relsec].sh_info);
26210 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
26211 /* This is where to make the change */
26212- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
26213- + rel[i].r_offset;
26214+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
26215+ location = (uint32_t)plocation;
26216+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
26217+ plocation = ktla_ktva((void *)plocation);
26218 /* This is the symbol it is referring to. Note that all
26219 undefined symbols have been resolved. */
26220 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
26221@@ -118,11 +165,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26222 switch (ELF32_R_TYPE(rel[i].r_info)) {
26223 case R_386_32:
26224 /* We add the value into the location given */
26225- *location += sym->st_value;
26226+ pax_open_kernel();
26227+ *plocation += sym->st_value;
26228+ pax_close_kernel();
26229 break;
26230 case R_386_PC32:
26231 /* Add the value, subtract its position */
26232- *location += sym->st_value - (uint32_t)location;
26233+ pax_open_kernel();
26234+ *plocation += sym->st_value - location;
26235+ pax_close_kernel();
26236 break;
26237 default:
26238 pr_err("%s: Unknown relocation: %u\n",
26239@@ -167,21 +218,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
26240 case R_X86_64_NONE:
26241 break;
26242 case R_X86_64_64:
26243+ pax_open_kernel();
26244 *(u64 *)loc = val;
26245+ pax_close_kernel();
26246 break;
26247 case R_X86_64_32:
26248+ pax_open_kernel();
26249 *(u32 *)loc = val;
26250+ pax_close_kernel();
26251 if (val != *(u32 *)loc)
26252 goto overflow;
26253 break;
26254 case R_X86_64_32S:
26255+ pax_open_kernel();
26256 *(s32 *)loc = val;
26257+ pax_close_kernel();
26258 if ((s64)val != *(s32 *)loc)
26259 goto overflow;
26260 break;
26261 case R_X86_64_PC32:
26262 val -= (u64)loc;
26263+ pax_open_kernel();
26264 *(u32 *)loc = val;
26265+ pax_close_kernel();
26266+
26267 #if 0
26268 if ((s64)val != *(s32 *)loc)
26269 goto overflow;
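These module.c changes split loader memory along W^X lines: under PAX_KERNEXEC, module_alloc() now hands out non-executable PAGE_KERNEL memory for data, while code goes through the new module_alloc_exec(), whose mappings are executable but only writable inside an open-kernel window. A hedged usage sketch; the loader shape and the image_* names are illustrative, not the patch's actual module loader:

static int load_sections_sketch(const void *image_text, unsigned long text_size,
				const void *image_data, unsigned long data_size)
{
	void *code = module_alloc_exec(text_size);	/* RX mapping */
	void *data = module_alloc(data_size);		/* RW, NX mapping */

	if (!code || !data) {
		if (code)
			module_free_exec(NULL, code);
		if (data)
			module_free(NULL, data);
		return -ENOMEM;
	}

	pax_open_kernel();
	memcpy(code, image_text, text_size);	/* brief writable window */
	pax_close_kernel();

	memcpy(data, image_data, data_size);	/* data stays plain RW */
	return 0;
}

The apply_relocate hunks serve the same end: relocations landing in SHF_EXECINSTR sections are redirected through ktla_ktva() and bracketed by the open/close pair, since the text they patch is no longer directly writable.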
26270diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26271index c9603ac..9f88728 100644
26272--- a/arch/x86/kernel/msr.c
26273+++ b/arch/x86/kernel/msr.c
26274@@ -37,6 +37,7 @@
26275 #include <linux/notifier.h>
26276 #include <linux/uaccess.h>
26277 #include <linux/gfp.h>
26278+#include <linux/grsecurity.h>
26279
26280 #include <asm/processor.h>
26281 #include <asm/msr.h>
26282@@ -103,6 +104,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26283 int err = 0;
26284 ssize_t bytes = 0;
26285
26286+#ifdef CONFIG_GRKERNSEC_KMEM
26287+ gr_handle_msr_write();
26288+ return -EPERM;
26289+#endif
26290+
26291 if (count % 8)
26292 return -EINVAL; /* Invalid chunk size */
26293
26294@@ -150,6 +156,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26295 err = -EBADF;
26296 break;
26297 }
26298+#ifdef CONFIG_GRKERNSEC_KMEM
26299+ gr_handle_msr_write();
26300+ return -EPERM;
26301+#endif
26302 if (copy_from_user(&regs, uregs, sizeof regs)) {
26303 err = -EFAULT;
26304 break;
26305@@ -233,7 +243,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26306 return notifier_from_errno(err);
26307 }
26308
26309-static struct notifier_block __refdata msr_class_cpu_notifier = {
26310+static struct notifier_block msr_class_cpu_notifier = {
26311 .notifier_call = msr_class_cpu_callback,
26312 };
26313
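Under CONFIG_GRKERNSEC_KMEM the msr.c hunks reject every MSR write from userland with -EPERM before any register is touched, reporting it via gr_handle_msr_write(); reads stay available. A userland probe of the write path (the register number is illustrative; on a patched kernel the value never reaches hardware):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t val = 0;
	int fd = open("/dev/cpu/0/msr", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* the file offset selects the MSR to write */
	if (pwrite(fd, &val, sizeof(val), 0x1a0) != (ssize_t)sizeof(val))
		perror("pwrite");	/* EPERM under GRKERNSEC_KMEM */
	close(fd);
	return 0;
}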
26314diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26315index c3e985d..110a36a 100644
26316--- a/arch/x86/kernel/nmi.c
26317+++ b/arch/x86/kernel/nmi.c
26318@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
26319
26320 static void nmi_max_handler(struct irq_work *w)
26321 {
26322- struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
26323+ struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
26324 int remainder_ns, decimal_msecs;
26325- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
26326+ u64 whole_msecs = ACCESS_ONCE(n->max_duration);
26327
26328 remainder_ns = do_div(whole_msecs, (1000 * 1000));
26329 decimal_msecs = remainder_ns / 1000;
26330
26331 printk_ratelimited(KERN_INFO
26332 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
26333- a->handler, whole_msecs, decimal_msecs);
26334+ n->action->handler, whole_msecs, decimal_msecs);
26335 }
26336
26337 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26338@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26339 delta = sched_clock() - delta;
26340 trace_nmi_handler(a->handler, (int)delta, thishandled);
26341
26342- if (delta < nmi_longest_ns || delta < a->max_duration)
26343+ if (delta < nmi_longest_ns || delta < a->work->max_duration)
26344 continue;
26345
26346- a->max_duration = delta;
26347- irq_work_queue(&a->irq_work);
26348+ a->work->max_duration = delta;
26349+ irq_work_queue(&a->work->irq_work);
26350 }
26351
26352 rcu_read_unlock();
26353@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26354 }
26355 NOKPROBE_SYMBOL(nmi_handle);
26356
26357-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26358+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26359 {
26360 struct nmi_desc *desc = nmi_to_desc(type);
26361 unsigned long flags;
26362@@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26363 if (!action->handler)
26364 return -EINVAL;
26365
26366- init_irq_work(&action->irq_work, nmi_max_handler);
26367+ action->work->action = action;
26368+ init_irq_work(&action->work->irq_work, nmi_max_handler);
26369
26370 spin_lock_irqsave(&desc->lock, flags);
26371
26372@@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26373 * event confuses some handlers (kdump uses this flag)
26374 */
26375 if (action->flags & NMI_FLAG_FIRST)
26376- list_add_rcu(&action->list, &desc->head);
26377+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26378 else
26379- list_add_tail_rcu(&action->list, &desc->head);
26380+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26381
26382 spin_unlock_irqrestore(&desc->lock, flags);
26383 return 0;
26384@@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26385 if (!strcmp(n->name, name)) {
26386 WARN(in_nmi(),
26387 "Trying to free NMI (%s) from NMI context!\n", n->name);
26388- list_del_rcu(&n->list);
26389+ pax_list_del_rcu((struct list_head *)&n->list);
26390 break;
26391 }
26392 }
26393@@ -528,6 +529,17 @@ static inline void nmi_nesting_postprocess(void)
26394 dotraplinkage notrace void
26395 do_nmi(struct pt_regs *regs, long error_code)
26396 {
26397+
26398+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26399+ if (!user_mode(regs)) {
26400+ unsigned long cs = regs->cs & 0xFFFF;
26401+ unsigned long ip = ktva_ktla(regs->ip);
26402+
26403+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26404+ regs->ip = ip;
26405+ }
26406+#endif
26407+
26408 nmi_nesting_preprocess(regs);
26409
26410 nmi_enter();
26411diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26412index 6d9582e..f746287 100644
26413--- a/arch/x86/kernel/nmi_selftest.c
26414+++ b/arch/x86/kernel/nmi_selftest.c
26415@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26416 {
26417 /* trap all the unknown NMIs we may generate */
26418 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26419- __initdata);
26420+ __initconst);
26421 }
26422
26423 static void __init cleanup_nmi_testsuite(void)
26424@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26425 unsigned long timeout;
26426
26427 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26428- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26429+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26430 nmi_fail = FAILURE;
26431 return;
26432 }
26433diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26434index bbb6c73..24a58ef 100644
26435--- a/arch/x86/kernel/paravirt-spinlocks.c
26436+++ b/arch/x86/kernel/paravirt-spinlocks.c
26437@@ -8,7 +8,7 @@
26438
26439 #include <asm/paravirt.h>
26440
26441-struct pv_lock_ops pv_lock_ops = {
26442+struct pv_lock_ops pv_lock_ops __read_only = {
26443 #ifdef CONFIG_SMP
26444 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26445 .unlock_kick = paravirt_nop,
26446diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26447index 548d25f..f8fb99c 100644
26448--- a/arch/x86/kernel/paravirt.c
26449+++ b/arch/x86/kernel/paravirt.c
26450@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
26451 {
26452 return x;
26453 }
26454+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26455+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26456+#endif
26457
26458 void __init default_banner(void)
26459 {
26460@@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26461
26462 if (opfunc == NULL)
26463 /* If there's no function, patch it with a ud2a (BUG) */
26464- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26465- else if (opfunc == _paravirt_nop)
26466+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26467+ else if (opfunc == (void *)_paravirt_nop)
26468 /* If the operation is a nop, then nop the callsite */
26469 ret = paravirt_patch_nop();
26470
26471 /* identity functions just return their single argument */
26472- else if (opfunc == _paravirt_ident_32)
26473+ else if (opfunc == (void *)_paravirt_ident_32)
26474 ret = paravirt_patch_ident_32(insnbuf, len);
26475- else if (opfunc == _paravirt_ident_64)
26476+ else if (opfunc == (void *)_paravirt_ident_64)
26477 ret = paravirt_patch_ident_64(insnbuf, len);
26478+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26479+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
26480+ ret = paravirt_patch_ident_64(insnbuf, len);
26481+#endif
26482
26483 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
26484 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
26485@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
26486 if (insn_len > len || start == NULL)
26487 insn_len = len;
26488 else
26489- memcpy(insnbuf, start, insn_len);
26490+ memcpy(insnbuf, ktla_ktva(start), insn_len);
26491
26492 return insn_len;
26493 }
26494@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
26495 return this_cpu_read(paravirt_lazy_mode);
26496 }
26497
26498-struct pv_info pv_info = {
26499+struct pv_info pv_info __read_only = {
26500 .name = "bare hardware",
26501 .paravirt_enabled = 0,
26502 .kernel_rpl = 0,
26503@@ -311,16 +318,16 @@ struct pv_info pv_info = {
26504 #endif
26505 };
26506
26507-struct pv_init_ops pv_init_ops = {
26508+struct pv_init_ops pv_init_ops __read_only = {
26509 .patch = native_patch,
26510 };
26511
26512-struct pv_time_ops pv_time_ops = {
26513+struct pv_time_ops pv_time_ops __read_only = {
26514 .sched_clock = native_sched_clock,
26515 .steal_clock = native_steal_clock,
26516 };
26517
26518-__visible struct pv_irq_ops pv_irq_ops = {
26519+__visible struct pv_irq_ops pv_irq_ops __read_only = {
26520 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
26521 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
26522 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
26523@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
26524 #endif
26525 };
26526
26527-__visible struct pv_cpu_ops pv_cpu_ops = {
26528+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
26529 .cpuid = native_cpuid,
26530 .get_debugreg = native_get_debugreg,
26531 .set_debugreg = native_set_debugreg,
26532@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
26533 NOKPROBE_SYMBOL(native_set_debugreg);
26534 NOKPROBE_SYMBOL(native_load_idt);
26535
26536-struct pv_apic_ops pv_apic_ops = {
26537+struct pv_apic_ops pv_apic_ops __read_only = {
26538 #ifdef CONFIG_X86_LOCAL_APIC
26539 .startup_ipi_hook = paravirt_nop,
26540 #endif
26541 };
26542
26543-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
26544+#ifdef CONFIG_X86_32
26545+#ifdef CONFIG_X86_PAE
26546+/* 64-bit pagetable entries */
26547+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
26548+#else
26549 /* 32-bit pagetable entries */
26550 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
26551+#endif
26552 #else
26553 /* 64-bit pagetable entries */
26554 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
26555 #endif
26556
26557-struct pv_mmu_ops pv_mmu_ops = {
26558+struct pv_mmu_ops pv_mmu_ops __read_only = {
26559
26560 .read_cr2 = native_read_cr2,
26561 .write_cr2 = native_write_cr2,
26562@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
26563 .make_pud = PTE_IDENT,
26564
26565 .set_pgd = native_set_pgd,
26566+ .set_pgd_batched = native_set_pgd_batched,
26567 #endif
26568 #endif /* PAGETABLE_LEVELS >= 3 */
26569
26570@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
26571 },
26572
26573 .set_fixmap = native_set_fixmap,
26574+
26575+#ifdef CONFIG_PAX_KERNEXEC
26576+ .pax_open_kernel = native_pax_open_kernel,
26577+ .pax_close_kernel = native_pax_close_kernel,
26578+#endif
26579+
26580 };
26581
26582 EXPORT_SYMBOL_GPL(pv_time_ops);
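Marking pv_info and the pv_*_ops tables __read_only closes off a classic target: long-lived kernel function-pointer tables. Any legitimate late repointing then has to go through an explicit open-kernel window, just as the i8259 hunk earlier does for its irq_chip. A hedged kernel-context sketch of what such a write looks like (my_patch_fn is a placeholder, not a symbol from the patch):

static void repoint_pv_patch_sketch(unsigned (*my_patch_fn)(u8, u16, void *,
							    unsigned long, unsigned))
{
	pax_open_kernel();
	*(void **)&pv_init_ops.patch = my_patch_fn;	/* brief RW window */
	pax_close_kernel();
}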
26583diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
26584index 0497f71..7186c0d 100644
26585--- a/arch/x86/kernel/pci-calgary_64.c
26586+++ b/arch/x86/kernel/pci-calgary_64.c
26587@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
26588 tce_space = be64_to_cpu(readq(target));
26589 tce_space = tce_space & TAR_SW_BITS;
26590
26591- tce_space = tce_space & (~specified_table_size);
26592+ tce_space = tce_space & (~(unsigned long)specified_table_size);
26593 info->tce_space = (u64 *)__va(tce_space);
26594 }
26595 }
26596diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
26597index 35ccf75..7a15747 100644
26598--- a/arch/x86/kernel/pci-iommu_table.c
26599+++ b/arch/x86/kernel/pci-iommu_table.c
26600@@ -2,7 +2,7 @@
26601 #include <asm/iommu_table.h>
26602 #include <linux/string.h>
26603 #include <linux/kallsyms.h>
26604-
26605+#include <linux/sched.h>
26606
26607 #define DEBUG 1
26608
26609diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
26610index 77dd0ad..9ec4723 100644
26611--- a/arch/x86/kernel/pci-swiotlb.c
26612+++ b/arch/x86/kernel/pci-swiotlb.c
26613@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
26614 struct dma_attrs *attrs)
26615 {
26616 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
26617- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
26618+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
26619 else
26620 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
26621 }
26622diff --git a/arch/x86/kernel/preempt.S b/arch/x86/kernel/preempt.S
26623index ca7f0d5..8996469 100644
26624--- a/arch/x86/kernel/preempt.S
26625+++ b/arch/x86/kernel/preempt.S
26626@@ -3,12 +3,14 @@
26627 #include <asm/dwarf2.h>
26628 #include <asm/asm.h>
26629 #include <asm/calling.h>
26630+#include <asm/alternative-asm.h>
26631
26632 ENTRY(___preempt_schedule)
26633 CFI_STARTPROC
26634 SAVE_ALL
26635 call preempt_schedule
26636 RESTORE_ALL
26637+ pax_force_retaddr
26638 ret
26639 CFI_ENDPROC
26640
26641@@ -19,6 +21,7 @@ ENTRY(___preempt_schedule_context)
26642 SAVE_ALL
26643 call preempt_schedule_context
26644 RESTORE_ALL
26645+ pax_force_retaddr
26646 ret
26647 CFI_ENDPROC
26648
26649diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
26650index 4505e2a..ae28b0d 100644
26651--- a/arch/x86/kernel/process.c
26652+++ b/arch/x86/kernel/process.c
26653@@ -36,7 +36,8 @@
26654 * section. Since TSS's are completely CPU-local, we want them
26655 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
26656 */
26657-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
26658+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
26659+EXPORT_SYMBOL(init_tss);
26660
26661 #ifdef CONFIG_X86_64
26662 static DEFINE_PER_CPU(unsigned char, is_idle);
26663@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
26664 task_xstate_cachep =
26665 kmem_cache_create("task_xstate", xstate_size,
26666 __alignof__(union thread_xstate),
26667- SLAB_PANIC | SLAB_NOTRACK, NULL);
26668+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
26669 }
26670
26671 /*
26672@@ -105,7 +106,7 @@ void exit_thread(void)
26673 unsigned long *bp = t->io_bitmap_ptr;
26674
26675 if (bp) {
26676- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
26677+ struct tss_struct *tss = init_tss + get_cpu();
26678
26679 t->io_bitmap_ptr = NULL;
26680 clear_thread_flag(TIF_IO_BITMAP);
26681@@ -125,6 +126,9 @@ void flush_thread(void)
26682 {
26683 struct task_struct *tsk = current;
26684
26685+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
26686+ loadsegment(gs, 0);
26687+#endif
26688 flush_ptrace_hw_breakpoint(tsk);
26689 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
26690 drop_init_fpu(tsk);
26691@@ -271,7 +275,7 @@ static void __exit_idle(void)
26692 void exit_idle(void)
26693 {
26694 /* idle loop has pid 0 */
26695- if (current->pid)
26696+ if (task_pid_nr(current))
26697 return;
26698 __exit_idle();
26699 }
26700@@ -324,7 +328,7 @@ bool xen_set_default_idle(void)
26701 return ret;
26702 }
26703 #endif
26704-void stop_this_cpu(void *dummy)
26705+__noreturn void stop_this_cpu(void *dummy)
26706 {
26707 local_irq_disable();
26708 /*
26709@@ -453,16 +457,37 @@ static int __init idle_setup(char *str)
26710 }
26711 early_param("idle", idle_setup);
26712
26713-unsigned long arch_align_stack(unsigned long sp)
26714+#ifdef CONFIG_PAX_RANDKSTACK
26715+void pax_randomize_kstack(struct pt_regs *regs)
26716 {
26717- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
26718- sp -= get_random_int() % 8192;
26719- return sp & ~0xf;
26720-}
26721+ struct thread_struct *thread = &current->thread;
26722+ unsigned long time;
26723
26724-unsigned long arch_randomize_brk(struct mm_struct *mm)
26725-{
26726- unsigned long range_end = mm->brk + 0x02000000;
26727- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
26728-}
26729+ if (!randomize_va_space)
26730+ return;
26731+
26732+ if (v8086_mode(regs))
26733+ return;
26734
26735+ rdtscl(time);
26736+
26737+ /* P4 seems to return a 0 LSB, ignore it */
26738+#ifdef CONFIG_MPENTIUM4
26739+ time &= 0x3EUL;
26740+ time <<= 2;
26741+#elif defined(CONFIG_X86_64)
26742+ time &= 0xFUL;
26743+ time <<= 4;
26744+#else
26745+ time &= 0x1FUL;
26746+ time <<= 3;
26747+#endif
26748+
26749+ thread->sp0 ^= time;
26750+ load_sp0(init_tss + smp_processor_id(), thread);
26751+
26752+#ifdef CONFIG_X86_64
26753+ this_cpu_write(kernel_stack, thread->sp0);
26754+#endif
26755+}
26756+#endif
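The process.c hunk drops the generic arch_align_stack()/arch_randomize_brk() helpers (PaX handles userland randomization through its own ASLR) and adds pax_randomize_kstack(): on each qualifying kernel entry a few low TSC bits, shifted to preserve stack alignment, are XORed into sp0 so the kernel stack base moves per syscall. A userland sketch of just the entropy extraction, mirroring the x86_64 branch of the hunk:

#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>

int main(void)
{
	uint64_t time = __rdtsc();

	time &= 0xFUL;		/* keep 4 low TSC bits */
	time <<= 4;		/* scale: sp0 moves in 16-byte steps */
	printf("sp0 ^= %#llx\n", (unsigned long long)time);
	return 0;
}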
26757diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
26758index 7bc86bb..0ea06e8 100644
26759--- a/arch/x86/kernel/process_32.c
26760+++ b/arch/x86/kernel/process_32.c
26761@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
26762 unsigned long thread_saved_pc(struct task_struct *tsk)
26763 {
26764 return ((unsigned long *)tsk->thread.sp)[3];
26765+//XXX return tsk->thread.eip;
26766 }
26767
26768 void __show_regs(struct pt_regs *regs, int all)
26769@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
26770 unsigned long sp;
26771 unsigned short ss, gs;
26772
26773- if (user_mode_vm(regs)) {
26774+ if (user_mode(regs)) {
26775 sp = regs->sp;
26776 ss = regs->ss & 0xffff;
26777- gs = get_user_gs(regs);
26778 } else {
26779 sp = kernel_stack_pointer(regs);
26780 savesegment(ss, ss);
26781- savesegment(gs, gs);
26782 }
26783+ gs = get_user_gs(regs);
26784
26785 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
26786 (u16)regs->cs, regs->ip, regs->flags,
26787- smp_processor_id());
26788+ raw_smp_processor_id());
26789 print_symbol("EIP is at %s\n", regs->ip);
26790
26791 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
26792@@ -132,20 +132,21 @@ void release_thread(struct task_struct *dead_task)
26793 int copy_thread(unsigned long clone_flags, unsigned long sp,
26794 unsigned long arg, struct task_struct *p)
26795 {
26796- struct pt_regs *childregs = task_pt_regs(p);
26797+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
26798 struct task_struct *tsk;
26799 int err;
26800
26801 p->thread.sp = (unsigned long) childregs;
26802 p->thread.sp0 = (unsigned long) (childregs+1);
26803+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
26804
26805 if (unlikely(p->flags & PF_KTHREAD)) {
26806 /* kernel thread */
26807 memset(childregs, 0, sizeof(struct pt_regs));
26808 p->thread.ip = (unsigned long) ret_from_kernel_thread;
26809- task_user_gs(p) = __KERNEL_STACK_CANARY;
26810- childregs->ds = __USER_DS;
26811- childregs->es = __USER_DS;
26812+ savesegment(gs, childregs->gs);
26813+ childregs->ds = __KERNEL_DS;
26814+ childregs->es = __KERNEL_DS;
26815 childregs->fs = __KERNEL_PERCPU;
26816 childregs->bx = sp; /* function */
26817 childregs->bp = arg;
26818@@ -252,7 +253,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26819 struct thread_struct *prev = &prev_p->thread,
26820 *next = &next_p->thread;
26821 int cpu = smp_processor_id();
26822- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26823+ struct tss_struct *tss = init_tss + cpu;
26824 fpu_switch_t fpu;
26825
26826 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
26827@@ -276,6 +277,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26828 */
26829 lazy_save_gs(prev->gs);
26830
26831+#ifdef CONFIG_PAX_MEMORY_UDEREF
26832+ __set_fs(task_thread_info(next_p)->addr_limit);
26833+#endif
26834+
26835 /*
26836 * Load the per-thread Thread-Local Storage descriptor.
26837 */
26838@@ -314,9 +319,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26839 */
26840 arch_end_context_switch(next_p);
26841
26842- this_cpu_write(kernel_stack,
26843- (unsigned long)task_stack_page(next_p) +
26844- THREAD_SIZE - KERNEL_STACK_OFFSET);
26845+ this_cpu_write(current_task, next_p);
26846+ this_cpu_write(current_tinfo, &next_p->tinfo);
26847+ this_cpu_write(kernel_stack, next->sp0);
26848
26849 /*
26850 * Restore %gs if needed (which is common)
26851@@ -326,8 +331,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26852
26853 switch_fpu_finish(next_p, fpu);
26854
26855- this_cpu_write(current_task, next_p);
26856-
26857 return prev_p;
26858 }
26859
26860@@ -357,4 +360,3 @@ unsigned long get_wchan(struct task_struct *p)
26861 } while (count++ < 16);
26862 return 0;
26863 }
26864-
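Editor's note: the copy_thread() hunk above stops taking childregs from task_pt_regs() and places the register frame by hand, leaving an 8-byte pad below the top of the stack page. A minimal user-space sketch of that layout arithmetic — THREAD_SIZE and struct pt_regs here are mock stand-ins, not the kernel's definitions:

#include <stdio.h>
#include <stdlib.h>

#define THREAD_SIZE 8192            /* mock: i386 kernel stacks are 2 pages */

struct pt_regs { unsigned long regs[17]; };   /* mock register-frame layout */

int main(void)
{
    void *stack = malloc(THREAD_SIZE);   /* stands in for task_stack_page(p) */

    /* grsec variant: frame sits sizeof(pt_regs) + 8 below the stack top */
    struct pt_regs *childregs =
        (struct pt_regs *)((char *)stack + THREAD_SIZE - sizeof(struct pt_regs) - 8);

    unsigned long sp  = (unsigned long)childregs;       /* p->thread.sp  */
    unsigned long sp0 = (unsigned long)(childregs + 1); /* p->thread.sp0 */

    printf("sp=%#lx sp0 ends %lu bytes below the stack top\n",
           sp, ((unsigned long)stack + THREAD_SIZE) - sp0);
    free(stack);
    return 0;
}

sp0 (the ring-0 stack pointer loaded into the TSS) lands 8 bytes below the top of the page, matching the "- 8" in the hunk.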
26865diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
26866index ca5b02d..c0b2f6a 100644
26867--- a/arch/x86/kernel/process_64.c
26868+++ b/arch/x86/kernel/process_64.c
26869@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26870 struct pt_regs *childregs;
26871 struct task_struct *me = current;
26872
26873- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
26874+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
26875 childregs = task_pt_regs(p);
26876 p->thread.sp = (unsigned long) childregs;
26877 p->thread.usersp = me->thread.usersp;
26878+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
26879 set_tsk_thread_flag(p, TIF_FORK);
26880 p->thread.fpu_counter = 0;
26881 p->thread.io_bitmap_ptr = NULL;
26882@@ -172,6 +173,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26883 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
26884 savesegment(es, p->thread.es);
26885 savesegment(ds, p->thread.ds);
26886+ savesegment(ss, p->thread.ss);
26887+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
26888 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26889
26890 if (unlikely(p->flags & PF_KTHREAD)) {
26891@@ -280,7 +283,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26892 struct thread_struct *prev = &prev_p->thread;
26893 struct thread_struct *next = &next_p->thread;
26894 int cpu = smp_processor_id();
26895- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26896+ struct tss_struct *tss = init_tss + cpu;
26897 unsigned fsindex, gsindex;
26898 fpu_switch_t fpu;
26899
26900@@ -303,6 +306,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26901 if (unlikely(next->ds | prev->ds))
26902 loadsegment(ds, next->ds);
26903
26904+ savesegment(ss, prev->ss);
26905+ if (unlikely(next->ss != prev->ss))
26906+ loadsegment(ss, next->ss);
26907
26908 /* We must save %fs and %gs before load_TLS() because
26909 * %fs and %gs may be cleared by load_TLS().
26910@@ -362,6 +368,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26911 prev->usersp = this_cpu_read(old_rsp);
26912 this_cpu_write(old_rsp, next->usersp);
26913 this_cpu_write(current_task, next_p);
26914+ this_cpu_write(current_tinfo, &next_p->tinfo);
26915
26916 /*
26917 * If it were not for PREEMPT_ACTIVE we could guarantee that the
26918@@ -371,9 +378,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26919 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
26920 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
26921
26922- this_cpu_write(kernel_stack,
26923- (unsigned long)task_stack_page(next_p) +
26924- THREAD_SIZE - KERNEL_STACK_OFFSET);
26925+ this_cpu_write(kernel_stack, next->sp0);
26926
26927 /*
26928 * Now maybe reload the debug registers and handle I/O bitmaps
26929@@ -443,12 +448,11 @@ unsigned long get_wchan(struct task_struct *p)
26930 if (!p || p == current || p->state == TASK_RUNNING)
26931 return 0;
26932 stack = (unsigned long)task_stack_page(p);
26933- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
26934+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
26935 return 0;
26936 fp = *(u64 *)(p->thread.sp);
26937 do {
26938- if (fp < (unsigned long)stack ||
26939- fp >= (unsigned long)stack+THREAD_SIZE)
26940+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
26941 return 0;
26942 ip = *(u64 *)(fp+8);
26943 if (!in_sched_functions(ip))
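Editor's note: the get_wchan() changes tighten the frame-pointer walk so neither sp nor any saved fp may point past stack + THREAD_SIZE - 16 - sizeof(u64), since the patched layout reserves 16 bytes at the stack top. A self-contained sketch of the same bounded walk over a synthetic stack (fake frames in a local buffer; in_sched_functions() and the real stack are omitted):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define THREAD_SIZE 16384

/* Follow saved (fp, return-ip) pairs, refusing any frame pointer that
 * leaves the [stack, stack + THREAD_SIZE - 16 - 8] window. */
static void walk(uint64_t stack, uint64_t sp, const uint8_t *mem)
{
    uint64_t fp, ip;
    int count = 0;

    if (sp < stack || sp > stack + THREAD_SIZE - 16 - sizeof(uint64_t))
        return;
    memcpy(&fp, mem + (sp - stack), 8);
    do {
        if (fp < stack || fp > stack + THREAD_SIZE - 16 - sizeof(uint64_t))
            return;                          /* fp escaped the stack: stop */
        memcpy(&ip, mem + (fp + 8 - stack), 8);
        printf("frame %d: ip=%#llx\n", count, (unsigned long long)ip);
        memcpy(&fp, mem + (fp - stack), 8);  /* chase the next saved fp */
    } while (++count < 16);
}

int main(void)
{
    static uint8_t mem[THREAD_SIZE];
    uint64_t stack = 0x1000000;              /* pretend base address */
    uint64_t f1 = stack + 0x100, f2 = stack + 0x200, bad = stack + THREAD_SIZE;
    uint64_t ip1 = 0xdead0001, ip2 = 0xdead0002;

    memcpy(mem + 0x080, &f1, 8);             /* sp slot -> first frame */
    memcpy(mem + 0x100, &f2, 8); memcpy(mem + 0x108, &ip1, 8);
    memcpy(mem + 0x200, &bad, 8); memcpy(mem + 0x208, &ip2, 8);
    walk(stack, stack + 0x080, mem);         /* prints two frames, then stops */
    return 0;
}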
26944diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
26945index 678c0ad..2fc2a7b 100644
26946--- a/arch/x86/kernel/ptrace.c
26947+++ b/arch/x86/kernel/ptrace.c
26948@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
26949 unsigned long sp = (unsigned long)&regs->sp;
26950 u32 *prev_esp;
26951
26952- if (context == (sp & ~(THREAD_SIZE - 1)))
26953+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
26954 return sp;
26955
26956- prev_esp = (u32 *)(context);
26957+ prev_esp = *(u32 **)(context);
26958 if (prev_esp)
26959 return (unsigned long)prev_esp;
26960
26961@@ -452,6 +452,20 @@ static int putreg(struct task_struct *child,
26962 if (child->thread.gs != value)
26963 return do_arch_prctl(child, ARCH_SET_GS, value);
26964 return 0;
26965+
26966+ case offsetof(struct user_regs_struct,ip):
26967+ /*
26968+ * Protect against any attempt to set ip to an
26969+ * impossible address. There are dragons lurking if the
26970+ * address is noncanonical. (This explicitly allows
26971+ * setting ip to TASK_SIZE_MAX, because user code can do
26972+ * that all by itself by running off the end of its
26973+ * address space.)
26974+ */
26975+ if (value > TASK_SIZE_MAX)
26976+ return -EIO;
26977+ break;
26978+
26979 #endif
26980 }
26981
26982@@ -588,7 +602,7 @@ static void ptrace_triggered(struct perf_event *bp,
26983 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
26984 {
26985 int i;
26986- int dr7 = 0;
26987+ unsigned long dr7 = 0;
26988 struct arch_hw_breakpoint *info;
26989
26990 for (i = 0; i < HBP_NUM; i++) {
26991@@ -822,7 +836,7 @@ long arch_ptrace(struct task_struct *child, long request,
26992 unsigned long addr, unsigned long data)
26993 {
26994 int ret;
26995- unsigned long __user *datap = (unsigned long __user *)data;
26996+ unsigned long __user *datap = (__force unsigned long __user *)data;
26997
26998 switch (request) {
26999 /* read the word at location addr in the USER area. */
27000@@ -907,14 +921,14 @@ long arch_ptrace(struct task_struct *child, long request,
27001 if ((int) addr < 0)
27002 return -EIO;
27003 ret = do_get_thread_area(child, addr,
27004- (struct user_desc __user *)data);
27005+ (__force struct user_desc __user *) data);
27006 break;
27007
27008 case PTRACE_SET_THREAD_AREA:
27009 if ((int) addr < 0)
27010 return -EIO;
27011 ret = do_set_thread_area(child, addr,
27012- (struct user_desc __user *)data, 0);
27013+ (__force struct user_desc __user *) data, 0);
27014 break;
27015 #endif
27016
27017@@ -1292,7 +1306,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
27018
27019 #ifdef CONFIG_X86_64
27020
27021-static struct user_regset x86_64_regsets[] __read_mostly = {
27022+static user_regset_no_const x86_64_regsets[] __read_only = {
27023 [REGSET_GENERAL] = {
27024 .core_note_type = NT_PRSTATUS,
27025 .n = sizeof(struct user_regs_struct) / sizeof(long),
27026@@ -1333,7 +1347,7 @@ static const struct user_regset_view user_x86_64_view = {
27027 #endif /* CONFIG_X86_64 */
27028
27029 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
27030-static struct user_regset x86_32_regsets[] __read_mostly = {
27031+static user_regset_no_const x86_32_regsets[] __read_only = {
27032 [REGSET_GENERAL] = {
27033 .core_note_type = NT_PRSTATUS,
27034 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
27035@@ -1386,7 +1400,7 @@ static const struct user_regset_view user_x86_32_view = {
27036 */
27037 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
27038
27039-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
27040+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
27041 {
27042 #ifdef CONFIG_X86_64
27043 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
27044@@ -1421,7 +1435,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
27045 memset(info, 0, sizeof(*info));
27046 info->si_signo = SIGTRAP;
27047 info->si_code = si_code;
27048- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
27049+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
27050 }
27051
27052 void user_single_step_siginfo(struct task_struct *tsk,
27053@@ -1450,6 +1464,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
27054 # define IS_IA32 0
27055 #endif
27056
27057+#ifdef CONFIG_GRKERNSEC_SETXID
27058+extern void gr_delayed_cred_worker(void);
27059+#endif
27060+
27061 /*
27062 * We must return the syscall number to actually look up in the table.
27063 * This can be -1L to skip running any syscall at all.
27064@@ -1460,6 +1478,11 @@ long syscall_trace_enter(struct pt_regs *regs)
27065
27066 user_exit();
27067
27068+#ifdef CONFIG_GRKERNSEC_SETXID
27069+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
27070+ gr_delayed_cred_worker();
27071+#endif
27072+
27073 /*
27074 * If we stepped into a sysenter/syscall insn, it trapped in
27075 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
27076@@ -1515,6 +1538,11 @@ void syscall_trace_leave(struct pt_regs *regs)
27077 */
27078 user_exit();
27079
27080+#ifdef CONFIG_GRKERNSEC_SETXID
27081+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
27082+ gr_delayed_cred_worker();
27083+#endif
27084+
27085 audit_syscall_exit(regs);
27086
27087 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
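Editor's note: the new putreg() case rejects any traced ip above TASK_SIZE_MAX because a noncanonical return address is dangerous to resume into. On 48-bit x86-64, "canonical" means bits 63:48 replicate bit 47; a small standalone checker (illustrative only, not kernel code):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Canonical iff sign-extending the 48-bit payload reproduces the value. */
static bool is_canonical_48(uint64_t addr)
{
    /* shift the payload to the top, arithmetic-shift back down */
    return (uint64_t)((int64_t)(addr << 16) >> 16) == addr;
}

int main(void)
{
    uint64_t probe[] = {
        0x00007fffffffffffULL,   /* top of the user range: canonical      */
        0x0000800000000000ULL,   /* first noncanonical address            */
        0xffff800000000000ULL,   /* bottom of the kernel range: canonical */
    };
    for (int i = 0; i < 3; i++)
        printf("%#018llx -> %s\n", (unsigned long long)probe[i],
               is_canonical_48(probe[i]) ? "canonical" : "noncanonical");
    return 0;
}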
27088diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
27089index 2f355d2..e75ed0a 100644
27090--- a/arch/x86/kernel/pvclock.c
27091+++ b/arch/x86/kernel/pvclock.c
27092@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
27093 reset_hung_task_detector();
27094 }
27095
27096-static atomic64_t last_value = ATOMIC64_INIT(0);
27097+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
27098
27099 void pvclock_resume(void)
27100 {
27101- atomic64_set(&last_value, 0);
27102+ atomic64_set_unchecked(&last_value, 0);
27103 }
27104
27105 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
27106@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
27107 * updating at the same time, and one of them could be slightly behind,
27108 * making the assumption that last_value always go forward fail to hold.
27109 */
27110- last = atomic64_read(&last_value);
27111+ last = atomic64_read_unchecked(&last_value);
27112 do {
27113 if (ret < last)
27114 return last;
27115- last = atomic64_cmpxchg(&last_value, last, ret);
27116+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
27117 } while (unlikely(last != ret));
27118
27119 return ret;
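Editor's note: last_value becomes atomic64_unchecked_t because it holds raw cycle counts where wrap is harmless; monotonicity is enforced by the cmpxchg loop itself, not by overflow instrumentation. A user-space C11 sketch of that loop (a simplification; the kernel uses atomic64_cmpxchg on its own atomic type):

#include <stdio.h>
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t last_value;

/* Return a never-decreasing view of a raw timestamp: if another CPU has
 * already published a later value, report that instead of our sample. */
static uint64_t monotonic_read(uint64_t sample)
{
    uint64_t last = atomic_load(&last_value);
    do {
        if (sample < last)
            return last;             /* a later time is already published */
    } while (!atomic_compare_exchange_weak(&last_value, &last, sample));
    return sample;
}

int main(void)
{
    printf("%llu\n", (unsigned long long)monotonic_read(100)); /* 100 */
    printf("%llu\n", (unsigned long long)monotonic_read(90));  /* still 100 */
    printf("%llu\n", (unsigned long long)monotonic_read(120)); /* 120 */
    return 0;
}

On compare-exchange failure, last is refreshed with the current published value and the sample < last test runs again, exactly mirroring the kernel's do/while over atomic64_cmpxchg.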
27120diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
27121index 52b1157..c6e67c4 100644
27122--- a/arch/x86/kernel/reboot.c
27123+++ b/arch/x86/kernel/reboot.c
27124@@ -68,6 +68,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
27125
27126 void __noreturn machine_real_restart(unsigned int type)
27127 {
27128+
27129+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
27130+ struct desc_struct *gdt;
27131+#endif
27132+
27133 local_irq_disable();
27134
27135 /*
27136@@ -95,7 +100,29 @@ void __noreturn machine_real_restart(unsigned int type)
27137
27138 /* Jump to the identity-mapped low memory code */
27139 #ifdef CONFIG_X86_32
27140- asm volatile("jmpl *%0" : :
27141+
27142+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
27143+ gdt = get_cpu_gdt_table(smp_processor_id());
27144+ pax_open_kernel();
27145+#ifdef CONFIG_PAX_MEMORY_UDEREF
27146+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
27147+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
27148+ loadsegment(ds, __KERNEL_DS);
27149+ loadsegment(es, __KERNEL_DS);
27150+ loadsegment(ss, __KERNEL_DS);
27151+#endif
27152+#ifdef CONFIG_PAX_KERNEXEC
27153+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
27154+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
27155+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
27156+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
27157+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
27158+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
27159+#endif
27160+ pax_close_kernel();
27161+#endif
27162+
27163+ asm volatile("ljmpl *%0" : :
27164 "rm" (real_mode_header->machine_real_restart_asm),
27165 "a" (type));
27166 #else
27167@@ -486,7 +513,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
27168 * This means that this function can never return, it can misbehave
27169 * by not rebooting properly and hanging.
27170 */
27171-static void native_machine_emergency_restart(void)
27172+static void __noreturn native_machine_emergency_restart(void)
27173 {
27174 int i;
27175 int attempt = 0;
27176@@ -610,13 +637,13 @@ void native_machine_shutdown(void)
27177 #endif
27178 }
27179
27180-static void __machine_emergency_restart(int emergency)
27181+static void __noreturn __machine_emergency_restart(int emergency)
27182 {
27183 reboot_emergency = emergency;
27184 machine_ops.emergency_restart();
27185 }
27186
27187-static void native_machine_restart(char *__unused)
27188+static void __noreturn native_machine_restart(char *__unused)
27189 {
27190 pr_notice("machine restart\n");
27191
27192@@ -625,7 +652,7 @@ static void native_machine_restart(char *__unused)
27193 __machine_emergency_restart(0);
27194 }
27195
27196-static void native_machine_halt(void)
27197+static void __noreturn native_machine_halt(void)
27198 {
27199 /* Stop other cpus and apics */
27200 machine_shutdown();
27201@@ -635,7 +662,7 @@ static void native_machine_halt(void)
27202 stop_this_cpu(NULL);
27203 }
27204
27205-static void native_machine_power_off(void)
27206+static void __noreturn native_machine_power_off(void)
27207 {
27208 if (pm_power_off) {
27209 if (!reboot_force)
27210@@ -644,9 +671,10 @@ static void native_machine_power_off(void)
27211 }
27212 /* A fallback in case there is no PM info available */
27213 tboot_shutdown(TB_SHUTDOWN_HALT);
27214+ unreachable();
27215 }
27216
27217-struct machine_ops machine_ops = {
27218+struct machine_ops machine_ops __read_only = {
27219 .power_off = native_machine_power_off,
27220 .shutdown = native_machine_shutdown,
27221 .emergency_restart = native_machine_emergency_restart,
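Editor's note: under KERNEXEC the kernel code segment is not flat, so machine_real_restart() must restore a base-0, 4 GiB descriptor before jumping to the identity-mapped stub. A sketch of the descriptor fields involved — the bitfield layout mirrors the kernel's desc_struct, and the arithmetic shows what the hunk's base/limit/g assignments amount to:

#include <stdio.h>
#include <stdint.h>

/* Field layout of one 8-byte x86 segment descriptor (as in desc_struct). */
struct seg_desc {
    uint16_t limit0;
    uint16_t base0;
    unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
    unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
};

int main(void)
{
    struct seg_desc cs = {0};

    cs.type = 0xb; cs.s = 1; cs.p = 1; cs.d = 1;   /* 32-bit code segment */
    /* what the hunk re-establishes: base 0, limit 0xfffff, 4 KiB pages */
    cs.base0 = 0; cs.base1 = 0; cs.base2 = 0;
    cs.limit0 = 0xffff;
    cs.limit  = 0xf;
    cs.g      = 1;

    uint32_t raw_limit = ((uint32_t)cs.limit << 16) | cs.limit0;
    uint64_t bytes = ((uint64_t)raw_limit + 1) * (cs.g ? 4096 : 1);
    printf("segment spans %llu bytes (%.0f GiB)\n",
           (unsigned long long)bytes, bytes / (1024.0 * 1024 * 1024));
    return 0;
}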
27222diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
27223index c8e41e9..64049ef 100644
27224--- a/arch/x86/kernel/reboot_fixups_32.c
27225+++ b/arch/x86/kernel/reboot_fixups_32.c
27226@@ -57,7 +57,7 @@ struct device_fixup {
27227 unsigned int vendor;
27228 unsigned int device;
27229 void (*reboot_fixup)(struct pci_dev *);
27230-};
27231+} __do_const;
27232
27233 /*
27234 * PCI ids solely used for fixups_table go here
27235diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
27236index 3fd2c69..a444264 100644
27237--- a/arch/x86/kernel/relocate_kernel_64.S
27238+++ b/arch/x86/kernel/relocate_kernel_64.S
27239@@ -96,8 +96,7 @@ relocate_kernel:
27240
27241 /* jump to identity mapped page */
27242 addq $(identity_mapped - relocate_kernel), %r8
27243- pushq %r8
27244- ret
27245+ jmp *%r8
27246
27247 identity_mapped:
27248 /* set return address to 0 if not preserving context */
27249diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27250index 78a0e62..5c2e510 100644
27251--- a/arch/x86/kernel/setup.c
27252+++ b/arch/x86/kernel/setup.c
27253@@ -110,6 +110,7 @@
27254 #include <asm/mce.h>
27255 #include <asm/alternative.h>
27256 #include <asm/prom.h>
27257+#include <asm/boot.h>
27258
27259 /*
27260 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27261@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
27262 #endif
27263
27264
27265-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27266-__visible unsigned long mmu_cr4_features;
27267+#ifdef CONFIG_X86_64
27268+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27269+#elif defined(CONFIG_X86_PAE)
27270+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27271 #else
27272-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27273+__visible unsigned long mmu_cr4_features __read_only;
27274 #endif
27275
27276+void set_in_cr4(unsigned long mask)
27277+{
27278+ unsigned long cr4 = read_cr4();
27279+
27280+ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
27281+ return;
27282+
27283+ pax_open_kernel();
27284+ mmu_cr4_features |= mask;
27285+ pax_close_kernel();
27286+
27287+ if (trampoline_cr4_features)
27288+ *trampoline_cr4_features = mmu_cr4_features;
27289+ cr4 |= mask;
27290+ write_cr4(cr4);
27291+}
27292+EXPORT_SYMBOL(set_in_cr4);
27293+
27294+void clear_in_cr4(unsigned long mask)
27295+{
27296+ unsigned long cr4 = read_cr4();
27297+
27298+ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
27299+ return;
27300+
27301+ pax_open_kernel();
27302+ mmu_cr4_features &= ~mask;
27303+ pax_close_kernel();
27304+
27305+ if (trampoline_cr4_features)
27306+ *trampoline_cr4_features = mmu_cr4_features;
27307+ cr4 &= ~mask;
27308+ write_cr4(cr4);
27309+}
27310+EXPORT_SYMBOL(clear_in_cr4);
27311+
27312 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27313 int bootloader_type, bootloader_version;
27314
27315@@ -772,7 +811,7 @@ static void __init trim_bios_range(void)
27316 * area (640->1Mb) as ram even though it is not.
27317 * take them out.
27318 */
27319- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27320+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27321
27322 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27323 }
27324@@ -780,7 +819,7 @@ static void __init trim_bios_range(void)
27325 /* called before trim_bios_range() to spare extra sanitize */
27326 static void __init e820_add_kernel_range(void)
27327 {
27328- u64 start = __pa_symbol(_text);
27329+ u64 start = __pa_symbol(ktla_ktva(_text));
27330 u64 size = __pa_symbol(_end) - start;
27331
27332 /*
27333@@ -856,8 +895,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27334
27335 void __init setup_arch(char **cmdline_p)
27336 {
27337+#ifdef CONFIG_X86_32
27338+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27339+#else
27340 memblock_reserve(__pa_symbol(_text),
27341 (unsigned long)__bss_stop - (unsigned long)_text);
27342+#endif
27343
27344 early_reserve_initrd();
27345
27346@@ -946,14 +989,14 @@ void __init setup_arch(char **cmdline_p)
27347
27348 if (!boot_params.hdr.root_flags)
27349 root_mountflags &= ~MS_RDONLY;
27350- init_mm.start_code = (unsigned long) _text;
27351- init_mm.end_code = (unsigned long) _etext;
27352+ init_mm.start_code = ktla_ktva((unsigned long) _text);
27353+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
27354 init_mm.end_data = (unsigned long) _edata;
27355 init_mm.brk = _brk_end;
27356
27357- code_resource.start = __pa_symbol(_text);
27358- code_resource.end = __pa_symbol(_etext)-1;
27359- data_resource.start = __pa_symbol(_etext);
27360+ code_resource.start = __pa_symbol(ktla_ktva(_text));
27361+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27362+ data_resource.start = __pa_symbol(_sdata);
27363 data_resource.end = __pa_symbol(_edata)-1;
27364 bss_resource.start = __pa_symbol(__bss_start);
27365 bss_resource.end = __pa_symbol(__bss_stop)-1;
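Editor's note: the new set_in_cr4()/clear_in_cr4() keep mmu_cr4_features as a normally read-only shadow of CR4, opening the kernel mapping only for the brief shadow update. A mock of that read-modify-write pattern with an ordinary variable standing in for the control register (pax_open_kernel() and the real CR4 accessors are kernel-internal and omitted):

#include <stdio.h>

static unsigned long fake_cr4;      /* stands in for the real register */
static unsigned long cr4_shadow;    /* stands in for mmu_cr4_features  */

static void set_bits(unsigned long mask)
{
    unsigned long cr4 = fake_cr4;               /* read_cr4()          */
    if ((cr4 & mask) == mask && cr4 == cr4_shadow)
        return;                                 /* already set, shadow in sync */
    cr4_shadow |= mask;                         /* bracketed by pax_open/close_kernel() in the hunk */
    fake_cr4 = cr4 | mask;                      /* write_cr4()         */
}

static void clear_bits(unsigned long mask)
{
    unsigned long cr4 = fake_cr4;
    if (!(cr4 & mask) && cr4 == cr4_shadow)
        return;
    cr4_shadow &= ~mask;
    fake_cr4 = cr4 & ~mask;
}

int main(void)
{
    set_bits(1UL << 5);                         /* e.g. X86_CR4_PAE */
    set_bits(1UL << 7);                         /* e.g. X86_CR4_PGE */
    clear_bits(1UL << 5);
    printf("cr4=%#lx shadow=%#lx\n", fake_cr4, cr4_shadow);
    return 0;
}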
27366diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27367index 5cdff03..80fa283 100644
27368--- a/arch/x86/kernel/setup_percpu.c
27369+++ b/arch/x86/kernel/setup_percpu.c
27370@@ -21,19 +21,17 @@
27371 #include <asm/cpu.h>
27372 #include <asm/stackprotector.h>
27373
27374-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27375+#ifdef CONFIG_SMP
27376+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27377 EXPORT_PER_CPU_SYMBOL(cpu_number);
27378+#endif
27379
27380-#ifdef CONFIG_X86_64
27381 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27382-#else
27383-#define BOOT_PERCPU_OFFSET 0
27384-#endif
27385
27386 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27387 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27388
27389-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27390+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27391 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27392 };
27393 EXPORT_SYMBOL(__per_cpu_offset);
27394@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27395 {
27396 #ifdef CONFIG_NEED_MULTIPLE_NODES
27397 pg_data_t *last = NULL;
27398- unsigned int cpu;
27399+ int cpu;
27400
27401 for_each_possible_cpu(cpu) {
27402 int node = early_cpu_to_node(cpu);
27403@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27404 {
27405 #ifdef CONFIG_X86_32
27406 struct desc_struct gdt;
27407+ unsigned long base = per_cpu_offset(cpu);
27408
27409- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27410- 0x2 | DESCTYPE_S, 0x8);
27411- gdt.s = 1;
27412+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27413+ 0x83 | DESCTYPE_S, 0xC);
27414 write_gdt_entry(get_cpu_gdt_table(cpu),
27415 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27416 #endif
27417@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27418 /* alrighty, percpu areas up and running */
27419 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27420 for_each_possible_cpu(cpu) {
27421+#ifdef CONFIG_CC_STACKPROTECTOR
27422+#ifdef CONFIG_X86_32
27423+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
27424+#endif
27425+#endif
27426 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27427 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27428 per_cpu(cpu_number, cpu) = cpu;
27429@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27430 */
27431 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27432 #endif
27433+#ifdef CONFIG_CC_STACKPROTECTOR
27434+#ifdef CONFIG_X86_32
27435+ if (!cpu)
27436+ per_cpu(stack_canary.canary, cpu) = canary;
27437+#endif
27438+#endif
27439 /*
27440 * Up to this point, the boot CPU has been using .init.data
27441 * area. Reload any changed state for the boot CPU.
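Editor's note: setup_percpu_segment() stops using a flat 0xFFFFF limit and instead sizes the per-CPU segment to end at VMALLOC_END, page-granular (the 0xC flags nibble sets G and D). The limit arithmetic, worked through with made-up addresses — VMALLOC_END and base below are illustrative values, not the kernel's:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t VMALLOC_END = 0xffc00000;      /* made-up 32-bit layout */
    uint32_t base        = 0xf7400000;      /* pretend per_cpu_offset(cpu) */

    /* With G=1 the highest addressable byte is base + (limit+1)*4096 - 1,
     * so this limit makes the segment end exactly at VMALLOC_END. */
    uint32_t limit = (VMALLOC_END - base - 1) >> 12;   /* PAGE_SHIFT */

    uint64_t end = (uint64_t)base + ((uint64_t)limit + 1) * 4096;
    printf("limit=%#x -> segment covers [%#x, %#llx)\n",
           limit, base, (unsigned long long)end);
    return 0;
}

Capping the segment at VMALLOC_END instead of 4 GiB means stray %fs-relative accesses past the per-CPU area fault instead of silently wrapping through the rest of the address space.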
27442diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27443index 2851d63..83bf567 100644
27444--- a/arch/x86/kernel/signal.c
27445+++ b/arch/x86/kernel/signal.c
27446@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
27447 * Align the stack pointer according to the i386 ABI,
27448 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27449 */
27450- sp = ((sp + 4) & -16ul) - 4;
27451+ sp = ((sp - 12) & -16ul) - 4;
27452 #else /* !CONFIG_X86_32 */
27453 sp = round_down(sp, 16) - 8;
27454 #endif
27455@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27456 }
27457
27458 if (current->mm->context.vdso)
27459- restorer = current->mm->context.vdso +
27460- selected_vdso32->sym___kernel_sigreturn;
27461+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
27462 else
27463- restorer = &frame->retcode;
27464+ restorer = (void __user *)&frame->retcode;
27465 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27466 restorer = ksig->ka.sa.sa_restorer;
27467
27468@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27469 * reasons and because gdb uses it as a signature to notice
27470 * signal handler stack frames.
27471 */
27472- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
27473+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
27474
27475 if (err)
27476 return -EFAULT;
27477@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27478 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
27479
27480 /* Set up to return from userspace. */
27481- restorer = current->mm->context.vdso +
27482- selected_vdso32->sym___kernel_rt_sigreturn;
27483+ if (current->mm->context.vdso)
27484+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
27485+ else
27486+ restorer = (void __user *)&frame->retcode;
27487 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27488 restorer = ksig->ka.sa.sa_restorer;
27489 put_user_ex(restorer, &frame->pretcode);
27490@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27491 * reasons and because gdb uses it as a signature to notice
27492 * signal handler stack frames.
27493 */
27494- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
27495+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
27496 } put_user_catch(err);
27497
27498 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
27499@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27500 {
27501 int usig = signr_convert(ksig->sig);
27502 sigset_t *set = sigmask_to_save();
27503- compat_sigset_t *cset = (compat_sigset_t *) set;
27504+ sigset_t sigcopy;
27505+ compat_sigset_t *cset;
27506+
27507+ sigcopy = *set;
27508+
27509+ cset = (compat_sigset_t *) &sigcopy;
27510
27511 /* Set up the stack frame */
27512 if (is_ia32_frame()) {
27513@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27514 } else if (is_x32_frame()) {
27515 return x32_setup_rt_frame(ksig, cset, regs);
27516 } else {
27517- return __setup_rt_frame(ksig->sig, ksig, set, regs);
27518+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
27519 }
27520 }
27521
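Editor's note: align_sigframe() must return an sp with ((sp + 4) & 15) == 0 — the i386 ABI has esp+4 16-aligned at function entry — while never moving the pointer up into live stack. The old ((sp + 4) & -16ul) - 4 can return sp itself; the patched ((sp - 12) & -16ul) - 4 always strictly descends. A quick check over all residues (pure arithmetic):

#include <stdio.h>

int main(void)
{
    for (unsigned long sp = 4096; sp < 4096 + 16; sp++) {
        unsigned long old = ((sp + 4) & -16ul) - 4;   /* pre-patch  */
        unsigned long new = ((sp - 12) & -16ul) - 4;  /* post-patch */

        /* both satisfy the ABI alignment... */
        if (((old + 4) & 15) || ((new + 4) & 15))
            printf("alignment broken at sp=%lu\n", sp);
        /* ...but only the new form always strictly descends */
        if (old > sp || new >= sp)
            printf("descent violated at sp=%lu old=%lu new=%lu\n", sp, old, new);
    }
    printf("new form always lands 16..31 bytes below sp\n");
    return 0;
}

Neither diagnostic fires: the old form lands 0..15 bytes below sp (possibly exactly at it), while the patched form lands 16..31 bytes below, leaving headroom above the frame.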
27522diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
27523index be8e1bd..a3d93fa 100644
27524--- a/arch/x86/kernel/smp.c
27525+++ b/arch/x86/kernel/smp.c
27526@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
27527
27528 __setup("nonmi_ipi", nonmi_ipi_setup);
27529
27530-struct smp_ops smp_ops = {
27531+struct smp_ops smp_ops __read_only = {
27532 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
27533 .smp_prepare_cpus = native_smp_prepare_cpus,
27534 .smp_cpus_done = native_smp_cpus_done,
27535diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
27536index 5492798..a3bd4f2 100644
27537--- a/arch/x86/kernel/smpboot.c
27538+++ b/arch/x86/kernel/smpboot.c
27539@@ -230,14 +230,17 @@ static void notrace start_secondary(void *unused)
27540
27541 enable_start_cpu0 = 0;
27542
27543-#ifdef CONFIG_X86_32
27544+ /* otherwise gcc will move up smp_processor_id before the cpu_init */
27545+ barrier();
27546+
27547 /* switch away from the initial page table */
27548+#ifdef CONFIG_PAX_PER_CPU_PGD
27549+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
27550+#else
27551 load_cr3(swapper_pg_dir);
27552+#endif
27553 __flush_tlb_all();
27554-#endif
27555
27556- /* otherwise gcc will move up smp_processor_id before the cpu_init */
27557- barrier();
27558 /*
27559 * Check TSC synchronization with the BP:
27560 */
27561@@ -764,8 +767,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27562 alternatives_enable_smp();
27563
27564 idle->thread.sp = (unsigned long) (((struct pt_regs *)
27565- (THREAD_SIZE + task_stack_page(idle))) - 1);
27566+ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
27567 per_cpu(current_task, cpu) = idle;
27568+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27569
27570 #ifdef CONFIG_X86_32
27571 /* Stack for startup_32 can be just as for start_secondary onwards */
27572@@ -774,10 +778,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27573 clear_tsk_thread_flag(idle, TIF_FORK);
27574 initial_gs = per_cpu_offset(cpu);
27575 #endif
27576- per_cpu(kernel_stack, cpu) =
27577- (unsigned long)task_stack_page(idle) -
27578- KERNEL_STACK_OFFSET + THREAD_SIZE;
27579+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27580+ pax_open_kernel();
27581 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
27582+ pax_close_kernel();
27583 initial_code = (unsigned long)start_secondary;
27584 stack_start = idle->thread.sp;
27585
27586@@ -923,6 +927,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
27587 /* the FPU context is blank, nobody can own it */
27588 __cpu_disable_lazy_restore(cpu);
27589
27590+#ifdef CONFIG_PAX_PER_CPU_PGD
27591+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
27592+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27593+ KERNEL_PGD_PTRS);
27594+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
27595+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27596+ KERNEL_PGD_PTRS);
27597+#endif
27598+
27599 err = do_boot_cpu(apicid, cpu, tidle);
27600 if (err) {
27601 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
27602diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
27603index 9b4d51d..5d28b58 100644
27604--- a/arch/x86/kernel/step.c
27605+++ b/arch/x86/kernel/step.c
27606@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27607 struct desc_struct *desc;
27608 unsigned long base;
27609
27610- seg &= ~7UL;
27611+ seg >>= 3;
27612
27613 mutex_lock(&child->mm->context.lock);
27614- if (unlikely((seg >> 3) >= child->mm->context.size))
27615+ if (unlikely(seg >= child->mm->context.size))
27616 addr = -1L; /* bogus selector, access would fault */
27617 else {
27618 desc = child->mm->context.ldt + seg;
27619@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27620 addr += base;
27621 }
27622 mutex_unlock(&child->mm->context.lock);
27623- }
27624+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
27625+ addr = ktla_ktva(addr);
27626
27627 return addr;
27628 }
27629@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
27630 unsigned char opcode[15];
27631 unsigned long addr = convert_ip_to_linear(child, regs);
27632
27633+ if (addr == -1L)
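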
27634+ return 0;
27635+
27636 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
27637 for (i = 0; i < copied; i++) {
27638 switch (opcode[i]) {
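Editor's note: the step.c change replaces masking off the low three bits (seg &= ~7UL, which keeps the byte offset index*8) with seg >>= 3, so seg becomes a direct LDT entry index before the bounds check against context.size (which counts entries) and the ldt + seg arithmetic. How a selector's bits decompose, in plain C with no kernel dependencies:

#include <stdio.h>

int main(void)
{
    unsigned short sel = 0x004f;        /* example selector: 0b1001111 */

    unsigned index = sel >> 3;          /* descriptor table index      */
    unsigned ti    = (sel >> 2) & 1;    /* table indicator: 0=GDT 1=LDT */
    unsigned rpl   = sel & 3;           /* requested privilege level   */

    printf("sel=%#06x -> index=%u table=%s rpl=%u\n",
           sel, index, ti ? "LDT" : "GDT", rpl);   /* index=9 LDT rpl=3 */
    return 0;
}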
27639diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
27640new file mode 100644
27641index 0000000..5877189
27642--- /dev/null
27643+++ b/arch/x86/kernel/sys_i386_32.c
27644@@ -0,0 +1,189 @@
27645+/*
27646+ * This file contains various random system calls that
27647+ * have a non-standard calling sequence on the Linux/i386
27648+ * platform.
27649+ */
27650+
27651+#include <linux/errno.h>
27652+#include <linux/sched.h>
27653+#include <linux/mm.h>
27654+#include <linux/fs.h>
27655+#include <linux/smp.h>
27656+#include <linux/sem.h>
27657+#include <linux/msg.h>
27658+#include <linux/shm.h>
27659+#include <linux/stat.h>
27660+#include <linux/syscalls.h>
27661+#include <linux/mman.h>
27662+#include <linux/file.h>
27663+#include <linux/utsname.h>
27664+#include <linux/ipc.h>
27665+#include <linux/elf.h>
27666+
27667+#include <linux/uaccess.h>
27668+#include <linux/unistd.h>
27669+
27670+#include <asm/syscalls.h>
27671+
27672+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
27673+{
27674+ unsigned long pax_task_size = TASK_SIZE;
27675+
27676+#ifdef CONFIG_PAX_SEGMEXEC
27677+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
27678+ pax_task_size = SEGMEXEC_TASK_SIZE;
27679+#endif
27680+
27681+ if (flags & MAP_FIXED)
27682+ if (len > pax_task_size || addr > pax_task_size - len)
27683+ return -EINVAL;
27684+
27685+ return 0;
27686+}
27687+
27688+/*
27689+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
27690+ */
27691+static unsigned long get_align_mask(void)
27692+{
27693+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
27694+ return 0;
27695+
27696+ if (!(current->flags & PF_RANDOMIZE))
27697+ return 0;
27698+
27699+ return va_align.mask;
27700+}
27701+
27702+unsigned long
27703+arch_get_unmapped_area(struct file *filp, unsigned long addr,
27704+ unsigned long len, unsigned long pgoff, unsigned long flags)
27705+{
27706+ struct mm_struct *mm = current->mm;
27707+ struct vm_area_struct *vma;
27708+ unsigned long pax_task_size = TASK_SIZE;
27709+ struct vm_unmapped_area_info info;
27710+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27711+
27712+#ifdef CONFIG_PAX_SEGMEXEC
27713+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27714+ pax_task_size = SEGMEXEC_TASK_SIZE;
27715+#endif
27716+
27717+ pax_task_size -= PAGE_SIZE;
27718+
27719+ if (len > pax_task_size)
27720+ return -ENOMEM;
27721+
27722+ if (flags & MAP_FIXED)
27723+ return addr;
27724+
27725+#ifdef CONFIG_PAX_RANDMMAP
27726+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27727+#endif
27728+
27729+ if (addr) {
27730+ addr = PAGE_ALIGN(addr);
27731+ if (pax_task_size - len >= addr) {
27732+ vma = find_vma(mm, addr);
27733+ if (check_heap_stack_gap(vma, addr, len, offset))
27734+ return addr;
27735+ }
27736+ }
27737+
27738+ info.flags = 0;
27739+ info.length = len;
27740+ info.align_mask = filp ? get_align_mask() : 0;
27741+ info.align_offset = pgoff << PAGE_SHIFT;
27742+ info.threadstack_offset = offset;
27743+
27744+#ifdef CONFIG_PAX_PAGEEXEC
27745+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
27746+ info.low_limit = 0x00110000UL;
27747+ info.high_limit = mm->start_code;
27748+
27749+#ifdef CONFIG_PAX_RANDMMAP
27750+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27751+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
27752+#endif
27753+
27754+ if (info.low_limit < info.high_limit) {
27755+ addr = vm_unmapped_area(&info);
27756+ if (!IS_ERR_VALUE(addr))
27757+ return addr;
27758+ }
27759+ } else
27760+#endif
27761+
27762+ info.low_limit = mm->mmap_base;
27763+ info.high_limit = pax_task_size;
27764+
27765+ return vm_unmapped_area(&info);
27766+}
27767+
27768+unsigned long
27769+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27770+ const unsigned long len, const unsigned long pgoff,
27771+ const unsigned long flags)
27772+{
27773+ struct vm_area_struct *vma;
27774+ struct mm_struct *mm = current->mm;
27775+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
27776+ struct vm_unmapped_area_info info;
27777+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27778+
27779+#ifdef CONFIG_PAX_SEGMEXEC
27780+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27781+ pax_task_size = SEGMEXEC_TASK_SIZE;
27782+#endif
27783+
27784+ pax_task_size -= PAGE_SIZE;
27785+
27786+ /* requested length too big for entire address space */
27787+ if (len > pax_task_size)
27788+ return -ENOMEM;
27789+
27790+ if (flags & MAP_FIXED)
27791+ return addr;
27792+
27793+#ifdef CONFIG_PAX_PAGEEXEC
27794+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
27795+ goto bottomup;
27796+#endif
27797+
27798+#ifdef CONFIG_PAX_RANDMMAP
27799+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27800+#endif
27801+
27802+ /* requesting a specific address */
27803+ if (addr) {
27804+ addr = PAGE_ALIGN(addr);
27805+ if (pax_task_size - len >= addr) {
27806+ vma = find_vma(mm, addr);
27807+ if (check_heap_stack_gap(vma, addr, len, offset))
27808+ return addr;
27809+ }
27810+ }
27811+
27812+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
27813+ info.length = len;
27814+ info.low_limit = PAGE_SIZE;
27815+ info.high_limit = mm->mmap_base;
27816+ info.align_mask = filp ? get_align_mask() : 0;
27817+ info.align_offset = pgoff << PAGE_SHIFT;
27818+ info.threadstack_offset = offset;
27819+
27820+ addr = vm_unmapped_area(&info);
27821+ if (!(addr & ~PAGE_MASK))
27822+ return addr;
27823+ VM_BUG_ON(addr != -ENOMEM);
27824+
27825+bottomup:
27826+ /*
27827+ * A failed mmap() very likely causes application failure,
27828+ * so fall back to the bottom-up function here. This scenario
27829+ * can happen with large stack limits and large mmap()
27830+ * allocations.
27831+ */
27832+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
27833+}
27834diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
27835index 30277e2..5664a29 100644
27836--- a/arch/x86/kernel/sys_x86_64.c
27837+++ b/arch/x86/kernel/sys_x86_64.c
27838@@ -81,8 +81,8 @@ out:
27839 return error;
27840 }
27841
27842-static void find_start_end(unsigned long flags, unsigned long *begin,
27843- unsigned long *end)
27844+static void find_start_end(struct mm_struct *mm, unsigned long flags,
27845+ unsigned long *begin, unsigned long *end)
27846 {
27847 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
27848 unsigned long new_begin;
27849@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
27850 *begin = new_begin;
27851 }
27852 } else {
27853- *begin = current->mm->mmap_legacy_base;
27854+ *begin = mm->mmap_legacy_base;
27855 *end = TASK_SIZE;
27856 }
27857 }
27858@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27859 struct vm_area_struct *vma;
27860 struct vm_unmapped_area_info info;
27861 unsigned long begin, end;
27862+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27863
27864 if (flags & MAP_FIXED)
27865 return addr;
27866
27867- find_start_end(flags, &begin, &end);
27868+ find_start_end(mm, flags, &begin, &end);
27869
27870 if (len > end)
27871 return -ENOMEM;
27872
27873+#ifdef CONFIG_PAX_RANDMMAP
27874+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27875+#endif
27876+
27877 if (addr) {
27878 addr = PAGE_ALIGN(addr);
27879 vma = find_vma(mm, addr);
27880- if (end - len >= addr &&
27881- (!vma || addr + len <= vma->vm_start))
27882+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27883 return addr;
27884 }
27885
27886@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27887 info.high_limit = end;
27888 info.align_mask = filp ? get_align_mask() : 0;
27889 info.align_offset = pgoff << PAGE_SHIFT;
27890+ info.threadstack_offset = offset;
27891 return vm_unmapped_area(&info);
27892 }
27893
27894@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27895 struct mm_struct *mm = current->mm;
27896 unsigned long addr = addr0;
27897 struct vm_unmapped_area_info info;
27898+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27899
27900 /* requested length too big for entire address space */
27901 if (len > TASK_SIZE)
27902@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27903 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
27904 goto bottomup;
27905
27906+#ifdef CONFIG_PAX_RANDMMAP
27907+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27908+#endif
27909+
27910 /* requesting a specific address */
27911 if (addr) {
27912 addr = PAGE_ALIGN(addr);
27913 vma = find_vma(mm, addr);
27914- if (TASK_SIZE - len >= addr &&
27915- (!vma || addr + len <= vma->vm_start))
27916+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27917 return addr;
27918 }
27919
27920@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27921 info.high_limit = mm->mmap_base;
27922 info.align_mask = filp ? get_align_mask() : 0;
27923 info.align_offset = pgoff << PAGE_SHIFT;
27924+ info.threadstack_offset = offset;
27925 addr = vm_unmapped_area(&info);
27926 if (!(addr & ~PAGE_MASK))
27927 return addr;
27928diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
27929index 91a4496..bb87552 100644
27930--- a/arch/x86/kernel/tboot.c
27931+++ b/arch/x86/kernel/tboot.c
27932@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
27933
27934 void tboot_shutdown(u32 shutdown_type)
27935 {
27936- void (*shutdown)(void);
27937+ void (* __noreturn shutdown)(void);
27938
27939 if (!tboot_enabled())
27940 return;
27941@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
27942
27943 switch_to_tboot_pt();
27944
27945- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
27946+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
27947 shutdown();
27948
27949 /* should not reach here */
27950@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
27951 return -ENODEV;
27952 }
27953
27954-static atomic_t ap_wfs_count;
27955+static atomic_unchecked_t ap_wfs_count;
27956
27957 static int tboot_wait_for_aps(int num_aps)
27958 {
27959@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
27960 {
27961 switch (action) {
27962 case CPU_DYING:
27963- atomic_inc(&ap_wfs_count);
27964+ atomic_inc_unchecked(&ap_wfs_count);
27965 if (num_online_cpus() == 1)
27966- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
27967+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
27968 return NOTIFY_BAD;
27969 break;
27970 }
27971@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
27972
27973 tboot_create_trampoline();
27974
27975- atomic_set(&ap_wfs_count, 0);
27976+ atomic_set_unchecked(&ap_wfs_count, 0);
27977 register_hotcpu_notifier(&tboot_cpu_notifier);
27978
27979 #ifdef CONFIG_DEBUG_FS
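Editor's note: under PAX_REFCOUNT every plain atomic_inc() gains overflow instrumentation; counters such as ap_wfs_count, where wrap is harmless and not attacker-controlled, are annotated _unchecked to opt out. The idea behind the checked variant, sketched with a GCC/Clang builtin rather than the plugin-instrumented asm the patch actually relies on:

#include <stdio.h>
#include <limits.h>

/* "checked" increment: refuse to wrap, roughly what the PaX overflow
 * handler prevents for reference counts (use-after-free via wrap). */
static int checked_inc(int *counter)
{
    int next;
    if (__builtin_add_overflow(*counter, 1, &next)) {
        fprintf(stderr, "refcount overflow detected, leaking object instead\n");
        return -1;                    /* the kernel would kill the offender */
    }
    *counter = next;
    return 0;
}

/* "unchecked" increment: wrapping is acceptable for plain statistics. */
static void unchecked_inc(unsigned *counter)
{
    (*counter)++;                     /* wraps to 0 at UINT_MAX; harmless here */
}

int main(void)
{
    int refs = INT_MAX;
    checked_inc(&refs);               /* trips the overflow report, refs unchanged */

    unsigned stats = UINT_MAX;
    unchecked_inc(&stats);            /* silently wraps to 0 */
    printf("stats=%u refs=%d\n", stats, refs);
    return 0;
}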
27980diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
27981index bf7ef5c..59d0ac9 100644
27982--- a/arch/x86/kernel/time.c
27983+++ b/arch/x86/kernel/time.c
27984@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
27985 {
27986 unsigned long pc = instruction_pointer(regs);
27987
27988- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
27989+ if (!user_mode(regs) && in_lock_functions(pc)) {
27990 #ifdef CONFIG_FRAME_POINTER
27991- return *(unsigned long *)(regs->bp + sizeof(long));
27992+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
27993 #else
27994 unsigned long *sp =
27995 (unsigned long *)kernel_stack_pointer(regs);
27996@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
27997 * or above a saved flags. Eflags has bits 22-31 zero,
27998 * kernel addresses don't.
27999 */
28000+
28001+#ifdef CONFIG_PAX_KERNEXEC
28002+ return ktla_ktva(sp[0]);
28003+#else
28004 if (sp[0] >> 22)
28005 return sp[0];
28006 if (sp[1] >> 22)
28007 return sp[1];
28008 #endif
28009+
28010+#endif
28011 }
28012 return pc;
28013 }
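Editor's note: profile_pc()'s fallback scans the stack for a return address, and the in-tree comment gives the discriminator: a saved EFLAGS word has bits 22-31 clear, while any default i386 kernel text address (at or above 0xc0000000) has some of them set, so v >> 22 being nonzero marks an address. A two-value demonstration (the kernel address below is made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t words[] = {
        0x00000246,   /* typical saved EFLAGS: IF|ZF|PF plus reserved bit 1 */
        0xc10013b7,   /* made-up i386 kernel text address */
    };
    for (int i = 0; i < 2; i++)
        printf("%#010x: bits 22-31 %s -> treated as %s\n",
               words[i],
               (words[i] >> 22) ? "set" : "clear",
               (words[i] >> 22) ? "return address" : "EFLAGS");
    return 0;
}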
28014diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
28015index f7fec09..9991981 100644
28016--- a/arch/x86/kernel/tls.c
28017+++ b/arch/x86/kernel/tls.c
28018@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
28019 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
28020 return -EINVAL;
28021
28022+#ifdef CONFIG_PAX_SEGMEXEC
28023+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
28024+ return -EINVAL;
28025+#endif
28026+
28027 set_tls_desc(p, idx, &info, 1);
28028
28029 return 0;
28030@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
28031
28032 if (kbuf)
28033 info = kbuf;
28034- else if (__copy_from_user(infobuf, ubuf, count))
28035+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
28036 return -EFAULT;
28037 else
28038 info = infobuf;
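Editor's note: the tls.c fix adds count > sizeof infobuf before __copy_from_user(), since a regset caller controls count while the kernel buffer is fixed-size. The shape of that check in plain C — memcpy stands in for __copy_from_user(), and this struct user_desc is a mock, not the real uapi layout:

#include <stdio.h>
#include <string.h>
#include <errno.h>

struct user_desc { int fields[4]; };          /* mock descriptor record */

static int load_descriptors(void *dst, size_t dst_size,
                            const void *src, size_t count)
{
    /* Reject rather than truncate: an oversized request means the
     * caller's idea of the record format disagrees with ours. */
    if (count > dst_size)
        return -EFAULT;
    memcpy(dst, src, count);                  /* __copy_from_user() in-kernel */
    return 0;
}

int main(void)
{
    struct user_desc infobuf[4];
    char payload[1024] = {0};

    printf("sane copy: %d\n",
           load_descriptors(infobuf, sizeof infobuf, payload, sizeof infobuf));
    printf("oversized copy: %d\n",
           load_descriptors(infobuf, sizeof infobuf, payload, sizeof payload));
    return 0;
}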
28039diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
28040index 1c113db..287b42e 100644
28041--- a/arch/x86/kernel/tracepoint.c
28042+++ b/arch/x86/kernel/tracepoint.c
28043@@ -9,11 +9,11 @@
28044 #include <linux/atomic.h>
28045
28046 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
28047-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
28048+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
28049 (unsigned long) trace_idt_table };
28050
28051 /* No need to be aligned, but done to keep all IDTs defined the same way. */
28052-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
28053+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
28054
28055 static int trace_irq_vector_refcount;
28056 static DEFINE_MUTEX(irq_vector_mutex);
28057diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
28058index 0d0e922..0886373 100644
28059--- a/arch/x86/kernel/traps.c
28060+++ b/arch/x86/kernel/traps.c
28061@@ -67,7 +67,7 @@
28062 #include <asm/proto.h>
28063
28064 /* No need to be aligned, but done to keep all IDTs defined the same way. */
28065-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
28066+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
28067 #else
28068 #include <asm/processor-flags.h>
28069 #include <asm/setup.h>
28070@@ -76,7 +76,7 @@ asmlinkage int system_call(void);
28071 #endif
28072
28073 /* Must be page-aligned because the real IDT is used in a fixmap. */
28074-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
28075+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
28076
28077 DECLARE_BITMAP(used_vectors, NR_VECTORS);
28078 EXPORT_SYMBOL_GPL(used_vectors);
28079@@ -108,11 +108,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
28080 }
28081
28082 static nokprobe_inline int
28083-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
28084+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
28085 struct pt_regs *regs, long error_code)
28086 {
28087 #ifdef CONFIG_X86_32
28088- if (regs->flags & X86_VM_MASK) {
28089+ if (v8086_mode(regs)) {
28090 /*
28091 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
28092 * On nmi (interrupt 2), do_trap should not be called.
28093@@ -125,12 +125,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
28094 return -1;
28095 }
28096 #endif
28097- if (!user_mode(regs)) {
28098+ if (!user_mode_novm(regs)) {
28099 if (!fixup_exception(regs)) {
28100 tsk->thread.error_code = error_code;
28101 tsk->thread.trap_nr = trapnr;
28102+
28103+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28104+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
28105+ str = "PAX: suspicious stack segment fault";
28106+#endif
28107+
28108 die(str, regs, error_code);
28109 }
28110+
28111+#ifdef CONFIG_PAX_REFCOUNT
28112+ if (trapnr == X86_TRAP_OF)
28113+ pax_report_refcount_overflow(regs);
28114+#endif
28115+
28116 return 0;
28117 }
28118
28119@@ -169,7 +181,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
28120 }
28121
28122 static void
28123-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28124+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
28125 long error_code, siginfo_t *info)
28126 {
28127 struct task_struct *tsk = current;
28128@@ -193,7 +205,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28129 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
28130 printk_ratelimit()) {
28131 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
28132- tsk->comm, tsk->pid, str,
28133+ tsk->comm, task_pid_nr(tsk), str,
28134 regs->ip, regs->sp, error_code);
28135 print_vma_addr(" in ", regs->ip);
28136 pr_cont("\n");
28137@@ -266,6 +278,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
28138 tsk->thread.error_code = error_code;
28139 tsk->thread.trap_nr = X86_TRAP_DF;
28140
28141+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
28142+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
28143+ die("grsec: kernel stack overflow detected", regs, error_code);
28144+#endif
28145+
28146 #ifdef CONFIG_DOUBLEFAULT
28147 df_debug(regs, error_code);
28148 #endif
28149@@ -288,7 +305,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
28150 conditional_sti(regs);
28151
28152 #ifdef CONFIG_X86_32
28153- if (regs->flags & X86_VM_MASK) {
28154+ if (v8086_mode(regs)) {
28155 local_irq_enable();
28156 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
28157 goto exit;
28158@@ -296,18 +313,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
28159 #endif
28160
28161 tsk = current;
28162- if (!user_mode(regs)) {
28163+ if (!user_mode_novm(regs)) {
28164 if (fixup_exception(regs))
28165 goto exit;
28166
28167 tsk->thread.error_code = error_code;
28168 tsk->thread.trap_nr = X86_TRAP_GP;
28169 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
28170- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
28171+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
28172+
28173+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28174+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
28175+ die("PAX: suspicious general protection fault", regs, error_code);
28176+ else
28177+#endif
28178+
28179 die("general protection fault", regs, error_code);
28180+ }
28181 goto exit;
28182 }
28183
28184+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28185+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
28186+ struct mm_struct *mm = tsk->mm;
28187+ unsigned long limit;
28188+
28189+ down_write(&mm->mmap_sem);
28190+ limit = mm->context.user_cs_limit;
28191+ if (limit < TASK_SIZE) {
28192+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
28193+ up_write(&mm->mmap_sem);
28194+ return;
28195+ }
28196+ up_write(&mm->mmap_sem);
28197+ }
28198+#endif
28199+
28200 tsk->thread.error_code = error_code;
28201 tsk->thread.trap_nr = X86_TRAP_GP;
28202
28203@@ -481,7 +522,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28204 /* It's safe to allow irq's after DR6 has been saved */
28205 preempt_conditional_sti(regs);
28206
28207- if (regs->flags & X86_VM_MASK) {
28208+ if (v8086_mode(regs)) {
28209 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
28210 X86_TRAP_DB);
28211 preempt_conditional_cli(regs);
28212@@ -496,7 +537,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28213 * We already checked v86 mode above, so we can check for kernel mode
28214 * by just checking the CPL of CS.
28215 */
28216- if ((dr6 & DR_STEP) && !user_mode(regs)) {
28217+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
28218 tsk->thread.debugreg6 &= ~DR_STEP;
28219 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
28220 regs->flags &= ~X86_EFLAGS_TF;
28221@@ -529,7 +570,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
28222 return;
28223 conditional_sti(regs);
28224
28225- if (!user_mode_vm(regs))
28226+ if (!user_mode(regs))
28227 {
28228 if (!fixup_exception(regs)) {
28229 task->thread.error_code = error_code;
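Editor's note: the GRKERNSEC_KSTACKOVERFLOW check in do_double_fault() reads as follows: tsk->stack is the low end of the kernel stack (growth is downward toward it), so if sp has crossed below the base by at most a page, the double fault was almost certainly a stack overrun. Unsigned wraparound makes every in-range sp fail the test automatically. A demonstration with made-up addresses:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* stack_base is the LOW end of the stack. sp values above it wrap the
 * subtraction to a huge number and fail; only a small overrun passes. */
static int looks_like_stack_overflow(unsigned long stack_base, unsigned long sp)
{
    return stack_base - sp <= PAGE_SIZE;
}

int main(void)
{
    unsigned long base = 0xffff880012340000UL;   /* made-up stack base */

    printf("%d\n", looks_like_stack_overflow(base, base + 0x1000)); /* 0: healthy */
    printf("%d\n", looks_like_stack_overflow(base, base - 0x40));   /* 1: just overran */
    printf("%d\n", looks_like_stack_overflow(base, base - 0x10000));/* 0: unrelated fault */
    return 0;
}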
28230diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28231index ea03031..34a5cdda 100644
28232--- a/arch/x86/kernel/tsc.c
28233+++ b/arch/x86/kernel/tsc.c
28234@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28235 */
28236 smp_wmb();
28237
28238- ACCESS_ONCE(c2n->head) = data;
28239+ ACCESS_ONCE_RW(c2n->head) = data;
28240 }
28241
28242 /*
28243diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28244index 5d1cbfe..2a21feb 100644
28245--- a/arch/x86/kernel/uprobes.c
28246+++ b/arch/x86/kernel/uprobes.c
28247@@ -845,7 +845,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
28248 int ret = NOTIFY_DONE;
28249
28250 /* We are only interested in userspace traps */
28251- if (regs && !user_mode_vm(regs))
28252+ if (regs && !user_mode(regs))
28253 return NOTIFY_DONE;
28254
28255 switch (val) {
28256@@ -919,7 +919,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28257
28258 if (nleft != rasize) {
28259 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28260- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28261+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28262
28263 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28264 }
28265diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28266index b9242ba..50c5edd 100644
28267--- a/arch/x86/kernel/verify_cpu.S
28268+++ b/arch/x86/kernel/verify_cpu.S
28269@@ -20,6 +20,7 @@
28270 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28271 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28272 * arch/x86/kernel/head_32.S: processor startup
28273+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28274 *
28275 * verify_cpu, returns the status of longmode and SSE in register %eax.
28276 * 0: Success 1: Failure
28277diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28278index e8edcf5..27f9344 100644
28279--- a/arch/x86/kernel/vm86_32.c
28280+++ b/arch/x86/kernel/vm86_32.c
28281@@ -44,6 +44,7 @@
28282 #include <linux/ptrace.h>
28283 #include <linux/audit.h>
28284 #include <linux/stddef.h>
28285+#include <linux/grsecurity.h>
28286
28287 #include <asm/uaccess.h>
28288 #include <asm/io.h>
28289@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28290 do_exit(SIGSEGV);
28291 }
28292
28293- tss = &per_cpu(init_tss, get_cpu());
28294+ tss = init_tss + get_cpu();
28295 current->thread.sp0 = current->thread.saved_sp0;
28296 current->thread.sysenter_cs = __KERNEL_CS;
28297 load_sp0(tss, &current->thread);
28298@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28299
28300 if (tsk->thread.saved_sp0)
28301 return -EPERM;
28302+
28303+#ifdef CONFIG_GRKERNSEC_VM86
28304+ if (!capable(CAP_SYS_RAWIO)) {
28305+ gr_handle_vm86();
28306+ return -EPERM;
28307+ }
28308+#endif
28309+
28310 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28311 offsetof(struct kernel_vm86_struct, vm86plus) -
28312 sizeof(info.regs));
28313@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28314 int tmp;
28315 struct vm86plus_struct __user *v86;
28316
28317+#ifdef CONFIG_GRKERNSEC_VM86
28318+ if (!capable(CAP_SYS_RAWIO)) {
28319+ gr_handle_vm86();
28320+ return -EPERM;
28321+ }
28322+#endif
28323+
28324 tsk = current;
28325 switch (cmd) {
28326 case VM86_REQUEST_IRQ:
28327@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28328 tsk->thread.saved_fs = info->regs32->fs;
28329 tsk->thread.saved_gs = get_user_gs(info->regs32);
28330
28331- tss = &per_cpu(init_tss, get_cpu());
28332+ tss = init_tss + get_cpu();
28333 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28334 if (cpu_has_sep)
28335 tsk->thread.sysenter_cs = 0;
28336@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28337 goto cannot_handle;
28338 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28339 goto cannot_handle;
28340- intr_ptr = (unsigned long __user *) (i << 2);
28341+ intr_ptr = (__force unsigned long __user *) (i << 2);
28342 if (get_user(segoffs, intr_ptr))
28343 goto cannot_handle;
28344 if ((segoffs >> 16) == BIOSSEG)
28345diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28346index 49edf2d..c0d1362 100644
28347--- a/arch/x86/kernel/vmlinux.lds.S
28348+++ b/arch/x86/kernel/vmlinux.lds.S
28349@@ -26,6 +26,13 @@
28350 #include <asm/page_types.h>
28351 #include <asm/cache.h>
28352 #include <asm/boot.h>
28353+#include <asm/segment.h>
28354+
28355+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28356+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28357+#else
28358+#define __KERNEL_TEXT_OFFSET 0
28359+#endif
28360
28361 #undef i386 /* in case the preprocessor is a 32bit one */
28362
28363@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28364
28365 PHDRS {
28366 text PT_LOAD FLAGS(5); /* R_E */
28367+#ifdef CONFIG_X86_32
28368+ module PT_LOAD FLAGS(5); /* R_E */
28369+#endif
28370+#ifdef CONFIG_XEN
28371+ rodata PT_LOAD FLAGS(5); /* R_E */
28372+#else
28373+ rodata PT_LOAD FLAGS(4); /* R__ */
28374+#endif
28375 data PT_LOAD FLAGS(6); /* RW_ */
28376-#ifdef CONFIG_X86_64
28377+ init.begin PT_LOAD FLAGS(6); /* RW_ */
28378 #ifdef CONFIG_SMP
28379 percpu PT_LOAD FLAGS(6); /* RW_ */
28380 #endif
28381+ text.init PT_LOAD FLAGS(5); /* R_E */
28382+ text.exit PT_LOAD FLAGS(5); /* R_E */
28383 init PT_LOAD FLAGS(7); /* RWE */
28384-#endif
28385 note PT_NOTE FLAGS(0); /* ___ */
28386 }
28387
28388 SECTIONS
28389 {
28390 #ifdef CONFIG_X86_32
28391- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28392- phys_startup_32 = startup_32 - LOAD_OFFSET;
28393+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28394 #else
28395- . = __START_KERNEL;
28396- phys_startup_64 = startup_64 - LOAD_OFFSET;
28397+ . = __START_KERNEL;
28398 #endif
28399
28400 /* Text and read-only data */
28401- .text : AT(ADDR(.text) - LOAD_OFFSET) {
28402- _text = .;
28403+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28404 /* bootstrapping code */
28405+#ifdef CONFIG_X86_32
28406+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28407+#else
28408+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28409+#endif
28410+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28411+ _text = .;
28412 HEAD_TEXT
28413 . = ALIGN(8);
28414 _stext = .;
28415@@ -104,13 +124,47 @@ SECTIONS
28416 IRQENTRY_TEXT
28417 *(.fixup)
28418 *(.gnu.warning)
28419- /* End of text section */
28420- _etext = .;
28421 } :text = 0x9090
28422
28423- NOTES :text :note
28424+ . += __KERNEL_TEXT_OFFSET;
28425
28426- EXCEPTION_TABLE(16) :text = 0x9090
28427+#ifdef CONFIG_X86_32
28428+ . = ALIGN(PAGE_SIZE);
28429+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28430+
28431+#ifdef CONFIG_PAX_KERNEXEC
28432+ MODULES_EXEC_VADDR = .;
28433+ BYTE(0)
28434+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28435+ . = ALIGN(HPAGE_SIZE) - 1;
28436+ MODULES_EXEC_END = .;
28437+#endif
28438+
28439+ } :module
28440+#endif
28441+
28442+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28443+ /* End of text section */
28444+ BYTE(0)
28445+ _etext = . - __KERNEL_TEXT_OFFSET;
28446+ }
28447+
28448+#ifdef CONFIG_X86_32
28449+ . = ALIGN(PAGE_SIZE);
28450+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28451+ . = ALIGN(PAGE_SIZE);
28452+ *(.empty_zero_page)
28453+ *(.initial_pg_fixmap)
28454+ *(.initial_pg_pmd)
28455+ *(.initial_page_table)
28456+ *(.swapper_pg_dir)
28457+ } :rodata
28458+#endif
28459+
28460+ . = ALIGN(PAGE_SIZE);
28461+ NOTES :rodata :note
28462+
28463+ EXCEPTION_TABLE(16) :rodata
28464
28465 #if defined(CONFIG_DEBUG_RODATA)
28466 /* .text should occupy whole number of pages */
28467@@ -122,16 +176,20 @@ SECTIONS
28468
28469 /* Data */
28470 .data : AT(ADDR(.data) - LOAD_OFFSET) {
28471+
28472+#ifdef CONFIG_PAX_KERNEXEC
28473+ . = ALIGN(HPAGE_SIZE);
28474+#else
28475+ . = ALIGN(PAGE_SIZE);
28476+#endif
28477+
28478 /* Start of data section */
28479 _sdata = .;
28480
28481 /* init_task */
28482 INIT_TASK_DATA(THREAD_SIZE)
28483
28484-#ifdef CONFIG_X86_32
28485- /* 32 bit has nosave before _edata */
28486 NOSAVE_DATA
28487-#endif
28488
28489 PAGE_ALIGNED_DATA(PAGE_SIZE)
28490
28491@@ -174,12 +232,19 @@ SECTIONS
28492 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
28493
28494 /* Init code and data - will be freed after init */
28495- . = ALIGN(PAGE_SIZE);
28496 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
28497+ BYTE(0)
28498+
28499+#ifdef CONFIG_PAX_KERNEXEC
28500+ . = ALIGN(HPAGE_SIZE);
28501+#else
28502+ . = ALIGN(PAGE_SIZE);
28503+#endif
28504+
28505 __init_begin = .; /* paired with __init_end */
28506- }
28507+ } :init.begin
28508
28509-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
28510+#ifdef CONFIG_SMP
28511 /*
28512 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
28513 * output PHDR, so the next output section - .init.text - should
28514@@ -188,12 +253,27 @@ SECTIONS
28515 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
28516 #endif
28517
28518- INIT_TEXT_SECTION(PAGE_SIZE)
28519-#ifdef CONFIG_X86_64
28520- :init
28521-#endif
28522+ . = ALIGN(PAGE_SIZE);
28523+ init_begin = .;
28524+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
28525+ VMLINUX_SYMBOL(_sinittext) = .;
28526+ INIT_TEXT
28527+ VMLINUX_SYMBOL(_einittext) = .;
28528+ . = ALIGN(PAGE_SIZE);
28529+ } :text.init
28530
28531- INIT_DATA_SECTION(16)
28532+ /*
28533+ * .exit.text is discarded at runtime, not link time, to deal with
28534+ * references from .altinstructions and .eh_frame
28535+ */
28536+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28537+ EXIT_TEXT
28538+ . = ALIGN(16);
28539+ } :text.exit
28540+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
28541+
28542+ . = ALIGN(PAGE_SIZE);
28543+ INIT_DATA_SECTION(16) :init
28544
28545 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
28546 __x86_cpu_dev_start = .;
28547@@ -264,19 +344,12 @@ SECTIONS
28548 }
28549
28550 . = ALIGN(8);
28551- /*
28552- * .exit.text is discarded at runtime, not link time, to deal with
28553- * references from .altinstructions and .eh_frame
28554- */
28555- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
28556- EXIT_TEXT
28557- }
28558
28559 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
28560 EXIT_DATA
28561 }
28562
28563-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
28564+#ifndef CONFIG_SMP
28565 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
28566 #endif
28567
28568@@ -295,16 +368,10 @@ SECTIONS
28569 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
28570 __smp_locks = .;
28571 *(.smp_locks)
28572- . = ALIGN(PAGE_SIZE);
28573 __smp_locks_end = .;
28574+ . = ALIGN(PAGE_SIZE);
28575 }
28576
28577-#ifdef CONFIG_X86_64
28578- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
28579- NOSAVE_DATA
28580- }
28581-#endif
28582-
28583 /* BSS */
28584 . = ALIGN(PAGE_SIZE);
28585 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
28586@@ -320,6 +387,7 @@ SECTIONS
28587 __brk_base = .;
28588 . += 64 * 1024; /* 64k alignment slop space */
28589 *(.brk_reservation) /* areas brk users have reserved */
28590+ . = ALIGN(HPAGE_SIZE);
28591 __brk_limit = .;
28592 }
28593
28594@@ -346,13 +414,12 @@ SECTIONS
28595 * for the boot processor.
28596 */
28597 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
28598-INIT_PER_CPU(gdt_page);
28599 INIT_PER_CPU(irq_stack_union);
28600
28601 /*
28602 * Build-time check on the image size:
28603 */
28604-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
28605+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
28606 "kernel image bigger than KERNEL_IMAGE_SIZE");
28607
28608 #ifdef CONFIG_SMP
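
The linker-script changes above split the single executable PT_LOAD segment into finer-grained program headers (`text`, `rodata`, `text.init`, `text.exit`, plus a 32-bit `module` region) so that under KERNEXEC only code segments carry PF_X while rodata stays non-executable, Xen being the noted exception. One way to verify the result is to dump the PT_LOAD flags of the linked vmlinux; a small stand-alone reader (a sketch assuming a 64-bit ELF, error handling trimmed):

    #include <elf.h>
    #include <stdio.h>

    int main(int argc, char **argv)
    {
        FILE *f;
        Elf64_Ehdr eh;
        int i;

        if (argc < 2 || !(f = fopen(argv[1], "rb")))
            return 1;
        fread(&eh, sizeof(eh), 1, f);
        fseek(f, eh.e_phoff, SEEK_SET);
        for (i = 0; i < eh.e_phnum; i++) {
            Elf64_Phdr ph;
            fread(&ph, sizeof(ph), 1, f);
            if (ph.p_type == PT_LOAD)   /* flags 5 = R_E, 4 = R__, 6 = RW_, 7 = RWE */
                printf("LOAD %#llx %c%c%c\n", (unsigned long long)ph.p_vaddr,
                       (ph.p_flags & PF_R) ? 'R' : '-',
                       (ph.p_flags & PF_W) ? 'W' : '-',
                       (ph.p_flags & PF_X) ? 'X' : '-');
        }
        fclose(f);
        return 0;
    }
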
28609diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
28610index e1e1e80..1400089 100644
28611--- a/arch/x86/kernel/vsyscall_64.c
28612+++ b/arch/x86/kernel/vsyscall_64.c
28613@@ -54,15 +54,13 @@
28614
28615 DEFINE_VVAR(int, vgetcpu_mode);
28616
28617-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
28618+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
28619
28620 static int __init vsyscall_setup(char *str)
28621 {
28622 if (str) {
28623 if (!strcmp("emulate", str))
28624 vsyscall_mode = EMULATE;
28625- else if (!strcmp("native", str))
28626- vsyscall_mode = NATIVE;
28627 else if (!strcmp("none", str))
28628 vsyscall_mode = NONE;
28629 else
28630@@ -279,8 +277,7 @@ do_ret:
28631 return true;
28632
28633 sigsegv:
28634- force_sig(SIGSEGV, current);
28635- return true;
28636+ do_group_exit(SIGKILL);
28637 }
28638
28639 /*
28640@@ -331,10 +328,7 @@ void __init map_vsyscall(void)
28641 extern char __vsyscall_page;
28642 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
28643
28644- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
28645- vsyscall_mode == NATIVE
28646- ? PAGE_KERNEL_VSYSCALL
28647- : PAGE_KERNEL_VVAR);
28648+ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
28649 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
28650 (unsigned long)VSYSCALL_ADDR);
28651 }
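
Two hardening decisions are visible in this vsyscall diff: the `native` mode (an executable vsyscall page) is removed outright, leaving only emulation, and a bad fault in the emulated page now terminates the whole thread group with SIGKILL instead of delivering a catchable SIGSEGV. Legacy binaries keep working because the fixed entry address survives; a sketch of what such a binary effectively does (only safe to run where the vsyscall page is present and emulated):

    #include <stdio.h>
    #include <sys/time.h>

    typedef int (*vgtod_t)(struct timeval *, void *);

    int main(void)
    {
        /* The legacy gettimeofday vsyscall lives at this fixed address. */
        vgtod_t vgtod = (vgtod_t)0xffffffffff600000UL;
        struct timeval tv;

        if (vgtod(&tv, NULL) == 0)
            printf("vsyscall gettimeofday: %ld.%06ld\n",
                   (long)tv.tv_sec, (long)tv.tv_usec);
        return 0;
    }
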
28652diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
28653index 04068192..4d75aa6 100644
28654--- a/arch/x86/kernel/x8664_ksyms_64.c
28655+++ b/arch/x86/kernel/x8664_ksyms_64.c
28656@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
28657 EXPORT_SYMBOL(copy_user_generic_unrolled);
28658 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
28659 EXPORT_SYMBOL(__copy_user_nocache);
28660-EXPORT_SYMBOL(_copy_from_user);
28661-EXPORT_SYMBOL(_copy_to_user);
28662
28663 EXPORT_SYMBOL(copy_page);
28664 EXPORT_SYMBOL(clear_page);
28665@@ -73,3 +71,7 @@ EXPORT_SYMBOL(___preempt_schedule);
28666 EXPORT_SYMBOL(___preempt_schedule_context);
28667 #endif
28668 #endif
28669+
28670+#ifdef CONFIG_PAX_PER_CPU_PGD
28671+EXPORT_SYMBOL(cpu_pgd);
28672+#endif
28673diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
28674index e48b674..a451dd9 100644
28675--- a/arch/x86/kernel/x86_init.c
28676+++ b/arch/x86/kernel/x86_init.c
28677@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
28678 static void default_nmi_init(void) { };
28679 static int default_i8042_detect(void) { return 1; };
28680
28681-struct x86_platform_ops x86_platform = {
28682+struct x86_platform_ops x86_platform __read_only = {
28683 .calibrate_tsc = native_calibrate_tsc,
28684 .get_wallclock = mach_get_cmos_time,
28685 .set_wallclock = mach_set_rtc_mmss,
28686@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
28687 EXPORT_SYMBOL_GPL(x86_platform);
28688
28689 #if defined(CONFIG_PCI_MSI)
28690-struct x86_msi_ops x86_msi = {
28691+struct x86_msi_ops x86_msi __read_only = {
28692 .setup_msi_irqs = native_setup_msi_irqs,
28693 .compose_msi_msg = native_compose_msi_msg,
28694 .teardown_msi_irq = native_teardown_msi_irq,
28695@@ -150,7 +150,7 @@ u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
28696 }
28697 #endif
28698
28699-struct x86_io_apic_ops x86_io_apic_ops = {
28700+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
28701 .init = native_io_apic_init_mappings,
28702 .read = native_io_apic_read,
28703 .write = native_io_apic_write,
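
Marking these ops tables `__read_only` moves them into memory that PaX write-protects after boot, so their function pointers cannot be swapped at runtime by a kernel-mode attacker. The cost is that any legitimate late update must lift the protection explicitly, which is exactly what the kvm hunks later in this patch do; the general shape (my_ops and its members are illustrative):

    struct my_ops {
        void (*handler)(void);
    };

    static void my_default_handler(void) { }

    static struct my_ops my_ops __read_only = {
        .handler = my_default_handler,
    };

    static void disable_handler(void)
    {
        pax_open_kernel();
        *(void **)&my_ops.handler = NULL;   /* the cast bypasses the enforced const-ness */
        pax_close_kernel();
    }
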
28704diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
28705index a4b451c..8dfe1ad 100644
28706--- a/arch/x86/kernel/xsave.c
28707+++ b/arch/x86/kernel/xsave.c
28708@@ -164,18 +164,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28709
28710 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
28711 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
28712- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28713+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28714
28715 if (!use_xsave())
28716 return err;
28717
28718- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
28719+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
28720
28721 /*
28722 * Read the xstate_bv which we copied (directly from the cpu or
28723 * from the state in task struct) to the user buffers.
28724 */
28725- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28726+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28727
28728 /*
28729 * For legacy compatibility, we always set FP/SSE bits in the bit
28730@@ -190,7 +190,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28731 */
28732 xstate_bv |= XSTATE_FPSSE;
28733
28734- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28735+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28736
28737 return err;
28738 }
28739@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
28740 {
28741 int err;
28742
28743+ buf = (struct xsave_struct __user *)____m(buf);
28744 if (use_xsave())
28745 err = xsave_user(buf);
28746 else if (use_fxsr())
28747@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
28748 */
28749 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
28750 {
28751+ buf = (void __user *)____m(buf);
28752 if (use_xsave()) {
28753 if ((unsigned long)buf % 64 || fx_only) {
28754 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
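
These xsave hunks change no logic: the added `__user` qualifiers let sparse (`make C=1`) verify that signal-frame pointers are only dereferenced through the user-access helpers, and `____m()` is the PaX hook that rebases a user pointer for the raw save/restore paths under UDEREF. A small sketch of the annotation at work (store_magic() is an illustrative helper):

    /* Passing a plain kernel pointer here where a user pointer is expected
     * would draw a sparse address-space warning; the __user qualifier
     * documents and enforces which side of the boundary buf belongs to. */
    static int store_magic(void __user *buf, unsigned int xstate_size)
    {
        __u32 __user *magic = (__u32 __user *)(buf + xstate_size);

        return __put_user(FP_XSTATE_MAGIC2, magic);
    }
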
28755diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
28756index 38a0afe..94421a9 100644
28757--- a/arch/x86/kvm/cpuid.c
28758+++ b/arch/x86/kvm/cpuid.c
28759@@ -166,15 +166,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
28760 struct kvm_cpuid2 *cpuid,
28761 struct kvm_cpuid_entry2 __user *entries)
28762 {
28763- int r;
28764+ int r, i;
28765
28766 r = -E2BIG;
28767 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
28768 goto out;
28769 r = -EFAULT;
28770- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
28771- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28772+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28773 goto out;
28774+ for (i = 0; i < cpuid->nent; ++i) {
28775+ struct kvm_cpuid_entry2 cpuid_entry;
28776+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
28777+ goto out;
28778+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
28779+ }
28780 vcpu->arch.cpuid_nent = cpuid->nent;
28781 kvm_apic_set_version(vcpu);
28782 kvm_x86_ops->cpuid_update(vcpu);
28783@@ -189,15 +194,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
28784 struct kvm_cpuid2 *cpuid,
28785 struct kvm_cpuid_entry2 __user *entries)
28786 {
28787- int r;
28788+ int r, i;
28789
28790 r = -E2BIG;
28791 if (cpuid->nent < vcpu->arch.cpuid_nent)
28792 goto out;
28793 r = -EFAULT;
28794- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
28795- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28796+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28797 goto out;
28798+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
28799+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
28800+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
28801+ goto out;
28802+ }
28803 return 0;
28804
28805 out:
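
The rewritten ioctl handlers replace one large `copy_from_user()`/`copy_to_user()` over the whole cpuid array with an `access_ok()` check on the full range followed by per-entry copies through a stack-local bounce buffer, so each primitive copy has a small, compile-time-constant size. The pattern in isolation (the wrapper function is illustrative):

    static int copy_entries_in(struct kvm_cpuid_entry2 *dst,
                               const struct kvm_cpuid_entry2 __user *src, int n)
    {
        int i;

        if (!access_ok(VERIFY_READ, src, n * sizeof(*src)))
            return -EFAULT;
        for (i = 0; i < n; i++) {
            struct kvm_cpuid_entry2 tmp;

            /* each copy is bounded by sizeof(tmp), never by a user-supplied size */
            if (__copy_from_user(&tmp, src + i, sizeof(tmp)))
                return -EFAULT;
            dst[i] = tmp;
        }
        return 0;
    }
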
28806diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
28807index 453e5fb..214168f 100644
28808--- a/arch/x86/kvm/lapic.c
28809+++ b/arch/x86/kvm/lapic.c
28810@@ -55,7 +55,7 @@
28811 #define APIC_BUS_CYCLE_NS 1
28812
28813 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
28814-#define apic_debug(fmt, arg...)
28815+#define apic_debug(fmt, arg...) do {} while (0)
28816
28817 #define APIC_LVT_NUM 6
28818 /* 14 is the version for Xeon and Pentium 8.4.8 */
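
Expanding the disabled `apic_debug()` to `do {} while (0)` instead of nothing is standard macro hygiene: the call site stays a single well-formed statement wherever it appears. A compact illustration of what the empty expansion costs (check() is illustrative):

    /* With "#define apic_debug(fmt, arg...)" the if-body below expands to a
     * bare ";", which gcc's -Wempty-body flags and which invites bugs if a
     * statement is later added under the if.  The do {} while (0) form
     * expands to a proper empty statement instead. */
    #define apic_debug(fmt, arg...) do {} while (0)

    static void check(int vec)
    {
        if (vec > 15)
            apic_debug("vector %d out of range\n", vec);
    }
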
28819diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
28820index 4107765..d9eb358 100644
28821--- a/arch/x86/kvm/paging_tmpl.h
28822+++ b/arch/x86/kvm/paging_tmpl.h
28823@@ -331,7 +331,7 @@ retry_walk:
28824 if (unlikely(kvm_is_error_hva(host_addr)))
28825 goto error;
28826
28827- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
28828+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
28829 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
28830 goto error;
28831 walker->ptep_user[walker->level - 1] = ptep_user;
28832diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
28833index b5e994a..35b5866 100644
28834--- a/arch/x86/kvm/svm.c
28835+++ b/arch/x86/kvm/svm.c
28836@@ -3541,7 +3541,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
28837 int cpu = raw_smp_processor_id();
28838
28839 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
28840+
28841+ pax_open_kernel();
28842 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
28843+ pax_close_kernel();
28844+
28845 load_TR_desc();
28846 }
28847
28848@@ -3942,6 +3946,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
28849 #endif
28850 #endif
28851
28852+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28853+ __set_fs(current_thread_info()->addr_limit);
28854+#endif
28855+
28856 reload_tss(vcpu);
28857
28858 local_irq_disable();
28859diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
28860index 801332e..eeff1cc 100644
28861--- a/arch/x86/kvm/vmx.c
28862+++ b/arch/x86/kvm/vmx.c
28863@@ -1339,12 +1339,12 @@ static void vmcs_write64(unsigned long field, u64 value)
28864 #endif
28865 }
28866
28867-static void vmcs_clear_bits(unsigned long field, u32 mask)
28868+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
28869 {
28870 vmcs_writel(field, vmcs_readl(field) & ~mask);
28871 }
28872
28873-static void vmcs_set_bits(unsigned long field, u32 mask)
28874+static void vmcs_set_bits(unsigned long field, unsigned long mask)
28875 {
28876 vmcs_writel(field, vmcs_readl(field) | mask);
28877 }
28878@@ -1604,7 +1604,11 @@ static void reload_tss(void)
28879 struct desc_struct *descs;
28880
28881 descs = (void *)gdt->address;
28882+
28883+ pax_open_kernel();
28884 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
28885+ pax_close_kernel();
28886+
28887 load_TR_desc();
28888 }
28889
28890@@ -1832,6 +1836,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
28891 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
28892 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
28893
28894+#ifdef CONFIG_PAX_PER_CPU_PGD
28895+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28896+#endif
28897+
28898 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
28899 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
28900 vmx->loaded_vmcs->cpu = cpu;
28901@@ -2121,7 +2129,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
28902 * reads and returns guest's timestamp counter "register"
28903 * guest_tsc = host_tsc + tsc_offset -- 21.3
28904 */
28905-static u64 guest_read_tsc(void)
28906+static u64 __intentional_overflow(-1) guest_read_tsc(void)
28907 {
28908 u64 host_tsc, tsc_offset;
28909
28910@@ -3093,8 +3101,11 @@ static __init int hardware_setup(void)
28911 if (!cpu_has_vmx_flexpriority())
28912 flexpriority_enabled = 0;
28913
28914- if (!cpu_has_vmx_tpr_shadow())
28915- kvm_x86_ops->update_cr8_intercept = NULL;
28916+ if (!cpu_has_vmx_tpr_shadow()) {
28917+ pax_open_kernel();
28918+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28919+ pax_close_kernel();
28920+ }
28921
28922 if (enable_ept && !cpu_has_vmx_ept_2m_page())
28923 kvm_disable_largepages();
28924@@ -3105,13 +3116,15 @@ static __init int hardware_setup(void)
28925 if (!cpu_has_vmx_apicv())
28926 enable_apicv = 0;
28927
28928+ pax_open_kernel();
28929 if (enable_apicv)
28930- kvm_x86_ops->update_cr8_intercept = NULL;
28931+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28932 else {
28933- kvm_x86_ops->hwapic_irr_update = NULL;
28934- kvm_x86_ops->deliver_posted_interrupt = NULL;
28935- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28936+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
28937+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
28938+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28939 }
28940+ pax_close_kernel();
28941
28942 if (nested)
28943 nested_vmx_setup_ctls_msrs();
28944@@ -4221,7 +4234,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28945
28946 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
28947 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
28948+
28949+#ifndef CONFIG_PAX_PER_CPU_PGD
28950 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28951+#endif
28952
28953 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
28954 #ifdef CONFIG_X86_64
28955@@ -4243,7 +4259,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28956 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
28957 vmx->host_idt_base = dt.address;
28958
28959- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
28960+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
28961
28962 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
28963 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
28964@@ -7413,6 +7429,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28965 "jmp 2f \n\t"
28966 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
28967 "2: "
28968+
28969+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28970+ "ljmp %[cs],$3f\n\t"
28971+ "3: "
28972+#endif
28973+
28974 /* Save guest registers, load host registers, keep flags */
28975 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
28976 "pop %0 \n\t"
28977@@ -7465,6 +7487,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28978 #endif
28979 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
28980 [wordsize]"i"(sizeof(ulong))
28981+
28982+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28983+ ,[cs]"i"(__KERNEL_CS)
28984+#endif
28985+
28986 : "cc", "memory"
28987 #ifdef CONFIG_X86_64
28988 , "rax", "rbx", "rdi", "rsi"
28989@@ -7478,7 +7505,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28990 if (debugctlmsr)
28991 update_debugctlmsr(debugctlmsr);
28992
28993-#ifndef CONFIG_X86_64
28994+#ifdef CONFIG_X86_32
28995 /*
28996 * The sysexit path does not restore ds/es, so we must set them to
28997 * a reasonable value ourselves.
28998@@ -7487,8 +7514,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28999 * may be executed in interrupt context, which saves and restores segments
29000 * around it, nullifying its effect.
29001 */
29002- loadsegment(ds, __USER_DS);
29003- loadsegment(es, __USER_DS);
29004+ loadsegment(ds, __KERNEL_DS);
29005+ loadsegment(es, __KERNEL_DS);
29006+ loadsegment(ss, __KERNEL_DS);
29007+
29008+#ifdef CONFIG_PAX_KERNEXEC
29009+ loadsegment(fs, __KERNEL_PERCPU);
29010+#endif
29011+
29012+#ifdef CONFIG_PAX_MEMORY_UDEREF
29013+ __set_fs(current_thread_info()->addr_limit);
29014+#endif
29015+
29016 #endif
29017
29018 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
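
Several threads run through this vmx.c diff: `vmcs_clear_bits()`/`vmcs_set_bits()` take an `unsigned long` mask because the underlying `vmcs_writel()` fields are natural-width (64-bit on x86_64), function-pointer updates to `kvm_x86_ops` are bracketed with `pax_open_kernel()`/`pax_close_kernel()` now that the table is write-protected, and the 32-bit exit path reloads kernel rather than user segment selectors. The mask-width pitfall in miniature (a toy model, not the real accessors):

    /* With a u32 mask, ~mask is computed in 32 bits and then zero-extended,
     * so the AND below would silently wipe bits 32..63 of a natural-width
     * VMCS field; an unsigned long mask preserves them. */
    static unsigned long field;                 /* stand-in for vmcs_readl/vmcs_writel */

    static void clear_bits(unsigned long mask)  /* was: u32 mask */
    {
        field &= ~mask;
    }
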
29019diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
29020index ef432f8..a630659 100644
29021--- a/arch/x86/kvm/x86.c
29022+++ b/arch/x86/kvm/x86.c
29023@@ -1808,8 +1808,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
29024 {
29025 struct kvm *kvm = vcpu->kvm;
29026 int lm = is_long_mode(vcpu);
29027- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
29028- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
29029+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
29030+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
29031 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
29032 : kvm->arch.xen_hvm_config.blob_size_32;
29033 u32 page_num = data & ~PAGE_MASK;
29034@@ -2729,6 +2729,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
29035 if (n < msr_list.nmsrs)
29036 goto out;
29037 r = -EFAULT;
29038+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
29039+ goto out;
29040 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
29041 num_msrs_to_save * sizeof(u32)))
29042 goto out;
29043@@ -5567,7 +5569,7 @@ static struct notifier_block pvclock_gtod_notifier = {
29044 };
29045 #endif
29046
29047-int kvm_arch_init(void *opaque)
29048+int kvm_arch_init(const void *opaque)
29049 {
29050 int r;
29051 struct kvm_x86_ops *ops = opaque;
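
The added `num_msrs_to_save > ARRAY_SIZE(msrs_to_save)` test makes the subsequent bulk `copy_to_user()` provably bounded by the static array, turning a would-be out-of-bounds kernel read into a clean -EFAULT and satisfying PaX's usercopy size checking. The shape of the check, lifted out of the ioctl for clarity (the wrapper is illustrative):

    static long export_msr_list(u32 __user *indices)
    {
        if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
            return -EFAULT;     /* never copy past the static array */
        if (copy_to_user(indices, msrs_to_save,
                         num_msrs_to_save * sizeof(u32)))
            return -EFAULT;
        return 0;
    }
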
29052diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
29053index aae9413..d11e829 100644
29054--- a/arch/x86/lguest/boot.c
29055+++ b/arch/x86/lguest/boot.c
29056@@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
29057 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
29058 * Launcher to reboot us.
29059 */
29060-static void lguest_restart(char *reason)
29061+static __noreturn void lguest_restart(char *reason)
29062 {
29063 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
29064+ BUG();
29065 }
29066
29067 /*G:050
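
`lguest_restart()` ends in a hypercall that must not return; tagging it `__noreturn` lets the compiler and its callers rely on that, and the added `BUG()` turns a misbehaving hypervisor into a loud trap rather than a silent fall-through into the caller. The contract in miniature (an illustrative twin of the patched function):

    /* __noreturn promises the compiler control never comes back; BUG()
     * enforces it even if the shutdown hypercall were ignored. */
    static __noreturn void my_restart(char *reason)
    {
        hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
        BUG();
    }
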
29068diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
29069index 00933d5..3a64af9 100644
29070--- a/arch/x86/lib/atomic64_386_32.S
29071+++ b/arch/x86/lib/atomic64_386_32.S
29072@@ -48,6 +48,10 @@ BEGIN(read)
29073 movl (v), %eax
29074 movl 4(v), %edx
29075 RET_ENDP
29076+BEGIN(read_unchecked)
29077+ movl (v), %eax
29078+ movl 4(v), %edx
29079+RET_ENDP
29080 #undef v
29081
29082 #define v %esi
29083@@ -55,6 +59,10 @@ BEGIN(set)
29084 movl %ebx, (v)
29085 movl %ecx, 4(v)
29086 RET_ENDP
29087+BEGIN(set_unchecked)
29088+ movl %ebx, (v)
29089+ movl %ecx, 4(v)
29090+RET_ENDP
29091 #undef v
29092
29093 #define v %esi
29094@@ -70,6 +78,20 @@ RET_ENDP
29095 BEGIN(add)
29096 addl %eax, (v)
29097 adcl %edx, 4(v)
29098+
29099+#ifdef CONFIG_PAX_REFCOUNT
29100+ jno 0f
29101+ subl %eax, (v)
29102+ sbbl %edx, 4(v)
29103+ int $4
29104+0:
29105+ _ASM_EXTABLE(0b, 0b)
29106+#endif
29107+
29108+RET_ENDP
29109+BEGIN(add_unchecked)
29110+ addl %eax, (v)
29111+ adcl %edx, 4(v)
29112 RET_ENDP
29113 #undef v
29114
29115@@ -77,6 +99,24 @@ RET_ENDP
29116 BEGIN(add_return)
29117 addl (v), %eax
29118 adcl 4(v), %edx
29119+
29120+#ifdef CONFIG_PAX_REFCOUNT
29121+ into
29122+1234:
29123+ _ASM_EXTABLE(1234b, 2f)
29124+#endif
29125+
29126+ movl %eax, (v)
29127+ movl %edx, 4(v)
29128+
29129+#ifdef CONFIG_PAX_REFCOUNT
29130+2:
29131+#endif
29132+
29133+RET_ENDP
29134+BEGIN(add_return_unchecked)
29135+ addl (v), %eax
29136+ adcl 4(v), %edx
29137 movl %eax, (v)
29138 movl %edx, 4(v)
29139 RET_ENDP
29140@@ -86,6 +126,20 @@ RET_ENDP
29141 BEGIN(sub)
29142 subl %eax, (v)
29143 sbbl %edx, 4(v)
29144+
29145+#ifdef CONFIG_PAX_REFCOUNT
29146+ jno 0f
29147+ addl %eax, (v)
29148+ adcl %edx, 4(v)
29149+ int $4
29150+0:
29151+ _ASM_EXTABLE(0b, 0b)
29152+#endif
29153+
29154+RET_ENDP
29155+BEGIN(sub_unchecked)
29156+ subl %eax, (v)
29157+ sbbl %edx, 4(v)
29158 RET_ENDP
29159 #undef v
29160
29161@@ -96,6 +150,27 @@ BEGIN(sub_return)
29162 sbbl $0, %edx
29163 addl (v), %eax
29164 adcl 4(v), %edx
29165+
29166+#ifdef CONFIG_PAX_REFCOUNT
29167+ into
29168+1234:
29169+ _ASM_EXTABLE(1234b, 2f)
29170+#endif
29171+
29172+ movl %eax, (v)
29173+ movl %edx, 4(v)
29174+
29175+#ifdef CONFIG_PAX_REFCOUNT
29176+2:
29177+#endif
29178+
29179+RET_ENDP
29180+BEGIN(sub_return_unchecked)
29181+ negl %edx
29182+ negl %eax
29183+ sbbl $0, %edx
29184+ addl (v), %eax
29185+ adcl 4(v), %edx
29186 movl %eax, (v)
29187 movl %edx, 4(v)
29188 RET_ENDP
29189@@ -105,6 +180,20 @@ RET_ENDP
29190 BEGIN(inc)
29191 addl $1, (v)
29192 adcl $0, 4(v)
29193+
29194+#ifdef CONFIG_PAX_REFCOUNT
29195+ jno 0f
29196+ subl $1, (v)
29197+ sbbl $0, 4(v)
29198+ int $4
29199+0:
29200+ _ASM_EXTABLE(0b, 0b)
29201+#endif
29202+
29203+RET_ENDP
29204+BEGIN(inc_unchecked)
29205+ addl $1, (v)
29206+ adcl $0, 4(v)
29207 RET_ENDP
29208 #undef v
29209
29210@@ -114,6 +203,26 @@ BEGIN(inc_return)
29211 movl 4(v), %edx
29212 addl $1, %eax
29213 adcl $0, %edx
29214+
29215+#ifdef CONFIG_PAX_REFCOUNT
29216+ into
29217+1234:
29218+ _ASM_EXTABLE(1234b, 2f)
29219+#endif
29220+
29221+ movl %eax, (v)
29222+ movl %edx, 4(v)
29223+
29224+#ifdef CONFIG_PAX_REFCOUNT
29225+2:
29226+#endif
29227+
29228+RET_ENDP
29229+BEGIN(inc_return_unchecked)
29230+ movl (v), %eax
29231+ movl 4(v), %edx
29232+ addl $1, %eax
29233+ adcl $0, %edx
29234 movl %eax, (v)
29235 movl %edx, 4(v)
29236 RET_ENDP
29237@@ -123,6 +232,20 @@ RET_ENDP
29238 BEGIN(dec)
29239 subl $1, (v)
29240 sbbl $0, 4(v)
29241+
29242+#ifdef CONFIG_PAX_REFCOUNT
29243+ jno 0f
29244+ addl $1, (v)
29245+ adcl $0, 4(v)
29246+ int $4
29247+0:
29248+ _ASM_EXTABLE(0b, 0b)
29249+#endif
29250+
29251+RET_ENDP
29252+BEGIN(dec_unchecked)
29253+ subl $1, (v)
29254+ sbbl $0, 4(v)
29255 RET_ENDP
29256 #undef v
29257
29258@@ -132,6 +255,26 @@ BEGIN(dec_return)
29259 movl 4(v), %edx
29260 subl $1, %eax
29261 sbbl $0, %edx
29262+
29263+#ifdef CONFIG_PAX_REFCOUNT
29264+ into
29265+1234:
29266+ _ASM_EXTABLE(1234b, 2f)
29267+#endif
29268+
29269+ movl %eax, (v)
29270+ movl %edx, 4(v)
29271+
29272+#ifdef CONFIG_PAX_REFCOUNT
29273+2:
29274+#endif
29275+
29276+RET_ENDP
29277+BEGIN(dec_return_unchecked)
29278+ movl (v), %eax
29279+ movl 4(v), %edx
29280+ subl $1, %eax
29281+ sbbl $0, %edx
29282 movl %eax, (v)
29283 movl %edx, 4(v)
29284 RET_ENDP
29285@@ -143,6 +286,13 @@ BEGIN(add_unless)
29286 adcl %edx, %edi
29287 addl (v), %eax
29288 adcl 4(v), %edx
29289+
29290+#ifdef CONFIG_PAX_REFCOUNT
29291+ into
29292+1234:
29293+ _ASM_EXTABLE(1234b, 2f)
29294+#endif
29295+
29296 cmpl %eax, %ecx
29297 je 3f
29298 1:
29299@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
29300 1:
29301 addl $1, %eax
29302 adcl $0, %edx
29303+
29304+#ifdef CONFIG_PAX_REFCOUNT
29305+ into
29306+1234:
29307+ _ASM_EXTABLE(1234b, 2f)
29308+#endif
29309+
29310 movl %eax, (v)
29311 movl %edx, 4(v)
29312 movl $1, %eax
29313@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
29314 movl 4(v), %edx
29315 subl $1, %eax
29316 sbbl $0, %edx
29317+
29318+#ifdef CONFIG_PAX_REFCOUNT
29319+ into
29320+1234:
29321+ _ASM_EXTABLE(1234b, 1f)
29322+#endif
29323+
29324 js 1f
29325 movl %eax, (v)
29326 movl %edx, 4(v)
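
Every atomic64 operation now comes in two flavors: the default one detects signed overflow — `jno` skips a fixup that undoes the arithmetic and raises `int $4` (#OF), while the `*_return` variants use `into` with an exception-table entry — and an `*_unchecked` twin that keeps plain wrapping semantics for counters that are allowed to overflow. A C analogue of the split, using GCC's overflow builtins in place of the trap-based machinery (a sketch; atomicity is elided for brevity):

    #include <stdint.h>
    #include <stdlib.h>

    /* Checked flavor: mirrors the PAX_REFCOUNT path, which undoes the
     * update and traps instead of letting a reference count wrap. */
    static int64_t add_return_checked(int64_t *v, int64_t i)
    {
        int64_t sum;

        if (__builtin_add_overflow(*v, i, &sum))
            abort();
        return *v = sum;
    }

    /* Unchecked flavor: wraps via unsigned arithmetic (kept defined in C). */
    static int64_t add_return_unchecked(int64_t *v, int64_t i)
    {
        return *v = (int64_t)((uint64_t)*v + (uint64_t)i);
    }
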
29327diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
29328index f5cc9eb..51fa319 100644
29329--- a/arch/x86/lib/atomic64_cx8_32.S
29330+++ b/arch/x86/lib/atomic64_cx8_32.S
29331@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
29332 CFI_STARTPROC
29333
29334 read64 %ecx
29335+ pax_force_retaddr
29336 ret
29337 CFI_ENDPROC
29338 ENDPROC(atomic64_read_cx8)
29339
29340+ENTRY(atomic64_read_unchecked_cx8)
29341+ CFI_STARTPROC
29342+
29343+ read64 %ecx
29344+ pax_force_retaddr
29345+ ret
29346+ CFI_ENDPROC
29347+ENDPROC(atomic64_read_unchecked_cx8)
29348+
29349 ENTRY(atomic64_set_cx8)
29350 CFI_STARTPROC
29351
29352@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
29353 cmpxchg8b (%esi)
29354 jne 1b
29355
29356+ pax_force_retaddr
29357 ret
29358 CFI_ENDPROC
29359 ENDPROC(atomic64_set_cx8)
29360
29361+ENTRY(atomic64_set_unchecked_cx8)
29362+ CFI_STARTPROC
29363+
29364+1:
29365+/* we don't need LOCK_PREFIX since aligned 64-bit writes
29366+ * are atomic on 586 and newer */
29367+ cmpxchg8b (%esi)
29368+ jne 1b
29369+
29370+ pax_force_retaddr
29371+ ret
29372+ CFI_ENDPROC
29373+ENDPROC(atomic64_set_unchecked_cx8)
29374+
29375 ENTRY(atomic64_xchg_cx8)
29376 CFI_STARTPROC
29377
29378@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
29379 cmpxchg8b (%esi)
29380 jne 1b
29381
29382+ pax_force_retaddr
29383 ret
29384 CFI_ENDPROC
29385 ENDPROC(atomic64_xchg_cx8)
29386
29387-.macro addsub_return func ins insc
29388-ENTRY(atomic64_\func\()_return_cx8)
29389+.macro addsub_return func ins insc unchecked=""
29390+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29391 CFI_STARTPROC
29392 SAVE ebp
29393 SAVE ebx
29394@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
29395 movl %edx, %ecx
29396 \ins\()l %esi, %ebx
29397 \insc\()l %edi, %ecx
29398+
29399+.ifb \unchecked
29400+#ifdef CONFIG_PAX_REFCOUNT
29401+ into
29402+2:
29403+ _ASM_EXTABLE(2b, 3f)
29404+#endif
29405+.endif
29406+
29407 LOCK_PREFIX
29408 cmpxchg8b (%ebp)
29409 jne 1b
29410-
29411-10:
29412 movl %ebx, %eax
29413 movl %ecx, %edx
29414+
29415+.ifb \unchecked
29416+#ifdef CONFIG_PAX_REFCOUNT
29417+3:
29418+#endif
29419+.endif
29420+
29421 RESTORE edi
29422 RESTORE esi
29423 RESTORE ebx
29424 RESTORE ebp
29425+ pax_force_retaddr
29426 ret
29427 CFI_ENDPROC
29428-ENDPROC(atomic64_\func\()_return_cx8)
29429+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29430 .endm
29431
29432 addsub_return add add adc
29433 addsub_return sub sub sbb
29434+addsub_return add add adc _unchecked
29435+addsub_return sub sub sbb _unchecked
29436
29437-.macro incdec_return func ins insc
29438-ENTRY(atomic64_\func\()_return_cx8)
29439+.macro incdec_return func ins insc unchecked=""
29440+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29441 CFI_STARTPROC
29442 SAVE ebx
29443
29444@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
29445 movl %edx, %ecx
29446 \ins\()l $1, %ebx
29447 \insc\()l $0, %ecx
29448+
29449+.ifb \unchecked
29450+#ifdef CONFIG_PAX_REFCOUNT
29451+ into
29452+2:
29453+ _ASM_EXTABLE(2b, 3f)
29454+#endif
29455+.endif
29456+
29457 LOCK_PREFIX
29458 cmpxchg8b (%esi)
29459 jne 1b
29460
29461-10:
29462 movl %ebx, %eax
29463 movl %ecx, %edx
29464+
29465+.ifb \unchecked
29466+#ifdef CONFIG_PAX_REFCOUNT
29467+3:
29468+#endif
29469+.endif
29470+
29471 RESTORE ebx
29472+ pax_force_retaddr
29473 ret
29474 CFI_ENDPROC
29475-ENDPROC(atomic64_\func\()_return_cx8)
29476+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29477 .endm
29478
29479 incdec_return inc add adc
29480 incdec_return dec sub sbb
29481+incdec_return inc add adc _unchecked
29482+incdec_return dec sub sbb _unchecked
29483
29484 ENTRY(atomic64_dec_if_positive_cx8)
29485 CFI_STARTPROC
29486@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
29487 movl %edx, %ecx
29488 subl $1, %ebx
29489 sbb $0, %ecx
29490+
29491+#ifdef CONFIG_PAX_REFCOUNT
29492+ into
29493+1234:
29494+ _ASM_EXTABLE(1234b, 2f)
29495+#endif
29496+
29497 js 2f
29498 LOCK_PREFIX
29499 cmpxchg8b (%esi)
29500@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
29501 movl %ebx, %eax
29502 movl %ecx, %edx
29503 RESTORE ebx
29504+ pax_force_retaddr
29505 ret
29506 CFI_ENDPROC
29507 ENDPROC(atomic64_dec_if_positive_cx8)
29508@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
29509 movl %edx, %ecx
29510 addl %ebp, %ebx
29511 adcl %edi, %ecx
29512+
29513+#ifdef CONFIG_PAX_REFCOUNT
29514+ into
29515+1234:
29516+ _ASM_EXTABLE(1234b, 3f)
29517+#endif
29518+
29519 LOCK_PREFIX
29520 cmpxchg8b (%esi)
29521 jne 1b
29522@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
29523 CFI_ADJUST_CFA_OFFSET -8
29524 RESTORE ebx
29525 RESTORE ebp
29526+ pax_force_retaddr
29527 ret
29528 4:
29529 cmpl %edx, 4(%esp)
29530@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
29531 xorl %ecx, %ecx
29532 addl $1, %ebx
29533 adcl %edx, %ecx
29534+
29535+#ifdef CONFIG_PAX_REFCOUNT
29536+ into
29537+1234:
29538+ _ASM_EXTABLE(1234b, 3f)
29539+#endif
29540+
29541 LOCK_PREFIX
29542 cmpxchg8b (%esi)
29543 jne 1b
29544@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
29545 movl $1, %eax
29546 3:
29547 RESTORE ebx
29548+ pax_force_retaddr
29549 ret
29550 CFI_ENDPROC
29551 ENDPROC(atomic64_inc_not_zero_cx8)
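
All of the cx8 variants share one structure: load the current 64-bit value, compute the updated value in `ebx:ecx`, then retry `lock cmpxchg8b` until no other CPU raced in between — the REFCOUNT check simply sits between compute and commit. The same loop in C11 form (a sketch; the kernel's own implementation is the assembly above):

    #include <stdatomic.h>
    #include <stdint.h>

    static int64_t atomic64_add_return(_Atomic int64_t *v, int64_t i)
    {
        int64_t old = atomic_load(v);
        int64_t new;

        do {
            new = old + i;  /* the checked asm verifies overflow right here */
        } while (!atomic_compare_exchange_weak(v, &old, new));
        return new;
    }
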
29552diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
29553index e78b8ee..7e173a8 100644
29554--- a/arch/x86/lib/checksum_32.S
29555+++ b/arch/x86/lib/checksum_32.S
29556@@ -29,7 +29,8 @@
29557 #include <asm/dwarf2.h>
29558 #include <asm/errno.h>
29559 #include <asm/asm.h>
29560-
29561+#include <asm/segment.h>
29562+
29563 /*
29564 * computes a partial checksum, e.g. for TCP/UDP fragments
29565 */
29566@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
29567
29568 #define ARGBASE 16
29569 #define FP 12
29570-
29571-ENTRY(csum_partial_copy_generic)
29572+
29573+ENTRY(csum_partial_copy_generic_to_user)
29574 CFI_STARTPROC
29575+
29576+#ifdef CONFIG_PAX_MEMORY_UDEREF
29577+ pushl_cfi %gs
29578+ popl_cfi %es
29579+ jmp csum_partial_copy_generic
29580+#endif
29581+
29582+ENTRY(csum_partial_copy_generic_from_user)
29583+
29584+#ifdef CONFIG_PAX_MEMORY_UDEREF
29585+ pushl_cfi %gs
29586+ popl_cfi %ds
29587+#endif
29588+
29589+ENTRY(csum_partial_copy_generic)
29590 subl $4,%esp
29591 CFI_ADJUST_CFA_OFFSET 4
29592 pushl_cfi %edi
29593@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
29594 jmp 4f
29595 SRC(1: movw (%esi), %bx )
29596 addl $2, %esi
29597-DST( movw %bx, (%edi) )
29598+DST( movw %bx, %es:(%edi) )
29599 addl $2, %edi
29600 addw %bx, %ax
29601 adcl $0, %eax
29602@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
29603 SRC(1: movl (%esi), %ebx )
29604 SRC( movl 4(%esi), %edx )
29605 adcl %ebx, %eax
29606-DST( movl %ebx, (%edi) )
29607+DST( movl %ebx, %es:(%edi) )
29608 adcl %edx, %eax
29609-DST( movl %edx, 4(%edi) )
29610+DST( movl %edx, %es:4(%edi) )
29611
29612 SRC( movl 8(%esi), %ebx )
29613 SRC( movl 12(%esi), %edx )
29614 adcl %ebx, %eax
29615-DST( movl %ebx, 8(%edi) )
29616+DST( movl %ebx, %es:8(%edi) )
29617 adcl %edx, %eax
29618-DST( movl %edx, 12(%edi) )
29619+DST( movl %edx, %es:12(%edi) )
29620
29621 SRC( movl 16(%esi), %ebx )
29622 SRC( movl 20(%esi), %edx )
29623 adcl %ebx, %eax
29624-DST( movl %ebx, 16(%edi) )
29625+DST( movl %ebx, %es:16(%edi) )
29626 adcl %edx, %eax
29627-DST( movl %edx, 20(%edi) )
29628+DST( movl %edx, %es:20(%edi) )
29629
29630 SRC( movl 24(%esi), %ebx )
29631 SRC( movl 28(%esi), %edx )
29632 adcl %ebx, %eax
29633-DST( movl %ebx, 24(%edi) )
29634+DST( movl %ebx, %es:24(%edi) )
29635 adcl %edx, %eax
29636-DST( movl %edx, 28(%edi) )
29637+DST( movl %edx, %es:28(%edi) )
29638
29639 lea 32(%esi), %esi
29640 lea 32(%edi), %edi
29641@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
29642 shrl $2, %edx # This clears CF
29643 SRC(3: movl (%esi), %ebx )
29644 adcl %ebx, %eax
29645-DST( movl %ebx, (%edi) )
29646+DST( movl %ebx, %es:(%edi) )
29647 lea 4(%esi), %esi
29648 lea 4(%edi), %edi
29649 dec %edx
29650@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
29651 jb 5f
29652 SRC( movw (%esi), %cx )
29653 leal 2(%esi), %esi
29654-DST( movw %cx, (%edi) )
29655+DST( movw %cx, %es:(%edi) )
29656 leal 2(%edi), %edi
29657 je 6f
29658 shll $16,%ecx
29659 SRC(5: movb (%esi), %cl )
29660-DST( movb %cl, (%edi) )
29661+DST( movb %cl, %es:(%edi) )
29662 6: addl %ecx, %eax
29663 adcl $0, %eax
29664 7:
29665@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
29666
29667 6001:
29668 movl ARGBASE+20(%esp), %ebx # src_err_ptr
29669- movl $-EFAULT, (%ebx)
29670+ movl $-EFAULT, %ss:(%ebx)
29671
29672 # zero the complete destination - computing the rest
29673 # is too much work
29674@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
29675
29676 6002:
29677 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29678- movl $-EFAULT,(%ebx)
29679+ movl $-EFAULT,%ss:(%ebx)
29680 jmp 5000b
29681
29682 .previous
29683
29684+ pushl_cfi %ss
29685+ popl_cfi %ds
29686+ pushl_cfi %ss
29687+ popl_cfi %es
29688 popl_cfi %ebx
29689 CFI_RESTORE ebx
29690 popl_cfi %esi
29691@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
29692 popl_cfi %ecx # equivalent to addl $4,%esp
29693 ret
29694 CFI_ENDPROC
29695-ENDPROC(csum_partial_copy_generic)
29696+ENDPROC(csum_partial_copy_generic_to_user)
29697
29698 #else
29699
29700 /* Version for PentiumII/PPro */
29701
29702 #define ROUND1(x) \
29703+ nop; nop; nop; \
29704 SRC(movl x(%esi), %ebx ) ; \
29705 addl %ebx, %eax ; \
29706- DST(movl %ebx, x(%edi) ) ;
29707+ DST(movl %ebx, %es:x(%edi)) ;
29708
29709 #define ROUND(x) \
29710+ nop; nop; nop; \
29711 SRC(movl x(%esi), %ebx ) ; \
29712 adcl %ebx, %eax ; \
29713- DST(movl %ebx, x(%edi) ) ;
29714+ DST(movl %ebx, %es:x(%edi)) ;
29715
29716 #define ARGBASE 12
29717-
29718-ENTRY(csum_partial_copy_generic)
29719+
29720+ENTRY(csum_partial_copy_generic_to_user)
29721 CFI_STARTPROC
29722+
29723+#ifdef CONFIG_PAX_MEMORY_UDEREF
29724+ pushl_cfi %gs
29725+ popl_cfi %es
29726+ jmp csum_partial_copy_generic
29727+#endif
29728+
29729+ENTRY(csum_partial_copy_generic_from_user)
29730+
29731+#ifdef CONFIG_PAX_MEMORY_UDEREF
29732+ pushl_cfi %gs
29733+ popl_cfi %ds
29734+#endif
29735+
29736+ENTRY(csum_partial_copy_generic)
29737 pushl_cfi %ebx
29738 CFI_REL_OFFSET ebx, 0
29739 pushl_cfi %edi
29740@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
29741 subl %ebx, %edi
29742 lea -1(%esi),%edx
29743 andl $-32,%edx
29744- lea 3f(%ebx,%ebx), %ebx
29745+ lea 3f(%ebx,%ebx,2), %ebx
29746 testl %esi, %esi
29747 jmp *%ebx
29748 1: addl $64,%esi
29749@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
29750 jb 5f
29751 SRC( movw (%esi), %dx )
29752 leal 2(%esi), %esi
29753-DST( movw %dx, (%edi) )
29754+DST( movw %dx, %es:(%edi) )
29755 leal 2(%edi), %edi
29756 je 6f
29757 shll $16,%edx
29758 5:
29759 SRC( movb (%esi), %dl )
29760-DST( movb %dl, (%edi) )
29761+DST( movb %dl, %es:(%edi) )
29762 6: addl %edx, %eax
29763 adcl $0, %eax
29764 7:
29765 .section .fixup, "ax"
29766 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
29767- movl $-EFAULT, (%ebx)
29768+ movl $-EFAULT, %ss:(%ebx)
29769 # zero the complete destination (computing the rest is too much work)
29770 movl ARGBASE+8(%esp),%edi # dst
29771 movl ARGBASE+12(%esp),%ecx # len
29772@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
29773 rep; stosb
29774 jmp 7b
29775 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29776- movl $-EFAULT, (%ebx)
29777+ movl $-EFAULT, %ss:(%ebx)
29778 jmp 7b
29779 .previous
29780
29781+#ifdef CONFIG_PAX_MEMORY_UDEREF
29782+ pushl_cfi %ss
29783+ popl_cfi %ds
29784+ pushl_cfi %ss
29785+ popl_cfi %es
29786+#endif
29787+
29788 popl_cfi %esi
29789 CFI_RESTORE esi
29790 popl_cfi %edi
29791@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
29792 CFI_RESTORE ebx
29793 ret
29794 CFI_ENDPROC
29795-ENDPROC(csum_partial_copy_generic)
29796+ENDPROC(csum_partial_copy_generic_to_user)
29797
29798 #undef ROUND
29799 #undef ROUND1
29800diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
29801index f2145cf..cea889d 100644
29802--- a/arch/x86/lib/clear_page_64.S
29803+++ b/arch/x86/lib/clear_page_64.S
29804@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
29805 movl $4096/8,%ecx
29806 xorl %eax,%eax
29807 rep stosq
29808+ pax_force_retaddr
29809 ret
29810 CFI_ENDPROC
29811 ENDPROC(clear_page_c)
29812@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
29813 movl $4096,%ecx
29814 xorl %eax,%eax
29815 rep stosb
29816+ pax_force_retaddr
29817 ret
29818 CFI_ENDPROC
29819 ENDPROC(clear_page_c_e)
29820@@ -43,6 +45,7 @@ ENTRY(clear_page)
29821 leaq 64(%rdi),%rdi
29822 jnz .Lloop
29823 nop
29824+ pax_force_retaddr
29825 ret
29826 CFI_ENDPROC
29827 .Lclear_page_end:
29828@@ -58,7 +61,7 @@ ENDPROC(clear_page)
29829
29830 #include <asm/cpufeature.h>
29831
29832- .section .altinstr_replacement,"ax"
29833+ .section .altinstr_replacement,"a"
29834 1: .byte 0xeb /* jmp <disp8> */
29835 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
29836 2: .byte 0xeb /* jmp <disp8> */
29837diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
29838index 1e572c5..2a162cd 100644
29839--- a/arch/x86/lib/cmpxchg16b_emu.S
29840+++ b/arch/x86/lib/cmpxchg16b_emu.S
29841@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
29842
29843 popf
29844 mov $1, %al
29845+ pax_force_retaddr
29846 ret
29847
29848 not_same:
29849 popf
29850 xor %al,%al
29851+ pax_force_retaddr
29852 ret
29853
29854 CFI_ENDPROC
29855diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
29856index 176cca6..e0d658e 100644
29857--- a/arch/x86/lib/copy_page_64.S
29858+++ b/arch/x86/lib/copy_page_64.S
29859@@ -9,6 +9,7 @@ copy_page_rep:
29860 CFI_STARTPROC
29861 movl $4096/8, %ecx
29862 rep movsq
29863+ pax_force_retaddr
29864 ret
29865 CFI_ENDPROC
29866 ENDPROC(copy_page_rep)
29867@@ -24,8 +25,8 @@ ENTRY(copy_page)
29868 CFI_ADJUST_CFA_OFFSET 2*8
29869 movq %rbx, (%rsp)
29870 CFI_REL_OFFSET rbx, 0
29871- movq %r12, 1*8(%rsp)
29872- CFI_REL_OFFSET r12, 1*8
29873+ movq %r13, 1*8(%rsp)
29874+ CFI_REL_OFFSET r13, 1*8
29875
29876 movl $(4096/64)-5, %ecx
29877 .p2align 4
29878@@ -38,7 +39,7 @@ ENTRY(copy_page)
29879 movq 0x8*4(%rsi), %r9
29880 movq 0x8*5(%rsi), %r10
29881 movq 0x8*6(%rsi), %r11
29882- movq 0x8*7(%rsi), %r12
29883+ movq 0x8*7(%rsi), %r13
29884
29885 prefetcht0 5*64(%rsi)
29886
29887@@ -49,7 +50,7 @@ ENTRY(copy_page)
29888 movq %r9, 0x8*4(%rdi)
29889 movq %r10, 0x8*5(%rdi)
29890 movq %r11, 0x8*6(%rdi)
29891- movq %r12, 0x8*7(%rdi)
29892+ movq %r13, 0x8*7(%rdi)
29893
29894 leaq 64 (%rsi), %rsi
29895 leaq 64 (%rdi), %rdi
29896@@ -68,7 +69,7 @@ ENTRY(copy_page)
29897 movq 0x8*4(%rsi), %r9
29898 movq 0x8*5(%rsi), %r10
29899 movq 0x8*6(%rsi), %r11
29900- movq 0x8*7(%rsi), %r12
29901+ movq 0x8*7(%rsi), %r13
29902
29903 movq %rax, 0x8*0(%rdi)
29904 movq %rbx, 0x8*1(%rdi)
29905@@ -77,7 +78,7 @@ ENTRY(copy_page)
29906 movq %r9, 0x8*4(%rdi)
29907 movq %r10, 0x8*5(%rdi)
29908 movq %r11, 0x8*6(%rdi)
29909- movq %r12, 0x8*7(%rdi)
29910+ movq %r13, 0x8*7(%rdi)
29911
29912 leaq 64(%rdi), %rdi
29913 leaq 64(%rsi), %rsi
29914@@ -85,10 +86,11 @@ ENTRY(copy_page)
29915
29916 movq (%rsp), %rbx
29917 CFI_RESTORE rbx
29918- movq 1*8(%rsp), %r12
29919- CFI_RESTORE r12
29920+ movq 1*8(%rsp), %r13
29921+ CFI_RESTORE r13
29922 addq $2*8, %rsp
29923 CFI_ADJUST_CFA_OFFSET -2*8
29924+ pax_force_retaddr
29925 ret
29926 .Lcopy_page_end:
29927 CFI_ENDPROC
29928@@ -99,7 +101,7 @@ ENDPROC(copy_page)
29929
29930 #include <asm/cpufeature.h>
29931
29932- .section .altinstr_replacement,"ax"
29933+ .section .altinstr_replacement,"a"
29934 1: .byte 0xeb /* jmp <disp8> */
29935 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
29936 2:
29937diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
29938index dee945d..a84067b 100644
29939--- a/arch/x86/lib/copy_user_64.S
29940+++ b/arch/x86/lib/copy_user_64.S
29941@@ -18,31 +18,7 @@
29942 #include <asm/alternative-asm.h>
29943 #include <asm/asm.h>
29944 #include <asm/smap.h>
29945-
29946-/*
29947- * By placing feature2 after feature1 in altinstructions section, we logically
29948- * implement:
29949- * If CPU has feature2, jmp to alt2 is used
29950- * else if CPU has feature1, jmp to alt1 is used
29951- * else jmp to orig is used.
29952- */
29953- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
29954-0:
29955- .byte 0xe9 /* 32bit jump */
29956- .long \orig-1f /* by default jump to orig */
29957-1:
29958- .section .altinstr_replacement,"ax"
29959-2: .byte 0xe9 /* near jump with 32bit immediate */
29960- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
29961-3: .byte 0xe9 /* near jump with 32bit immediate */
29962- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
29963- .previous
29964-
29965- .section .altinstructions,"a"
29966- altinstruction_entry 0b,2b,\feature1,5,5
29967- altinstruction_entry 0b,3b,\feature2,5,5
29968- .previous
29969- .endm
29970+#include <asm/pgtable.h>
29971
29972 .macro ALIGN_DESTINATION
29973 #ifdef FIX_ALIGNMENT
29974@@ -70,52 +46,6 @@
29975 #endif
29976 .endm
29977
29978-/* Standard copy_to_user with segment limit checking */
29979-ENTRY(_copy_to_user)
29980- CFI_STARTPROC
29981- GET_THREAD_INFO(%rax)
29982- movq %rdi,%rcx
29983- addq %rdx,%rcx
29984- jc bad_to_user
29985- cmpq TI_addr_limit(%rax),%rcx
29986- ja bad_to_user
29987- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29988- copy_user_generic_unrolled,copy_user_generic_string, \
29989- copy_user_enhanced_fast_string
29990- CFI_ENDPROC
29991-ENDPROC(_copy_to_user)
29992-
29993-/* Standard copy_from_user with segment limit checking */
29994-ENTRY(_copy_from_user)
29995- CFI_STARTPROC
29996- GET_THREAD_INFO(%rax)
29997- movq %rsi,%rcx
29998- addq %rdx,%rcx
29999- jc bad_from_user
30000- cmpq TI_addr_limit(%rax),%rcx
30001- ja bad_from_user
30002- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
30003- copy_user_generic_unrolled,copy_user_generic_string, \
30004- copy_user_enhanced_fast_string
30005- CFI_ENDPROC
30006-ENDPROC(_copy_from_user)
30007-
30008- .section .fixup,"ax"
30009- /* must zero dest */
30010-ENTRY(bad_from_user)
30011-bad_from_user:
30012- CFI_STARTPROC
30013- movl %edx,%ecx
30014- xorl %eax,%eax
30015- rep
30016- stosb
30017-bad_to_user:
30018- movl %edx,%eax
30019- ret
30020- CFI_ENDPROC
30021-ENDPROC(bad_from_user)
30022- .previous
30023-
30024 /*
30025 * copy_user_generic_unrolled - memory copy with exception handling.
30026 * This version is for CPUs like P4 that don't have efficient micro
30027@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
30028 */
30029 ENTRY(copy_user_generic_unrolled)
30030 CFI_STARTPROC
30031+ ASM_PAX_OPEN_USERLAND
30032 ASM_STAC
30033 cmpl $8,%edx
30034 jb 20f /* less than 8 bytes, go to byte copy loop */
30035@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
30036 jnz 21b
30037 23: xor %eax,%eax
30038 ASM_CLAC
30039+ ASM_PAX_CLOSE_USERLAND
30040+ pax_force_retaddr
30041 ret
30042
30043 .section .fixup,"ax"
30044@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
30045 */
30046 ENTRY(copy_user_generic_string)
30047 CFI_STARTPROC
30048+ ASM_PAX_OPEN_USERLAND
30049 ASM_STAC
30050 cmpl $8,%edx
30051 jb 2f /* less than 8 bytes, go to byte copy loop */
30052@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
30053 movsb
30054 xorl %eax,%eax
30055 ASM_CLAC
30056+ ASM_PAX_CLOSE_USERLAND
30057+ pax_force_retaddr
30058 ret
30059
30060 .section .fixup,"ax"
30061@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
30062 */
30063 ENTRY(copy_user_enhanced_fast_string)
30064 CFI_STARTPROC
30065+ ASM_PAX_OPEN_USERLAND
30066 ASM_STAC
30067 movl %edx,%ecx
30068 1: rep
30069 movsb
30070 xorl %eax,%eax
30071 ASM_CLAC
30072+ ASM_PAX_CLOSE_USERLAND
30073+ pax_force_retaddr
30074 ret
30075
30076 .section .fixup,"ax"
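
The deleted `_copy_to_user`/`_copy_from_user` stubs performed the range check in assembly — a wrap-around test (`jc`) plus a compare against the thread's `addr_limit` — before dispatching to the fastest copier via `ALTERNATIVE_JUMP`; under this patch that checking moves into the C side of the PaX uaccess layer, and the copy loops themselves gain `ASM_PAX_OPEN_USERLAND`/`ASM_PAX_CLOSE_USERLAND` brackets. What the removed prologue verified, restated in C (a sketch; field names per the 3.16-era x86 headers):

    /* Reject ranges that wrap around (the asm's "jc bad_*_user") or reach
     * past the thread's address limit, before any bytes are copied. */
    static inline int range_ok(unsigned long addr, unsigned long len)
    {
        unsigned long end = addr + len;

        if (end < addr)
            return 0;
        return end <= current_thread_info()->addr_limit.seg;
    }
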
30077diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
30078index 6a4f43c..c70fb52 100644
30079--- a/arch/x86/lib/copy_user_nocache_64.S
30080+++ b/arch/x86/lib/copy_user_nocache_64.S
30081@@ -8,6 +8,7 @@
30082
30083 #include <linux/linkage.h>
30084 #include <asm/dwarf2.h>
30085+#include <asm/alternative-asm.h>
30086
30087 #define FIX_ALIGNMENT 1
30088
30089@@ -16,6 +17,7 @@
30090 #include <asm/thread_info.h>
30091 #include <asm/asm.h>
30092 #include <asm/smap.h>
30093+#include <asm/pgtable.h>
30094
30095 .macro ALIGN_DESTINATION
30096 #ifdef FIX_ALIGNMENT
30097@@ -49,6 +51,16 @@
30098 */
30099 ENTRY(__copy_user_nocache)
30100 CFI_STARTPROC
30101+
30102+#ifdef CONFIG_PAX_MEMORY_UDEREF
30103+ mov pax_user_shadow_base,%rcx
30104+ cmp %rcx,%rsi
30105+ jae 1f
30106+ add %rcx,%rsi
30107+1:
30108+#endif
30109+
30110+ ASM_PAX_OPEN_USERLAND
30111 ASM_STAC
30112 cmpl $8,%edx
30113 jb 20f /* less than 8 bytes, go to byte copy loop */
30114@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
30115 jnz 21b
30116 23: xorl %eax,%eax
30117 ASM_CLAC
30118+ ASM_PAX_CLOSE_USERLAND
30119 sfence
30120+ pax_force_retaddr
30121 ret
30122
30123 .section .fixup,"ax"
30124diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
30125index 2419d5f..fe52d0e 100644
30126--- a/arch/x86/lib/csum-copy_64.S
30127+++ b/arch/x86/lib/csum-copy_64.S
30128@@ -9,6 +9,7 @@
30129 #include <asm/dwarf2.h>
30130 #include <asm/errno.h>
30131 #include <asm/asm.h>
30132+#include <asm/alternative-asm.h>
30133
30134 /*
30135 * Checksum copy with exception handling.
30136@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
30137 CFI_ADJUST_CFA_OFFSET 7*8
30138 movq %rbx, 2*8(%rsp)
30139 CFI_REL_OFFSET rbx, 2*8
30140- movq %r12, 3*8(%rsp)
30141- CFI_REL_OFFSET r12, 3*8
30142+ movq %r15, 3*8(%rsp)
30143+ CFI_REL_OFFSET r15, 3*8
30144 movq %r14, 4*8(%rsp)
30145 CFI_REL_OFFSET r14, 4*8
30146 movq %r13, 5*8(%rsp)
30147@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
30148 movl %edx, %ecx
30149
30150 xorl %r9d, %r9d
30151- movq %rcx, %r12
30152+ movq %rcx, %r15
30153
30154- shrq $6, %r12
30155+ shrq $6, %r15
30156 jz .Lhandle_tail /* < 64 */
30157
30158 clc
30159
30160 /* main loop. clear in 64 byte blocks */
30161 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
30162- /* r11: temp3, rdx: temp4, r12 loopcnt */
30163+ /* r11: temp3, rdx: temp4, r15 loopcnt */
30164 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
30165 .p2align 4
30166 .Lloop:
30167@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
30168 adcq %r14, %rax
30169 adcq %r13, %rax
30170
30171- decl %r12d
30172+ decl %r15d
30173
30174 dest
30175 movq %rbx, (%rsi)
30176@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
30177 .Lende:
30178 movq 2*8(%rsp), %rbx
30179 CFI_RESTORE rbx
30180- movq 3*8(%rsp), %r12
30181- CFI_RESTORE r12
30182+ movq 3*8(%rsp), %r15
30183+ CFI_RESTORE r15
30184 movq 4*8(%rsp), %r14
30185 CFI_RESTORE r14
30186 movq 5*8(%rsp), %r13
30187@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
30188 CFI_RESTORE rbp
30189 addq $7*8, %rsp
30190 CFI_ADJUST_CFA_OFFSET -7*8
30191+ pax_force_retaddr
30192 ret
30193 CFI_RESTORE_STATE
30194
30195diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
30196index 7609e0e..b449b98 100644
30197--- a/arch/x86/lib/csum-wrappers_64.c
30198+++ b/arch/x86/lib/csum-wrappers_64.c
30199@@ -53,10 +53,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
30200 len -= 2;
30201 }
30202 }
30203+ pax_open_userland();
30204 stac();
30205- isum = csum_partial_copy_generic((__force const void *)src,
30206+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
30207 dst, len, isum, errp, NULL);
30208 clac();
30209+ pax_close_userland();
30210 if (unlikely(*errp))
30211 goto out_err;
30212
30213@@ -110,10 +112,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
30214 }
30215
30216 *errp = 0;
30217+ pax_open_userland();
30218 stac();
30219- ret = csum_partial_copy_generic(src, (void __force *)dst,
30220+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
30221 len, isum, NULL, errp);
30222 clac();
30223+ pax_close_userland();
30224 return ret;
30225 }
30226 EXPORT_SYMBOL(csum_partial_copy_to_user);
30227diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
30228index a451235..1daa956 100644
30229--- a/arch/x86/lib/getuser.S
30230+++ b/arch/x86/lib/getuser.S
30231@@ -33,17 +33,40 @@
30232 #include <asm/thread_info.h>
30233 #include <asm/asm.h>
30234 #include <asm/smap.h>
30235+#include <asm/segment.h>
30236+#include <asm/pgtable.h>
30237+#include <asm/alternative-asm.h>
30238+
30239+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30240+#define __copyuser_seg gs;
30241+#else
30242+#define __copyuser_seg
30243+#endif
30244
30245 .text
30246 ENTRY(__get_user_1)
30247 CFI_STARTPROC
30248+
30249+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30250 GET_THREAD_INFO(%_ASM_DX)
30251 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30252 jae bad_get_user
30253 ASM_STAC
30254-1: movzbl (%_ASM_AX),%edx
30255+
30256+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30257+ mov pax_user_shadow_base,%_ASM_DX
30258+ cmp %_ASM_DX,%_ASM_AX
30259+ jae 1234f
30260+ add %_ASM_DX,%_ASM_AX
30261+1234:
30262+#endif
30263+
30264+#endif
30265+
30266+1: __copyuser_seg movzbl (%_ASM_AX),%edx
30267 xor %eax,%eax
30268 ASM_CLAC
30269+ pax_force_retaddr
30270 ret
30271 CFI_ENDPROC
30272 ENDPROC(__get_user_1)
30273@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
30274 ENTRY(__get_user_2)
30275 CFI_STARTPROC
30276 add $1,%_ASM_AX
30277+
30278+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30279 jc bad_get_user
30280 GET_THREAD_INFO(%_ASM_DX)
30281 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30282 jae bad_get_user
30283 ASM_STAC
30284-2: movzwl -1(%_ASM_AX),%edx
30285+
30286+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30287+ mov pax_user_shadow_base,%_ASM_DX
30288+ cmp %_ASM_DX,%_ASM_AX
30289+ jae 1234f
30290+ add %_ASM_DX,%_ASM_AX
30291+1234:
30292+#endif
30293+
30294+#endif
30295+
30296+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
30297 xor %eax,%eax
30298 ASM_CLAC
30299+ pax_force_retaddr
30300 ret
30301 CFI_ENDPROC
30302 ENDPROC(__get_user_2)
30303@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
30304 ENTRY(__get_user_4)
30305 CFI_STARTPROC
30306 add $3,%_ASM_AX
30307+
30308+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30309 jc bad_get_user
30310 GET_THREAD_INFO(%_ASM_DX)
30311 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30312 jae bad_get_user
30313 ASM_STAC
30314-3: movl -3(%_ASM_AX),%edx
30315+
30316+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30317+ mov pax_user_shadow_base,%_ASM_DX
30318+ cmp %_ASM_DX,%_ASM_AX
30319+ jae 1234f
30320+ add %_ASM_DX,%_ASM_AX
30321+1234:
30322+#endif
30323+
30324+#endif
30325+
30326+3: __copyuser_seg movl -3(%_ASM_AX),%edx
30327 xor %eax,%eax
30328 ASM_CLAC
30329+ pax_force_retaddr
30330 ret
30331 CFI_ENDPROC
30332 ENDPROC(__get_user_4)
30333@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
30334 GET_THREAD_INFO(%_ASM_DX)
30335 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30336 jae bad_get_user
30337+
30338+#ifdef CONFIG_PAX_MEMORY_UDEREF
30339+ mov pax_user_shadow_base,%_ASM_DX
30340+ cmp %_ASM_DX,%_ASM_AX
30341+ jae 1234f
30342+ add %_ASM_DX,%_ASM_AX
30343+1234:
30344+#endif
30345+
30346 ASM_STAC
30347 4: movq -7(%_ASM_AX),%rdx
30348 xor %eax,%eax
30349 ASM_CLAC
30350+ pax_force_retaddr
30351 ret
30352 #else
30353 add $7,%_ASM_AX
30354@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
30355 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30356 jae bad_get_user_8
30357 ASM_STAC
30358-4: movl -7(%_ASM_AX),%edx
30359-5: movl -3(%_ASM_AX),%ecx
30360+4: __copyuser_seg movl -7(%_ASM_AX),%edx
30361+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
30362 xor %eax,%eax
30363 ASM_CLAC
30364+ pax_force_retaddr
30365 ret
30366 #endif
30367 CFI_ENDPROC
30368@@ -113,6 +175,7 @@ bad_get_user:
30369 xor %edx,%edx
30370 mov $(-EFAULT),%_ASM_AX
30371 ASM_CLAC
30372+ pax_force_retaddr
30373 ret
30374 CFI_ENDPROC
30375 END(bad_get_user)
30376@@ -124,6 +187,7 @@ bad_get_user_8:
30377 xor %ecx,%ecx
30378 mov $(-EFAULT),%_ASM_AX
30379 ASM_CLAC
30380+ pax_force_retaddr
30381 ret
30382 CFI_ENDPROC
30383 END(bad_get_user_8)
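
Under UDEREF the kernel no longer maps userland at its own addresses: on x86_64 user memory is reached through a shadow alias starting at `pax_user_shadow_base`, so each `__get_user_*` rebases the incoming pointer before dereferencing it, while on 32-bit the `__copyuser_seg` prefix routes the load through the `%gs` userland segment instead. The rebase that the added cmp/jae/add sequence performs, in C (a sketch):

    /* Pointers already at or above the shadow base are used as-is;
     * anything below is shifted into the shadow alias of userland. */
    static inline unsigned long uderef_rebase(unsigned long addr)
    {
        if (addr < pax_user_shadow_base)
            addr += pax_user_shadow_base;
        return addr;
    }
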
30384diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
30385index 54fcffe..7be149e 100644
30386--- a/arch/x86/lib/insn.c
30387+++ b/arch/x86/lib/insn.c
30388@@ -20,8 +20,10 @@
30389
30390 #ifdef __KERNEL__
30391 #include <linux/string.h>
30392+#include <asm/pgtable_types.h>
30393 #else
30394 #include <string.h>
30395+#define ktla_ktva(addr) addr
30396 #endif
30397 #include <asm/inat.h>
30398 #include <asm/insn.h>
30399@@ -53,8 +55,8 @@
30400 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
30401 {
30402 memset(insn, 0, sizeof(*insn));
30403- insn->kaddr = kaddr;
30404- insn->next_byte = kaddr;
30405+ insn->kaddr = ktla_ktva(kaddr);
30406+ insn->next_byte = ktla_ktva(kaddr);
30407 insn->x86_64 = x86_64 ? 1 : 0;
30408 insn->opnd_bytes = 4;
30409 if (x86_64)
30410diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
30411index 05a95e7..326f2fa 100644
30412--- a/arch/x86/lib/iomap_copy_64.S
30413+++ b/arch/x86/lib/iomap_copy_64.S
30414@@ -17,6 +17,7 @@
30415
30416 #include <linux/linkage.h>
30417 #include <asm/dwarf2.h>
30418+#include <asm/alternative-asm.h>
30419
30420 /*
30421 * override generic version in lib/iomap_copy.c
30422@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
30423 CFI_STARTPROC
30424 movl %edx,%ecx
30425 rep movsd
30426+ pax_force_retaddr
30427 ret
30428 CFI_ENDPROC
30429 ENDPROC(__iowrite32_copy)
30430diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
30431index 56313a3..0db417e 100644
30432--- a/arch/x86/lib/memcpy_64.S
30433+++ b/arch/x86/lib/memcpy_64.S
30434@@ -24,7 +24,7 @@
30435 * This gets patched over the unrolled variant (below) via the
30436 * alternative instructions framework:
30437 */
30438- .section .altinstr_replacement, "ax", @progbits
30439+ .section .altinstr_replacement, "a", @progbits
30440 .Lmemcpy_c:
30441 movq %rdi, %rax
30442 movq %rdx, %rcx
30443@@ -33,6 +33,7 @@
30444 rep movsq
30445 movl %edx, %ecx
30446 rep movsb
30447+ pax_force_retaddr
30448 ret
30449 .Lmemcpy_e:
30450 .previous
30451@@ -44,11 +45,12 @@
30452 * This gets patched over the unrolled variant (below) via the
30453 * alternative instructions framework:
30454 */
30455- .section .altinstr_replacement, "ax", @progbits
30456+ .section .altinstr_replacement, "a", @progbits
30457 .Lmemcpy_c_e:
30458 movq %rdi, %rax
30459 movq %rdx, %rcx
30460 rep movsb
30461+ pax_force_retaddr
30462 ret
30463 .Lmemcpy_e_e:
30464 .previous
30465@@ -136,6 +138,7 @@ ENTRY(memcpy)
30466 movq %r9, 1*8(%rdi)
30467 movq %r10, -2*8(%rdi, %rdx)
30468 movq %r11, -1*8(%rdi, %rdx)
30469+ pax_force_retaddr
30470 retq
30471 .p2align 4
30472 .Lless_16bytes:
30473@@ -148,6 +151,7 @@ ENTRY(memcpy)
30474 movq -1*8(%rsi, %rdx), %r9
30475 movq %r8, 0*8(%rdi)
30476 movq %r9, -1*8(%rdi, %rdx)
30477+ pax_force_retaddr
30478 retq
30479 .p2align 4
30480 .Lless_8bytes:
30481@@ -161,6 +165,7 @@ ENTRY(memcpy)
30482 movl -4(%rsi, %rdx), %r8d
30483 movl %ecx, (%rdi)
30484 movl %r8d, -4(%rdi, %rdx)
30485+ pax_force_retaddr
30486 retq
30487 .p2align 4
30488 .Lless_3bytes:
30489@@ -179,6 +184,7 @@ ENTRY(memcpy)
30490 movb %cl, (%rdi)
30491
30492 .Lend:
30493+ pax_force_retaddr
30494 retq
30495 CFI_ENDPROC
30496 ENDPROC(memcpy)
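The "ax" to "a" flag change on .altinstr_replacement recurs in memmove_64.S and memset_64.S below for the same reason: the section holds replacement templates that the alternatives framework copies over the live function at patch time, so its bytes are read, never executed in place, and under KERNEXEC the section must not be mapped executable. A hedged sketch of the consumer side (struct alt_instr is abbreviated from the 3.16 layout; text_poke_early() is the early patching helper):

	struct alt_instr {
		s32 instr_offset;	/* original instruction, relative */
		s32 repl_offset;	/* replacement template, relative */
		u16 cpuid;		/* feature bit gating the swap */
		u8  instrlen, replacementlen;
	};

	static void __init apply_one(struct alt_instr *a)
	{
		u8 *instr = (u8 *)&a->instr_offset + a->instr_offset;
		u8 *repl  = (u8 *)&a->repl_offset + a->repl_offset;

		if (!boot_cpu_has(a->cpuid))
			return;
		/* reads from .altinstr_replacement, writes to kernel
		 * text: "a" suffices for the source section */
		text_poke_early(instr, repl, a->replacementlen);
	}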
30497diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
30498index 65268a6..dd1de11 100644
30499--- a/arch/x86/lib/memmove_64.S
30500+++ b/arch/x86/lib/memmove_64.S
30501@@ -202,14 +202,16 @@ ENTRY(memmove)
30502 movb (%rsi), %r11b
30503 movb %r11b, (%rdi)
30504 13:
30505+ pax_force_retaddr
30506 retq
30507 CFI_ENDPROC
30508
30509- .section .altinstr_replacement,"ax"
30510+ .section .altinstr_replacement,"a"
30511 .Lmemmove_begin_forward_efs:
30512 /* Forward moving data. */
30513 movq %rdx, %rcx
30514 rep movsb
30515+ pax_force_retaddr
30516 retq
30517 .Lmemmove_end_forward_efs:
30518 .previous
30519diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
30520index 2dcb380..2eb79fe 100644
30521--- a/arch/x86/lib/memset_64.S
30522+++ b/arch/x86/lib/memset_64.S
30523@@ -16,7 +16,7 @@
30524 *
30525 * rax original destination
30526 */
30527- .section .altinstr_replacement, "ax", @progbits
30528+ .section .altinstr_replacement, "a", @progbits
30529 .Lmemset_c:
30530 movq %rdi,%r9
30531 movq %rdx,%rcx
30532@@ -30,6 +30,7 @@
30533 movl %edx,%ecx
30534 rep stosb
30535 movq %r9,%rax
30536+ pax_force_retaddr
30537 ret
30538 .Lmemset_e:
30539 .previous
30540@@ -45,13 +46,14 @@
30541 *
30542 * rax original destination
30543 */
30544- .section .altinstr_replacement, "ax", @progbits
30545+ .section .altinstr_replacement, "a", @progbits
30546 .Lmemset_c_e:
30547 movq %rdi,%r9
30548 movb %sil,%al
30549 movq %rdx,%rcx
30550 rep stosb
30551 movq %r9,%rax
30552+ pax_force_retaddr
30553 ret
30554 .Lmemset_e_e:
30555 .previous
30556@@ -118,6 +120,7 @@ ENTRY(__memset)
30557
30558 .Lende:
30559 movq %r10,%rax
30560+ pax_force_retaddr
30561 ret
30562
30563 CFI_RESTORE_STATE
30564diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
30565index c9f2d9b..e7fd2c0 100644
30566--- a/arch/x86/lib/mmx_32.c
30567+++ b/arch/x86/lib/mmx_32.c
30568@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30569 {
30570 void *p;
30571 int i;
30572+ unsigned long cr0;
30573
30574 if (unlikely(in_interrupt()))
30575 return __memcpy(to, from, len);
30576@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30577 kernel_fpu_begin();
30578
30579 __asm__ __volatile__ (
30580- "1: prefetch (%0)\n" /* This set is 28 bytes */
30581- " prefetch 64(%0)\n"
30582- " prefetch 128(%0)\n"
30583- " prefetch 192(%0)\n"
30584- " prefetch 256(%0)\n"
30585+ "1: prefetch (%1)\n" /* This set is 28 bytes */
30586+ " prefetch 64(%1)\n"
30587+ " prefetch 128(%1)\n"
30588+ " prefetch 192(%1)\n"
30589+ " prefetch 256(%1)\n"
30590 "2: \n"
30591 ".section .fixup, \"ax\"\n"
30592- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30593+ "3: \n"
30594+
30595+#ifdef CONFIG_PAX_KERNEXEC
30596+ " movl %%cr0, %0\n"
30597+ " movl %0, %%eax\n"
30598+ " andl $0xFFFEFFFF, %%eax\n"
30599+ " movl %%eax, %%cr0\n"
30600+#endif
30601+
30602+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30603+
30604+#ifdef CONFIG_PAX_KERNEXEC
30605+ " movl %0, %%cr0\n"
30606+#endif
30607+
30608 " jmp 2b\n"
30609 ".previous\n"
30610 _ASM_EXTABLE(1b, 3b)
30611- : : "r" (from));
30612+ : "=&r" (cr0) : "r" (from) : "ax");
30613
30614 for ( ; i > 5; i--) {
30615 __asm__ __volatile__ (
30616- "1: prefetch 320(%0)\n"
30617- "2: movq (%0), %%mm0\n"
30618- " movq 8(%0), %%mm1\n"
30619- " movq 16(%0), %%mm2\n"
30620- " movq 24(%0), %%mm3\n"
30621- " movq %%mm0, (%1)\n"
30622- " movq %%mm1, 8(%1)\n"
30623- " movq %%mm2, 16(%1)\n"
30624- " movq %%mm3, 24(%1)\n"
30625- " movq 32(%0), %%mm0\n"
30626- " movq 40(%0), %%mm1\n"
30627- " movq 48(%0), %%mm2\n"
30628- " movq 56(%0), %%mm3\n"
30629- " movq %%mm0, 32(%1)\n"
30630- " movq %%mm1, 40(%1)\n"
30631- " movq %%mm2, 48(%1)\n"
30632- " movq %%mm3, 56(%1)\n"
30633+ "1: prefetch 320(%1)\n"
30634+ "2: movq (%1), %%mm0\n"
30635+ " movq 8(%1), %%mm1\n"
30636+ " movq 16(%1), %%mm2\n"
30637+ " movq 24(%1), %%mm3\n"
30638+ " movq %%mm0, (%2)\n"
30639+ " movq %%mm1, 8(%2)\n"
30640+ " movq %%mm2, 16(%2)\n"
30641+ " movq %%mm3, 24(%2)\n"
30642+ " movq 32(%1), %%mm0\n"
30643+ " movq 40(%1), %%mm1\n"
30644+ " movq 48(%1), %%mm2\n"
30645+ " movq 56(%1), %%mm3\n"
30646+ " movq %%mm0, 32(%2)\n"
30647+ " movq %%mm1, 40(%2)\n"
30648+ " movq %%mm2, 48(%2)\n"
30649+ " movq %%mm3, 56(%2)\n"
30650 ".section .fixup, \"ax\"\n"
30651- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30652+ "3:\n"
30653+
30654+#ifdef CONFIG_PAX_KERNEXEC
30655+ " movl %%cr0, %0\n"
30656+ " movl %0, %%eax\n"
30657+ " andl $0xFFFEFFFF, %%eax\n"
30658+ " movl %%eax, %%cr0\n"
30659+#endif
30660+
30661+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30662+
30663+#ifdef CONFIG_PAX_KERNEXEC
30664+ " movl %0, %%cr0\n"
30665+#endif
30666+
30667 " jmp 2b\n"
30668 ".previous\n"
30669 _ASM_EXTABLE(1b, 3b)
30670- : : "r" (from), "r" (to) : "memory");
30671+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30672
30673 from += 64;
30674 to += 64;
30675@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
30676 static void fast_copy_page(void *to, void *from)
30677 {
30678 int i;
30679+ unsigned long cr0;
30680
30681 kernel_fpu_begin();
30682
30683@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
30684 * but that is for later. -AV
30685 */
30686 __asm__ __volatile__(
30687- "1: prefetch (%0)\n"
30688- " prefetch 64(%0)\n"
30689- " prefetch 128(%0)\n"
30690- " prefetch 192(%0)\n"
30691- " prefetch 256(%0)\n"
30692+ "1: prefetch (%1)\n"
30693+ " prefetch 64(%1)\n"
30694+ " prefetch 128(%1)\n"
30695+ " prefetch 192(%1)\n"
30696+ " prefetch 256(%1)\n"
30697 "2: \n"
30698 ".section .fixup, \"ax\"\n"
30699- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30700+ "3: \n"
30701+
30702+#ifdef CONFIG_PAX_KERNEXEC
30703+ " movl %%cr0, %0\n"
30704+ " movl %0, %%eax\n"
30705+ " andl $0xFFFEFFFF, %%eax\n"
30706+ " movl %%eax, %%cr0\n"
30707+#endif
30708+
30709+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30710+
30711+#ifdef CONFIG_PAX_KERNEXEC
30712+ " movl %0, %%cr0\n"
30713+#endif
30714+
30715 " jmp 2b\n"
30716 ".previous\n"
30717- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30718+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30719
30720 for (i = 0; i < (4096-320)/64; i++) {
30721 __asm__ __volatile__ (
30722- "1: prefetch 320(%0)\n"
30723- "2: movq (%0), %%mm0\n"
30724- " movntq %%mm0, (%1)\n"
30725- " movq 8(%0), %%mm1\n"
30726- " movntq %%mm1, 8(%1)\n"
30727- " movq 16(%0), %%mm2\n"
30728- " movntq %%mm2, 16(%1)\n"
30729- " movq 24(%0), %%mm3\n"
30730- " movntq %%mm3, 24(%1)\n"
30731- " movq 32(%0), %%mm4\n"
30732- " movntq %%mm4, 32(%1)\n"
30733- " movq 40(%0), %%mm5\n"
30734- " movntq %%mm5, 40(%1)\n"
30735- " movq 48(%0), %%mm6\n"
30736- " movntq %%mm6, 48(%1)\n"
30737- " movq 56(%0), %%mm7\n"
30738- " movntq %%mm7, 56(%1)\n"
30739+ "1: prefetch 320(%1)\n"
30740+ "2: movq (%1), %%mm0\n"
30741+ " movntq %%mm0, (%2)\n"
30742+ " movq 8(%1), %%mm1\n"
30743+ " movntq %%mm1, 8(%2)\n"
30744+ " movq 16(%1), %%mm2\n"
30745+ " movntq %%mm2, 16(%2)\n"
30746+ " movq 24(%1), %%mm3\n"
30747+ " movntq %%mm3, 24(%2)\n"
30748+ " movq 32(%1), %%mm4\n"
30749+ " movntq %%mm4, 32(%2)\n"
30750+ " movq 40(%1), %%mm5\n"
30751+ " movntq %%mm5, 40(%2)\n"
30752+ " movq 48(%1), %%mm6\n"
30753+ " movntq %%mm6, 48(%2)\n"
30754+ " movq 56(%1), %%mm7\n"
30755+ " movntq %%mm7, 56(%2)\n"
30756 ".section .fixup, \"ax\"\n"
30757- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30758+ "3:\n"
30759+
30760+#ifdef CONFIG_PAX_KERNEXEC
30761+ " movl %%cr0, %0\n"
30762+ " movl %0, %%eax\n"
30763+ " andl $0xFFFEFFFF, %%eax\n"
30764+ " movl %%eax, %%cr0\n"
30765+#endif
30766+
30767+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30768+
30769+#ifdef CONFIG_PAX_KERNEXEC
30770+ " movl %0, %%cr0\n"
30771+#endif
30772+
30773 " jmp 2b\n"
30774 ".previous\n"
30775- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
30776+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30777
30778 from += 64;
30779 to += 64;
30780@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
30781 static void fast_copy_page(void *to, void *from)
30782 {
30783 int i;
30784+ unsigned long cr0;
30785
30786 kernel_fpu_begin();
30787
30788 __asm__ __volatile__ (
30789- "1: prefetch (%0)\n"
30790- " prefetch 64(%0)\n"
30791- " prefetch 128(%0)\n"
30792- " prefetch 192(%0)\n"
30793- " prefetch 256(%0)\n"
30794+ "1: prefetch (%1)\n"
30795+ " prefetch 64(%1)\n"
30796+ " prefetch 128(%1)\n"
30797+ " prefetch 192(%1)\n"
30798+ " prefetch 256(%1)\n"
30799 "2: \n"
30800 ".section .fixup, \"ax\"\n"
30801- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30802+ "3: \n"
30803+
30804+#ifdef CONFIG_PAX_KERNEXEC
30805+ " movl %%cr0, %0\n"
30806+ " movl %0, %%eax\n"
30807+ " andl $0xFFFEFFFF, %%eax\n"
30808+ " movl %%eax, %%cr0\n"
30809+#endif
30810+
30811+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30812+
30813+#ifdef CONFIG_PAX_KERNEXEC
30814+ " movl %0, %%cr0\n"
30815+#endif
30816+
30817 " jmp 2b\n"
30818 ".previous\n"
30819- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30820+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30821
30822 for (i = 0; i < 4096/64; i++) {
30823 __asm__ __volatile__ (
30824- "1: prefetch 320(%0)\n"
30825- "2: movq (%0), %%mm0\n"
30826- " movq 8(%0), %%mm1\n"
30827- " movq 16(%0), %%mm2\n"
30828- " movq 24(%0), %%mm3\n"
30829- " movq %%mm0, (%1)\n"
30830- " movq %%mm1, 8(%1)\n"
30831- " movq %%mm2, 16(%1)\n"
30832- " movq %%mm3, 24(%1)\n"
30833- " movq 32(%0), %%mm0\n"
30834- " movq 40(%0), %%mm1\n"
30835- " movq 48(%0), %%mm2\n"
30836- " movq 56(%0), %%mm3\n"
30837- " movq %%mm0, 32(%1)\n"
30838- " movq %%mm1, 40(%1)\n"
30839- " movq %%mm2, 48(%1)\n"
30840- " movq %%mm3, 56(%1)\n"
30841+ "1: prefetch 320(%1)\n"
30842+ "2: movq (%1), %%mm0\n"
30843+ " movq 8(%1), %%mm1\n"
30844+ " movq 16(%1), %%mm2\n"
30845+ " movq 24(%1), %%mm3\n"
30846+ " movq %%mm0, (%2)\n"
30847+ " movq %%mm1, 8(%2)\n"
30848+ " movq %%mm2, 16(%2)\n"
30849+ " movq %%mm3, 24(%2)\n"
30850+ " movq 32(%1), %%mm0\n"
30851+ " movq 40(%1), %%mm1\n"
30852+ " movq 48(%1), %%mm2\n"
30853+ " movq 56(%1), %%mm3\n"
30854+ " movq %%mm0, 32(%2)\n"
30855+ " movq %%mm1, 40(%2)\n"
30856+ " movq %%mm2, 48(%2)\n"
30857+ " movq %%mm3, 56(%2)\n"
30858 ".section .fixup, \"ax\"\n"
30859- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30860+ "3:\n"
30861+
30862+#ifdef CONFIG_PAX_KERNEXEC
30863+ " movl %%cr0, %0\n"
30864+ " movl %0, %%eax\n"
30865+ " andl $0xFFFEFFFF, %%eax\n"
30866+ " movl %%eax, %%cr0\n"
30867+#endif
30868+
30869+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30870+
30871+#ifdef CONFIG_PAX_KERNEXEC
30872+ " movl %0, %%cr0\n"
30873+#endif
30874+
30875 " jmp 2b\n"
30876 ".previous\n"
30877 _ASM_EXTABLE(1b, 3b)
30878- : : "r" (from), "r" (to) : "memory");
30879+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30880
30881 from += 64;
30882 to += 64;
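Two things change in every mmx_32.c fixup. First, the handler patches the faulting prefetch into a short jmp in place: 0x1AEB stored as a little-endian word is "eb 1a", jmp +26, and 0x05EB is jmp +5. That is a write to kernel text, so under KERNEXEC it is bracketed by clearing and restoring CR0.WP, which is what the 0xFFFEFFFF mask does (it clears bit 16). Second, the asm operands shift from %0/%1 to %1/%2 because the saved cr0 value now occupies output operand %0, with "ax" added to the clobbers for the scratch register. A hedged C sketch of the bracketing that the #ifdef blocks open-code:

	#include <asm/special_insns.h>

	static inline unsigned long kernexec_open_wp(void)
	{
		unsigned long cr0 = read_cr0();
		write_cr0(cr0 & 0xFFFEFFFFUL);	/* clear bit 16 = CR0.WP */
		return cr0;
	}

	static inline void kernexec_close_wp(unsigned long cr0)
	{
		write_cr0(cr0);			/* restore write protection */
	}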
30883diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
30884index f6d13ee..d789440 100644
30885--- a/arch/x86/lib/msr-reg.S
30886+++ b/arch/x86/lib/msr-reg.S
30887@@ -3,6 +3,7 @@
30888 #include <asm/dwarf2.h>
30889 #include <asm/asm.h>
30890 #include <asm/msr.h>
30891+#include <asm/alternative-asm.h>
30892
30893 #ifdef CONFIG_X86_64
30894 /*
30895@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
30896 movl %edi, 28(%r10)
30897 popq_cfi %rbp
30898 popq_cfi %rbx
30899+ pax_force_retaddr
30900 ret
30901 3:
30902 CFI_RESTORE_STATE
30903diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
30904index fc6ba17..d4d989d 100644
30905--- a/arch/x86/lib/putuser.S
30906+++ b/arch/x86/lib/putuser.S
30907@@ -16,7 +16,9 @@
30908 #include <asm/errno.h>
30909 #include <asm/asm.h>
30910 #include <asm/smap.h>
30911-
30912+#include <asm/segment.h>
30913+#include <asm/pgtable.h>
30914+#include <asm/alternative-asm.h>
30915
30916 /*
30917 * __put_user_X
30918@@ -30,57 +32,125 @@
30919 * as they get called from within inline assembly.
30920 */
30921
30922-#define ENTER CFI_STARTPROC ; \
30923- GET_THREAD_INFO(%_ASM_BX)
30924-#define EXIT ASM_CLAC ; \
30925- ret ; \
30926+#define ENTER CFI_STARTPROC
30927+#define EXIT ASM_CLAC ; \
30928+ pax_force_retaddr ; \
30929+ ret ; \
30930 CFI_ENDPROC
30931
30932+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30933+#define _DEST %_ASM_CX,%_ASM_BX
30934+#else
30935+#define _DEST %_ASM_CX
30936+#endif
30937+
30938+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30939+#define __copyuser_seg gs;
30940+#else
30941+#define __copyuser_seg
30942+#endif
30943+
30944 .text
30945 ENTRY(__put_user_1)
30946 ENTER
30947+
30948+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30949+ GET_THREAD_INFO(%_ASM_BX)
30950 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
30951 jae bad_put_user
30952 ASM_STAC
30953-1: movb %al,(%_ASM_CX)
30954+
30955+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30956+ mov pax_user_shadow_base,%_ASM_BX
30957+ cmp %_ASM_BX,%_ASM_CX
30958+ jb 1234f
30959+ xor %ebx,%ebx
30960+1234:
30961+#endif
30962+
30963+#endif
30964+
30965+1: __copyuser_seg movb %al,(_DEST)
30966 xor %eax,%eax
30967 EXIT
30968 ENDPROC(__put_user_1)
30969
30970 ENTRY(__put_user_2)
30971 ENTER
30972+
30973+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30974+ GET_THREAD_INFO(%_ASM_BX)
30975 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30976 sub $1,%_ASM_BX
30977 cmp %_ASM_BX,%_ASM_CX
30978 jae bad_put_user
30979 ASM_STAC
30980-2: movw %ax,(%_ASM_CX)
30981+
30982+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30983+ mov pax_user_shadow_base,%_ASM_BX
30984+ cmp %_ASM_BX,%_ASM_CX
30985+ jb 1234f
30986+ xor %ebx,%ebx
30987+1234:
30988+#endif
30989+
30990+#endif
30991+
30992+2: __copyuser_seg movw %ax,(_DEST)
30993 xor %eax,%eax
30994 EXIT
30995 ENDPROC(__put_user_2)
30996
30997 ENTRY(__put_user_4)
30998 ENTER
30999+
31000+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31001+ GET_THREAD_INFO(%_ASM_BX)
31002 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
31003 sub $3,%_ASM_BX
31004 cmp %_ASM_BX,%_ASM_CX
31005 jae bad_put_user
31006 ASM_STAC
31007-3: movl %eax,(%_ASM_CX)
31008+
31009+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31010+ mov pax_user_shadow_base,%_ASM_BX
31011+ cmp %_ASM_BX,%_ASM_CX
31012+ jb 1234f
31013+ xor %ebx,%ebx
31014+1234:
31015+#endif
31016+
31017+#endif
31018+
31019+3: __copyuser_seg movl %eax,(_DEST)
31020 xor %eax,%eax
31021 EXIT
31022 ENDPROC(__put_user_4)
31023
31024 ENTRY(__put_user_8)
31025 ENTER
31026+
31027+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31028+ GET_THREAD_INFO(%_ASM_BX)
31029 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
31030 sub $7,%_ASM_BX
31031 cmp %_ASM_BX,%_ASM_CX
31032 jae bad_put_user
31033 ASM_STAC
31034-4: mov %_ASM_AX,(%_ASM_CX)
31035+
31036+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31037+ mov pax_user_shadow_base,%_ASM_BX
31038+ cmp %_ASM_BX,%_ASM_CX
31039+ jb 1234f
31040+ xor %ebx,%ebx
31041+1234:
31042+#endif
31043+
31044+#endif
31045+
31046+4: __copyuser_seg mov %_ASM_AX,(_DEST)
31047 #ifdef CONFIG_X86_32
31048-5: movl %edx,4(%_ASM_CX)
31049+5: __copyuser_seg movl %edx,4(_DEST)
31050 #endif
31051 xor %eax,%eax
31052 EXIT
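putuser.S mirrors getuser.S with one addition: on 32-bit UDEREF the addr_limit comparison is compiled out entirely (the !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) guards), because user stores go through the "gs;" segment prefix instead, and %gs only ever spans the user window, so an out-of-range pointer faults at the segment level. A minimal sketch of a prefixed store, assuming the macro from the hunk and omitting fault handling:

	#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
	#define __copyuser_seg "gs;"
	#else
	#define __copyuser_seg ""
	#endif

	static inline void __put_user_byte_sketch(unsigned char val,
						  unsigned char __user *p)
	{
		asm volatile(__copyuser_seg " movb %1,%0"
			     : "=m" (*(unsigned char __force *)p)
			     : "iq" (val));
	}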
31053diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
31054index 1cad221..de671ee 100644
31055--- a/arch/x86/lib/rwlock.S
31056+++ b/arch/x86/lib/rwlock.S
31057@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
31058 FRAME
31059 0: LOCK_PREFIX
31060 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
31061+
31062+#ifdef CONFIG_PAX_REFCOUNT
31063+ jno 1234f
31064+ LOCK_PREFIX
31065+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
31066+ int $4
31067+1234:
31068+ _ASM_EXTABLE(1234b, 1234b)
31069+#endif
31070+
31071 1: rep; nop
31072 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
31073 jne 1b
31074 LOCK_PREFIX
31075 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
31076+
31077+#ifdef CONFIG_PAX_REFCOUNT
31078+ jno 1234f
31079+ LOCK_PREFIX
31080+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
31081+ int $4
31082+1234:
31083+ _ASM_EXTABLE(1234b, 1234b)
31084+#endif
31085+
31086 jnz 0b
31087 ENDFRAME
31088+ pax_force_retaddr
31089 ret
31090 CFI_ENDPROC
31091 END(__write_lock_failed)
31092@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
31093 FRAME
31094 0: LOCK_PREFIX
31095 READ_LOCK_SIZE(inc) (%__lock_ptr)
31096+
31097+#ifdef CONFIG_PAX_REFCOUNT
31098+ jno 1234f
31099+ LOCK_PREFIX
31100+ READ_LOCK_SIZE(dec) (%__lock_ptr)
31101+ int $4
31102+1234:
31103+ _ASM_EXTABLE(1234b, 1234b)
31104+#endif
31105+
31106 1: rep; nop
31107 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
31108 js 1b
31109 LOCK_PREFIX
31110 READ_LOCK_SIZE(dec) (%__lock_ptr)
31111+
31112+#ifdef CONFIG_PAX_REFCOUNT
31113+ jno 1234f
31114+ LOCK_PREFIX
31115+ READ_LOCK_SIZE(inc) (%__lock_ptr)
31116+ int $4
31117+1234:
31118+ _ASM_EXTABLE(1234b, 1234b)
31119+#endif
31120+
31121 js 0b
31122 ENDFRAME
31123+ pax_force_retaddr
31124 ret
31125 CFI_ENDPROC
31126 END(__read_lock_failed)
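Every CONFIG_PAX_REFCOUNT block here follows the same shape: perform the locked arithmetic, and if the signed result overflowed (the jno falls through), undo the operation and execute int $4 to raise #OF; the _ASM_EXTABLE(1234b, 1234b) entry marks the trap site as handled, so execution resumes at the label once the wrap has been reported. A hedged C-with-inline-asm sketch of the increment variant, with the extable plumbing elided:

	static inline void atomic_inc_checked_sketch(atomic_t *v)
	{
		asm volatile("lock; incl %0\n\t"
			     "jno 0f\n\t"
			     "lock; decl %0\n\t"	/* roll the wrap back */
			     "int $4\n"			/* raise #OF */
			     "0:"
			     : "+m" (v->counter)
			     :
			     : "memory", "cc");
	}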
31127diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
31128index 5dff5f0..cadebf4 100644
31129--- a/arch/x86/lib/rwsem.S
31130+++ b/arch/x86/lib/rwsem.S
31131@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
31132 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31133 CFI_RESTORE __ASM_REG(dx)
31134 restore_common_regs
31135+ pax_force_retaddr
31136 ret
31137 CFI_ENDPROC
31138 ENDPROC(call_rwsem_down_read_failed)
31139@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
31140 movq %rax,%rdi
31141 call rwsem_down_write_failed
31142 restore_common_regs
31143+ pax_force_retaddr
31144 ret
31145 CFI_ENDPROC
31146 ENDPROC(call_rwsem_down_write_failed)
31147@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
31148 movq %rax,%rdi
31149 call rwsem_wake
31150 restore_common_regs
31151-1: ret
31152+1: pax_force_retaddr
31153+ ret
31154 CFI_ENDPROC
31155 ENDPROC(call_rwsem_wake)
31156
31157@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
31158 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31159 CFI_RESTORE __ASM_REG(dx)
31160 restore_common_regs
31161+ pax_force_retaddr
31162 ret
31163 CFI_ENDPROC
31164 ENDPROC(call_rwsem_downgrade_wake)
31165diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
31166index 92d9fea..b2762c8 100644
31167--- a/arch/x86/lib/thunk_64.S
31168+++ b/arch/x86/lib/thunk_64.S
31169@@ -9,6 +9,7 @@
31170 #include <asm/dwarf2.h>
31171 #include <asm/calling.h>
31172 #include <asm/asm.h>
31173+#include <asm/alternative-asm.h>
31174
31175 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
31176 .macro THUNK name, func, put_ret_addr_in_rdi=0
31177@@ -16,11 +17,11 @@
31178 \name:
31179 CFI_STARTPROC
31180
31181- /* this one pushes 9 elems, the next one would be %rIP */
31182- SAVE_ARGS
31183+ /* this one pushes 15+1 elems, the next one would be %rIP */
31184+ SAVE_ARGS 8
31185
31186 .if \put_ret_addr_in_rdi
31187- movq_cfi_restore 9*8, rdi
31188+ movq_cfi_restore RIP, rdi
31189 .endif
31190
31191 call \func
31192@@ -40,9 +41,10 @@
31193
31194 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
31195 CFI_STARTPROC
31196- SAVE_ARGS
31197+ SAVE_ARGS 8
31198 restore:
31199- RESTORE_ARGS
31200+ RESTORE_ARGS 1,8
31201+ pax_force_retaddr
31202 ret
31203 CFI_ENDPROC
31204 _ASM_NOKPROBE(restore)
31205diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
31206index e2f5e21..4b22130 100644
31207--- a/arch/x86/lib/usercopy_32.c
31208+++ b/arch/x86/lib/usercopy_32.c
31209@@ -42,11 +42,13 @@ do { \
31210 int __d0; \
31211 might_fault(); \
31212 __asm__ __volatile__( \
31213+ __COPYUSER_SET_ES \
31214 ASM_STAC "\n" \
31215 "0: rep; stosl\n" \
31216 " movl %2,%0\n" \
31217 "1: rep; stosb\n" \
31218 "2: " ASM_CLAC "\n" \
31219+ __COPYUSER_RESTORE_ES \
31220 ".section .fixup,\"ax\"\n" \
31221 "3: lea 0(%2,%0,4),%0\n" \
31222 " jmp 2b\n" \
31223@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
31224
31225 #ifdef CONFIG_X86_INTEL_USERCOPY
31226 static unsigned long
31227-__copy_user_intel(void __user *to, const void *from, unsigned long size)
31228+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
31229 {
31230 int d0, d1;
31231 __asm__ __volatile__(
31232@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31233 " .align 2,0x90\n"
31234 "3: movl 0(%4), %%eax\n"
31235 "4: movl 4(%4), %%edx\n"
31236- "5: movl %%eax, 0(%3)\n"
31237- "6: movl %%edx, 4(%3)\n"
31238+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
31239+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
31240 "7: movl 8(%4), %%eax\n"
31241 "8: movl 12(%4),%%edx\n"
31242- "9: movl %%eax, 8(%3)\n"
31243- "10: movl %%edx, 12(%3)\n"
31244+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
31245+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
31246 "11: movl 16(%4), %%eax\n"
31247 "12: movl 20(%4), %%edx\n"
31248- "13: movl %%eax, 16(%3)\n"
31249- "14: movl %%edx, 20(%3)\n"
31250+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
31251+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
31252 "15: movl 24(%4), %%eax\n"
31253 "16: movl 28(%4), %%edx\n"
31254- "17: movl %%eax, 24(%3)\n"
31255- "18: movl %%edx, 28(%3)\n"
31256+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
31257+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
31258 "19: movl 32(%4), %%eax\n"
31259 "20: movl 36(%4), %%edx\n"
31260- "21: movl %%eax, 32(%3)\n"
31261- "22: movl %%edx, 36(%3)\n"
31262+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
31263+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
31264 "23: movl 40(%4), %%eax\n"
31265 "24: movl 44(%4), %%edx\n"
31266- "25: movl %%eax, 40(%3)\n"
31267- "26: movl %%edx, 44(%3)\n"
31268+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
31269+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
31270 "27: movl 48(%4), %%eax\n"
31271 "28: movl 52(%4), %%edx\n"
31272- "29: movl %%eax, 48(%3)\n"
31273- "30: movl %%edx, 52(%3)\n"
31274+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
31275+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
31276 "31: movl 56(%4), %%eax\n"
31277 "32: movl 60(%4), %%edx\n"
31278- "33: movl %%eax, 56(%3)\n"
31279- "34: movl %%edx, 60(%3)\n"
31280+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
31281+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
31282 " addl $-64, %0\n"
31283 " addl $64, %4\n"
31284 " addl $64, %3\n"
31285@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31286 " shrl $2, %0\n"
31287 " andl $3, %%eax\n"
31288 " cld\n"
31289+ __COPYUSER_SET_ES
31290 "99: rep; movsl\n"
31291 "36: movl %%eax, %0\n"
31292 "37: rep; movsb\n"
31293 "100:\n"
31294+ __COPYUSER_RESTORE_ES
31295+ ".section .fixup,\"ax\"\n"
31296+ "101: lea 0(%%eax,%0,4),%0\n"
31297+ " jmp 100b\n"
31298+ ".previous\n"
31299+ _ASM_EXTABLE(1b,100b)
31300+ _ASM_EXTABLE(2b,100b)
31301+ _ASM_EXTABLE(3b,100b)
31302+ _ASM_EXTABLE(4b,100b)
31303+ _ASM_EXTABLE(5b,100b)
31304+ _ASM_EXTABLE(6b,100b)
31305+ _ASM_EXTABLE(7b,100b)
31306+ _ASM_EXTABLE(8b,100b)
31307+ _ASM_EXTABLE(9b,100b)
31308+ _ASM_EXTABLE(10b,100b)
31309+ _ASM_EXTABLE(11b,100b)
31310+ _ASM_EXTABLE(12b,100b)
31311+ _ASM_EXTABLE(13b,100b)
31312+ _ASM_EXTABLE(14b,100b)
31313+ _ASM_EXTABLE(15b,100b)
31314+ _ASM_EXTABLE(16b,100b)
31315+ _ASM_EXTABLE(17b,100b)
31316+ _ASM_EXTABLE(18b,100b)
31317+ _ASM_EXTABLE(19b,100b)
31318+ _ASM_EXTABLE(20b,100b)
31319+ _ASM_EXTABLE(21b,100b)
31320+ _ASM_EXTABLE(22b,100b)
31321+ _ASM_EXTABLE(23b,100b)
31322+ _ASM_EXTABLE(24b,100b)
31323+ _ASM_EXTABLE(25b,100b)
31324+ _ASM_EXTABLE(26b,100b)
31325+ _ASM_EXTABLE(27b,100b)
31326+ _ASM_EXTABLE(28b,100b)
31327+ _ASM_EXTABLE(29b,100b)
31328+ _ASM_EXTABLE(30b,100b)
31329+ _ASM_EXTABLE(31b,100b)
31330+ _ASM_EXTABLE(32b,100b)
31331+ _ASM_EXTABLE(33b,100b)
31332+ _ASM_EXTABLE(34b,100b)
31333+ _ASM_EXTABLE(35b,100b)
31334+ _ASM_EXTABLE(36b,100b)
31335+ _ASM_EXTABLE(37b,100b)
31336+ _ASM_EXTABLE(99b,101b)
31337+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
31338+ : "1"(to), "2"(from), "0"(size)
31339+ : "eax", "edx", "memory");
31340+ return size;
31341+}
31342+
31343+static unsigned long
31344+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
31345+{
31346+ int d0, d1;
31347+ __asm__ __volatile__(
31348+ " .align 2,0x90\n"
31349+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
31350+ " cmpl $67, %0\n"
31351+ " jbe 3f\n"
31352+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
31353+ " .align 2,0x90\n"
31354+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
31355+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
31356+ "5: movl %%eax, 0(%3)\n"
31357+ "6: movl %%edx, 4(%3)\n"
31358+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
31359+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
31360+ "9: movl %%eax, 8(%3)\n"
31361+ "10: movl %%edx, 12(%3)\n"
31362+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
31363+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
31364+ "13: movl %%eax, 16(%3)\n"
31365+ "14: movl %%edx, 20(%3)\n"
31366+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
31367+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
31368+ "17: movl %%eax, 24(%3)\n"
31369+ "18: movl %%edx, 28(%3)\n"
31370+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
31371+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
31372+ "21: movl %%eax, 32(%3)\n"
31373+ "22: movl %%edx, 36(%3)\n"
31374+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
31375+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
31376+ "25: movl %%eax, 40(%3)\n"
31377+ "26: movl %%edx, 44(%3)\n"
31378+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
31379+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
31380+ "29: movl %%eax, 48(%3)\n"
31381+ "30: movl %%edx, 52(%3)\n"
31382+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
31383+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
31384+ "33: movl %%eax, 56(%3)\n"
31385+ "34: movl %%edx, 60(%3)\n"
31386+ " addl $-64, %0\n"
31387+ " addl $64, %4\n"
31388+ " addl $64, %3\n"
31389+ " cmpl $63, %0\n"
31390+ " ja 1b\n"
31391+ "35: movl %0, %%eax\n"
31392+ " shrl $2, %0\n"
31393+ " andl $3, %%eax\n"
31394+ " cld\n"
31395+ "99: rep; "__copyuser_seg" movsl\n"
31396+ "36: movl %%eax, %0\n"
31397+ "37: rep; "__copyuser_seg" movsb\n"
31398+ "100:\n"
31399 ".section .fixup,\"ax\"\n"
31400 "101: lea 0(%%eax,%0,4),%0\n"
31401 " jmp 100b\n"
31402@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31403 int d0, d1;
31404 __asm__ __volatile__(
31405 " .align 2,0x90\n"
31406- "0: movl 32(%4), %%eax\n"
31407+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31408 " cmpl $67, %0\n"
31409 " jbe 2f\n"
31410- "1: movl 64(%4), %%eax\n"
31411+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31412 " .align 2,0x90\n"
31413- "2: movl 0(%4), %%eax\n"
31414- "21: movl 4(%4), %%edx\n"
31415+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31416+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31417 " movl %%eax, 0(%3)\n"
31418 " movl %%edx, 4(%3)\n"
31419- "3: movl 8(%4), %%eax\n"
31420- "31: movl 12(%4),%%edx\n"
31421+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31422+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31423 " movl %%eax, 8(%3)\n"
31424 " movl %%edx, 12(%3)\n"
31425- "4: movl 16(%4), %%eax\n"
31426- "41: movl 20(%4), %%edx\n"
31427+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31428+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31429 " movl %%eax, 16(%3)\n"
31430 " movl %%edx, 20(%3)\n"
31431- "10: movl 24(%4), %%eax\n"
31432- "51: movl 28(%4), %%edx\n"
31433+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31434+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31435 " movl %%eax, 24(%3)\n"
31436 " movl %%edx, 28(%3)\n"
31437- "11: movl 32(%4), %%eax\n"
31438- "61: movl 36(%4), %%edx\n"
31439+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31440+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31441 " movl %%eax, 32(%3)\n"
31442 " movl %%edx, 36(%3)\n"
31443- "12: movl 40(%4), %%eax\n"
31444- "71: movl 44(%4), %%edx\n"
31445+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31446+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31447 " movl %%eax, 40(%3)\n"
31448 " movl %%edx, 44(%3)\n"
31449- "13: movl 48(%4), %%eax\n"
31450- "81: movl 52(%4), %%edx\n"
31451+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31452+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31453 " movl %%eax, 48(%3)\n"
31454 " movl %%edx, 52(%3)\n"
31455- "14: movl 56(%4), %%eax\n"
31456- "91: movl 60(%4), %%edx\n"
31457+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31458+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31459 " movl %%eax, 56(%3)\n"
31460 " movl %%edx, 60(%3)\n"
31461 " addl $-64, %0\n"
31462@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31463 " shrl $2, %0\n"
31464 " andl $3, %%eax\n"
31465 " cld\n"
31466- "6: rep; movsl\n"
31467+ "6: rep; "__copyuser_seg" movsl\n"
31468 " movl %%eax,%0\n"
31469- "7: rep; movsb\n"
31470+ "7: rep; "__copyuser_seg" movsb\n"
31471 "8:\n"
31472 ".section .fixup,\"ax\"\n"
31473 "9: lea 0(%%eax,%0,4),%0\n"
31474@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31475
31476 __asm__ __volatile__(
31477 " .align 2,0x90\n"
31478- "0: movl 32(%4), %%eax\n"
31479+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31480 " cmpl $67, %0\n"
31481 " jbe 2f\n"
31482- "1: movl 64(%4), %%eax\n"
31483+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31484 " .align 2,0x90\n"
31485- "2: movl 0(%4), %%eax\n"
31486- "21: movl 4(%4), %%edx\n"
31487+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31488+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31489 " movnti %%eax, 0(%3)\n"
31490 " movnti %%edx, 4(%3)\n"
31491- "3: movl 8(%4), %%eax\n"
31492- "31: movl 12(%4),%%edx\n"
31493+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31494+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31495 " movnti %%eax, 8(%3)\n"
31496 " movnti %%edx, 12(%3)\n"
31497- "4: movl 16(%4), %%eax\n"
31498- "41: movl 20(%4), %%edx\n"
31499+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31500+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31501 " movnti %%eax, 16(%3)\n"
31502 " movnti %%edx, 20(%3)\n"
31503- "10: movl 24(%4), %%eax\n"
31504- "51: movl 28(%4), %%edx\n"
31505+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31506+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31507 " movnti %%eax, 24(%3)\n"
31508 " movnti %%edx, 28(%3)\n"
31509- "11: movl 32(%4), %%eax\n"
31510- "61: movl 36(%4), %%edx\n"
31511+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31512+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31513 " movnti %%eax, 32(%3)\n"
31514 " movnti %%edx, 36(%3)\n"
31515- "12: movl 40(%4), %%eax\n"
31516- "71: movl 44(%4), %%edx\n"
31517+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31518+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31519 " movnti %%eax, 40(%3)\n"
31520 " movnti %%edx, 44(%3)\n"
31521- "13: movl 48(%4), %%eax\n"
31522- "81: movl 52(%4), %%edx\n"
31523+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31524+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31525 " movnti %%eax, 48(%3)\n"
31526 " movnti %%edx, 52(%3)\n"
31527- "14: movl 56(%4), %%eax\n"
31528- "91: movl 60(%4), %%edx\n"
31529+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31530+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31531 " movnti %%eax, 56(%3)\n"
31532 " movnti %%edx, 60(%3)\n"
31533 " addl $-64, %0\n"
31534@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31535 " shrl $2, %0\n"
31536 " andl $3, %%eax\n"
31537 " cld\n"
31538- "6: rep; movsl\n"
31539+ "6: rep; "__copyuser_seg" movsl\n"
31540 " movl %%eax,%0\n"
31541- "7: rep; movsb\n"
31542+ "7: rep; "__copyuser_seg" movsb\n"
31543 "8:\n"
31544 ".section .fixup,\"ax\"\n"
31545 "9: lea 0(%%eax,%0,4),%0\n"
31546@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
31547
31548 __asm__ __volatile__(
31549 " .align 2,0x90\n"
31550- "0: movl 32(%4), %%eax\n"
31551+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31552 " cmpl $67, %0\n"
31553 " jbe 2f\n"
31554- "1: movl 64(%4), %%eax\n"
31555+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31556 " .align 2,0x90\n"
31557- "2: movl 0(%4), %%eax\n"
31558- "21: movl 4(%4), %%edx\n"
31559+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31560+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31561 " movnti %%eax, 0(%3)\n"
31562 " movnti %%edx, 4(%3)\n"
31563- "3: movl 8(%4), %%eax\n"
31564- "31: movl 12(%4),%%edx\n"
31565+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31566+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31567 " movnti %%eax, 8(%3)\n"
31568 " movnti %%edx, 12(%3)\n"
31569- "4: movl 16(%4), %%eax\n"
31570- "41: movl 20(%4), %%edx\n"
31571+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31572+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31573 " movnti %%eax, 16(%3)\n"
31574 " movnti %%edx, 20(%3)\n"
31575- "10: movl 24(%4), %%eax\n"
31576- "51: movl 28(%4), %%edx\n"
31577+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31578+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31579 " movnti %%eax, 24(%3)\n"
31580 " movnti %%edx, 28(%3)\n"
31581- "11: movl 32(%4), %%eax\n"
31582- "61: movl 36(%4), %%edx\n"
31583+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31584+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31585 " movnti %%eax, 32(%3)\n"
31586 " movnti %%edx, 36(%3)\n"
31587- "12: movl 40(%4), %%eax\n"
31588- "71: movl 44(%4), %%edx\n"
31589+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31590+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31591 " movnti %%eax, 40(%3)\n"
31592 " movnti %%edx, 44(%3)\n"
31593- "13: movl 48(%4), %%eax\n"
31594- "81: movl 52(%4), %%edx\n"
31595+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31596+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31597 " movnti %%eax, 48(%3)\n"
31598 " movnti %%edx, 52(%3)\n"
31599- "14: movl 56(%4), %%eax\n"
31600- "91: movl 60(%4), %%edx\n"
31601+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31602+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31603 " movnti %%eax, 56(%3)\n"
31604 " movnti %%edx, 60(%3)\n"
31605 " addl $-64, %0\n"
31606@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
31607 " shrl $2, %0\n"
31608 " andl $3, %%eax\n"
31609 " cld\n"
31610- "6: rep; movsl\n"
31611+ "6: rep; "__copyuser_seg" movsl\n"
31612 " movl %%eax,%0\n"
31613- "7: rep; movsb\n"
31614+ "7: rep; "__copyuser_seg" movsb\n"
31615 "8:\n"
31616 ".section .fixup,\"ax\"\n"
31617 "9: lea 0(%%eax,%0,4),%0\n"
31618@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
31619 */
31620 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
31621 unsigned long size);
31622-unsigned long __copy_user_intel(void __user *to, const void *from,
31623+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
31624+ unsigned long size);
31625+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
31626 unsigned long size);
31627 unsigned long __copy_user_zeroing_intel_nocache(void *to,
31628 const void __user *from, unsigned long size);
31629 #endif /* CONFIG_X86_INTEL_USERCOPY */
31630
31631 /* Generic arbitrary sized copy. */
31632-#define __copy_user(to, from, size) \
31633+#define __copy_user(to, from, size, prefix, set, restore) \
31634 do { \
31635 int __d0, __d1, __d2; \
31636 __asm__ __volatile__( \
31637+ set \
31638 " cmp $7,%0\n" \
31639 " jbe 1f\n" \
31640 " movl %1,%0\n" \
31641 " negl %0\n" \
31642 " andl $7,%0\n" \
31643 " subl %0,%3\n" \
31644- "4: rep; movsb\n" \
31645+ "4: rep; "prefix"movsb\n" \
31646 " movl %3,%0\n" \
31647 " shrl $2,%0\n" \
31648 " andl $3,%3\n" \
31649 " .align 2,0x90\n" \
31650- "0: rep; movsl\n" \
31651+ "0: rep; "prefix"movsl\n" \
31652 " movl %3,%0\n" \
31653- "1: rep; movsb\n" \
31654+ "1: rep; "prefix"movsb\n" \
31655 "2:\n" \
31656+ restore \
31657 ".section .fixup,\"ax\"\n" \
31658 "5: addl %3,%0\n" \
31659 " jmp 2b\n" \
31660@@ -538,14 +650,14 @@ do { \
31661 " negl %0\n" \
31662 " andl $7,%0\n" \
31663 " subl %0,%3\n" \
31664- "4: rep; movsb\n" \
31665+ "4: rep; "__copyuser_seg"movsb\n" \
31666 " movl %3,%0\n" \
31667 " shrl $2,%0\n" \
31668 " andl $3,%3\n" \
31669 " .align 2,0x90\n" \
31670- "0: rep; movsl\n" \
31671+ "0: rep; "__copyuser_seg"movsl\n" \
31672 " movl %3,%0\n" \
31673- "1: rep; movsb\n" \
31674+ "1: rep; "__copyuser_seg"movsb\n" \
31675 "2:\n" \
31676 ".section .fixup,\"ax\"\n" \
31677 "5: addl %3,%0\n" \
31678@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
31679 {
31680 stac();
31681 if (movsl_is_ok(to, from, n))
31682- __copy_user(to, from, n);
31683+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
31684 else
31685- n = __copy_user_intel(to, from, n);
31686+ n = __generic_copy_to_user_intel(to, from, n);
31687 clac();
31688 return n;
31689 }
31690@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
31691 {
31692 stac();
31693 if (movsl_is_ok(to, from, n))
31694- __copy_user(to, from, n);
31695+ __copy_user(to, from, n, __copyuser_seg, "", "");
31696 else
31697- n = __copy_user_intel((void __user *)to,
31698- (const void *)from, n);
31699+ n = __generic_copy_from_user_intel(to, from, n);
31700 clac();
31701 return n;
31702 }
31703@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
31704 if (n > 64 && cpu_has_xmm2)
31705 n = __copy_user_intel_nocache(to, from, n);
31706 else
31707- __copy_user(to, from, n);
31708+ __copy_user(to, from, n, __copyuser_seg, "", "");
31709 #else
31710- __copy_user(to, from, n);
31711+ __copy_user(to, from, n, __copyuser_seg, "", "");
31712 #endif
31713 clac();
31714 return n;
31715 }
31716 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
31717
31718-/**
31719- * copy_to_user: - Copy a block of data into user space.
31720- * @to: Destination address, in user space.
31721- * @from: Source address, in kernel space.
31722- * @n: Number of bytes to copy.
31723- *
31724- * Context: User context only. This function may sleep.
31725- *
31726- * Copy data from kernel space to user space.
31727- *
31728- * Returns number of bytes that could not be copied.
31729- * On success, this will be zero.
31730- */
31731-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
31732+#ifdef CONFIG_PAX_MEMORY_UDEREF
31733+void __set_fs(mm_segment_t x)
31734 {
31735- if (access_ok(VERIFY_WRITE, to, n))
31736- n = __copy_to_user(to, from, n);
31737- return n;
31738+ switch (x.seg) {
31739+ case 0:
31740+ loadsegment(gs, 0);
31741+ break;
31742+ case TASK_SIZE_MAX:
31743+ loadsegment(gs, __USER_DS);
31744+ break;
31745+ case -1UL:
31746+ loadsegment(gs, __KERNEL_DS);
31747+ break;
31748+ default:
31749+ BUG();
31750+ }
31751 }
31752-EXPORT_SYMBOL(_copy_to_user);
31753+EXPORT_SYMBOL(__set_fs);
31754
31755-/**
31756- * copy_from_user: - Copy a block of data from user space.
31757- * @to: Destination address, in kernel space.
31758- * @from: Source address, in user space.
31759- * @n: Number of bytes to copy.
31760- *
31761- * Context: User context only. This function may sleep.
31762- *
31763- * Copy data from user space to kernel space.
31764- *
31765- * Returns number of bytes that could not be copied.
31766- * On success, this will be zero.
31767- *
31768- * If some data could not be copied, this function will pad the copied
31769- * data to the requested size using zero bytes.
31770- */
31771-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
31772+void set_fs(mm_segment_t x)
31773 {
31774- if (access_ok(VERIFY_READ, from, n))
31775- n = __copy_from_user(to, from, n);
31776- else
31777- memset(to, 0, n);
31778- return n;
31779+ current_thread_info()->addr_limit = x;
31780+ __set_fs(x);
31781 }
31782-EXPORT_SYMBOL(_copy_from_user);
31783+EXPORT_SYMBOL(set_fs);
31784+#endif
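The usercopy_32.c rewrite has three strands. The string copies grow either a "gs;" prefix (loads read %ds:%esi, which a prefix can redirect) or an %es swap via __COPYUSER_SET_ES/__COPYUSER_RESTORE_ES, because the destination of rep movs/stos is hard-wired to %es:%edi and no prefix can override it. __copy_user_intel() splits into per-direction __generic_copy_{to,from}_user_intel(), since the two directions now need the prefix on opposite operands. And under UDEREF, set_fs() stops being a plain limit write: __set_fs() reloads %gs so the segment tracks the chosen address space. A usage sketch of the classic pattern that keeps working on top of this (fragment; callers as in mainline):

	static void kernel_ds_section_sketch(void)
	{
		mm_segment_t old_fs = get_fs();

		set_fs(KERNEL_DS);	/* addr_limit and, now, %gs := __KERNEL_DS */
		/* ... drive user-accessor paths on kernel buffers ... */
		set_fs(old_fs);		/* both reverted together */
	}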
31785diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
31786index c905e89..01ab928 100644
31787--- a/arch/x86/lib/usercopy_64.c
31788+++ b/arch/x86/lib/usercopy_64.c
31789@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31790 might_fault();
31791 /* no memory constraint because it doesn't change any memory gcc knows
31792 about */
31793+ pax_open_userland();
31794 stac();
31795 asm volatile(
31796 " testq %[size8],%[size8]\n"
31797@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31798 _ASM_EXTABLE(0b,3b)
31799 _ASM_EXTABLE(1b,2b)
31800 : [size8] "=&c"(size), [dst] "=&D" (__d0)
31801- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
31802+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
31803 [zero] "r" (0UL), [eight] "r" (8UL));
31804 clac();
31805+ pax_close_userland();
31806 return size;
31807 }
31808 EXPORT_SYMBOL(__clear_user);
31809@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
31810 }
31811 EXPORT_SYMBOL(clear_user);
31812
31813-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
31814+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
31815 {
31816- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
31817- return copy_user_generic((__force void *)to, (__force void *)from, len);
31818- }
31819- return len;
31820+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
31821+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
31822+ return len;
31823 }
31824 EXPORT_SYMBOL(copy_in_user);
31825
31826@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
31827 * it is not necessary to optimize tail handling.
31828 */
31829 __visible unsigned long
31830-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31831+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
31832 {
31833 char c;
31834 unsigned zero_len;
31835
31836+ clac();
31837+ pax_close_userland();
31838 for (; len; --len, to++) {
31839 if (__get_user_nocheck(c, from++, sizeof(char)))
31840 break;
31841@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31842 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
31843 if (__put_user_nocheck(c, to++, sizeof(char)))
31844 break;
31845- clac();
31846 return len;
31847 }
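On the 64-bit side the convention visible above is: wrap the user pointer in ____m() for the shadow rebase, and bracket the access with pax_open_userland()/pax_close_userland() alongside stac()/clac(). copy_user_handle_tail() now closes both at entry rather than at exit, because its byte-wise __get_user_nocheck()/__put_user_nocheck() loop manages the window itself. A hedged sketch of the bracketing (____m() and the pax_*_userland() primitives are PaX internals whose exact shapes are assumed here; __do_clear() is hypothetical):

	static unsigned long clear_user_sketch(void __user *p, unsigned long n)
	{
		pax_open_userland();		/* expose the user window */
		stac();				/* lift SMAP for the access */
		n = __do_clear(____m(p), n);	/* hypothetical worker */
		clac();
		pax_close_userland();
		return n;
	}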
31848diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
31849index 6a19ad9..1c48f9a 100644
31850--- a/arch/x86/mm/Makefile
31851+++ b/arch/x86/mm/Makefile
31852@@ -30,3 +30,7 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o
31853 obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
31854
31855 obj-$(CONFIG_MEMTEST) += memtest.o
31856+
31857+quote:="
31858+obj-$(CONFIG_X86_64) += uderef_64.o
31859+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
31860diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
31861index 903ec1e..c4166b2 100644
31862--- a/arch/x86/mm/extable.c
31863+++ b/arch/x86/mm/extable.c
31864@@ -6,12 +6,24 @@
31865 static inline unsigned long
31866 ex_insn_addr(const struct exception_table_entry *x)
31867 {
31868- return (unsigned long)&x->insn + x->insn;
31869+ unsigned long reloc = 0;
31870+
31871+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31872+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31873+#endif
31874+
31875+ return (unsigned long)&x->insn + x->insn + reloc;
31876 }
31877 static inline unsigned long
31878 ex_fixup_addr(const struct exception_table_entry *x)
31879 {
31880- return (unsigned long)&x->fixup + x->fixup;
31881+ unsigned long reloc = 0;
31882+
31883+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31884+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31885+#endif
31886+
31887+ return (unsigned long)&x->fixup + x->fixup + reloc;
31888 }
31889
31890 int fixup_exception(struct pt_regs *regs)
31891@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
31892 unsigned long new_ip;
31893
31894 #ifdef CONFIG_PNPBIOS
31895- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
31896+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
31897 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
31898 extern u32 pnp_bios_is_utter_crap;
31899 pnp_bios_is_utter_crap = 1;
31900@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
31901 i += 4;
31902 p->fixup -= i;
31903 i += 4;
31904+
31905+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31906+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
31907+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31908+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31909+#endif
31910+
31911 }
31912 }
31913
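Both extable helpers add the same constant: under 32-bit KERNEXEC the kernel runs at a different address than it was linked at, so the relative insn/fixup offsets stored in the table come up short by ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR. sort_extable(), which runs on tables adjusted at runtime (module load), subtracts the same delta so the two representations stay consistent with the lookup path. Factored out as a sketch, with the names as used in the hunks:

	static inline unsigned long kernexec_extable_reloc(void)
	{
	#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
		return ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
	#else
		return 0;
	#endif
	}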
31914diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
31915index 3664279..c6a7830 100644
31916--- a/arch/x86/mm/fault.c
31917+++ b/arch/x86/mm/fault.c
31918@@ -14,12 +14,19 @@
31919 #include <linux/hugetlb.h> /* hstate_index_to_shift */
31920 #include <linux/prefetch.h> /* prefetchw */
31921 #include <linux/context_tracking.h> /* exception_enter(), ... */
31922+#include <linux/unistd.h>
31923+#include <linux/compiler.h>
31924
31925 #include <asm/traps.h> /* dotraplinkage, ... */
31926 #include <asm/pgalloc.h> /* pgd_*(), ... */
31927 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
31928 #include <asm/fixmap.h> /* VSYSCALL_ADDR */
31929 #include <asm/vsyscall.h> /* emulate_vsyscall */
31930+#include <asm/tlbflush.h>
31931+
31932+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31933+#include <asm/stacktrace.h>
31934+#endif
31935
31936 #define CREATE_TRACE_POINTS
31937 #include <asm/trace/exceptions.h>
31938@@ -60,7 +67,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
31939 int ret = 0;
31940
31941 /* kprobe_running() needs smp_processor_id() */
31942- if (kprobes_built_in() && !user_mode_vm(regs)) {
31943+ if (kprobes_built_in() && !user_mode(regs)) {
31944 preempt_disable();
31945 if (kprobe_running() && kprobe_fault_handler(regs, 14))
31946 ret = 1;
31947@@ -121,7 +128,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
31948 return !instr_lo || (instr_lo>>1) == 1;
31949 case 0x00:
31950 /* Prefetch instruction is 0x0F0D or 0x0F18 */
31951- if (probe_kernel_address(instr, opcode))
31952+ if (user_mode(regs)) {
31953+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31954+ return 0;
31955+ } else if (probe_kernel_address(instr, opcode))
31956 return 0;
31957
31958 *prefetch = (instr_lo == 0xF) &&
31959@@ -155,7 +165,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
31960 while (instr < max_instr) {
31961 unsigned char opcode;
31962
31963- if (probe_kernel_address(instr, opcode))
31964+ if (user_mode(regs)) {
31965+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31966+ break;
31967+ } else if (probe_kernel_address(instr, opcode))
31968 break;
31969
31970 instr++;
31971@@ -186,6 +199,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
31972 force_sig_info(si_signo, &info, tsk);
31973 }
31974
31975+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31976+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
31977+#endif
31978+
31979+#ifdef CONFIG_PAX_EMUTRAMP
31980+static int pax_handle_fetch_fault(struct pt_regs *regs);
31981+#endif
31982+
31983+#ifdef CONFIG_PAX_PAGEEXEC
31984+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
31985+{
31986+ pgd_t *pgd;
31987+ pud_t *pud;
31988+ pmd_t *pmd;
31989+
31990+ pgd = pgd_offset(mm, address);
31991+ if (!pgd_present(*pgd))
31992+ return NULL;
31993+ pud = pud_offset(pgd, address);
31994+ if (!pud_present(*pud))
31995+ return NULL;
31996+ pmd = pmd_offset(pud, address);
31997+ if (!pmd_present(*pmd))
31998+ return NULL;
31999+ return pmd;
32000+}
32001+#endif
32002+
32003 DEFINE_SPINLOCK(pgd_lock);
32004 LIST_HEAD(pgd_list);
32005
32006@@ -236,10 +277,27 @@ void vmalloc_sync_all(void)
32007 for (address = VMALLOC_START & PMD_MASK;
32008 address >= TASK_SIZE && address < FIXADDR_TOP;
32009 address += PMD_SIZE) {
32010+
32011+#ifdef CONFIG_PAX_PER_CPU_PGD
32012+ unsigned long cpu;
32013+#else
32014 struct page *page;
32015+#endif
32016
32017 spin_lock(&pgd_lock);
32018+
32019+#ifdef CONFIG_PAX_PER_CPU_PGD
32020+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
32021+ pgd_t *pgd = get_cpu_pgd(cpu, user);
32022+ pmd_t *ret;
32023+
32024+ ret = vmalloc_sync_one(pgd, address);
32025+ if (!ret)
32026+ break;
32027+ pgd = get_cpu_pgd(cpu, kernel);
32028+#else
32029 list_for_each_entry(page, &pgd_list, lru) {
32030+ pgd_t *pgd;
32031 spinlock_t *pgt_lock;
32032 pmd_t *ret;
32033
32034@@ -247,8 +305,14 @@ void vmalloc_sync_all(void)
32035 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
32036
32037 spin_lock(pgt_lock);
32038- ret = vmalloc_sync_one(page_address(page), address);
32039+ pgd = page_address(page);
32040+#endif
32041+
32042+ ret = vmalloc_sync_one(pgd, address);
32043+
32044+#ifndef CONFIG_PAX_PER_CPU_PGD
32045 spin_unlock(pgt_lock);
32046+#endif
32047
32048 if (!ret)
32049 break;
32050@@ -282,6 +346,12 @@ static noinline int vmalloc_fault(unsigned long address)
32051 * an interrupt in the middle of a task switch..
32052 */
32053 pgd_paddr = read_cr3();
32054+
32055+#ifdef CONFIG_PAX_PER_CPU_PGD
32056+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
32057+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
32058+#endif
32059+
32060 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
32061 if (!pmd_k)
32062 return -1;
32063@@ -378,11 +448,25 @@ static noinline int vmalloc_fault(unsigned long address)
32064 * happen within a race in page table update. In the later
32065 * case just flush:
32066 */
32067- pgd = pgd_offset(current->active_mm, address);
32068+
32069 pgd_ref = pgd_offset_k(address);
32070 if (pgd_none(*pgd_ref))
32071 return -1;
32072
32073+#ifdef CONFIG_PAX_PER_CPU_PGD
32074+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
32075+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
32076+ if (pgd_none(*pgd)) {
32077+ set_pgd(pgd, *pgd_ref);
32078+ arch_flush_lazy_mmu_mode();
32079+ } else {
32080+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
32081+ }
32082+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
32083+#else
32084+ pgd = pgd_offset(current->active_mm, address);
32085+#endif
32086+
32087 if (pgd_none(*pgd)) {
32088 set_pgd(pgd, *pgd_ref);
32089 arch_flush_lazy_mmu_mode();
32090@@ -549,7 +633,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
32091 static int is_errata100(struct pt_regs *regs, unsigned long address)
32092 {
32093 #ifdef CONFIG_X86_64
32094- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
32095+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
32096 return 1;
32097 #endif
32098 return 0;
32099@@ -576,7 +660,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
32100 }
32101
32102 static const char nx_warning[] = KERN_CRIT
32103-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
32104+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
32105
32106 static void
32107 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32108@@ -585,7 +669,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32109 if (!oops_may_print())
32110 return;
32111
32112- if (error_code & PF_INSTR) {
32113+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
32114 unsigned int level;
32115 pgd_t *pgd;
32116 pte_t *pte;
32117@@ -596,9 +680,21 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32118 pte = lookup_address_in_pgd(pgd, address, &level);
32119
32120 if (pte && pte_present(*pte) && !pte_exec(*pte))
32121- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
32122+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
32123 }
32124
32125+#ifdef CONFIG_PAX_KERNEXEC
32126+ if (init_mm.start_code <= address && address < init_mm.end_code) {
32127+ if (current->signal->curr_ip)
32128+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
32129+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
32130+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32131+ else
32132+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
32133+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32134+ }
32135+#endif
32136+
32137 printk(KERN_ALERT "BUG: unable to handle kernel ");
32138 if (address < PAGE_SIZE)
32139 printk(KERN_CONT "NULL pointer dereference");
32140@@ -779,6 +875,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
32141 return;
32142 }
32143 #endif
32144+
32145+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32146+ if (pax_is_fetch_fault(regs, error_code, address)) {
32147+
32148+#ifdef CONFIG_PAX_EMUTRAMP
32149+ switch (pax_handle_fetch_fault(regs)) {
32150+ case 2:
32151+ return;
32152+ }
32153+#endif
32154+
32155+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32156+ do_group_exit(SIGKILL);
32157+ }
32158+#endif
32159+
32160 /* Kernel addresses are always protection faults: */
32161 if (address >= TASK_SIZE)
32162 error_code |= PF_PROT;
32163@@ -864,7 +976,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
32164 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
32165 printk(KERN_ERR
32166 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
32167- tsk->comm, tsk->pid, address);
32168+ tsk->comm, task_pid_nr(tsk), address);
32169 code = BUS_MCEERR_AR;
32170 }
32171 #endif
32172@@ -918,6 +1030,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
32173 return 1;
32174 }
32175
32176+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32177+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
32178+{
32179+ pte_t *pte;
32180+ pmd_t *pmd;
32181+ spinlock_t *ptl;
32182+ unsigned char pte_mask;
32183+
32184+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
32185+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
32186+ return 0;
32187+
32188+ /* PaX: it's our fault, let's handle it if we can */
32189+
32190+ /* PaX: take a look at read faults before acquiring any locks */
32191+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
32192+ /* instruction fetch attempt from a protected page in user mode */
32193+ up_read(&mm->mmap_sem);
32194+
32195+#ifdef CONFIG_PAX_EMUTRAMP
32196+ switch (pax_handle_fetch_fault(regs)) {
32197+ case 2:
32198+ return 1;
32199+ }
32200+#endif
32201+
32202+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32203+ do_group_exit(SIGKILL);
32204+ }
32205+
32206+ pmd = pax_get_pmd(mm, address);
32207+ if (unlikely(!pmd))
32208+ return 0;
32209+
32210+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
32211+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
32212+ pte_unmap_unlock(pte, ptl);
32213+ return 0;
32214+ }
32215+
32216+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
32217+ /* write attempt to a protected page in user mode */
32218+ pte_unmap_unlock(pte, ptl);
32219+ return 0;
32220+ }
32221+
32222+#ifdef CONFIG_SMP
32223+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
32224+#else
32225+ if (likely(address > get_limit(regs->cs)))
32226+#endif
32227+ {
32228+ set_pte(pte, pte_mkread(*pte));
32229+ __flush_tlb_one(address);
32230+ pte_unmap_unlock(pte, ptl);
32231+ up_read(&mm->mmap_sem);
32232+ return 1;
32233+ }
32234+
32235+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
32236+
32237+ /*
32238+ * PaX: fill DTLB with user rights and retry
32239+ */
32240+ __asm__ __volatile__ (
32241+ "orb %2,(%1)\n"
32242+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
32243+/*
32244+ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
32245+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
32246+ * page fault when examined during a TLB load attempt. this is true not only
32247+ * for PTEs holding a non-present entry but also for present entries that will
32248+ * raise a page fault (such as those set up by PaX, or the copy-on-write
32249+ * mechanism). in effect it means that we do *not* need to flush the TLBs
32250+ * for our target pages since their PTEs are simply not in the TLBs at all.
32251+
32252+ * the best thing in omitting it is that we gain around 15-20% speed in the
32253+ * fast path of the page fault handler and can get rid of tracing since we
32254+ * can no longer flush unintended entries.
32255+ */
32256+ "invlpg (%0)\n"
32257+#endif
32258+ __copyuser_seg"testb $0,(%0)\n"
32259+ "xorb %3,(%1)\n"
32260+ :
32261+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
32262+ : "memory", "cc");
32263+ pte_unmap_unlock(pte, ptl);
32264+ up_read(&mm->mmap_sem);
32265+ return 1;
32266+}
32267+#endif
32268+
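
A note on the pte_mask computed above: PF_WRITE is bit 1 of the error code and _PAGE_DIRTY is bit 6 of a PTE, so shifting left by _PAGE_BIT_DIRTY-1 (= 5) moves the write bit onto the dirty bit, yielding a dirty+accessed+user mask in a single expression on write faults. A minimal user-space sketch of the arithmetic, with the constants assumed to mirror arch/x86/include/asm/pgtable_types.h:

#include <assert.h>
#include <stdio.h>

#define PF_WRITE        0x2UL   /* error_code bit 1: fault was a write */
#define _PAGE_BIT_DIRTY 6
#define _PAGE_USER      0x04UL  /* bit 2 */
#define _PAGE_ACCESSED  0x20UL  /* bit 5 */
#define _PAGE_DIRTY     0x40UL  /* bit 6 */

static unsigned long mask_for(unsigned long error_code)
{
	return _PAGE_ACCESSED | _PAGE_USER |
	       ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY - 1));
}

int main(void)
{
	/* read fault: accessed + user only */
	assert(mask_for(0) == (_PAGE_ACCESSED | _PAGE_USER));
	/* write fault: the shifted PF_WRITE bit lands exactly on _PAGE_DIRTY */
	assert(mask_for(PF_WRITE) == (_PAGE_ACCESSED | _PAGE_USER | _PAGE_DIRTY));
	printf("read %#lx, write %#lx\n", mask_for(0), mask_for(PF_WRITE));
	return 0;
}
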
32269 /*
32270 * Handle a spurious fault caused by a stale TLB entry.
32271 *
32272@@ -985,6 +1190,9 @@ int show_unhandled_signals = 1;
32273 static inline int
32274 access_error(unsigned long error_code, struct vm_area_struct *vma)
32275 {
32276+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
32277+ return 1;
32278+
32279 if (error_code & PF_WRITE) {
32280 /* write, present and write, not present: */
32281 if (unlikely(!(vma->vm_flags & VM_WRITE)))
32282@@ -1019,7 +1227,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
32283 if (error_code & PF_USER)
32284 return false;
32285
32286- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
32287+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
32288 return false;
32289
32290 return true;
32291@@ -1047,6 +1255,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32292 tsk = current;
32293 mm = tsk->mm;
32294
32295+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32296+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
32297+ if (!search_exception_tables(regs->ip)) {
32298+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32299+ bad_area_nosemaphore(regs, error_code, address);
32300+ return;
32301+ }
32302+ if (address < pax_user_shadow_base) {
32303+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32304+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
32305+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
32306+ } else
32307+ address -= pax_user_shadow_base;
32308+ }
32309+#endif
32310+
32311 /*
32312 * Detect and handle instructions that would cause a page fault for
32313 * both a tracked kernel page and a userspace page.
32314@@ -1124,7 +1348,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32315 * User-mode registers count as a user access even for any
32316 * potential system fault or CPU buglet:
32317 */
32318- if (user_mode_vm(regs)) {
32319+ if (user_mode(regs)) {
32320 local_irq_enable();
32321 error_code |= PF_USER;
32322 flags |= FAULT_FLAG_USER;
32323@@ -1171,6 +1395,11 @@ retry:
32324 might_sleep();
32325 }
32326
32327+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32328+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
32329+ return;
32330+#endif
32331+
32332 vma = find_vma(mm, address);
32333 if (unlikely(!vma)) {
32334 bad_area(regs, error_code, address);
32335@@ -1182,18 +1411,24 @@ retry:
32336 bad_area(regs, error_code, address);
32337 return;
32338 }
32339- if (error_code & PF_USER) {
32340- /*
32341- * Accessing the stack below %sp is always a bug.
32342- * The large cushion allows instructions like enter
32343- * and pusha to work. ("enter $65535, $31" pushes
32344- * 32 pointers and then decrements %sp by 65535.)
32345- */
32346- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
32347- bad_area(regs, error_code, address);
32348- return;
32349- }
32350+ /*
32351+ * Accessing the stack below %sp is always a bug.
32352+ * The large cushion allows instructions like enter
32353+ * and pusha to work. ("enter $65535, $31" pushes
32354+ * 32 pointers and then decrements %sp by 65535.)
32355+ */
32356+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
32357+ bad_area(regs, error_code, address);
32358+ return;
32359 }
32360+
32361+#ifdef CONFIG_PAX_SEGMEXEC
32362+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
32363+ bad_area(regs, error_code, address);
32364+ return;
32365+ }
32366+#endif
32367+
32368 if (unlikely(expand_stack(vma, address))) {
32369 bad_area(regs, error_code, address);
32370 return;
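
The cushion in the rewritten check above is 65536 + 32 * sizeof(unsigned long) bytes: per the comment, "enter $65535, $31" pushes 32 pointer-sized values and then subtracts 65535 from %sp, so an access may legitimately land that far below the saved %sp. A quick boundary check on LP64, where the cushion works out to 65792 bytes:

#include <assert.h>

int main(void)
{
	unsigned long sp = 0x7ffffffde000UL;	/* sample user %sp */
	unsigned long cushion = 65536 + 32 * sizeof(unsigned long);

	assert(cushion == 65792);		/* 65536 + 256 on LP64 */
	/* exactly cushion bytes below %sp: still accepted */
	assert(!(sp - cushion + cushion < sp));
	/* one byte further down: flagged as a bad access */
	assert(sp - cushion - 1 + cushion < sp);
	return 0;
}
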
32371@@ -1309,3 +1544,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
32372 }
32373 NOKPROBE_SYMBOL(trace_do_page_fault);
32374 #endif /* CONFIG_TRACING */
32375+
32376+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32377+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
32378+{
32379+ struct mm_struct *mm = current->mm;
32380+ unsigned long ip = regs->ip;
32381+
32382+ if (v8086_mode(regs))
32383+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
32384+
32385+#ifdef CONFIG_PAX_PAGEEXEC
32386+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
32387+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
32388+ return true;
32389+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
32390+ return true;
32391+ return false;
32392+ }
32393+#endif
32394+
32395+#ifdef CONFIG_PAX_SEGMEXEC
32396+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
32397+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
32398+ return true;
32399+ return false;
32400+ }
32401+#endif
32402+
32403+ return false;
32404+}
32405+#endif
32406+
32407+#ifdef CONFIG_PAX_EMUTRAMP
32408+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
32409+{
32410+ int err;
32411+
32412+ do { /* PaX: libffi trampoline emulation */
32413+ unsigned char mov, jmp;
32414+ unsigned int addr1, addr2;
32415+
32416+#ifdef CONFIG_X86_64
32417+ if ((regs->ip + 9) >> 32)
32418+ break;
32419+#endif
32420+
32421+ err = get_user(mov, (unsigned char __user *)regs->ip);
32422+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32423+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32424+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32425+
32426+ if (err)
32427+ break;
32428+
32429+ if (mov == 0xB8 && jmp == 0xE9) {
32430+ regs->ax = addr1;
32431+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32432+ return 2;
32433+ }
32434+ } while (0);
32435+
32436+ do { /* PaX: gcc trampoline emulation #1 */
32437+ unsigned char mov1, mov2;
32438+ unsigned short jmp;
32439+ unsigned int addr1, addr2;
32440+
32441+#ifdef CONFIG_X86_64
32442+ if ((regs->ip + 11) >> 32)
32443+ break;
32444+#endif
32445+
32446+ err = get_user(mov1, (unsigned char __user *)regs->ip);
32447+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32448+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
32449+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32450+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
32451+
32452+ if (err)
32453+ break;
32454+
32455+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
32456+ regs->cx = addr1;
32457+ regs->ax = addr2;
32458+ regs->ip = addr2;
32459+ return 2;
32460+ }
32461+ } while (0);
32462+
32463+ do { /* PaX: gcc trampoline emulation #2 */
32464+ unsigned char mov, jmp;
32465+ unsigned int addr1, addr2;
32466+
32467+#ifdef CONFIG_X86_64
32468+ if ((regs->ip + 9) >> 32)
32469+ break;
32470+#endif
32471+
32472+ err = get_user(mov, (unsigned char __user *)regs->ip);
32473+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32474+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32475+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32476+
32477+ if (err)
32478+ break;
32479+
32480+ if (mov == 0xB9 && jmp == 0xE9) {
32481+ regs->cx = addr1;
32482+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32483+ return 2;
32484+ }
32485+ } while (0);
32486+
32487+ return 1; /* PaX in action */
32488+}
32489+
32490+#ifdef CONFIG_X86_64
32491+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
32492+{
32493+ int err;
32494+
32495+ do { /* PaX: libffi trampoline emulation */
32496+ unsigned short mov1, mov2, jmp1;
32497+ unsigned char stcclc, jmp2;
32498+ unsigned long addr1, addr2;
32499+
32500+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32501+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32502+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32503+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32504+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
32505+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
32506+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
32507+
32508+ if (err)
32509+ break;
32510+
32511+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32512+ regs->r11 = addr1;
32513+ regs->r10 = addr2;
32514+ if (stcclc == 0xF8)
32515+ regs->flags &= ~X86_EFLAGS_CF;
32516+ else
32517+ regs->flags |= X86_EFLAGS_CF;
32518+ regs->ip = addr1;
32519+ return 2;
32520+ }
32521+ } while (0);
32522+
32523+ do { /* PaX: gcc trampoline emulation #1 */
32524+ unsigned short mov1, mov2, jmp1;
32525+ unsigned char jmp2;
32526+ unsigned int addr1;
32527+ unsigned long addr2;
32528+
32529+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32530+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
32531+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
32532+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
32533+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
32534+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
32535+
32536+ if (err)
32537+ break;
32538+
32539+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32540+ regs->r11 = addr1;
32541+ regs->r10 = addr2;
32542+ regs->ip = addr1;
32543+ return 2;
32544+ }
32545+ } while (0);
32546+
32547+ do { /* PaX: gcc trampoline emulation #2 */
32548+ unsigned short mov1, mov2, jmp1;
32549+ unsigned char jmp2;
32550+ unsigned long addr1, addr2;
32551+
32552+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32553+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32554+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32555+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32556+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
32557+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
32558+
32559+ if (err)
32560+ break;
32561+
32562+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32563+ regs->r11 = addr1;
32564+ regs->r10 = addr2;
32565+ regs->ip = addr1;
32566+ return 2;
32567+ }
32568+ } while (0);
32569+
32570+ return 1; /* PaX in action */
32571+}
32572+#endif
32573+
32574+/*
32575+ * PaX: decide what to do with offenders (regs->ip = fault address)
32576+ *
32577+ * returns 1 when the task should be killed
32578+ * 2 when a gcc trampoline was detected
32579+ */
32580+static int pax_handle_fetch_fault(struct pt_regs *regs)
32581+{
32582+ if (v8086_mode(regs))
32583+ return 1;
32584+
32585+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
32586+ return 1;
32587+
32588+#ifdef CONFIG_X86_32
32589+ return pax_handle_fetch_fault_32(regs);
32590+#else
32591+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
32592+ return pax_handle_fetch_fault_32(regs);
32593+ else
32594+ return pax_handle_fetch_fault_64(regs);
32595+#endif
32596+}
32597+#endif
32598+
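
The 32-bit paths above recognize, for example, a libffi trampoline as the byte pattern 0xB8 (mov $imm32, %eax), a 32-bit immediate, 0xE9 (jmp rel32), and a displacement counted from the end of the 10-byte sequence. A user-space sketch of that decode, reading from a plain byte buffer where the kernel uses get_user():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Decode "mov $imm32,%eax; jmp rel32" at ip; on a match, report the
 * register value and jump target the emulation above would install. */
static int decode_libffi_tramp(const uint8_t *buf, uint32_t ip,
			       uint32_t *eax, uint32_t *new_ip)
{
	uint32_t imm, rel;

	if (buf[0] != 0xB8 || buf[5] != 0xE9)
		return 0;
	memcpy(&imm, buf + 1, 4);	/* little-endian immediate */
	memcpy(&rel, buf + 6, 4);	/* rel32, relative to next insn */
	*eax = imm;
	*new_ip = ip + rel + 10;	/* 10 = length of both insns */
	return 1;
}

int main(void)
{
	/* mov $0x11223344,%eax ; jmp .+0x100 */
	uint8_t tramp[10] = { 0xB8, 0x44, 0x33, 0x22, 0x11,
			      0xE9, 0x00, 0x01, 0x00, 0x00 };
	uint32_t eax, new_ip;

	if (decode_libffi_tramp(tramp, 0x08048000, &eax, &new_ip))
		printf("eax=%#x new_ip=%#x\n", eax, new_ip);	/* 0x804810a */
	return 0;
}
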
32599+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32600+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
32601+{
32602+ long i;
32603+
32604+ printk(KERN_ERR "PAX: bytes at PC: ");
32605+ for (i = 0; i < 20; i++) {
32606+ unsigned char c;
32607+ if (get_user(c, (unsigned char __force_user *)pc+i))
32608+ printk(KERN_CONT "?? ");
32609+ else
32610+ printk(KERN_CONT "%02x ", c);
32611+ }
32612+ printk("\n");
32613+
32614+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
32615+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
32616+ unsigned long c;
32617+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
32618+#ifdef CONFIG_X86_32
32619+ printk(KERN_CONT "???????? ");
32620+#else
32621+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
32622+ printk(KERN_CONT "???????? ???????? ");
32623+ else
32624+ printk(KERN_CONT "???????????????? ");
32625+#endif
32626+ } else {
32627+#ifdef CONFIG_X86_64
32628+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
32629+ printk(KERN_CONT "%08x ", (unsigned int)c);
32630+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
32631+ } else
32632+#endif
32633+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
32634+ }
32635+ }
32636+ printk("\n");
32637+}
32638+#endif
32639+
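
pax_report_insns() above dumps 20 bytes at the faulting PC and about 80 bytes around SP, substituting "??" where get_user() faults; the "%0*lx" specifier pads each word to 2 * sizeof(long) hex digits, i.e. 8 on 32-bit and 16 on 64-bit. A minimal sketch of the formatting loops (fault handling omitted, readable memory only):

#include <stdio.h>

static void dump_pc(const unsigned char *pc)
{
	long i;

	printf("bytes at PC: ");
	for (i = 0; i < 20; i++)
		printf("%02x ", pc[i]);
	printf("\n");
}

static void dump_sp(const unsigned long *sp)
{
	long i;

	printf("words at SP-%lu: ", (unsigned long)sizeof(long));
	for (i = -1; i < 80 / (long)sizeof(long); i++)
		printf("%0*lx ", 2 * (int)sizeof(long), sp[i]);
	printf("\n");
}

int main(void)
{
	unsigned long stack[16] = { 0xdeadbeefUL, 0x1234UL };

	dump_pc((const unsigned char *)dump_pc);
	dump_sp(&stack[1]);	/* i = -1 reads stack[0], one word below */
	return 0;
}
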
32640+/**
32641+ * probe_kernel_write(): safely attempt to write to a location
32642+ * @dst: address to write to
32643+ * @src: pointer to the data that shall be written
32644+ * @size: size of the data chunk
32645+ *
32646+ * Safely write to address @dst from the buffer at @src. If a kernel fault
32647+ * happens, handle that and return -EFAULT.
32648+ */
32649+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
32650+{
32651+ long ret;
32652+ mm_segment_t old_fs = get_fs();
32653+
32654+ set_fs(KERNEL_DS);
32655+ pagefault_disable();
32656+ pax_open_kernel();
32657+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
32658+ pax_close_kernel();
32659+ pagefault_enable();
32660+ set_fs(old_fs);
32661+
32662+ return ret ? -EFAULT : 0;
32663+}
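
probe_kernel_write() above leans on the kernel's exception tables: the copy is simply attempted and a fault is converted into -EFAULT instead of an oops. A rough user-space analogue of that "try the write, catch the fault" pattern, substituting SIGSEGV plus sigsetjmp for the exception table (a sketch of the idea, not the kernel mechanism):

#include <errno.h>
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static sigjmp_buf fault_env;

static void segv_handler(int sig)
{
	(void)sig;
	siglongjmp(fault_env, 1);
}

/* copy size bytes to dst; 0 on success, -EFAULT if the write faults */
static long probe_write(void *dst, const void *src, size_t size)
{
	if (sigsetjmp(fault_env, 1))
		return -EFAULT;
	memcpy(dst, src, size);
	return 0;
}

int main(void)
{
	struct sigaction sa;
	char buf[16];
	void *ro = mmap(NULL, 4096, PROT_READ,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = segv_handler;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	printf("writable:  %ld\n", probe_write(buf, "hi", 3));	/* 0 */
	printf("read-only: %ld\n", probe_write(ro, "hi", 3));	/* -14 */
	return 0;
}
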
32664diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
32665index 207d9aef..69030980 100644
32666--- a/arch/x86/mm/gup.c
32667+++ b/arch/x86/mm/gup.c
32668@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
32669 addr = start;
32670 len = (unsigned long) nr_pages << PAGE_SHIFT;
32671 end = start + len;
32672- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
32673+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32674 (void __user *)start, len)))
32675 return 0;
32676
32677@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
32678 goto slow_irqon;
32679 #endif
32680
32681+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32682+ (void __user *)start, len)))
32683+ return 0;
32684+
32685 /*
32686 * XXX: batch / limit 'nr', to avoid large irq off latency
32687 * needs some instrumenting to determine the common sizes used by
32688diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
32689index 4500142..53a363c 100644
32690--- a/arch/x86/mm/highmem_32.c
32691+++ b/arch/x86/mm/highmem_32.c
32692@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
32693 idx = type + KM_TYPE_NR*smp_processor_id();
32694 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32695 BUG_ON(!pte_none(*(kmap_pte-idx)));
32696+
32697+ pax_open_kernel();
32698 set_pte(kmap_pte-idx, mk_pte(page, prot));
32699+ pax_close_kernel();
32700+
32701 arch_flush_lazy_mmu_mode();
32702
32703 return (void *)vaddr;
32704diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
32705index 8b977eb..4732c33 100644
32706--- a/arch/x86/mm/hugetlbpage.c
32707+++ b/arch/x86/mm/hugetlbpage.c
32708@@ -80,23 +80,24 @@ int pud_huge(pud_t pud)
32709 #ifdef CONFIG_HUGETLB_PAGE
32710 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
32711 unsigned long addr, unsigned long len,
32712- unsigned long pgoff, unsigned long flags)
32713+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32714 {
32715 struct hstate *h = hstate_file(file);
32716 struct vm_unmapped_area_info info;
32717-
32718+
32719 info.flags = 0;
32720 info.length = len;
32721 info.low_limit = current->mm->mmap_legacy_base;
32722 info.high_limit = TASK_SIZE;
32723 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32724 info.align_offset = 0;
32725+ info.threadstack_offset = offset;
32726 return vm_unmapped_area(&info);
32727 }
32728
32729 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32730 unsigned long addr0, unsigned long len,
32731- unsigned long pgoff, unsigned long flags)
32732+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32733 {
32734 struct hstate *h = hstate_file(file);
32735 struct vm_unmapped_area_info info;
32736@@ -108,6 +109,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32737 info.high_limit = current->mm->mmap_base;
32738 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32739 info.align_offset = 0;
32740+ info.threadstack_offset = offset;
32741 addr = vm_unmapped_area(&info);
32742
32743 /*
32744@@ -120,6 +122,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32745 VM_BUG_ON(addr != -ENOMEM);
32746 info.flags = 0;
32747 info.low_limit = TASK_UNMAPPED_BASE;
32748+
32749+#ifdef CONFIG_PAX_RANDMMAP
32750+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
32751+ info.low_limit += current->mm->delta_mmap;
32752+#endif
32753+
32754 info.high_limit = TASK_SIZE;
32755 addr = vm_unmapped_area(&info);
32756 }
32757@@ -134,10 +142,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32758 struct hstate *h = hstate_file(file);
32759 struct mm_struct *mm = current->mm;
32760 struct vm_area_struct *vma;
32761+ unsigned long pax_task_size = TASK_SIZE;
32762+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
32763
32764 if (len & ~huge_page_mask(h))
32765 return -EINVAL;
32766- if (len > TASK_SIZE)
32767+
32768+#ifdef CONFIG_PAX_SEGMEXEC
32769+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32770+ pax_task_size = SEGMEXEC_TASK_SIZE;
32771+#endif
32772+
32773+ pax_task_size -= PAGE_SIZE;
32774+
32775+ if (len > pax_task_size)
32776 return -ENOMEM;
32777
32778 if (flags & MAP_FIXED) {
32779@@ -146,19 +164,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32780 return addr;
32781 }
32782
32783+#ifdef CONFIG_PAX_RANDMMAP
32784+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
32785+#endif
32786+
32787 if (addr) {
32788 addr = ALIGN(addr, huge_page_size(h));
32789 vma = find_vma(mm, addr);
32790- if (TASK_SIZE - len >= addr &&
32791- (!vma || addr + len <= vma->vm_start))
32792+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
32793 return addr;
32794 }
32795 if (mm->get_unmapped_area == arch_get_unmapped_area)
32796 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
32797- pgoff, flags);
32798+ pgoff, flags, offset);
32799 else
32800 return hugetlb_get_unmapped_area_topdown(file, addr, len,
32801- pgoff, flags);
32802+ pgoff, flags, offset);
32803 }
32804 #endif /* CONFIG_HUGETLB_PAGE */
32805
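
hugetlb_get_unmapped_area() above rounds any caller hint up with ALIGN(addr, huge_page_size(h)) before probing for a gap. ALIGN rounds up to the next multiple of a power-of-two size; with 2 MB huge pages:

#include <assert.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))	/* a: power of two */

int main(void)
{
	unsigned long huge = 2UL << 20;			/* 2 MB */

	assert(ALIGN(0x40000000UL, huge) == 0x40000000UL);	/* aligned */
	assert(ALIGN(0x40000001UL, huge) == 0x40200000UL);	/* rounded up */
	return 0;
}
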
32806diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
32807index f971306..e83e0f6 100644
32808--- a/arch/x86/mm/init.c
32809+++ b/arch/x86/mm/init.c
32810@@ -4,6 +4,7 @@
32811 #include <linux/swap.h>
32812 #include <linux/memblock.h>
32813 #include <linux/bootmem.h> /* for max_low_pfn */
32814+#include <linux/tboot.h>
32815
32816 #include <asm/cacheflush.h>
32817 #include <asm/e820.h>
32818@@ -17,6 +18,8 @@
32819 #include <asm/proto.h>
32820 #include <asm/dma.h> /* for MAX_DMA_PFN */
32821 #include <asm/microcode.h>
32822+#include <asm/desc.h>
32823+#include <asm/bios_ebda.h>
32824
32825 #include "mm_internal.h"
32826
32827@@ -563,7 +566,18 @@ void __init init_mem_mapping(void)
32828 early_ioremap_page_table_range_init();
32829 #endif
32830
32831+#ifdef CONFIG_PAX_PER_CPU_PGD
32832+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
32833+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32834+ KERNEL_PGD_PTRS);
32835+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
32836+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32837+ KERNEL_PGD_PTRS);
32838+ load_cr3(get_cpu_pgd(0, kernel));
32839+#else
32840 load_cr3(swapper_pg_dir);
32841+#endif
32842+
32843 __flush_tlb_all();
32844
32845 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
32846@@ -579,10 +593,40 @@ void __init init_mem_mapping(void)
32847  * Access has to be given to non-kernel-ram areas as well; these contain the PCI
32848 * mmio resources as well as potential bios/acpi data regions.
32849 */
32850+
32851+#ifdef CONFIG_GRKERNSEC_KMEM
32852+static unsigned int ebda_start __read_only;
32853+static unsigned int ebda_end __read_only;
32854+#endif
32855+
32856 int devmem_is_allowed(unsigned long pagenr)
32857 {
32858- if (pagenr < 256)
32859+#ifdef CONFIG_GRKERNSEC_KMEM
32860+ /* allow BDA */
32861+ if (!pagenr)
32862 return 1;
32863+ /* allow EBDA */
32864+ if (pagenr >= ebda_start && pagenr < ebda_end)
32865+ return 1;
32866+ /* if tboot is in use, allow access to its hardcoded serial log range */
32867+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
32868+ return 1;
32869+#else
32870+ if (!pagenr)
32871+ return 1;
32872+#ifdef CONFIG_VM86
32873+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
32874+ return 1;
32875+#endif
32876+#endif
32877+
32878+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
32879+ return 1;
32880+#ifdef CONFIG_GRKERNSEC_KMEM
32881+ /* throw out everything else below 1MB */
32882+ if (pagenr <= 256)
32883+ return 0;
32884+#endif
32885 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
32886 return 0;
32887 if (!page_is_ram(pagenr))
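
Under GRKERNSEC_KMEM the rewritten devmem_is_allowed() whitelists only page 0 (the BIOS data area), the detected EBDA pages, the tboot serial-log window at 0x60000-0x68000 and the ISA hole, and rejects everything else below 1 MB. A stand-alone sketch of that page-number policy, with ISA_START_ADDRESS/ISA_END_ADDRESS assumed to be 0xa0000/0x100000 as in the x86 headers and the trailing RAM/iomem checks omitted:

#include <assert.h>

#define PAGE_SHIFT		12
#define ISA_START_ADDRESS	0xa0000UL
#define ISA_END_ADDRESS		0x100000UL

static unsigned long ebda_start = 0x9f000UL >> PAGE_SHIFT;
static unsigned long ebda_end = 0xa0000UL >> PAGE_SHIFT;
static int tboot_on = 1;

static int devmem_allowed(unsigned long pagenr)
{
	if (!pagenr)
		return 1;				/* BDA */
	if (pagenr >= ebda_start && pagenr < ebda_end)
		return 1;				/* EBDA */
	if (tboot_on && (0x60000UL >> PAGE_SHIFT) <= pagenr &&
	    pagenr < (0x68000UL >> PAGE_SHIFT))
		return 1;				/* tboot log */
	if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr &&
	    pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
		return 1;				/* ISA hole */
	if (pagenr <= 256)
		return 0;				/* rest of low 1 MB */
	return 1;					/* RAM checks omitted */
}

int main(void)
{
	assert(devmem_allowed(0));			 /* BDA page */
	assert(!devmem_allowed(1));			 /* low RAM: denied */
	assert(devmem_allowed(0x60000UL >> PAGE_SHIFT)); /* tboot window */
	assert(devmem_allowed(0xa0000UL >> PAGE_SHIFT)); /* VGA/ISA */
	assert(!devmem_allowed(0x9e000UL >> PAGE_SHIFT));/* below the EBDA */
	return 0;
}
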
32888@@ -628,8 +672,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
32889 #endif
32890 }
32891
32892+#ifdef CONFIG_GRKERNSEC_KMEM
32893+static inline void gr_init_ebda(void)
32894+{
32895+ unsigned int ebda_addr;
32896+ unsigned int ebda_size = 0;
32897+
32898+ ebda_addr = get_bios_ebda();
32899+ if (ebda_addr) {
32900+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
32901+ ebda_size <<= 10;
32902+ }
32903+ if (ebda_addr && ebda_size) {
32904+ ebda_start = ebda_addr >> PAGE_SHIFT;
32905+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
32906+ } else {
32907+ ebda_start = 0x9f000 >> PAGE_SHIFT;
32908+ ebda_end = 0xa0000 >> PAGE_SHIFT;
32909+ }
32910+}
32911+#else
32912+static inline void gr_init_ebda(void) { }
32913+#endif
32914+
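
gr_init_ebda() above reads the EBDA pointer out of the BIOS data area; the first byte of the EBDA holds its size in KiB, hence the << 10. The end is clamped to the 0xa0000 ceiling, and 0x9f000-0xa0000 serves as the fallback window when no EBDA is reported. A sketch of the computation on a sample value (get_bios_ebda() is stubbed out, since it reads physical memory):

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_ALIGN(x)	(((x) + 0xfffUL) & ~0xfffUL)
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long ebda_addr = 0x9fc00UL;	/* sample base from the BDA */
	unsigned long ebda_size = 1;		/* sample size byte: 1 KiB */
	unsigned long start, end;

	ebda_size <<= 10;			/* KiB -> bytes */
	start = ebda_addr >> PAGE_SHIFT;
	end = MIN(PAGE_ALIGN(ebda_addr + ebda_size), 0xa0000UL) >> PAGE_SHIFT;

	assert(start == 0x9f && end == 0xa0);	/* one page: 0x9f000-0xa0000 */
	printf("EBDA pages: %#lx-%#lx\n", start, end);
	return 0;
}
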
32915 void free_initmem(void)
32916 {
32917+#ifdef CONFIG_PAX_KERNEXEC
32918+#ifdef CONFIG_X86_32
32919+ /* PaX: limit KERNEL_CS to actual size */
32920+ unsigned long addr, limit;
32921+ struct desc_struct d;
32922+ int cpu;
32923+#else
32924+ pgd_t *pgd;
32925+ pud_t *pud;
32926+ pmd_t *pmd;
32927+ unsigned long addr, end;
32928+#endif
32929+#endif
32930+
32931+ gr_init_ebda();
32932+
32933+#ifdef CONFIG_PAX_KERNEXEC
32934+#ifdef CONFIG_X86_32
32935+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
32936+ limit = (limit - 1UL) >> PAGE_SHIFT;
32937+
32938+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
32939+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32940+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
32941+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
32942+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
32943+ }
32944+
32945+ /* PaX: make KERNEL_CS read-only */
32946+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
32947+ if (!paravirt_enabled())
32948+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
32949+/*
32950+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
32951+ pgd = pgd_offset_k(addr);
32952+ pud = pud_offset(pgd, addr);
32953+ pmd = pmd_offset(pud, addr);
32954+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32955+ }
32956+*/
32957+#ifdef CONFIG_X86_PAE
32958+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
32959+/*
32960+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
32961+ pgd = pgd_offset_k(addr);
32962+ pud = pud_offset(pgd, addr);
32963+ pmd = pmd_offset(pud, addr);
32964+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32965+ }
32966+*/
32967+#endif
32968+
32969+#ifdef CONFIG_MODULES
32970+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
32971+#endif
32972+
32973+#else
32974+ /* PaX: make kernel code/rodata read-only, rest non-executable */
32975+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
32976+ pgd = pgd_offset_k(addr);
32977+ pud = pud_offset(pgd, addr);
32978+ pmd = pmd_offset(pud, addr);
32979+ if (!pmd_present(*pmd))
32980+ continue;
32981+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
32982+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32983+ else
32984+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32985+ }
32986+
32987+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
32988+ end = addr + KERNEL_IMAGE_SIZE;
32989+ for (; addr < end; addr += PMD_SIZE) {
32990+ pgd = pgd_offset_k(addr);
32991+ pud = pud_offset(pgd, addr);
32992+ pmd = pmd_offset(pud, addr);
32993+ if (!pmd_present(*pmd))
32994+ continue;
32995+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
32996+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32997+ }
32998+#endif
32999+
33000+ flush_tlb_all();
33001+#endif
33002+
33003 free_init_pages("unused kernel",
33004 (unsigned long)(&__init_begin),
33005 (unsigned long)(&__init_end));
33006diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
33007index e395048..cd38278 100644
33008--- a/arch/x86/mm/init_32.c
33009+++ b/arch/x86/mm/init_32.c
33010@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
33011 bool __read_mostly __vmalloc_start_set = false;
33012
33013 /*
33014- * Creates a middle page table and puts a pointer to it in the
33015- * given global directory entry. This only returns the gd entry
33016- * in non-PAE compilation mode, since the middle layer is folded.
33017- */
33018-static pmd_t * __init one_md_table_init(pgd_t *pgd)
33019-{
33020- pud_t *pud;
33021- pmd_t *pmd_table;
33022-
33023-#ifdef CONFIG_X86_PAE
33024- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
33025- pmd_table = (pmd_t *)alloc_low_page();
33026- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
33027- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
33028- pud = pud_offset(pgd, 0);
33029- BUG_ON(pmd_table != pmd_offset(pud, 0));
33030-
33031- return pmd_table;
33032- }
33033-#endif
33034- pud = pud_offset(pgd, 0);
33035- pmd_table = pmd_offset(pud, 0);
33036-
33037- return pmd_table;
33038-}
33039-
33040-/*
33041 * Create a page table and place a pointer to it in a middle page
33042 * directory entry:
33043 */
33044@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
33045 pte_t *page_table = (pte_t *)alloc_low_page();
33046
33047 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
33048+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
33049+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
33050+#else
33051 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
33052+#endif
33053 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
33054 }
33055
33056 return pte_offset_kernel(pmd, 0);
33057 }
33058
33059+static pmd_t * __init one_md_table_init(pgd_t *pgd)
33060+{
33061+ pud_t *pud;
33062+ pmd_t *pmd_table;
33063+
33064+ pud = pud_offset(pgd, 0);
33065+ pmd_table = pmd_offset(pud, 0);
33066+
33067+ return pmd_table;
33068+}
33069+
33070 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
33071 {
33072 int pgd_idx = pgd_index(vaddr);
33073@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33074 int pgd_idx, pmd_idx;
33075 unsigned long vaddr;
33076 pgd_t *pgd;
33077+ pud_t *pud;
33078 pmd_t *pmd;
33079 pte_t *pte = NULL;
33080 unsigned long count = page_table_range_init_count(start, end);
33081@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33082 pgd = pgd_base + pgd_idx;
33083
33084 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
33085- pmd = one_md_table_init(pgd);
33086- pmd = pmd + pmd_index(vaddr);
33087+ pud = pud_offset(pgd, vaddr);
33088+ pmd = pmd_offset(pud, vaddr);
33089+
33090+#ifdef CONFIG_X86_PAE
33091+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33092+#endif
33093+
33094 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
33095 pmd++, pmd_idx++) {
33096 pte = page_table_kmap_check(one_page_table_init(pmd),
33097@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33098 }
33099 }
33100
33101-static inline int is_kernel_text(unsigned long addr)
33102+static inline int is_kernel_text(unsigned long start, unsigned long end)
33103 {
33104- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
33105- return 1;
33106- return 0;
33107+ if ((start >= ktla_ktva((unsigned long)_etext) ||
33108+ end <= ktla_ktva((unsigned long)_stext)) &&
33109+ (start >= ktla_ktva((unsigned long)_einittext) ||
33110+ end <= ktla_ktva((unsigned long)_sinittext)) &&
33111+
33112+#ifdef CONFIG_ACPI_SLEEP
33113+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
33114+#endif
33115+
33116+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
33117+ return 0;
33118+ return 1;
33119 }
33120
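
The rewritten is_kernel_text() above inverts the old point test into a range test: it returns 0 only when [start, end) is disjoint from every protected range, using the usual half-open interval check (no overlap iff start >= hi || end <= lo). A small sketch of that predicate:

#include <assert.h>

/* 1 if [start, end) and [lo, hi) share no byte (half-open intervals) */
static int disjoint(unsigned long start, unsigned long end,
		    unsigned long lo, unsigned long hi)
{
	return start >= hi || end <= lo;
}

int main(void)
{
	/* hypothetical kernel text range */
	unsigned long lo = 0xc1000000UL, hi = 0xc1400000UL;

	assert(disjoint(0xc0000000UL, 0xc1000000UL, lo, hi));	/* ends at lo */
	assert(!disjoint(0xc13ff000UL, 0xc1400000UL, lo, hi));	/* last page */
	assert(disjoint(0xc1400000UL, 0xc1401000UL, lo, hi));	/* starts at hi */
	return 0;
}
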
33121 /*
33122@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
33123 unsigned long last_map_addr = end;
33124 unsigned long start_pfn, end_pfn;
33125 pgd_t *pgd_base = swapper_pg_dir;
33126- int pgd_idx, pmd_idx, pte_ofs;
33127+ unsigned int pgd_idx, pmd_idx, pte_ofs;
33128 unsigned long pfn;
33129 pgd_t *pgd;
33130+ pud_t *pud;
33131 pmd_t *pmd;
33132 pte_t *pte;
33133 unsigned pages_2m, pages_4k;
33134@@ -291,8 +295,13 @@ repeat:
33135 pfn = start_pfn;
33136 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33137 pgd = pgd_base + pgd_idx;
33138- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
33139- pmd = one_md_table_init(pgd);
33140+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
33141+ pud = pud_offset(pgd, 0);
33142+ pmd = pmd_offset(pud, 0);
33143+
33144+#ifdef CONFIG_X86_PAE
33145+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33146+#endif
33147
33148 if (pfn >= end_pfn)
33149 continue;
33150@@ -304,14 +313,13 @@ repeat:
33151 #endif
33152 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
33153 pmd++, pmd_idx++) {
33154- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
33155+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
33156
33157 /*
33158 * Map with big pages if possible, otherwise
33159 * create normal page tables:
33160 */
33161 if (use_pse) {
33162- unsigned int addr2;
33163 pgprot_t prot = PAGE_KERNEL_LARGE;
33164 /*
33165 * first pass will use the same initial
33166@@ -322,11 +330,7 @@ repeat:
33167 _PAGE_PSE);
33168
33169 pfn &= PMD_MASK >> PAGE_SHIFT;
33170- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
33171- PAGE_OFFSET + PAGE_SIZE-1;
33172-
33173- if (is_kernel_text(addr) ||
33174- is_kernel_text(addr2))
33175+ if (is_kernel_text(address, address + PMD_SIZE))
33176 prot = PAGE_KERNEL_LARGE_EXEC;
33177
33178 pages_2m++;
33179@@ -343,7 +347,7 @@ repeat:
33180 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33181 pte += pte_ofs;
33182 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
33183- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
33184+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
33185 pgprot_t prot = PAGE_KERNEL;
33186 /*
33187 * first pass will use the same initial
33188@@ -351,7 +355,7 @@ repeat:
33189 */
33190 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
33191
33192- if (is_kernel_text(addr))
33193+ if (is_kernel_text(address, address + PAGE_SIZE))
33194 prot = PAGE_KERNEL_EXEC;
33195
33196 pages_4k++;
33197@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
33198
33199 pud = pud_offset(pgd, va);
33200 pmd = pmd_offset(pud, va);
33201- if (!pmd_present(*pmd))
33202+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
33203 break;
33204
33205 /* should not be large page here */
33206@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
33207
33208 static void __init pagetable_init(void)
33209 {
33210- pgd_t *pgd_base = swapper_pg_dir;
33211-
33212- permanent_kmaps_init(pgd_base);
33213+ permanent_kmaps_init(swapper_pg_dir);
33214 }
33215
33216-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
33217+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
33218 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33219
33220 /* user-defined highmem size */
33221@@ -787,10 +789,10 @@ void __init mem_init(void)
33222 ((unsigned long)&__init_end -
33223 (unsigned long)&__init_begin) >> 10,
33224
33225- (unsigned long)&_etext, (unsigned long)&_edata,
33226- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
33227+ (unsigned long)&_sdata, (unsigned long)&_edata,
33228+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
33229
33230- (unsigned long)&_text, (unsigned long)&_etext,
33231+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
33232 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
33233
33234 /*
33235@@ -883,6 +885,7 @@ void set_kernel_text_rw(void)
33236 if (!kernel_set_to_readonly)
33237 return;
33238
33239+ start = ktla_ktva(start);
33240 pr_debug("Set kernel text: %lx - %lx for read write\n",
33241 start, start+size);
33242
33243@@ -897,6 +900,7 @@ void set_kernel_text_ro(void)
33244 if (!kernel_set_to_readonly)
33245 return;
33246
33247+ start = ktla_ktva(start);
33248 pr_debug("Set kernel text: %lx - %lx for read only\n",
33249 start, start+size);
33250
33251@@ -925,6 +929,7 @@ void mark_rodata_ro(void)
33252 unsigned long start = PFN_ALIGN(_text);
33253 unsigned long size = PFN_ALIGN(_etext) - start;
33254
33255+ start = ktla_ktva(start);
33256 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
33257 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
33258 size >> 10);
33259diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
33260index df1a992..94c272c 100644
33261--- a/arch/x86/mm/init_64.c
33262+++ b/arch/x86/mm/init_64.c
33263@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
33264 * around without checking the pgd every time.
33265 */
33266
33267-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
33268+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
33269 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33270
33271 int force_personality32;
33272@@ -184,12 +184,29 @@ void sync_global_pgds(unsigned long start, unsigned long end)
33273
33274 for (address = start; address <= end; address += PGDIR_SIZE) {
33275 const pgd_t *pgd_ref = pgd_offset_k(address);
33276+
33277+#ifdef CONFIG_PAX_PER_CPU_PGD
33278+ unsigned long cpu;
33279+#else
33280 struct page *page;
33281+#endif
33282
33283 if (pgd_none(*pgd_ref))
33284 continue;
33285
33286 spin_lock(&pgd_lock);
33287+
33288+#ifdef CONFIG_PAX_PER_CPU_PGD
33289+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33290+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
33291+
33292+ if (pgd_none(*pgd))
33293+ set_pgd(pgd, *pgd_ref);
33294+ else
33295+ BUG_ON(pgd_page_vaddr(*pgd)
33296+ != pgd_page_vaddr(*pgd_ref));
33297+ pgd = pgd_offset_cpu(cpu, kernel, address);
33298+#else
33299 list_for_each_entry(page, &pgd_list, lru) {
33300 pgd_t *pgd;
33301 spinlock_t *pgt_lock;
33302@@ -198,6 +215,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
33303 /* the pgt_lock only for Xen */
33304 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33305 spin_lock(pgt_lock);
33306+#endif
33307
33308 if (pgd_none(*pgd))
33309 set_pgd(pgd, *pgd_ref);
33310@@ -205,7 +223,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
33311 BUG_ON(pgd_page_vaddr(*pgd)
33312 != pgd_page_vaddr(*pgd_ref));
33313
33314+#ifndef CONFIG_PAX_PER_CPU_PGD
33315 spin_unlock(pgt_lock);
33316+#endif
33317+
33318 }
33319 spin_unlock(&pgd_lock);
33320 }
33321@@ -238,7 +259,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
33322 {
33323 if (pgd_none(*pgd)) {
33324 pud_t *pud = (pud_t *)spp_getpage();
33325- pgd_populate(&init_mm, pgd, pud);
33326+ pgd_populate_kernel(&init_mm, pgd, pud);
33327 if (pud != pud_offset(pgd, 0))
33328 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
33329 pud, pud_offset(pgd, 0));
33330@@ -250,7 +271,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
33331 {
33332 if (pud_none(*pud)) {
33333 pmd_t *pmd = (pmd_t *) spp_getpage();
33334- pud_populate(&init_mm, pud, pmd);
33335+ pud_populate_kernel(&init_mm, pud, pmd);
33336 if (pmd != pmd_offset(pud, 0))
33337 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
33338 pmd, pmd_offset(pud, 0));
33339@@ -279,7 +300,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
33340 pmd = fill_pmd(pud, vaddr);
33341 pte = fill_pte(pmd, vaddr);
33342
33343+ pax_open_kernel();
33344 set_pte(pte, new_pte);
33345+ pax_close_kernel();
33346
33347 /*
33348 * It's enough to flush this one mapping.
33349@@ -338,14 +361,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
33350 pgd = pgd_offset_k((unsigned long)__va(phys));
33351 if (pgd_none(*pgd)) {
33352 pud = (pud_t *) spp_getpage();
33353- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
33354- _PAGE_USER));
33355+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
33356 }
33357 pud = pud_offset(pgd, (unsigned long)__va(phys));
33358 if (pud_none(*pud)) {
33359 pmd = (pmd_t *) spp_getpage();
33360- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
33361- _PAGE_USER));
33362+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
33363 }
33364 pmd = pmd_offset(pud, phys);
33365 BUG_ON(!pmd_none(*pmd));
33366@@ -586,7 +607,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
33367 prot);
33368
33369 spin_lock(&init_mm.page_table_lock);
33370- pud_populate(&init_mm, pud, pmd);
33371+ pud_populate_kernel(&init_mm, pud, pmd);
33372 spin_unlock(&init_mm.page_table_lock);
33373 }
33374 __flush_tlb_all();
33375@@ -627,7 +648,7 @@ kernel_physical_mapping_init(unsigned long start,
33376 page_size_mask);
33377
33378 spin_lock(&init_mm.page_table_lock);
33379- pgd_populate(&init_mm, pgd, pud);
33380+ pgd_populate_kernel(&init_mm, pgd, pud);
33381 spin_unlock(&init_mm.page_table_lock);
33382 pgd_changed = true;
33383 }
33384@@ -1195,8 +1216,8 @@ static struct vm_operations_struct gate_vma_ops = {
33385 static struct vm_area_struct gate_vma = {
33386 .vm_start = VSYSCALL_ADDR,
33387 .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
33388- .vm_page_prot = PAGE_READONLY_EXEC,
33389- .vm_flags = VM_READ | VM_EXEC,
33390+ .vm_page_prot = PAGE_READONLY,
33391+ .vm_flags = VM_READ,
33392 .vm_ops = &gate_vma_ops,
33393 };
33394
33395diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
33396index 7b179b4..6bd17777 100644
33397--- a/arch/x86/mm/iomap_32.c
33398+++ b/arch/x86/mm/iomap_32.c
33399@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
33400 type = kmap_atomic_idx_push();
33401 idx = type + KM_TYPE_NR * smp_processor_id();
33402 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33403+
33404+ pax_open_kernel();
33405 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
33406+ pax_close_kernel();
33407+
33408 arch_flush_lazy_mmu_mode();
33409
33410 return (void *)vaddr;
33411diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
33412index baff1da..2816ef4 100644
33413--- a/arch/x86/mm/ioremap.c
33414+++ b/arch/x86/mm/ioremap.c
33415@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
33416 unsigned long i;
33417
33418 for (i = 0; i < nr_pages; ++i)
33419- if (pfn_valid(start_pfn + i) &&
33420- !PageReserved(pfn_to_page(start_pfn + i)))
33421+ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
33422+ !PageReserved(pfn_to_page(start_pfn + i))))
33423 return 1;
33424
33425 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
33426@@ -268,7 +268,7 @@ EXPORT_SYMBOL(ioremap_prot);
33427 *
33428 * Caller must ensure there is only one unmapping for the same pointer.
33429 */
33430-void iounmap(volatile void __iomem *addr)
33431+void iounmap(const volatile void __iomem *addr)
33432 {
33433 struct vm_struct *p, *o;
33434
33435@@ -322,6 +322,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
33436
33437 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
33438 if (page_is_ram(start >> PAGE_SHIFT))
33439+#ifdef CONFIG_HIGHMEM
33440+ if ((start >> PAGE_SHIFT) < max_low_pfn)
33441+#endif
33442 return __va(phys);
33443
33444 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
33445@@ -334,13 +337,16 @@ void *xlate_dev_mem_ptr(unsigned long phys)
33446 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
33447 {
33448 if (page_is_ram(phys >> PAGE_SHIFT))
33449+#ifdef CONFIG_HIGHMEM
33450+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33451+#endif
33452 return;
33453
33454 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
33455 return;
33456 }
33457
33458-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
33459+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
33460
33461 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
33462 {
33463@@ -376,8 +382,7 @@ void __init early_ioremap_init(void)
33464 early_ioremap_setup();
33465
33466 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
33467- memset(bm_pte, 0, sizeof(bm_pte));
33468- pmd_populate_kernel(&init_mm, pmd, bm_pte);
33469+ pmd_populate_user(&init_mm, pmd, bm_pte);
33470
33471 /*
33472 * The boot-ioremap range spans multiple pmds, for which
33473diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
33474index dd89a13..d77bdcc 100644
33475--- a/arch/x86/mm/kmemcheck/kmemcheck.c
33476+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
33477@@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
33478 * memory (e.g. tracked pages)? For now, we need this to avoid
33479 * invoking kmemcheck for PnP BIOS calls.
33480 */
33481- if (regs->flags & X86_VM_MASK)
33482+ if (v8086_mode(regs))
33483 return false;
33484- if (regs->cs != __KERNEL_CS)
33485+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
33486 return false;
33487
33488 pte = kmemcheck_pte_lookup(address);
33489diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
33490index 25e7e13..1964579 100644
33491--- a/arch/x86/mm/mmap.c
33492+++ b/arch/x86/mm/mmap.c
33493@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
33494  * Leave at least a ~128 MB hole with possible stack randomization.
33495 */
33496 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
33497-#define MAX_GAP (TASK_SIZE/6*5)
33498+#define MAX_GAP (pax_task_size/6*5)
33499
33500 static int mmap_is_legacy(void)
33501 {
33502@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
33503 return rnd << PAGE_SHIFT;
33504 }
33505
33506-static unsigned long mmap_base(void)
33507+static unsigned long mmap_base(struct mm_struct *mm)
33508 {
33509 unsigned long gap = rlimit(RLIMIT_STACK);
33510+ unsigned long pax_task_size = TASK_SIZE;
33511+
33512+#ifdef CONFIG_PAX_SEGMEXEC
33513+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33514+ pax_task_size = SEGMEXEC_TASK_SIZE;
33515+#endif
33516
33517 if (gap < MIN_GAP)
33518 gap = MIN_GAP;
33519 else if (gap > MAX_GAP)
33520 gap = MAX_GAP;
33521
33522- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
33523+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
33524 }
33525
33526 /*
33527  * Bottom-up (legacy) layout on X86_32 did not support randomization; X86_64
33528  * does, but not when emulating X86_32.
33529 */
33530-static unsigned long mmap_legacy_base(void)
33531+static unsigned long mmap_legacy_base(struct mm_struct *mm)
33532 {
33533- if (mmap_is_ia32())
33534+ if (mmap_is_ia32()) {
33535+
33536+#ifdef CONFIG_PAX_SEGMEXEC
33537+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33538+ return SEGMEXEC_TASK_UNMAPPED_BASE;
33539+ else
33540+#endif
33541+
33542 return TASK_UNMAPPED_BASE;
33543- else
33544+ } else
33545 return TASK_UNMAPPED_BASE + mmap_rnd();
33546 }
33547
33548@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
33549 */
33550 void arch_pick_mmap_layout(struct mm_struct *mm)
33551 {
33552- mm->mmap_legacy_base = mmap_legacy_base();
33553- mm->mmap_base = mmap_base();
33554+ mm->mmap_legacy_base = mmap_legacy_base(mm);
33555+ mm->mmap_base = mmap_base(mm);
33556+
33557+#ifdef CONFIG_PAX_RANDMMAP
33558+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
33559+ mm->mmap_legacy_base += mm->delta_mmap;
33560+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
33561+ }
33562+#endif
33563
33564 if (mmap_is_legacy()) {
33565 mm->mmap_base = mm->mmap_legacy_base;
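
mmap_base() above clamps the stack rlimit between MIN_GAP (~128 MB plus stack randomization head-room) and MAX_GAP (5/6 of the task size, pax_task_size after this change) before subtracting it from the top of the address space. A sketch of the clamping arithmetic under assumed 32-bit values (3 GB task size, 8 MB RLIMIT_STACK, randomization left out):

#include <assert.h>
#include <stdio.h>

#define PAGE_ALIGN(x) (((x) + 0xfffUL) & ~0xfffUL)

int main(void)
{
	unsigned long task_size = 0xc0000000UL;		/* 3 GB */
	unsigned long min_gap = 128UL * 1024 * 1024;	/* + stack rnd, omitted */
	unsigned long max_gap = task_size / 6 * 5;
	unsigned long gap = 8UL * 1024 * 1024;		/* RLIMIT_STACK */

	if (gap < min_gap)
		gap = min_gap;
	else if (gap > max_gap)
		gap = max_gap;

	assert(gap == min_gap);				/* 8 MB clamped up */
	printf("mmap_base = %#lx\n", PAGE_ALIGN(task_size - gap));
	return 0;
}
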
33566diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
33567index 0057a7a..95c7edd 100644
33568--- a/arch/x86/mm/mmio-mod.c
33569+++ b/arch/x86/mm/mmio-mod.c
33570@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
33571 break;
33572 default:
33573 {
33574- unsigned char *ip = (unsigned char *)instptr;
33575+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
33576 my_trace->opcode = MMIO_UNKNOWN_OP;
33577 my_trace->width = 0;
33578 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
33579@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
33580 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33581 void __iomem *addr)
33582 {
33583- static atomic_t next_id;
33584+ static atomic_unchecked_t next_id;
33585 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
33586 /* These are page-unaligned. */
33587 struct mmiotrace_map map = {
33588@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33589 .private = trace
33590 },
33591 .phys = offset,
33592- .id = atomic_inc_return(&next_id)
33593+ .id = atomic_inc_return_unchecked(&next_id)
33594 };
33595 map.map_id = trace->id;
33596
33597@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
33598 ioremap_trace_core(offset, size, addr);
33599 }
33600
33601-static void iounmap_trace_core(volatile void __iomem *addr)
33602+static void iounmap_trace_core(const volatile void __iomem *addr)
33603 {
33604 struct mmiotrace_map map = {
33605 .phys = 0,
33606@@ -328,7 +328,7 @@ not_enabled:
33607 }
33608 }
33609
33610-void mmiotrace_iounmap(volatile void __iomem *addr)
33611+void mmiotrace_iounmap(const volatile void __iomem *addr)
33612 {
33613 might_sleep();
33614 if (is_enabled()) /* recheck and proper locking in *_core() */
33615diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
33616index a32b706..efb308b 100644
33617--- a/arch/x86/mm/numa.c
33618+++ b/arch/x86/mm/numa.c
33619@@ -478,7 +478,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
33620 return true;
33621 }
33622
33623-static int __init numa_register_memblks(struct numa_meminfo *mi)
33624+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
33625 {
33626 unsigned long uninitialized_var(pfn_align);
33627 int i, nid;
33628diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
33629index ae242a7..1c7998f 100644
33630--- a/arch/x86/mm/pageattr.c
33631+++ b/arch/x86/mm/pageattr.c
33632@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33633 */
33634 #ifdef CONFIG_PCI_BIOS
33635 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
33636- pgprot_val(forbidden) |= _PAGE_NX;
33637+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33638 #endif
33639
33640 /*
33641@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33642 * Does not cover __inittext since that is gone later on. On
33643 * 64bit we do not enforce !NX on the low mapping
33644 */
33645- if (within(address, (unsigned long)_text, (unsigned long)_etext))
33646- pgprot_val(forbidden) |= _PAGE_NX;
33647+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
33648+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33649
33650+#ifdef CONFIG_DEBUG_RODATA
33651 /*
33652 * The .rodata section needs to be read-only. Using the pfn
33653 * catches all aliases.
33654@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33655 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
33656 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
33657 pgprot_val(forbidden) |= _PAGE_RW;
33658+#endif
33659
33660 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
33661 /*
33662@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33663 }
33664 #endif
33665
33666+#ifdef CONFIG_PAX_KERNEXEC
33667+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
33668+ pgprot_val(forbidden) |= _PAGE_RW;
33669+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33670+ }
33671+#endif
33672+
33673 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
33674
33675 return prot;
33676@@ -420,23 +429,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
33677 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
33678 {
33679 /* change init_mm */
33680+ pax_open_kernel();
33681 set_pte_atomic(kpte, pte);
33682+
33683 #ifdef CONFIG_X86_32
33684 if (!SHARED_KERNEL_PMD) {
33685+
33686+#ifdef CONFIG_PAX_PER_CPU_PGD
33687+ unsigned long cpu;
33688+#else
33689 struct page *page;
33690+#endif
33691
33692+#ifdef CONFIG_PAX_PER_CPU_PGD
33693+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33694+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
33695+#else
33696 list_for_each_entry(page, &pgd_list, lru) {
33697- pgd_t *pgd;
33698+ pgd_t *pgd = (pgd_t *)page_address(page);
33699+#endif
33700+
33701 pud_t *pud;
33702 pmd_t *pmd;
33703
33704- pgd = (pgd_t *)page_address(page) + pgd_index(address);
33705+ pgd += pgd_index(address);
33706 pud = pud_offset(pgd, address);
33707 pmd = pmd_offset(pud, address);
33708 set_pte_atomic((pte_t *)pmd, pte);
33709 }
33710 }
33711 #endif
33712+ pax_close_kernel();
33713 }
33714
33715 static int
33716diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
33717index 6574388..87e9bef 100644
33718--- a/arch/x86/mm/pat.c
33719+++ b/arch/x86/mm/pat.c
33720@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
33721
33722 if (!entry) {
33723 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
33724- current->comm, current->pid, start, end - 1);
33725+ current->comm, task_pid_nr(current), start, end - 1);
33726 return -EINVAL;
33727 }
33728
33729@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33730
33731 while (cursor < to) {
33732 if (!devmem_is_allowed(pfn)) {
33733- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
33734- current->comm, from, to - 1);
33735+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
33736+ current->comm, from, to - 1, cursor);
33737 return 0;
33738 }
33739 cursor += PAGE_SIZE;
33740@@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
33741 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
33742 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
33743 "for [mem %#010Lx-%#010Lx]\n",
33744- current->comm, current->pid,
33745+ current->comm, task_pid_nr(current),
33746 cattr_name(flags),
33747 base, (unsigned long long)(base + size-1));
33748 return -EINVAL;
33749@@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33750 flags = lookup_memtype(paddr);
33751 if (want_flags != flags) {
33752 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
33753- current->comm, current->pid,
33754+ current->comm, task_pid_nr(current),
33755 cattr_name(want_flags),
33756 (unsigned long long)paddr,
33757 (unsigned long long)(paddr + size - 1),
33758@@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33759 free_memtype(paddr, paddr + size);
33760 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
33761 " for [mem %#010Lx-%#010Lx], got %s\n",
33762- current->comm, current->pid,
33763+ current->comm, task_pid_nr(current),
33764 cattr_name(want_flags),
33765 (unsigned long long)paddr,
33766 (unsigned long long)(paddr + size - 1),
33767diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
33768index 415f6c4..d319983 100644
33769--- a/arch/x86/mm/pat_rbtree.c
33770+++ b/arch/x86/mm/pat_rbtree.c
33771@@ -160,7 +160,7 @@ success:
33772
33773 failure:
33774 printk(KERN_INFO "%s:%d conflicting memory types "
33775- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
33776+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
33777 end, cattr_name(found_type), cattr_name(match->type));
33778 return -EBUSY;
33779 }
33780diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
33781index 9f0614d..92ae64a 100644
33782--- a/arch/x86/mm/pf_in.c
33783+++ b/arch/x86/mm/pf_in.c
33784@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
33785 int i;
33786 enum reason_type rv = OTHERS;
33787
33788- p = (unsigned char *)ins_addr;
33789+ p = (unsigned char *)ktla_ktva(ins_addr);
33790 p += skip_prefix(p, &prf);
33791 p += get_opcode(p, &opcode);
33792
33793@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
33794 struct prefix_bits prf;
33795 int i;
33796
33797- p = (unsigned char *)ins_addr;
33798+ p = (unsigned char *)ktla_ktva(ins_addr);
33799 p += skip_prefix(p, &prf);
33800 p += get_opcode(p, &opcode);
33801
33802@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
33803 struct prefix_bits prf;
33804 int i;
33805
33806- p = (unsigned char *)ins_addr;
33807+ p = (unsigned char *)ktla_ktva(ins_addr);
33808 p += skip_prefix(p, &prf);
33809 p += get_opcode(p, &opcode);
33810
33811@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
33812 struct prefix_bits prf;
33813 int i;
33814
33815- p = (unsigned char *)ins_addr;
33816+ p = (unsigned char *)ktla_ktva(ins_addr);
33817 p += skip_prefix(p, &prf);
33818 p += get_opcode(p, &opcode);
33819 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
33820@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
33821 struct prefix_bits prf;
33822 int i;
33823
33824- p = (unsigned char *)ins_addr;
33825+ p = (unsigned char *)ktla_ktva(ins_addr);
33826 p += skip_prefix(p, &prf);
33827 p += get_opcode(p, &opcode);
33828 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
33829diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
33830index 6fb6927..4fc13c0 100644
33831--- a/arch/x86/mm/pgtable.c
33832+++ b/arch/x86/mm/pgtable.c
33833@@ -97,10 +97,71 @@ static inline void pgd_list_del(pgd_t *pgd)
33834 list_del(&page->lru);
33835 }
33836
33837-#define UNSHARED_PTRS_PER_PGD \
33838- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33839+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33840+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
33841
33842+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
33843+{
33844+ unsigned int count = USER_PGD_PTRS;
33845
33846+ if (!pax_user_shadow_base)
33847+ return;
33848+
33849+ while (count--)
33850+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
33851+}
33852+#endif
33853+
33854+#ifdef CONFIG_PAX_PER_CPU_PGD
33855+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
33856+{
33857+ unsigned int count = USER_PGD_PTRS;
33858+
33859+ while (count--) {
33860+ pgd_t pgd;
33861+
33862+#ifdef CONFIG_X86_64
33863+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
33864+#else
33865+ pgd = *src++;
33866+#endif
33867+
33868+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33869+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
33870+#endif
33871+
33872+ *dst++ = pgd;
33873+ }
33874+
33875+}
33876+#endif
33877+
33878+#ifdef CONFIG_X86_64
33879+#define pxd_t pud_t
33880+#define pyd_t pgd_t
33881+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
33882+#define pgtable_pxd_page_ctor(page) true
33883+#define pgtable_pxd_page_dtor(page)
33884+#define pxd_free(mm, pud) pud_free((mm), (pud))
33885+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
33886+#define pyd_offset(mm, address) pgd_offset((mm), (address))
33887+#define PYD_SIZE PGDIR_SIZE
33888+#else
33889+#define pxd_t pmd_t
33890+#define pyd_t pud_t
33891+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
33892+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
33893+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
33894+#define pxd_free(mm, pud) pmd_free((mm), (pud))
33895+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
33896+#define pyd_offset(mm, address) pud_offset((mm), (address))
33897+#define PYD_SIZE PUD_SIZE
33898+#endif
33899+
33900+#ifdef CONFIG_PAX_PER_CPU_PGD
33901+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
33902+static inline void pgd_dtor(pgd_t *pgd) {}
33903+#else
33904 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
33905 {
33906 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
33907@@ -141,6 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
33908 pgd_list_del(pgd);
33909 spin_unlock(&pgd_lock);
33910 }
33911+#endif
33912
33913 /*
33914 * List of all pgd's needed for non-PAE so it can invalidate entries
33915@@ -153,7 +215,7 @@ static void pgd_dtor(pgd_t *pgd)
33916 * -- nyc
33917 */
33918
33919-#ifdef CONFIG_X86_PAE
33920+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
33921 /*
33922 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
33923 * updating the top-level pagetable entries to guarantee the
33924@@ -165,7 +227,7 @@ static void pgd_dtor(pgd_t *pgd)
33925 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
33926 * and initialize the kernel pmds here.
33927 */
33928-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
33929+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33930
33931 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33932 {
33933@@ -183,43 +245,45 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33934 */
33935 flush_tlb_mm(mm);
33936 }
33937+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
33938+#define PREALLOCATED_PXDS USER_PGD_PTRS
33939 #else /* !CONFIG_X86_PAE */
33940
33941 /* No need to prepopulate any pagetable entries in non-PAE modes. */
33942-#define PREALLOCATED_PMDS 0
33943+#define PREALLOCATED_PXDS 0
33944
33945 #endif /* CONFIG_X86_PAE */
33946
33947-static void free_pmds(pmd_t *pmds[])
33948+static void free_pxds(pxd_t *pxds[])
33949 {
33950 int i;
33951
33952- for(i = 0; i < PREALLOCATED_PMDS; i++)
33953- if (pmds[i]) {
33954- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
33955- free_page((unsigned long)pmds[i]);
33956+ for(i = 0; i < PREALLOCATED_PXDS; i++)
33957+ if (pxds[i]) {
33958+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
33959+ free_page((unsigned long)pxds[i]);
33960 }
33961 }
33962
33963-static int preallocate_pmds(pmd_t *pmds[])
33964+static int preallocate_pxds(pxd_t *pxds[])
33965 {
33966 int i;
33967 bool failed = false;
33968
33969- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33970- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
33971- if (!pmd)
33972+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33973+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
33974+ if (!pxd)
33975 failed = true;
33976- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
33977- free_page((unsigned long)pmd);
33978- pmd = NULL;
33979+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
33980+ free_page((unsigned long)pxd);
33981+ pxd = NULL;
33982 failed = true;
33983 }
33984- pmds[i] = pmd;
33985+ pxds[i] = pxd;
33986 }
33987
33988 if (failed) {
33989- free_pmds(pmds);
33990+ free_pxds(pxds);
33991 return -ENOMEM;
33992 }
33993
33994@@ -232,49 +296,52 @@ static int preallocate_pmds(pmd_t *pmds[])
33995 * preallocate which never got a corresponding vma will need to be
33996 * freed manually.
33997 */
33998-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
33999+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
34000 {
34001 int i;
34002
34003- for(i = 0; i < PREALLOCATED_PMDS; i++) {
34004+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
34005 pgd_t pgd = pgdp[i];
34006
34007 if (pgd_val(pgd) != 0) {
34008- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
34009+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
34010
34011- pgdp[i] = native_make_pgd(0);
34012+ set_pgd(pgdp + i, native_make_pgd(0));
34013
34014- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
34015- pmd_free(mm, pmd);
34016+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
34017+ pxd_free(mm, pxd);
34018 }
34019 }
34020 }
34021
34022-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
34023+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
34024 {
34025- pud_t *pud;
34026+ pyd_t *pyd;
34027 int i;
34028
34029- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
34030+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
34031 return;
34032
34033- pud = pud_offset(pgd, 0);
34034-
34035- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
34036- pmd_t *pmd = pmds[i];
34037+#ifdef CONFIG_X86_64
34038+ pyd = pyd_offset(mm, 0L);
34039+#else
34040+ pyd = pyd_offset(pgd, 0L);
34041+#endif
34042
34043+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
34044+ pxd_t *pxd = pxds[i];
34045 if (i >= KERNEL_PGD_BOUNDARY)
34046- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
34047- sizeof(pmd_t) * PTRS_PER_PMD);
34048+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
34049+ sizeof(pxd_t) * PTRS_PER_PMD);
34050
34051- pud_populate(mm, pud, pmd);
34052+ pyd_populate(mm, pyd, pxd);
34053 }
34054 }
34055
34056 pgd_t *pgd_alloc(struct mm_struct *mm)
34057 {
34058 pgd_t *pgd;
34059- pmd_t *pmds[PREALLOCATED_PMDS];
34060+ pxd_t *pxds[PREALLOCATED_PXDS];
34061
34062 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
34063
34064@@ -283,11 +350,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34065
34066 mm->pgd = pgd;
34067
34068- if (preallocate_pmds(pmds) != 0)
34069+ if (preallocate_pxds(pxds) != 0)
34070 goto out_free_pgd;
34071
34072 if (paravirt_pgd_alloc(mm) != 0)
34073- goto out_free_pmds;
34074+ goto out_free_pxds;
34075
34076 /*
34077 * Make sure that pre-populating the pmds is atomic with
34078@@ -297,14 +364,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34079 spin_lock(&pgd_lock);
34080
34081 pgd_ctor(mm, pgd);
34082- pgd_prepopulate_pmd(mm, pgd, pmds);
34083+ pgd_prepopulate_pxd(mm, pgd, pxds);
34084
34085 spin_unlock(&pgd_lock);
34086
34087 return pgd;
34088
34089-out_free_pmds:
34090- free_pmds(pmds);
34091+out_free_pxds:
34092+ free_pxds(pxds);
34093 out_free_pgd:
34094 free_page((unsigned long)pgd);
34095 out:
34096@@ -313,7 +380,7 @@ out:
34097
34098 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
34099 {
34100- pgd_mop_up_pmds(mm, pgd);
34101+ pgd_mop_up_pxds(mm, pgd);
34102 pgd_dtor(pgd);
34103 paravirt_pgd_free(mm, pgd);
34104 free_page((unsigned long)pgd);
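Most of the pgtable.c churn is a mechanical rename: the PMD-preallocation machinery becomes level-neutral so that identical functions preallocate PMDs on 32-bit PAE kernels and PUDs on x86-64 kernels, which CONFIG_PAX_PER_CPU_PGD needs. How the aliases resolve, summarized from the #defines introduced above:

/* Level-neutral aliases, per the hunk above:
 *
 *                  x86-64          32-bit
 *   pxd_t          pud_t           pmd_t
 *   pyd_t          pgd_t           pud_t
 *   PYD_SIZE       PGDIR_SIZE      PUD_SIZE
 *   pyd_populate   pgd_populate    pud_populate
 *
 * so preallocate_pxds()/free_pxds() walk one paging level higher
 * on 64-bit kernels than the old pmds[] code did.
 */
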
34105diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
34106index 4dd8cf6..f9d143e 100644
34107--- a/arch/x86/mm/pgtable_32.c
34108+++ b/arch/x86/mm/pgtable_32.c
34109@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
34110 return;
34111 }
34112 pte = pte_offset_kernel(pmd, vaddr);
34113+
34114+ pax_open_kernel();
34115 if (pte_val(pteval))
34116 set_pte_at(&init_mm, vaddr, pte, pteval);
34117 else
34118 pte_clear(&init_mm, vaddr, pte);
34119+ pax_close_kernel();
34120
34121 /*
34122 * It's enough to flush this one mapping.
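set_pte_vaddr() writes a kernel page-table entry, and PaX KERNEXEC can keep those tables read-only; the pax_open_kernel()/pax_close_kernel() pair added above brackets the write so it is permitted. A sketch of the idiom (protected_pte_write() is hypothetical; the bracket itself typically toggles CR0.WP or switches to a writable alias, and compiles to nothing on vanilla kernels):

static inline void protected_pte_write(pte_t *ptep, pte_t pteval)
{
	pax_open_kernel();	/* lift write protection on kernel tables */
	set_pte(ptep, pteval);
	pax_close_kernel();	/* restore protection before returning */
}
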
34123diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
34124index e666cbb..61788c45 100644
34125--- a/arch/x86/mm/physaddr.c
34126+++ b/arch/x86/mm/physaddr.c
34127@@ -10,7 +10,7 @@
34128 #ifdef CONFIG_X86_64
34129
34130 #ifdef CONFIG_DEBUG_VIRTUAL
34131-unsigned long __phys_addr(unsigned long x)
34132+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34133 {
34134 unsigned long y = x - __START_KERNEL_map;
34135
34136@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
34137 #else
34138
34139 #ifdef CONFIG_DEBUG_VIRTUAL
34140-unsigned long __phys_addr(unsigned long x)
34141+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34142 {
34143 unsigned long phys_addr = x - PAGE_OFFSET;
34144 /* VMALLOC_* aren't constants */
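__phys_addr() deliberately relies on unsigned wraparound when it subtracts __START_KERNEL_map (or PAGE_OFFSET on 32-bit), so the patch tags it __intentional_overflow(-1), which whitelists the function for the size_overflow gcc plugin. On toolchains without the plugin the annotation has to disappear; a sketch of that fallback (the plugin-enabled build maps it to a gcc attribute instead):

#ifndef __intentional_overflow
#define __intentional_overflow(...)	/* no plugin: expands to nothing */
#endif
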
34145diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
34146index 90555bf..f5f1828 100644
34147--- a/arch/x86/mm/setup_nx.c
34148+++ b/arch/x86/mm/setup_nx.c
34149@@ -5,8 +5,10 @@
34150 #include <asm/pgtable.h>
34151 #include <asm/proto.h>
34152
34153+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34154 static int disable_nx;
34155
34156+#ifndef CONFIG_PAX_PAGEEXEC
34157 /*
34158 * noexec = on|off
34159 *
34160@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
34161 return 0;
34162 }
34163 early_param("noexec", noexec_setup);
34164+#endif
34165+
34166+#endif
34167
34168 void x86_configure_nx(void)
34169 {
34170+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34171 if (cpu_has_nx && !disable_nx)
34172 __supported_pte_mask |= _PAGE_NX;
34173 else
34174+#endif
34175 __supported_pte_mask &= ~_PAGE_NX;
34176 }
34177
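The setup_nx.c change compiles the NX plumbing out entirely where NX cannot exist (32-bit without PAE) and drops the noexec= parameter once CONFIG_PAX_PAGEEXEC is enabled, so page-based non-executability can no longer be turned off from the command line. The post-patch behaviour of x86_configure_nx(), as a decision table:

/* x86_configure_nx() after the patch (sketch):
 *
 *   64-bit or PAE, cpu_has_nx, !disable_nx  ->  _PAGE_NX stays usable
 *   anything else                           ->  _PAGE_NX masked out
 *
 * and under CONFIG_PAX_PAGEEXEC, disable_nx can never become set,
 * because noexec_setup() is no longer compiled in.
 */
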
34178diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
34179index dd8dda1..9e9b0f6 100644
34180--- a/arch/x86/mm/tlb.c
34181+++ b/arch/x86/mm/tlb.c
34182@@ -48,7 +48,11 @@ void leave_mm(int cpu)
34183 BUG();
34184 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
34185 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
34186+
34187+#ifndef CONFIG_PAX_PER_CPU_PGD
34188 load_cr3(swapper_pg_dir);
34189+#endif
34190+
34191 }
34192 }
34193 EXPORT_SYMBOL_GPL(leave_mm);
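The tlb.c hunk skips the cr3 reload in leave_mm() under CONFIG_PAX_PER_CPU_PGD: every CPU then runs permanently on its own PGD, kept in sync by the clone/shadow helpers added to pgtable.c above, so there is no swapper_pg_dir to fall back to. The invariant this relies on, sketched:

/* With CONFIG_PAX_PER_CPU_PGD (sketch of the invariant):
 *
 *   read_cr3() == __pa(get_cpu_pgd(cpu, kernel)) | PCID bits
 *
 * holds whenever CPU 'cpu' executes kernel code, so dropping a
 * lazy mm never requires switching page tables at all.
 */
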
34194diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
34195new file mode 100644
34196index 0000000..dace51c
34197--- /dev/null
34198+++ b/arch/x86/mm/uderef_64.c
34199@@ -0,0 +1,37 @@
34200+#include <linux/mm.h>
34201+#include <asm/pgtable.h>
34202+#include <asm/uaccess.h>
34203+
34204+#ifdef CONFIG_PAX_MEMORY_UDEREF
34205+/* PaX: due to the special call convention these functions must
34206+ * - remain leaf functions under all configurations,
34207+ * - never be called directly, only dereferenced from the wrappers.
34208+ */
34209+void __pax_open_userland(void)
34210+{
34211+ unsigned int cpu;
34212+
34213+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34214+ return;
34215+
34216+ cpu = raw_get_cpu();
34217+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
34218+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
34219+ raw_put_cpu_no_resched();
34220+}
34221+EXPORT_SYMBOL(__pax_open_userland);
34222+
34223+void __pax_close_userland(void)
34224+{
34225+ unsigned int cpu;
34226+
34227+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34228+ return;
34229+
34230+ cpu = raw_get_cpu();
34231+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
34232+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
34233+ raw_put_cpu_no_resched();
34234+}
34235+EXPORT_SYMBOL(__pax_close_userland);
34236+#endif
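The new uderef_64.c supplies the UDEREF open/close primitives using PCIDs: the kernel and user views of the per-CPU PGD stay resident in the TLB under different address-space IDs, so "opening userland" is just a tagged cr3 write. Spelled out from the code above:

/* CR3 values the two functions install (sketch):
 *
 *   __pax_open_userland():  __pa(per-CPU user PGD)   | PCID_USER   | PCID_NOFLUSH
 *   __pax_close_userland(): __pa(per-CPU kernel PGD) | PCID_KERNEL | PCID_NOFLUSH
 *
 * PCID_NOFLUSH (CR3 bit 63) suppresses the TLB flush on the write,
 * so the switch costs little more than the cr3 move itself; the
 * BUG_ON()s assert each transition starts from the expected PCID.
 */
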
34237diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
34238index 6440221..f746de8 100644
34239--- a/arch/x86/net/bpf_jit.S
34240+++ b/arch/x86/net/bpf_jit.S
34241@@ -9,19 +9,17 @@
34242 */
34243 #include <linux/linkage.h>
34244 #include <asm/dwarf2.h>
34245+#include <asm/alternative-asm.h>
34246
34247 /*
34248 * Calling convention :
34249- * rbx : skb pointer (callee saved)
34250+ * rdi : skb pointer
34251 * esi : offset of byte(s) to fetch in skb (can be scratched)
34252- * r10 : copy of skb->data
34253+ * r8 : copy of skb->data
34254 * r9d : hlen = skb->len - skb->data_len
34255 */
34256-#define SKBDATA %r10
34257+#define SKBDATA %r8
34258 #define SKF_MAX_NEG_OFF $(-0x200000) /* SKF_LL_OFF from filter.h */
34259-#define MAX_BPF_STACK (512 /* from filter.h */ + \
34260- 32 /* space for rbx,r13,r14,r15 */ + \
34261- 8 /* space for skb_copy_bits */)
34262
34263 sk_load_word:
34264 .globl sk_load_word
34265@@ -38,6 +36,7 @@ sk_load_word_positive_offset:
34266 jle bpf_slow_path_word
34267 mov (SKBDATA,%rsi),%eax
34268 bswap %eax /* ntohl() */
34269+ pax_force_retaddr
34270 ret
34271
34272 sk_load_half:
34273@@ -55,6 +54,7 @@ sk_load_half_positive_offset:
34274 jle bpf_slow_path_half
34275 movzwl (SKBDATA,%rsi),%eax
34276 rol $8,%ax # ntohs()
34277+ pax_force_retaddr
34278 ret
34279
34280 sk_load_byte:
34281@@ -69,45 +69,83 @@ sk_load_byte_positive_offset:
34282 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
34283 jle bpf_slow_path_byte
34284 movzbl (SKBDATA,%rsi),%eax
34285+ pax_force_retaddr
34286+ ret
34287+
34288+/**
34289+ * sk_load_byte_msh - BPF_S_LDX_B_MSH helper
34290+ *
34291+ * Implements BPF_S_LDX_B_MSH : ldxb 4*([offset]&0xf)
34292+ * Must preserve A accumulator (%eax)
34293+ * Inputs : %esi is the offset value
34294+ */
34295+sk_load_byte_msh:
34296+ .globl sk_load_byte_msh
34297+ test %esi,%esi
34298+ js bpf_slow_path_byte_msh_neg
34299+
34300+sk_load_byte_msh_positive_offset:
34301+ .globl sk_load_byte_msh_positive_offset
34302+ cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte_msh */
34303+ jle bpf_slow_path_byte_msh
34304+ movzbl (SKBDATA,%rsi),%ebx
34305+ and $15,%bl
34306+ shl $2,%bl
34307+ pax_force_retaddr
34308 ret
34309
34310 /* rsi contains offset and can be scratched */
34311 #define bpf_slow_path_common(LEN) \
34312- mov %rbx, %rdi; /* arg1 == skb */ \
34313+ push %rdi; /* save skb */ \
34314 push %r9; \
34315 push SKBDATA; \
34316 /* rsi already has offset */ \
34317 mov $LEN,%ecx; /* len */ \
34318- lea - MAX_BPF_STACK + 32(%rbp),%rdx; \
34319+ lea -12(%rbp),%rdx; \
34320 call skb_copy_bits; \
34321 test %eax,%eax; \
34322 pop SKBDATA; \
34323- pop %r9;
34324+ pop %r9; \
34325+ pop %rdi
34326
34327
34328 bpf_slow_path_word:
34329 bpf_slow_path_common(4)
34330 js bpf_error
34331- mov - MAX_BPF_STACK + 32(%rbp),%eax
34332+ mov -12(%rbp),%eax
34333 bswap %eax
34334+ pax_force_retaddr
34335 ret
34336
34337 bpf_slow_path_half:
34338 bpf_slow_path_common(2)
34339 js bpf_error
34340- mov - MAX_BPF_STACK + 32(%rbp),%ax
34341+ mov -12(%rbp),%ax
34342 rol $8,%ax
34343 movzwl %ax,%eax
34344+ pax_force_retaddr
34345 ret
34346
34347 bpf_slow_path_byte:
34348 bpf_slow_path_common(1)
34349 js bpf_error
34350- movzbl - MAX_BPF_STACK + 32(%rbp),%eax
34351+ movzbl -12(%rbp),%eax
34352+ pax_force_retaddr
34353+ ret
34354+
34355+bpf_slow_path_byte_msh:
34356+ xchg %eax,%ebx /* don't lose A; X is about to be scratched */
34357+ bpf_slow_path_common(1)
34358+ js bpf_error
34359+ movzbl -12(%rbp),%eax
34360+ and $15,%al
34361+ shl $2,%al
34362+ xchg %eax,%ebx
34363+ pax_force_retaddr
34364 ret
34365
34366 #define sk_negative_common(SIZE) \
34367- mov %rbx, %rdi; /* arg1 == skb */ \
34368+ push %rdi; /* save skb */ \
34369 push %r9; \
34370 push SKBDATA; \
34371 /* rsi already has offset */ \
34372@@ -116,8 +154,10 @@ bpf_slow_path_byte:
34373 test %rax,%rax; \
34374 pop SKBDATA; \
34375 pop %r9; \
34376+ pop %rdi; \
34377 jz bpf_error
34378
34379+
34380 bpf_slow_path_word_neg:
34381 cmp SKF_MAX_NEG_OFF, %esi /* test range */
34382 jl bpf_error /* offset lower -> error */
34383@@ -126,6 +166,7 @@ sk_load_word_negative_offset:
34384 sk_negative_common(4)
34385 mov (%rax), %eax
34386 bswap %eax
34387+ pax_force_retaddr
34388 ret
34389
34390 bpf_slow_path_half_neg:
34391@@ -137,6 +178,7 @@ sk_load_half_negative_offset:
34392 mov (%rax),%ax
34393 rol $8,%ax
34394 movzwl %ax,%eax
34395+ pax_force_retaddr
34396 ret
34397
34398 bpf_slow_path_byte_neg:
34399@@ -146,14 +188,27 @@ sk_load_byte_negative_offset:
34400 .globl sk_load_byte_negative_offset
34401 sk_negative_common(1)
34402 movzbl (%rax), %eax
34403+ pax_force_retaddr
34404+ ret
34405+
34406+bpf_slow_path_byte_msh_neg:
34407+ cmp SKF_MAX_NEG_OFF, %esi
34408+ jl bpf_error
34409+sk_load_byte_msh_negative_offset:
34410+ .globl sk_load_byte_msh_negative_offset
34411+ xchg %eax,%ebx /* don't lose A; X is about to be scratched */
34412+ sk_negative_common(1)
34413+ movzbl (%rax),%eax
34414+ and $15,%al
34415+ shl $2,%al
34416+ xchg %eax,%ebx
34417+ pax_force_retaddr
34418 ret
34419
34420 bpf_error:
34421 # force a return 0 from jit handler
34422- xor %eax,%eax
34423- mov - MAX_BPF_STACK(%rbp),%rbx
34424- mov - MAX_BPF_STACK + 8(%rbp),%r13
34425- mov - MAX_BPF_STACK + 16(%rbp),%r14
34426- mov - MAX_BPF_STACK + 24(%rbp),%r15
34427+ xor %eax,%eax
34428+ mov -8(%rbp),%rbx
34429 leaveq
34430+ pax_force_retaddr
34431 ret
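The bpf_jit.S rewrite above is effectively a revert to the classic-BPF JIT helpers: the skb comes in via %rdi, skb->data sits in %r8, scratch data lives at small negative %rbp offsets, the BPF_S_LDX_B_MSH helper comes back, and every ret is preceded by pax_force_retaddr so KERNEXEC can sanitize return addresses. The companion bpf_jit_comp.c diff below reverts the compiler to match and reintroduces GRKERNSEC_JIT_HARDEN constant blinding, whose core trick is easy to state in C:

/* Constant blinding (sketch): an attacker-chosen 32-bit immediate K
 * must never appear verbatim in the executable JIT output.
 */
u32 key     = prandom_u32();	/* fresh key per translated insn */
u32 blinded = key ^ K;		/* this is what lands in the code */
/* emitted sequence: mov ecx, key ; xor ecx, blinded  =>  ecx == K */
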
34432diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
34433index 99bef86..bdfb5c6 100644
34434--- a/arch/x86/net/bpf_jit_comp.c
34435+++ b/arch/x86/net/bpf_jit_comp.c
34436@@ -1,7 +1,6 @@
34437 /* bpf_jit_comp.c : BPF JIT compiler
34438 *
34439 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
34440- * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
34441 *
34442 * This program is free software; you can redistribute it and/or
34443 * modify it under the terms of the GNU General Public License
34444@@ -15,16 +14,28 @@
34445 #include <linux/if_vlan.h>
34446 #include <linux/random.h>
34447
34448+/*
34449+ * Conventions :
34450+ * EAX : BPF A accumulator
34451+ * EBX : BPF X accumulator
34452+ * RDI : pointer to skb (first argument given to JIT function)
34453+ * RBP : frame pointer (even if CONFIG_FRAME_POINTER=n)
34454+ * ECX,EDX,ESI : scratch registers
34455+ * r9d : skb->len - skb->data_len (headlen)
34456+ * r8 : skb->data
34457+ * -8(RBP) : saved RBX value
34458+ * -16(RBP)..-80(RBP) : BPF_MEMWORDS values
34459+ */
34460 int bpf_jit_enable __read_mostly;
34461
34462 /*
34463 * assembly code in arch/x86/net/bpf_jit.S
34464 */
34465-extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
34466+extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
34467 extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
34468-extern u8 sk_load_byte_positive_offset[];
34469+extern u8 sk_load_byte_positive_offset[], sk_load_byte_msh_positive_offset[];
34470 extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
34471-extern u8 sk_load_byte_negative_offset[];
34472+extern u8 sk_load_byte_negative_offset[], sk_load_byte_msh_negative_offset[];
34473
34474 static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
34475 {
34476@@ -39,50 +50,113 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
34477 return ptr + len;
34478 }
34479
34480+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
34481+#define MAX_INSTR_CODE_SIZE 96
34482+#else
34483+#define MAX_INSTR_CODE_SIZE 64
34484+#endif
34485+
34486 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
34487
34488 #define EMIT1(b1) EMIT(b1, 1)
34489 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
34490 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
34491 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
34492-#define EMIT1_off32(b1, off) \
34493- do {EMIT1(b1); EMIT(off, 4); } while (0)
34494-#define EMIT2_off32(b1, b2, off) \
34495- do {EMIT2(b1, b2); EMIT(off, 4); } while (0)
34496-#define EMIT3_off32(b1, b2, b3, off) \
34497- do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
34498-#define EMIT4_off32(b1, b2, b3, b4, off) \
34499- do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
34500+
34501+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
34502+/* original constant will appear in ecx */
34503+#define DILUTE_CONST_SEQUENCE(_off, _key) \
34504+do { \
34505+ /* mov ecx, randkey */ \
34506+ EMIT1(0xb9); \
34507+ EMIT(_key, 4); \
34508+ /* xor ecx, randkey ^ off */ \
34509+ EMIT2(0x81, 0xf1); \
34510+ EMIT((_key) ^ (_off), 4); \
34511+} while (0)
34512+
34513+#define EMIT1_off32(b1, _off) \
34514+do { \
34515+ switch (b1) { \
34516+ case 0x05: /* add eax, imm32 */ \
34517+ case 0x2d: /* sub eax, imm32 */ \
34518+ case 0x25: /* and eax, imm32 */ \
34519+ case 0x0d: /* or eax, imm32 */ \
34520+ case 0xb8: /* mov eax, imm32 */ \
34521+ case 0x35: /* xor eax, imm32 */ \
34522+ case 0x3d: /* cmp eax, imm32 */ \
34523+ case 0xa9: /* test eax, imm32 */ \
34524+ DILUTE_CONST_SEQUENCE(_off, randkey); \
34525+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
34526+ break; \
34527+ case 0xbb: /* mov ebx, imm32 */ \
34528+ DILUTE_CONST_SEQUENCE(_off, randkey); \
34529+ /* mov ebx, ecx */ \
34530+ EMIT2(0x89, 0xcb); \
34531+ break; \
34532+ case 0xbe: /* mov esi, imm32 */ \
34533+ DILUTE_CONST_SEQUENCE(_off, randkey); \
34534+ /* mov esi, ecx */ \
34535+ EMIT2(0x89, 0xce); \
34536+ break; \
34537+ case 0xe8: /* call rel imm32, always to known funcs */ \
34538+ EMIT1(b1); \
34539+ EMIT(_off, 4); \
34540+ break; \
34541+ case 0xe9: /* jmp rel imm32 */ \
34542+ EMIT1(b1); \
34543+ EMIT(_off, 4); \
34544+ /* prevent fall-through; we're not called if off == 0 */ \
34545+ EMIT(0xcccccccc, 4); \
34546+ EMIT(0xcccccccc, 4); \
34547+ break; \
34548+ default: \
34549+ BUILD_BUG(); \
34550+ } \
34551+} while (0)
34552+
34553+#define EMIT2_off32(b1, b2, _off) \
34554+do { \
34555+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
34556+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
34557+ EMIT(randkey, 4); \
34558+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
34559+ EMIT((_off) - randkey, 4); \
34560+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
34561+ DILUTE_CONST_SEQUENCE(_off, randkey); \
34562+ /* imul eax, ecx */ \
34563+ EMIT3(0x0f, 0xaf, 0xc1); \
34564+ } else { \
34565+ BUILD_BUG(); \
34566+ } \
34567+} while (0)
34568+#else
34569+#define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
34570+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
34571+#endif
34572+
34573+#define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
34574+#define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
34575
34576 static inline bool is_imm8(int value)
34577 {
34578 return value <= 127 && value >= -128;
34579 }
34580
34581-static inline bool is_simm32(s64 value)
34582+static inline bool is_near(int offset)
34583 {
34584- return value == (s64) (s32) value;
34585+ return offset <= 127 && offset >= -128;
34586 }
34587
34588-/* mov dst, src */
34589-#define EMIT_mov(DST, SRC) \
34590- do {if (DST != SRC) \
34591- EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
34592- } while (0)
34593-
34594-static int bpf_size_to_x86_bytes(int bpf_size)
34595-{
34596- if (bpf_size == BPF_W)
34597- return 4;
34598- else if (bpf_size == BPF_H)
34599- return 2;
34600- else if (bpf_size == BPF_B)
34601- return 1;
34602- else if (bpf_size == BPF_DW)
34603- return 4; /* imm32 */
34604- else
34605- return 0;
34606-}
34607+#define EMIT_JMP(offset) \
34608+do { \
34609+ if (offset) { \
34610+ if (is_near(offset)) \
34611+ EMIT2(0xeb, offset); /* jmp .+off8 */ \
34612+ else \
34613+ EMIT1_off32(0xe9, offset); /* jmp .+off32 */ \
34614+ } \
34615+} while (0)
34616
34617 /* list of x86 cond jumps opcodes (. + s8)
34618 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
34619@@ -93,8 +167,46 @@ static int bpf_size_to_x86_bytes(int bpf_size)
34620 #define X86_JNE 0x75
34621 #define X86_JBE 0x76
34622 #define X86_JA 0x77
34623-#define X86_JGE 0x7D
34624-#define X86_JG 0x7F
34625+
34626+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
34627+#define APPEND_FLOW_VERIFY() \
34628+do { \
34629+ /* mov ecx, randkey */ \
34630+ EMIT1(0xb9); \
34631+ EMIT(randkey, 4); \
34632+ /* cmp ecx, randkey */ \
34633+ EMIT2(0x81, 0xf9); \
34634+ EMIT(randkey, 4); \
34635+ /* jz after 8 int 3s */ \
34636+ EMIT2(0x74, 0x08); \
34637+ EMIT(0xcccccccc, 4); \
34638+ EMIT(0xcccccccc, 4); \
34639+} while (0)
34640+#else
34641+#define APPEND_FLOW_VERIFY() do { } while (0)
34642+#endif
34643+
34644+#define EMIT_COND_JMP(op, offset) \
34645+do { \
34646+ if (is_near(offset)) \
34647+ EMIT2(op, offset); /* jxx .+off8 */ \
34648+ else { \
34649+ EMIT2(0x0f, op + 0x10); \
34650+ EMIT(offset, 4); /* jxx .+off32 */ \
34651+ APPEND_FLOW_VERIFY(); \
34652+ } \
34653+} while (0)
34654+
34655+#define COND_SEL(CODE, TOP, FOP) \
34656+ case CODE: \
34657+ t_op = TOP; \
34658+ f_op = FOP; \
34659+ goto cond_branch
34660+
34661+
34662+#define SEEN_DATAREF 1 /* might call external helpers */
34663+#define SEEN_XREG 2 /* ebx is used */
34664+#define SEEN_MEM 4 /* use mem[] for temporary storage */
34665
34666 static inline void bpf_flush_icache(void *start, void *end)
34667 {
34668@@ -109,804 +221,646 @@ static inline void bpf_flush_icache(void *start, void *end)
34669 #define CHOOSE_LOAD_FUNC(K, func) \
34670 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
34671
34672-struct bpf_binary_header {
34673- unsigned int pages;
34674- /* Note : for security reasons, bpf code will follow a randomly
34675- * sized amount of int3 instructions
34676- */
34677- u8 image[];
34678-};
34679+/* Helper to find the offset of pkt_type in sk_buff
34680+ * We want to make sure it's still a 3-bit field starting at a byte boundary.
34681+ */
34682+#define PKT_TYPE_MAX 7
34683+static int pkt_type_offset(void)
34684+{
34685+ struct sk_buff skb_probe = {
34686+ .pkt_type = ~0,
34687+ };
34688+ char *ct = (char *)&skb_probe;
34689+ unsigned int off;
34690
34691-static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
34692+ for (off = 0; off < sizeof(struct sk_buff); off++) {
34693+ if (ct[off] == PKT_TYPE_MAX)
34694+ return off;
34695+ }
34696+ pr_err_once("Please fix pkt_type_offset(), as pkt_type couldn't be found\n");
34697+ return -1;
34698+}
34699+
34700+/* Note : for security reasons, bpf code will follow a randomly
34701+ * sized amount of int3 instructions
34702+ */
34703+static u8 *bpf_alloc_binary(unsigned int proglen,
34704 u8 **image_ptr)
34705 {
34706 unsigned int sz, hole;
34707- struct bpf_binary_header *header;
34708+ u8 *header;
34709
34710 /* Most of BPF filters are really small,
34711 * but if some of them fill a page, allow at least
34712 * 128 extra bytes to insert a random section of int3
34713 */
34714- sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
34715- header = module_alloc(sz);
34716+ sz = round_up(proglen + 128, PAGE_SIZE);
34717+ header = module_alloc_exec(sz);
34718 if (!header)
34719 return NULL;
34720
34721+ pax_open_kernel();
34722 memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
34723+ pax_close_kernel();
34724
34725- header->pages = sz / PAGE_SIZE;
34726- hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
34727+ hole = PAGE_SIZE - (proglen & ~PAGE_MASK);
34728
34729 /* insert a random number of int3 instructions before BPF code */
34730- *image_ptr = &header->image[prandom_u32() % hole];
34731+ *image_ptr = &header[prandom_u32() % hole];
34732 return header;
34733 }
34734
34735-/* pick a register outside of BPF range for JIT internal work */
34736-#define AUX_REG (MAX_BPF_REG + 1)
34737-
34738-/* the following table maps BPF registers to x64 registers.
34739- * x64 register r12 is unused, since if used as base address register
34740- * in load/store instructions, it always needs an extra byte of encoding
34741- */
34742-static const int reg2hex[] = {
34743- [BPF_REG_0] = 0, /* rax */
34744- [BPF_REG_1] = 7, /* rdi */
34745- [BPF_REG_2] = 6, /* rsi */
34746- [BPF_REG_3] = 2, /* rdx */
34747- [BPF_REG_4] = 1, /* rcx */
34748- [BPF_REG_5] = 0, /* r8 */
34749- [BPF_REG_6] = 3, /* rbx callee saved */
34750- [BPF_REG_7] = 5, /* r13 callee saved */
34751- [BPF_REG_8] = 6, /* r14 callee saved */
34752- [BPF_REG_9] = 7, /* r15 callee saved */
34753- [BPF_REG_FP] = 5, /* rbp readonly */
34754- [AUX_REG] = 3, /* r11 temp register */
34755-};
34756-
34757-/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
34758- * which need extra byte of encoding.
34759- * rax,rcx,...,rbp have simpler encoding
34760- */
34761-static inline bool is_ereg(u32 reg)
34762-{
34763- if (reg == BPF_REG_5 || reg == AUX_REG ||
34764- (reg >= BPF_REG_7 && reg <= BPF_REG_9))
34765- return true;
34766- else
34767- return false;
34768-}
34769-
34770-/* add modifiers if 'reg' maps to x64 registers r8..r15 */
34771-static inline u8 add_1mod(u8 byte, u32 reg)
34772-{
34773- if (is_ereg(reg))
34774- byte |= 1;
34775- return byte;
34776-}
34777-
34778-static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
34779-{
34780- if (is_ereg(r1))
34781- byte |= 1;
34782- if (is_ereg(r2))
34783- byte |= 4;
34784- return byte;
34785-}
34786-
34787-/* encode 'dst_reg' register into x64 opcode 'byte' */
34788-static inline u8 add_1reg(u8 byte, u32 dst_reg)
34789-{
34790- return byte + reg2hex[dst_reg];
34791-}
34792-
34793-/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
34794-static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
34795-{
34796- return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
34797-}
34798-
34799-struct jit_context {
34800- unsigned int cleanup_addr; /* epilogue code offset */
34801- bool seen_ld_abs;
34802-};
34803-
34804-static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
34805- int oldproglen, struct jit_context *ctx)
34806-{
34807- struct sock_filter_int *insn = bpf_prog->insnsi;
34808- int insn_cnt = bpf_prog->len;
34809- u8 temp[64];
34810- int i;
34811- int proglen = 0;
34812- u8 *prog = temp;
34813- int stacksize = MAX_BPF_STACK +
34814- 32 /* space for rbx, r13, r14, r15 */ +
34815- 8 /* space for skb_copy_bits() buffer */;
34816-
34817- EMIT1(0x55); /* push rbp */
34818- EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */
34819-
34820- /* sub rsp, stacksize */
34821- EMIT3_off32(0x48, 0x81, 0xEC, stacksize);
34822-
34823- /* all classic BPF filters use R6(rbx) save it */
34824-
34825- /* mov qword ptr [rbp-X],rbx */
34826- EMIT3_off32(0x48, 0x89, 0x9D, -stacksize);
34827-
34828- /* sk_convert_filter() maps classic BPF register X to R7 and uses R8
34829- * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
34830- * R8(r14). R9(r15) spill could be made conditional, but there is only
34831- * one 'bpf_error' return path out of helper functions inside bpf_jit.S
34832- * The overhead of extra spill is negligible for any filter other
34833- * than synthetic ones. Therefore not worth adding complexity.
34834- */
34835-
34836- /* mov qword ptr [rbp-X],r13 */
34837- EMIT3_off32(0x4C, 0x89, 0xAD, -stacksize + 8);
34838- /* mov qword ptr [rbp-X],r14 */
34839- EMIT3_off32(0x4C, 0x89, 0xB5, -stacksize + 16);
34840- /* mov qword ptr [rbp-X],r15 */
34841- EMIT3_off32(0x4C, 0x89, 0xBD, -stacksize + 24);
34842-
34843- /* clear A and X registers */
34844- EMIT2(0x31, 0xc0); /* xor eax, eax */
34845- EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */
34846-
34847- if (ctx->seen_ld_abs) {
34848- /* r9d : skb->len - skb->data_len (headlen)
34849- * r10 : skb->data
34850- */
34851- if (is_imm8(offsetof(struct sk_buff, len)))
34852- /* mov %r9d, off8(%rdi) */
34853- EMIT4(0x44, 0x8b, 0x4f,
34854- offsetof(struct sk_buff, len));
34855- else
34856- /* mov %r9d, off32(%rdi) */
34857- EMIT3_off32(0x44, 0x8b, 0x8f,
34858- offsetof(struct sk_buff, len));
34859-
34860- if (is_imm8(offsetof(struct sk_buff, data_len)))
34861- /* sub %r9d, off8(%rdi) */
34862- EMIT4(0x44, 0x2b, 0x4f,
34863- offsetof(struct sk_buff, data_len));
34864- else
34865- EMIT3_off32(0x44, 0x2b, 0x8f,
34866- offsetof(struct sk_buff, data_len));
34867-
34868- if (is_imm8(offsetof(struct sk_buff, data)))
34869- /* mov %r10, off8(%rdi) */
34870- EMIT4(0x4c, 0x8b, 0x57,
34871- offsetof(struct sk_buff, data));
34872- else
34873- /* mov %r10, off32(%rdi) */
34874- EMIT3_off32(0x4c, 0x8b, 0x97,
34875- offsetof(struct sk_buff, data));
34876- }
34877-
34878- for (i = 0; i < insn_cnt; i++, insn++) {
34879- const s32 imm32 = insn->imm;
34880- u32 dst_reg = insn->dst_reg;
34881- u32 src_reg = insn->src_reg;
34882- u8 b1 = 0, b2 = 0, b3 = 0;
34883- s64 jmp_offset;
34884- u8 jmp_cond;
34885- int ilen;
34886- u8 *func;
34887-
34888- switch (insn->code) {
34889- /* ALU */
34890- case BPF_ALU | BPF_ADD | BPF_X:
34891- case BPF_ALU | BPF_SUB | BPF_X:
34892- case BPF_ALU | BPF_AND | BPF_X:
34893- case BPF_ALU | BPF_OR | BPF_X:
34894- case BPF_ALU | BPF_XOR | BPF_X:
34895- case BPF_ALU64 | BPF_ADD | BPF_X:
34896- case BPF_ALU64 | BPF_SUB | BPF_X:
34897- case BPF_ALU64 | BPF_AND | BPF_X:
34898- case BPF_ALU64 | BPF_OR | BPF_X:
34899- case BPF_ALU64 | BPF_XOR | BPF_X:
34900- switch (BPF_OP(insn->code)) {
34901- case BPF_ADD: b2 = 0x01; break;
34902- case BPF_SUB: b2 = 0x29; break;
34903- case BPF_AND: b2 = 0x21; break;
34904- case BPF_OR: b2 = 0x09; break;
34905- case BPF_XOR: b2 = 0x31; break;
34906- }
34907- if (BPF_CLASS(insn->code) == BPF_ALU64)
34908- EMIT1(add_2mod(0x48, dst_reg, src_reg));
34909- else if (is_ereg(dst_reg) || is_ereg(src_reg))
34910- EMIT1(add_2mod(0x40, dst_reg, src_reg));
34911- EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
34912- break;
34913-
34914- /* mov dst, src */
34915- case BPF_ALU64 | BPF_MOV | BPF_X:
34916- EMIT_mov(dst_reg, src_reg);
34917- break;
34918-
34919- /* mov32 dst, src */
34920- case BPF_ALU | BPF_MOV | BPF_X:
34921- if (is_ereg(dst_reg) || is_ereg(src_reg))
34922- EMIT1(add_2mod(0x40, dst_reg, src_reg));
34923- EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
34924- break;
34925-
34926- /* neg dst */
34927- case BPF_ALU | BPF_NEG:
34928- case BPF_ALU64 | BPF_NEG:
34929- if (BPF_CLASS(insn->code) == BPF_ALU64)
34930- EMIT1(add_1mod(0x48, dst_reg));
34931- else if (is_ereg(dst_reg))
34932- EMIT1(add_1mod(0x40, dst_reg));
34933- EMIT2(0xF7, add_1reg(0xD8, dst_reg));
34934- break;
34935-
34936- case BPF_ALU | BPF_ADD | BPF_K:
34937- case BPF_ALU | BPF_SUB | BPF_K:
34938- case BPF_ALU | BPF_AND | BPF_K:
34939- case BPF_ALU | BPF_OR | BPF_K:
34940- case BPF_ALU | BPF_XOR | BPF_K:
34941- case BPF_ALU64 | BPF_ADD | BPF_K:
34942- case BPF_ALU64 | BPF_SUB | BPF_K:
34943- case BPF_ALU64 | BPF_AND | BPF_K:
34944- case BPF_ALU64 | BPF_OR | BPF_K:
34945- case BPF_ALU64 | BPF_XOR | BPF_K:
34946- if (BPF_CLASS(insn->code) == BPF_ALU64)
34947- EMIT1(add_1mod(0x48, dst_reg));
34948- else if (is_ereg(dst_reg))
34949- EMIT1(add_1mod(0x40, dst_reg));
34950-
34951- switch (BPF_OP(insn->code)) {
34952- case BPF_ADD: b3 = 0xC0; break;
34953- case BPF_SUB: b3 = 0xE8; break;
34954- case BPF_AND: b3 = 0xE0; break;
34955- case BPF_OR: b3 = 0xC8; break;
34956- case BPF_XOR: b3 = 0xF0; break;
34957- }
34958-
34959- if (is_imm8(imm32))
34960- EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
34961- else
34962- EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
34963- break;
34964-
34965- case BPF_ALU64 | BPF_MOV | BPF_K:
34966- /* optimization: if imm32 is positive,
34967- * use 'mov eax, imm32' (which zero-extends imm32)
34968- * to save 2 bytes
34969- */
34970- if (imm32 < 0) {
34971- /* 'mov rax, imm32' sign extends imm32 */
34972- b1 = add_1mod(0x48, dst_reg);
34973- b2 = 0xC7;
34974- b3 = 0xC0;
34975- EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
34976- break;
34977- }
34978-
34979- case BPF_ALU | BPF_MOV | BPF_K:
34980- /* mov %eax, imm32 */
34981- if (is_ereg(dst_reg))
34982- EMIT1(add_1mod(0x40, dst_reg));
34983- EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
34984- break;
34985-
34986- /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
34987- case BPF_ALU | BPF_MOD | BPF_X:
34988- case BPF_ALU | BPF_DIV | BPF_X:
34989- case BPF_ALU | BPF_MOD | BPF_K:
34990- case BPF_ALU | BPF_DIV | BPF_K:
34991- case BPF_ALU64 | BPF_MOD | BPF_X:
34992- case BPF_ALU64 | BPF_DIV | BPF_X:
34993- case BPF_ALU64 | BPF_MOD | BPF_K:
34994- case BPF_ALU64 | BPF_DIV | BPF_K:
34995- EMIT1(0x50); /* push rax */
34996- EMIT1(0x52); /* push rdx */
34997-
34998- if (BPF_SRC(insn->code) == BPF_X)
34999- /* mov r11, src_reg */
35000- EMIT_mov(AUX_REG, src_reg);
35001- else
35002- /* mov r11, imm32 */
35003- EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
35004-
35005- /* mov rax, dst_reg */
35006- EMIT_mov(BPF_REG_0, dst_reg);
35007-
35008- /* xor edx, edx
35009- * equivalent to 'xor rdx, rdx', but one byte less
35010- */
35011- EMIT2(0x31, 0xd2);
35012-
35013- if (BPF_SRC(insn->code) == BPF_X) {
35014- /* if (src_reg == 0) return 0 */
35015-
35016- /* cmp r11, 0 */
35017- EMIT4(0x49, 0x83, 0xFB, 0x00);
35018-
35019- /* jne .+9 (skip over pop, pop, xor and jmp) */
35020- EMIT2(X86_JNE, 1 + 1 + 2 + 5);
35021- EMIT1(0x5A); /* pop rdx */
35022- EMIT1(0x58); /* pop rax */
35023- EMIT2(0x31, 0xc0); /* xor eax, eax */
35024-
35025- /* jmp cleanup_addr
35026- * addrs[i] - 11, because there are 11 bytes
35027- * after this insn: div, mov, pop, pop, mov
35028- */
35029- jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
35030- EMIT1_off32(0xE9, jmp_offset);
35031- }
35032-
35033- if (BPF_CLASS(insn->code) == BPF_ALU64)
35034- /* div r11 */
35035- EMIT3(0x49, 0xF7, 0xF3);
35036- else
35037- /* div r11d */
35038- EMIT3(0x41, 0xF7, 0xF3);
35039-
35040- if (BPF_OP(insn->code) == BPF_MOD)
35041- /* mov r11, rdx */
35042- EMIT3(0x49, 0x89, 0xD3);
35043- else
35044- /* mov r11, rax */
35045- EMIT3(0x49, 0x89, 0xC3);
35046-
35047- EMIT1(0x5A); /* pop rdx */
35048- EMIT1(0x58); /* pop rax */
35049-
35050- /* mov dst_reg, r11 */
35051- EMIT_mov(dst_reg, AUX_REG);
35052- break;
35053-
35054- case BPF_ALU | BPF_MUL | BPF_K:
35055- case BPF_ALU | BPF_MUL | BPF_X:
35056- case BPF_ALU64 | BPF_MUL | BPF_K:
35057- case BPF_ALU64 | BPF_MUL | BPF_X:
35058- EMIT1(0x50); /* push rax */
35059- EMIT1(0x52); /* push rdx */
35060-
35061- /* mov r11, dst_reg */
35062- EMIT_mov(AUX_REG, dst_reg);
35063-
35064- if (BPF_SRC(insn->code) == BPF_X)
35065- /* mov rax, src_reg */
35066- EMIT_mov(BPF_REG_0, src_reg);
35067- else
35068- /* mov rax, imm32 */
35069- EMIT3_off32(0x48, 0xC7, 0xC0, imm32);
35070-
35071- if (BPF_CLASS(insn->code) == BPF_ALU64)
35072- EMIT1(add_1mod(0x48, AUX_REG));
35073- else if (is_ereg(AUX_REG))
35074- EMIT1(add_1mod(0x40, AUX_REG));
35075- /* mul(q) r11 */
35076- EMIT2(0xF7, add_1reg(0xE0, AUX_REG));
35077-
35078- /* mov r11, rax */
35079- EMIT_mov(AUX_REG, BPF_REG_0);
35080-
35081- EMIT1(0x5A); /* pop rdx */
35082- EMIT1(0x58); /* pop rax */
35083-
35084- /* mov dst_reg, r11 */
35085- EMIT_mov(dst_reg, AUX_REG);
35086- break;
35087-
35088- /* shifts */
35089- case BPF_ALU | BPF_LSH | BPF_K:
35090- case BPF_ALU | BPF_RSH | BPF_K:
35091- case BPF_ALU | BPF_ARSH | BPF_K:
35092- case BPF_ALU64 | BPF_LSH | BPF_K:
35093- case BPF_ALU64 | BPF_RSH | BPF_K:
35094- case BPF_ALU64 | BPF_ARSH | BPF_K:
35095- if (BPF_CLASS(insn->code) == BPF_ALU64)
35096- EMIT1(add_1mod(0x48, dst_reg));
35097- else if (is_ereg(dst_reg))
35098- EMIT1(add_1mod(0x40, dst_reg));
35099-
35100- switch (BPF_OP(insn->code)) {
35101- case BPF_LSH: b3 = 0xE0; break;
35102- case BPF_RSH: b3 = 0xE8; break;
35103- case BPF_ARSH: b3 = 0xF8; break;
35104- }
35105- EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
35106- break;
35107-
35108- case BPF_ALU | BPF_END | BPF_FROM_BE:
35109- switch (imm32) {
35110- case 16:
35111- /* emit 'ror %ax, 8' to swap lower 2 bytes */
35112- EMIT1(0x66);
35113- if (is_ereg(dst_reg))
35114- EMIT1(0x41);
35115- EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
35116- break;
35117- case 32:
35118- /* emit 'bswap eax' to swap lower 4 bytes */
35119- if (is_ereg(dst_reg))
35120- EMIT2(0x41, 0x0F);
35121- else
35122- EMIT1(0x0F);
35123- EMIT1(add_1reg(0xC8, dst_reg));
35124- break;
35125- case 64:
35126- /* emit 'bswap rax' to swap 8 bytes */
35127- EMIT3(add_1mod(0x48, dst_reg), 0x0F,
35128- add_1reg(0xC8, dst_reg));
35129- break;
35130- }
35131- break;
35132-
35133- case BPF_ALU | BPF_END | BPF_FROM_LE:
35134- break;
35135-
35136- /* ST: *(u8*)(dst_reg + off) = imm */
35137- case BPF_ST | BPF_MEM | BPF_B:
35138- if (is_ereg(dst_reg))
35139- EMIT2(0x41, 0xC6);
35140- else
35141- EMIT1(0xC6);
35142- goto st;
35143- case BPF_ST | BPF_MEM | BPF_H:
35144- if (is_ereg(dst_reg))
35145- EMIT3(0x66, 0x41, 0xC7);
35146- else
35147- EMIT2(0x66, 0xC7);
35148- goto st;
35149- case BPF_ST | BPF_MEM | BPF_W:
35150- if (is_ereg(dst_reg))
35151- EMIT2(0x41, 0xC7);
35152- else
35153- EMIT1(0xC7);
35154- goto st;
35155- case BPF_ST | BPF_MEM | BPF_DW:
35156- EMIT2(add_1mod(0x48, dst_reg), 0xC7);
35157-
35158-st: if (is_imm8(insn->off))
35159- EMIT2(add_1reg(0x40, dst_reg), insn->off);
35160- else
35161- EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
35162-
35163- EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
35164- break;
35165-
35166- /* STX: *(u8*)(dst_reg + off) = src_reg */
35167- case BPF_STX | BPF_MEM | BPF_B:
35168- /* emit 'mov byte ptr [rax + off], al' */
35169- if (is_ereg(dst_reg) || is_ereg(src_reg) ||
35170- /* have to add extra byte for x86 SIL, DIL regs */
35171- src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
35172- EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
35173- else
35174- EMIT1(0x88);
35175- goto stx;
35176- case BPF_STX | BPF_MEM | BPF_H:
35177- if (is_ereg(dst_reg) || is_ereg(src_reg))
35178- EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
35179- else
35180- EMIT2(0x66, 0x89);
35181- goto stx;
35182- case BPF_STX | BPF_MEM | BPF_W:
35183- if (is_ereg(dst_reg) || is_ereg(src_reg))
35184- EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
35185- else
35186- EMIT1(0x89);
35187- goto stx;
35188- case BPF_STX | BPF_MEM | BPF_DW:
35189- EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
35190-stx: if (is_imm8(insn->off))
35191- EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
35192- else
35193- EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
35194- insn->off);
35195- break;
35196-
35197- /* LDX: dst_reg = *(u8*)(src_reg + off) */
35198- case BPF_LDX | BPF_MEM | BPF_B:
35199- /* emit 'movzx rax, byte ptr [rax + off]' */
35200- EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
35201- goto ldx;
35202- case BPF_LDX | BPF_MEM | BPF_H:
35203- /* emit 'movzx rax, word ptr [rax + off]' */
35204- EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
35205- goto ldx;
35206- case BPF_LDX | BPF_MEM | BPF_W:
35207- /* emit 'mov eax, dword ptr [rax+0x14]' */
35208- if (is_ereg(dst_reg) || is_ereg(src_reg))
35209- EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
35210- else
35211- EMIT1(0x8B);
35212- goto ldx;
35213- case BPF_LDX | BPF_MEM | BPF_DW:
35214- /* emit 'mov rax, qword ptr [rax+0x14]' */
35215- EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
35216-ldx: /* if insn->off == 0 we can save one extra byte, but
35217- * special case of x86 r13 which always needs an offset
35218- * is not worth the hassle
35219- */
35220- if (is_imm8(insn->off))
35221- EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
35222- else
35223- EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
35224- insn->off);
35225- break;
35226-
35227- /* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
35228- case BPF_STX | BPF_XADD | BPF_W:
35229- /* emit 'lock add dword ptr [rax + off], eax' */
35230- if (is_ereg(dst_reg) || is_ereg(src_reg))
35231- EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
35232- else
35233- EMIT2(0xF0, 0x01);
35234- goto xadd;
35235- case BPF_STX | BPF_XADD | BPF_DW:
35236- EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
35237-xadd: if (is_imm8(insn->off))
35238- EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
35239- else
35240- EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
35241- insn->off);
35242- break;
35243-
35244- /* call */
35245- case BPF_JMP | BPF_CALL:
35246- func = (u8 *) __bpf_call_base + imm32;
35247- jmp_offset = func - (image + addrs[i]);
35248- if (ctx->seen_ld_abs) {
35249- EMIT2(0x41, 0x52); /* push %r10 */
35250- EMIT2(0x41, 0x51); /* push %r9 */
35251- /* need to adjust jmp offset, since
35252- * pop %r9, pop %r10 take 4 bytes after call insn
35253- */
35254- jmp_offset += 4;
35255- }
35256- if (!imm32 || !is_simm32(jmp_offset)) {
35257- pr_err("unsupported bpf func %d addr %p image %p\n",
35258- imm32, func, image);
35259- return -EINVAL;
35260- }
35261- EMIT1_off32(0xE8, jmp_offset);
35262- if (ctx->seen_ld_abs) {
35263- EMIT2(0x41, 0x59); /* pop %r9 */
35264- EMIT2(0x41, 0x5A); /* pop %r10 */
35265- }
35266- break;
35267-
35268- /* cond jump */
35269- case BPF_JMP | BPF_JEQ | BPF_X:
35270- case BPF_JMP | BPF_JNE | BPF_X:
35271- case BPF_JMP | BPF_JGT | BPF_X:
35272- case BPF_JMP | BPF_JGE | BPF_X:
35273- case BPF_JMP | BPF_JSGT | BPF_X:
35274- case BPF_JMP | BPF_JSGE | BPF_X:
35275- /* cmp dst_reg, src_reg */
35276- EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
35277- add_2reg(0xC0, dst_reg, src_reg));
35278- goto emit_cond_jmp;
35279-
35280- case BPF_JMP | BPF_JSET | BPF_X:
35281- /* test dst_reg, src_reg */
35282- EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
35283- add_2reg(0xC0, dst_reg, src_reg));
35284- goto emit_cond_jmp;
35285-
35286- case BPF_JMP | BPF_JSET | BPF_K:
35287- /* test dst_reg, imm32 */
35288- EMIT1(add_1mod(0x48, dst_reg));
35289- EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
35290- goto emit_cond_jmp;
35291-
35292- case BPF_JMP | BPF_JEQ | BPF_K:
35293- case BPF_JMP | BPF_JNE | BPF_K:
35294- case BPF_JMP | BPF_JGT | BPF_K:
35295- case BPF_JMP | BPF_JGE | BPF_K:
35296- case BPF_JMP | BPF_JSGT | BPF_K:
35297- case BPF_JMP | BPF_JSGE | BPF_K:
35298- /* cmp dst_reg, imm8/32 */
35299- EMIT1(add_1mod(0x48, dst_reg));
35300-
35301- if (is_imm8(imm32))
35302- EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
35303- else
35304- EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
35305-
35306-emit_cond_jmp: /* convert BPF opcode to x86 */
35307- switch (BPF_OP(insn->code)) {
35308- case BPF_JEQ:
35309- jmp_cond = X86_JE;
35310- break;
35311- case BPF_JSET:
35312- case BPF_JNE:
35313- jmp_cond = X86_JNE;
35314- break;
35315- case BPF_JGT:
35316- /* GT is unsigned '>', JA in x86 */
35317- jmp_cond = X86_JA;
35318- break;
35319- case BPF_JGE:
35320- /* GE is unsigned '>=', JAE in x86 */
35321- jmp_cond = X86_JAE;
35322- break;
35323- case BPF_JSGT:
35324- /* signed '>', GT in x86 */
35325- jmp_cond = X86_JG;
35326- break;
35327- case BPF_JSGE:
35328- /* signed '>=', GE in x86 */
35329- jmp_cond = X86_JGE;
35330- break;
35331- default: /* to silence gcc warning */
35332- return -EFAULT;
35333- }
35334- jmp_offset = addrs[i + insn->off] - addrs[i];
35335- if (is_imm8(jmp_offset)) {
35336- EMIT2(jmp_cond, jmp_offset);
35337- } else if (is_simm32(jmp_offset)) {
35338- EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
35339- } else {
35340- pr_err("cond_jmp gen bug %llx\n", jmp_offset);
35341- return -EFAULT;
35342- }
35343-
35344- break;
35345-
35346- case BPF_JMP | BPF_JA:
35347- jmp_offset = addrs[i + insn->off] - addrs[i];
35348- if (!jmp_offset)
35349- /* optimize out nop jumps */
35350- break;
35351-emit_jmp:
35352- if (is_imm8(jmp_offset)) {
35353- EMIT2(0xEB, jmp_offset);
35354- } else if (is_simm32(jmp_offset)) {
35355- EMIT1_off32(0xE9, jmp_offset);
35356- } else {
35357- pr_err("jmp gen bug %llx\n", jmp_offset);
35358- return -EFAULT;
35359- }
35360- break;
35361-
35362- case BPF_LD | BPF_IND | BPF_W:
35363- func = sk_load_word;
35364- goto common_load;
35365- case BPF_LD | BPF_ABS | BPF_W:
35366- func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
35367-common_load: ctx->seen_ld_abs = true;
35368- jmp_offset = func - (image + addrs[i]);
35369- if (!func || !is_simm32(jmp_offset)) {
35370- pr_err("unsupported bpf func %d addr %p image %p\n",
35371- imm32, func, image);
35372- return -EINVAL;
35373- }
35374- if (BPF_MODE(insn->code) == BPF_ABS) {
35375- /* mov %esi, imm32 */
35376- EMIT1_off32(0xBE, imm32);
35377- } else {
35378- /* mov %rsi, src_reg */
35379- EMIT_mov(BPF_REG_2, src_reg);
35380- if (imm32) {
35381- if (is_imm8(imm32))
35382- /* add %esi, imm8 */
35383- EMIT3(0x83, 0xC6, imm32);
35384- else
35385- /* add %esi, imm32 */
35386- EMIT2_off32(0x81, 0xC6, imm32);
35387- }
35388- }
35389- /* skb pointer is in R6 (%rbx), it will be copied into
35390- * %rdi if skb_copy_bits() call is necessary.
35391- * sk_load_* helpers also use %r10 and %r9d.
35392- * See bpf_jit.S
35393- */
35394- EMIT1_off32(0xE8, jmp_offset); /* call */
35395- break;
35396-
35397- case BPF_LD | BPF_IND | BPF_H:
35398- func = sk_load_half;
35399- goto common_load;
35400- case BPF_LD | BPF_ABS | BPF_H:
35401- func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
35402- goto common_load;
35403- case BPF_LD | BPF_IND | BPF_B:
35404- func = sk_load_byte;
35405- goto common_load;
35406- case BPF_LD | BPF_ABS | BPF_B:
35407- func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
35408- goto common_load;
35409-
35410- case BPF_JMP | BPF_EXIT:
35411- if (i != insn_cnt - 1) {
35412- jmp_offset = ctx->cleanup_addr - addrs[i];
35413- goto emit_jmp;
35414- }
35415- /* update cleanup_addr */
35416- ctx->cleanup_addr = proglen;
35417- /* mov rbx, qword ptr [rbp-X] */
35418- EMIT3_off32(0x48, 0x8B, 0x9D, -stacksize);
35419- /* mov r13, qword ptr [rbp-X] */
35420- EMIT3_off32(0x4C, 0x8B, 0xAD, -stacksize + 8);
35421- /* mov r14, qword ptr [rbp-X] */
35422- EMIT3_off32(0x4C, 0x8B, 0xB5, -stacksize + 16);
35423- /* mov r15, qword ptr [rbp-X] */
35424- EMIT3_off32(0x4C, 0x8B, 0xBD, -stacksize + 24);
35425-
35426- EMIT1(0xC9); /* leave */
35427- EMIT1(0xC3); /* ret */
35428- break;
35429-
35430- default:
35431- /* By design x64 JIT should support all BPF instructions
35432- * This error will be seen if new instruction was added
35433- * to interpreter, but not to JIT
35434- * or if there is junk in sk_filter
35435- */
35436- pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
35437- return -EINVAL;
35438- }
35439-
35440- ilen = prog - temp;
35441- if (image) {
35442- if (unlikely(proglen + ilen > oldproglen)) {
35443- pr_err("bpf_jit_compile fatal error\n");
35444- return -EFAULT;
35445- }
35446- memcpy(image + proglen, temp, ilen);
35447- }
35448- proglen += ilen;
35449- addrs[i] = proglen;
35450- prog = temp;
35451- }
35452- return proglen;
35453-}
35454-
35455-void bpf_jit_compile(struct sk_filter *prog)
35456-{
35457-}
35458-
35459-void bpf_int_jit_compile(struct sk_filter *prog)
35460-{
35461- struct bpf_binary_header *header = NULL;
35462- int proglen, oldproglen = 0;
35463- struct jit_context ctx = {};
35464+void bpf_jit_compile(struct sk_filter *fp)
35465+{
35466+ u8 temp[MAX_INSTR_CODE_SIZE];
35467+ u8 *prog;
35468+ unsigned int proglen, oldproglen = 0;
35469+ int ilen, i;
35470+ int t_offset, f_offset;
35471+ u8 t_op, f_op, seen = 0, pass;
35472 u8 *image = NULL;
35473- int *addrs;
35474- int pass;
35475- int i;
35476+ u8 *header = NULL;
35477+ u8 *func;
35478+ int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
35479+ unsigned int cleanup_addr; /* epilogue code offset */
35480+ unsigned int *addrs;
35481+ const struct sock_filter *filter = fp->insns;
35482+ int flen = fp->len;
35483+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
35484+ unsigned int randkey;
35485+#endif
35486
35487 if (!bpf_jit_enable)
35488 return;
35489
35490- if (!prog || !prog->len)
35491- return;
35492-
35493- addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
35494- if (!addrs)
35495+ addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
35496+ if (addrs == NULL)
35497 return;
35498
35499 /* Before first pass, make a rough estimation of addrs[]
35500- * each bpf instruction is translated to less than 64 bytes
35501+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
35502 */
35503- for (proglen = 0, i = 0; i < prog->len; i++) {
35504- proglen += 64;
35505+ for (proglen = 0, i = 0; i < flen; i++) {
35506+ proglen += MAX_INSTR_CODE_SIZE;
35507 addrs[i] = proglen;
35508 }
35509- ctx.cleanup_addr = proglen;
35510+ cleanup_addr = proglen; /* epilogue address */
35511
35512 for (pass = 0; pass < 10; pass++) {
35513- proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
35514- if (proglen <= 0) {
35515- image = NULL;
35516- if (header)
35517- module_free(NULL, header);
35518- goto out;
35519+ u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
35520+ /* no prologue/epilogue for trivial filters (RET something) */
35521+ proglen = 0;
35522+ prog = temp;
35523+
35524+ if (seen_or_pass0) {
35525+ EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
35526+ EMIT4(0x48, 0x83, 0xec, 96); /* subq $96,%rsp */
35527+ /* note : must save %rbx in case bpf_error is hit */
35528+ if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF))
35529+ EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
35530+ if (seen_or_pass0 & SEEN_XREG)
35531+ CLEAR_X(); /* make sure we don't leak kernel memory */
35532+
35533+ /*
35534+ * If this filter needs to access skb data,
35535+ * load r9 and r8 with:
35536+ * r9 = skb->len - skb->data_len
35537+ * r8 = skb->data
35538+ */
35539+ if (seen_or_pass0 & SEEN_DATAREF) {
35540+ if (offsetof(struct sk_buff, len) <= 127)
35541+ /* mov off8(%rdi),%r9d */
35542+ EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
35543+ else {
35544+ /* mov off32(%rdi),%r9d */
35545+ EMIT3(0x44, 0x8b, 0x8f);
35546+ EMIT(offsetof(struct sk_buff, len), 4);
35547+ }
35548+ if (is_imm8(offsetof(struct sk_buff, data_len)))
35549+ /* sub off8(%rdi),%r9d */
35550+ EMIT4(0x44, 0x2b, 0x4f, offsetof(struct sk_buff, data_len));
35551+ else {
35552+ EMIT3(0x44, 0x2b, 0x8f);
35553+ EMIT(offsetof(struct sk_buff, data_len), 4);
35554+ }
35555+
35556+ if (is_imm8(offsetof(struct sk_buff, data)))
35557+ /* mov off8(%rdi),%r8 */
35558+ EMIT4(0x4c, 0x8b, 0x47, offsetof(struct sk_buff, data));
35559+ else {
35560+ /* mov off32(%rdi),%r8 */
35561+ EMIT3(0x4c, 0x8b, 0x87);
35562+ EMIT(offsetof(struct sk_buff, data), 4);
35563+ }
35564+ }
35565 }
35566+
35567+ switch (filter[0].code) {
35568+ case BPF_S_RET_K:
35569+ case BPF_S_LD_W_LEN:
35570+ case BPF_S_ANC_PROTOCOL:
35571+ case BPF_S_ANC_IFINDEX:
35572+ case BPF_S_ANC_MARK:
35573+ case BPF_S_ANC_RXHASH:
35574+ case BPF_S_ANC_CPU:
35575+ case BPF_S_ANC_VLAN_TAG:
35576+ case BPF_S_ANC_VLAN_TAG_PRESENT:
35577+ case BPF_S_ANC_QUEUE:
35578+ case BPF_S_ANC_PKTTYPE:
35579+ case BPF_S_LD_W_ABS:
35580+ case BPF_S_LD_H_ABS:
35581+ case BPF_S_LD_B_ABS:
35582+ /* first instruction sets A register (or is RET 'constant') */
35583+ break;
35584+ default:
35585+ /* make sure we don't leak kernel information to user */
35586+ CLEAR_A(); /* A = 0 */
35587+ }
35588+
35589+ for (i = 0; i < flen; i++) {
35590+ unsigned int K = filter[i].k;
35591+
35592+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
35593+ randkey = prandom_u32();
35594+#endif
35595+
35596+ switch (filter[i].code) {
35597+ case BPF_S_ALU_ADD_X: /* A += X; */
35598+ seen |= SEEN_XREG;
35599+ EMIT2(0x01, 0xd8); /* add %ebx,%eax */
35600+ break;
35601+ case BPF_S_ALU_ADD_K: /* A += K; */
35602+ if (!K)
35603+ break;
35604+ if (is_imm8(K))
35605+ EMIT3(0x83, 0xc0, K); /* add imm8,%eax */
35606+ else
35607+ EMIT1_off32(0x05, K); /* add imm32,%eax */
35608+ break;
35609+ case BPF_S_ALU_SUB_X: /* A -= X; */
35610+ seen |= SEEN_XREG;
35611+ EMIT2(0x29, 0xd8); /* sub %ebx,%eax */
35612+ break;
35613+ case BPF_S_ALU_SUB_K: /* A -= K */
35614+ if (!K)
35615+ break;
35616+ if (is_imm8(K))
35617+ EMIT3(0x83, 0xe8, K); /* sub imm8,%eax */
35618+ else
35619+ EMIT1_off32(0x2d, K); /* sub imm32,%eax */
35620+ break;
35621+ case BPF_S_ALU_MUL_X: /* A *= X; */
35622+ seen |= SEEN_XREG;
35623+ EMIT3(0x0f, 0xaf, 0xc3); /* imul %ebx,%eax */
35624+ break;
35625+ case BPF_S_ALU_MUL_K: /* A *= K */
35626+ if (is_imm8(K))
35627+ EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
35628+ else
35629+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
35630+ break;
35631+ case BPF_S_ALU_DIV_X: /* A /= X; */
35632+ seen |= SEEN_XREG;
35633+ EMIT2(0x85, 0xdb); /* test %ebx,%ebx */
35634+ if (pc_ret0 > 0) {
35635+ /* addrs[pc_ret0 - 1] is start address of target
35636+ * (addrs[i] - 4) is the address following this jmp
35637+ * ("xor %edx,%edx; div %ebx" being 4 bytes long)
35638+ */
35639+ EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
35640+ (addrs[i] - 4));
35641+ } else {
35642+ EMIT_COND_JMP(X86_JNE, 2 + 5);
35643+ CLEAR_A();
35644+ EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
35645+ }
35646+ EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */
35647+ break;
35648+ case BPF_S_ALU_MOD_X: /* A %= X; */
35649+ seen |= SEEN_XREG;
35650+ EMIT2(0x85, 0xdb); /* test %ebx,%ebx */
35651+ if (pc_ret0 > 0) {
35652+ /* addrs[pc_ret0 - 1] is start address of target
35653+ * (addrs[i] - 6) is the address following this jmp
35654+ * ("xor %edx,%edx; div %ebx;mov %edx,%eax" being 6 bytes long)
35655+ */
35656+ EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
35657+ (addrs[i] - 6));
35658+ } else {
35659+ EMIT_COND_JMP(X86_JNE, 2 + 5);
35660+ CLEAR_A();
35661+ EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 6)); /* jmp .+off32 */
35662+ }
35663+ EMIT2(0x31, 0xd2); /* xor %edx,%edx */
35664+ EMIT2(0xf7, 0xf3); /* div %ebx */
35665+ EMIT2(0x89, 0xd0); /* mov %edx,%eax */
35666+ break;
35667+ case BPF_S_ALU_MOD_K: /* A %= K; */
35668+ if (K == 1) {
35669+ CLEAR_A();
35670+ break;
35671+ }
35672+ EMIT2(0x31, 0xd2); /* xor %edx,%edx */
35673+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
35674+ DILUTE_CONST_SEQUENCE(K, randkey);
35675+#else
35676+ EMIT1(0xb9); EMIT(K, 4); /* mov imm32,%ecx */
35677+#endif
35678+ EMIT2(0xf7, 0xf1); /* div %ecx */
35679+ EMIT2(0x89, 0xd0); /* mov %edx,%eax */
35680+ break;
35681+ case BPF_S_ALU_DIV_K: /* A /= K */
35682+ if (K == 1)
35683+ break;
35684+ EMIT2(0x31, 0xd2); /* xor %edx,%edx */
35685+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
35686+ DILUTE_CONST_SEQUENCE(K, randkey);
35687+#else
35688+ EMIT1(0xb9); EMIT(K, 4); /* mov imm32,%ecx */
35689+#endif
35690+ EMIT2(0xf7, 0xf1); /* div %ecx */
35691+ break;
35692+ case BPF_S_ALU_AND_X:
35693+ seen |= SEEN_XREG;
35694+ EMIT2(0x21, 0xd8); /* and %ebx,%eax */
35695+ break;
35696+ case BPF_S_ALU_AND_K:
35697+ if (K >= 0xFFFFFF00) {
35698+ EMIT2(0x24, K & 0xFF); /* and imm8,%al */
35699+ } else if (K >= 0xFFFF0000) {
35700+ EMIT2(0x66, 0x25); /* and imm16,%ax */
35701+ EMIT(K, 2);
35702+ } else {
35703+ EMIT1_off32(0x25, K); /* and imm32,%eax */
35704+ }
35705+ break;
35706+ case BPF_S_ALU_OR_X:
35707+ seen |= SEEN_XREG;
35708+ EMIT2(0x09, 0xd8); /* or %ebx,%eax */
35709+ break;
35710+ case BPF_S_ALU_OR_K:
35711+ if (is_imm8(K))
35712+ EMIT3(0x83, 0xc8, K); /* or imm8,%eax */
35713+ else
35714+ EMIT1_off32(0x0d, K); /* or imm32,%eax */
35715+ break;
35716+ case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
35717+ case BPF_S_ALU_XOR_X:
35718+ seen |= SEEN_XREG;
35719+ EMIT2(0x31, 0xd8); /* xor %ebx,%eax */
35720+ break;
35721+ case BPF_S_ALU_XOR_K: /* A ^= K; */
35722+ if (K == 0)
35723+ break;
35724+ if (is_imm8(K))
35725+ EMIT3(0x83, 0xf0, K); /* xor imm8,%eax */
35726+ else
35727+ EMIT1_off32(0x35, K); /* xor imm32,%eax */
35728+ break;
35729+ case BPF_S_ALU_LSH_X: /* A <<= X; */
35730+ seen |= SEEN_XREG;
35731+ EMIT4(0x89, 0xd9, 0xd3, 0xe0); /* mov %ebx,%ecx; shl %cl,%eax */
35732+ break;
35733+ case BPF_S_ALU_LSH_K:
35734+ if (K == 0)
35735+ break;
35736+ else if (K == 1)
35737+ EMIT2(0xd1, 0xe0); /* shl %eax */
35738+ else
35739+ EMIT3(0xc1, 0xe0, K);
35740+ break;
35741+ case BPF_S_ALU_RSH_X: /* A >>= X; */
35742+ seen |= SEEN_XREG;
35743+ EMIT4(0x89, 0xd9, 0xd3, 0xe8); /* mov %ebx,%ecx; shr %cl,%eax */
35744+ break;
35745+ case BPF_S_ALU_RSH_K: /* A >>= K; */
35746+ if (K == 0)
35747+ break;
35748+ else if (K == 1)
35749+ EMIT2(0xd1, 0xe8); /* shr %eax */
35750+ else
35751+ EMIT3(0xc1, 0xe8, K);
35752+ break;
35753+ case BPF_S_ALU_NEG:
35754+ EMIT2(0xf7, 0xd8); /* neg %eax */
35755+ break;
35756+ case BPF_S_RET_K:
35757+ if (!K) {
35758+ if (pc_ret0 == -1)
35759+ pc_ret0 = i;
35760+ CLEAR_A();
35761+ } else {
35762+ EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
35763+ }
35764+ /* fall through */
35765+ case BPF_S_RET_A:
35766+ if (seen_or_pass0) {
35767+ if (i != flen - 1) {
35768+ EMIT_JMP(cleanup_addr - addrs[i]);
35769+ break;
35770+ }
35771+ if (seen_or_pass0 & SEEN_XREG)
35772+ EMIT4(0x48, 0x8b, 0x5d, 0xf8); /* mov -8(%rbp),%rbx */
35773+ EMIT1(0xc9); /* leaveq */
35774+ }
35775+ EMIT1(0xc3); /* ret */
35776+ break;
35777+ case BPF_S_MISC_TAX: /* X = A */
35778+ seen |= SEEN_XREG;
35779+ EMIT2(0x89, 0xc3); /* mov %eax,%ebx */
35780+ break;
35781+ case BPF_S_MISC_TXA: /* A = X */
35782+ seen |= SEEN_XREG;
35783+ EMIT2(0x89, 0xd8); /* mov %ebx,%eax */
35784+ break;
35785+ case BPF_S_LD_IMM: /* A = K */
35786+ if (!K)
35787+ CLEAR_A();
35788+ else
35789+ EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
35790+ break;
35791+ case BPF_S_LDX_IMM: /* X = K */
35792+ seen |= SEEN_XREG;
35793+ if (!K)
35794+ CLEAR_X();
35795+ else
35796+ EMIT1_off32(0xbb, K); /* mov $imm32,%ebx */
35797+ break;
35798+ case BPF_S_LD_MEM: /* A = mem[K] : mov off8(%rbp),%eax */
35799+ seen |= SEEN_MEM;
35800+ EMIT3(0x8b, 0x45, 0xf0 - K*4);
35801+ break;
35802+ case BPF_S_LDX_MEM: /* X = mem[K] : mov off8(%rbp),%ebx */
35803+ seen |= SEEN_XREG | SEEN_MEM;
35804+ EMIT3(0x8b, 0x5d, 0xf0 - K*4);
35805+ break;
35806+ case BPF_S_ST: /* mem[K] = A : mov %eax,off8(%rbp) */
35807+ seen |= SEEN_MEM;
35808+ EMIT3(0x89, 0x45, 0xf0 - K*4);
35809+ break;
35810+ case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
35811+ seen |= SEEN_XREG | SEEN_MEM;
35812+ EMIT3(0x89, 0x5d, 0xf0 - K*4);
35813+ break;
35814+ case BPF_S_LD_W_LEN: /* A = skb->len; */
35815+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
35816+ if (is_imm8(offsetof(struct sk_buff, len)))
35817+ /* mov off8(%rdi),%eax */
35818+ EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len));
35819+ else {
35820+ EMIT2(0x8b, 0x87);
35821+ EMIT(offsetof(struct sk_buff, len), 4);
35822+ }
35823+ break;
35824+ case BPF_S_LDX_W_LEN: /* X = skb->len; */
35825+ seen |= SEEN_XREG;
35826+ if (is_imm8(offsetof(struct sk_buff, len)))
35827+ /* mov off8(%rdi),%ebx */
35828+ EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len));
35829+ else {
35830+ EMIT2(0x8b, 0x9f);
35831+ EMIT(offsetof(struct sk_buff, len), 4);
35832+ }
35833+ break;
35834+ case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
35835+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
35836+ if (is_imm8(offsetof(struct sk_buff, protocol))) {
35837+ /* movzwl off8(%rdi),%eax */
35838+ EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol));
35839+ } else {
35840+ EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
35841+ EMIT(offsetof(struct sk_buff, protocol), 4);
35842+ }
35843+ EMIT2(0x86, 0xc4); /* ntohs() : xchg %al,%ah */
35844+ break;
35845+ case BPF_S_ANC_IFINDEX:
35846+ if (is_imm8(offsetof(struct sk_buff, dev))) {
35847+ /* movq off8(%rdi),%rax */
35848+ EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev));
35849+ } else {
35850+ EMIT3(0x48, 0x8b, 0x87); /* movq off32(%rdi),%rax */
35851+ EMIT(offsetof(struct sk_buff, dev), 4);
35852+ }
35853+ EMIT3(0x48, 0x85, 0xc0); /* test %rax,%rax */
35854+ EMIT_COND_JMP(X86_JE, cleanup_addr - (addrs[i] - 6));
35855+ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
35856+ EMIT2(0x8b, 0x80); /* mov off32(%rax),%eax */
35857+ EMIT(offsetof(struct net_device, ifindex), 4);
35858+ break;
35859+ case BPF_S_ANC_MARK:
35860+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
35861+ if (is_imm8(offsetof(struct sk_buff, mark))) {
35862+ /* mov off8(%rdi),%eax */
35863+ EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark));
35864+ } else {
35865+ EMIT2(0x8b, 0x87);
35866+ EMIT(offsetof(struct sk_buff, mark), 4);
35867+ }
35868+ break;
35869+ case BPF_S_ANC_RXHASH:
35870+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
35871+ if (is_imm8(offsetof(struct sk_buff, hash))) {
35872+ /* mov off8(%rdi),%eax */
35873+ EMIT3(0x8b, 0x47, offsetof(struct sk_buff, hash));
35874+ } else {
35875+ EMIT2(0x8b, 0x87);
35876+ EMIT(offsetof(struct sk_buff, hash), 4);
35877+ }
35878+ break;
35879+ case BPF_S_ANC_QUEUE:
35880+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
35881+ if (is_imm8(offsetof(struct sk_buff, queue_mapping))) {
35882+ /* movzwl off8(%rdi),%eax */
35883+ EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping));
35884+ } else {
35885+ EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
35886+ EMIT(offsetof(struct sk_buff, queue_mapping), 4);
35887+ }
35888+ break;
35889+ case BPF_S_ANC_CPU:
35890+#ifdef CONFIG_SMP
35891+ EMIT4(0x65, 0x8b, 0x04, 0x25); /* mov %gs:off32,%eax */
35892+ EMIT((u32)(unsigned long)&cpu_number, 4); /* A = smp_processor_id(); */
35893+#else
35894+ CLEAR_A();
35895+#endif
35896+ break;
35897+ case BPF_S_ANC_VLAN_TAG:
35898+ case BPF_S_ANC_VLAN_TAG_PRESENT:
35899+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
35900+ if (is_imm8(offsetof(struct sk_buff, vlan_tci))) {
35901+ /* movzwl off8(%rdi),%eax */
35902+ EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, vlan_tci));
35903+ } else {
35904+ EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
35905+ EMIT(offsetof(struct sk_buff, vlan_tci), 4);
35906+ }
35907+ BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
35908+ if (filter[i].code == BPF_S_ANC_VLAN_TAG) {
35909+ EMIT3(0x80, 0xe4, 0xef); /* and $0xef,%ah */
35910+ } else {
35911+ EMIT3(0xc1, 0xe8, 0x0c); /* shr $0xc,%eax */
35912+ EMIT3(0x83, 0xe0, 0x01); /* and $0x1,%eax */
35913+ }
35914+ break;
35915+ case BPF_S_ANC_PKTTYPE:
35916+ {
35917+ int off = pkt_type_offset();
35918+
35919+ if (off < 0)
35920+ goto out;
35921+ if (is_imm8(off)) {
35922+ /* movzbl off8(%rdi),%eax */
35923+ EMIT4(0x0f, 0xb6, 0x47, off);
35924+ } else {
35925+ /* movzbl off32(%rdi),%eax */
35926+ EMIT3(0x0f, 0xb6, 0x87);
35927+ EMIT(off, 4);
35928+ }
35929+ EMIT3(0x83, 0xe0, PKT_TYPE_MAX); /* and $0x7,%eax */
35930+ break;
35931+ }
35932+ case BPF_S_LD_W_ABS:
35933+ func = CHOOSE_LOAD_FUNC(K, sk_load_word);
35934+common_load: seen |= SEEN_DATAREF;
35935+ t_offset = func - (image + addrs[i]);
35936+ EMIT1_off32(0xbe, K); /* mov imm32,%esi */
35937+ EMIT1_off32(0xe8, t_offset); /* call */
35938+ break;
35939+ case BPF_S_LD_H_ABS:
35940+ func = CHOOSE_LOAD_FUNC(K, sk_load_half);
35941+ goto common_load;
35942+ case BPF_S_LD_B_ABS:
35943+ func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
35944+ goto common_load;
35945+ case BPF_S_LDX_B_MSH:
35946+ func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
35947+ seen |= SEEN_DATAREF | SEEN_XREG;
35948+ t_offset = func - (image + addrs[i]);
35949+ EMIT1_off32(0xbe, K); /* mov imm32,%esi */
35950+ EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */
35951+ break;
35952+ case BPF_S_LD_W_IND:
35953+ func = sk_load_word;
35954+common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
35955+ t_offset = func - (image + addrs[i]);
35956+ if (K) {
35957+ if (is_imm8(K)) {
35958+ EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
35959+ } else {
35960+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
35961+ }
35962+ } else {
35963+ EMIT2(0x89, 0xde); /* mov %ebx,%esi */
35964+ }
35965+ EMIT1_off32(0xe8, t_offset); /* call sk_load_xxx_ind */
35966+ break;
35967+ case BPF_S_LD_H_IND:
35968+ func = sk_load_half;
35969+ goto common_load_ind;
35970+ case BPF_S_LD_B_IND:
35971+ func = sk_load_byte;
35972+ goto common_load_ind;
35973+ case BPF_S_JMP_JA:
35974+ t_offset = addrs[i + K] - addrs[i];
35975+ EMIT_JMP(t_offset);
35976+ break;
35977+ COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE);
35978+ COND_SEL(BPF_S_JMP_JGE_K, X86_JAE, X86_JB);
35979+ COND_SEL(BPF_S_JMP_JEQ_K, X86_JE, X86_JNE);
35980+ COND_SEL(BPF_S_JMP_JSET_K, X86_JNE, X86_JE);
35981+ COND_SEL(BPF_S_JMP_JGT_X, X86_JA, X86_JBE);
35982+ COND_SEL(BPF_S_JMP_JGE_X, X86_JAE, X86_JB);
35983+ COND_SEL(BPF_S_JMP_JEQ_X, X86_JE, X86_JNE);
35984+ COND_SEL(BPF_S_JMP_JSET_X, X86_JNE, X86_JE);
35985+
35986+cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
35987+ t_offset = addrs[i + filter[i].jt] - addrs[i];
35988+
35989+ /* same targets, can avoid doing the test :) */
35990+ if (filter[i].jt == filter[i].jf) {
35991+ EMIT_JMP(t_offset);
35992+ break;
35993+ }
35994+
35995+ switch (filter[i].code) {
35996+ case BPF_S_JMP_JGT_X:
35997+ case BPF_S_JMP_JGE_X:
35998+ case BPF_S_JMP_JEQ_X:
35999+ seen |= SEEN_XREG;
36000+ EMIT2(0x39, 0xd8); /* cmp %ebx,%eax */
36001+ break;
36002+ case BPF_S_JMP_JSET_X:
36003+ seen |= SEEN_XREG;
36004+ EMIT2(0x85, 0xd8); /* test %ebx,%eax */
36005+ break;
36006+ case BPF_S_JMP_JEQ_K:
36007+ if (K == 0) {
36008+ EMIT2(0x85, 0xc0); /* test %eax,%eax */
36009+ break;
36010+ }
36011+ case BPF_S_JMP_JGT_K:
36012+ case BPF_S_JMP_JGE_K:
36013+ if (K <= 127)
36014+ EMIT3(0x83, 0xf8, K); /* cmp imm8,%eax */
36015+ else
36016+ EMIT1_off32(0x3d, K); /* cmp imm32,%eax */
36017+ break;
36018+ case BPF_S_JMP_JSET_K:
36019+ if (K <= 0xFF)
36020+ EMIT2(0xa8, K); /* test imm8,%al */
36021+ else if (!(K & 0xFFFF00FF))
36022+ EMIT3(0xf6, 0xc4, K >> 8); /* test imm8,%ah */
36023+ else if (K <= 0xFFFF) {
36024+ EMIT2(0x66, 0xa9); /* test imm16,%ax */
36025+ EMIT(K, 2);
36026+ } else {
36027+ EMIT1_off32(0xa9, K); /* test imm32,%eax */
36028+ }
36029+ break;
36030+ }
36031+ if (filter[i].jt != 0) {
36032+ if (filter[i].jf && f_offset)
36033+ t_offset += is_near(f_offset) ? 2 : 5;
36034+ EMIT_COND_JMP(t_op, t_offset);
36035+ if (filter[i].jf)
36036+ EMIT_JMP(f_offset);
36037+ break;
36038+ }
36039+ EMIT_COND_JMP(f_op, f_offset);
36040+ break;
36041+ default:
36042+ /* hmm, filter too complex: give up on the JIT compiler */
36043+ goto out;
36044+ }
36045+ ilen = prog - temp;
36046+ if (image) {
36047+ if (unlikely(proglen + ilen > oldproglen)) {
36048+ pr_err("bpf_jit_compile fatal error\n");
36049+ kfree(addrs);
36050+ module_free_exec(NULL, image);
36051+ return;
36052+ }
36053+ pax_open_kernel();
36054+ memcpy(image + proglen, temp, ilen);
36055+ pax_close_kernel();
36056+ }
36057+ proglen += ilen;
36058+ addrs[i] = proglen;
36059+ prog = temp;
36060+ }
36061+ /* last bpf instruction is always a RET :
36062+ * use it to give the cleanup instruction(s) addr
36063+ */
36064+ cleanup_addr = proglen - 1; /* ret */
36065+ if (seen_or_pass0)
36066+ cleanup_addr -= 1; /* leaveq */
36067+ if (seen_or_pass0 & SEEN_XREG)
36068+ cleanup_addr -= 4; /* mov -8(%rbp),%rbx */
36069+
36070 if (image) {
36071 if (proglen != oldproglen)
36072- pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
36073- proglen, oldproglen);
36074+ pr_err("bpf_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
36075 break;
36076 }
36077 if (proglen == oldproglen) {
36078@@ -918,32 +872,30 @@ void bpf_int_jit_compile(struct sk_filter *prog)
36079 }
36080
36081 if (bpf_jit_enable > 1)
36082- bpf_jit_dump(prog->len, proglen, 0, image);
36083+ bpf_jit_dump(flen, proglen, pass, image);
36084
36085 if (image) {
36086 bpf_flush_icache(header, image + proglen);
36087- set_memory_ro((unsigned long)header, header->pages);
36088- prog->bpf_func = (void *)image;
36089- prog->jited = 1;
36090+ fp->bpf_func = (void *)image;
36091 }
36092 out:
36093 kfree(addrs);
36094+ return;
36095 }
36096
36097 static void bpf_jit_free_deferred(struct work_struct *work)
36098 {
36099 struct sk_filter *fp = container_of(work, struct sk_filter, work);
36100 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
36101- struct bpf_binary_header *header = (void *)addr;
36102
36103- set_memory_rw(addr, header->pages);
36104- module_free(NULL, header);
36105+ set_memory_rw(addr, 1);
36106+ module_free_exec(NULL, (void *)addr);
36107 kfree(fp);
36108 }
36109
36110 void bpf_jit_free(struct sk_filter *fp)
36111 {
36112- if (fp->jited) {
36113+ if (fp->bpf_func != sk_run_filter) {
36114 INIT_WORK(&fp->work, bpf_jit_free_deferred);
36115 schedule_work(&fp->work);
36116 } else {
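The GRKERNSEC_JIT_HARDEN hunks above are the heart of this file's change: wherever the JIT would copy an attacker-chosen 32-bit immediate K straight into executable memory (the `mov imm32,%ecx` emissions), DILUTE_CONST_SEQUENCE(K, randkey) emits it split against a fresh prandom_u32() key instead. A minimal userspace sketch of the idea follows; the subtract-then-add split and all names here are illustrative, not the macro's actual emission sequence.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Emit-time: only (K - key) would be placed in the instruction stream. */
static uint32_t emit_blinded_const(uint32_t K, uint32_t key)
{
	uint32_t diluted = K - key;	/* literal actually stored in JIT memory */

	return diluted + key;		/* run-time reconstruction == K */
}

int main(void)
{
	uint32_t K = 0xdeadbeef;		/* attacker-controlled filter constant */
	uint32_t key = (uint32_t)random();	/* stands in for the per-instruction key */

	printf("reconstructed: %#x\n", (unsigned)emit_blinded_const(K, key));
	return 0;
}

Because the key is drawn per instruction, even a filter that repeats the same constant yields different bytes on every load, which is what defeats JIT spraying.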
36117diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
36118index 5d04be5..2beeaa2 100644
36119--- a/arch/x86/oprofile/backtrace.c
36120+++ b/arch/x86/oprofile/backtrace.c
36121@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
36122 struct stack_frame_ia32 *fp;
36123 unsigned long bytes;
36124
36125- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
36126+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
36127 if (bytes != 0)
36128 return NULL;
36129
36130- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
36131+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
36132
36133 oprofile_add_trace(bufhead[0].return_address);
36134
36135@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
36136 struct stack_frame bufhead[2];
36137 unsigned long bytes;
36138
36139- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
36140+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
36141 if (bytes != 0)
36142 return NULL;
36143
36144@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
36145 {
36146 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
36147
36148- if (!user_mode_vm(regs)) {
36149+ if (!user_mode(regs)) {
36150 unsigned long stack = kernel_stack_pointer(regs);
36151 if (depth)
36152 dump_trace(NULL, regs, (unsigned long *)stack, 0,
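For context on the backtrace.c hunks: dump_user_backtrace() and its ia32 variant walk saved frame pointers, where each frame begins with a pointer to the caller's frame followed by the return address; the patch only tightens the sparse annotations (__force_user/__force_kernel) on those user-memory reads. A hedged userspace sketch of the same walk, assuming x86 frame layout and a build with -fno-omit-frame-pointer; the three-frame cap keeps it from chasing pointers past main():

#include <stdio.h>

struct stack_frame {
	struct stack_frame *next_frame;
	unsigned long return_address;
};

static void __attribute__((noinline)) walk(void)
{
	struct stack_frame *fp = __builtin_frame_address(0);
	int depth;

	/* three frames: walk(), caller(), main() -- stop before libc */
	for (depth = 0; fp && depth < 3; depth++) {
		printf("frame %d: return address %#lx\n",
		       depth, fp->return_address);
		fp = fp->next_frame;
	}
}

static void __attribute__((noinline)) caller(void)
{
	walk();
}

int main(void)
{
	caller();
	return 0;
}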
36153diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
36154index 379e8bd..6386e09 100644
36155--- a/arch/x86/oprofile/nmi_int.c
36156+++ b/arch/x86/oprofile/nmi_int.c
36157@@ -23,6 +23,7 @@
36158 #include <asm/nmi.h>
36159 #include <asm/msr.h>
36160 #include <asm/apic.h>
36161+#include <asm/pgtable.h>
36162
36163 #include "op_counter.h"
36164 #include "op_x86_model.h"
36165@@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
36166 if (ret)
36167 return ret;
36168
36169- if (!model->num_virt_counters)
36170- model->num_virt_counters = model->num_counters;
36171+ if (!model->num_virt_counters) {
36172+ pax_open_kernel();
36173+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
36174+ pax_close_kernel();
36175+ }
36176
36177 mux_init(ops);
36178
36179diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
36180index 50d86c0..7985318 100644
36181--- a/arch/x86/oprofile/op_model_amd.c
36182+++ b/arch/x86/oprofile/op_model_amd.c
36183@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
36184 num_counters = AMD64_NUM_COUNTERS;
36185 }
36186
36187- op_amd_spec.num_counters = num_counters;
36188- op_amd_spec.num_controls = num_counters;
36189- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
36190+ pax_open_kernel();
36191+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
36192+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
36193+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
36194+ pax_close_kernel();
36195
36196 return 0;
36197 }
36198diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
36199index d90528e..0127e2b 100644
36200--- a/arch/x86/oprofile/op_model_ppro.c
36201+++ b/arch/x86/oprofile/op_model_ppro.c
36202@@ -19,6 +19,7 @@
36203 #include <asm/msr.h>
36204 #include <asm/apic.h>
36205 #include <asm/nmi.h>
36206+#include <asm/pgtable.h>
36207
36208 #include "op_x86_model.h"
36209 #include "op_counter.h"
36210@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
36211
36212 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
36213
36214- op_arch_perfmon_spec.num_counters = num_counters;
36215- op_arch_perfmon_spec.num_controls = num_counters;
36216+ pax_open_kernel();
36217+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
36218+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
36219+ pax_close_kernel();
36220 }
36221
36222 static int arch_perfmon_init(struct oprofile_operations *ignore)
36223diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
36224index 71e8a67..6a313bb 100644
36225--- a/arch/x86/oprofile/op_x86_model.h
36226+++ b/arch/x86/oprofile/op_x86_model.h
36227@@ -52,7 +52,7 @@ struct op_x86_model_spec {
36228 void (*switch_ctrl)(struct op_x86_model_spec const *model,
36229 struct op_msrs const * const msrs);
36230 #endif
36231-};
36232+} __do_const;
36233
36234 struct op_counter_config;
36235
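The four oprofile hunks above share one pattern: op_x86_model_spec becomes __do_const, so the ops tables land in read-only memory, and the few legitimate writers (op_nmi_init(), op_amd_init(), arch_perfmon_setup_counters()) update them through a pax_open_kernel()/pax_close_kernel() window with the const cast away. A rough userspace analogue of that discipline, with mprotect() standing in for the PaX helpers (a sketch of the idea, not the kernel mechanism):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct ops {
	int (*probe)(void);
};

static int probe_v1(void) { return 1; }
static int probe_v2(void) { return 2; }

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	struct ops *t = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
			     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (t == MAP_FAILED)
		return 1;
	t->probe = probe_v1;
	mprotect(t, pagesz, PROT_READ);			/* now effectively const */

	mprotect(t, pagesz, PROT_READ | PROT_WRITE);	/* pax_open_kernel() */
	t->probe = probe_v2;				/* the one legitimate write */
	mprotect(t, pagesz, PROT_READ);			/* pax_close_kernel() */

	printf("probe() -> %d\n", t->probe());
	return 0;
}

Any stray write outside the window faults, which is the point: function-pointer tables stop being a convenient overwrite target.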
36236diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
36237index 84b9d67..260e5ff 100644
36238--- a/arch/x86/pci/intel_mid_pci.c
36239+++ b/arch/x86/pci/intel_mid_pci.c
36240@@ -245,7 +245,7 @@ int __init intel_mid_pci_init(void)
36241 pr_info("Intel MID platform detected, using MID PCI ops\n");
36242 pci_mmcfg_late_init();
36243 pcibios_enable_irq = intel_mid_pci_irq_enable;
36244- pci_root_ops = intel_mid_pci_ops;
36245+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
36246 pci_soc_mode = 1;
36247 /* Continue with standard init */
36248 return 1;
36249diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
36250index 84112f5..6334d60 100644
36251--- a/arch/x86/pci/irq.c
36252+++ b/arch/x86/pci/irq.c
36253@@ -50,7 +50,7 @@ struct irq_router {
36254 struct irq_router_handler {
36255 u16 vendor;
36256 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
36257-};
36258+} __do_const;
36259
36260 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
36261 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
36262@@ -790,7 +790,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
36263 return 0;
36264 }
36265
36266-static __initdata struct irq_router_handler pirq_routers[] = {
36267+static __initconst const struct irq_router_handler pirq_routers[] = {
36268 { PCI_VENDOR_ID_INTEL, intel_router_probe },
36269 { PCI_VENDOR_ID_AL, ali_router_probe },
36270 { PCI_VENDOR_ID_ITE, ite_router_probe },
36271@@ -817,7 +817,7 @@ static struct pci_dev *pirq_router_dev;
36272 static void __init pirq_find_router(struct irq_router *r)
36273 {
36274 struct irq_routing_table *rt = pirq_table;
36275- struct irq_router_handler *h;
36276+ const struct irq_router_handler *h;
36277
36278 #ifdef CONFIG_PCI_BIOS
36279 if (!rt->signature) {
36280@@ -1090,7 +1090,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
36281 return 0;
36282 }
36283
36284-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
36285+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
36286 {
36287 .callback = fix_broken_hp_bios_irq9,
36288 .ident = "HP Pavilion N5400 Series Laptop",
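The pci/irq.c constification works for the same reason: pirq_routers and pciirq_dmi_table are scan-once dispatch tables that nothing writes after build time, so marking them __initconst costs nothing. A minimal sketch of the probe-table pattern pirq_find_router() iterates over; the vendor and device ids here are invented:

#include <stdio.h>

struct irq_router_handler {
	unsigned short vendor;
	int (*probe)(unsigned short device);
};

static int intel_probe(unsigned short device)
{
	return device == 0x1234;	/* invented device id */
}

/* const at build time: nothing ever writes to this table */
static const struct irq_router_handler routers[] = {
	{ 0x8086, intel_probe },
	{ 0, NULL }			/* sentinel ends the scan */
};

int main(void)
{
	const struct irq_router_handler *h;

	for (h = routers; h->vendor; h++)
		if (h->probe(0x1234))
			printf("matched router vendor %#x\n", h->vendor);
	return 0;
}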
36289diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
36290index c77b24a..c979855 100644
36291--- a/arch/x86/pci/pcbios.c
36292+++ b/arch/x86/pci/pcbios.c
36293@@ -79,7 +79,7 @@ union bios32 {
36294 static struct {
36295 unsigned long address;
36296 unsigned short segment;
36297-} bios32_indirect = { 0, __KERNEL_CS };
36298+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
36299
36300 /*
36301 * Returns the entry point for the given service, NULL on error
36302@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
36303 unsigned long length; /* %ecx */
36304 unsigned long entry; /* %edx */
36305 unsigned long flags;
36306+ struct desc_struct d, *gdt;
36307
36308 local_irq_save(flags);
36309- __asm__("lcall *(%%edi); cld"
36310+
36311+ gdt = get_cpu_gdt_table(smp_processor_id());
36312+
36313+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
36314+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
36315+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
36316+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
36317+
36318+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
36319 : "=a" (return_code),
36320 "=b" (address),
36321 "=c" (length),
36322 "=d" (entry)
36323 : "0" (service),
36324 "1" (0),
36325- "D" (&bios32_indirect));
36326+ "D" (&bios32_indirect),
36327+ "r"(__PCIBIOS_DS)
36328+ : "memory");
36329+
36330+ pax_open_kernel();
36331+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
36332+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
36333+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
36334+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
36335+ pax_close_kernel();
36336+
36337 local_irq_restore(flags);
36338
36339 switch (return_code) {
36340- case 0:
36341- return address + entry;
36342- case 0x80: /* Not present */
36343- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
36344- return 0;
36345- default: /* Shouldn't happen */
36346- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
36347- service, return_code);
36348+ case 0: {
36349+ int cpu;
36350+ unsigned char flags;
36351+
36352+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
36353+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
36354+ printk(KERN_WARNING "bios32_service: not valid\n");
36355 return 0;
36356+ }
36357+ address = address + PAGE_OFFSET;
36358+ length += 16UL; /* some BIOSes underreport this... */
36359+ flags = 4;
36360+ if (length >= 64*1024*1024) {
36361+ length >>= PAGE_SHIFT;
36362+ flags |= 8;
36363+ }
36364+
36365+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
36366+ gdt = get_cpu_gdt_table(cpu);
36367+ pack_descriptor(&d, address, length, 0x9b, flags);
36368+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
36369+ pack_descriptor(&d, address, length, 0x93, flags);
36370+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
36371+ }
36372+ return entry;
36373+ }
36374+ case 0x80: /* Not present */
36375+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
36376+ return 0;
36377+ default: /* Shouldn't happen */
36378+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
36379+ service, return_code);
36380+ return 0;
36381 }
36382 }
36383
36384 static struct {
36385 unsigned long address;
36386 unsigned short segment;
36387-} pci_indirect = { 0, __KERNEL_CS };
36388+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
36389
36390-static int pci_bios_present;
36391+static int pci_bios_present __read_only;
36392
36393 static int check_pcibios(void)
36394 {
36395@@ -131,11 +174,13 @@ static int check_pcibios(void)
36396 unsigned long flags, pcibios_entry;
36397
36398 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
36399- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
36400+ pci_indirect.address = pcibios_entry;
36401
36402 local_irq_save(flags);
36403- __asm__(
36404- "lcall *(%%edi); cld\n\t"
36405+ __asm__("movw %w6, %%ds\n\t"
36406+ "lcall *%%ss:(%%edi); cld\n\t"
36407+ "push %%ss\n\t"
36408+ "pop %%ds\n\t"
36409 "jc 1f\n\t"
36410 "xor %%ah, %%ah\n"
36411 "1:"
36412@@ -144,7 +189,8 @@ static int check_pcibios(void)
36413 "=b" (ebx),
36414 "=c" (ecx)
36415 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
36416- "D" (&pci_indirect)
36417+ "D" (&pci_indirect),
36418+ "r" (__PCIBIOS_DS)
36419 : "memory");
36420 local_irq_restore(flags);
36421
36422@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36423
36424 switch (len) {
36425 case 1:
36426- __asm__("lcall *(%%esi); cld\n\t"
36427+ __asm__("movw %w6, %%ds\n\t"
36428+ "lcall *%%ss:(%%esi); cld\n\t"
36429+ "push %%ss\n\t"
36430+ "pop %%ds\n\t"
36431 "jc 1f\n\t"
36432 "xor %%ah, %%ah\n"
36433 "1:"
36434@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36435 : "1" (PCIBIOS_READ_CONFIG_BYTE),
36436 "b" (bx),
36437 "D" ((long)reg),
36438- "S" (&pci_indirect));
36439+ "S" (&pci_indirect),
36440+ "r" (__PCIBIOS_DS));
36441 /*
36442 * Zero-extend the result beyond 8 bits, do not trust the
36443 * BIOS having done it:
36444@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36445 *value &= 0xff;
36446 break;
36447 case 2:
36448- __asm__("lcall *(%%esi); cld\n\t"
36449+ __asm__("movw %w6, %%ds\n\t"
36450+ "lcall *%%ss:(%%esi); cld\n\t"
36451+ "push %%ss\n\t"
36452+ "pop %%ds\n\t"
36453 "jc 1f\n\t"
36454 "xor %%ah, %%ah\n"
36455 "1:"
36456@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36457 : "1" (PCIBIOS_READ_CONFIG_WORD),
36458 "b" (bx),
36459 "D" ((long)reg),
36460- "S" (&pci_indirect));
36461+ "S" (&pci_indirect),
36462+ "r" (__PCIBIOS_DS));
36463 /*
36464 * Zero-extend the result beyond 16 bits, do not trust the
36465 * BIOS having done it:
36466@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36467 *value &= 0xffff;
36468 break;
36469 case 4:
36470- __asm__("lcall *(%%esi); cld\n\t"
36471+ __asm__("movw %w6, %%ds\n\t"
36472+ "lcall *%%ss:(%%esi); cld\n\t"
36473+ "push %%ss\n\t"
36474+ "pop %%ds\n\t"
36475 "jc 1f\n\t"
36476 "xor %%ah, %%ah\n"
36477 "1:"
36478@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36479 : "1" (PCIBIOS_READ_CONFIG_DWORD),
36480 "b" (bx),
36481 "D" ((long)reg),
36482- "S" (&pci_indirect));
36483+ "S" (&pci_indirect),
36484+ "r" (__PCIBIOS_DS));
36485 break;
36486 }
36487
36488@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
36489
36490 switch (len) {
36491 case 1:
36492- __asm__("lcall *(%%esi); cld\n\t"
36493+ __asm__("movw %w6, %%ds\n\t"
36494+ "lcall *%%ss:(%%esi); cld\n\t"
36495+ "push %%ss\n\t"
36496+ "pop %%ds\n\t"
36497 "jc 1f\n\t"
36498 "xor %%ah, %%ah\n"
36499 "1:"
36500@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
36501 "c" (value),
36502 "b" (bx),
36503 "D" ((long)reg),
36504- "S" (&pci_indirect));
36505+ "S" (&pci_indirect),
36506+ "r" (__PCIBIOS_DS));
36507 break;
36508 case 2:
36509- __asm__("lcall *(%%esi); cld\n\t"
36510+ __asm__("movw %w6, %%ds\n\t"
36511+ "lcall *%%ss:(%%esi); cld\n\t"
36512+ "push %%ss\n\t"
36513+ "pop %%ds\n\t"
36514 "jc 1f\n\t"
36515 "xor %%ah, %%ah\n"
36516 "1:"
36517@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
36518 "c" (value),
36519 "b" (bx),
36520 "D" ((long)reg),
36521- "S" (&pci_indirect));
36522+ "S" (&pci_indirect),
36523+ "r" (__PCIBIOS_DS));
36524 break;
36525 case 4:
36526- __asm__("lcall *(%%esi); cld\n\t"
36527+ __asm__("movw %w6, %%ds\n\t"
36528+ "lcall *%%ss:(%%esi); cld\n\t"
36529+ "push %%ss\n\t"
36530+ "pop %%ds\n\t"
36531 "jc 1f\n\t"
36532 "xor %%ah, %%ah\n"
36533 "1:"
36534@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
36535 "c" (value),
36536 "b" (bx),
36537 "D" ((long)reg),
36538- "S" (&pci_indirect));
36539+ "S" (&pci_indirect),
36540+ "r" (__PCIBIOS_DS));
36541 break;
36542 }
36543
36544@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
36545
36546 DBG("PCI: Fetching IRQ routing table... ");
36547 __asm__("push %%es\n\t"
36548+ "movw %w8, %%ds\n\t"
36549 "push %%ds\n\t"
36550 "pop %%es\n\t"
36551- "lcall *(%%esi); cld\n\t"
36552+ "lcall *%%ss:(%%esi); cld\n\t"
36553 "pop %%es\n\t"
36554+ "push %%ss\n\t"
36555+ "pop %%ds\n"
36556 "jc 1f\n\t"
36557 "xor %%ah, %%ah\n"
36558 "1:"
36559@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
36560 "1" (0),
36561 "D" ((long) &opt),
36562 "S" (&pci_indirect),
36563- "m" (opt)
36564+ "m" (opt),
36565+ "r" (__PCIBIOS_DS)
36566 : "memory");
36567 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
36568 if (ret & 0xff00)
36569@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
36570 {
36571 int ret;
36572
36573- __asm__("lcall *(%%esi); cld\n\t"
36574+ __asm__("movw %w5, %%ds\n\t"
36575+ "lcall *%%ss:(%%esi); cld\n\t"
36576+ "push %%ss\n\t"
36577+ "pop %%ds\n"
36578 "jc 1f\n\t"
36579 "xor %%ah, %%ah\n"
36580 "1:"
36581@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
36582 : "0" (PCIBIOS_SET_PCI_HW_INT),
36583 "b" ((dev->bus->number << 8) | dev->devfn),
36584 "c" ((irq << 8) | (pin + 10)),
36585- "S" (&pci_indirect));
36586+ "S" (&pci_indirect),
36587+ "r" (__PCIBIOS_DS));
36588 return !(ret & 0xff00);
36589 }
36590 EXPORT_SYMBOL(pcibios_set_irq_routing);
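Stepping back from the pcbios.c hunks: instead of far-calling the BIOS through the flat __KERNEL_CS, the patched bios32_service() builds dedicated __PCIBIOS_CS/__PCIBIOS_DS descriptors sized to just the reported service area, flipping to 4K granularity once the limit outgrows what byte granularity can express (the 64 MiB check). A hedged sketch of that descriptor arithmetic, following the i386 GDT bit layout; the base and length values are invented:

#include <stdint.h>
#include <stdio.h>

/* Pack base/limit/type/flags into an 8-byte i386 GDT descriptor. */
static uint64_t pack_descriptor(uint32_t base, uint32_t limit,
				uint8_t type, uint8_t flags)
{
	uint64_t d;

	d  = limit & 0x0000ffffULL;			/* limit 15:0  */
	d |= (uint64_t)(base & 0x00ffffff) << 16;	/* base  23:0  */
	d |= (uint64_t)type << 40;			/* access byte */
	d |= (uint64_t)(limit & 0x000f0000) << 32;	/* limit 19:16 */
	d |= (uint64_t)(flags & 0xf) << 52;		/* G/D/L/AVL   */
	d |= (uint64_t)(base & 0xff000000) << 32;	/* base  31:24 */
	return d;
}

int main(void)
{
	uint32_t base = 0xc00fd000;	/* invented: entry + PAGE_OFFSET */
	uint32_t limit = 0x10000 + 16;	/* reported length + slack       */
	uint8_t flags = 0x4;		/* 32-bit, byte granularity      */

	if (limit >= 64 * 1024 * 1024) {	/* too big for bytes? */
		limit >>= 12;			/* count 4K pages...  */
		flags |= 0x8;			/* ...and set G       */
	}
	printf("__PCIBIOS_CS: %#018llx\n",
	       (unsigned long long)pack_descriptor(base, limit, 0x9b, flags));
	return 0;
}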
36591diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
36592index 9ee3491..872192f 100644
36593--- a/arch/x86/platform/efi/efi_32.c
36594+++ b/arch/x86/platform/efi/efi_32.c
36595@@ -59,11 +59,22 @@ void efi_call_phys_prelog(void)
36596 {
36597 struct desc_ptr gdt_descr;
36598
36599+#ifdef CONFIG_PAX_KERNEXEC
36600+ struct desc_struct d;
36601+#endif
36602+
36603 local_irq_save(efi_rt_eflags);
36604
36605 load_cr3(initial_page_table);
36606 __flush_tlb_all();
36607
36608+#ifdef CONFIG_PAX_KERNEXEC
36609+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
36610+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
36611+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
36612+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
36613+#endif
36614+
36615 gdt_descr.address = __pa(get_cpu_gdt_table(0));
36616 gdt_descr.size = GDT_SIZE - 1;
36617 load_gdt(&gdt_descr);
36618@@ -73,11 +84,24 @@ void efi_call_phys_epilog(void)
36619 {
36620 struct desc_ptr gdt_descr;
36621
36622+#ifdef CONFIG_PAX_KERNEXEC
36623+ struct desc_struct d;
36624+
36625+ memset(&d, 0, sizeof d);
36626+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
36627+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
36628+#endif
36629+
36630 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
36631 gdt_descr.size = GDT_SIZE - 1;
36632 load_gdt(&gdt_descr);
36633
36634+#ifdef CONFIG_PAX_PER_CPU_PGD
36635+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
36636+#else
36637 load_cr3(swapper_pg_dir);
36638+#endif
36639+
36640 __flush_tlb_all();
36641
36642 local_irq_restore(efi_rt_eflags);
36643diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
36644index 290d397..3906bcd 100644
36645--- a/arch/x86/platform/efi/efi_64.c
36646+++ b/arch/x86/platform/efi/efi_64.c
36647@@ -99,6 +99,11 @@ void __init efi_call_phys_prelog(void)
36648 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
36649 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
36650 }
36651+
36652+#ifdef CONFIG_PAX_PER_CPU_PGD
36653+ load_cr3(swapper_pg_dir);
36654+#endif
36655+
36656 __flush_tlb_all();
36657 }
36658
36659@@ -116,6 +121,11 @@ void __init efi_call_phys_epilog(void)
36660 for (pgd = 0; pgd < n_pgds; pgd++)
36661 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
36662 kfree(save_pgd);
36663+
36664+#ifdef CONFIG_PAX_PER_CPU_PGD
36665+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
36666+#endif
36667+
36668 __flush_tlb_all();
36669 local_irq_restore(efi_flags);
36670 early_code_mapping_set_exec(0);
36671diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
36672index fbe66e6..eae5e38 100644
36673--- a/arch/x86/platform/efi/efi_stub_32.S
36674+++ b/arch/x86/platform/efi/efi_stub_32.S
36675@@ -6,7 +6,9 @@
36676 */
36677
36678 #include <linux/linkage.h>
36679+#include <linux/init.h>
36680 #include <asm/page_types.h>
36681+#include <asm/segment.h>
36682
36683 /*
36684 * efi_call_phys(void *, ...) is a function with variable parameters.
36685@@ -20,7 +22,7 @@
36686 * service functions will comply with gcc calling convention, too.
36687 */
36688
36689-.text
36690+__INIT
36691 ENTRY(efi_call_phys)
36692 /*
36693 * 0. The function can only be called in Linux kernel. So CS has been
36694@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
36695 * The mapping of lower virtual memory has been created in prelog and
36696 * epilog.
36697 */
36698- movl $1f, %edx
36699- subl $__PAGE_OFFSET, %edx
36700- jmp *%edx
36701+#ifdef CONFIG_PAX_KERNEXEC
36702+ movl $(__KERNEXEC_EFI_DS), %edx
36703+ mov %edx, %ds
36704+ mov %edx, %es
36705+ mov %edx, %ss
36706+ addl $2f,(1f)
36707+ ljmp *(1f)
36708+
36709+__INITDATA
36710+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
36711+.previous
36712+
36713+2:
36714+ subl $2b,(1b)
36715+#else
36716+ jmp 1f-__PAGE_OFFSET
36717 1:
36718+#endif
36719
36720 /*
36721 * 2. Now on the top of stack is the return
36722@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
36723 * parameter 2, ..., param n. To make things easy, we save the return
36724 * address of efi_call_phys in a global variable.
36725 */
36726- popl %edx
36727- movl %edx, saved_return_addr
36728- /* get the function pointer into ECX*/
36729- popl %ecx
36730- movl %ecx, efi_rt_function_ptr
36731- movl $2f, %edx
36732- subl $__PAGE_OFFSET, %edx
36733- pushl %edx
36734+ popl (saved_return_addr)
36735+ popl (efi_rt_function_ptr)
36736
36737 /*
36738 * 3. Clear PG bit in %CR0.
36739@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
36740 /*
36741 * 5. Call the physical function.
36742 */
36743- jmp *%ecx
36744+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
36745
36746-2:
36747 /*
36748 * 6. After EFI runtime service returns, control will return to
36749 * following instruction. We'd better readjust stack pointer first.
36750@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
36751 movl %cr0, %edx
36752 orl $0x80000000, %edx
36753 movl %edx, %cr0
36754- jmp 1f
36755-1:
36756+
36757 /*
36758 * 8. Now restore the virtual mode from flat mode by
36759 * adding EIP with PAGE_OFFSET.
36760 */
36761- movl $1f, %edx
36762- jmp *%edx
36763+#ifdef CONFIG_PAX_KERNEXEC
36764+ movl $(__KERNEL_DS), %edx
36765+ mov %edx, %ds
36766+ mov %edx, %es
36767+ mov %edx, %ss
36768+ ljmp $(__KERNEL_CS),$1f
36769+#else
36770+ jmp 1f+__PAGE_OFFSET
36771+#endif
36772 1:
36773
36774 /*
36775 * 9. Balance the stack. And because EAX contain the return value,
36776 * we'd better not clobber it.
36777 */
36778- leal efi_rt_function_ptr, %edx
36779- movl (%edx), %ecx
36780- pushl %ecx
36781+ pushl (efi_rt_function_ptr)
36782
36783 /*
36784- * 10. Push the saved return address onto the stack and return.
36785+ * 10. Return to the saved return address.
36786 */
36787- leal saved_return_addr, %edx
36788- movl (%edx), %ecx
36789- pushl %ecx
36790- ret
36791+ jmpl *(saved_return_addr)
36792 ENDPROC(efi_call_phys)
36793 .previous
36794
36795-.data
36796+__INITDATA
36797 saved_return_addr:
36798 .long 0
36799 efi_rt_function_ptr:
36800diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
36801index 5fcda72..b9d1d65 100644
36802--- a/arch/x86/platform/efi/efi_stub_64.S
36803+++ b/arch/x86/platform/efi/efi_stub_64.S
36804@@ -11,6 +11,7 @@
36805 #include <asm/msr.h>
36806 #include <asm/processor-flags.h>
36807 #include <asm/page_types.h>
36808+#include <asm/alternative-asm.h>
36809
36810 #define SAVE_XMM \
36811 mov %rsp, %rax; \
36812@@ -88,6 +89,7 @@ ENTRY(efi_call)
36813 RESTORE_PGT
36814 addq $48, %rsp
36815 RESTORE_XMM
36816+ pax_force_retaddr 0, 1
36817 ret
36818 ENDPROC(efi_call)
36819
36820diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
36821index 1bbedc4..eb795b5 100644
36822--- a/arch/x86/platform/intel-mid/intel-mid.c
36823+++ b/arch/x86/platform/intel-mid/intel-mid.c
36824@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
36825 {
36826 };
36827
36828-static void intel_mid_reboot(void)
36829+static void __noreturn intel_mid_reboot(void)
36830 {
36831 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
36832+ BUG();
36833 }
36834
36835 static unsigned long __init intel_mid_calibrate_tsc(void)
36836diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
36837index d6ee929..3637cb5 100644
36838--- a/arch/x86/platform/olpc/olpc_dt.c
36839+++ b/arch/x86/platform/olpc/olpc_dt.c
36840@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
36841 return res;
36842 }
36843
36844-static struct of_pdt_ops prom_olpc_ops __initdata = {
36845+static struct of_pdt_ops prom_olpc_ops __initconst = {
36846 .nextprop = olpc_dt_nextprop,
36847 .getproplen = olpc_dt_getproplen,
36848 .getproperty = olpc_dt_getproperty,
36849diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
36850index 424f4c9..f2a2988 100644
36851--- a/arch/x86/power/cpu.c
36852+++ b/arch/x86/power/cpu.c
36853@@ -137,11 +137,8 @@ static void do_fpu_end(void)
36854 static void fix_processor_context(void)
36855 {
36856 int cpu = smp_processor_id();
36857- struct tss_struct *t = &per_cpu(init_tss, cpu);
36858-#ifdef CONFIG_X86_64
36859- struct desc_struct *desc = get_cpu_gdt_table(cpu);
36860- tss_desc tss;
36861-#endif
36862+ struct tss_struct *t = init_tss + cpu;
36863+
36864 set_tss_desc(cpu, t); /*
36865 * This just modifies memory; should not be
36866 * necessary. But... This is necessary, because
36867@@ -150,10 +147,6 @@ static void fix_processor_context(void)
36868 */
36869
36870 #ifdef CONFIG_X86_64
36871- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
36872- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
36873- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
36874-
36875 syscall_init(); /* This sets MSR_*STAR and related */
36876 #endif
36877 load_TR_desc(); /* This does ltr */
36878diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
36879index bad628a..a102610 100644
36880--- a/arch/x86/realmode/init.c
36881+++ b/arch/x86/realmode/init.c
36882@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
36883 __va(real_mode_header->trampoline_header);
36884
36885 #ifdef CONFIG_X86_32
36886- trampoline_header->start = __pa_symbol(startup_32_smp);
36887+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
36888+
36889+#ifdef CONFIG_PAX_KERNEXEC
36890+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
36891+#endif
36892+
36893+ trampoline_header->boot_cs = __BOOT_CS;
36894 trampoline_header->gdt_limit = __BOOT_DS + 7;
36895 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
36896 #else
36897@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
36898 *trampoline_cr4_features = read_cr4();
36899
36900 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
36901- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
36902+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
36903 trampoline_pgd[511] = init_level4_pgt[511].pgd;
36904 #endif
36905 }
36906diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
36907index 7c0d7be..d24dc88 100644
36908--- a/arch/x86/realmode/rm/Makefile
36909+++ b/arch/x86/realmode/rm/Makefile
36910@@ -67,5 +67,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
36911
36912 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
36913 -I$(srctree)/arch/x86/boot
36914+ifdef CONSTIFY_PLUGIN
36915+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
36916+endif
36917 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
36918 GCOV_PROFILE := n
36919diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
36920index a28221d..93c40f1 100644
36921--- a/arch/x86/realmode/rm/header.S
36922+++ b/arch/x86/realmode/rm/header.S
36923@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
36924 #endif
36925 /* APM/BIOS reboot */
36926 .long pa_machine_real_restart_asm
36927-#ifdef CONFIG_X86_64
36928+#ifdef CONFIG_X86_32
36929+ .long __KERNEL_CS
36930+#else
36931 .long __KERNEL32_CS
36932 #endif
36933 END(real_mode_header)
36934diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
36935index 48ddd76..c26749f 100644
36936--- a/arch/x86/realmode/rm/trampoline_32.S
36937+++ b/arch/x86/realmode/rm/trampoline_32.S
36938@@ -24,6 +24,12 @@
36939 #include <asm/page_types.h>
36940 #include "realmode.h"
36941
36942+#ifdef CONFIG_PAX_KERNEXEC
36943+#define ta(X) (X)
36944+#else
36945+#define ta(X) (pa_ ## X)
36946+#endif
36947+
36948 .text
36949 .code16
36950
36951@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
36952
36953 cli # We should be safe anyway
36954
36955- movl tr_start, %eax # where we need to go
36956-
36957 movl $0xA5A5A5A5, trampoline_status
36958 # write marker for master knows we're running
36959
36960@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
36961 movw $1, %dx # protected mode (PE) bit
36962 lmsw %dx # into protected mode
36963
36964- ljmpl $__BOOT_CS, $pa_startup_32
36965+ ljmpl *(trampoline_header)
36966
36967 .section ".text32","ax"
36968 .code32
36969@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
36970 .balign 8
36971 GLOBAL(trampoline_header)
36972 tr_start: .space 4
36973- tr_gdt_pad: .space 2
36974+ tr_boot_cs: .space 2
36975 tr_gdt: .space 6
36976 END(trampoline_header)
36977
36978diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
36979index dac7b20..72dbaca 100644
36980--- a/arch/x86/realmode/rm/trampoline_64.S
36981+++ b/arch/x86/realmode/rm/trampoline_64.S
36982@@ -93,6 +93,7 @@ ENTRY(startup_32)
36983 movl %edx, %gs
36984
36985 movl pa_tr_cr4, %eax
36986+ andl $~X86_CR4_PCIDE, %eax
36987 movl %eax, %cr4 # Enable PAE mode
36988
36989 # Setup trampoline 4 level pagetables
36990@@ -106,7 +107,7 @@ ENTRY(startup_32)
36991 wrmsr
36992
36993 # Enable paging and in turn activate Long Mode
36994- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
36995+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
36996 movl %eax, %cr0
36997
36998 /*
36999diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
37000index 9e7e147..25a4158 100644
37001--- a/arch/x86/realmode/rm/wakeup_asm.S
37002+++ b/arch/x86/realmode/rm/wakeup_asm.S
37003@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
37004 lgdtl pmode_gdt
37005
37006 /* This really couldn't... */
37007- movl pmode_entry, %eax
37008 movl pmode_cr0, %ecx
37009 movl %ecx, %cr0
37010- ljmpl $__KERNEL_CS, $pa_startup_32
37011- /* -> jmp *%eax in trampoline_32.S */
37012+
37013+ ljmpl *pmode_entry
37014 #else
37015 jmp trampoline_start
37016 #endif
37017diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
37018index 604a37e..e49702a 100644
37019--- a/arch/x86/tools/Makefile
37020+++ b/arch/x86/tools/Makefile
37021@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
37022
37023 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
37024
37025-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
37026+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
37027 hostprogs-y += relocs
37028 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
37029 PHONY += relocs
37030diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
37031index bbb1d22..e505211 100644
37032--- a/arch/x86/tools/relocs.c
37033+++ b/arch/x86/tools/relocs.c
37034@@ -1,5 +1,7 @@
37035 /* This is included from relocs_32/64.c */
37036
37037+#include "../../../include/generated/autoconf.h"
37038+
37039 #define ElfW(type) _ElfW(ELF_BITS, type)
37040 #define _ElfW(bits, type) __ElfW(bits, type)
37041 #define __ElfW(bits, type) Elf##bits##_##type
37042@@ -11,6 +13,7 @@
37043 #define Elf_Sym ElfW(Sym)
37044
37045 static Elf_Ehdr ehdr;
37046+static Elf_Phdr *phdr;
37047
37048 struct relocs {
37049 uint32_t *offset;
37050@@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp)
37051 }
37052 }
37053
37054+static void read_phdrs(FILE *fp)
37055+{
37056+ unsigned int i;
37057+
37058+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
37059+ if (!phdr) {
37060+ die("Unable to allocate %d program headers\n",
37061+ ehdr.e_phnum);
37062+ }
37063+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
37064+ die("Seek to %d failed: %s\n",
37065+ ehdr.e_phoff, strerror(errno));
37066+ }
37067+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
37068+ die("Cannot read ELF program headers: %s\n",
37069+ strerror(errno));
37070+ }
37071+ for(i = 0; i < ehdr.e_phnum; i++) {
37072+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
37073+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
37074+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
37075+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
37076+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
37077+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
37078+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
37079+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
37080+ }
37081+
37082+}
37083+
37084 static void read_shdrs(FILE *fp)
37085 {
37086- int i;
37087+ unsigned int i;
37088 Elf_Shdr shdr;
37089
37090 secs = calloc(ehdr.e_shnum, sizeof(struct section));
37091@@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp)
37092
37093 static void read_strtabs(FILE *fp)
37094 {
37095- int i;
37096+ unsigned int i;
37097 for (i = 0; i < ehdr.e_shnum; i++) {
37098 struct section *sec = &secs[i];
37099 if (sec->shdr.sh_type != SHT_STRTAB) {
37100@@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp)
37101
37102 static void read_symtabs(FILE *fp)
37103 {
37104- int i,j;
37105+ unsigned int i,j;
37106 for (i = 0; i < ehdr.e_shnum; i++) {
37107 struct section *sec = &secs[i];
37108 if (sec->shdr.sh_type != SHT_SYMTAB) {
37109@@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp)
37110 }
37111
37112
37113-static void read_relocs(FILE *fp)
37114+static void read_relocs(FILE *fp, int use_real_mode)
37115 {
37116- int i,j;
37117+ unsigned int i,j;
37118+ uint32_t base;
37119+
37120 for (i = 0; i < ehdr.e_shnum; i++) {
37121 struct section *sec = &secs[i];
37122 if (sec->shdr.sh_type != SHT_REL_TYPE) {
37123@@ -498,9 +533,22 @@ static void read_relocs(FILE *fp)
37124 die("Cannot read symbol table: %s\n",
37125 strerror(errno));
37126 }
37127+ base = 0;
37128+
37129+#ifdef CONFIG_X86_32
37130+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
37131+ if (phdr[j].p_type != PT_LOAD )
37132+ continue;
37133+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
37134+ continue;
37135+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
37136+ break;
37137+ }
37138+#endif
37139+
37140 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
37141 Elf_Rel *rel = &sec->reltab[j];
37142- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
37143+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
37144 rel->r_info = elf_xword_to_cpu(rel->r_info);
37145 #if (SHT_REL_TYPE == SHT_RELA)
37146 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
37147@@ -512,7 +560,7 @@ static void read_relocs(FILE *fp)
37148
37149 static void print_absolute_symbols(void)
37150 {
37151- int i;
37152+ unsigned int i;
37153 const char *format;
37154
37155 if (ELF_BITS == 64)
37156@@ -525,7 +573,7 @@ static void print_absolute_symbols(void)
37157 for (i = 0; i < ehdr.e_shnum; i++) {
37158 struct section *sec = &secs[i];
37159 char *sym_strtab;
37160- int j;
37161+ unsigned int j;
37162
37163 if (sec->shdr.sh_type != SHT_SYMTAB) {
37164 continue;
37165@@ -552,7 +600,7 @@ static void print_absolute_symbols(void)
37166
37167 static void print_absolute_relocs(void)
37168 {
37169- int i, printed = 0;
37170+ unsigned int i, printed = 0;
37171 const char *format;
37172
37173 if (ELF_BITS == 64)
37174@@ -565,7 +613,7 @@ static void print_absolute_relocs(void)
37175 struct section *sec_applies, *sec_symtab;
37176 char *sym_strtab;
37177 Elf_Sym *sh_symtab;
37178- int j;
37179+ unsigned int j;
37180 if (sec->shdr.sh_type != SHT_REL_TYPE) {
37181 continue;
37182 }
37183@@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
37184 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
37185 Elf_Sym *sym, const char *symname))
37186 {
37187- int i;
37188+ unsigned int i;
37189 /* Walk through the relocations */
37190 for (i = 0; i < ehdr.e_shnum; i++) {
37191 char *sym_strtab;
37192 Elf_Sym *sh_symtab;
37193 struct section *sec_applies, *sec_symtab;
37194- int j;
37195+ unsigned int j;
37196 struct section *sec = &secs[i];
37197
37198 if (sec->shdr.sh_type != SHT_REL_TYPE) {
37199@@ -822,6 +870,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
37200 {
37201 unsigned r_type = ELF32_R_TYPE(rel->r_info);
37202 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
37203+ char *sym_strtab = sec->link->link->strtab;
37204+
37205+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
37206+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
37207+ return 0;
37208+
37209+#ifdef CONFIG_PAX_KERNEXEC
37210+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
37211+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
37212+ return 0;
37213+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
37214+ return 0;
37215+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
37216+ return 0;
37217+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
37218+ return 0;
37219+#endif
37220
37221 switch (r_type) {
37222 case R_386_NONE:
37223@@ -960,7 +1025,7 @@ static int write32_as_text(uint32_t v, FILE *f)
37224
37225 static void emit_relocs(int as_text, int use_real_mode)
37226 {
37227- int i;
37228+ unsigned int i;
37229 int (*write_reloc)(uint32_t, FILE *) = write32;
37230 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
37231 const char *symname);
37232@@ -1060,10 +1125,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
37233 {
37234 regex_init(use_real_mode);
37235 read_ehdr(fp);
37236+ read_phdrs(fp);
37237 read_shdrs(fp);
37238 read_strtabs(fp);
37239 read_symtabs(fp);
37240- read_relocs(fp);
37241+ read_relocs(fp, use_real_mode);
37242 if (ELF_BITS == 64)
37243 percpu_init();
37244 if (show_absolute_syms) {
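The relocs.c additions fit together as follows: read_phdrs() loads the ELF program headers so that, on CONFIG_X86_32, read_relocs() can rebase every relocation offset by CONFIG_PAGE_OFFSET + p_paddr - p_vaddr of the PT_LOAD segment containing its section. A self-contained sketch of that program-header walk against an arbitrary ELF64 binary; unlike relocs.c it assumes native endianness and skips the elf_*_to_cpu() conversions:

#include <elf.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/bin/true";
	FILE *fp = fopen(path, "rb");
	Elf64_Ehdr ehdr;
	Elf64_Phdr *phdr;
	unsigned int i;

	if (!fp || fread(&ehdr, sizeof(ehdr), 1, fp) != 1) {
		perror(path);
		return 1;
	}
	phdr = calloc(ehdr.e_phnum, sizeof(*phdr));
	if (!phdr || fseek(fp, ehdr.e_phoff, SEEK_SET) < 0 ||
	    fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
		perror("program headers");
		return 1;
	}
	for (i = 0; i < ehdr.e_phnum; i++)
		if (phdr[i].p_type == PT_LOAD)
			printf("PT_LOAD vaddr=%#llx paddr=%#llx base adj=%#llx\n",
			       (unsigned long long)phdr[i].p_vaddr,
			       (unsigned long long)phdr[i].p_paddr,
			       (unsigned long long)(phdr[i].p_paddr -
						    phdr[i].p_vaddr));
	free(phdr);
	fclose(fp);
	return 0;
}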
37245diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
37246index f40281e..92728c9 100644
37247--- a/arch/x86/um/mem_32.c
37248+++ b/arch/x86/um/mem_32.c
37249@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
37250 gate_vma.vm_start = FIXADDR_USER_START;
37251 gate_vma.vm_end = FIXADDR_USER_END;
37252 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
37253- gate_vma.vm_page_prot = __P101;
37254+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
37255
37256 return 0;
37257 }
37258diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
37259index 80ffa5b..a33bd15 100644
37260--- a/arch/x86/um/tls_32.c
37261+++ b/arch/x86/um/tls_32.c
37262@@ -260,7 +260,7 @@ out:
37263 if (unlikely(task == current &&
37264 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
37265 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
37266- "without flushed TLS.", current->pid);
37267+ "without flushed TLS.", task_pid_nr(current));
37268 }
37269
37270 return 0;
37271diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
37272index 61b04fe..3134230 100644
37273--- a/arch/x86/vdso/Makefile
37274+++ b/arch/x86/vdso/Makefile
37275@@ -170,7 +170,7 @@ quiet_cmd_vdso = VDSO $@
37276 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
37277 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
37278
37279-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
37280+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
37281 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
37282 GCOV_PROFILE := n
37283
37284diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
37285index e4f7781..ab5ab26 100644
37286--- a/arch/x86/vdso/vdso32-setup.c
37287+++ b/arch/x86/vdso/vdso32-setup.c
37288@@ -14,6 +14,7 @@
37289 #include <asm/cpufeature.h>
37290 #include <asm/processor.h>
37291 #include <asm/vdso.h>
37292+#include <asm/mman.h>
37293
37294 #ifdef CONFIG_COMPAT_VDSO
37295 #define VDSO_DEFAULT 0
37296diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
37297index 5a5176d..e570acd 100644
37298--- a/arch/x86/vdso/vma.c
37299+++ b/arch/x86/vdso/vma.c
37300@@ -16,10 +16,9 @@
37301 #include <asm/vdso.h>
37302 #include <asm/page.h>
37303 #include <asm/hpet.h>
37304+#include <asm/mman.h>
37305
37306 #if defined(CONFIG_X86_64)
37307-unsigned int __read_mostly vdso64_enabled = 1;
37308-
37309 extern unsigned short vdso_sync_cpuid;
37310 #endif
37311
37312@@ -101,6 +100,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
37313 .pages = no_pages,
37314 };
37315
37316+#ifdef CONFIG_PAX_RANDMMAP
37317+ if (mm->pax_flags & MF_PAX_RANDMMAP)
37318+ calculate_addr = false;
37319+#endif
37320+
37321 if (calculate_addr) {
37322 addr = vdso_addr(current->mm->start_stack,
37323 image->sym_end_mapping);
37324@@ -110,13 +114,13 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
37325
37326 down_write(&mm->mmap_sem);
37327
37328- addr = get_unmapped_area(NULL, addr, image->sym_end_mapping, 0, 0);
37329+ addr = get_unmapped_area(NULL, addr, image->sym_end_mapping, 0, MAP_EXECUTABLE);
37330 if (IS_ERR_VALUE(addr)) {
37331 ret = addr;
37332 goto up_fail;
37333 }
37334
37335- current->mm->context.vdso = (void __user *)addr;
37336+ mm->context.vdso = addr;
37337
37338 /*
37339 * MAYWRITE to allow gdb to COW and set breakpoints
37340@@ -161,15 +165,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
37341 hpet_address >> PAGE_SHIFT,
37342 PAGE_SIZE,
37343 pgprot_noncached(PAGE_READONLY));
37344-
37345- if (ret)
37346- goto up_fail;
37347 }
37348 #endif
37349
37350 up_fail:
37351 if (ret)
37352- current->mm->context.vdso = NULL;
37353+ current->mm->context.vdso = 0;
37354
37355 up_write(&mm->mmap_sem);
37356 return ret;
37357@@ -189,8 +190,8 @@ static int load_vdso32(void)
37358
37359 if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
37360 current_thread_info()->sysenter_return =
37361- current->mm->context.vdso +
37362- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
37363+ (void __force_user *)(current->mm->context.vdso +
37364+ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
37365
37366 return 0;
37367 }
37368@@ -199,9 +200,6 @@ static int load_vdso32(void)
37369 #ifdef CONFIG_X86_64
37370 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
37371 {
37372- if (!vdso64_enabled)
37373- return 0;
37374-
37375 return map_vdso(&vdso_image_64, true);
37376 }
37377
37378@@ -210,12 +208,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
37379 int uses_interp)
37380 {
37381 #ifdef CONFIG_X86_X32_ABI
37382- if (test_thread_flag(TIF_X32)) {
37383- if (!vdso64_enabled)
37384- return 0;
37385-
37386+ if (test_thread_flag(TIF_X32))
37387 return map_vdso(&vdso_image_x32, true);
37388- }
37389 #endif
37390
37391 return load_vdso32();
37392@@ -227,12 +221,3 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
37393 return load_vdso32();
37394 }
37395 #endif
37396-
37397-#ifdef CONFIG_X86_64
37398-static __init int vdso_setup(char *s)
37399-{
37400- vdso64_enabled = simple_strtoul(s, NULL, 0);
37401- return 0;
37402-}
37403-__setup("vdso=", vdso_setup);
37404-#endif
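
[annotation] The vma.c hunks above make map_vdso() skip its preferred-address calculation whenever PAX_RANDMMAP is active, so the vdso lands wherever get_unmapped_area() places it under the randomized mmap base, and the vdso64_enabled knob (which could re-pin the mapping) is removed. A minimal userspace analogue of the same idea, assuming nothing beyond POSIX mmap: a NULL hint lets the kernel choose a (randomized, under ASLR) address, while a fixed hint corresponds to the calculate_addr path being disabled.

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t len = (size_t)sysconf(_SC_PAGESIZE);

        /* NULL hint: the kernel picks the address, randomized under ASLR. */
        void *rand_map = mmap(NULL, len, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        /* Fixed hint: the kernel tries to place the mapping here, which is
         * what the preferred-address path amounts to. */
        void *hint_map = mmap((void *)0x70000000, len, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        printf("randomized: %p  hinted: %p\n", rand_map, hint_map);
        return 0;
    }
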
37405diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
37406index e88fda8..76ce7ce 100644
37407--- a/arch/x86/xen/Kconfig
37408+++ b/arch/x86/xen/Kconfig
37409@@ -9,6 +9,7 @@ config XEN
37410 select XEN_HAVE_PVMMU
37411 depends on X86_64 || (X86_32 && X86_PAE)
37412 depends on X86_TSC
37413+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
37414 help
37415 This is the Linux Xen port. Enabling this will allow the
37416 kernel to boot in a paravirtualized environment under the
37417diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
37418index ffb101e..98c0ecf 100644
37419--- a/arch/x86/xen/enlighten.c
37420+++ b/arch/x86/xen/enlighten.c
37421@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
37422
37423 struct shared_info xen_dummy_shared_info;
37424
37425-void *xen_initial_gdt;
37426-
37427 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
37428 __read_mostly int xen_have_vector_callback;
37429 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
37430@@ -542,8 +540,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
37431 {
37432 unsigned long va = dtr->address;
37433 unsigned int size = dtr->size + 1;
37434- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
37435- unsigned long frames[pages];
37436+ unsigned long frames[65536 / PAGE_SIZE];
37437 int f;
37438
37439 /*
37440@@ -591,8 +588,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
37441 {
37442 unsigned long va = dtr->address;
37443 unsigned int size = dtr->size + 1;
37444- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
37445- unsigned long frames[pages];
37446+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
37447 int f;
37448
37449 /*
37450@@ -600,7 +596,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
37451 * 8-byte entries, or 16 4k pages..
37452 */
37453
37454- BUG_ON(size > 65536);
37455+ BUG_ON(size > GDT_SIZE);
37456 BUG_ON(va & ~PAGE_MASK);
37457
37458 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
37459@@ -989,7 +985,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
37460 return 0;
37461 }
37462
37463-static void set_xen_basic_apic_ops(void)
37464+static void __init set_xen_basic_apic_ops(void)
37465 {
37466 apic->read = xen_apic_read;
37467 apic->write = xen_apic_write;
37468@@ -1295,30 +1291,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
37469 #endif
37470 };
37471
37472-static void xen_reboot(int reason)
37473+static __noreturn void xen_reboot(int reason)
37474 {
37475 struct sched_shutdown r = { .reason = reason };
37476
37477- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
37478- BUG();
37479+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
37480+ BUG();
37481 }
37482
37483-static void xen_restart(char *msg)
37484+static __noreturn void xen_restart(char *msg)
37485 {
37486 xen_reboot(SHUTDOWN_reboot);
37487 }
37488
37489-static void xen_emergency_restart(void)
37490+static __noreturn void xen_emergency_restart(void)
37491 {
37492 xen_reboot(SHUTDOWN_reboot);
37493 }
37494
37495-static void xen_machine_halt(void)
37496+static __noreturn void xen_machine_halt(void)
37497 {
37498 xen_reboot(SHUTDOWN_poweroff);
37499 }
37500
37501-static void xen_machine_power_off(void)
37502+static __noreturn void xen_machine_power_off(void)
37503 {
37504 if (pm_power_off)
37505 pm_power_off();
37506@@ -1568,7 +1564,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
37507 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
37508
37509 /* Work out if we support NX */
37510- x86_configure_nx();
37511+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
37512+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
37513+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
37514+ unsigned l, h;
37515+
37516+ __supported_pte_mask |= _PAGE_NX;
37517+ rdmsr(MSR_EFER, l, h);
37518+ l |= EFER_NX;
37519+ wrmsr(MSR_EFER, l, h);
37520+ }
37521+#endif
37522
37523 /* Get mfn list */
37524 xen_build_dynamic_phys_to_machine();
37525@@ -1596,13 +1602,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
37526
37527 machine_ops = xen_machine_ops;
37528
37529- /*
37530- * The only reliable way to retain the initial address of the
37531- * percpu gdt_page is to remember it here, so we can go and
37532- * mark it RW later, when the initial percpu area is freed.
37533- */
37534- xen_initial_gdt = &per_cpu(gdt_page, 0);
37535-
37536 xen_smp_init();
37537
37538 #ifdef CONFIG_ACPI_NUMA
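
[annotation] Two enlighten.c hunks above replace `unsigned long frames[pages]`, a variable-length array whose size derives from a descriptor supplied at runtime, with a fixed array bounded by the worst case (GDT_SIZE), backed by a BUG_ON for oversize input. A minimal sketch of that pattern with hypothetical names; the point is that stack usage no longer scales with external input:

    #include <assert.h>
    #include <string.h>

    #define PAGE_SIZE 4096u
    #define GDT_SIZE  (16 * PAGE_SIZE)   /* assumed worst case */

    static void load_frames(unsigned int size)
    {
        /* Fixed worst-case array instead of frames[size / PAGE_SIZE]. */
        unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];

        assert(size <= GDT_SIZE);        /* mirrors BUG_ON(size > GDT_SIZE) */
        memset(frames, 0, sizeof(frames));
        /* ... fill frames[0 .. (size + PAGE_SIZE - 1) / PAGE_SIZE - 1] ... */
    }

    int main(void) { load_frames(8 * PAGE_SIZE); return 0; }
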
37539diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
37540index e8a1201..046c66c 100644
37541--- a/arch/x86/xen/mmu.c
37542+++ b/arch/x86/xen/mmu.c
37543@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
37544 return val;
37545 }
37546
37547-static pteval_t pte_pfn_to_mfn(pteval_t val)
37548+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
37549 {
37550 if (val & _PAGE_PRESENT) {
37551 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
37552@@ -1904,6 +1904,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
37553 /* L3_k[510] -> level2_kernel_pgt
37554 * L3_i[511] -> level2_fixmap_pgt */
37555 convert_pfn_mfn(level3_kernel_pgt);
37556+ convert_pfn_mfn(level3_vmalloc_start_pgt);
37557+ convert_pfn_mfn(level3_vmalloc_end_pgt);
37558+ convert_pfn_mfn(level3_vmemmap_pgt);
37559 }
37560 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
37561 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
37562@@ -1933,8 +1936,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
37563 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
37564 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
37565 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
37566+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
37567+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
37568+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
37569 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
37570 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
37571+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
37572 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
37573 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
37574
37575@@ -2120,6 +2127,7 @@ static void __init xen_post_allocator_init(void)
37576 pv_mmu_ops.set_pud = xen_set_pud;
37577 #if PAGETABLE_LEVELS == 4
37578 pv_mmu_ops.set_pgd = xen_set_pgd;
37579+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
37580 #endif
37581
37582 /* This will work as long as patching hasn't happened yet
37583@@ -2198,6 +2206,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
37584 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
37585 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
37586 .set_pgd = xen_set_pgd_hyper,
37587+ .set_pgd_batched = xen_set_pgd_hyper,
37588
37589 .alloc_pud = xen_alloc_pmd_init,
37590 .release_pud = xen_release_pmd_init,
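
[annotation] The mmu.c hunks wire the new set_pgd_batched slot (a PaX addition used for batched top-level page-table updates) to the same Xen handlers as set_pgd, so batched updates still go through the paravirt path. A sketch of routing one handler into two slots of a function-pointer ops table; the names below are illustrative only:

    #include <stdio.h>

    struct mmu_ops {
        void (*set_pgd)(unsigned long val);
        void (*set_pgd_batched)(unsigned long val);  /* new slot */
    };

    static void xen_set_pgd(unsigned long val)
    {
        printf("hypercall: set pgd to %#lx\n", val);
    }

    /* Single and batched paths share the same paravirt handler. */
    static struct mmu_ops ops = {
        .set_pgd         = xen_set_pgd,
        .set_pgd_batched = xen_set_pgd,
    };

    int main(void)
    {
        ops.set_pgd(0x1000);
        ops.set_pgd_batched(0x2000);
        return 0;
    }
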
37591diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
37592index 7005974..54fb05f 100644
37593--- a/arch/x86/xen/smp.c
37594+++ b/arch/x86/xen/smp.c
37595@@ -283,17 +283,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
37596
37597 if (xen_pv_domain()) {
37598 if (!xen_feature(XENFEAT_writable_page_tables))
37599- /* We've switched to the "real" per-cpu gdt, so make
37600- * sure the old memory can be recycled. */
37601- make_lowmem_page_readwrite(xen_initial_gdt);
37602-
37603 #ifdef CONFIG_X86_32
37604 /*
37605 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
37606 * expects __USER_DS
37607 */
37608- loadsegment(ds, __USER_DS);
37609- loadsegment(es, __USER_DS);
37610+ loadsegment(ds, __KERNEL_DS);
37611+ loadsegment(es, __KERNEL_DS);
37612 #endif
37613
37614 xen_filter_cpu_maps();
37615@@ -372,7 +368,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
37616 #ifdef CONFIG_X86_32
37617 /* Note: PVH is not yet supported on x86_32. */
37618 ctxt->user_regs.fs = __KERNEL_PERCPU;
37619- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
37620+ savesegment(gs, ctxt->user_regs.gs);
37621 #endif
37622 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
37623
37624@@ -381,8 +377,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
37625 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
37626 ctxt->flags = VGCF_IN_KERNEL;
37627 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
37628- ctxt->user_regs.ds = __USER_DS;
37629- ctxt->user_regs.es = __USER_DS;
37630+ ctxt->user_regs.ds = __KERNEL_DS;
37631+ ctxt->user_regs.es = __KERNEL_DS;
37632 ctxt->user_regs.ss = __KERNEL_DS;
37633
37634 xen_copy_trap_info(ctxt->trap_ctxt);
37635@@ -437,14 +433,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
37636 int rc;
37637
37638 per_cpu(current_task, cpu) = idle;
37639+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
37640 #ifdef CONFIG_X86_32
37641 irq_ctx_init(cpu);
37642 #else
37643 clear_tsk_thread_flag(idle, TIF_FORK);
37644 #endif
37645- per_cpu(kernel_stack, cpu) =
37646- (unsigned long)task_stack_page(idle) -
37647- KERNEL_STACK_OFFSET + THREAD_SIZE;
37648+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
37649
37650 xen_setup_runstate_info(cpu);
37651 xen_setup_timer(cpu);
37652@@ -720,7 +715,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
37653
37654 void __init xen_smp_init(void)
37655 {
37656- smp_ops = xen_smp_ops;
37657+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
37658 xen_fill_possible_map();
37659 }
37660
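
[annotation] `smp_ops = xen_smp_ops` becomes a memcpy through a cast because, under grsecurity's constify plugin, ops structures like smp_ops end up in memory that is read-only after init; deliberate writes go through a short writable window, the pax_open_kernel()/pax_close_kernel() pairs seen elsewhere in this patch. A userspace sketch of that write-window idea using mprotect; all names here are illustrative, and the 4096-byte page size is assumed:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    static void *page;

    static void open_kernel(void)  { mprotect(page, 4096, PROT_READ | PROT_WRITE); }
    static void close_kernel(void) { mprotect(page, 4096, PROT_READ); }

    int main(void)
    {
        page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        strcpy(page, "initial ops");
        close_kernel();                  /* "constified": now read-only */

        open_kernel();                   /* brief writable window */
        memcpy(page, "patched ops", 12);
        close_kernel();

        printf("%s\n", (char *)page);
        return 0;
    }
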
37661diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
37662index fd92a64..1f72641 100644
37663--- a/arch/x86/xen/xen-asm_32.S
37664+++ b/arch/x86/xen/xen-asm_32.S
37665@@ -99,7 +99,7 @@ ENTRY(xen_iret)
37666 pushw %fs
37667 movl $(__KERNEL_PERCPU), %eax
37668 movl %eax, %fs
37669- movl %fs:xen_vcpu, %eax
37670+ mov PER_CPU_VAR(xen_vcpu), %eax
37671 POP_FS
37672 #else
37673 movl %ss:xen_vcpu, %eax
37674diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
37675index 485b695..fda3e7c 100644
37676--- a/arch/x86/xen/xen-head.S
37677+++ b/arch/x86/xen/xen-head.S
37678@@ -39,6 +39,17 @@ ENTRY(startup_xen)
37679 #ifdef CONFIG_X86_32
37680 mov %esi,xen_start_info
37681 mov $init_thread_union+THREAD_SIZE,%esp
37682+#ifdef CONFIG_SMP
37683+ movl $cpu_gdt_table,%edi
37684+ movl $__per_cpu_load,%eax
37685+ movw %ax,__KERNEL_PERCPU + 2(%edi)
37686+ rorl $16,%eax
37687+ movb %al,__KERNEL_PERCPU + 4(%edi)
37688+ movb %ah,__KERNEL_PERCPU + 7(%edi)
37689+ movl $__per_cpu_end - 1,%eax
37690+ subl $__per_cpu_start,%eax
37691+ movw %ax,__KERNEL_PERCPU + 0(%edi)
37692+#endif
37693 #else
37694 mov %rsi,xen_start_info
37695 mov $init_thread_union+THREAD_SIZE,%rsp
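
[annotation] The startup_xen hunk patches the boot GDT's per-cpu segment descriptor by hand: base bits 0-15 land at bytes 2-3, bits 16-23 at byte 4, bits 24-31 at byte 7, and the limit's low word at bytes 0-1, exactly the legacy 8-byte descriptor layout. The same encoding written out in C as a sketch (granularity and access-flag bits are left untouched, as in the assembly):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Write base and limit into a legacy 8-byte x86 segment descriptor. */
    static void set_desc_base_limit(uint8_t d[8], uint32_t base, uint32_t limit)
    {
        d[0] = limit & 0xff;            /* limit  0..7  */
        d[1] = (limit >> 8) & 0xff;     /* limit  8..15 */
        d[2] = base & 0xff;             /* base   0..7  */
        d[3] = (base >> 8) & 0xff;      /* base   8..15 */
        d[4] = (base >> 16) & 0xff;     /* base  16..23 */
        d[7] = (base >> 24) & 0xff;     /* base  24..31 */
    }

    int main(void)
    {
        uint8_t desc[8];
        memset(desc, 0, sizeof(desc));
        set_desc_base_limit(desc, 0xdeadbeef, 0xffff);
        for (int i = 0; i < 8; i++)
            printf("%02x ", desc[i]);
        putchar('\n');
        return 0;
    }
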
37696diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
37697index 97d8765..c4526ec 100644
37698--- a/arch/x86/xen/xen-ops.h
37699+++ b/arch/x86/xen/xen-ops.h
37700@@ -10,8 +10,6 @@
37701 extern const char xen_hypervisor_callback[];
37702 extern const char xen_failsafe_callback[];
37703
37704-extern void *xen_initial_gdt;
37705-
37706 struct trap_info;
37707 void xen_copy_trap_info(struct trap_info *traps);
37708
37709diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
37710index 525bd3d..ef888b1 100644
37711--- a/arch/xtensa/variants/dc232b/include/variant/core.h
37712+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
37713@@ -119,9 +119,9 @@
37714 ----------------------------------------------------------------------*/
37715
37716 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
37717-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
37718 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
37719 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
37720+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
37721
37722 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
37723 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
37724diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
37725index 2f33760..835e50a 100644
37726--- a/arch/xtensa/variants/fsf/include/variant/core.h
37727+++ b/arch/xtensa/variants/fsf/include/variant/core.h
37728@@ -11,6 +11,7 @@
37729 #ifndef _XTENSA_CORE_H
37730 #define _XTENSA_CORE_H
37731
37732+#include <linux/const.h>
37733
37734 /****************************************************************************
37735 Parameters Useful for Any Code, USER or PRIVILEGED
37736@@ -112,9 +113,9 @@
37737 ----------------------------------------------------------------------*/
37738
37739 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
37740-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
37741 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
37742 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
37743+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
37744
37745 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
37746 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
37747diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
37748index af00795..2bb8105 100644
37749--- a/arch/xtensa/variants/s6000/include/variant/core.h
37750+++ b/arch/xtensa/variants/s6000/include/variant/core.h
37751@@ -11,6 +11,7 @@
37752 #ifndef _XTENSA_CORE_CONFIGURATION_H
37753 #define _XTENSA_CORE_CONFIGURATION_H
37754
37755+#include <linux/const.h>
37756
37757 /****************************************************************************
37758 Parameters Useful for Any Code, USER or PRIVILEGED
37759@@ -118,9 +119,9 @@
37760 ----------------------------------------------------------------------*/
37761
37762 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
37763-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
37764 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
37765 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
37766+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
37767
37768 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
37769 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
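
[annotation] All three xtensa variant headers get the same treatment: XCHAL_DCACHE_LINESIZE stops being an independent literal and is derived from XCHAL_DCACHE_LINEWIDTH as (_AC(1,UL) << width), so the two constants can never drift apart, and linux/const.h makes the expression usable from assembly as well. A sketch of the invariant with a simplified stand-in for _AC (the real macro also handles the __ASSEMBLY__ case):

    #include <assert.h>

    #define _AC(x, suffix) x##suffix       /* simplified stand-in */

    #define DCACHE_LINEWIDTH 5
    #define DCACHE_LINESIZE  (_AC(1, UL) << DCACHE_LINEWIDTH)

    static_assert(DCACHE_LINESIZE == 32, "line size must equal 1 << linewidth");

    int main(void) { return 0; }
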
37770diff --git a/block/bio.c b/block/bio.c
37771index 0ec61c9..93b94060 100644
37772--- a/block/bio.c
37773+++ b/block/bio.c
37774@@ -1159,7 +1159,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
37775 /*
37776 * Overflow, abort
37777 */
37778- if (end < start)
37779+ if (end < start || end - start > INT_MAX - nr_pages)
37780 return ERR_PTR(-EINVAL);
37781
37782 nr_pages += end - start;
37783@@ -1293,7 +1293,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
37784 /*
37785 * Overflow, abort
37786 */
37787- if (end < start)
37788+ if (end < start || end - start > INT_MAX - nr_pages)
37789 return ERR_PTR(-EINVAL);
37790
37791 nr_pages += end - start;
37792@@ -1555,7 +1555,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
37793 const int read = bio_data_dir(bio) == READ;
37794 struct bio_map_data *bmd = bio->bi_private;
37795 int i;
37796- char *p = bmd->sgvecs[0].iov_base;
37797+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
37798
37799 bio_for_each_segment_all(bvec, bio, i) {
37800 char *addr = page_address(bvec->bv_page);
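
[annotation] Both bio.c hunks tighten the wraparound check: besides rejecting end < start, they ensure the new page count cannot push the running nr_pages total past INT_MAX. A standalone sketch of that guard, assuming the same signed accumulator:

    #include <limits.h>
    #include <stdio.h>

    /* Returns 0 on success, -1 if adding the range would overflow an int. */
    static int add_range(int *nr_pages, unsigned long start, unsigned long end)
    {
        if (end < start || end - start > (unsigned long)(INT_MAX - *nr_pages))
            return -1;
        *nr_pages += (int)(end - start);
        return 0;
    }

    int main(void)
    {
        int nr = 0;
        printf("%d\n", add_range(&nr, 0, 10));                     /* 0: ok */
        printf("%d\n", add_range(&nr, 0, (unsigned long)INT_MAX)); /* -1    */
        return 0;
    }
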
37801diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
37802index 28d227c..d4c0bad 100644
37803--- a/block/blk-cgroup.c
37804+++ b/block/blk-cgroup.c
37805@@ -822,7 +822,7 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
37806 static struct cgroup_subsys_state *
37807 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
37808 {
37809- static atomic64_t id_seq = ATOMIC64_INIT(0);
37810+ static atomic64_unchecked_t id_seq = ATOMIC64_INIT(0);
37811 struct blkcg *blkcg;
37812
37813 if (!parent_css) {
37814@@ -836,7 +836,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
37815
37816 blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
37817 blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
37818- blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
37819+ blkcg->id = atomic64_inc_return_unchecked(&id_seq); /* root is 0, start from 1 */
37820 done:
37821 spin_lock_init(&blkcg->lock);
37822 INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
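
[annotation] atomic_t and atomic64_t become their *_unchecked variants throughout this patch wherever the counter is purely statistical: the blkcg id sequence here, ata_print_id, the GHES sequence number, and the per-VCC stats across the atm drivers below. Under PAX_REFCOUNT the plain atomics trap on overflow to stop reference-count exploits; the unchecked flavour opts benign counters out of that instrumentation. A userspace sketch of the split using GCC atomic builtins; the type and helper are modelled on, not taken from, the patch:

    #include <stdio.h>

    typedef struct { volatile long v; } atomic64_unchecked_t;

    /* Plain wrapping increment: fine for statistics, never used for
     * refcounts, which under PAX_REFCOUNT would trap on overflow. */
    static long atomic64_inc_return_unchecked(atomic64_unchecked_t *a)
    {
        return __atomic_add_fetch(&a->v, 1, __ATOMIC_SEQ_CST);
    }

    int main(void)
    {
        static atomic64_unchecked_t id_seq;   /* root is 0, ids start at 1 */
        printf("%ld\n", atomic64_inc_return_unchecked(&id_seq));
        printf("%ld\n", atomic64_inc_return_unchecked(&id_seq));
        return 0;
    }
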
37823diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
37824index 0736729..2ec3b48 100644
37825--- a/block/blk-iopoll.c
37826+++ b/block/blk-iopoll.c
37827@@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
37828 }
37829 EXPORT_SYMBOL(blk_iopoll_complete);
37830
37831-static void blk_iopoll_softirq(struct softirq_action *h)
37832+static __latent_entropy void blk_iopoll_softirq(void)
37833 {
37834 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
37835 int rearm = 0, budget = blk_iopoll_budget;
37836diff --git a/block/blk-map.c b/block/blk-map.c
37837index f890d43..97b0482 100644
37838--- a/block/blk-map.c
37839+++ b/block/blk-map.c
37840@@ -300,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
37841 if (!len || !kbuf)
37842 return -EINVAL;
37843
37844- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
37845+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
37846 if (do_copy)
37847 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
37848 else
37849diff --git a/block/blk-softirq.c b/block/blk-softirq.c
37850index 53b1737..08177d2e 100644
37851--- a/block/blk-softirq.c
37852+++ b/block/blk-softirq.c
37853@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
37854 * Softirq action handler - move entries to local list and loop over them
37855 * while passing them to the queue registered handler.
37856 */
37857-static void blk_done_softirq(struct softirq_action *h)
37858+static __latent_entropy void blk_done_softirq(void)
37859 {
37860 struct list_head *cpu_list, local_list;
37861
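
[annotation] blk_iopoll_softirq and blk_done_softirq gain __latent_entropy and drop their unused softirq_action argument. The attribute belongs to grsecurity's latent_entropy GCC plugin, which instruments marked functions to mix execution-dependent values into an entropy pool; when the plugin is absent the macro must compile away. A sketch of such a degradable annotation, with a hypothetical guard macro:

    #include <stdio.h>

    #if defined(LATENT_ENTROPY_PLUGIN)
    # define __latent_entropy __attribute__((latent_entropy))
    #else
    # define __latent_entropy                 /* plugin absent: no-op */
    #endif

    static __latent_entropy void done_softirq(void)
    {
        puts("softirq handler ran");
    }

    int main(void) { done_softirq(); return 0; }
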
37862diff --git a/block/bsg.c b/block/bsg.c
37863index ff46add..c4ba8ee 100644
37864--- a/block/bsg.c
37865+++ b/block/bsg.c
37866@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
37867 struct sg_io_v4 *hdr, struct bsg_device *bd,
37868 fmode_t has_write_perm)
37869 {
37870+ unsigned char tmpcmd[sizeof(rq->__cmd)];
37871+ unsigned char *cmdptr;
37872+
37873 if (hdr->request_len > BLK_MAX_CDB) {
37874 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
37875 if (!rq->cmd)
37876 return -ENOMEM;
37877- }
37878+ cmdptr = rq->cmd;
37879+ } else
37880+ cmdptr = tmpcmd;
37881
37882- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
37883+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
37884 hdr->request_len))
37885 return -EFAULT;
37886
37887+ if (cmdptr != rq->cmd)
37888+ memcpy(rq->cmd, cmdptr, hdr->request_len);
37889+
37890 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
37891 if (blk_verify_command(rq->cmd, has_write_perm))
37892 return -EPERM;
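
[annotation] blk_fill_sgv4_hdr_rq (and the scsi_ioctl.c hunks further down) stop copy_from_user()ing straight into rq->cmd: when the command fits the request's inline __cmd array, the bytes are staged in an equally sized stack buffer and memcpy'd over afterwards, so the userland copy targets a plain local object of known size, which keeps PAX_USERCOPY's object checks happy. A userspace sketch of the staging pattern, with hypothetical names and a simulated copy_from_user:

    #include <stdio.h>
    #include <string.h>

    #define INLINE_CMD_LEN 32

    /* Stand-in for copy_from_user(); returns 0 on success. */
    static int copy_from_user_sim(void *dst, const void *src, size_t n)
    {
        memcpy(dst, src, n);
        return 0;
    }

    static int fill_cmd(unsigned char *cmd, size_t len, const void *user_buf)
    {
        unsigned char tmpcmd[INLINE_CMD_LEN];

        if (len > sizeof(tmpcmd))
            return -1;
        if (copy_from_user_sim(tmpcmd, user_buf, len))  /* copy hits a local */
            return -1;
        memcpy(cmd, tmpcmd, len);                       /* then the real target */
        return 0;
    }

    int main(void)
    {
        unsigned char cmd[INLINE_CMD_LEN];
        const unsigned char ubuf[6] = { 0x12, 0, 0, 0, 36, 0 };
        printf("%d\n", fill_cmd(cmd, sizeof(ubuf), ubuf));
        return 0;
    }
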
37893diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
37894index a0926a6..b2b14b2 100644
37895--- a/block/compat_ioctl.c
37896+++ b/block/compat_ioctl.c
37897@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
37898 cgc = compat_alloc_user_space(sizeof(*cgc));
37899 cgc32 = compat_ptr(arg);
37900
37901- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
37902+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
37903 get_user(data, &cgc32->buffer) ||
37904 put_user(compat_ptr(data), &cgc->buffer) ||
37905 copy_in_user(&cgc->buflen, &cgc32->buflen,
37906@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
37907 err |= __get_user(f->spec1, &uf->spec1);
37908 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
37909 err |= __get_user(name, &uf->name);
37910- f->name = compat_ptr(name);
37911+ f->name = (void __force_kernel *)compat_ptr(name);
37912 if (err) {
37913 err = -EFAULT;
37914 goto out;
37915diff --git a/block/genhd.c b/block/genhd.c
37916index 791f419..89f21c4 100644
37917--- a/block/genhd.c
37918+++ b/block/genhd.c
37919@@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char *buf)
37920
37921 /*
37922 * Register device numbers dev..(dev+range-1)
37923- * range must be nonzero
37924+ * Noop if @range is zero.
37925 * The hash chain is sorted on range, so that subranges can override.
37926 */
37927 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
37928 struct kobject *(*probe)(dev_t, int *, void *),
37929 int (*lock)(dev_t, void *), void *data)
37930 {
37931- kobj_map(bdev_map, devt, range, module, probe, lock, data);
37932+ if (range)
37933+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
37934 }
37935
37936 EXPORT_SYMBOL(blk_register_region);
37937
37938+/* undo blk_register_region(), noop if @range is zero */
37939 void blk_unregister_region(dev_t devt, unsigned long range)
37940 {
37941- kobj_unmap(bdev_map, devt, range);
37942+ if (range)
37943+ kobj_unmap(bdev_map, devt, range);
37944 }
37945
37946 EXPORT_SYMBOL(blk_unregister_region);
37947diff --git a/block/partitions/efi.c b/block/partitions/efi.c
37948index dc51f46..d5446a8 100644
37949--- a/block/partitions/efi.c
37950+++ b/block/partitions/efi.c
37951@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
37952 if (!gpt)
37953 return NULL;
37954
37955+ if (!le32_to_cpu(gpt->num_partition_entries))
37956+ return NULL;
37957+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
37958+ if (!pte)
37959+ return NULL;
37960+
37961 count = le32_to_cpu(gpt->num_partition_entries) *
37962 le32_to_cpu(gpt->sizeof_partition_entry);
37963- if (!count)
37964- return NULL;
37965- pte = kmalloc(count, GFP_KERNEL);
37966- if (!pte)
37967- return NULL;
37968-
37969 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
37970 (u8 *) pte, count) < count) {
37971 kfree(pte);
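
[annotation] alloc_read_gpt_entries switches from kmalloc(entries * entry_size) to kcalloc(entries, entry_size, ...), so the multiplication is overflow-checked by the allocator instead of silently wrapping on a hostile GPT header. A sketch of the check a kcalloc-style allocator performs, in plain C:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* calloc-style allocation with an explicit multiplication guard. */
    static void *checked_alloc(size_t n, size_t size)
    {
        if (size && n > SIZE_MAX / size)  /* n * size would overflow */
            return NULL;
        return calloc(n, size);           /* C's calloc must check this too */
    }

    int main(void)
    {
        void *ok  = checked_alloc(128, 128);
        void *bad = checked_alloc(SIZE_MAX / 2, 4);
        printf("ok=%p bad=%p\n", ok, bad);
        free(ok);
        return 0;
    }
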
37972diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
37973index 14695c6..27a4636 100644
37974--- a/block/scsi_ioctl.c
37975+++ b/block/scsi_ioctl.c
37976@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
37977 return put_user(0, p);
37978 }
37979
37980-static int sg_get_timeout(struct request_queue *q)
37981+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
37982 {
37983 return jiffies_to_clock_t(q->sg_timeout);
37984 }
37985@@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
37986 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
37987 struct sg_io_hdr *hdr, fmode_t mode)
37988 {
37989- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
37990+ unsigned char tmpcmd[sizeof(rq->__cmd)];
37991+ unsigned char *cmdptr;
37992+
37993+ if (rq->cmd != rq->__cmd)
37994+ cmdptr = rq->cmd;
37995+ else
37996+ cmdptr = tmpcmd;
37997+
37998+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
37999 return -EFAULT;
38000+
38001+ if (cmdptr != rq->cmd)
38002+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
38003+
38004 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
38005 return -EPERM;
38006
38007@@ -413,6 +425,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
38008 int err;
38009 unsigned int in_len, out_len, bytes, opcode, cmdlen;
38010 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
38011+ unsigned char tmpcmd[sizeof(rq->__cmd)];
38012+ unsigned char *cmdptr;
38013
38014 if (!sic)
38015 return -EINVAL;
38016@@ -446,9 +460,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
38017 */
38018 err = -EFAULT;
38019 rq->cmd_len = cmdlen;
38020- if (copy_from_user(rq->cmd, sic->data, cmdlen))
38021+
38022+ if (rq->cmd != rq->__cmd)
38023+ cmdptr = rq->cmd;
38024+ else
38025+ cmdptr = tmpcmd;
38026+
38027+ if (copy_from_user(cmdptr, sic->data, cmdlen))
38028 goto error;
38029
38030+ if (rq->cmd != cmdptr)
38031+ memcpy(rq->cmd, cmdptr, cmdlen);
38032+
38033 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
38034 goto error;
38035
38036diff --git a/crypto/cryptd.c b/crypto/cryptd.c
38037index 7bdd61b..afec999 100644
38038--- a/crypto/cryptd.c
38039+++ b/crypto/cryptd.c
38040@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
38041
38042 struct cryptd_blkcipher_request_ctx {
38043 crypto_completion_t complete;
38044-};
38045+} __no_const;
38046
38047 struct cryptd_hash_ctx {
38048 struct crypto_shash *child;
38049@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
38050
38051 struct cryptd_aead_request_ctx {
38052 crypto_completion_t complete;
38053-};
38054+} __no_const;
38055
38056 static void cryptd_queue_worker(struct work_struct *work);
38057
38058diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
38059index 309d345..1632720 100644
38060--- a/crypto/pcrypt.c
38061+++ b/crypto/pcrypt.c
38062@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
38063 int ret;
38064
38065 pinst->kobj.kset = pcrypt_kset;
38066- ret = kobject_add(&pinst->kobj, NULL, name);
38067+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
38068 if (!ret)
38069 kobject_uevent(&pinst->kobj, KOBJ_ADD);
38070
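
[annotation] The pcrypt hunk passes the instance name through an explicit "%s" instead of handing it to kobject_add() as the format string, closing a classic format-string hole should the name ever contain '%' conversions. The same rule in miniature:

    #include <stdio.h>

    int main(void)
    {
        const char *name = "pcrypt_%n_oops";   /* imagine attacker-influenced */

        /* Dangerous: name would be interpreted as a format string. */
        /* printf(name); */

        /* Safe: name is treated purely as data. */
        printf("%s\n", name);
        return 0;
    }
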
38071diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
38072index 6921c7f..78e1af7 100644
38073--- a/drivers/acpi/acpica/hwxfsleep.c
38074+++ b/drivers/acpi/acpica/hwxfsleep.c
38075@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
38076 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
38077
38078 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
38079- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
38080- acpi_hw_extended_sleep},
38081- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
38082- acpi_hw_extended_wake_prep},
38083- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
38084+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
38085+ .extended_function = acpi_hw_extended_sleep},
38086+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
38087+ .extended_function = acpi_hw_extended_wake_prep},
38088+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
38089+ .extended_function = acpi_hw_extended_wake}
38090 };
38091
38092 /*
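
[annotation] The hwxfsleep hunk converts acpi_sleep_dispatch to designated initializers, which keeps the table correct even if the fields of struct acpi_sleep_functions are ever reordered. A minimal sketch of the idiom with stand-in types:

    #include <stdio.h>

    struct sleep_functions {
        int (*legacy_function)(int);
        int (*extended_function)(int);
    };

    static int legacy_sleep(int s)   { return s; }
    static int extended_sleep(int s) { return s + 100; }

    /* Field order in the struct no longer matters for correctness. */
    static const struct sleep_functions dispatch[] = {
        { .legacy_function   = legacy_sleep,
          .extended_function = extended_sleep },
    };

    int main(void)
    {
        printf("%d %d\n", dispatch[0].legacy_function(3),
                          dispatch[0].extended_function(3));
        return 0;
    }
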
38093diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
38094index e5bcd91..74f050d 100644
38095--- a/drivers/acpi/apei/apei-internal.h
38096+++ b/drivers/acpi/apei/apei-internal.h
38097@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
38098 struct apei_exec_ins_type {
38099 u32 flags;
38100 apei_exec_ins_func_t run;
38101-};
38102+} __do_const;
38103
38104 struct apei_exec_context {
38105 u32 ip;
38106diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
38107index dab7cb7..f0d2994 100644
38108--- a/drivers/acpi/apei/ghes.c
38109+++ b/drivers/acpi/apei/ghes.c
38110@@ -500,7 +500,7 @@ static void __ghes_print_estatus(const char *pfx,
38111 const struct acpi_hest_generic *generic,
38112 const struct acpi_generic_status *estatus)
38113 {
38114- static atomic_t seqno;
38115+ static atomic_unchecked_t seqno;
38116 unsigned int curr_seqno;
38117 char pfx_seq[64];
38118
38119@@ -511,7 +511,7 @@ static void __ghes_print_estatus(const char *pfx,
38120 else
38121 pfx = KERN_ERR;
38122 }
38123- curr_seqno = atomic_inc_return(&seqno);
38124+ curr_seqno = atomic_inc_return_unchecked(&seqno);
38125 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
38126 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
38127 pfx_seq, generic->header.source_id);
38128diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
38129index a83e3c6..c3d617f 100644
38130--- a/drivers/acpi/bgrt.c
38131+++ b/drivers/acpi/bgrt.c
38132@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
38133 if (!bgrt_image)
38134 return -ENODEV;
38135
38136- bin_attr_image.private = bgrt_image;
38137- bin_attr_image.size = bgrt_image_size;
38138+ pax_open_kernel();
38139+ *(void **)&bin_attr_image.private = bgrt_image;
38140+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
38141+ pax_close_kernel();
38142
38143 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
38144 if (!bgrt_kobj)
38145diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
38146index 3d8413d..95f638c 100644
38147--- a/drivers/acpi/blacklist.c
38148+++ b/drivers/acpi/blacklist.c
38149@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
38150 u32 is_critical_error;
38151 };
38152
38153-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
38154+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
38155
38156 /*
38157 * POLICY: If *anything* doesn't work, put it on the blacklist.
38158@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
38159 return 0;
38160 }
38161
38162-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
38163+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
38164 {
38165 .callback = dmi_disable_osi_vista,
38166 .ident = "Fujitsu Siemens",
38167diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
38168index c68e724..e863008 100644
38169--- a/drivers/acpi/custom_method.c
38170+++ b/drivers/acpi/custom_method.c
38171@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
38172 struct acpi_table_header table;
38173 acpi_status status;
38174
38175+#ifdef CONFIG_GRKERNSEC_KMEM
38176+ return -EPERM;
38177+#endif
38178+
38179 if (!(*ppos)) {
38180 /* parse the table header to get the table length */
38181 if (count <= sizeof(struct acpi_table_header))
38182diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
38183index 3dca36d..abaf070 100644
38184--- a/drivers/acpi/processor_idle.c
38185+++ b/drivers/acpi/processor_idle.c
38186@@ -952,7 +952,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
38187 {
38188 int i, count = CPUIDLE_DRIVER_STATE_START;
38189 struct acpi_processor_cx *cx;
38190- struct cpuidle_state *state;
38191+ cpuidle_state_no_const *state;
38192 struct cpuidle_driver *drv = &acpi_idle_driver;
38193
38194 if (!pr->flags.power_setup_done)
38195diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
38196index 38cb978..352c761 100644
38197--- a/drivers/acpi/sysfs.c
38198+++ b/drivers/acpi/sysfs.c
38199@@ -423,11 +423,11 @@ static u32 num_counters;
38200 static struct attribute **all_attrs;
38201 static u32 acpi_gpe_count;
38202
38203-static struct attribute_group interrupt_stats_attr_group = {
38204+static attribute_group_no_const interrupt_stats_attr_group = {
38205 .name = "interrupts",
38206 };
38207
38208-static struct kobj_attribute *counter_attrs;
38209+static kobj_attribute_no_const *counter_attrs;
38210
38211 static void delete_gpe_attr_array(void)
38212 {
38213diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
38214index d72ce04..d6ab3c2 100644
38215--- a/drivers/ata/libahci.c
38216+++ b/drivers/ata/libahci.c
38217@@ -1257,7 +1257,7 @@ int ahci_kick_engine(struct ata_port *ap)
38218 }
38219 EXPORT_SYMBOL_GPL(ahci_kick_engine);
38220
38221-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
38222+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
38223 struct ata_taskfile *tf, int is_cmd, u16 flags,
38224 unsigned long timeout_msec)
38225 {
38226diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
38227index 677c0c1..354b89b 100644
38228--- a/drivers/ata/libata-core.c
38229+++ b/drivers/ata/libata-core.c
38230@@ -98,7 +98,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
38231 static void ata_dev_xfermask(struct ata_device *dev);
38232 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
38233
38234-atomic_t ata_print_id = ATOMIC_INIT(0);
38235+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
38236
38237 struct ata_force_param {
38238 const char *name;
38239@@ -4863,7 +4863,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
38240 struct ata_port *ap;
38241 unsigned int tag;
38242
38243- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
38244+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
38245 ap = qc->ap;
38246
38247 qc->flags = 0;
38248@@ -4879,7 +4879,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
38249 struct ata_port *ap;
38250 struct ata_link *link;
38251
38252- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
38253+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
38254 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
38255 ap = qc->ap;
38256 link = qc->dev->link;
38257@@ -5983,6 +5983,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
38258 return;
38259
38260 spin_lock(&lock);
38261+ pax_open_kernel();
38262
38263 for (cur = ops->inherits; cur; cur = cur->inherits) {
38264 void **inherit = (void **)cur;
38265@@ -5996,8 +5997,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
38266 if (IS_ERR(*pp))
38267 *pp = NULL;
38268
38269- ops->inherits = NULL;
38270+ *(struct ata_port_operations **)&ops->inherits = NULL;
38271
38272+ pax_close_kernel();
38273 spin_unlock(&lock);
38274 }
38275
38276@@ -6193,7 +6195,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
38277
38278 /* give ports names and add SCSI hosts */
38279 for (i = 0; i < host->n_ports; i++) {
38280- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
38281+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
38282 host->ports[i]->local_port_no = i + 1;
38283 }
38284
38285diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
38286index 72691fd..ad104c0 100644
38287--- a/drivers/ata/libata-scsi.c
38288+++ b/drivers/ata/libata-scsi.c
38289@@ -4151,7 +4151,7 @@ int ata_sas_port_init(struct ata_port *ap)
38290
38291 if (rc)
38292 return rc;
38293- ap->print_id = atomic_inc_return(&ata_print_id);
38294+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
38295 return 0;
38296 }
38297 EXPORT_SYMBOL_GPL(ata_sas_port_init);
38298diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
38299index 45b5ab3..98446b8 100644
38300--- a/drivers/ata/libata.h
38301+++ b/drivers/ata/libata.h
38302@@ -53,7 +53,7 @@ enum {
38303 ATA_DNXFER_QUIET = (1 << 31),
38304 };
38305
38306-extern atomic_t ata_print_id;
38307+extern atomic_unchecked_t ata_print_id;
38308 extern int atapi_passthru16;
38309 extern int libata_fua;
38310 extern int libata_noacpi;
38311diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
38312index 4edb1a8..84e1658 100644
38313--- a/drivers/ata/pata_arasan_cf.c
38314+++ b/drivers/ata/pata_arasan_cf.c
38315@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
38316 /* Handle platform specific quirks */
38317 if (quirk) {
38318 if (quirk & CF_BROKEN_PIO) {
38319- ap->ops->set_piomode = NULL;
38320+ pax_open_kernel();
38321+ *(void **)&ap->ops->set_piomode = NULL;
38322+ pax_close_kernel();
38323 ap->pio_mask = 0;
38324 }
38325 if (quirk & CF_BROKEN_MWDMA)
38326diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
38327index f9b983a..887b9d8 100644
38328--- a/drivers/atm/adummy.c
38329+++ b/drivers/atm/adummy.c
38330@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
38331 vcc->pop(vcc, skb);
38332 else
38333 dev_kfree_skb_any(skb);
38334- atomic_inc(&vcc->stats->tx);
38335+ atomic_inc_unchecked(&vcc->stats->tx);
38336
38337 return 0;
38338 }
38339diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
38340index f1a9198..f466a4a 100644
38341--- a/drivers/atm/ambassador.c
38342+++ b/drivers/atm/ambassador.c
38343@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
38344 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
38345
38346 // VC layer stats
38347- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
38348+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
38349
38350 // free the descriptor
38351 kfree (tx_descr);
38352@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
38353 dump_skb ("<<<", vc, skb);
38354
38355 // VC layer stats
38356- atomic_inc(&atm_vcc->stats->rx);
38357+ atomic_inc_unchecked(&atm_vcc->stats->rx);
38358 __net_timestamp(skb);
38359 // end of our responsibility
38360 atm_vcc->push (atm_vcc, skb);
38361@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
38362 } else {
38363 PRINTK (KERN_INFO, "dropped over-size frame");
38364 // should we count this?
38365- atomic_inc(&atm_vcc->stats->rx_drop);
38366+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
38367 }
38368
38369 } else {
38370@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
38371 }
38372
38373 if (check_area (skb->data, skb->len)) {
38374- atomic_inc(&atm_vcc->stats->tx_err);
38375+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
38376 return -ENOMEM; // ?
38377 }
38378
38379diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
38380index 0e3f8f9..765a7a5 100644
38381--- a/drivers/atm/atmtcp.c
38382+++ b/drivers/atm/atmtcp.c
38383@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
38384 if (vcc->pop) vcc->pop(vcc,skb);
38385 else dev_kfree_skb(skb);
38386 if (dev_data) return 0;
38387- atomic_inc(&vcc->stats->tx_err);
38388+ atomic_inc_unchecked(&vcc->stats->tx_err);
38389 return -ENOLINK;
38390 }
38391 size = skb->len+sizeof(struct atmtcp_hdr);
38392@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
38393 if (!new_skb) {
38394 if (vcc->pop) vcc->pop(vcc,skb);
38395 else dev_kfree_skb(skb);
38396- atomic_inc(&vcc->stats->tx_err);
38397+ atomic_inc_unchecked(&vcc->stats->tx_err);
38398 return -ENOBUFS;
38399 }
38400 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
38401@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
38402 if (vcc->pop) vcc->pop(vcc,skb);
38403 else dev_kfree_skb(skb);
38404 out_vcc->push(out_vcc,new_skb);
38405- atomic_inc(&vcc->stats->tx);
38406- atomic_inc(&out_vcc->stats->rx);
38407+ atomic_inc_unchecked(&vcc->stats->tx);
38408+ atomic_inc_unchecked(&out_vcc->stats->rx);
38409 return 0;
38410 }
38411
38412@@ -299,7 +299,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
38413 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
38414 read_unlock(&vcc_sklist_lock);
38415 if (!out_vcc) {
38416- atomic_inc(&vcc->stats->tx_err);
38417+ atomic_inc_unchecked(&vcc->stats->tx_err);
38418 goto done;
38419 }
38420 skb_pull(skb,sizeof(struct atmtcp_hdr));
38421@@ -311,8 +311,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
38422 __net_timestamp(new_skb);
38423 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
38424 out_vcc->push(out_vcc,new_skb);
38425- atomic_inc(&vcc->stats->tx);
38426- atomic_inc(&out_vcc->stats->rx);
38427+ atomic_inc_unchecked(&vcc->stats->tx);
38428+ atomic_inc_unchecked(&out_vcc->stats->rx);
38429 done:
38430 if (vcc->pop) vcc->pop(vcc,skb);
38431 else dev_kfree_skb(skb);
38432diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
38433index b1955ba..b179940 100644
38434--- a/drivers/atm/eni.c
38435+++ b/drivers/atm/eni.c
38436@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
38437 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
38438 vcc->dev->number);
38439 length = 0;
38440- atomic_inc(&vcc->stats->rx_err);
38441+ atomic_inc_unchecked(&vcc->stats->rx_err);
38442 }
38443 else {
38444 length = ATM_CELL_SIZE-1; /* no HEC */
38445@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
38446 size);
38447 }
38448 eff = length = 0;
38449- atomic_inc(&vcc->stats->rx_err);
38450+ atomic_inc_unchecked(&vcc->stats->rx_err);
38451 }
38452 else {
38453 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
38454@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
38455 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
38456 vcc->dev->number,vcc->vci,length,size << 2,descr);
38457 length = eff = 0;
38458- atomic_inc(&vcc->stats->rx_err);
38459+ atomic_inc_unchecked(&vcc->stats->rx_err);
38460 }
38461 }
38462 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
38463@@ -767,7 +767,7 @@ rx_dequeued++;
38464 vcc->push(vcc,skb);
38465 pushed++;
38466 }
38467- atomic_inc(&vcc->stats->rx);
38468+ atomic_inc_unchecked(&vcc->stats->rx);
38469 }
38470 wake_up(&eni_dev->rx_wait);
38471 }
38472@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
38473 PCI_DMA_TODEVICE);
38474 if (vcc->pop) vcc->pop(vcc,skb);
38475 else dev_kfree_skb_irq(skb);
38476- atomic_inc(&vcc->stats->tx);
38477+ atomic_inc_unchecked(&vcc->stats->tx);
38478 wake_up(&eni_dev->tx_wait);
38479 dma_complete++;
38480 }
38481diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
38482index 82f2ae0..f205c02 100644
38483--- a/drivers/atm/firestream.c
38484+++ b/drivers/atm/firestream.c
38485@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
38486 }
38487 }
38488
38489- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
38490+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
38491
38492 fs_dprintk (FS_DEBUG_TXMEM, "i");
38493 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
38494@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
38495 #endif
38496 skb_put (skb, qe->p1 & 0xffff);
38497 ATM_SKB(skb)->vcc = atm_vcc;
38498- atomic_inc(&atm_vcc->stats->rx);
38499+ atomic_inc_unchecked(&atm_vcc->stats->rx);
38500 __net_timestamp(skb);
38501 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
38502 atm_vcc->push (atm_vcc, skb);
38503@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
38504 kfree (pe);
38505 }
38506 if (atm_vcc)
38507- atomic_inc(&atm_vcc->stats->rx_drop);
38508+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
38509 break;
38510 case 0x1f: /* Reassembly abort: no buffers. */
38511 /* Silently increment error counter. */
38512 if (atm_vcc)
38513- atomic_inc(&atm_vcc->stats->rx_drop);
38514+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
38515 break;
38516 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
38517 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
38518diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
38519index d4725fc..2d4ea65 100644
38520--- a/drivers/atm/fore200e.c
38521+++ b/drivers/atm/fore200e.c
38522@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
38523 #endif
38524 /* check error condition */
38525 if (*entry->status & STATUS_ERROR)
38526- atomic_inc(&vcc->stats->tx_err);
38527+ atomic_inc_unchecked(&vcc->stats->tx_err);
38528 else
38529- atomic_inc(&vcc->stats->tx);
38530+ atomic_inc_unchecked(&vcc->stats->tx);
38531 }
38532 }
38533
38534@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
38535 if (skb == NULL) {
38536 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
38537
38538- atomic_inc(&vcc->stats->rx_drop);
38539+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38540 return -ENOMEM;
38541 }
38542
38543@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
38544
38545 dev_kfree_skb_any(skb);
38546
38547- atomic_inc(&vcc->stats->rx_drop);
38548+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38549 return -ENOMEM;
38550 }
38551
38552 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
38553
38554 vcc->push(vcc, skb);
38555- atomic_inc(&vcc->stats->rx);
38556+ atomic_inc_unchecked(&vcc->stats->rx);
38557
38558 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
38559
38560@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
38561 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
38562 fore200e->atm_dev->number,
38563 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
38564- atomic_inc(&vcc->stats->rx_err);
38565+ atomic_inc_unchecked(&vcc->stats->rx_err);
38566 }
38567 }
38568
38569@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
38570 goto retry_here;
38571 }
38572
38573- atomic_inc(&vcc->stats->tx_err);
38574+ atomic_inc_unchecked(&vcc->stats->tx_err);
38575
38576 fore200e->tx_sat++;
38577 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
38578diff --git a/drivers/atm/he.c b/drivers/atm/he.c
38579index aa6be26..f70a785 100644
38580--- a/drivers/atm/he.c
38581+++ b/drivers/atm/he.c
38582@@ -1690,7 +1690,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
38583
38584 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
38585 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
38586- atomic_inc(&vcc->stats->rx_drop);
38587+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38588 goto return_host_buffers;
38589 }
38590
38591@@ -1717,7 +1717,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
38592 RBRQ_LEN_ERR(he_dev->rbrq_head)
38593 ? "LEN_ERR" : "",
38594 vcc->vpi, vcc->vci);
38595- atomic_inc(&vcc->stats->rx_err);
38596+ atomic_inc_unchecked(&vcc->stats->rx_err);
38597 goto return_host_buffers;
38598 }
38599
38600@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
38601 vcc->push(vcc, skb);
38602 spin_lock(&he_dev->global_lock);
38603
38604- atomic_inc(&vcc->stats->rx);
38605+ atomic_inc_unchecked(&vcc->stats->rx);
38606
38607 return_host_buffers:
38608 ++pdus_assembled;
38609@@ -2095,7 +2095,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
38610 tpd->vcc->pop(tpd->vcc, tpd->skb);
38611 else
38612 dev_kfree_skb_any(tpd->skb);
38613- atomic_inc(&tpd->vcc->stats->tx_err);
38614+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
38615 }
38616 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
38617 return;
38618@@ -2507,7 +2507,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38619 vcc->pop(vcc, skb);
38620 else
38621 dev_kfree_skb_any(skb);
38622- atomic_inc(&vcc->stats->tx_err);
38623+ atomic_inc_unchecked(&vcc->stats->tx_err);
38624 return -EINVAL;
38625 }
38626
38627@@ -2518,7 +2518,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38628 vcc->pop(vcc, skb);
38629 else
38630 dev_kfree_skb_any(skb);
38631- atomic_inc(&vcc->stats->tx_err);
38632+ atomic_inc_unchecked(&vcc->stats->tx_err);
38633 return -EINVAL;
38634 }
38635 #endif
38636@@ -2530,7 +2530,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38637 vcc->pop(vcc, skb);
38638 else
38639 dev_kfree_skb_any(skb);
38640- atomic_inc(&vcc->stats->tx_err);
38641+ atomic_inc_unchecked(&vcc->stats->tx_err);
38642 spin_unlock_irqrestore(&he_dev->global_lock, flags);
38643 return -ENOMEM;
38644 }
38645@@ -2572,7 +2572,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38646 vcc->pop(vcc, skb);
38647 else
38648 dev_kfree_skb_any(skb);
38649- atomic_inc(&vcc->stats->tx_err);
38650+ atomic_inc_unchecked(&vcc->stats->tx_err);
38651 spin_unlock_irqrestore(&he_dev->global_lock, flags);
38652 return -ENOMEM;
38653 }
38654@@ -2603,7 +2603,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38655 __enqueue_tpd(he_dev, tpd, cid);
38656 spin_unlock_irqrestore(&he_dev->global_lock, flags);
38657
38658- atomic_inc(&vcc->stats->tx);
38659+ atomic_inc_unchecked(&vcc->stats->tx);
38660
38661 return 0;
38662 }
38663diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
38664index 1dc0519..1aadaf7 100644
38665--- a/drivers/atm/horizon.c
38666+++ b/drivers/atm/horizon.c
38667@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
38668 {
38669 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
38670 // VC layer stats
38671- atomic_inc(&vcc->stats->rx);
38672+ atomic_inc_unchecked(&vcc->stats->rx);
38673 __net_timestamp(skb);
38674 // end of our responsibility
38675 vcc->push (vcc, skb);
38676@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
38677 dev->tx_iovec = NULL;
38678
38679 // VC layer stats
38680- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
38681+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
38682
38683 // free the skb
38684 hrz_kfree_skb (skb);
38685diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
38686index b621f56..1e3a799 100644
38687--- a/drivers/atm/idt77252.c
38688+++ b/drivers/atm/idt77252.c
38689@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
38690 else
38691 dev_kfree_skb(skb);
38692
38693- atomic_inc(&vcc->stats->tx);
38694+ atomic_inc_unchecked(&vcc->stats->tx);
38695 }
38696
38697 atomic_dec(&scq->used);
38698@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38699 if ((sb = dev_alloc_skb(64)) == NULL) {
38700 printk("%s: Can't allocate buffers for aal0.\n",
38701 card->name);
38702- atomic_add(i, &vcc->stats->rx_drop);
38703+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
38704 break;
38705 }
38706 if (!atm_charge(vcc, sb->truesize)) {
38707 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
38708 card->name);
38709- atomic_add(i - 1, &vcc->stats->rx_drop);
38710+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
38711 dev_kfree_skb(sb);
38712 break;
38713 }
38714@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38715 ATM_SKB(sb)->vcc = vcc;
38716 __net_timestamp(sb);
38717 vcc->push(vcc, sb);
38718- atomic_inc(&vcc->stats->rx);
38719+ atomic_inc_unchecked(&vcc->stats->rx);
38720
38721 cell += ATM_CELL_PAYLOAD;
38722 }
38723@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38724 "(CDC: %08x)\n",
38725 card->name, len, rpp->len, readl(SAR_REG_CDC));
38726 recycle_rx_pool_skb(card, rpp);
38727- atomic_inc(&vcc->stats->rx_err);
38728+ atomic_inc_unchecked(&vcc->stats->rx_err);
38729 return;
38730 }
38731 if (stat & SAR_RSQE_CRC) {
38732 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
38733 recycle_rx_pool_skb(card, rpp);
38734- atomic_inc(&vcc->stats->rx_err);
38735+ atomic_inc_unchecked(&vcc->stats->rx_err);
38736 return;
38737 }
38738 if (skb_queue_len(&rpp->queue) > 1) {
38739@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38740 RXPRINTK("%s: Can't alloc RX skb.\n",
38741 card->name);
38742 recycle_rx_pool_skb(card, rpp);
38743- atomic_inc(&vcc->stats->rx_err);
38744+ atomic_inc_unchecked(&vcc->stats->rx_err);
38745 return;
38746 }
38747 if (!atm_charge(vcc, skb->truesize)) {
38748@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38749 __net_timestamp(skb);
38750
38751 vcc->push(vcc, skb);
38752- atomic_inc(&vcc->stats->rx);
38753+ atomic_inc_unchecked(&vcc->stats->rx);
38754
38755 return;
38756 }
38757@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38758 __net_timestamp(skb);
38759
38760 vcc->push(vcc, skb);
38761- atomic_inc(&vcc->stats->rx);
38762+ atomic_inc_unchecked(&vcc->stats->rx);
38763
38764 if (skb->truesize > SAR_FB_SIZE_3)
38765 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
38766@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
38767 if (vcc->qos.aal != ATM_AAL0) {
38768 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
38769 card->name, vpi, vci);
38770- atomic_inc(&vcc->stats->rx_drop);
38771+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38772 goto drop;
38773 }
38774
38775 if ((sb = dev_alloc_skb(64)) == NULL) {
38776 printk("%s: Can't allocate buffers for AAL0.\n",
38777 card->name);
38778- atomic_inc(&vcc->stats->rx_err);
38779+ atomic_inc_unchecked(&vcc->stats->rx_err);
38780 goto drop;
38781 }
38782
38783@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
38784 ATM_SKB(sb)->vcc = vcc;
38785 __net_timestamp(sb);
38786 vcc->push(vcc, sb);
38787- atomic_inc(&vcc->stats->rx);
38788+ atomic_inc_unchecked(&vcc->stats->rx);
38789
38790 drop:
38791 skb_pull(queue, 64);
38792@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
38793
38794 if (vc == NULL) {
38795 printk("%s: NULL connection in send().\n", card->name);
38796- atomic_inc(&vcc->stats->tx_err);
38797+ atomic_inc_unchecked(&vcc->stats->tx_err);
38798 dev_kfree_skb(skb);
38799 return -EINVAL;
38800 }
38801 if (!test_bit(VCF_TX, &vc->flags)) {
38802 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
38803- atomic_inc(&vcc->stats->tx_err);
38804+ atomic_inc_unchecked(&vcc->stats->tx_err);
38805 dev_kfree_skb(skb);
38806 return -EINVAL;
38807 }
38808@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
38809 break;
38810 default:
38811 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
38812- atomic_inc(&vcc->stats->tx_err);
38813+ atomic_inc_unchecked(&vcc->stats->tx_err);
38814 dev_kfree_skb(skb);
38815 return -EINVAL;
38816 }
38817
38818 if (skb_shinfo(skb)->nr_frags != 0) {
38819 printk("%s: No scatter-gather yet.\n", card->name);
38820- atomic_inc(&vcc->stats->tx_err);
38821+ atomic_inc_unchecked(&vcc->stats->tx_err);
38822 dev_kfree_skb(skb);
38823 return -EINVAL;
38824 }
38825@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
38826
38827 err = queue_skb(card, vc, skb, oam);
38828 if (err) {
38829- atomic_inc(&vcc->stats->tx_err);
38830+ atomic_inc_unchecked(&vcc->stats->tx_err);
38831 dev_kfree_skb(skb);
38832 return err;
38833 }
38834@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
38835 skb = dev_alloc_skb(64);
38836 if (!skb) {
38837 printk("%s: Out of memory in send_oam().\n", card->name);
38838- atomic_inc(&vcc->stats->tx_err);
38839+ atomic_inc_unchecked(&vcc->stats->tx_err);
38840 return -ENOMEM;
38841 }
38842 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
38843diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
38844index 4217f29..88f547a 100644
38845--- a/drivers/atm/iphase.c
38846+++ b/drivers/atm/iphase.c
38847@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
38848 status = (u_short) (buf_desc_ptr->desc_mode);
38849 if (status & (RX_CER | RX_PTE | RX_OFL))
38850 {
38851- atomic_inc(&vcc->stats->rx_err);
38852+ atomic_inc_unchecked(&vcc->stats->rx_err);
38853 IF_ERR(printk("IA: bad packet, dropping it");)
38854 if (status & RX_CER) {
38855 IF_ERR(printk(" cause: packet CRC error\n");)
38856@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
38857 len = dma_addr - buf_addr;
38858 if (len > iadev->rx_buf_sz) {
38859 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
38860- atomic_inc(&vcc->stats->rx_err);
38861+ atomic_inc_unchecked(&vcc->stats->rx_err);
38862 goto out_free_desc;
38863 }
38864
38865@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
38866 ia_vcc = INPH_IA_VCC(vcc);
38867 if (ia_vcc == NULL)
38868 {
38869- atomic_inc(&vcc->stats->rx_err);
38870+ atomic_inc_unchecked(&vcc->stats->rx_err);
38871 atm_return(vcc, skb->truesize);
38872 dev_kfree_skb_any(skb);
38873 goto INCR_DLE;
38874@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
38875 if ((length > iadev->rx_buf_sz) || (length >
38876 (skb->len - sizeof(struct cpcs_trailer))))
38877 {
38878- atomic_inc(&vcc->stats->rx_err);
38879+ atomic_inc_unchecked(&vcc->stats->rx_err);
38880 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
38881 length, skb->len);)
38882 atm_return(vcc, skb->truesize);
38883@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
38884
38885 IF_RX(printk("rx_dle_intr: skb push");)
38886 vcc->push(vcc,skb);
38887- atomic_inc(&vcc->stats->rx);
38888+ atomic_inc_unchecked(&vcc->stats->rx);
38889 iadev->rx_pkt_cnt++;
38890 }
38891 INCR_DLE:
38892@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
38893 {
38894 struct k_sonet_stats *stats;
38895 stats = &PRIV(_ia_dev[board])->sonet_stats;
38896- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
38897- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
38898- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
38899- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
38900- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
38901- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
38902- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
38903- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
38904- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
38905+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
38906+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
38907+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
38908+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
38909+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
38910+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
38911+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
38912+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
38913+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
38914 }
38915 ia_cmds.status = 0;
38916 break;
38917@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
38918 if ((desc == 0) || (desc > iadev->num_tx_desc))
38919 {
38920 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
38921- atomic_inc(&vcc->stats->tx);
38922+ atomic_inc_unchecked(&vcc->stats->tx);
38923 if (vcc->pop)
38924 vcc->pop(vcc, skb);
38925 else
38926@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
38927 ATM_DESC(skb) = vcc->vci;
38928 skb_queue_tail(&iadev->tx_dma_q, skb);
38929
38930- atomic_inc(&vcc->stats->tx);
38931+ atomic_inc_unchecked(&vcc->stats->tx);
38932 iadev->tx_pkt_cnt++;
38933 /* Increment transaction counter */
38934 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
38935
38936 #if 0
38937 /* add flow control logic */
38938- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
38939+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
38940 if (iavcc->vc_desc_cnt > 10) {
38941 vcc->tx_quota = vcc->tx_quota * 3 / 4;
38942 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
38943diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
38944index fa7d7019..1e404c7 100644
38945--- a/drivers/atm/lanai.c
38946+++ b/drivers/atm/lanai.c
38947@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
38948 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
38949 lanai_endtx(lanai, lvcc);
38950 lanai_free_skb(lvcc->tx.atmvcc, skb);
38951- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
38952+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
38953 }
38954
38955 /* Try to fill the buffer - don't call unless there is backlog */
38956@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
38957 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
38958 __net_timestamp(skb);
38959 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
38960- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
38961+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
38962 out:
38963 lvcc->rx.buf.ptr = end;
38964 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
38965@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38966 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
38967 "vcc %d\n", lanai->number, (unsigned int) s, vci);
38968 lanai->stats.service_rxnotaal5++;
38969- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38970+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38971 return 0;
38972 }
38973 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
38974@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38975 int bytes;
38976 read_unlock(&vcc_sklist_lock);
38977 DPRINTK("got trashed rx pdu on vci %d\n", vci);
38978- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38979+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38980 lvcc->stats.x.aal5.service_trash++;
38981 bytes = (SERVICE_GET_END(s) * 16) -
38982 (((unsigned long) lvcc->rx.buf.ptr) -
38983@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38984 }
38985 if (s & SERVICE_STREAM) {
38986 read_unlock(&vcc_sklist_lock);
38987- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38988+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38989 lvcc->stats.x.aal5.service_stream++;
38990 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
38991 "PDU on VCI %d!\n", lanai->number, vci);
38992@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38993 return 0;
38994 }
38995 DPRINTK("got rx crc error on vci %d\n", vci);
38996- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38997+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38998 lvcc->stats.x.aal5.service_rxcrc++;
38999 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
39000 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
39001diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
39002index 9988ac9..7c52585 100644
39003--- a/drivers/atm/nicstar.c
39004+++ b/drivers/atm/nicstar.c
39005@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
39006 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
39007 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
39008 card->index);
39009- atomic_inc(&vcc->stats->tx_err);
39010+ atomic_inc_unchecked(&vcc->stats->tx_err);
39011 dev_kfree_skb_any(skb);
39012 return -EINVAL;
39013 }
39014@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
39015 if (!vc->tx) {
39016 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
39017 card->index);
39018- atomic_inc(&vcc->stats->tx_err);
39019+ atomic_inc_unchecked(&vcc->stats->tx_err);
39020 dev_kfree_skb_any(skb);
39021 return -EINVAL;
39022 }
39023@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
39024 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
39025 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
39026 card->index);
39027- atomic_inc(&vcc->stats->tx_err);
39028+ atomic_inc_unchecked(&vcc->stats->tx_err);
39029 dev_kfree_skb_any(skb);
39030 return -EINVAL;
39031 }
39032
39033 if (skb_shinfo(skb)->nr_frags != 0) {
39034 printk("nicstar%d: No scatter-gather yet.\n", card->index);
39035- atomic_inc(&vcc->stats->tx_err);
39036+ atomic_inc_unchecked(&vcc->stats->tx_err);
39037 dev_kfree_skb_any(skb);
39038 return -EINVAL;
39039 }
39040@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
39041 }
39042
39043 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
39044- atomic_inc(&vcc->stats->tx_err);
39045+ atomic_inc_unchecked(&vcc->stats->tx_err);
39046 dev_kfree_skb_any(skb);
39047 return -EIO;
39048 }
39049- atomic_inc(&vcc->stats->tx);
39050+ atomic_inc_unchecked(&vcc->stats->tx);
39051
39052 return 0;
39053 }
39054@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39055 printk
39056 ("nicstar%d: Can't allocate buffers for aal0.\n",
39057 card->index);
39058- atomic_add(i, &vcc->stats->rx_drop);
39059+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
39060 break;
39061 }
39062 if (!atm_charge(vcc, sb->truesize)) {
39063 RXPRINTK
39064 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
39065 card->index);
39066- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
39067+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
39068 dev_kfree_skb_any(sb);
39069 break;
39070 }
39071@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39072 ATM_SKB(sb)->vcc = vcc;
39073 __net_timestamp(sb);
39074 vcc->push(vcc, sb);
39075- atomic_inc(&vcc->stats->rx);
39076+ atomic_inc_unchecked(&vcc->stats->rx);
39077 cell += ATM_CELL_PAYLOAD;
39078 }
39079
39080@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39081 if (iovb == NULL) {
39082 printk("nicstar%d: Out of iovec buffers.\n",
39083 card->index);
39084- atomic_inc(&vcc->stats->rx_drop);
39085+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39086 recycle_rx_buf(card, skb);
39087 return;
39088 }
39089@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39090 small or large buffer itself. */
39091 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
39092 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
39093- atomic_inc(&vcc->stats->rx_err);
39094+ atomic_inc_unchecked(&vcc->stats->rx_err);
39095 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
39096 NS_MAX_IOVECS);
39097 NS_PRV_IOVCNT(iovb) = 0;
39098@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39099 ("nicstar%d: Expected a small buffer, and this is not one.\n",
39100 card->index);
39101 which_list(card, skb);
39102- atomic_inc(&vcc->stats->rx_err);
39103+ atomic_inc_unchecked(&vcc->stats->rx_err);
39104 recycle_rx_buf(card, skb);
39105 vc->rx_iov = NULL;
39106 recycle_iov_buf(card, iovb);
39107@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39108 ("nicstar%d: Expected a large buffer, and this is not one.\n",
39109 card->index);
39110 which_list(card, skb);
39111- atomic_inc(&vcc->stats->rx_err);
39112+ atomic_inc_unchecked(&vcc->stats->rx_err);
39113 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
39114 NS_PRV_IOVCNT(iovb));
39115 vc->rx_iov = NULL;
39116@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39117 printk(" - PDU size mismatch.\n");
39118 else
39119 printk(".\n");
39120- atomic_inc(&vcc->stats->rx_err);
39121+ atomic_inc_unchecked(&vcc->stats->rx_err);
39122 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
39123 NS_PRV_IOVCNT(iovb));
39124 vc->rx_iov = NULL;
39125@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39126 /* skb points to a small buffer */
39127 if (!atm_charge(vcc, skb->truesize)) {
39128 push_rxbufs(card, skb);
39129- atomic_inc(&vcc->stats->rx_drop);
39130+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39131 } else {
39132 skb_put(skb, len);
39133 dequeue_sm_buf(card, skb);
39134@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39135 ATM_SKB(skb)->vcc = vcc;
39136 __net_timestamp(skb);
39137 vcc->push(vcc, skb);
39138- atomic_inc(&vcc->stats->rx);
39139+ atomic_inc_unchecked(&vcc->stats->rx);
39140 }
39141 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
39142 struct sk_buff *sb;
39143@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39144 if (len <= NS_SMBUFSIZE) {
39145 if (!atm_charge(vcc, sb->truesize)) {
39146 push_rxbufs(card, sb);
39147- atomic_inc(&vcc->stats->rx_drop);
39148+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39149 } else {
39150 skb_put(sb, len);
39151 dequeue_sm_buf(card, sb);
39152@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39153 ATM_SKB(sb)->vcc = vcc;
39154 __net_timestamp(sb);
39155 vcc->push(vcc, sb);
39156- atomic_inc(&vcc->stats->rx);
39157+ atomic_inc_unchecked(&vcc->stats->rx);
39158 }
39159
39160 push_rxbufs(card, skb);
39161@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39162
39163 if (!atm_charge(vcc, skb->truesize)) {
39164 push_rxbufs(card, skb);
39165- atomic_inc(&vcc->stats->rx_drop);
39166+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39167 } else {
39168 dequeue_lg_buf(card, skb);
39169 #ifdef NS_USE_DESTRUCTORS
39170@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39171 ATM_SKB(skb)->vcc = vcc;
39172 __net_timestamp(skb);
39173 vcc->push(vcc, skb);
39174- atomic_inc(&vcc->stats->rx);
39175+ atomic_inc_unchecked(&vcc->stats->rx);
39176 }
39177
39178 push_rxbufs(card, sb);
39179@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39180 printk
39181 ("nicstar%d: Out of huge buffers.\n",
39182 card->index);
39183- atomic_inc(&vcc->stats->rx_drop);
39184+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39185 recycle_iovec_rx_bufs(card,
39186 (struct iovec *)
39187 iovb->data,
39188@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39189 card->hbpool.count++;
39190 } else
39191 dev_kfree_skb_any(hb);
39192- atomic_inc(&vcc->stats->rx_drop);
39193+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39194 } else {
39195 /* Copy the small buffer to the huge buffer */
39196 sb = (struct sk_buff *)iov->iov_base;
39197@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39198 #endif /* NS_USE_DESTRUCTORS */
39199 __net_timestamp(hb);
39200 vcc->push(vcc, hb);
39201- atomic_inc(&vcc->stats->rx);
39202+ atomic_inc_unchecked(&vcc->stats->rx);
39203 }
39204 }
39205
39206diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
39207index 943cf0d..37d15d5 100644
39208--- a/drivers/atm/solos-pci.c
39209+++ b/drivers/atm/solos-pci.c
39210@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
39211 }
39212 atm_charge(vcc, skb->truesize);
39213 vcc->push(vcc, skb);
39214- atomic_inc(&vcc->stats->rx);
39215+ atomic_inc_unchecked(&vcc->stats->rx);
39216 break;
39217
39218 case PKT_STATUS:
39219@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
39220 vcc = SKB_CB(oldskb)->vcc;
39221
39222 if (vcc) {
39223- atomic_inc(&vcc->stats->tx);
39224+ atomic_inc_unchecked(&vcc->stats->tx);
39225 solos_pop(vcc, oldskb);
39226 } else {
39227 dev_kfree_skb_irq(oldskb);
39228diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
39229index 0215934..ce9f5b1 100644
39230--- a/drivers/atm/suni.c
39231+++ b/drivers/atm/suni.c
39232@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
39233
39234
39235 #define ADD_LIMITED(s,v) \
39236- atomic_add((v),&stats->s); \
39237- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
39238+ atomic_add_unchecked((v),&stats->s); \
39239+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
39240
39241
39242 static void suni_hz(unsigned long from_timer)
39243diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
39244index 5120a96..e2572bd 100644
39245--- a/drivers/atm/uPD98402.c
39246+++ b/drivers/atm/uPD98402.c
39247@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
39248 struct sonet_stats tmp;
39249 int error = 0;
39250
39251- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
39252+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
39253 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
39254 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
39255 if (zero && !error) {
39256@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
39257
39258
39259 #define ADD_LIMITED(s,v) \
39260- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
39261- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
39262- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
39263+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
39264+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
39265+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
39266
39267
39268 static void stat_event(struct atm_dev *dev)
39269@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
39270 if (reason & uPD98402_INT_PFM) stat_event(dev);
39271 if (reason & uPD98402_INT_PCO) {
39272 (void) GET(PCOCR); /* clear interrupt cause */
39273- atomic_add(GET(HECCT),
39274+ atomic_add_unchecked(GET(HECCT),
39275 &PRIV(dev)->sonet_stats.uncorr_hcs);
39276 }
39277 if ((reason & uPD98402_INT_RFO) &&
39278@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
39279 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
39280 uPD98402_INT_LOS),PIMR); /* enable them */
39281 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
39282- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
39283- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
39284- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
39285+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
39286+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
39287+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
39288 return 0;
39289 }
39290
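suni.c and uPD98402.c share the ADD_LIMITED idiom: add to a signed SONET counter, then clamp it to INT_MAX if it wrapped negative. The read-then-set pair is not atomic as a whole, which is tolerated for statistics. A standalone sketch of the saturation logic follows; add_limited is an illustrative name, and the unsigned detour sidesteps the signed-overflow undefined behaviour that the kernel macro simply lives with:

#include <limits.h>
#include <stdio.h>

static int add_limited(int counter, int delta)
{
    /* do the addition in unsigned space, then saturate on wrap */
    unsigned int sum = (unsigned int)counter + (unsigned int)delta;
    return (int)sum < 0 ? INT_MAX : (int)sum;
}

int main(void)
{
    int c = INT_MAX - 1;
    c = add_limited(c, 5);
    printf("%d\n", c);   /* prints 2147483647, not a wrapped negative */
    return 0;
}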
39291diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
39292index 969c3c2..9b72956 100644
39293--- a/drivers/atm/zatm.c
39294+++ b/drivers/atm/zatm.c
39295@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
39296 }
39297 if (!size) {
39298 dev_kfree_skb_irq(skb);
39299- if (vcc) atomic_inc(&vcc->stats->rx_err);
39300+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
39301 continue;
39302 }
39303 if (!atm_charge(vcc,skb->truesize)) {
39304@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
39305 skb->len = size;
39306 ATM_SKB(skb)->vcc = vcc;
39307 vcc->push(vcc,skb);
39308- atomic_inc(&vcc->stats->rx);
39309+ atomic_inc_unchecked(&vcc->stats->rx);
39310 }
39311 zout(pos & 0xffff,MTA(mbx));
39312 #if 0 /* probably a stupid idea */
39313@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
39314 skb_queue_head(&zatm_vcc->backlog,skb);
39315 break;
39316 }
39317- atomic_inc(&vcc->stats->tx);
39318+ atomic_inc_unchecked(&vcc->stats->tx);
39319 wake_up(&zatm_vcc->tx_wait);
39320 }
39321
39322diff --git a/drivers/base/bus.c b/drivers/base/bus.c
39323index 83e910a..b224a73 100644
39324--- a/drivers/base/bus.c
39325+++ b/drivers/base/bus.c
39326@@ -1124,7 +1124,7 @@ int subsys_interface_register(struct subsys_interface *sif)
39327 return -EINVAL;
39328
39329 mutex_lock(&subsys->p->mutex);
39330- list_add_tail(&sif->node, &subsys->p->interfaces);
39331+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
39332 if (sif->add_dev) {
39333 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
39334 while ((dev = subsys_dev_iter_next(&iter)))
39335@@ -1149,7 +1149,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
39336 subsys = sif->subsys;
39337
39338 mutex_lock(&subsys->p->mutex);
39339- list_del_init(&sif->node);
39340+ pax_list_del_init((struct list_head *)&sif->node);
39341 if (sif->remove_dev) {
39342 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
39343 while ((dev = subsys_dev_iter_next(&iter)))
39344diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
39345index 25798db..15f130e 100644
39346--- a/drivers/base/devtmpfs.c
39347+++ b/drivers/base/devtmpfs.c
39348@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
39349 if (!thread)
39350 return 0;
39351
39352- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
39353+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
39354 if (err)
39355 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
39356 else
39357@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
39358 *err = sys_unshare(CLONE_NEWNS);
39359 if (*err)
39360 goto out;
39361- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
39362+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
39363 if (*err)
39364 goto out;
39365- sys_chdir("/.."); /* will traverse into overmounted root */
39366- sys_chroot(".");
39367+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
39368+ sys_chroot((char __force_user *)".");
39369 complete(&setup_done);
39370 while (1) {
39371 spin_lock(&req_lock);
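The devtmpfs hunks pass kernel buffers to syscalls that expect __user pointers, which is legal only because the thread has done set_fs(KERNEL_DS) first. The patch treats __user pointers as a distinct sparse address space, so each deliberate crossing is marked with __force_user (a __force __user pair, per the compiler.h hunks elsewhere in this patch) rather than silencing the checker globally. A sketch of the annotation machinery; the macro block mirrors kernel-style definitions so the file builds with plain gcc, and mount_like is a hypothetical stand-in for a syscall taking a __user pointer:

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user __force __user

#include <stdio.h>

static long mount_like(const char __user *src) { (void)src; return 0; }

int main(void)
{
    const char *kbuf = "devtmpfs";   /* a kernel-side buffer, conceptually */
    /* after set_fs(KERNEL_DS) the kernel may legally hand in its own
     * buffer; the cast records that this crossing is deliberate: */
    long err = mount_like((const char __force_user *)kbuf);
    printf("err = %ld\n", err);
    return 0;
}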
39372diff --git a/drivers/base/node.c b/drivers/base/node.c
39373index 8f7ed99..700dd0c 100644
39374--- a/drivers/base/node.c
39375+++ b/drivers/base/node.c
39376@@ -624,7 +624,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
39377 struct node_attr {
39378 struct device_attribute attr;
39379 enum node_states state;
39380-};
39381+} __do_const;
39382
39383 static ssize_t show_node_state(struct device *dev,
39384 struct device_attribute *attr, char *buf)
39385diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
39386index eee55c1..b8c9393 100644
39387--- a/drivers/base/power/domain.c
39388+++ b/drivers/base/power/domain.c
39389@@ -1821,9 +1821,9 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
39390
39391 if (dev->power.subsys_data->domain_data) {
39392 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
39393- gpd_data->ops = (struct gpd_dev_ops){ NULL };
39394+ memset(&gpd_data->ops, 0, sizeof(gpd_data->ops));
39395 if (clear_td)
39396- gpd_data->td = (struct gpd_timing_data){ 0 };
39397+ memset(&gpd_data->td, 0, sizeof(gpd_data->td));
39398
39399 if (--gpd_data->refcount == 0) {
39400 dev->power.subsys_data->domain_data = NULL;
39401@@ -1862,7 +1862,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
39402 {
39403 struct cpuidle_driver *cpuidle_drv;
39404 struct gpd_cpu_data *cpu_data;
39405- struct cpuidle_state *idle_state;
39406+ cpuidle_state_no_const *idle_state;
39407 int ret = 0;
39408
39409 if (IS_ERR_OR_NULL(genpd) || state < 0)
39410@@ -1930,7 +1930,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
39411 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
39412 {
39413 struct gpd_cpu_data *cpu_data;
39414- struct cpuidle_state *idle_state;
39415+ cpuidle_state_no_const *idle_state;
39416 int ret = 0;
39417
39418 if (IS_ERR_OR_NULL(genpd))
39419diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
39420index 95b181d1..c4f0e19 100644
39421--- a/drivers/base/power/sysfs.c
39422+++ b/drivers/base/power/sysfs.c
39423@@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev,
39424 return -EIO;
39425 }
39426 }
39427- return sprintf(buf, p);
39428+ return sprintf(buf, "%s", p);
39429 }
39430
39431 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
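rtpm_status_show previously did sprintf(buf, p), handing variable data to the format-string parameter; any % sequence in p would be interpreted, with %n allowing a memory write. The fix, repeated later in this patch for intel-rng and cdrom_print_info, pins the format to "%s". A user-space demonstration of the difference, where status stands in for attacker-influenced text:

#include <stdio.h>

int main(void)
{
    char buf[64];
    const char *status = "active %s %n";   /* hostile % sequences */

    /* unsafe form: sprintf(buf, status) would parse the % sequences */
    int n = snprintf(buf, sizeof(buf), "%s", status);  /* copied literally */
    printf("%d bytes: %s\n", n, buf);
    return 0;
}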
39432diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
39433index eb1bd2e..2667d3a 100644
39434--- a/drivers/base/power/wakeup.c
39435+++ b/drivers/base/power/wakeup.c
39436@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
39437 * They need to be modified together atomically, so it's better to use one
39438 * atomic variable to hold them both.
39439 */
39440-static atomic_t combined_event_count = ATOMIC_INIT(0);
39441+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
39442
39443 #define IN_PROGRESS_BITS (sizeof(int) * 4)
39444 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
39445
39446 static void split_counters(unsigned int *cnt, unsigned int *inpr)
39447 {
39448- unsigned int comb = atomic_read(&combined_event_count);
39449+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
39450
39451 *cnt = (comb >> IN_PROGRESS_BITS);
39452 *inpr = comb & MAX_IN_PROGRESS;
39453@@ -401,7 +401,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
39454 ws->start_prevent_time = ws->last_time;
39455
39456 /* Increment the counter of events in progress. */
39457- cec = atomic_inc_return(&combined_event_count);
39458+ cec = atomic_inc_return_unchecked(&combined_event_count);
39459
39460 trace_wakeup_source_activate(ws->name, cec);
39461 }
39462@@ -527,7 +527,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
39463 * Increment the counter of registered wakeup events and decrement the
39464 * couter of wakeup events in progress simultaneously.
39465 */
39466- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
39467+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
39468 trace_wakeup_source_deactivate(ws->name, cec);
39469
39470 split_counters(&cnt, &inpr);
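combined_event_count packs two counters into one word: the low IN_PROGRESS_BITS bits count wakeup events in progress, the high bits count registered events. Adding MAX_IN_PROGRESS in a single atomic operation therefore increments the total and decrements in-progress simultaneously, which is why this counter needs only the unchecked atomic ops. A runnable model of the trick, with the constants matching the kernel's:

#include <stdatomic.h>
#include <stdio.h>

#define IN_PROGRESS_BITS (sizeof(int) * 4)
#define MAX_IN_PROGRESS  ((1U << IN_PROGRESS_BITS) - 1)

static atomic_uint combined = 0;

static void split_counters(unsigned int *cnt, unsigned int *inpr)
{
    unsigned int comb = atomic_load(&combined);
    *cnt  = comb >> IN_PROGRESS_BITS;
    *inpr = comb & MAX_IN_PROGRESS;
}

int main(void)
{
    unsigned int cnt, inpr;
    atomic_fetch_add(&combined, 1);                /* event starts         */
    atomic_fetch_add(&combined, MAX_IN_PROGRESS);  /* finish: +1 total and */
    split_counters(&cnt, &inpr);                   /* -1 in progress       */
    printf("count=%u in_progress=%u\n", cnt, inpr); /* count=1 in_progress=0 */
    return 0;
}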
39471diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
39472index dbb8350..4762f4c 100644
39473--- a/drivers/base/syscore.c
39474+++ b/drivers/base/syscore.c
39475@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
39476 void register_syscore_ops(struct syscore_ops *ops)
39477 {
39478 mutex_lock(&syscore_ops_lock);
39479- list_add_tail(&ops->node, &syscore_ops_list);
39480+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
39481 mutex_unlock(&syscore_ops_lock);
39482 }
39483 EXPORT_SYMBOL_GPL(register_syscore_ops);
39484@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
39485 void unregister_syscore_ops(struct syscore_ops *ops)
39486 {
39487 mutex_lock(&syscore_ops_lock);
39488- list_del(&ops->node);
39489+ pax_list_del((struct list_head *)&ops->node);
39490 mutex_unlock(&syscore_ops_lock);
39491 }
39492 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
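The syscore hunks, like the subsys_interface hunks in bus.c above, route list insertion and removal through pax_list_add_tail/pax_list_del. With the constify plugin, objects such as syscore_ops end up in read-only memory, so the plain list helpers would fault on write; the pax_ variants briefly reopen the kernel for writing, and the (struct list_head *) cast sheds the const the plugin added to the containing object. A standalone sketch of the call-site shape; list_add_tail is reimplemented so the file builds alone, and the pax_ wrapper itself (open kernel, plain list op, close kernel) is not reproduced:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_add_tail(struct list_head *new, struct list_head *head)
{
    new->prev = head->prev;
    new->next = head;
    head->prev->next = new;
    head->prev = new;
}

struct syscore_ops {
    struct list_head node;
    void (*shutdown)(void);
};

static struct list_head syscore_ops_list = { &syscore_ops_list, &syscore_ops_list };

int main(void)
{
    static struct syscore_ops ops;  /* would sit in .rodata once constified */
    /* the cast mirrors pax_list_add_tail((struct list_head *)&ops->node, ...) */
    list_add_tail((struct list_head *)&ops.node, &syscore_ops_list);
    printf("registered: %s\n",
           syscore_ops_list.next == &ops.node ? "yes" : "no");
    return 0;
}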
39493diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
39494index 4595c22..d4f6c54 100644
39495--- a/drivers/block/cciss.c
39496+++ b/drivers/block/cciss.c
39497@@ -3011,7 +3011,7 @@ static void start_io(ctlr_info_t *h)
39498 while (!list_empty(&h->reqQ)) {
39499 c = list_entry(h->reqQ.next, CommandList_struct, list);
39500 /* can't do anything if fifo is full */
39501- if ((h->access.fifo_full(h))) {
39502+ if ((h->access->fifo_full(h))) {
39503 dev_warn(&h->pdev->dev, "fifo full\n");
39504 break;
39505 }
39506@@ -3021,7 +3021,7 @@ static void start_io(ctlr_info_t *h)
39507 h->Qdepth--;
39508
39509 /* Tell the controller execute command */
39510- h->access.submit_command(h, c);
39511+ h->access->submit_command(h, c);
39512
39513 /* Put job onto the completed Q */
39514 addQ(&h->cmpQ, c);
39515@@ -3447,17 +3447,17 @@ startio:
39516
39517 static inline unsigned long get_next_completion(ctlr_info_t *h)
39518 {
39519- return h->access.command_completed(h);
39520+ return h->access->command_completed(h);
39521 }
39522
39523 static inline int interrupt_pending(ctlr_info_t *h)
39524 {
39525- return h->access.intr_pending(h);
39526+ return h->access->intr_pending(h);
39527 }
39528
39529 static inline long interrupt_not_for_us(ctlr_info_t *h)
39530 {
39531- return ((h->access.intr_pending(h) == 0) ||
39532+ return ((h->access->intr_pending(h) == 0) ||
39533 (h->interrupts_enabled == 0));
39534 }
39535
39536@@ -3490,7 +3490,7 @@ static inline u32 next_command(ctlr_info_t *h)
39537 u32 a;
39538
39539 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
39540- return h->access.command_completed(h);
39541+ return h->access->command_completed(h);
39542
39543 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
39544 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
39545@@ -4047,7 +4047,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
39546 trans_support & CFGTBL_Trans_use_short_tags);
39547
39548 /* Change the access methods to the performant access methods */
39549- h->access = SA5_performant_access;
39550+ h->access = &SA5_performant_access;
39551 h->transMethod = CFGTBL_Trans_Performant;
39552
39553 return;
39554@@ -4321,7 +4321,7 @@ static int cciss_pci_init(ctlr_info_t *h)
39555 if (prod_index < 0)
39556 return -ENODEV;
39557 h->product_name = products[prod_index].product_name;
39558- h->access = *(products[prod_index].access);
39559+ h->access = products[prod_index].access;
39560
39561 if (cciss_board_disabled(h)) {
39562 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
39563@@ -5053,7 +5053,7 @@ reinit_after_soft_reset:
39564 }
39565
39566 /* make sure the board interrupts are off */
39567- h->access.set_intr_mask(h, CCISS_INTR_OFF);
39568+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
39569 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
39570 if (rc)
39571 goto clean2;
39572@@ -5103,7 +5103,7 @@ reinit_after_soft_reset:
39573 * fake ones to scoop up any residual completions.
39574 */
39575 spin_lock_irqsave(&h->lock, flags);
39576- h->access.set_intr_mask(h, CCISS_INTR_OFF);
39577+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
39578 spin_unlock_irqrestore(&h->lock, flags);
39579 free_irq(h->intr[h->intr_mode], h);
39580 rc = cciss_request_irq(h, cciss_msix_discard_completions,
39581@@ -5123,9 +5123,9 @@ reinit_after_soft_reset:
39582 dev_info(&h->pdev->dev, "Board READY.\n");
39583 dev_info(&h->pdev->dev,
39584 "Waiting for stale completions to drain.\n");
39585- h->access.set_intr_mask(h, CCISS_INTR_ON);
39586+ h->access->set_intr_mask(h, CCISS_INTR_ON);
39587 msleep(10000);
39588- h->access.set_intr_mask(h, CCISS_INTR_OFF);
39589+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
39590
39591 rc = controller_reset_failed(h->cfgtable);
39592 if (rc)
39593@@ -5148,7 +5148,7 @@ reinit_after_soft_reset:
39594 cciss_scsi_setup(h);
39595
39596 /* Turn the interrupts on so we can service requests */
39597- h->access.set_intr_mask(h, CCISS_INTR_ON);
39598+ h->access->set_intr_mask(h, CCISS_INTR_ON);
39599
39600 /* Get the firmware version */
39601 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
39602@@ -5220,7 +5220,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
39603 kfree(flush_buf);
39604 if (return_code != IO_OK)
39605 dev_warn(&h->pdev->dev, "Error flushing cache\n");
39606- h->access.set_intr_mask(h, CCISS_INTR_OFF);
39607+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
39608 free_irq(h->intr[h->intr_mode], h);
39609 }
39610
39611diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
39612index 7fda30e..2f27946 100644
39613--- a/drivers/block/cciss.h
39614+++ b/drivers/block/cciss.h
39615@@ -101,7 +101,7 @@ struct ctlr_info
39616 /* information about each logical volume */
39617 drive_info_struct *drv[CISS_MAX_LUN];
39618
39619- struct access_method access;
39620+ struct access_method *access;
39621
39622 /* queue and queue Info */
39623 struct list_head reqQ;
39624@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
39625 }
39626
39627 static struct access_method SA5_access = {
39628- SA5_submit_command,
39629- SA5_intr_mask,
39630- SA5_fifo_full,
39631- SA5_intr_pending,
39632- SA5_completed,
39633+ .submit_command = SA5_submit_command,
39634+ .set_intr_mask = SA5_intr_mask,
39635+ .fifo_full = SA5_fifo_full,
39636+ .intr_pending = SA5_intr_pending,
39637+ .command_completed = SA5_completed,
39638 };
39639
39640 static struct access_method SA5B_access = {
39641- SA5_submit_command,
39642- SA5B_intr_mask,
39643- SA5_fifo_full,
39644- SA5B_intr_pending,
39645- SA5_completed,
39646+ .submit_command = SA5_submit_command,
39647+ .set_intr_mask = SA5B_intr_mask,
39648+ .fifo_full = SA5_fifo_full,
39649+ .intr_pending = SA5B_intr_pending,
39650+ .command_completed = SA5_completed,
39651 };
39652
39653 static struct access_method SA5_performant_access = {
39654- SA5_submit_command,
39655- SA5_performant_intr_mask,
39656- SA5_fifo_full,
39657- SA5_performant_intr_pending,
39658- SA5_performant_completed,
39659+ .submit_command = SA5_submit_command,
39660+ .set_intr_mask = SA5_performant_intr_mask,
39661+ .fifo_full = SA5_fifo_full,
39662+ .intr_pending = SA5_performant_intr_pending,
39663+ .command_completed = SA5_performant_completed,
39664 };
39665
39666 struct board_type {
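Two related changes run through cciss (and, below, cpqarray and smart1,2.h): struct ctlr_info now holds a pointer to its access_method table instead of a member-for-member copy, and the ops tables switch to designated initializers. Pointing at one shared table lets the constify plugin keep the function pointers in read-only memory, and designated initializers bind each handler to a named field regardless of declaration order. A reduced sketch with stand-in types trimmed from the cciss structures:

#include <stdio.h>

struct access_method {
    void (*submit_command)(void);
    int  (*intr_pending)(void);
};

static void sa5_submit(void)  { puts("submitted"); }
static int  sa5_pending(void) { return 0; }

static const struct access_method SA5_access = {
    .submit_command = sa5_submit,   /* designated: order-independent */
    .intr_pending   = sa5_pending,
};

struct ctlr_info {
    const struct access_method *access;  /* pointer, not an embedded copy */
};

int main(void)
{
    struct ctlr_info h = { .access = &SA5_access };
    h.access->submit_command();
    printf("pending=%d\n", h.access->intr_pending());
    return 0;
}

This is also why every h->access.foo() call site in cciss.c becomes h->access->foo().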
39667diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
39668index 2b94403..fd6ad1f 100644
39669--- a/drivers/block/cpqarray.c
39670+++ b/drivers/block/cpqarray.c
39671@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
39672 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
39673 goto Enomem4;
39674 }
39675- hba[i]->access.set_intr_mask(hba[i], 0);
39676+ hba[i]->access->set_intr_mask(hba[i], 0);
39677 if (request_irq(hba[i]->intr, do_ida_intr,
39678 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
39679 {
39680@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
39681 add_timer(&hba[i]->timer);
39682
39683 /* Enable IRQ now that spinlock and rate limit timer are set up */
39684- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
39685+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
39686
39687 for(j=0; j<NWD; j++) {
39688 struct gendisk *disk = ida_gendisk[i][j];
39689@@ -694,7 +694,7 @@ DBGINFO(
39690 for(i=0; i<NR_PRODUCTS; i++) {
39691 if (board_id == products[i].board_id) {
39692 c->product_name = products[i].product_name;
39693- c->access = *(products[i].access);
39694+ c->access = products[i].access;
39695 break;
39696 }
39697 }
39698@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
39699 hba[ctlr]->intr = intr;
39700 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
39701 hba[ctlr]->product_name = products[j].product_name;
39702- hba[ctlr]->access = *(products[j].access);
39703+ hba[ctlr]->access = products[j].access;
39704 hba[ctlr]->ctlr = ctlr;
39705 hba[ctlr]->board_id = board_id;
39706 hba[ctlr]->pci_dev = NULL; /* not PCI */
39707@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
39708
39709 while((c = h->reqQ) != NULL) {
39710 /* Can't do anything if we're busy */
39711- if (h->access.fifo_full(h) == 0)
39712+ if (h->access->fifo_full(h) == 0)
39713 return;
39714
39715 /* Get the first entry from the request Q */
39716@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
39717 h->Qdepth--;
39718
39719 /* Tell the controller to do our bidding */
39720- h->access.submit_command(h, c);
39721+ h->access->submit_command(h, c);
39722
39723 /* Get onto the completion Q */
39724 addQ(&h->cmpQ, c);
39725@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
39726 unsigned long flags;
39727 __u32 a,a1;
39728
39729- istat = h->access.intr_pending(h);
39730+ istat = h->access->intr_pending(h);
39731 /* Is this interrupt for us? */
39732 if (istat == 0)
39733 return IRQ_NONE;
39734@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
39735 */
39736 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
39737 if (istat & FIFO_NOT_EMPTY) {
39738- while((a = h->access.command_completed(h))) {
39739+ while((a = h->access->command_completed(h))) {
39740 a1 = a; a &= ~3;
39741 if ((c = h->cmpQ) == NULL)
39742 {
39743@@ -1448,11 +1448,11 @@ static int sendcmd(
39744 /*
39745 * Disable interrupt
39746 */
39747- info_p->access.set_intr_mask(info_p, 0);
39748+ info_p->access->set_intr_mask(info_p, 0);
39749 /* Make sure there is room in the command FIFO */
39750 /* Actually it should be completely empty at this time. */
39751 for (i = 200000; i > 0; i--) {
39752- temp = info_p->access.fifo_full(info_p);
39753+ temp = info_p->access->fifo_full(info_p);
39754 if (temp != 0) {
39755 break;
39756 }
39757@@ -1465,7 +1465,7 @@ DBG(
39758 /*
39759 * Send the cmd
39760 */
39761- info_p->access.submit_command(info_p, c);
39762+ info_p->access->submit_command(info_p, c);
39763 complete = pollcomplete(ctlr);
39764
39765 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
39766@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
39767 * we check the new geometry. Then turn interrupts back on when
39768 * we're done.
39769 */
39770- host->access.set_intr_mask(host, 0);
39771+ host->access->set_intr_mask(host, 0);
39772 getgeometry(ctlr);
39773- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
39774+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
39775
39776 for(i=0; i<NWD; i++) {
39777 struct gendisk *disk = ida_gendisk[ctlr][i];
39778@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
39779 /* Wait (up to 2 seconds) for a command to complete */
39780
39781 for (i = 200000; i > 0; i--) {
39782- done = hba[ctlr]->access.command_completed(hba[ctlr]);
39783+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
39784 if (done == 0) {
39785 udelay(10); /* a short fixed delay */
39786 } else
39787diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
39788index be73e9d..7fbf140 100644
39789--- a/drivers/block/cpqarray.h
39790+++ b/drivers/block/cpqarray.h
39791@@ -99,7 +99,7 @@ struct ctlr_info {
39792 drv_info_t drv[NWD];
39793 struct proc_dir_entry *proc;
39794
39795- struct access_method access;
39796+ struct access_method *access;
39797
39798 cmdlist_t *reqQ;
39799 cmdlist_t *cmpQ;
39800diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
39801index a76ceb3..3c1a9fd 100644
39802--- a/drivers/block/drbd/drbd_int.h
39803+++ b/drivers/block/drbd/drbd_int.h
39804@@ -331,7 +331,7 @@ struct drbd_epoch {
39805 struct drbd_connection *connection;
39806 struct list_head list;
39807 unsigned int barrier_nr;
39808- atomic_t epoch_size; /* increased on every request added. */
39809+ atomic_unchecked_t epoch_size; /* increased on every request added. */
39810 atomic_t active; /* increased on every req. added, and dec on every finished. */
39811 unsigned long flags;
39812 };
39813@@ -797,7 +797,7 @@ struct drbd_device {
39814 unsigned int al_tr_number;
39815 int al_tr_cycle;
39816 wait_queue_head_t seq_wait;
39817- atomic_t packet_seq;
39818+ atomic_unchecked_t packet_seq;
39819 unsigned int peer_seq;
39820 spinlock_t peer_seq_lock;
39821 unsigned int minor;
39822@@ -1407,7 +1407,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
39823 char __user *uoptval;
39824 int err;
39825
39826- uoptval = (char __user __force *)optval;
39827+ uoptval = (char __force_user *)optval;
39828
39829 set_fs(KERNEL_DS);
39830 if (level == SOL_SOCKET)
39831diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
39832index 89c497c..9c736ae 100644
39833--- a/drivers/block/drbd/drbd_interval.c
39834+++ b/drivers/block/drbd/drbd_interval.c
39835@@ -67,9 +67,9 @@ static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
39836 }
39837
39838 static const struct rb_augment_callbacks augment_callbacks = {
39839- augment_propagate,
39840- augment_copy,
39841- augment_rotate,
39842+ .propagate = augment_propagate,
39843+ .copy = augment_copy,
39844+ .rotate = augment_rotate,
39845 };
39846
39847 /**
39848diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
39849index 960645c..6c2724a 100644
39850--- a/drivers/block/drbd/drbd_main.c
39851+++ b/drivers/block/drbd/drbd_main.c
39852@@ -1322,7 +1322,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
39853 p->sector = sector;
39854 p->block_id = block_id;
39855 p->blksize = blksize;
39856- p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
39857+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
39858 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
39859 }
39860
39861@@ -1628,7 +1628,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
39862 return -EIO;
39863 p->sector = cpu_to_be64(req->i.sector);
39864 p->block_id = (unsigned long)req;
39865- p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
39866+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
39867 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
39868 if (device->state.conn >= C_SYNC_SOURCE &&
39869 device->state.conn <= C_PAUSED_SYNC_T)
39870@@ -2670,8 +2670,8 @@ void drbd_destroy_connection(struct kref *kref)
39871 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
39872 struct drbd_resource *resource = connection->resource;
39873
39874- if (atomic_read(&connection->current_epoch->epoch_size) != 0)
39875- drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
39876+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0)
39877+ drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
39878 kfree(connection->current_epoch);
39879
39880 idr_destroy(&connection->peer_devices);
39881diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
39882index 3f2e167..d3170e4 100644
39883--- a/drivers/block/drbd/drbd_nl.c
39884+++ b/drivers/block/drbd/drbd_nl.c
39885@@ -3616,7 +3616,7 @@ finish:
39886
39887 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
39888 {
39889- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
39890+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
39891 struct sk_buff *msg;
39892 struct drbd_genlmsghdr *d_out;
39893 unsigned seq;
39894@@ -3629,7 +3629,7 @@ void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
39895 return;
39896 }
39897
39898- seq = atomic_inc_return(&drbd_genl_seq);
39899+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
39900 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
39901 if (!msg)
39902 goto failed;
39903diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
39904index 5b17ec8..6c21e6b 100644
39905--- a/drivers/block/drbd/drbd_receiver.c
39906+++ b/drivers/block/drbd/drbd_receiver.c
39907@@ -834,7 +834,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
39908 struct drbd_device *device = peer_device->device;
39909 int err;
39910
39911- atomic_set(&device->packet_seq, 0);
39912+ atomic_set_unchecked(&device->packet_seq, 0);
39913 device->peer_seq = 0;
39914
39915 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
39916@@ -1199,7 +1199,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
39917 do {
39918 next_epoch = NULL;
39919
39920- epoch_size = atomic_read(&epoch->epoch_size);
39921+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
39922
39923 switch (ev & ~EV_CLEANUP) {
39924 case EV_PUT:
39925@@ -1239,7 +1239,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
39926 rv = FE_DESTROYED;
39927 } else {
39928 epoch->flags = 0;
39929- atomic_set(&epoch->epoch_size, 0);
39930+ atomic_set_unchecked(&epoch->epoch_size, 0);
39931 /* atomic_set(&epoch->active, 0); is already zero */
39932 if (rv == FE_STILL_LIVE)
39933 rv = FE_RECYCLED;
39934@@ -1490,7 +1490,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
39935 conn_wait_active_ee_empty(connection);
39936 drbd_flush(connection);
39937
39938- if (atomic_read(&connection->current_epoch->epoch_size)) {
39939+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
39940 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
39941 if (epoch)
39942 break;
39943@@ -1503,11 +1503,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
39944 }
39945
39946 epoch->flags = 0;
39947- atomic_set(&epoch->epoch_size, 0);
39948+ atomic_set_unchecked(&epoch->epoch_size, 0);
39949 atomic_set(&epoch->active, 0);
39950
39951 spin_lock(&connection->epoch_lock);
39952- if (atomic_read(&connection->current_epoch->epoch_size)) {
39953+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
39954 list_add(&epoch->list, &connection->current_epoch->list);
39955 connection->current_epoch = epoch;
39956 connection->epochs++;
39957@@ -2224,7 +2224,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
39958
39959 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
39960 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
39961- atomic_inc(&connection->current_epoch->epoch_size);
39962+ atomic_inc_unchecked(&connection->current_epoch->epoch_size);
39963 err2 = drbd_drain_block(peer_device, pi->size);
39964 if (!err)
39965 err = err2;
39966@@ -2266,7 +2266,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
39967
39968 spin_lock(&connection->epoch_lock);
39969 peer_req->epoch = connection->current_epoch;
39970- atomic_inc(&peer_req->epoch->epoch_size);
39971+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
39972 atomic_inc(&peer_req->epoch->active);
39973 spin_unlock(&connection->epoch_lock);
39974
39975@@ -4461,7 +4461,7 @@ struct data_cmd {
39976 int expect_payload;
39977 size_t pkt_size;
39978 int (*fn)(struct drbd_connection *, struct packet_info *);
39979-};
39980+} __do_const;
39981
39982 static struct data_cmd drbd_cmd_handler[] = {
39983 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
39984@@ -4572,7 +4572,7 @@ static void conn_disconnect(struct drbd_connection *connection)
39985 if (!list_empty(&connection->current_epoch->list))
39986 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
39987 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
39988- atomic_set(&connection->current_epoch->epoch_size, 0);
39989+ atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
39990 connection->send.seen_any_write_yet = false;
39991
39992 drbd_info(connection, "Connection closed\n");
39993@@ -5364,7 +5364,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
39994 struct asender_cmd {
39995 size_t pkt_size;
39996 int (*fn)(struct drbd_connection *connection, struct packet_info *);
39997-};
39998+} __do_const;
39999
40000 static struct asender_cmd asender_tbl[] = {
40001 [P_PING] = { 0, got_Ping },
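Handler tables such as drbd_cmd_handler and asender_tbl (and node_attr earlier) gain __do_const, the marker the constify plugin uses to force instances into read-only memory: a table of function pointers that cannot be rewritten cannot be retargeted by a stray kernel write primitive. Plain const approximates the effect in this standalone sketch, and the data_cmd fields are illustrative:

#include <stdio.h>
#include <stddef.h>

struct data_cmd {
    int    expect_payload;
    size_t pkt_size;
    int  (*fn)(const char *pkt);
};

static int receive_data(const char *pkt)
{
    printf("data: %s\n", pkt);
    return 0;
}

static const struct data_cmd drbd_cmd_handler[] = {  /* lands in .rodata */
    { 1, 24, receive_data },
};

int main(void)
{
    /* dispatch goes through memory that cannot be rewritten at runtime */
    return drbd_cmd_handler[0].fn("payload");
}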
40002diff --git a/drivers/block/loop.c b/drivers/block/loop.c
40003index 6cb1beb..bf490f7 100644
40004--- a/drivers/block/loop.c
40005+++ b/drivers/block/loop.c
40006@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
40007
40008 file_start_write(file);
40009 set_fs(get_ds());
40010- bw = file->f_op->write(file, buf, len, &pos);
40011+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
40012 set_fs(old_fs);
40013 file_end_write(file);
40014 if (likely(bw == len))
40015diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
40016index 02351e2..a9ea617 100644
40017--- a/drivers/block/nvme-core.c
40018+++ b/drivers/block/nvme-core.c
40019@@ -73,7 +73,6 @@ static LIST_HEAD(dev_list);
40020 static struct task_struct *nvme_thread;
40021 static struct workqueue_struct *nvme_workq;
40022 static wait_queue_head_t nvme_kthread_wait;
40023-static struct notifier_block nvme_nb;
40024
40025 static void nvme_reset_failed_dev(struct work_struct *ws);
40026
40027@@ -2925,6 +2924,10 @@ static struct pci_driver nvme_driver = {
40028 .err_handler = &nvme_err_handler,
40029 };
40030
40031+static struct notifier_block nvme_nb = {
40032+ .notifier_call = &nvme_cpu_notify,
40033+};
40034+
40035 static int __init nvme_init(void)
40036 {
40037 int result;
40038@@ -2941,7 +2944,6 @@ static int __init nvme_init(void)
40039 else if (result > 0)
40040 nvme_major = result;
40041
40042- nvme_nb.notifier_call = &nvme_cpu_notify;
40043 result = register_hotcpu_notifier(&nvme_nb);
40044 if (result)
40045 goto unregister_blkdev;
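The nvme change is purely about initialization order: instead of a bare static notifier_block whose notifier_call is patched in at runtime inside nvme_init(), the structure is defined with a designated initializer, so it is complete at compile time and never needs a runtime write (and, under constification, could live in read-only memory). A before/after sketch, with notifier_block reduced to a stand-in and cpu_notify illustrative:

#include <stdio.h>

struct notifier_block {
    int (*notifier_call)(struct notifier_block *nb, unsigned long action);
};

static int cpu_notify(struct notifier_block *nb, unsigned long action)
{
    (void)nb;
    printf("action %lu\n", action);
    return 0;
}

/* before: static struct notifier_block nb;  ...  nb.notifier_call = cpu_notify; */
static struct notifier_block nb = {
    .notifier_call = cpu_notify,   /* after: complete at compile time */
};

int main(void)
{
    return nb.notifier_call(&nb, 1UL);
}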
40046diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
40047index 758ac44..58087fd 100644
40048--- a/drivers/block/pktcdvd.c
40049+++ b/drivers/block/pktcdvd.c
40050@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
40051
40052 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
40053 {
40054- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
40055+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
40056 }
40057
40058 /*
40059@@ -1888,7 +1888,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
40060 return -EROFS;
40061 }
40062 pd->settings.fp = ti.fp;
40063- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
40064+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
40065
40066 if (ti.nwa_v) {
40067 pd->nwa = be32_to_cpu(ti.next_writable);
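In pktcdvd the literal 1 becomes 1UL inside ~(sector_t)(pd->settings.size - 1). For the normal power-of-two size the resulting mask is identical; the difference is that the subtraction is now performed in unsigned long rather than wrapping in 32 bits before the cast widens it, which is what the size_overflow instrumentation tracks (our reading of the change). The demonstration below assumes a 64-bit long, as on x86_64:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t size = 0;                        /* degenerate case only */
    uint64_t a = ~(uint64_t)(size - 1);       /* wraps in 32 bits, then widens */
    uint64_t b = ~(uint64_t)(size - 1UL);     /* subtraction done in 64 bits   */
    printf("%#llx vs %#llx\n",
           (unsigned long long)a, (unsigned long long)b);
    return 0;   /* prints 0xffffffff00000000 vs 0 */
}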
40068diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
40069index e5565fb..71be10b4 100644
40070--- a/drivers/block/smart1,2.h
40071+++ b/drivers/block/smart1,2.h
40072@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
40073 }
40074
40075 static struct access_method smart4_access = {
40076- smart4_submit_command,
40077- smart4_intr_mask,
40078- smart4_fifo_full,
40079- smart4_intr_pending,
40080- smart4_completed,
40081+ .submit_command = smart4_submit_command,
40082+ .set_intr_mask = smart4_intr_mask,
40083+ .fifo_full = smart4_fifo_full,
40084+ .intr_pending = smart4_intr_pending,
40085+ .command_completed = smart4_completed,
40086 };
40087
40088 /*
40089@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
40090 }
40091
40092 static struct access_method smart2_access = {
40093- smart2_submit_command,
40094- smart2_intr_mask,
40095- smart2_fifo_full,
40096- smart2_intr_pending,
40097- smart2_completed,
40098+ .submit_command = smart2_submit_command,
40099+ .set_intr_mask = smart2_intr_mask,
40100+ .fifo_full = smart2_fifo_full,
40101+ .intr_pending = smart2_intr_pending,
40102+ .command_completed = smart2_completed,
40103 };
40104
40105 /*
40106@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
40107 }
40108
40109 static struct access_method smart2e_access = {
40110- smart2e_submit_command,
40111- smart2e_intr_mask,
40112- smart2e_fifo_full,
40113- smart2e_intr_pending,
40114- smart2e_completed,
40115+ .submit_command = smart2e_submit_command,
40116+ .set_intr_mask = smart2e_intr_mask,
40117+ .fifo_full = smart2e_fifo_full,
40118+ .intr_pending = smart2e_intr_pending,
40119+ .command_completed = smart2e_completed,
40120 };
40121
40122 /*
40123@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
40124 }
40125
40126 static struct access_method smart1_access = {
40127- smart1_submit_command,
40128- smart1_intr_mask,
40129- smart1_fifo_full,
40130- smart1_intr_pending,
40131- smart1_completed,
40132+ .submit_command = smart1_submit_command,
40133+ .set_intr_mask = smart1_intr_mask,
40134+ .fifo_full = smart1_fifo_full,
40135+ .intr_pending = smart1_intr_pending,
40136+ .command_completed = smart1_completed,
40137 };
40138diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
40139index f038dba..bb74c08 100644
40140--- a/drivers/bluetooth/btwilink.c
40141+++ b/drivers/bluetooth/btwilink.c
40142@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
40143
40144 static int bt_ti_probe(struct platform_device *pdev)
40145 {
40146- static struct ti_st *hst;
40147+ struct ti_st *hst;
40148 struct hci_dev *hdev;
40149 int err;
40150
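The btwilink fix drops a gratuitous static from the hst pointer in bt_ti_probe(): function-static state is shared by every invocation, so a second probed device could clobber or reuse the first device's pointer. A sketch of the bug class; ti_st is a trimmed stand-in and probe_one an illustrative name:

#include <stdio.h>
#include <stdlib.h>

struct ti_st { int id; };

static struct ti_st *probe_one(int id)
{
    struct ti_st *hst = malloc(sizeof(*hst));  /* ordinary local, per call */
    if (!hst)
        return NULL;
    hst->id = id;
    return hst;
}

int main(void)
{
    struct ti_st *a = probe_one(1);
    struct ti_st *b = probe_one(2);
    if (!a || !b)
        return 1;
    printf("%d %d\n", a->id, b->id);   /* two independent devices */
    free(a);
    free(b);
    return 0;
}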
40151diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
40152index 898b84b..86f74b9 100644
40153--- a/drivers/cdrom/cdrom.c
40154+++ b/drivers/cdrom/cdrom.c
40155@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
40156 ENSURE(reset, CDC_RESET);
40157 ENSURE(generic_packet, CDC_GENERIC_PACKET);
40158 cdi->mc_flags = 0;
40159- cdo->n_minors = 0;
40160 cdi->options = CDO_USE_FFLAGS;
40161
40162 if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
40163@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
40164 else
40165 cdi->cdda_method = CDDA_OLD;
40166
40167- if (!cdo->generic_packet)
40168- cdo->generic_packet = cdrom_dummy_generic_packet;
40169+ if (!cdo->generic_packet) {
40170+ pax_open_kernel();
40171+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
40172+ pax_close_kernel();
40173+ }
40174
40175 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
40176 mutex_lock(&cdrom_mutex);
40177@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
40178 if (cdi->exit)
40179 cdi->exit(cdi);
40180
40181- cdi->ops->n_minors--;
40182 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
40183 }
40184
40185@@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
40186 */
40187 nr = nframes;
40188 do {
40189- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
40190+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
40191 if (cgc.buffer)
40192 break;
40193
40194@@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
40195 struct cdrom_device_info *cdi;
40196 int ret;
40197
40198- ret = scnprintf(info + *pos, max_size - *pos, header);
40199+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
40200 if (!ret)
40201 return 1;
40202
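Several fixes meet in cdrom.c. Because cdrom_device_ops is constified, the runtime writes to it must go: the n_minors bookkeeping is simply dropped (gdrom below loses its .n_minors initializer to match), while the one legitimate write, defaulting generic_packet, is bracketed by pax_open_kernel()/pax_close_kernel() and performed through a void ** cast that strips the const. Alongside these sit a kmalloc-to-kzalloc change (so short CD-DA reads cannot leak old heap contents) and another "%s" format pin. A user-space sketch of the open/write/close pattern; pax_open_kernel and pax_close_kernel are empty stand-ins here, where the real ones toggle write protection on kernel rodata:

#include <stdio.h>

struct cdrom_device_ops {
    int (*generic_packet)(int cmd);
};

static int dummy_generic_packet(int cmd) { (void)cmd; return -5; /* ~-EIO */ }

static void pax_open_kernel(void)  { /* would make rodata writable */ }
static void pax_close_kernel(void) { /* would restore protection   */ }

int main(void)
{
    static struct cdrom_device_ops cdo;       /* generic_packet == NULL */

    if (!cdo.generic_packet) {
        pax_open_kernel();
        *(void **)&cdo.generic_packet = (void *)dummy_generic_packet;
        pax_close_kernel();
    }
    printf("%d\n", cdo.generic_packet(0));
    return 0;
}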
40203diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
40204index 584bc31..e64a12c 100644
40205--- a/drivers/cdrom/gdrom.c
40206+++ b/drivers/cdrom/gdrom.c
40207@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
40208 .audio_ioctl = gdrom_audio_ioctl,
40209 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
40210 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
40211- .n_minors = 1,
40212 };
40213
40214 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
40215diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
40216index 6e9f74a..50c7cea 100644
40217--- a/drivers/char/Kconfig
40218+++ b/drivers/char/Kconfig
40219@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
40220
40221 config DEVKMEM
40222 bool "/dev/kmem virtual device support"
40223- default y
40224+ default n
40225+ depends on !GRKERNSEC_KMEM
40226 help
40227 Say Y here if you want to support the /dev/kmem device. The
40228 /dev/kmem device is rarely used, but can be used for certain
40229@@ -577,6 +578,7 @@ config DEVPORT
40230 bool
40231 depends on !M68K
40232 depends on ISA || PCI
40233+ depends on !GRKERNSEC_KMEM
40234 default y
40235
40236 source "drivers/s390/char/Kconfig"
40237diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
40238index a48e05b..6bac831 100644
40239--- a/drivers/char/agp/compat_ioctl.c
40240+++ b/drivers/char/agp/compat_ioctl.c
40241@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
40242 return -ENOMEM;
40243 }
40244
40245- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
40246+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
40247 sizeof(*usegment) * ureserve.seg_count)) {
40248 kfree(usegment);
40249 kfree(ksegment);
40250diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
40251index b297033..fa217ca 100644
40252--- a/drivers/char/agp/frontend.c
40253+++ b/drivers/char/agp/frontend.c
40254@@ -819,7 +819,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
40255 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
40256 return -EFAULT;
40257
40258- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
40259+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
40260 return -EFAULT;
40261
40262 client = agp_find_client_by_pid(reserve.pid);
40263@@ -849,7 +849,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
40264 if (segment == NULL)
40265 return -ENOMEM;
40266
40267- if (copy_from_user(segment, (void __user *) reserve.seg_list,
40268+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
40269 sizeof(struct agp_segment) * reserve.seg_count)) {
40270 kfree(segment);
40271 return -EFAULT;
40272diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
40273index 4f94375..413694e 100644
40274--- a/drivers/char/genrtc.c
40275+++ b/drivers/char/genrtc.c
40276@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
40277 switch (cmd) {
40278
40279 case RTC_PLL_GET:
40280+ memset(&pll, 0, sizeof(pll));
40281 if (get_rtc_pll(&pll))
40282 return -EINVAL;
40283 else
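The genrtc hunk zeroes pll before RTC_PLL_GET: the structure is subsequently copied to user space, so any field or padding byte that get_rtc_pll() does not write would otherwise leak stale stack contents. A sketch of the leak being closed, with rtc_pll_info reduced to a stand-in whose padding is made explicit:

#include <stdio.h>
#include <string.h>

struct rtc_pll_info {
    int  pll_ctrl;
    char pad[4];   /* implicit padding in the real layout */
    long pll_max;
};

static int get_rtc_pll(struct rtc_pll_info *pll)
{
    pll->pll_ctrl = 1;   /* pll_max and the padding stay untouched */
    return 0;
}

int main(void)
{
    struct rtc_pll_info pll;
    memset(&pll, 0, sizeof(pll));   /* the added hardening */
    if (get_rtc_pll(&pll))
        return 1;
    /* a copy_to_user(arg, &pll, sizeof(pll)) now exposes only zeroes
     * in the unwritten bytes instead of stale stack contents */
    printf("%d %ld\n", pll.pll_ctrl, pll.pll_max);
    return 0;
}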
40284diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
40285index d5d4cd8..22d561d 100644
40286--- a/drivers/char/hpet.c
40287+++ b/drivers/char/hpet.c
40288@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
40289 }
40290
40291 static int
40292-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
40293+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
40294 struct hpet_info *info)
40295 {
40296 struct hpet_timer __iomem *timer;
40297diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
40298index 86fe45c..c0ea948 100644
40299--- a/drivers/char/hw_random/intel-rng.c
40300+++ b/drivers/char/hw_random/intel-rng.c
40301@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
40302
40303 if (no_fwh_detect)
40304 return -ENODEV;
40305- printk(warning);
40306+ printk("%s", warning);
40307 return -EBUSY;
40308 }
40309
40310diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
40311index e6db938..835e3a2 100644
40312--- a/drivers/char/ipmi/ipmi_msghandler.c
40313+++ b/drivers/char/ipmi/ipmi_msghandler.c
40314@@ -438,7 +438,7 @@ struct ipmi_smi {
40315 struct proc_dir_entry *proc_dir;
40316 char proc_dir_name[10];
40317
40318- atomic_t stats[IPMI_NUM_STATS];
40319+ atomic_unchecked_t stats[IPMI_NUM_STATS];
40320
40321 /*
40322 * run_to_completion duplicate of smb_info, smi_info
40323@@ -470,9 +470,9 @@ static LIST_HEAD(smi_watchers);
40324 static DEFINE_MUTEX(smi_watchers_mutex);
40325
40326 #define ipmi_inc_stat(intf, stat) \
40327- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
40328+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
40329 #define ipmi_get_stat(intf, stat) \
40330- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
40331+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
40332
40333 static int is_lan_addr(struct ipmi_addr *addr)
40334 {
40335@@ -2926,7 +2926,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
40336 INIT_LIST_HEAD(&intf->cmd_rcvrs);
40337 init_waitqueue_head(&intf->waitq);
40338 for (i = 0; i < IPMI_NUM_STATS; i++)
40339- atomic_set(&intf->stats[i], 0);
40340+ atomic_set_unchecked(&intf->stats[i], 0);
40341
40342 intf->proc_dir = NULL;
40343
40344diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
40345index 5d66568..c9d93c3 100644
40346--- a/drivers/char/ipmi/ipmi_si_intf.c
40347+++ b/drivers/char/ipmi/ipmi_si_intf.c
40348@@ -285,7 +285,7 @@ struct smi_info {
40349 unsigned char slave_addr;
40350
40351 /* Counters and things for the proc filesystem. */
40352- atomic_t stats[SI_NUM_STATS];
40353+ atomic_unchecked_t stats[SI_NUM_STATS];
40354
40355 struct task_struct *thread;
40356
40357@@ -294,9 +294,9 @@ struct smi_info {
40358 };
40359
40360 #define smi_inc_stat(smi, stat) \
40361- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
40362+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
40363 #define smi_get_stat(smi, stat) \
40364- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
40365+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
40366
40367 #define SI_MAX_PARMS 4
40368
40369@@ -3374,7 +3374,7 @@ static int try_smi_init(struct smi_info *new_smi)
40370 atomic_set(&new_smi->req_events, 0);
40371 new_smi->run_to_completion = false;
40372 for (i = 0; i < SI_NUM_STATS; i++)
40373- atomic_set(&new_smi->stats[i], 0);
40374+ atomic_set_unchecked(&new_smi->stats[i], 0);
40375
40376 new_smi->interrupt_disabled = true;
40377 atomic_set(&new_smi->stop_operation, 0);
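
Both IPMI hunks move pure statistics from atomic_t to atomic_unchecked_t. Under this patch's PAX_REFCOUNT, plain atomic_t operations are instrumented so that an overflow is detected and reported rather than silently wrapping (the defense against reference-count overflow exploits); counters that may legitimately wrap must opt out through the *_unchecked type and helpers defined elsewhere in the patch. A minimal sketch, assuming those definitions:

    #include <linux/atomic.h>

    static atomic_t obj_refcnt = ATOMIC_INIT(1);            /* overflow is a bug */
    static atomic_unchecked_t rx_msgs = ATOMIC_INIT(0);     /* wrap is harmless */

    static void on_rx(void)
    {
            atomic_inc_unchecked(&rx_msgs);  /* not overflow-instrumented */
    }

    static void get_obj(void)
    {
            atomic_inc(&obj_refcnt);         /* trips REFCOUNT on overflow */
    }
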
40378diff --git a/drivers/char/mem.c b/drivers/char/mem.c
40379index 917403f..dddd899 100644
40380--- a/drivers/char/mem.c
40381+++ b/drivers/char/mem.c
40382@@ -18,6 +18,7 @@
40383 #include <linux/raw.h>
40384 #include <linux/tty.h>
40385 #include <linux/capability.h>
40386+#include <linux/security.h>
40387 #include <linux/ptrace.h>
40388 #include <linux/device.h>
40389 #include <linux/highmem.h>
40390@@ -36,6 +37,10 @@
40391
40392 #define DEVPORT_MINOR 4
40393
40394+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
40395+extern const struct file_operations grsec_fops;
40396+#endif
40397+
40398 static inline unsigned long size_inside_page(unsigned long start,
40399 unsigned long size)
40400 {
40401@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
40402
40403 while (cursor < to) {
40404 if (!devmem_is_allowed(pfn)) {
40405+#ifdef CONFIG_GRKERNSEC_KMEM
40406+ gr_handle_mem_readwrite(from, to);
40407+#else
40408 printk(KERN_INFO
40409 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
40410 current->comm, from, to);
40411+#endif
40412 return 0;
40413 }
40414 cursor += PAGE_SIZE;
40415@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
40416 }
40417 return 1;
40418 }
40419+#elif defined(CONFIG_GRKERNSEC_KMEM)
40420+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
40421+{
40422+ return 0;
40423+}
40424 #else
40425 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
40426 {
40427@@ -122,6 +136,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
40428
40429 while (count > 0) {
40430 unsigned long remaining;
40431+ char *temp;
40432
40433 sz = size_inside_page(p, count);
40434
40435@@ -137,7 +152,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
40436 if (!ptr)
40437 return -EFAULT;
40438
40439- remaining = copy_to_user(buf, ptr, sz);
40440+#ifdef CONFIG_PAX_USERCOPY
40441+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
40442+ if (!temp) {
40443+ unxlate_dev_mem_ptr(p, ptr);
40444+ return -ENOMEM;
40445+ }
40446+ memcpy(temp, ptr, sz);
40447+#else
40448+ temp = ptr;
40449+#endif
40450+
40451+ remaining = copy_to_user(buf, temp, sz);
40452+
40453+#ifdef CONFIG_PAX_USERCOPY
40454+ kfree(temp);
40455+#endif
40456+
40457 unxlate_dev_mem_ptr(p, ptr);
40458 if (remaining)
40459 return -EFAULT;
40460@@ -369,9 +400,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
40461 size_t count, loff_t *ppos)
40462 {
40463 unsigned long p = *ppos;
40464- ssize_t low_count, read, sz;
40465+ ssize_t low_count, read, sz, err = 0;
40466 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
40467- int err = 0;
40468
40469 read = 0;
40470 if (p < (unsigned long) high_memory) {
40471@@ -393,6 +423,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
40472 }
40473 #endif
40474 while (low_count > 0) {
40475+ char *temp;
40476+
40477 sz = size_inside_page(p, low_count);
40478
40479 /*
40480@@ -402,7 +434,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
40481 */
40482 kbuf = xlate_dev_kmem_ptr((char *)p);
40483
40484- if (copy_to_user(buf, kbuf, sz))
40485+#ifdef CONFIG_PAX_USERCOPY
40486+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
40487+ if (!temp)
40488+ return -ENOMEM;
40489+ memcpy(temp, kbuf, sz);
40490+#else
40491+ temp = kbuf;
40492+#endif
40493+
40494+ err = copy_to_user(buf, temp, sz);
40495+
40496+#ifdef CONFIG_PAX_USERCOPY
40497+ kfree(temp);
40498+#endif
40499+
40500+ if (err)
40501 return -EFAULT;
40502 buf += sz;
40503 p += sz;
40504@@ -827,6 +874,9 @@ static const struct memdev {
40505 #ifdef CONFIG_PRINTK
40506 [11] = { "kmsg", 0644, &kmsg_fops, NULL },
40507 #endif
40508+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
40509+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
40510+#endif
40511 };
40512
40513 static int memory_open(struct inode *inode, struct file *filp)
40514@@ -898,7 +948,7 @@ static int __init chr_dev_init(void)
40515 continue;
40516
40517 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
40518- NULL, devlist[minor].name);
40519+ NULL, "%s", devlist[minor].name);
40520 }
40521
40522 return tty_init();
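
The read_mem()/read_kmem() hunks apply PAX_USERCOPY's bounce-buffer rule: the usercopy checker validates every copy_to_user() source against a known object (a slab allocation or the current stack frame), and a pointer returned by xlate_dev_mem_ptr() or xlate_dev_kmem_ptr() belongs to no such object. Staging the data in a kmalloc() buffer of exactly sz bytes gives the checker bounds it can verify, and PaX's GFP_USERCOPY flag marks the allocation as one expected to feed user copies. A minimal sketch of the pattern:

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/uaccess.h>

    static ssize_t copy_out_bounced(char __user *buf, const void *src, size_t sz)
    {
            ssize_t ret = 0;
            char *temp = kmalloc(sz, GFP_KERNEL | GFP_USERCOPY);

            if (!temp)
                    return -ENOMEM;
            memcpy(temp, src, sz);                  /* kernel-to-kernel, unchecked */
            if (copy_to_user(buf, temp, sz))        /* now bounds-checkable */
                    ret = -EFAULT;
            kfree(temp);
            return ret;
    }
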
40523diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
40524index 9df78e2..01ba9ae 100644
40525--- a/drivers/char/nvram.c
40526+++ b/drivers/char/nvram.c
40527@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
40528
40529 spin_unlock_irq(&rtc_lock);
40530
40531- if (copy_to_user(buf, contents, tmp - contents))
40532+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
40533 return -EFAULT;
40534
40535 *ppos = i;
40536diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
40537index 8320abd..ec48108 100644
40538--- a/drivers/char/pcmcia/synclink_cs.c
40539+++ b/drivers/char/pcmcia/synclink_cs.c
40540@@ -2345,9 +2345,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
40541
40542 if (debug_level >= DEBUG_LEVEL_INFO)
40543 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
40544- __FILE__, __LINE__, info->device_name, port->count);
40545+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
40546
40547- WARN_ON(!port->count);
40548+ WARN_ON(!atomic_read(&port->count));
40549
40550 if (tty_port_close_start(port, tty, filp) == 0)
40551 goto cleanup;
40552@@ -2365,7 +2365,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
40553 cleanup:
40554 if (debug_level >= DEBUG_LEVEL_INFO)
40555 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
40556- tty->driver->name, port->count);
40557+ tty->driver->name, atomic_read(&port->count));
40558 }
40559
40560 /* Wait until the transmitter is empty.
40561@@ -2507,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
40562
40563 if (debug_level >= DEBUG_LEVEL_INFO)
40564 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
40565- __FILE__, __LINE__, tty->driver->name, port->count);
40566+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
40567
40568 /* If port is closing, signal caller to try again */
40569 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
40570@@ -2527,11 +2527,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
40571 goto cleanup;
40572 }
40573 spin_lock(&port->lock);
40574- port->count++;
40575+ atomic_inc(&port->count);
40576 spin_unlock(&port->lock);
40577 spin_unlock_irqrestore(&info->netlock, flags);
40578
40579- if (port->count == 1) {
40580+ if (atomic_read(&port->count) == 1) {
40581 /* 1st open on this device, init hardware */
40582 retval = startup(info, tty);
40583 if (retval < 0)
40584@@ -3920,7 +3920,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
40585 unsigned short new_crctype;
40586
40587 /* return error if TTY interface open */
40588- if (info->port.count)
40589+ if (atomic_read(&info->port.count))
40590 return -EBUSY;
40591
40592 switch (encoding)
40593@@ -4024,7 +4024,7 @@ static int hdlcdev_open(struct net_device *dev)
40594
40595 /* arbitrate between network and tty opens */
40596 spin_lock_irqsave(&info->netlock, flags);
40597- if (info->port.count != 0 || info->netcount != 0) {
40598+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
40599 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
40600 spin_unlock_irqrestore(&info->netlock, flags);
40601 return -EBUSY;
40602@@ -4114,7 +4114,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40603 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
40604
40605 /* return error if TTY interface open */
40606- if (info->port.count)
40607+ if (atomic_read(&info->port.count))
40608 return -EBUSY;
40609
40610 if (cmd != SIOCWANDEV)
40611diff --git a/drivers/char/random.c b/drivers/char/random.c
40612index 71529e1..822b036 100644
40613--- a/drivers/char/random.c
40614+++ b/drivers/char/random.c
40615@@ -284,9 +284,6 @@
40616 /*
40617 * To allow fractional bits to be tracked, the entropy_count field is
40618 * denominated in units of 1/8th bits.
40619- *
40620- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
40621- * credit_entropy_bits() needs to be 64 bits wide.
40622 */
40623 #define ENTROPY_SHIFT 3
40624 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
40625@@ -433,9 +430,9 @@ struct entropy_store {
40626 };
40627
40628 static void push_to_pool(struct work_struct *work);
40629-static __u32 input_pool_data[INPUT_POOL_WORDS];
40630-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
40631-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
40632+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
40633+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
40634+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
40635
40636 static struct entropy_store input_pool = {
40637 .poolinfo = &poolinfo_table[0],
40638@@ -524,8 +521,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
40639 input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
40640 }
40641
40642- ACCESS_ONCE(r->input_rotate) = input_rotate;
40643- ACCESS_ONCE(r->add_ptr) = i;
40644+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
40645+ ACCESS_ONCE_RW(r->add_ptr) = i;
40646 smp_wmb();
40647
40648 if (out)
40649@@ -632,7 +629,7 @@ retry:
40650 /* The +2 corresponds to the /4 in the denominator */
40651
40652 do {
40653- unsigned int anfrac = min(pnfrac, pool_size/2);
40654+ u64 anfrac = min(pnfrac, pool_size/2);
40655 unsigned int add =
40656 ((pool_size - entropy_count)*anfrac*3) >> s;
40657
40658@@ -1177,7 +1174,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
40659
40660 extract_buf(r, tmp);
40661 i = min_t(int, nbytes, EXTRACT_SIZE);
40662- if (copy_to_user(buf, tmp, i)) {
40663+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
40664 ret = -EFAULT;
40665 break;
40666 }
40667@@ -1567,7 +1564,7 @@ static char sysctl_bootid[16];
40668 static int proc_do_uuid(struct ctl_table *table, int write,
40669 void __user *buffer, size_t *lenp, loff_t *ppos)
40670 {
40671- struct ctl_table fake_table;
40672+ ctl_table_no_const fake_table;
40673 unsigned char buf[64], tmp_uuid[16], *uuid;
40674
40675 uuid = table->data;
40676@@ -1597,7 +1594,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
40677 static int proc_do_entropy(struct ctl_table *table, int write,
40678 void __user *buffer, size_t *lenp, loff_t *ppos)
40679 {
40680- struct ctl_table fake_table;
40681+ ctl_table_no_const fake_table;
40682 int entropy_count;
40683
40684 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
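
Two of the random.c hunks belong together: declaring anfrac as u64 in credit_entropy_bits() forces the product (pool_size - entropy_count) * anfrac * 3 to be evaluated in 64 bits, which is why the earlier hunk can delete the comment requiring 2*(ENTROPY_SHIFT + log2(poolbits)) <= 31. For the 4096-bit input pool that constraint held with little margin: pool_size = 4096 << ENTROPY_SHIFT = 2^15 fractional bits, anfrac <= pool_size/2 = 2^14, so the 32-bit product peaked at 3 * 2^29 < 2^31 (and 2*(3 + 12) = 30 <= 31). The wider multiply removes the constraint instead of documenting it. The extract_entropy_user() hunk is separate: i is already clamped to EXTRACT_SIZE, which equals sizeof(tmp), so the added i > sizeof(tmp) test is a redundant bound, apparently there so the usercopy/size-overflow instrumentation sees an explicit limit before copy_to_user().
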
40685diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
40686index 7cc1fe22..b602d6b 100644
40687--- a/drivers/char/sonypi.c
40688+++ b/drivers/char/sonypi.c
40689@@ -54,6 +54,7 @@
40690
40691 #include <asm/uaccess.h>
40692 #include <asm/io.h>
40693+#include <asm/local.h>
40694
40695 #include <linux/sonypi.h>
40696
40697@@ -490,7 +491,7 @@ static struct sonypi_device {
40698 spinlock_t fifo_lock;
40699 wait_queue_head_t fifo_proc_list;
40700 struct fasync_struct *fifo_async;
40701- int open_count;
40702+ local_t open_count;
40703 int model;
40704 struct input_dev *input_jog_dev;
40705 struct input_dev *input_key_dev;
40706@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
40707 static int sonypi_misc_release(struct inode *inode, struct file *file)
40708 {
40709 mutex_lock(&sonypi_device.lock);
40710- sonypi_device.open_count--;
40711+ local_dec(&sonypi_device.open_count);
40712 mutex_unlock(&sonypi_device.lock);
40713 return 0;
40714 }
40715@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
40716 {
40717 mutex_lock(&sonypi_device.lock);
40718 /* Flush input queue on first open */
40719- if (!sonypi_device.open_count)
40720+ if (!local_read(&sonypi_device.open_count))
40721 kfifo_reset(&sonypi_device.fifo);
40722- sonypi_device.open_count++;
40723+ local_inc(&sonypi_device.open_count);
40724 mutex_unlock(&sonypi_device.lock);
40725
40726 return 0;
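
The sonypi conversion is the same refcount-hardening theme: open_count tracks device lifetime, so it becomes a local_t, whose local_inc()/local_dec() this patch instruments for overflow much like atomic_t, while staying cheaper than a bus-locked atomic; every update here is already serialized by sonypi_device.lock anyway. A minimal sketch, with reset_device_queues() as a hypothetical stand-in for the first-open work:

    #include <asm/local.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(dev_lock);
    static local_t open_count;      /* overflow-checked under PAX_REFCOUNT */

    static int my_open(void)
    {
            mutex_lock(&dev_lock);
            if (!local_read(&open_count))
                    reset_device_queues();  /* hypothetical first-open work */
            local_inc(&open_count);
            mutex_unlock(&dev_lock);
            return 0;
    }
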
40727diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
40728index 565a947..dcdc06e 100644
40729--- a/drivers/char/tpm/tpm_acpi.c
40730+++ b/drivers/char/tpm/tpm_acpi.c
40731@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
40732 virt = acpi_os_map_iomem(start, len);
40733 if (!virt) {
40734 kfree(log->bios_event_log);
40735+ log->bios_event_log = NULL;
40736 printk("%s: ERROR - Unable to map memory\n", __func__);
40737 return -EIO;
40738 }
40739
40740- memcpy_fromio(log->bios_event_log, virt, len);
40741+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
40742
40743 acpi_os_unmap_iomem(virt, len);
40744 return 0;
40745diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
40746index 59f7cb2..bac8b6d 100644
40747--- a/drivers/char/tpm/tpm_eventlog.c
40748+++ b/drivers/char/tpm/tpm_eventlog.c
40749@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
40750 event = addr;
40751
40752 if ((event->event_type == 0 && event->event_size == 0) ||
40753- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
40754+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
40755 return NULL;
40756
40757 return addr;
40758@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
40759 return NULL;
40760
40761 if ((event->event_type == 0 && event->event_size == 0) ||
40762- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
40763+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
40764 return NULL;
40765
40766 (*pos)++;
40767@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
40768 int i;
40769
40770 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
40771- seq_putc(m, data[i]);
40772+ if (!seq_putc(m, data[i]))
40773+ return -EFAULT;
40774
40775 return 0;
40776 }
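
The tpm_eventlog bounds checks are rewritten to resist integer wraparound: event_size is untrusted data read from the log, and in the old form addr + sizeof(struct tcpa_event) + event_size can wrap past the top of the address space and compare below limit, letting an oversized event through. Taking sizeof(struct tcpa_event) as 0x20 for illustration, with 32-bit pointers, addr = 0xfffff000, limit = 0xfffff800 and event_size = 0x2000: the old left-hand side wraps to 0x1020 and the check passes, while the new form computes limit - addr - 0x20 = 0x7e0 and rejects event_size = 0x2000. Comparing the untrusted size against the remaining room cannot wrap, since addr stays below limit. The seq_putc() hunk below that is independent: it propagates a full-buffer failure instead of ignoring it.
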
40777diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
40778index 60aafb8..10c08e0 100644
40779--- a/drivers/char/virtio_console.c
40780+++ b/drivers/char/virtio_console.c
40781@@ -684,7 +684,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
40782 if (to_user) {
40783 ssize_t ret;
40784
40785- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
40786+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
40787 if (ret)
40788 return -EFAULT;
40789 } else {
40790@@ -787,7 +787,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
40791 if (!port_has_data(port) && !port->host_connected)
40792 return 0;
40793
40794- return fill_readbuf(port, ubuf, count, true);
40795+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
40796 }
40797
40798 static int wait_port_writable(struct port *port, bool nonblock)
40799diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
40800index 57a078e..c17cde8 100644
40801--- a/drivers/clk/clk-composite.c
40802+++ b/drivers/clk/clk-composite.c
40803@@ -146,7 +146,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
40804 struct clk *clk;
40805 struct clk_init_data init;
40806 struct clk_composite *composite;
40807- struct clk_ops *clk_composite_ops;
40808+ clk_ops_no_const *clk_composite_ops;
40809
40810 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
40811 if (!composite) {
40812diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
40813index dd3a78c..386d49c 100644
40814--- a/drivers/clk/socfpga/clk-gate.c
40815+++ b/drivers/clk/socfpga/clk-gate.c
40816@@ -22,6 +22,7 @@
40817 #include <linux/mfd/syscon.h>
40818 #include <linux/of.h>
40819 #include <linux/regmap.h>
40820+#include <asm/pgtable.h>
40821
40822 #include "clk.h"
40823
40824@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
40825 return 0;
40826 }
40827
40828-static struct clk_ops gateclk_ops = {
40829+static clk_ops_no_const gateclk_ops __read_only = {
40830 .prepare = socfpga_clk_prepare,
40831 .recalc_rate = socfpga_clk_recalc_rate,
40832 .get_parent = socfpga_clk_get_parent,
40833@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
40834 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
40835 socfpga_clk->hw.bit_idx = clk_gate[1];
40836
40837- gateclk_ops.enable = clk_gate_ops.enable;
40838- gateclk_ops.disable = clk_gate_ops.disable;
40839+ pax_open_kernel();
40840+ *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
40841+ *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
40842+ pax_close_kernel();
40843 }
40844
40845 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
40846diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
40847index de6da95..c98278b 100644
40848--- a/drivers/clk/socfpga/clk-pll.c
40849+++ b/drivers/clk/socfpga/clk-pll.c
40850@@ -21,6 +21,7 @@
40851 #include <linux/io.h>
40852 #include <linux/of.h>
40853 #include <linux/of_address.h>
40854+#include <asm/pgtable.h>
40855
40856 #include "clk.h"
40857
40858@@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
40859 CLK_MGR_PLL_CLK_SRC_MASK;
40860 }
40861
40862-static struct clk_ops clk_pll_ops = {
40863+static clk_ops_no_const clk_pll_ops __read_only = {
40864 .recalc_rate = clk_pll_recalc_rate,
40865 .get_parent = clk_pll_get_parent,
40866 };
40867@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
40868 pll_clk->hw.hw.init = &init;
40869
40870 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
40871- clk_pll_ops.enable = clk_gate_ops.enable;
40872- clk_pll_ops.disable = clk_gate_ops.disable;
40873+ pax_open_kernel();
40874+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
40875+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
40876+ pax_close_kernel();
40877
40878 clk = clk_register(NULL, &pll_clk->hw.hw);
40879 if (WARN_ON(IS_ERR(clk))) {
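
Both socfpga hunks (and several cpufreq hunks further down) follow one pattern: an ops table that is almost entirely fixed is declared __read_only, and the few members that must be filled in at runtime are written between pax_open_kernel() and pax_close_kernel(), the PaX helpers (defined elsewhere in this patch) that briefly lift write protection on read-only kernel data; the *(void **)& cast is what lets the store go through the const-qualified member. A minimal sketch, with my_recalc_rate() hypothetical:

    static clk_ops_no_const my_ops __read_only = {
            .recalc_rate = my_recalc_rate,  /* fixed at build time */
    };

    static void __init my_ops_fixup(void)
    {
            pax_open_kernel();              /* temporarily allow writes to RO data */
            *(void **)&my_ops.enable  = clk_gate_ops.enable;
            *(void **)&my_ops.disable = clk_gate_ops.disable;
            pax_close_kernel();             /* restore write protection */
    }
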
40880diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
40881index b0c18ed..1713a80 100644
40882--- a/drivers/cpufreq/acpi-cpufreq.c
40883+++ b/drivers/cpufreq/acpi-cpufreq.c
40884@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
40885 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
40886 per_cpu(acfreq_data, cpu) = data;
40887
40888- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
40889- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
40890+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
40891+ pax_open_kernel();
40892+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
40893+ pax_close_kernel();
40894+ }
40895
40896 result = acpi_processor_register_performance(data->acpi_data, cpu);
40897 if (result)
40898@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
40899 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
40900 break;
40901 case ACPI_ADR_SPACE_FIXED_HARDWARE:
40902- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
40903+ pax_open_kernel();
40904+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
40905+ pax_close_kernel();
40906 break;
40907 default:
40908 break;
40909@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
40910 if (!msrs)
40911 return;
40912
40913- acpi_cpufreq_driver.boost_supported = true;
40914- acpi_cpufreq_driver.boost_enabled = boost_state(0);
40915+ pax_open_kernel();
40916+ *(bool *)&acpi_cpufreq_driver.boost_supported = true;
40917+ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
40918+ pax_close_kernel();
40919
40920 cpu_notifier_register_begin();
40921
40922diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
40923index 6f02485..13684ae 100644
40924--- a/drivers/cpufreq/cpufreq.c
40925+++ b/drivers/cpufreq/cpufreq.c
40926@@ -2100,7 +2100,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
40927 }
40928
40929 mutex_lock(&cpufreq_governor_mutex);
40930- list_del(&governor->governor_list);
40931+ pax_list_del(&governor->governor_list);
40932 mutex_unlock(&cpufreq_governor_mutex);
40933 return;
40934 }
40935@@ -2316,7 +2316,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
40936 return NOTIFY_OK;
40937 }
40938
40939-static struct notifier_block __refdata cpufreq_cpu_notifier = {
40940+static struct notifier_block cpufreq_cpu_notifier = {
40941 .notifier_call = cpufreq_cpu_callback,
40942 };
40943
40944@@ -2356,13 +2356,17 @@ int cpufreq_boost_trigger_state(int state)
40945 return 0;
40946
40947 write_lock_irqsave(&cpufreq_driver_lock, flags);
40948- cpufreq_driver->boost_enabled = state;
40949+ pax_open_kernel();
40950+ *(bool *)&cpufreq_driver->boost_enabled = state;
40951+ pax_close_kernel();
40952 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
40953
40954 ret = cpufreq_driver->set_boost(state);
40955 if (ret) {
40956 write_lock_irqsave(&cpufreq_driver_lock, flags);
40957- cpufreq_driver->boost_enabled = !state;
40958+ pax_open_kernel();
40959+ *(bool *)&cpufreq_driver->boost_enabled = !state;
40960+ pax_close_kernel();
40961 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
40962
40963 pr_err("%s: Cannot %s BOOST\n",
40964@@ -2419,8 +2423,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
40965
40966 pr_debug("trying to register driver %s\n", driver_data->name);
40967
40968- if (driver_data->setpolicy)
40969- driver_data->flags |= CPUFREQ_CONST_LOOPS;
40970+ if (driver_data->setpolicy) {
40971+ pax_open_kernel();
40972+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
40973+ pax_close_kernel();
40974+ }
40975
40976 write_lock_irqsave(&cpufreq_driver_lock, flags);
40977 if (cpufreq_driver) {
40978@@ -2435,8 +2442,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
40979 * Check if driver provides function to enable boost -
40980 * if not, use cpufreq_boost_set_sw as default
40981 */
40982- if (!cpufreq_driver->set_boost)
40983- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
40984+ if (!cpufreq_driver->set_boost) {
40985+ pax_open_kernel();
40986+ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
40987+ pax_close_kernel();
40988+ }
40989
40990 ret = cpufreq_sysfs_create_file(&boost.attr);
40991 if (ret) {
40992diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
40993index 1b44496..b80ff5e 100644
40994--- a/drivers/cpufreq/cpufreq_governor.c
40995+++ b/drivers/cpufreq/cpufreq_governor.c
40996@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
40997 struct dbs_data *dbs_data;
40998 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
40999 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
41000- struct od_ops *od_ops = NULL;
41001+ const struct od_ops *od_ops = NULL;
41002 struct od_dbs_tuners *od_tuners = NULL;
41003 struct cs_dbs_tuners *cs_tuners = NULL;
41004 struct cpu_dbs_common_info *cpu_cdbs;
41005@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
41006
41007 if ((cdata->governor == GOV_CONSERVATIVE) &&
41008 (!policy->governor->initialized)) {
41009- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
41010+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
41011
41012 cpufreq_register_notifier(cs_ops->notifier_block,
41013 CPUFREQ_TRANSITION_NOTIFIER);
41014@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
41015
41016 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
41017 (policy->governor->initialized == 1)) {
41018- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
41019+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
41020
41021 cpufreq_unregister_notifier(cs_ops->notifier_block,
41022 CPUFREQ_TRANSITION_NOTIFIER);
41023diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
41024index cc401d1..8197340 100644
41025--- a/drivers/cpufreq/cpufreq_governor.h
41026+++ b/drivers/cpufreq/cpufreq_governor.h
41027@@ -212,7 +212,7 @@ struct common_dbs_data {
41028 void (*exit)(struct dbs_data *dbs_data);
41029
41030 /* Governor specific ops, see below */
41031- void *gov_ops;
41032+ const void *gov_ops;
41033 };
41034
41035 /* Governor Per policy data */
41036@@ -232,7 +232,7 @@ struct od_ops {
41037 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
41038 unsigned int freq_next, unsigned int relation);
41039 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
41040-};
41041+} __no_const;
41042
41043 struct cs_ops {
41044 struct notifier_block *notifier_block;
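
These governor changes show the two sides of PaX's constify plugin, which makes structures consisting of function pointers const by default: __do_const forces constification where the plugin would not apply it on its own, and __no_const (used on od_ops above) exempts a type whose members genuinely change at runtime; ondemand re-points powersave_bias_target in the next file, and even that write goes through pax_open_kernel(). A two-line sketch of the annotations:

    struct fixed_ops   { void (*fn)(void); } __do_const;   /* forced read-only */
    struct mutable_ops { void (*fn)(void); } __no_const;   /* never constified */
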
41045diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
41046index 18d4091..434be15 100644
41047--- a/drivers/cpufreq/cpufreq_ondemand.c
41048+++ b/drivers/cpufreq/cpufreq_ondemand.c
41049@@ -521,7 +521,7 @@ static void od_exit(struct dbs_data *dbs_data)
41050
41051 define_get_cpu_dbs_routines(od_cpu_dbs_info);
41052
41053-static struct od_ops od_ops = {
41054+static struct od_ops od_ops __read_only = {
41055 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
41056 .powersave_bias_target = generic_powersave_bias_target,
41057 .freq_increase = dbs_freq_increase,
41058@@ -576,14 +576,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
41059 (struct cpufreq_policy *, unsigned int, unsigned int),
41060 unsigned int powersave_bias)
41061 {
41062- od_ops.powersave_bias_target = f;
41063+ pax_open_kernel();
41064+ *(void **)&od_ops.powersave_bias_target = f;
41065+ pax_close_kernel();
41066 od_set_powersave_bias(powersave_bias);
41067 }
41068 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
41069
41070 void od_unregister_powersave_bias_handler(void)
41071 {
41072- od_ops.powersave_bias_target = generic_powersave_bias_target;
41073+ pax_open_kernel();
41074+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
41075+ pax_close_kernel();
41076 od_set_powersave_bias(0);
41077 }
41078 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
41079diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
41080index 86631cb..c34ec78 100644
41081--- a/drivers/cpufreq/intel_pstate.c
41082+++ b/drivers/cpufreq/intel_pstate.c
41083@@ -121,10 +121,10 @@ struct pstate_funcs {
41084 struct cpu_defaults {
41085 struct pstate_adjust_policy pid_policy;
41086 struct pstate_funcs funcs;
41087-};
41088+} __do_const;
41089
41090 static struct pstate_adjust_policy pid_params;
41091-static struct pstate_funcs pstate_funcs;
41092+static struct pstate_funcs *pstate_funcs;
41093
41094 struct perf_limits {
41095 int no_turbo;
41096@@ -526,7 +526,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
41097
41098 cpu->pstate.current_pstate = pstate;
41099
41100- pstate_funcs.set(cpu, pstate);
41101+ pstate_funcs->set(cpu, pstate);
41102 }
41103
41104 static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
41105@@ -546,12 +546,12 @@ static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
41106
41107 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
41108 {
41109- cpu->pstate.min_pstate = pstate_funcs.get_min();
41110- cpu->pstate.max_pstate = pstate_funcs.get_max();
41111- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
41112+ cpu->pstate.min_pstate = pstate_funcs->get_min();
41113+ cpu->pstate.max_pstate = pstate_funcs->get_max();
41114+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
41115
41116- if (pstate_funcs.get_vid)
41117- pstate_funcs.get_vid(cpu);
41118+ if (pstate_funcs->get_vid)
41119+ pstate_funcs->get_vid(cpu);
41120 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
41121 }
41122
41123@@ -838,9 +838,9 @@ static int intel_pstate_msrs_not_valid(void)
41124 rdmsrl(MSR_IA32_APERF, aperf);
41125 rdmsrl(MSR_IA32_MPERF, mperf);
41126
41127- if (!pstate_funcs.get_max() ||
41128- !pstate_funcs.get_min() ||
41129- !pstate_funcs.get_turbo())
41130+ if (!pstate_funcs->get_max() ||
41131+ !pstate_funcs->get_min() ||
41132+ !pstate_funcs->get_turbo())
41133 return -ENODEV;
41134
41135 rdmsrl(MSR_IA32_APERF, tmp);
41136@@ -854,7 +854,7 @@ static int intel_pstate_msrs_not_valid(void)
41137 return 0;
41138 }
41139
41140-static void copy_pid_params(struct pstate_adjust_policy *policy)
41141+static void copy_pid_params(const struct pstate_adjust_policy *policy)
41142 {
41143 pid_params.sample_rate_ms = policy->sample_rate_ms;
41144 pid_params.p_gain_pct = policy->p_gain_pct;
41145@@ -866,11 +866,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
41146
41147 static void copy_cpu_funcs(struct pstate_funcs *funcs)
41148 {
41149- pstate_funcs.get_max = funcs->get_max;
41150- pstate_funcs.get_min = funcs->get_min;
41151- pstate_funcs.get_turbo = funcs->get_turbo;
41152- pstate_funcs.set = funcs->set;
41153- pstate_funcs.get_vid = funcs->get_vid;
41154+ pstate_funcs = funcs;
41155 }
41156
41157 #if IS_ENABLED(CONFIG_ACPI)
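
The intel_pstate rework removes the last writable copy of the function table: cpu_defaults is now __do_const, so the static parameter tables live in read-only memory, and instead of copying the chosen entry's pstate_funcs field by field into a writable static struct, copy_cpu_funcs() keeps a single pointer to it; call sites dereference pstate_funcs->, leaving no writable function pointers to corrupt. A minimal sketch of the idea, with my_funcs/get_max hypothetical:

    struct my_funcs { int (*get_max)(void); } __do_const;

    static struct my_funcs *funcs;  /* was: static struct my_funcs funcs; */

    static void select_funcs(struct my_funcs *f)
    {
            funcs = f;      /* one assignment replaces field-by-field copies */
    }
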
41158diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
41159index 529cfd9..0e28fff 100644
41160--- a/drivers/cpufreq/p4-clockmod.c
41161+++ b/drivers/cpufreq/p4-clockmod.c
41162@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
41163 case 0x0F: /* Core Duo */
41164 case 0x16: /* Celeron Core */
41165 case 0x1C: /* Atom */
41166- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41167+ pax_open_kernel();
41168+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41169+ pax_close_kernel();
41170 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
41171 case 0x0D: /* Pentium M (Dothan) */
41172- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41173+ pax_open_kernel();
41174+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41175+ pax_close_kernel();
41176 /* fall through */
41177 case 0x09: /* Pentium M (Banias) */
41178 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
41179@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
41180
41181 /* on P-4s, the TSC runs with constant frequency independent whether
41182 * throttling is active or not. */
41183- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41184+ pax_open_kernel();
41185+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41186+ pax_close_kernel();
41187
41188 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
41189 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
41190diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
41191index 9bb42ba..b01b4a2 100644
41192--- a/drivers/cpufreq/sparc-us3-cpufreq.c
41193+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
41194@@ -18,14 +18,12 @@
41195 #include <asm/head.h>
41196 #include <asm/timer.h>
41197
41198-static struct cpufreq_driver *cpufreq_us3_driver;
41199-
41200 struct us3_freq_percpu_info {
41201 struct cpufreq_frequency_table table[4];
41202 };
41203
41204 /* Indexed by cpu number. */
41205-static struct us3_freq_percpu_info *us3_freq_table;
41206+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
41207
41208 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
41209 * in the Safari config register.
41210@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
41211
41212 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
41213 {
41214- if (cpufreq_us3_driver)
41215- us3_freq_target(policy, 0);
41216+ us3_freq_target(policy, 0);
41217
41218 return 0;
41219 }
41220
41221+static int __init us3_freq_init(void);
41222+static void __exit us3_freq_exit(void);
41223+
41224+static struct cpufreq_driver cpufreq_us3_driver = {
41225+ .init = us3_freq_cpu_init,
41226+ .verify = cpufreq_generic_frequency_table_verify,
41227+ .target_index = us3_freq_target,
41228+ .get = us3_freq_get,
41229+ .exit = us3_freq_cpu_exit,
41230+ .name = "UltraSPARC-III",
41231+
41232+};
41233+
41234 static int __init us3_freq_init(void)
41235 {
41236 unsigned long manuf, impl, ver;
41237- int ret;
41238
41239 if (tlb_type != cheetah && tlb_type != cheetah_plus)
41240 return -ENODEV;
41241@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
41242 (impl == CHEETAH_IMPL ||
41243 impl == CHEETAH_PLUS_IMPL ||
41244 impl == JAGUAR_IMPL ||
41245- impl == PANTHER_IMPL)) {
41246- struct cpufreq_driver *driver;
41247-
41248- ret = -ENOMEM;
41249- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
41250- if (!driver)
41251- goto err_out;
41252-
41253- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
41254- GFP_KERNEL);
41255- if (!us3_freq_table)
41256- goto err_out;
41257-
41258- driver->init = us3_freq_cpu_init;
41259- driver->verify = cpufreq_generic_frequency_table_verify;
41260- driver->target_index = us3_freq_target;
41261- driver->get = us3_freq_get;
41262- driver->exit = us3_freq_cpu_exit;
41263- strcpy(driver->name, "UltraSPARC-III");
41264-
41265- cpufreq_us3_driver = driver;
41266- ret = cpufreq_register_driver(driver);
41267- if (ret)
41268- goto err_out;
41269-
41270- return 0;
41271-
41272-err_out:
41273- if (driver) {
41274- kfree(driver);
41275- cpufreq_us3_driver = NULL;
41276- }
41277- kfree(us3_freq_table);
41278- us3_freq_table = NULL;
41279- return ret;
41280- }
41281+ impl == PANTHER_IMPL))
41282+ return cpufreq_register_driver(&cpufreq_us3_driver);
41283
41284 return -ENODEV;
41285 }
41286
41287 static void __exit us3_freq_exit(void)
41288 {
41289- if (cpufreq_us3_driver) {
41290- cpufreq_unregister_driver(cpufreq_us3_driver);
41291- kfree(cpufreq_us3_driver);
41292- cpufreq_us3_driver = NULL;
41293- kfree(us3_freq_table);
41294- us3_freq_table = NULL;
41295- }
41296+ cpufreq_unregister_driver(&cpufreq_us3_driver);
41297 }
41298
41299 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
41300diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
41301index 7d4a315..21bb886 100644
41302--- a/drivers/cpufreq/speedstep-centrino.c
41303+++ b/drivers/cpufreq/speedstep-centrino.c
41304@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
41305 !cpu_has(cpu, X86_FEATURE_EST))
41306 return -ENODEV;
41307
41308- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
41309- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
41310+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
41311+ pax_open_kernel();
41312+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
41313+ pax_close_kernel();
41314+ }
41315
41316 if (policy->cpu != 0)
41317 return -ENODEV;
41318diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
41319index 9634f20..e1499c7 100644
41320--- a/drivers/cpuidle/driver.c
41321+++ b/drivers/cpuidle/driver.c
41322@@ -205,7 +205,7 @@ static int poll_idle(struct cpuidle_device *dev,
41323
41324 static void poll_idle_init(struct cpuidle_driver *drv)
41325 {
41326- struct cpuidle_state *state = &drv->states[0];
41327+ cpuidle_state_no_const *state = &drv->states[0];
41328
41329 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
41330 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
41331diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
41332index ca89412..a7b9c49 100644
41333--- a/drivers/cpuidle/governor.c
41334+++ b/drivers/cpuidle/governor.c
41335@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
41336 mutex_lock(&cpuidle_lock);
41337 if (__cpuidle_find_governor(gov->name) == NULL) {
41338 ret = 0;
41339- list_add_tail(&gov->governor_list, &cpuidle_governors);
41340+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
41341 if (!cpuidle_curr_governor ||
41342 cpuidle_curr_governor->rating < gov->rating)
41343 cpuidle_switch_governor(gov);
41344diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
41345index efe2f17..b8124f9 100644
41346--- a/drivers/cpuidle/sysfs.c
41347+++ b/drivers/cpuidle/sysfs.c
41348@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
41349 NULL
41350 };
41351
41352-static struct attribute_group cpuidle_attr_group = {
41353+static attribute_group_no_const cpuidle_attr_group = {
41354 .attrs = cpuidle_default_attrs,
41355 .name = "cpuidle",
41356 };
41357diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
41358index 12fea3e..1e28f47 100644
41359--- a/drivers/crypto/hifn_795x.c
41360+++ b/drivers/crypto/hifn_795x.c
41361@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
41362 MODULE_PARM_DESC(hifn_pll_ref,
41363 "PLL reference clock (pci[freq] or ext[freq], default ext)");
41364
41365-static atomic_t hifn_dev_number;
41366+static atomic_unchecked_t hifn_dev_number;
41367
41368 #define ACRYPTO_OP_DECRYPT 0
41369 #define ACRYPTO_OP_ENCRYPT 1
41370@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
41371 goto err_out_disable_pci_device;
41372
41373 snprintf(name, sizeof(name), "hifn%d",
41374- atomic_inc_return(&hifn_dev_number)-1);
41375+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
41376
41377 err = pci_request_regions(pdev, name);
41378 if (err)
41379diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
41380index 9f90369..bfcacdb 100644
41381--- a/drivers/devfreq/devfreq.c
41382+++ b/drivers/devfreq/devfreq.c
41383@@ -673,7 +673,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
41384 goto err_out;
41385 }
41386
41387- list_add(&governor->node, &devfreq_governor_list);
41388+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
41389
41390 list_for_each_entry(devfreq, &devfreq_list, node) {
41391 int ret = 0;
41392@@ -761,7 +761,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
41393 }
41394 }
41395
41396- list_del(&governor->node);
41397+ pax_list_del((struct list_head *)&governor->node);
41398 err_out:
41399 mutex_unlock(&devfreq_list_lock);
41400
41401diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
41402index b35007e..55ad549 100644
41403--- a/drivers/dma/sh/shdma-base.c
41404+++ b/drivers/dma/sh/shdma-base.c
41405@@ -267,8 +267,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
41406 schan->slave_id = -EINVAL;
41407 }
41408
41409- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
41410- sdev->desc_size, GFP_KERNEL);
41411+ schan->desc = kcalloc(sdev->desc_size,
41412+ NR_DESCS_PER_CHANNEL, GFP_KERNEL);
41413 if (!schan->desc) {
41414 ret = -ENOMEM;
41415 goto edescalloc;
41416diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
41417index 146d5df..3c14970 100644
41418--- a/drivers/dma/sh/shdmac.c
41419+++ b/drivers/dma/sh/shdmac.c
41420@@ -514,7 +514,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
41421 return ret;
41422 }
41423
41424-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
41425+static struct notifier_block sh_dmae_nmi_notifier = {
41426 .notifier_call = sh_dmae_nmi_handler,
41427
41428 /* Run before NMI debug handler and KGDB */
41429diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
41430index 592af5f..bb1d583 100644
41431--- a/drivers/edac/edac_device.c
41432+++ b/drivers/edac/edac_device.c
41433@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
41434 */
41435 int edac_device_alloc_index(void)
41436 {
41437- static atomic_t device_indexes = ATOMIC_INIT(0);
41438+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
41439
41440- return atomic_inc_return(&device_indexes) - 1;
41441+ return atomic_inc_return_unchecked(&device_indexes) - 1;
41442 }
41443 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
41444
41445diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
41446index 01fae82..1dd8289 100644
41447--- a/drivers/edac/edac_mc_sysfs.c
41448+++ b/drivers/edac/edac_mc_sysfs.c
41449@@ -152,7 +152,7 @@ static const char * const edac_caps[] = {
41450 struct dev_ch_attribute {
41451 struct device_attribute attr;
41452 int channel;
41453-};
41454+} __do_const;
41455
41456 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
41457 struct dev_ch_attribute dev_attr_legacy_##_name = \
41458@@ -1009,14 +1009,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
41459 }
41460
41461 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
41462+ pax_open_kernel();
41463 if (mci->get_sdram_scrub_rate) {
41464- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
41465- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
41466+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
41467+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
41468 }
41469 if (mci->set_sdram_scrub_rate) {
41470- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
41471- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
41472+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
41473+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
41474 }
41475+ pax_close_kernel();
41476 err = device_create_file(&mci->dev,
41477 &dev_attr_sdram_scrub_rate);
41478 if (err) {
41479diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
41480index 2cf44b4d..6dd2dc7 100644
41481--- a/drivers/edac/edac_pci.c
41482+++ b/drivers/edac/edac_pci.c
41483@@ -29,7 +29,7 @@
41484
41485 static DEFINE_MUTEX(edac_pci_ctls_mutex);
41486 static LIST_HEAD(edac_pci_list);
41487-static atomic_t pci_indexes = ATOMIC_INIT(0);
41488+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
41489
41490 /*
41491 * edac_pci_alloc_ctl_info
41492@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
41493 */
41494 int edac_pci_alloc_index(void)
41495 {
41496- return atomic_inc_return(&pci_indexes) - 1;
41497+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
41498 }
41499 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
41500
41501diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
41502index e8658e4..22746d6 100644
41503--- a/drivers/edac/edac_pci_sysfs.c
41504+++ b/drivers/edac/edac_pci_sysfs.c
41505@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
41506 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
41507 static int edac_pci_poll_msec = 1000; /* one second workq period */
41508
41509-static atomic_t pci_parity_count = ATOMIC_INIT(0);
41510-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
41511+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
41512+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
41513
41514 static struct kobject *edac_pci_top_main_kobj;
41515 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
41516@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
41517 void *value;
41518 ssize_t(*show) (void *, char *);
41519 ssize_t(*store) (void *, const char *, size_t);
41520-};
41521+} __do_const;
41522
41523 /* Set of show/store abstract level functions for PCI Parity object */
41524 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
41525@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41526 edac_printk(KERN_CRIT, EDAC_PCI,
41527 "Signaled System Error on %s\n",
41528 pci_name(dev));
41529- atomic_inc(&pci_nonparity_count);
41530+ atomic_inc_unchecked(&pci_nonparity_count);
41531 }
41532
41533 if (status & (PCI_STATUS_PARITY)) {
41534@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41535 "Master Data Parity Error on %s\n",
41536 pci_name(dev));
41537
41538- atomic_inc(&pci_parity_count);
41539+ atomic_inc_unchecked(&pci_parity_count);
41540 }
41541
41542 if (status & (PCI_STATUS_DETECTED_PARITY)) {
41543@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41544 "Detected Parity Error on %s\n",
41545 pci_name(dev));
41546
41547- atomic_inc(&pci_parity_count);
41548+ atomic_inc_unchecked(&pci_parity_count);
41549 }
41550 }
41551
41552@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41553 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
41554 "Signaled System Error on %s\n",
41555 pci_name(dev));
41556- atomic_inc(&pci_nonparity_count);
41557+ atomic_inc_unchecked(&pci_nonparity_count);
41558 }
41559
41560 if (status & (PCI_STATUS_PARITY)) {
41561@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41562 "Master Data Parity Error on "
41563 "%s\n", pci_name(dev));
41564
41565- atomic_inc(&pci_parity_count);
41566+ atomic_inc_unchecked(&pci_parity_count);
41567 }
41568
41569 if (status & (PCI_STATUS_DETECTED_PARITY)) {
41570@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41571 "Detected Parity Error on %s\n",
41572 pci_name(dev));
41573
41574- atomic_inc(&pci_parity_count);
41575+ atomic_inc_unchecked(&pci_parity_count);
41576 }
41577 }
41578 }
41579@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
41580 if (!check_pci_errors)
41581 return;
41582
41583- before_count = atomic_read(&pci_parity_count);
41584+ before_count = atomic_read_unchecked(&pci_parity_count);
41585
41586 /* scan all PCI devices looking for a Parity Error on devices and
41587 * bridges.
41588@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
41589 /* Only if operator has selected panic on PCI Error */
41590 if (edac_pci_get_panic_on_pe()) {
41591 /* If the count is different 'after' from 'before' */
41592- if (before_count != atomic_read(&pci_parity_count))
41593+ if (before_count != atomic_read_unchecked(&pci_parity_count))
41594 panic("EDAC: PCI Parity Error");
41595 }
41596 }
41597diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
41598index 51b7e3a..aa8a3e8 100644
41599--- a/drivers/edac/mce_amd.h
41600+++ b/drivers/edac/mce_amd.h
41601@@ -77,7 +77,7 @@ struct amd_decoder_ops {
41602 bool (*mc0_mce)(u16, u8);
41603 bool (*mc1_mce)(u16, u8);
41604 bool (*mc2_mce)(u16, u8);
41605-};
41606+} __no_const;
41607
41608 void amd_report_gart_errors(bool);
41609 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
41610diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
41611index 57ea7f4..af06b76 100644
41612--- a/drivers/firewire/core-card.c
41613+++ b/drivers/firewire/core-card.c
41614@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
41615 const struct fw_card_driver *driver,
41616 struct device *device)
41617 {
41618- static atomic_t index = ATOMIC_INIT(-1);
41619+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
41620
41621- card->index = atomic_inc_return(&index);
41622+ card->index = atomic_inc_return_unchecked(&index);
41623 card->driver = driver;
41624 card->device = device;
41625 card->current_tlabel = 0;
41626@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
41627
41628 void fw_core_remove_card(struct fw_card *card)
41629 {
41630- struct fw_card_driver dummy_driver = dummy_driver_template;
41631+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
41632
41633 card->driver->update_phy_reg(card, 4,
41634 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
41635diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
41636index 2c6d5e1..a2cca6b 100644
41637--- a/drivers/firewire/core-device.c
41638+++ b/drivers/firewire/core-device.c
41639@@ -253,7 +253,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
41640 struct config_rom_attribute {
41641 struct device_attribute attr;
41642 u32 key;
41643-};
41644+} __do_const;
41645
41646 static ssize_t show_immediate(struct device *dev,
41647 struct device_attribute *dattr, char *buf)
41648diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
41649index eb6935c..3cc2bfa 100644
41650--- a/drivers/firewire/core-transaction.c
41651+++ b/drivers/firewire/core-transaction.c
41652@@ -38,6 +38,7 @@
41653 #include <linux/timer.h>
41654 #include <linux/types.h>
41655 #include <linux/workqueue.h>
41656+#include <linux/sched.h>
41657
41658 #include <asm/byteorder.h>
41659
41660diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
41661index e1480ff6..1a429bd 100644
41662--- a/drivers/firewire/core.h
41663+++ b/drivers/firewire/core.h
41664@@ -111,6 +111,7 @@ struct fw_card_driver {
41665
41666 int (*stop_iso)(struct fw_iso_context *ctx);
41667 };
41668+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
41669
41670 void fw_card_initialize(struct fw_card *card,
41671 const struct fw_card_driver *driver, struct device *device);
41672diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
41673index a66a321..f6caf20 100644
41674--- a/drivers/firewire/ohci.c
41675+++ b/drivers/firewire/ohci.c
41676@@ -2056,10 +2056,12 @@ static void bus_reset_work(struct work_struct *work)
41677 be32_to_cpu(ohci->next_header));
41678 }
41679
41680+#ifndef CONFIG_GRKERNSEC
41681 if (param_remote_dma) {
41682 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
41683 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
41684 }
41685+#endif
41686
41687 spin_unlock_irq(&ohci->lock);
41688
41689@@ -2591,8 +2593,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
41690 unsigned long flags;
41691 int n, ret = 0;
41692
41693+#ifndef CONFIG_GRKERNSEC
41694 if (param_remote_dma)
41695 return 0;
41696+#endif
41697
41698 /*
41699 * FIXME: Make sure this bitmask is cleared when we clear the busReset
41700diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
41701index 94a58a0..f5eba42 100644
41702--- a/drivers/firmware/dmi-id.c
41703+++ b/drivers/firmware/dmi-id.c
41704@@ -16,7 +16,7 @@
41705 struct dmi_device_attribute{
41706 struct device_attribute dev_attr;
41707 int field;
41708-};
41709+} __do_const;
41710 #define to_dmi_dev_attr(_dev_attr) \
41711 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
41712
41713diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
41714index 17afc51..0ef90cd 100644
41715--- a/drivers/firmware/dmi_scan.c
41716+++ b/drivers/firmware/dmi_scan.c
41717@@ -835,7 +835,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
41718 if (buf == NULL)
41719 return -1;
41720
41721- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
41722+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
41723
41724 dmi_unmap(buf);
41725 return 0;
41726diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
41727index 1491dd4..aa910db 100644
41728--- a/drivers/firmware/efi/cper.c
41729+++ b/drivers/firmware/efi/cper.c
41730@@ -41,12 +41,12 @@
41731 */
41732 u64 cper_next_record_id(void)
41733 {
41734- static atomic64_t seq;
41735+ static atomic64_unchecked_t seq;
41736
41737- if (!atomic64_read(&seq))
41738- atomic64_set(&seq, ((u64)get_seconds()) << 32);
41739+ if (!atomic64_read_unchecked(&seq))
41740+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
41741
41742- return atomic64_inc_return(&seq);
41743+ return atomic64_inc_return_unchecked(&seq);
41744 }
41745 EXPORT_SYMBOL_GPL(cper_next_record_id);
41746
41747diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
41748index dc79346..b39bd69 100644
41749--- a/drivers/firmware/efi/efi.c
41750+++ b/drivers/firmware/efi/efi.c
41751@@ -122,14 +122,16 @@ static struct attribute_group efi_subsys_attr_group = {
41752 };
41753
41754 static struct efivars generic_efivars;
41755-static struct efivar_operations generic_ops;
41756+static efivar_operations_no_const generic_ops __read_only;
41757
41758 static int generic_ops_register(void)
41759 {
41760- generic_ops.get_variable = efi.get_variable;
41761- generic_ops.set_variable = efi.set_variable;
41762- generic_ops.get_next_variable = efi.get_next_variable;
41763- generic_ops.query_variable_store = efi_query_variable_store;
41764+ pax_open_kernel();
41765+ *(void **)&generic_ops.get_variable = efi.get_variable;
41766+ *(void **)&generic_ops.set_variable = efi.set_variable;
41767+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
41768+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
41769+ pax_close_kernel();
41770
41771 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
41772 }
41773diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
41774index 463c565..02a5640 100644
41775--- a/drivers/firmware/efi/efivars.c
41776+++ b/drivers/firmware/efi/efivars.c
41777@@ -588,7 +588,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
41778 static int
41779 create_efivars_bin_attributes(void)
41780 {
41781- struct bin_attribute *attr;
41782+ bin_attribute_no_const *attr;
41783 int error;
41784
41785 /* new_var */
41786diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
41787index 2f569aa..c95f4fb 100644
41788--- a/drivers/firmware/google/memconsole.c
41789+++ b/drivers/firmware/google/memconsole.c
41790@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
41791 if (!found_memconsole())
41792 return -ENODEV;
41793
41794- memconsole_bin_attr.size = memconsole_length;
41795+ pax_open_kernel();
41796+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
41797+ pax_close_kernel();
41798+
41799 return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
41800 }
41801
41802diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
41803index cde3605..8b69df7 100644
41804--- a/drivers/gpio/gpio-em.c
41805+++ b/drivers/gpio/gpio-em.c
41806@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
41807 struct em_gio_priv *p;
41808 struct resource *io[2], *irq[2];
41809 struct gpio_chip *gpio_chip;
41810- struct irq_chip *irq_chip;
41811+ irq_chip_no_const *irq_chip;
41812 const char *name = dev_name(&pdev->dev);
41813 int ret;
41814
41815diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
41816index 7030422..42a3fe9 100644
41817--- a/drivers/gpio/gpio-ich.c
41818+++ b/drivers/gpio/gpio-ich.c
41819@@ -94,7 +94,7 @@ struct ichx_desc {
41820 * this option allows driver caching written output values
41821 */
41822 bool use_outlvl_cache;
41823-};
41824+} __do_const;
41825
41826 static struct {
41827 spinlock_t lock;
41828diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
41829index b6ae89e..ac7349c 100644
41830--- a/drivers/gpio/gpio-rcar.c
41831+++ b/drivers/gpio/gpio-rcar.c
41832@@ -357,7 +357,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
41833 struct gpio_rcar_priv *p;
41834 struct resource *io, *irq;
41835 struct gpio_chip *gpio_chip;
41836- struct irq_chip *irq_chip;
41837+ irq_chip_no_const *irq_chip;
41838 struct device *dev = &pdev->dev;
41839 const char *name = dev_name(dev);
41840 int ret;
41841diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
41842index 66cbcc1..0c5e622 100644
41843--- a/drivers/gpio/gpio-vr41xx.c
41844+++ b/drivers/gpio/gpio-vr41xx.c
41845@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
41846 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
41847 maskl, pendl, maskh, pendh);
41848
41849- atomic_inc(&irq_err_count);
41850+ atomic_inc_unchecked(&irq_err_count);
41851
41852 return -EINVAL;
41853 }
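
irq_err_count is a statistic, not a reference count, so it may legitimately wrap; under PaX REFCOUNT, plain atomic_inc() traps on signed overflow, which is why such counters are switched to atomic_inc_unchecked(). A user-space sketch of the distinction with C11 atomics follows; checked_inc/unchecked_inc model the kernel helpers, they are not the real API.

#include <assert.h>
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static void checked_inc(atomic_int *v)     /* models atomic_inc() under REFCOUNT */
{
    int old = atomic_fetch_add(v, 1);
    assert(old != INT_MAX && "refcount overflow");   /* PaX would trap here */
}

static void unchecked_inc(atomic_int *v)   /* models atomic_inc_unchecked() */
{
    atomic_fetch_add(v, 1);                /* wrap-around is acceptable here */
}

int main(void)
{
    atomic_int refs = 0;
    atomic_int errs = INT_MAX;             /* a statistic near the wrap point */

    checked_inc(&refs);                    /* fine: 0 -> 1 */
    unchecked_inc(&errs);                  /* fine: wraps, no trap wanted */
    printf("%d %d\n", atomic_load(&refs), atomic_load(&errs));
    return 0;
}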
41854diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
41855index 2ebc907..01bdd6e 100644
41856--- a/drivers/gpio/gpiolib.c
41857+++ b/drivers/gpio/gpiolib.c
41858@@ -1482,8 +1482,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
41859 }
41860
41861 if (gpiochip->irqchip) {
41862- gpiochip->irqchip->irq_request_resources = NULL;
41863- gpiochip->irqchip->irq_release_resources = NULL;
41864+ pax_open_kernel();
41865+ *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
41866+ *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
41867+ pax_close_kernel();
41868 gpiochip->irqchip = NULL;
41869 }
41870 }
41871@@ -1549,8 +1551,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
41872 gpiochip->irqchip = NULL;
41873 return -EINVAL;
41874 }
41875- irqchip->irq_request_resources = gpiochip_irq_reqres;
41876- irqchip->irq_release_resources = gpiochip_irq_relres;
41877+
41878+ pax_open_kernel();
41879+ *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
41880+ *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
41881+ pax_close_kernel();
41882
41883 /*
41884 * Prepare the mapping since the irqchip shall be orthogonal to
41885diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
41886index fe94cc1..5e697b3 100644
41887--- a/drivers/gpu/drm/drm_crtc.c
41888+++ b/drivers/gpu/drm/drm_crtc.c
41889@@ -3584,7 +3584,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
41890 goto done;
41891 }
41892
41893- if (copy_to_user(&enum_ptr[copied].name,
41894+ if (copy_to_user(enum_ptr[copied].name,
41895 &prop_enum->name, DRM_PROP_NAME_LEN)) {
41896 ret = -EFAULT;
41897 goto done;
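
The drm_crtc change is a type fix rather than a behavior fix: name is a char array, so &enum_ptr[copied].name and enum_ptr[copied].name address the same bytes, but only the latter has the char * type that copy_to_user()'s __user-annotated destination expects under sparse. Stand-alone illustration:

#include <stdio.h>

struct prop_enum { char name[32]; };

int main(void)
{
    struct prop_enum e;

    printf("%p %p\n", (void *)e.name, (void *)&e.name);  /* same address */

    /* char *p = &e.name;    incompatible pointer types: char (*)[32] */
    char *p = e.name;        /* array decays to char *, the right type */
    (void)p;
    return 0;
}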
41898diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
41899index 8218078..9960928a 100644
41900--- a/drivers/gpu/drm/drm_drv.c
41901+++ b/drivers/gpu/drm/drm_drv.c
41902@@ -233,7 +233,7 @@ module_exit(drm_core_exit);
41903 /**
 41904 * Copy an IOCTL return string to user space
41905 */
41906-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
41907+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
41908 {
41909 int len;
41910
41911@@ -342,7 +342,7 @@ long drm_ioctl(struct file *filp,
41912 struct drm_file *file_priv = filp->private_data;
41913 struct drm_device *dev;
41914 const struct drm_ioctl_desc *ioctl = NULL;
41915- drm_ioctl_t *func;
41916+ drm_ioctl_no_const_t func;
41917 unsigned int nr = DRM_IOCTL_NR(cmd);
41918 int retcode = -EINVAL;
41919 char stack_kdata[128];
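
Two annotations are added in drm_drv.c: drm_copy_field()'s destination gains __user, and the ioctl dispatch pointer becomes a no_const typedef. __user costs nothing at run time; it is a sparse address-space marker that makes any direct dereference of a userspace pointer a reportable error, funneling access through copy_to_user()/copy_from_user(). A toy expansion, with memcpy standing in for copy_to_user():

#include <stdio.h>
#include <string.h>

#ifdef __CHECKER__                       /* defined when sparse runs */
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user                          /* vanishes for the compiler */
#endif

static int copy_field(char __user *buf, size_t *buf_len, const char *value)
{
    size_t len = strlen(value);

    if (len > *buf_len)
        len = *buf_len;
    /* kernel: if (copy_to_user(buf, value, len)) return -EFAULT; */
    memcpy((char *)buf, value, len);     /* user-space stand-in */
    return 0;
}

int main(void)
{
    char out[16];
    size_t n = sizeof(out);

    copy_field(out, &n, "drm");
    printf("%.3s\n", out);
    return 0;
}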
41920diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
41921index 021fe5d..abc9ce6 100644
41922--- a/drivers/gpu/drm/drm_fops.c
41923+++ b/drivers/gpu/drm/drm_fops.c
41924@@ -88,7 +88,7 @@ int drm_open(struct inode *inode, struct file *filp)
41925 return PTR_ERR(minor);
41926
41927 dev = minor->dev;
41928- if (!dev->open_count++)
41929+ if (local_inc_return(&dev->open_count) == 1)
41930 need_setup = 1;
41931
41932 /* share address_space across all char-devs of a single device */
41933@@ -105,7 +105,7 @@ int drm_open(struct inode *inode, struct file *filp)
41934 return 0;
41935
41936 err_undo:
41937- dev->open_count--;
41938+ local_dec(&dev->open_count);
41939 drm_minor_release(minor);
41940 return retcode;
41941 }
41942@@ -427,7 +427,7 @@ int drm_release(struct inode *inode, struct file *filp)
41943
41944 mutex_lock(&drm_global_mutex);
41945
41946- DRM_DEBUG("open_count = %d\n", dev->open_count);
41947+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
41948
41949 if (dev->driver->preclose)
41950 dev->driver->preclose(dev, file_priv);
41951@@ -436,10 +436,10 @@ int drm_release(struct inode *inode, struct file *filp)
41952 * Begin inline drm_release
41953 */
41954
41955- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
41956+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
41957 task_pid_nr(current),
41958 (long)old_encode_dev(file_priv->minor->kdev->devt),
41959- dev->open_count);
41960+ local_read(&dev->open_count));
41961
41962 /* Release any auth tokens that might point to this file_priv,
41963 (do that under the drm_global_mutex) */
41964@@ -540,7 +540,7 @@ int drm_release(struct inode *inode, struct file *filp)
41965 * End inline drm_release
41966 */
41967
41968- if (!--dev->open_count) {
41969+ if (local_dec_and_test(&dev->open_count)) {
41970 retcode = drm_lastclose(dev);
41971 if (drm_device_is_unplugged(dev))
41972 drm_put_dev(dev);
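
dev->open_count changes from a plain int to a local_t so that the modify-and-test pairs ("!dev->open_count++", "!--dev->open_count") become single atomic steps: local_inc_return() and local_dec_and_test(). A rough user-space model with C11 atomics; the kernel's local_t is additionally per-CPU, which this sketch ignores.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef atomic_long local_t;

static long local_inc_return(local_t *l)
{
    return atomic_fetch_add(l, 1) + 1;        /* new value, atomically */
}

static bool local_dec_and_test(local_t *l)
{
    return atomic_fetch_sub(l, 1) - 1 == 0;   /* true when it hits zero */
}

int main(void)
{
    local_t open_count = 0;

    if (local_inc_return(&open_count) == 1)   /* first opener */
        puts("need_setup");
    if (local_dec_and_test(&open_count))      /* last closer */
        puts("lastclose");
    return 0;
}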
41973diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
41974index 3d2e91c..d31c4c9 100644
41975--- a/drivers/gpu/drm/drm_global.c
41976+++ b/drivers/gpu/drm/drm_global.c
41977@@ -36,7 +36,7 @@
41978 struct drm_global_item {
41979 struct mutex mutex;
41980 void *object;
41981- int refcount;
41982+ atomic_t refcount;
41983 };
41984
41985 static struct drm_global_item glob[DRM_GLOBAL_NUM];
41986@@ -49,7 +49,7 @@ void drm_global_init(void)
41987 struct drm_global_item *item = &glob[i];
41988 mutex_init(&item->mutex);
41989 item->object = NULL;
41990- item->refcount = 0;
41991+ atomic_set(&item->refcount, 0);
41992 }
41993 }
41994
41995@@ -59,7 +59,7 @@ void drm_global_release(void)
41996 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
41997 struct drm_global_item *item = &glob[i];
41998 BUG_ON(item->object != NULL);
41999- BUG_ON(item->refcount != 0);
42000+ BUG_ON(atomic_read(&item->refcount) != 0);
42001 }
42002 }
42003
42004@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
42005 struct drm_global_item *item = &glob[ref->global_type];
42006
42007 mutex_lock(&item->mutex);
42008- if (item->refcount == 0) {
42009+ if (atomic_read(&item->refcount) == 0) {
42010 item->object = kzalloc(ref->size, GFP_KERNEL);
42011 if (unlikely(item->object == NULL)) {
42012 ret = -ENOMEM;
42013@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
42014 goto out_err;
42015
42016 }
42017- ++item->refcount;
42018+ atomic_inc(&item->refcount);
42019 ref->object = item->object;
42020 mutex_unlock(&item->mutex);
42021 return 0;
42022@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
42023 struct drm_global_item *item = &glob[ref->global_type];
42024
42025 mutex_lock(&item->mutex);
42026- BUG_ON(item->refcount == 0);
42027+ BUG_ON(atomic_read(&item->refcount) == 0);
42028 BUG_ON(ref->object != item->object);
42029- if (--item->refcount == 0) {
42030+ if (atomic_dec_and_test(&item->refcount)) {
42031 ref->release(ref);
42032 item->object = NULL;
42033 }
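
drm_global's refcount is always manipulated under item->mutex, so the atomic_t conversion is not about lock-free access; it puts the counter under PaX REFCOUNT's overflow checking while preserving the usual release idiom, atomic_dec_and_test(). That idiom in miniature:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
    atomic_int refcount;
    void *object;
};

static void item_unref(struct item *it)
{
    if (atomic_fetch_sub(&it->refcount, 1) == 1) {   /* dec_and_test */
        free(it->object);
        it->object = NULL;
        puts("released");
    }
}

int main(void)
{
    struct item it = { 2, malloc(16) };

    item_unref(&it);   /* 2 -> 1, still live */
    item_unref(&it);   /* 1 -> 0, released   */
    return 0;
}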
42034diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
42035index 86feedd..cba70f5 100644
42036--- a/drivers/gpu/drm/drm_info.c
42037+++ b/drivers/gpu/drm/drm_info.c
42038@@ -73,10 +73,13 @@ int drm_vm_info(struct seq_file *m, void *data)
42039 struct drm_local_map *map;
42040 struct drm_map_list *r_list;
42041
42042- /* Hardcoded from _DRM_FRAME_BUFFER,
42043- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
42044- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
42045- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
42046+ static const char * const types[] = {
42047+ [_DRM_FRAME_BUFFER] = "FB",
42048+ [_DRM_REGISTERS] = "REG",
42049+ [_DRM_SHM] = "SHM",
42050+ [_DRM_AGP] = "AGP",
42051+ [_DRM_SCATTER_GATHER] = "SG",
42052+ [_DRM_CONSISTENT] = "PCI"};
42053 const char *type;
42054 int i;
42055
42056@@ -87,7 +90,7 @@ int drm_vm_info(struct seq_file *m, void *data)
42057 map = r_list->map;
42058 if (!map)
42059 continue;
42060- if (map->type < 0 || map->type > 5)
42061+ if (map->type >= ARRAY_SIZE(types))
42062 type = "??";
42063 else
42064 type = types[map->type];
42065@@ -259,7 +262,11 @@ int drm_vma_info(struct seq_file *m, void *data)
42066 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
42067 vma->vm_flags & VM_LOCKED ? 'l' : '-',
42068 vma->vm_flags & VM_IO ? 'i' : '-',
42069+#ifdef CONFIG_GRKERNSEC_HIDESYM
42070+ 0);
42071+#else
42072 vma->vm_pgoff);
42073+#endif
42074
42075 #if defined(__i386__)
42076 pgprot = pgprot_val(vma->vm_page_prot);
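
The drm_info hunk replaces a hand-maintained bound ("map->type > 5") with ARRAY_SIZE() over a designated-initializer table, so the check can never drift out of sync with the table it guards; treating the index as unsigned also makes the single >= comparison cover the old < 0 case. Reduced version:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum map_type { FB_T, REG_T, SHM_T, AGP_T, SG_T, PCI_T };

static const char * const types[] = {
    [FB_T]  = "FB",  [REG_T] = "REG", [SHM_T] = "SHM",
    [AGP_T] = "AGP", [SG_T]  = "SG",  [PCI_T] = "PCI",
};

static const char *type_name(unsigned int t)
{
    return t >= ARRAY_SIZE(types) ? "??" : types[t];
}

int main(void)
{
    printf("%s %s\n", type_name(REG_T), type_name(99u));
    return 0;
}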
42077diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
42078index 2f4c4343..dd12cd2 100644
42079--- a/drivers/gpu/drm/drm_ioc32.c
42080+++ b/drivers/gpu/drm/drm_ioc32.c
42081@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
42082 request = compat_alloc_user_space(nbytes);
42083 if (!access_ok(VERIFY_WRITE, request, nbytes))
42084 return -EFAULT;
42085- list = (struct drm_buf_desc *) (request + 1);
42086+ list = (struct drm_buf_desc __user *) (request + 1);
42087
42088 if (__put_user(count, &request->count)
42089 || __put_user(list, &request->list))
42090@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
42091 request = compat_alloc_user_space(nbytes);
42092 if (!access_ok(VERIFY_WRITE, request, nbytes))
42093 return -EFAULT;
42094- list = (struct drm_buf_pub *) (request + 1);
42095+ list = (struct drm_buf_pub __user *) (request + 1);
42096
42097 if (__put_user(count, &request->count)
42098 || __put_user(list, &request->list))
42099@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
42100 return 0;
42101 }
42102
42103-drm_ioctl_compat_t *drm_compat_ioctls[] = {
42104+drm_ioctl_compat_t drm_compat_ioctls[] = {
42105 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
42106 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
42107 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
42108@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
42109 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42110 {
42111 unsigned int nr = DRM_IOCTL_NR(cmd);
42112- drm_ioctl_compat_t *fn;
42113 int ret;
42114
42115 /* Assume that ioctls without an explicit compat routine will just
42116@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42117 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
42118 return drm_ioctl(filp, cmd, arg);
42119
42120- fn = drm_compat_ioctls[nr];
42121-
42122- if (fn != NULL)
42123- ret = (*fn) (filp, cmd, arg);
42124+ if (drm_compat_ioctls[nr] != NULL)
42125+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
42126 else
42127 ret = drm_ioctl(filp, cmd, arg);
42128
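
drm_compat_ioctls[] drops a level of indirection in its declared type: with drm_ioctl_compat_t redefined elsewhere in the patch as the function-pointer type itself, the table becomes an array of constifiable function pointers that can live in a read-only section, and the dispatcher indexes it directly instead of copying an entry into a writable temporary. A sketch under those assumptions, with invented names throughout:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

typedef int (*compat_fn_t)(unsigned int cmd, unsigned long arg);

static int do_version(unsigned int cmd, unsigned long arg)
{
    (void)cmd; (void)arg;
    return 0;
}

static compat_fn_t const compat_ioctls[] = {  /* the pointers are const */
    do_version,
    NULL,                                     /* no compat handler here */
};

int main(void)
{
    unsigned int nr = 0;
    int ret = -1;

    if (nr < ARRAY_SIZE(compat_ioctls) && compat_ioctls[nr] != NULL)
        ret = compat_ioctls[nr](0xc0, 0);     /* call through the table */
    printf("%d\n", ret);
    return 0;
}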
42129diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
42130index 14d1646..99f9d49 100644
42131--- a/drivers/gpu/drm/drm_stub.c
42132+++ b/drivers/gpu/drm/drm_stub.c
42133@@ -455,7 +455,7 @@ void drm_unplug_dev(struct drm_device *dev)
42134
42135 drm_device_set_unplugged(dev);
42136
42137- if (dev->open_count == 0) {
42138+ if (local_read(&dev->open_count) == 0) {
42139 drm_put_dev(dev);
42140 }
42141 mutex_unlock(&drm_global_mutex);
42142diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
42143index 369b262..09ea3ab 100644
42144--- a/drivers/gpu/drm/drm_sysfs.c
42145+++ b/drivers/gpu/drm/drm_sysfs.c
42146@@ -505,7 +505,7 @@ static void drm_sysfs_release(struct device *dev)
42147 */
42148 int drm_sysfs_device_add(struct drm_minor *minor)
42149 {
42150- char *minor_str;
42151+ const char *minor_str;
42152 int r;
42153
42154 if (minor->type == DRM_MINOR_CONTROL)
42155diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
42156index d4d16ed..8fb0b51 100644
42157--- a/drivers/gpu/drm/i810/i810_drv.h
42158+++ b/drivers/gpu/drm/i810/i810_drv.h
42159@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
42160 int page_flipping;
42161
42162 wait_queue_head_t irq_queue;
42163- atomic_t irq_received;
42164- atomic_t irq_emitted;
42165+ atomic_unchecked_t irq_received;
42166+ atomic_unchecked_t irq_emitted;
42167
42168 int front_offset;
42169 } drm_i810_private_t;
42170diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
42171index d443441..ab091dd 100644
42172--- a/drivers/gpu/drm/i915/i915_dma.c
42173+++ b/drivers/gpu/drm/i915/i915_dma.c
42174@@ -1290,7 +1290,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
42175 * locking inversion with the driver load path. And the access here is
42176 * completely racy anyway. So don't bother with locking for now.
42177 */
42178- return dev->open_count == 0;
42179+ return local_read(&dev->open_count) == 0;
42180 }
42181
42182 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
42183diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
42184index 3a30133..ef4a743 100644
42185--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
42186+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
42187@@ -891,9 +891,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
42188
42189 static int
42190 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
42191- int count)
42192+ unsigned int count)
42193 {
42194- int i;
42195+ unsigned int i;
42196 unsigned relocs_total = 0;
42197 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
42198
42199diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
42200index 2e0613e..a8b94d9 100644
42201--- a/drivers/gpu/drm/i915/i915_ioc32.c
42202+++ b/drivers/gpu/drm/i915/i915_ioc32.c
42203@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
42204 (unsigned long)request);
42205 }
42206
42207-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
42208+static drm_ioctl_compat_t i915_compat_ioctls[] = {
42209 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
42210 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
42211 [DRM_I915_GETPARAM] = compat_i915_getparam,
42212@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
42213 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42214 {
42215 unsigned int nr = DRM_IOCTL_NR(cmd);
42216- drm_ioctl_compat_t *fn = NULL;
42217 int ret;
42218
42219 if (nr < DRM_COMMAND_BASE)
42220 return drm_compat_ioctl(filp, cmd, arg);
42221
42222- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
42223- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
42224-
42225- if (fn != NULL)
42226+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls)) {
42227+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
42228 ret = (*fn) (filp, cmd, arg);
42229- else
42230+ } else
42231 ret = drm_ioctl(filp, cmd, arg);
42232
42233 return ret;
42234diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
42235index f0be855..94e82d9 100644
42236--- a/drivers/gpu/drm/i915/intel_display.c
42237+++ b/drivers/gpu/drm/i915/intel_display.c
42238@@ -11604,13 +11604,13 @@ struct intel_quirk {
42239 int subsystem_vendor;
42240 int subsystem_device;
42241 void (*hook)(struct drm_device *dev);
42242-};
42243+} __do_const;
42244
42245 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
42246 struct intel_dmi_quirk {
42247 void (*hook)(struct drm_device *dev);
42248 const struct dmi_system_id (*dmi_id_list)[];
42249-};
42250+} __do_const;
42251
42252 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
42253 {
42254@@ -11618,18 +11618,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
42255 return 1;
42256 }
42257
42258-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
42259+static const struct dmi_system_id intel_dmi_quirks_table[] = {
42260 {
42261- .dmi_id_list = &(const struct dmi_system_id[]) {
42262- {
42263- .callback = intel_dmi_reverse_brightness,
42264- .ident = "NCR Corporation",
42265- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
42266- DMI_MATCH(DMI_PRODUCT_NAME, ""),
42267- },
42268- },
42269- { } /* terminating entry */
42270+ .callback = intel_dmi_reverse_brightness,
42271+ .ident = "NCR Corporation",
42272+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
42273+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
42274 },
42275+ },
42276+ { } /* terminating entry */
42277+};
42278+
42279+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
42280+ {
42281+ .dmi_id_list = &intel_dmi_quirks_table,
42282 .hook = quirk_invert_brightness,
42283 },
42284 };
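
The intel_display rework hoists the anonymous compound-literal DMI table out of the intel_dmi_quirks initializer into a named static const array. With the table named, both structures can be fully const-initialized, which is what marking intel_dmi_quirk __do_const (constify into rodata) requires. Reduced illustration with invented names:

#include <stdio.h>

struct match { const char *vendor; };

struct quirk {
    const struct match (*list)[];   /* pointer to unsized const array */
    void (*hook)(void);
};

static void hook_fn(void) { puts("quirk applied"); }

static const struct match table[] = {
    { .vendor = "NCR Corporation" },
    { 0 }                           /* terminating entry */
};

static const struct quirk quirks[] = {
    { .list = &table, .hook = hook_fn },
};

int main(void)
{
    printf("%s\n", (*quirks[0].list)[0].vendor);
    quirks[0].hook();
    return 0;
}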
42285diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
42286index fe45321..836fdca 100644
42287--- a/drivers/gpu/drm/mga/mga_drv.h
42288+++ b/drivers/gpu/drm/mga/mga_drv.h
42289@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
42290 u32 clear_cmd;
42291 u32 maccess;
42292
42293- atomic_t vbl_received; /**< Number of vblanks received. */
42294+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
42295 wait_queue_head_t fence_queue;
42296- atomic_t last_fence_retired;
42297+ atomic_unchecked_t last_fence_retired;
42298 u32 next_fence_to_post;
42299
42300 unsigned int fb_cpp;
42301diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
42302index 729bfd5..ead8823 100644
42303--- a/drivers/gpu/drm/mga/mga_ioc32.c
42304+++ b/drivers/gpu/drm/mga/mga_ioc32.c
42305@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
42306 return 0;
42307 }
42308
42309-drm_ioctl_compat_t *mga_compat_ioctls[] = {
42310+drm_ioctl_compat_t mga_compat_ioctls[] = {
42311 [DRM_MGA_INIT] = compat_mga_init,
42312 [DRM_MGA_GETPARAM] = compat_mga_getparam,
42313 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
42314@@ -208,18 +208,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
42315 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42316 {
42317 unsigned int nr = DRM_IOCTL_NR(cmd);
42318- drm_ioctl_compat_t *fn = NULL;
42319 int ret;
42320
42321 if (nr < DRM_COMMAND_BASE)
42322 return drm_compat_ioctl(filp, cmd, arg);
42323
42324- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
42325- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
42326-
42327- if (fn != NULL)
42328+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls)) {
42329+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
42330 ret = (*fn) (filp, cmd, arg);
42331- else
42332+ } else
42333 ret = drm_ioctl(filp, cmd, arg);
42334
42335 return ret;
42336diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
42337index 1b071b8..de8601a 100644
42338--- a/drivers/gpu/drm/mga/mga_irq.c
42339+++ b/drivers/gpu/drm/mga/mga_irq.c
42340@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
42341 if (crtc != 0)
42342 return 0;
42343
42344- return atomic_read(&dev_priv->vbl_received);
42345+ return atomic_read_unchecked(&dev_priv->vbl_received);
42346 }
42347
42348
42349@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
42350 /* VBLANK interrupt */
42351 if (status & MGA_VLINEPEN) {
42352 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
42353- atomic_inc(&dev_priv->vbl_received);
42354+ atomic_inc_unchecked(&dev_priv->vbl_received);
42355 drm_handle_vblank(dev, 0);
42356 handled = 1;
42357 }
42358@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
42359 if ((prim_start & ~0x03) != (prim_end & ~0x03))
42360 MGA_WRITE(MGA_PRIMEND, prim_end);
42361
42362- atomic_inc(&dev_priv->last_fence_retired);
42363+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
42364 wake_up(&dev_priv->fence_queue);
42365 handled = 1;
42366 }
42367@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
42368 * using fences.
42369 */
42370 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
42371- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
42372+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
42373 - *sequence) <= (1 << 23)));
42374
42375 *sequence = cur_fence;
42376diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
42377index 8268a4c..5105708 100644
42378--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
42379+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
42380@@ -965,7 +965,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
42381 struct bit_table {
42382 const char id;
42383 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
42384-};
42385+} __no_const;
42386
42387 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
42388
42389diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
42390index 7efbafa..19f8087 100644
42391--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
42392+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
42393@@ -97,7 +97,6 @@ struct nouveau_drm {
42394 struct drm_global_reference mem_global_ref;
42395 struct ttm_bo_global_ref bo_global_ref;
42396 struct ttm_bo_device bdev;
42397- atomic_t validate_sequence;
42398 int (*move)(struct nouveau_channel *,
42399 struct ttm_buffer_object *,
42400 struct ttm_mem_reg *, struct ttm_mem_reg *);
42401diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
42402index 462679a..88e32a7 100644
42403--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
42404+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
42405@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
42406 unsigned long arg)
42407 {
42408 unsigned int nr = DRM_IOCTL_NR(cmd);
42409- drm_ioctl_compat_t *fn = NULL;
42410+ drm_ioctl_compat_t fn = NULL;
42411 int ret;
42412
42413 if (nr < DRM_COMMAND_BASE)
42414diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
42415index ab0228f..20b756b 100644
42416--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
42417+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
42418@@ -130,11 +130,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
42419 }
42420
42421 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
42422- nouveau_vram_manager_init,
42423- nouveau_vram_manager_fini,
42424- nouveau_vram_manager_new,
42425- nouveau_vram_manager_del,
42426- nouveau_vram_manager_debug
42427+ .init = nouveau_vram_manager_init,
42428+ .takedown = nouveau_vram_manager_fini,
42429+ .get_node = nouveau_vram_manager_new,
42430+ .put_node = nouveau_vram_manager_del,
42431+ .debug = nouveau_vram_manager_debug
42432 };
42433
42434 static int
42435@@ -199,11 +199,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
42436 }
42437
42438 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
42439- nouveau_gart_manager_init,
42440- nouveau_gart_manager_fini,
42441- nouveau_gart_manager_new,
42442- nouveau_gart_manager_del,
42443- nouveau_gart_manager_debug
42444+ .init = nouveau_gart_manager_init,
42445+ .takedown = nouveau_gart_manager_fini,
42446+ .get_node = nouveau_gart_manager_new,
42447+ .put_node = nouveau_gart_manager_del,
42448+ .debug = nouveau_gart_manager_debug
42449 };
42450
42451 #include <core/subdev/vm/nv04.h>
42452@@ -271,11 +271,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
42453 }
42454
42455 const struct ttm_mem_type_manager_func nv04_gart_manager = {
42456- nv04_gart_manager_init,
42457- nv04_gart_manager_fini,
42458- nv04_gart_manager_new,
42459- nv04_gart_manager_del,
42460- nv04_gart_manager_debug
42461+ .init = nv04_gart_manager_init,
42462+ .takedown = nv04_gart_manager_fini,
42463+ .get_node = nv04_gart_manager_new,
42464+ .put_node = nv04_gart_manager_del,
42465+ .debug = nv04_gart_manager_debug
42466 };
42467
42468 int
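
All three nouveau memory-manager tables switch from positional to designated initializers. Positional init silently misbinds every entry if ttm_mem_type_manager_func ever gains or reorders a member; naming the slots (.init, .takedown, .get_node, ...) pins each function to its field. The idiom in miniature:

#include <stdio.h>

struct mem_func {
    int  (*init)(void);
    void (*takedown)(void);
    void (*debug)(const char *prefix);
};

static int  mgr_init(void)           { return 0; }
static void mgr_takedown(void)       { }
static void mgr_debug(const char *p) { printf("%s: ok\n", p); }

static const struct mem_func manager_func = {
    .init     = mgr_init,       /* survives reordering of the struct */
    .takedown = mgr_takedown,
    .debug    = mgr_debug,
};

int main(void)
{
    if (manager_func.init() == 0)
        manager_func.debug("vram");
    manager_func.takedown();
    return 0;
}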
42469diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
42470index 4f4c3fe..2cce716 100644
42471--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
42472+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
42473@@ -70,7 +70,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
42474 * locking inversion with the driver load path. And the access here is
42475 * completely racy anyway. So don't bother with locking for now.
42476 */
42477- return dev->open_count == 0;
42478+ return local_read(&dev->open_count) == 0;
42479 }
42480
42481 static const struct vga_switcheroo_client_ops
42482diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
42483index eb89653..613cf71 100644
42484--- a/drivers/gpu/drm/qxl/qxl_cmd.c
42485+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
42486@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
42487 int ret;
42488
42489 mutex_lock(&qdev->async_io_mutex);
42490- irq_num = atomic_read(&qdev->irq_received_io_cmd);
42491+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
42492 if (qdev->last_sent_io_cmd > irq_num) {
42493 if (intr)
42494 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
42495- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42496+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42497 else
42498 ret = wait_event_timeout(qdev->io_cmd_event,
42499- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42500+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
 42501 /* 0 is timeout, just bail, the "hw" has gone away */
42502 if (ret <= 0)
42503 goto out;
42504- irq_num = atomic_read(&qdev->irq_received_io_cmd);
42505+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
42506 }
42507 outb(val, addr);
42508 qdev->last_sent_io_cmd = irq_num + 1;
42509 if (intr)
42510 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
42511- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42512+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42513 else
42514 ret = wait_event_timeout(qdev->io_cmd_event,
42515- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42516+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42517 out:
42518 if (ret > 0)
42519 ret = 0;
42520diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
42521index c3c2bbd..bc3c0fb 100644
42522--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
42523+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
42524@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
42525 struct drm_info_node *node = (struct drm_info_node *) m->private;
42526 struct qxl_device *qdev = node->minor->dev->dev_private;
42527
42528- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
42529- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
42530- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
42531- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
42532+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
42533+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
42534+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
42535+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
42536 seq_printf(m, "%d\n", qdev->irq_received_error);
42537 return 0;
42538 }
42539diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
42540index 36ed40b..0397633 100644
42541--- a/drivers/gpu/drm/qxl/qxl_drv.h
42542+++ b/drivers/gpu/drm/qxl/qxl_drv.h
42543@@ -290,10 +290,10 @@ struct qxl_device {
42544 unsigned int last_sent_io_cmd;
42545
42546 /* interrupt handling */
42547- atomic_t irq_received;
42548- atomic_t irq_received_display;
42549- atomic_t irq_received_cursor;
42550- atomic_t irq_received_io_cmd;
42551+ atomic_unchecked_t irq_received;
42552+ atomic_unchecked_t irq_received_display;
42553+ atomic_unchecked_t irq_received_cursor;
42554+ atomic_unchecked_t irq_received_io_cmd;
42555 unsigned irq_received_error;
42556 wait_queue_head_t display_event;
42557 wait_queue_head_t cursor_event;
42558diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
42559index b110883..dd06418 100644
42560--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
42561+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
42562@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
42563
42564 /* TODO copy slow path code from i915 */
42565 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
42566- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
42567+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
42568
42569 {
42570 struct qxl_drawable *draw = fb_cmd;
42571@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
42572 struct drm_qxl_reloc reloc;
42573
42574 if (copy_from_user(&reloc,
42575- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
42576+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
42577 sizeof(reloc))) {
42578 ret = -EFAULT;
42579 goto out_free_bos;
42580@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
42581
42582 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
42583
42584- struct drm_qxl_command *commands =
42585- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
42586+ struct drm_qxl_command __user *commands =
42587+ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
42588
42589- if (copy_from_user(&user_cmd, &commands[cmd_num],
42590+ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
42591 sizeof(user_cmd)))
42592 return -EFAULT;
42593
42594diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
42595index 0bf1e20..42a7310 100644
42596--- a/drivers/gpu/drm/qxl/qxl_irq.c
42597+++ b/drivers/gpu/drm/qxl/qxl_irq.c
42598@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
42599 if (!pending)
42600 return IRQ_NONE;
42601
42602- atomic_inc(&qdev->irq_received);
42603+ atomic_inc_unchecked(&qdev->irq_received);
42604
42605 if (pending & QXL_INTERRUPT_DISPLAY) {
42606- atomic_inc(&qdev->irq_received_display);
42607+ atomic_inc_unchecked(&qdev->irq_received_display);
42608 wake_up_all(&qdev->display_event);
42609 qxl_queue_garbage_collect(qdev, false);
42610 }
42611 if (pending & QXL_INTERRUPT_CURSOR) {
42612- atomic_inc(&qdev->irq_received_cursor);
42613+ atomic_inc_unchecked(&qdev->irq_received_cursor);
42614 wake_up_all(&qdev->cursor_event);
42615 }
42616 if (pending & QXL_INTERRUPT_IO_CMD) {
42617- atomic_inc(&qdev->irq_received_io_cmd);
42618+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
42619 wake_up_all(&qdev->io_cmd_event);
42620 }
42621 if (pending & QXL_INTERRUPT_ERROR) {
42622@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
42623 init_waitqueue_head(&qdev->io_cmd_event);
42624 INIT_WORK(&qdev->client_monitors_config_work,
42625 qxl_client_monitors_config_work_func);
42626- atomic_set(&qdev->irq_received, 0);
42627- atomic_set(&qdev->irq_received_display, 0);
42628- atomic_set(&qdev->irq_received_cursor, 0);
42629- atomic_set(&qdev->irq_received_io_cmd, 0);
42630+ atomic_set_unchecked(&qdev->irq_received, 0);
42631+ atomic_set_unchecked(&qdev->irq_received_display, 0);
42632+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
42633+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
42634 qdev->irq_received_error = 0;
42635 ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
42636 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
42637diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
42638index 71a1bae..cb1f103 100644
42639--- a/drivers/gpu/drm/qxl/qxl_ttm.c
42640+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
42641@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
42642 }
42643 }
42644
42645-static struct vm_operations_struct qxl_ttm_vm_ops;
42646+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
42647 static const struct vm_operations_struct *ttm_vm_ops;
42648
42649 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42650@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
42651 return r;
42652 if (unlikely(ttm_vm_ops == NULL)) {
42653 ttm_vm_ops = vma->vm_ops;
42654+ pax_open_kernel();
42655 qxl_ttm_vm_ops = *ttm_vm_ops;
42656 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
42657+ pax_close_kernel();
42658 }
42659 vma->vm_ops = &qxl_ttm_vm_ops;
42660 return 0;
42661@@ -555,25 +557,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
42662 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
42663 {
42664 #if defined(CONFIG_DEBUG_FS)
42665- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
42666- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
42667- unsigned i;
42668+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
42669+ {
42670+ .name = "qxl_mem_mm",
42671+ .show = &qxl_mm_dump_table,
42672+ },
42673+ {
42674+ .name = "qxl_surf_mm",
42675+ .show = &qxl_mm_dump_table,
42676+ }
42677+ };
42678
42679- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
42680- if (i == 0)
42681- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
42682- else
42683- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
42684- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
42685- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
42686- qxl_mem_types_list[i].driver_features = 0;
42687- if (i == 0)
42688- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
42689- else
42690- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
42691+ pax_open_kernel();
42692+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
42693+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
42694+ pax_close_kernel();
42695
42696- }
42697- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
42698+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
42699 #else
42700 return 0;
42701 #endif
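
qxl's mmap path keeps TTM's vm_ops const by cloning it once into a module-local no_const copy, overriding only .fault, and sealing the copy again with the pax_open_kernel() pair; the debugfs table is likewise built statically so only its .data pointers need a brief writable window. A user-space model of the clone-and-override step, with the protection toggling omitted:

#include <stddef.h>
#include <stdio.h>

struct vm_ops { int (*fault)(void *vmf); };

static int ttm_fault(void *vmf) { (void)vmf; return 1; }

static const struct vm_ops ttm_vm_ops_real = { .fault = ttm_fault };

static struct vm_ops qxl_vm_ops;            /* local, writable copy  */
static const struct vm_ops *ttm_vm_ops;     /* saved original        */

static int qxl_fault(void *vmf)
{
    /* driver-specific work would go here, then delegate */
    return ttm_vm_ops->fault(vmf);
}

static void do_mmap(void)
{
    if (ttm_vm_ops == NULL) {                /* one-time setup */
        ttm_vm_ops = &ttm_vm_ops_real;
        qxl_vm_ops = *ttm_vm_ops;            /* clone the const table */
        qxl_vm_ops.fault = qxl_fault;        /* override one hook     */
    }
}

int main(void)
{
    do_mmap();
    printf("%d\n", qxl_vm_ops.fault(NULL));
    return 0;
}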
42702diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
42703index 59459fe..be26b31 100644
42704--- a/drivers/gpu/drm/r128/r128_cce.c
42705+++ b/drivers/gpu/drm/r128/r128_cce.c
42706@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
42707
42708 /* GH: Simple idle check.
42709 */
42710- atomic_set(&dev_priv->idle_count, 0);
42711+ atomic_set_unchecked(&dev_priv->idle_count, 0);
42712
42713 /* We don't support anything other than bus-mastering ring mode,
42714 * but the ring can be in either AGP or PCI space for the ring
42715diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
42716index 5bf3f5f..7000661 100644
42717--- a/drivers/gpu/drm/r128/r128_drv.h
42718+++ b/drivers/gpu/drm/r128/r128_drv.h
42719@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
42720 int is_pci;
42721 unsigned long cce_buffers_offset;
42722
42723- atomic_t idle_count;
42724+ atomic_unchecked_t idle_count;
42725
42726 int page_flipping;
42727 int current_page;
42728 u32 crtc_offset;
42729 u32 crtc_offset_cntl;
42730
42731- atomic_t vbl_received;
42732+ atomic_unchecked_t vbl_received;
42733
42734 u32 color_fmt;
42735 unsigned int front_offset;
42736diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
42737index 663f38c..c689495 100644
42738--- a/drivers/gpu/drm/r128/r128_ioc32.c
42739+++ b/drivers/gpu/drm/r128/r128_ioc32.c
42740@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
42741 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
42742 }
42743
42744-drm_ioctl_compat_t *r128_compat_ioctls[] = {
42745+drm_ioctl_compat_t r128_compat_ioctls[] = {
42746 [DRM_R128_INIT] = compat_r128_init,
42747 [DRM_R128_DEPTH] = compat_r128_depth,
42748 [DRM_R128_STIPPLE] = compat_r128_stipple,
42749@@ -197,18 +197,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
42750 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42751 {
42752 unsigned int nr = DRM_IOCTL_NR(cmd);
42753- drm_ioctl_compat_t *fn = NULL;
42754 int ret;
42755
42756 if (nr < DRM_COMMAND_BASE)
42757 return drm_compat_ioctl(filp, cmd, arg);
42758
42759- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
42760- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
42761-
42762- if (fn != NULL)
42763+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls)) {
42764+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
42765 ret = (*fn) (filp, cmd, arg);
42766- else
42767+ } else
42768 ret = drm_ioctl(filp, cmd, arg);
42769
42770 return ret;
42771diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
42772index c2ae496..30b5993 100644
42773--- a/drivers/gpu/drm/r128/r128_irq.c
42774+++ b/drivers/gpu/drm/r128/r128_irq.c
42775@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
42776 if (crtc != 0)
42777 return 0;
42778
42779- return atomic_read(&dev_priv->vbl_received);
42780+ return atomic_read_unchecked(&dev_priv->vbl_received);
42781 }
42782
42783 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
42784@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
42785 /* VBLANK interrupt */
42786 if (status & R128_CRTC_VBLANK_INT) {
42787 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
42788- atomic_inc(&dev_priv->vbl_received);
42789+ atomic_inc_unchecked(&dev_priv->vbl_received);
42790 drm_handle_vblank(dev, 0);
42791 return IRQ_HANDLED;
42792 }
42793diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
42794index 575e986..66e62ca 100644
42795--- a/drivers/gpu/drm/r128/r128_state.c
42796+++ b/drivers/gpu/drm/r128/r128_state.c
42797@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
42798
42799 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
42800 {
42801- if (atomic_read(&dev_priv->idle_count) == 0)
42802+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
42803 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
42804 else
42805- atomic_set(&dev_priv->idle_count, 0);
42806+ atomic_set_unchecked(&dev_priv->idle_count, 0);
42807 }
42808
42809 #endif
42810diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
42811index 4a85bb6..aaea819 100644
42812--- a/drivers/gpu/drm/radeon/mkregtable.c
42813+++ b/drivers/gpu/drm/radeon/mkregtable.c
42814@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
42815 regex_t mask_rex;
42816 regmatch_t match[4];
42817 char buf[1024];
42818- size_t end;
42819+ long end;
42820 int len;
42821 int done = 0;
42822 int r;
42823 unsigned o;
42824 struct offset *offset;
42825 char last_reg_s[10];
42826- int last_reg;
42827+ unsigned long last_reg;
42828
42829 if (regcomp
42830 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
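
mkregtable's end index goes from size_t to long because the parser walks it downward: with an unsigned type, a comparison like end >= 0 is always true and decrementing past zero wraps to SIZE_MAX, turning a trim loop into an out-of-bounds walk. An illustrative signed-index trim; the real mkregtable loop may differ in detail:

#include <stdio.h>
#include <string.h>

static void rtrim(char *buf)
{
    long end = (long)strlen(buf) - 1;   /* signed on purpose */

    /* with a size_t end, "end >= 0" never fails and end-- wraps */
    while (end >= 0 && (buf[end] == '\n' || buf[end] == ' '))
        buf[end--] = '\0';
}

int main(void)
{
    char line[] = "0x1234 REG  \n";

    rtrim(line);
    printf("[%s]\n", line);   /* -> [0x1234 REG] */
    return 0;
}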
42831diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
42832index 697add2..9860f5b 100644
42833--- a/drivers/gpu/drm/radeon/radeon_device.c
42834+++ b/drivers/gpu/drm/radeon/radeon_device.c
42835@@ -1169,7 +1169,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
42836 * locking inversion with the driver load path. And the access here is
42837 * completely racy anyway. So don't bother with locking for now.
42838 */
42839- return dev->open_count == 0;
42840+ return local_read(&dev->open_count) == 0;
42841 }
42842
42843 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
42844diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
42845index dafd812..1bf20c7 100644
42846--- a/drivers/gpu/drm/radeon/radeon_drv.h
42847+++ b/drivers/gpu/drm/radeon/radeon_drv.h
42848@@ -262,7 +262,7 @@ typedef struct drm_radeon_private {
42849
42850 /* SW interrupt */
42851 wait_queue_head_t swi_queue;
42852- atomic_t swi_emitted;
42853+ atomic_unchecked_t swi_emitted;
42854 int vblank_crtc;
42855 uint32_t irq_enable_reg;
42856 uint32_t r500_disp_irq_reg;
42857diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
42858index 0b98ea1..0881827 100644
42859--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
42860+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
42861@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
42862 request = compat_alloc_user_space(sizeof(*request));
42863 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
42864 || __put_user(req32.param, &request->param)
42865- || __put_user((void __user *)(unsigned long)req32.value,
42866+ || __put_user((unsigned long)req32.value,
42867 &request->value))
42868 return -EFAULT;
42869
42870@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
42871 #define compat_radeon_cp_setparam NULL
42872 #endif /* X86_64 || IA64 */
42873
42874-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
42875+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
42876 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
42877 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
42878 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
42879@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
42880 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42881 {
42882 unsigned int nr = DRM_IOCTL_NR(cmd);
42883- drm_ioctl_compat_t *fn = NULL;
42884 int ret;
42885
42886 if (nr < DRM_COMMAND_BASE)
42887 return drm_compat_ioctl(filp, cmd, arg);
42888
42889- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
42890- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
42891-
42892- if (fn != NULL)
42893+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls)) {
42894+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
42895 ret = (*fn) (filp, cmd, arg);
42896- else
42897+ } else
42898 ret = drm_ioctl(filp, cmd, arg);
42899
42900 return ret;
42901diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
42902index 244b19b..c19226d 100644
42903--- a/drivers/gpu/drm/radeon/radeon_irq.c
42904+++ b/drivers/gpu/drm/radeon/radeon_irq.c
42905@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
42906 unsigned int ret;
42907 RING_LOCALS;
42908
42909- atomic_inc(&dev_priv->swi_emitted);
42910- ret = atomic_read(&dev_priv->swi_emitted);
42911+ atomic_inc_unchecked(&dev_priv->swi_emitted);
42912+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
42913
42914 BEGIN_RING(4);
42915 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
42916@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
42917 drm_radeon_private_t *dev_priv =
42918 (drm_radeon_private_t *) dev->dev_private;
42919
42920- atomic_set(&dev_priv->swi_emitted, 0);
42921+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
42922 init_waitqueue_head(&dev_priv->swi_queue);
42923
42924 dev->max_vblank_count = 0x001fffff;
42925diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
42926index 23bb64f..69d7234 100644
42927--- a/drivers/gpu/drm/radeon/radeon_state.c
42928+++ b/drivers/gpu/drm/radeon/radeon_state.c
42929@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
42930 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
42931 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
42932
42933- if (copy_from_user(&depth_boxes, clear->depth_boxes,
42934+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
42935 sarea_priv->nbox * sizeof(depth_boxes[0])))
42936 return -EFAULT;
42937
42938@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
42939 {
42940 drm_radeon_private_t *dev_priv = dev->dev_private;
42941 drm_radeon_getparam_t *param = data;
42942- int value;
42943+ int value = 0;
42944
42945 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
42946
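
Two defensive tweaks in radeon_state.c: the clear path re-tests nbox in the same expression as copy_from_user(), because the SAREA it was read from is shared with userspace and can change between the earlier clamp and the copy (a classic TOCTOU), and getparam zero-initializes value so an unhandled parameter cannot leak uninitialized stack to the caller. The clamp-then-copy idiom, reduced:

#include <stdio.h>
#include <string.h>

#define NR_CLIPRECTS 12

struct box { int x1, y1, x2, y2; };

static int copy_boxes(struct box *dst, const struct box *src, unsigned int nbox)
{
    if (nbox > NR_CLIPRECTS)    /* re-check the shared value at use time */
        return -1;              /* the kernel returns -EFAULT */
    memcpy(dst, src, nbox * sizeof(*src));
    return 0;
}

int main(void)
{
    struct box shared[NR_CLIPRECTS] = { { 0, 0, 64, 64 } };
    struct box local[NR_CLIPRECTS];

    printf("%d\n", copy_boxes(local, shared, NR_CLIPRECTS));  /* 0  */
    printf("%d\n", copy_boxes(local, shared, 64));            /* -1 */
    return 0;
}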
42947diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
42948index c8a8a51..219dacc 100644
42949--- a/drivers/gpu/drm/radeon/radeon_ttm.c
42950+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
42951@@ -797,7 +797,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
42952 man->size = size >> PAGE_SHIFT;
42953 }
42954
42955-static struct vm_operations_struct radeon_ttm_vm_ops;
42956+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
42957 static const struct vm_operations_struct *ttm_vm_ops = NULL;
42958
42959 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42960@@ -838,8 +838,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
42961 }
42962 if (unlikely(ttm_vm_ops == NULL)) {
42963 ttm_vm_ops = vma->vm_ops;
42964+ pax_open_kernel();
42965 radeon_ttm_vm_ops = *ttm_vm_ops;
42966 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
42967+ pax_close_kernel();
42968 }
42969 vma->vm_ops = &radeon_ttm_vm_ops;
42970 return 0;
42971diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
42972index ef40381..347463e 100644
42973--- a/drivers/gpu/drm/tegra/dc.c
42974+++ b/drivers/gpu/drm/tegra/dc.c
42975@@ -1173,7 +1173,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
42976 }
42977
42978 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
42979- dc->debugfs_files[i].data = dc;
42980+ *(void **)&dc->debugfs_files[i].data = dc;
42981
42982 err = drm_debugfs_create_files(dc->debugfs_files,
42983 ARRAY_SIZE(debugfs_files),
42984diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
42985index bd56f2a..255af4b 100644
42986--- a/drivers/gpu/drm/tegra/dsi.c
42987+++ b/drivers/gpu/drm/tegra/dsi.c
42988@@ -41,7 +41,7 @@ struct tegra_dsi {
42989 struct clk *clk_lp;
42990 struct clk *clk;
42991
42992- struct drm_info_list *debugfs_files;
42993+ drm_info_list_no_const *debugfs_files;
42994 struct drm_minor *minor;
42995 struct dentry *debugfs;
42996
42997diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
42998index ba067bb..23afbbd 100644
42999--- a/drivers/gpu/drm/tegra/hdmi.c
43000+++ b/drivers/gpu/drm/tegra/hdmi.c
43001@@ -60,7 +60,7 @@ struct tegra_hdmi {
43002 bool stereo;
43003 bool dvi;
43004
43005- struct drm_info_list *debugfs_files;
43006+ drm_info_list_no_const *debugfs_files;
43007 struct drm_minor *minor;
43008 struct dentry *debugfs;
43009 };
43010diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
43011index bd850c9..d9f3573 100644
43012--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
43013+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
43014@@ -146,10 +146,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
43015 }
43016
43017 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
43018- ttm_bo_man_init,
43019- ttm_bo_man_takedown,
43020- ttm_bo_man_get_node,
43021- ttm_bo_man_put_node,
43022- ttm_bo_man_debug
43023+ .init = ttm_bo_man_init,
43024+ .takedown = ttm_bo_man_takedown,
43025+ .get_node = ttm_bo_man_get_node,
43026+ .put_node = ttm_bo_man_put_node,
43027+ .debug = ttm_bo_man_debug
43028 };
43029 EXPORT_SYMBOL(ttm_bo_manager_func);
43030diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
43031index dbc2def..0a9f710 100644
43032--- a/drivers/gpu/drm/ttm/ttm_memory.c
43033+++ b/drivers/gpu/drm/ttm/ttm_memory.c
43034@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
43035 zone->glob = glob;
43036 glob->zone_kernel = zone;
43037 ret = kobject_init_and_add(
43038- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
43039+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
43040 if (unlikely(ret != 0)) {
43041 kobject_put(&zone->kobj);
43042 return ret;
43043@@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
43044 zone->glob = glob;
43045 glob->zone_dma32 = zone;
43046 ret = kobject_init_and_add(
43047- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
43048+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
43049 if (unlikely(ret != 0)) {
43050 kobject_put(&zone->kobj);
43051 return ret;
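
The ttm_memory change is format-string hardening: zone->name was passed where kobject_init_and_add() expects a printf-style format, so a name containing % would be interpreted; inserting "%s" demotes the name to plain data. The same bug and fix in user-space form:

#include <stdio.h>

int main(void)
{
    const char *zone_name = "dma32 %x zone";   /* hostile-looking name */

    /* printf(zone_name);        BAD: %x is interpreted, junk is read  */
    printf("%s\n", zone_name);   /* GOOD: the name is just an argument */
    return 0;
}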
43052diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
43053index 863bef9..cba15cf 100644
43054--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
43055+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
43056@@ -391,9 +391,9 @@ out:
43057 static unsigned long
43058 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
43059 {
43060- static atomic_t start_pool = ATOMIC_INIT(0);
43061+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
43062 unsigned i;
43063- unsigned pool_offset = atomic_add_return(1, &start_pool);
43064+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
43065 struct ttm_page_pool *pool;
43066 int shrink_pages = sc->nr_to_scan;
43067 unsigned long freed = 0;
43068diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
43069index 3771763..883f206 100644
43070--- a/drivers/gpu/drm/udl/udl_fb.c
43071+++ b/drivers/gpu/drm/udl/udl_fb.c
43072@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
43073 fb_deferred_io_cleanup(info);
43074 kfree(info->fbdefio);
43075 info->fbdefio = NULL;
43076- info->fbops->fb_mmap = udl_fb_mmap;
43077 }
43078
43079 pr_warn("released /dev/fb%d user=%d count=%d\n",
43080diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
43081index ad02732..144f5ed 100644
43082--- a/drivers/gpu/drm/via/via_drv.h
43083+++ b/drivers/gpu/drm/via/via_drv.h
43084@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
43085 typedef uint32_t maskarray_t[5];
43086
43087 typedef struct drm_via_irq {
43088- atomic_t irq_received;
43089+ atomic_unchecked_t irq_received;
43090 uint32_t pending_mask;
43091 uint32_t enable_mask;
43092 wait_queue_head_t irq_queue;
43093@@ -75,7 +75,7 @@ typedef struct drm_via_private {
43094 struct timeval last_vblank;
43095 int last_vblank_valid;
43096 unsigned usec_per_vblank;
43097- atomic_t vbl_received;
43098+ atomic_unchecked_t vbl_received;
43099 drm_via_state_t hc_state;
43100 char pci_buf[VIA_PCI_BUF_SIZE];
43101 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
43102diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
43103index 1319433..a993b0c 100644
43104--- a/drivers/gpu/drm/via/via_irq.c
43105+++ b/drivers/gpu/drm/via/via_irq.c
43106@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
43107 if (crtc != 0)
43108 return 0;
43109
43110- return atomic_read(&dev_priv->vbl_received);
43111+ return atomic_read_unchecked(&dev_priv->vbl_received);
43112 }
43113
43114 irqreturn_t via_driver_irq_handler(int irq, void *arg)
43115@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
43116
43117 status = VIA_READ(VIA_REG_INTERRUPT);
43118 if (status & VIA_IRQ_VBLANK_PENDING) {
43119- atomic_inc(&dev_priv->vbl_received);
43120- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
43121+ atomic_inc_unchecked(&dev_priv->vbl_received);
43122+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
43123 do_gettimeofday(&cur_vblank);
43124 if (dev_priv->last_vblank_valid) {
43125 dev_priv->usec_per_vblank =
43126@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
43127 dev_priv->last_vblank = cur_vblank;
43128 dev_priv->last_vblank_valid = 1;
43129 }
43130- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
43131+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
43132 DRM_DEBUG("US per vblank is: %u\n",
43133 dev_priv->usec_per_vblank);
43134 }
43135@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
43136
43137 for (i = 0; i < dev_priv->num_irqs; ++i) {
43138 if (status & cur_irq->pending_mask) {
43139- atomic_inc(&cur_irq->irq_received);
43140+ atomic_inc_unchecked(&cur_irq->irq_received);
43141 wake_up(&cur_irq->irq_queue);
43142 handled = 1;
43143 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
43144@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
43145 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
43146 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
43147 masks[irq][4]));
43148- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
43149+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
43150 } else {
43151 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
43152 (((cur_irq_sequence =
43153- atomic_read(&cur_irq->irq_received)) -
43154+ atomic_read_unchecked(&cur_irq->irq_received)) -
43155 *sequence) <= (1 << 23)));
43156 }
43157 *sequence = cur_irq_sequence;
43158@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
43159 }
43160
43161 for (i = 0; i < dev_priv->num_irqs; ++i) {
43162- atomic_set(&cur_irq->irq_received, 0);
43163+ atomic_set_unchecked(&cur_irq->irq_received, 0);
43164 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
43165 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
43166 init_waitqueue_head(&cur_irq->irq_queue);
43167@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
43168 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
43169 case VIA_IRQ_RELATIVE:
43170 irqwait->request.sequence +=
43171- atomic_read(&cur_irq->irq_received);
43172+ atomic_read_unchecked(&cur_irq->irq_received);
43173 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
43174 case VIA_IRQ_ABSOLUTE:
43175 break;
43176diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
43177index 6b252a8..5975dfe 100644
43178--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
43179+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
43180@@ -437,7 +437,7 @@ struct vmw_private {
43181 * Fencing and IRQs.
43182 */
43183
43184- atomic_t marker_seq;
43185+ atomic_unchecked_t marker_seq;
43186 wait_queue_head_t fence_queue;
43187 wait_queue_head_t fifo_queue;
43188 int fence_queue_waiters; /* Protected by hw_mutex */
43189diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
43190index 6ccd993..618d592 100644
43191--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
43192+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
43193@@ -154,7 +154,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
43194 (unsigned int) min,
43195 (unsigned int) fifo->capabilities);
43196
43197- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
43198+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
43199 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
43200 vmw_marker_queue_init(&fifo->marker_queue);
43201 return vmw_fifo_send_fence(dev_priv, &dummy);
43202@@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
43203 if (reserveable)
43204 iowrite32(bytes, fifo_mem +
43205 SVGA_FIFO_RESERVED);
43206- return fifo_mem + (next_cmd >> 2);
43207+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
43208 } else {
43209 need_bounce = true;
43210 }
43211@@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
43212
43213 fm = vmw_fifo_reserve(dev_priv, bytes);
43214 if (unlikely(fm == NULL)) {
43215- *seqno = atomic_read(&dev_priv->marker_seq);
43216+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
43217 ret = -ENOMEM;
43218 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
43219 false, 3*HZ);
43220@@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
43221 }
43222
43223 do {
43224- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
43225+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
43226 } while (*seqno == 0);
43227
43228 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
43229diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
43230index b1273e8..9c274fd 100644
43231--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
43232+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
43233@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
43234 }
43235
43236 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
43237- vmw_gmrid_man_init,
43238- vmw_gmrid_man_takedown,
43239- vmw_gmrid_man_get_node,
43240- vmw_gmrid_man_put_node,
43241- vmw_gmrid_man_debug
43242+ .init = vmw_gmrid_man_init,
43243+ .takedown = vmw_gmrid_man_takedown,
43244+ .get_node = vmw_gmrid_man_get_node,
43245+ .put_node = vmw_gmrid_man_put_node,
43246+ .debug = vmw_gmrid_man_debug
43247 };
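
The vmw_gmrid_manager_func hunk above converts a positional initializer into designated initializers, a style grsecurity applies to ops tables throughout the patch (and one its constify plugin plays well with); named fields also survive any reordering of the struct. A minimal userspace sketch of the difference, with all names illustrative rather than taken from vmwgfx:

#include <stdio.h>

struct ops {
    int  (*init)(void);
    void (*takedown)(void);
    void (*debug)(const char *msg);
};

static int  my_init(void)           { puts("init"); return 0; }
static void my_takedown(void)       { puts("takedown"); }
static void my_debug(const char *m) { printf("debug: %s\n", m); }

/* Designated form, matching the style the patch converts to: each
 * callback is bound by field name, so adding or reordering members
 * of struct ops cannot silently shift the bindings. */
static const struct ops my_ops = {
    .init     = my_init,
    .takedown = my_takedown,
    .debug    = my_debug,
};

int main(void)
{
    my_ops.init();
    my_ops.debug("hello");
    my_ops.takedown();
    return 0;
}
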
43248diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
43249index 37881ec..319065d 100644
43250--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
43251+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
43252@@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
43253 int ret;
43254
43255 num_clips = arg->num_clips;
43256- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
43257+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
43258
43259 if (unlikely(num_clips == 0))
43260 return 0;
43261@@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
43262 int ret;
43263
43264 num_clips = arg->num_clips;
43265- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
43266+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
43267
43268 if (unlikely(num_clips == 0))
43269 return 0;
43270diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
43271index 0c42376..6febe77 100644
43272--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
43273+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
43274@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
43275 * emitted. Then the fence is stale and signaled.
43276 */
43277
43278- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
43279+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
43280 > VMW_FENCE_WRAP);
43281
43282 return ret;
43283@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
43284
43285 if (fifo_idle)
43286 down_read(&fifo_state->rwsem);
43287- signal_seq = atomic_read(&dev_priv->marker_seq);
43288+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
43289 ret = 0;
43290
43291 for (;;) {
43292diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
43293index 8a8725c2..afed796 100644
43294--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
43295+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
43296@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
43297 while (!vmw_lag_lt(queue, us)) {
43298 spin_lock(&queue->lock);
43299 if (list_empty(&queue->head))
43300- seqno = atomic_read(&dev_priv->marker_seq);
43301+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
43302 else {
43303 marker = list_first_entry(&queue->head,
43304 struct vmw_marker, head);
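
Every vmwgfx hunk above follows one pattern: marker_seq is a fence sequence number that is expected to wrap (vmw_seqno_passed compares the difference against VMW_FENCE_WRAP), so under PAX_REFCOUNT, which treats atomic_t overflow as an error, it has to move to the _unchecked variants. A rough userspace model of the split, assuming trap-on-wrap semantics rather than the actual PaX implementation:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { atomic_int v; } atomic_checked_t;   /* traps on wrap */
typedef struct { atomic_int v; } atomic_unchecked_t; /* free to wrap  */

static int checked_inc_return(atomic_checked_t *a)
{
    int old = atomic_fetch_add(&a->v, 1);
    if (old == INT_MAX) {        /* wrapped: a refcount bug, so abort */
        fprintf(stderr, "refcount overflow\n");
        abort();
    }
    return old + 1;
}

static int unchecked_inc_return(atomic_unchecked_t *a)
{
    return atomic_fetch_add(&a->v, 1) + 1;  /* wrap is intentional */
}

int main(void)
{
    atomic_unchecked_t seq = { INT_MAX };   /* sequence number at the edge */
    printf("seq wraps to %d\n", unchecked_inc_return(&seq));

    atomic_checked_t ref = { 1 };
    printf("ref now %d\n", checked_inc_return(&ref));
    return 0;
}
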
43305diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
43306index 6866448..2ad2b34 100644
43307--- a/drivers/gpu/vga/vga_switcheroo.c
43308+++ b/drivers/gpu/vga/vga_switcheroo.c
43309@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
43310
43311 /* this version is for the case where the power switch is separate
43312 to the device being powered down. */
43313-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
43314+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
43315 {
43316 /* copy over all the bus versions */
43317 if (dev->bus && dev->bus->pm) {
43318@@ -689,7 +689,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
43319 return ret;
43320 }
43321
43322-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
43323+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
43324 {
43325 /* copy over all the bus versions */
43326 if (dev->bus && dev->bus->pm) {
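
dev_pm_domain_no_const is a grsecurity typedef: the constify plugin turns structs consisting of function pointers, dev_pm_domain among them, into const objects, and the _no_const alias opts out the few instances, like these, that are legitimately written at runtime by copying the bus ops and overriding individual hooks. A simplified sketch of the pattern (the real alias is built with a plugin attribute, not a bare typedef, and these names are illustrative):

struct pm_ops {
    int (*suspend)(void);
    int (*resume)(void);
};

/* Opt-out alias: declarations using it stay writable even when the
 * plugin constifies every plain struct pm_ops instance. */
typedef struct pm_ops pm_ops_no_const;

static int my_suspend(void) { return 0; }

/* The copy-then-override idiom from the hunk is exactly what makes
 * a const object unusable here. */
static void init_domain(pm_ops_no_const *domain, const struct pm_ops *bus)
{
    *domain = *bus;                 /* copy over the bus versions */
    domain->suspend = my_suspend;   /* then patch one callback    */
}

int main(void)
{
    static const struct pm_ops bus_ops = { 0 };
    pm_ops_no_const domain;

    init_domain(&domain, &bus_ops);
    return domain.suspend();
}
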
43327diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
43328index 8ed66fd..38ff772 100644
43329--- a/drivers/hid/hid-core.c
43330+++ b/drivers/hid/hid-core.c
43331@@ -2488,7 +2488,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
43332
43333 int hid_add_device(struct hid_device *hdev)
43334 {
43335- static atomic_t id = ATOMIC_INIT(0);
43336+ static atomic_unchecked_t id = ATOMIC_INIT(0);
43337 int ret;
43338
43339 if (WARN_ON(hdev->status & HID_STAT_ADDED))
43340@@ -2530,7 +2530,7 @@ int hid_add_device(struct hid_device *hdev)
43341 /* XXX hack, any other cleaner solution after the driver core
43342 * is converted to allow more than 20 bytes as the device name? */
43343 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
43344- hdev->vendor, hdev->product, atomic_inc_return(&id));
43345+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
43346
43347 hid_debug_register(hdev, dev_name(&hdev->dev));
43348 ret = device_add(&hdev->dev);
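
The static id counter in hid_add_device only has to hand out distinct name suffixes, so wraparound is harmless and the counter becomes atomic_unchecked_t. The underlying lock-free id-allocation pattern, sketched in userspace with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint next_id;

/* inc_return-style helper: each caller gets a distinct, increasing id;
 * wrap after 2^32 devices is harmless for naming purposes. */
static unsigned int next_device_id(void)
{
    return atomic_fetch_add(&next_id, 1) + 1;
}

static void name_device(char *buf, size_t n,
                        unsigned int bus, unsigned int vendor,
                        unsigned int product)
{
    snprintf(buf, n, "%04X:%04X:%04X.%04X",
             bus, vendor, product, next_device_id());
}

int main(void)
{
    char name[32];
    name_device(name, sizeof(name), 3, 0x46D, 0xC52B);
    puts(name);
    return 0;
}
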
43349diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
43350index ecc2cbf..29a74c1 100644
43351--- a/drivers/hid/hid-magicmouse.c
43352+++ b/drivers/hid/hid-magicmouse.c
43353@@ -290,6 +290,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
43354 if (size < 4 || ((size - 4) % 9) != 0)
43355 return 0;
43356 npoints = (size - 4) / 9;
43357+ if (npoints > 15) {
43358+ hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n",
43359+ size);
43360+ return 0;
43361+ }
43362 msc->ntouches = 0;
43363 for (ii = 0; ii < npoints; ii++)
43364 magicmouse_emit_touch(msc, ii, data + ii * 9 + 4);
43365@@ -307,6 +312,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
43366 if (size < 6 || ((size - 6) % 8) != 0)
43367 return 0;
43368 npoints = (size - 6) / 8;
43369+ if (npoints > 15) {
43370+ hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n",
43371+ size);
43372+ return 0;
43373+ }
43374 msc->ntouches = 0;
43375 for (ii = 0; ii < npoints; ii++)
43376 magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);
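
Both magicmouse hunks cap npoints before the touch loop runs: size comes from the device report, and the driver's touch array has a fixed, small number of slots, so a crafted report could otherwise drive magicmouse_emit_touch past the end of the array (this mirrors the upstream hardening of magicmouse_raw_event). The same validation as a standalone sketch:

#include <stdio.h>

#define MAX_TOUCHES 15   /* matches the cap added in the hunk above */
#define HDR 4            /* TRACKPAD_REPORT_ID header bytes          */
#define REC 9            /* bytes per touch record                   */

static int parse_trackpad_report(const unsigned char *data, int size)
{
    int npoints, i;

    (void)data;   /* touch records would be decoded from here */

    if (size < HDR || ((size - HDR) % REC) != 0)
        return 0;                        /* malformed: ignore */

    npoints = (size - HDR) / REC;
    if (npoints > MAX_TOUCHES) {         /* the added bounds check */
        fprintf(stderr, "invalid size value (%d)\n", size);
        return 0;
    }

    for (i = 0; i < npoints; i++)
        ;   /* emit_touch(data + HDR + i * REC) would run here */

    return npoints;
}

int main(void)
{
    unsigned char report[HDR + 20 * REC] = { 0 };  /* claims 20 touches */

    printf("accepted %d points\n",
           parse_trackpad_report(report, (int)sizeof(report)));
    return 0;
}
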
43377diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
43378index acbb0210..020df3c 100644
43379--- a/drivers/hid/hid-picolcd_core.c
43380+++ b/drivers/hid/hid-picolcd_core.c
43381@@ -350,6 +350,12 @@ static int picolcd_raw_event(struct hid_device *hdev,
43382 if (!data)
43383 return 1;
43384
43385+ if (size > 64) {
43386+ hid_warn(hdev, "invalid size value (%d) for picolcd raw event\n",
43387+ size);
43388+ return 0;
43389+ }
43390+
43391 if (report->id == REPORT_KEY_STATE) {
43392 if (data->input_keys)
43393 ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
43394diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
43395index c13fb5b..55a3802 100644
43396--- a/drivers/hid/hid-wiimote-debug.c
43397+++ b/drivers/hid/hid-wiimote-debug.c
43398@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
43399 else if (size == 0)
43400 return -EIO;
43401
43402- if (copy_to_user(u, buf, size))
43403+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
43404 return -EFAULT;
43405
43406 *off += size;
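
In wiidebug_eeprom_read, buf is a small fixed buffer and size is computed earlier from device state; bounding it by sizeof(buf) before copy_to_user closes an over-read that would leak adjacent kernel stack to userspace. The shape of the guard in a userspace sketch (names and the 16-byte size are illustrative):

#include <string.h>

/* Stand-in for copy_to_user(); returns 0 on success. */
static int copy_out(char *dst, const char *src, size_t n)
{
    memcpy(dst, src, n);
    return 0;
}

static long read_eeprom(char *user_buf, size_t size)
{
    char buf[16] = "eeprom contents";   /* fixed-size source buffer */

    /* The added guard: a miscomputed size must never read past buf. */
    if (size > sizeof(buf) || copy_out(user_buf, buf, size))
        return -14;                     /* -EFAULT */

    return (long)size;
}

int main(void)
{
    char out[64];
    /* An oversized request is rejected instead of leaking the stack. */
    return read_eeprom(out, sizeof(out)) == -14 ? 0 : 1;
}
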
43407diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
43408index 0cb92e3..c7d453d 100644
43409--- a/drivers/hid/uhid.c
43410+++ b/drivers/hid/uhid.c
43411@@ -47,7 +47,7 @@ struct uhid_device {
43412 struct mutex report_lock;
43413 wait_queue_head_t report_wait;
43414 atomic_t report_done;
43415- atomic_t report_id;
43416+ atomic_unchecked_t report_id;
43417 struct uhid_event report_buf;
43418 };
43419
43420@@ -163,7 +163,7 @@ static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
43421
43422 spin_lock_irqsave(&uhid->qlock, flags);
43423 ev->type = UHID_FEATURE;
43424- ev->u.feature.id = atomic_inc_return(&uhid->report_id);
43425+ ev->u.feature.id = atomic_inc_return_unchecked(&uhid->report_id);
43426 ev->u.feature.rnum = rnum;
43427 ev->u.feature.rtype = report_type;
43428
43429@@ -538,7 +538,7 @@ static int uhid_dev_feature_answer(struct uhid_device *uhid,
43430 spin_lock_irqsave(&uhid->qlock, flags);
43431
43432 /* id for old report; drop it silently */
43433- if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
43434+ if (atomic_read_unchecked(&uhid->report_id) != ev->u.feature_answer.id)
43435 goto unlock;
43436 if (atomic_read(&uhid->report_done))
43437 goto unlock;
43438diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
43439index 284cf66..084c627 100644
43440--- a/drivers/hv/channel.c
43441+++ b/drivers/hv/channel.c
43442@@ -365,8 +365,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
43443 int ret = 0;
43444 int t;
43445
43446- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
43447- atomic_inc(&vmbus_connection.next_gpadl_handle);
43448+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
43449+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
43450
43451 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
43452 if (ret)
43453diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
43454index edfc848..d83e195 100644
43455--- a/drivers/hv/hv.c
43456+++ b/drivers/hv/hv.c
43457@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
43458 u64 output_address = (output) ? virt_to_phys(output) : 0;
43459 u32 output_address_hi = output_address >> 32;
43460 u32 output_address_lo = output_address & 0xFFFFFFFF;
43461- void *hypercall_page = hv_context.hypercall_page;
43462+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
43463
43464 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
43465 "=a"(hv_status_lo) : "d" (control_hi),
43466@@ -154,7 +154,7 @@ int hv_init(void)
43467 /* See if the hypercall page is already set */
43468 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
43469
43470- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
43471+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
43472
43473 if (!virtaddr)
43474 goto cleanup;
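
Both hv.c hunks serve KERNEXEC: the hypercall page is mapped PAGE_KERNEL_RX instead of PAGE_KERNEL_EXEC (which is writable as well as executable), and ktva_ktla() converts the pointer between the kernel-text alias mappings KERNEXEC maintains so the indirect call goes through the non-writable one. The policy being enforced is write-xor-execute; a loose userspace model of that policy on x86-64, not of the kernel hunk's mechanism:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    /* x86-64 machine code for: mov eax, 42 ; ret */
    static const unsigned char code[] = { 0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3 };

    void *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (page == MAP_FAILED)
        return 1;

    memcpy(page, code, sizeof(code));              /* populate while RW */
    if (mprotect(page, 4096, PROT_READ | PROT_EXEC))
        return 1;                                  /* then RX only      */

    int (*fn)(void) = (int (*)(void))page;
    printf("%d\n", fn());      /* the page is never W and X at once */
    return 0;
}
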
43475diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
43476index 5e90c5d..d8fcefb 100644
43477--- a/drivers/hv/hv_balloon.c
43478+++ b/drivers/hv/hv_balloon.c
43479@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
43480
43481 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
43482 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
43483-static atomic_t trans_id = ATOMIC_INIT(0);
43484+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
43485
43486 static int dm_ring_size = (5 * PAGE_SIZE);
43487
43488@@ -893,7 +893,7 @@ static void hot_add_req(struct work_struct *dummy)
43489 pr_info("Memory hot add failed\n");
43490
43491 dm->state = DM_INITIALIZED;
43492- resp.hdr.trans_id = atomic_inc_return(&trans_id);
43493+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43494 vmbus_sendpacket(dm->dev->channel, &resp,
43495 sizeof(struct dm_hot_add_response),
43496 (unsigned long)NULL,
43497@@ -973,7 +973,7 @@ static void post_status(struct hv_dynmem_device *dm)
43498 memset(&status, 0, sizeof(struct dm_status));
43499 status.hdr.type = DM_STATUS_REPORT;
43500 status.hdr.size = sizeof(struct dm_status);
43501- status.hdr.trans_id = atomic_inc_return(&trans_id);
43502+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43503
43504 /*
43505 * The host expects the guest to report free memory.
43506@@ -993,7 +993,7 @@ static void post_status(struct hv_dynmem_device *dm)
43507 * send the status. This can happen if we were interrupted
43508 * after we picked our transaction ID.
43509 */
43510- if (status.hdr.trans_id != atomic_read(&trans_id))
43511+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
43512 return;
43513
43514 /*
43515@@ -1129,7 +1129,7 @@ static void balloon_up(struct work_struct *dummy)
43516 */
43517
43518 do {
43519- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
43520+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43521 ret = vmbus_sendpacket(dm_device.dev->channel,
43522 bl_resp,
43523 bl_resp->hdr.size,
43524@@ -1175,7 +1175,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
43525
43526 memset(&resp, 0, sizeof(struct dm_unballoon_response));
43527 resp.hdr.type = DM_UNBALLOON_RESPONSE;
43528- resp.hdr.trans_id = atomic_inc_return(&trans_id);
43529+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43530 resp.hdr.size = sizeof(struct dm_unballoon_response);
43531
43532 vmbus_sendpacket(dm_device.dev->channel, &resp,
43533@@ -1239,7 +1239,7 @@ static void version_resp(struct hv_dynmem_device *dm,
43534 memset(&version_req, 0, sizeof(struct dm_version_request));
43535 version_req.hdr.type = DM_VERSION_REQUEST;
43536 version_req.hdr.size = sizeof(struct dm_version_request);
43537- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
43538+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43539 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
43540 version_req.is_last_attempt = 1;
43541
43542@@ -1409,7 +1409,7 @@ static int balloon_probe(struct hv_device *dev,
43543 memset(&version_req, 0, sizeof(struct dm_version_request));
43544 version_req.hdr.type = DM_VERSION_REQUEST;
43545 version_req.hdr.size = sizeof(struct dm_version_request);
43546- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
43547+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43548 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
43549 version_req.is_last_attempt = 0;
43550
43551@@ -1440,7 +1440,7 @@ static int balloon_probe(struct hv_device *dev,
43552 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
43553 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
43554 cap_msg.hdr.size = sizeof(struct dm_capabilities);
43555- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
43556+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43557
43558 cap_msg.caps.cap_bits.balloon = 1;
43559 cap_msg.caps.cap_bits.hot_add = 1;
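
One detail in post_status() deserves a note: after taking a fresh trans_id the code re-reads the counter and silently drops the message if another transaction started in the meantime, since a reply carrying a stale id would confuse the host. That generation-check pattern, reduced to a userspace sketch:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint trans_id;

struct dm_status {
    unsigned int hdr_trans_id;
    /* ... free-memory figures ... */
};

static bool post_status(struct dm_status *status)
{
    status->hdr_trans_id = atomic_fetch_add(&trans_id, 1) + 1;

    /* ... gathering the report may be interrupted or preempted ... */

    /* A newer id means this snapshot is stale: drop it silently,
     * exactly as the hunk above returns before sending. */
    if (status->hdr_trans_id != atomic_load(&trans_id))
        return false;

    return true;   /* the caller would vmbus_sendpacket() here */
}

int main(void)
{
    struct dm_status status;
    return post_status(&status) ? 0 : 1;
}
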
43560diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
43561index 22b7507..fc2fc47 100644
43562--- a/drivers/hv/hyperv_vmbus.h
43563+++ b/drivers/hv/hyperv_vmbus.h
43564@@ -607,7 +607,7 @@ enum vmbus_connect_state {
43565 struct vmbus_connection {
43566 enum vmbus_connect_state conn_state;
43567
43568- atomic_t next_gpadl_handle;
43569+ atomic_unchecked_t next_gpadl_handle;
43570
43571 /*
43572 * Represents channel interrupts. Each bit position represents a
43573diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
43574index 4d6b269..2e23b86 100644
43575--- a/drivers/hv/vmbus_drv.c
43576+++ b/drivers/hv/vmbus_drv.c
43577@@ -807,10 +807,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
43578 {
43579 int ret = 0;
43580
43581- static atomic_t device_num = ATOMIC_INIT(0);
43582+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
43583
43584 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
43585- atomic_inc_return(&device_num));
43586+ atomic_inc_return_unchecked(&device_num));
43587
43588 child_device_obj->device.bus = &hv_bus;
43589 child_device_obj->device.parent = &hv_acpi_dev->dev;
43590diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
43591index 579bdf9..75118b5 100644
43592--- a/drivers/hwmon/acpi_power_meter.c
43593+++ b/drivers/hwmon/acpi_power_meter.c
43594@@ -116,7 +116,7 @@ struct sensor_template {
43595 struct device_attribute *devattr,
43596 const char *buf, size_t count);
43597 int index;
43598-};
43599+} __do_const;
43600
43601 /* Averaging interval */
43602 static int update_avg_interval(struct acpi_power_meter_resource *resource)
43603@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
43604 struct sensor_template *attrs)
43605 {
43606 struct device *dev = &resource->acpi_dev->dev;
43607- struct sensor_device_attribute *sensors =
43608+ sensor_device_attribute_no_const *sensors =
43609 &resource->sensors[resource->num_sensors];
43610 int res = 0;
43611
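
__do_const is the inverse of the _no_const aliases used below: sensor_template mixes plain data with function pointers, so the constify plugin would not constify it on its own, and the annotation forces every instance into read-only memory where the callback pointers cannot be retargeted by a kernel write primitive. Sketched with a no-op stand-in for the attribute:

#include <stdio.h>

/* Under the plugin this expands to a const-placement attribute; it is
 * a no-op here so the sketch builds with any compiler. */
#define __do_const

struct sensor_template {
    const char *label;
    int (*show)(char *buf, int index);
    int index;
} __do_const;

static int show_power(char *buf, int index)
{
    return sprintf(buf, "power%d_average", index);
}

/* With constification in effect, the whole table lives in .rodata,
 * so ->show cannot be overwritten even with a kernel write primitive. */
static const struct sensor_template sensors[] = {
    { .label = "average", .show = show_power, .index = 1 },
};

int main(void)
{
    char buf[32];
    sensors[0].show(buf, sensors[0].index);
    puts(buf);
    return 0;
}
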
43612diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
43613index 3288f13..71cfb4e 100644
43614--- a/drivers/hwmon/applesmc.c
43615+++ b/drivers/hwmon/applesmc.c
43616@@ -1106,7 +1106,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
43617 {
43618 struct applesmc_node_group *grp;
43619 struct applesmc_dev_attr *node;
43620- struct attribute *attr;
43621+ attribute_no_const *attr;
43622 int ret, i;
43623
43624 for (grp = groups; grp->format; grp++) {
43625diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
43626index ae208f6..48b6c5b 100644
43627--- a/drivers/hwmon/asus_atk0110.c
43628+++ b/drivers/hwmon/asus_atk0110.c
43629@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
43630 struct atk_sensor_data {
43631 struct list_head list;
43632 struct atk_data *data;
43633- struct device_attribute label_attr;
43634- struct device_attribute input_attr;
43635- struct device_attribute limit1_attr;
43636- struct device_attribute limit2_attr;
43637+ device_attribute_no_const label_attr;
43638+ device_attribute_no_const input_attr;
43639+ device_attribute_no_const limit1_attr;
43640+ device_attribute_no_const limit2_attr;
43641 char label_attr_name[ATTR_NAME_SIZE];
43642 char input_attr_name[ATTR_NAME_SIZE];
43643 char limit1_attr_name[ATTR_NAME_SIZE];
43644@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
43645 static struct device_attribute atk_name_attr =
43646 __ATTR(name, 0444, atk_name_show, NULL);
43647
43648-static void atk_init_attribute(struct device_attribute *attr, char *name,
43649+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
43650 sysfs_show_func show)
43651 {
43652 sysfs_attr_init(&attr->attr);
43653diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
43654index d76f0b7..55ae976 100644
43655--- a/drivers/hwmon/coretemp.c
43656+++ b/drivers/hwmon/coretemp.c
43657@@ -784,7 +784,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
43658 return NOTIFY_OK;
43659 }
43660
43661-static struct notifier_block coretemp_cpu_notifier __refdata = {
43662+static struct notifier_block coretemp_cpu_notifier = {
43663 .notifier_call = coretemp_cpu_callback,
43664 };
43665
43666diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
43667index 632f1dc..57e6a58 100644
43668--- a/drivers/hwmon/ibmaem.c
43669+++ b/drivers/hwmon/ibmaem.c
43670@@ -926,7 +926,7 @@ static int aem_register_sensors(struct aem_data *data,
43671 struct aem_rw_sensor_template *rw)
43672 {
43673 struct device *dev = &data->pdev->dev;
43674- struct sensor_device_attribute *sensors = data->sensors;
43675+ sensor_device_attribute_no_const *sensors = data->sensors;
43676 int err;
43677
43678 /* Set up read-only sensors */
43679diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
43680index 14c82da..09b25d7 100644
43681--- a/drivers/hwmon/iio_hwmon.c
43682+++ b/drivers/hwmon/iio_hwmon.c
43683@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
43684 {
43685 struct device *dev = &pdev->dev;
43686 struct iio_hwmon_state *st;
43687- struct sensor_device_attribute *a;
43688+ sensor_device_attribute_no_const *a;
43689 int ret, i;
43690 int in_i = 1, temp_i = 1, curr_i = 1;
43691 enum iio_chan_type type;
43692diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
43693index 7710f46..427a28d 100644
43694--- a/drivers/hwmon/nct6683.c
43695+++ b/drivers/hwmon/nct6683.c
43696@@ -397,11 +397,11 @@ static struct attribute_group *
43697 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
43698 int repeat)
43699 {
43700- struct sensor_device_attribute_2 *a2;
43701- struct sensor_device_attribute *a;
43702+ sensor_device_attribute_2_no_const *a2;
43703+ sensor_device_attribute_no_const *a;
43704 struct sensor_device_template **t;
43705 struct sensor_device_attr_u *su;
43706- struct attribute_group *group;
43707+ attribute_group_no_const *group;
43708 struct attribute **attrs;
43709 int i, j, count;
43710
43711diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
43712index 59d9a3f..2298fa4 100644
43713--- a/drivers/hwmon/nct6775.c
43714+++ b/drivers/hwmon/nct6775.c
43715@@ -944,10 +944,10 @@ static struct attribute_group *
43716 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
43717 int repeat)
43718 {
43719- struct attribute_group *group;
43720+ attribute_group_no_const *group;
43721 struct sensor_device_attr_u *su;
43722- struct sensor_device_attribute *a;
43723- struct sensor_device_attribute_2 *a2;
43724+ sensor_device_attribute_no_const *a;
43725+ sensor_device_attribute_2_no_const *a2;
43726 struct attribute **attrs;
43727 struct sensor_device_template **t;
43728 int i, count;
43729diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
43730index 291d11f..3f0dbbd 100644
43731--- a/drivers/hwmon/pmbus/pmbus_core.c
43732+++ b/drivers/hwmon/pmbus/pmbus_core.c
43733@@ -783,7 +783,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
43734 return 0;
43735 }
43736
43737-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
43738+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
43739 const char *name,
43740 umode_t mode,
43741 ssize_t (*show)(struct device *dev,
43742@@ -800,7 +800,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
43743 dev_attr->store = store;
43744 }
43745
43746-static void pmbus_attr_init(struct sensor_device_attribute *a,
43747+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
43748 const char *name,
43749 umode_t mode,
43750 ssize_t (*show)(struct device *dev,
43751@@ -822,7 +822,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
43752 u16 reg, u8 mask)
43753 {
43754 struct pmbus_boolean *boolean;
43755- struct sensor_device_attribute *a;
43756+ sensor_device_attribute_no_const *a;
43757
43758 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
43759 if (!boolean)
43760@@ -847,7 +847,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
43761 bool update, bool readonly)
43762 {
43763 struct pmbus_sensor *sensor;
43764- struct device_attribute *a;
43765+ device_attribute_no_const *a;
43766
43767 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
43768 if (!sensor)
43769@@ -878,7 +878,7 @@ static int pmbus_add_label(struct pmbus_data *data,
43770 const char *lstring, int index)
43771 {
43772 struct pmbus_label *label;
43773- struct device_attribute *a;
43774+ device_attribute_no_const *a;
43775
43776 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
43777 if (!label)
43778diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
43779index 97cd45a..ac54d8b 100644
43780--- a/drivers/hwmon/sht15.c
43781+++ b/drivers/hwmon/sht15.c
43782@@ -169,7 +169,7 @@ struct sht15_data {
43783 int supply_uv;
43784 bool supply_uv_valid;
43785 struct work_struct update_supply_work;
43786- atomic_t interrupt_handled;
43787+ atomic_unchecked_t interrupt_handled;
43788 };
43789
43790 /**
43791@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
43792 ret = gpio_direction_input(data->pdata->gpio_data);
43793 if (ret)
43794 return ret;
43795- atomic_set(&data->interrupt_handled, 0);
43796+ atomic_set_unchecked(&data->interrupt_handled, 0);
43797
43798 enable_irq(gpio_to_irq(data->pdata->gpio_data));
43799 if (gpio_get_value(data->pdata->gpio_data) == 0) {
43800 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
43801 /* Only relevant if the interrupt hasn't occurred. */
43802- if (!atomic_read(&data->interrupt_handled))
43803+ if (!atomic_read_unchecked(&data->interrupt_handled))
43804 schedule_work(&data->read_work);
43805 }
43806 ret = wait_event_timeout(data->wait_queue,
43807@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
43808
43809 /* First disable the interrupt */
43810 disable_irq_nosync(irq);
43811- atomic_inc(&data->interrupt_handled);
43812+ atomic_inc_unchecked(&data->interrupt_handled);
43813 /* Then schedule a reading work struct */
43814 if (data->state != SHT15_READING_NOTHING)
43815 schedule_work(&data->read_work);
43816@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
43817 * If not, then start the interrupt again - care here as could
43818 * have gone low in meantime so verify it hasn't!
43819 */
43820- atomic_set(&data->interrupt_handled, 0);
43821+ atomic_set_unchecked(&data->interrupt_handled, 0);
43822 enable_irq(gpio_to_irq(data->pdata->gpio_data));
43823 /* If still not occurred or another handler was scheduled */
43824 if (gpio_get_value(data->pdata->gpio_data)
43825- || atomic_read(&data->interrupt_handled))
43826+ || atomic_read_unchecked(&data->interrupt_handled))
43827 return;
43828 }
43829
43830diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
43831index 8df43c5..b07b91d 100644
43832--- a/drivers/hwmon/via-cputemp.c
43833+++ b/drivers/hwmon/via-cputemp.c
43834@@ -296,7 +296,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
43835 return NOTIFY_OK;
43836 }
43837
43838-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
43839+static struct notifier_block via_cputemp_cpu_notifier = {
43840 .notifier_call = via_cputemp_cpu_callback,
43841 };
43842
43843diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
43844index 41fc683..a39cfea 100644
43845--- a/drivers/i2c/busses/i2c-amd756-s4882.c
43846+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
43847@@ -43,7 +43,7 @@
43848 extern struct i2c_adapter amd756_smbus;
43849
43850 static struct i2c_adapter *s4882_adapter;
43851-static struct i2c_algorithm *s4882_algo;
43852+static i2c_algorithm_no_const *s4882_algo;
43853
43854 /* Wrapper access functions for multiplexed SMBus */
43855 static DEFINE_MUTEX(amd756_lock);
43856diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
43857index b19a310..d6eece0 100644
43858--- a/drivers/i2c/busses/i2c-diolan-u2c.c
43859+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
43860@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
43861 /* usb layer */
43862
43863 /* Send command to device, and get response. */
43864-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
43865+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
43866 {
43867 int ret = 0;
43868 int actual;
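
diolan_usb_transfer picks up the size_overflow plugin's opt-out: the plugin instruments integer arithmetic that can feed copy and allocation sizes and flags overflow at runtime, while __intentional_overflow(-1) whitelists functions whose arithmetic is allowed to wrap. A sketch with the attribute defined away so it builds without the plugin:

#include <stdio.h>

#define __intentional_overflow(...)  /* plugin attribute; no-op here */

static unsigned int __intentional_overflow(-1)
wrapping_sum(unsigned int a, unsigned int b)
{
    return a + b;   /* modular wrap is the intended behaviour */
}

int main(void)
{
    printf("%u\n", wrapping_sum(0xFFFFFFFFu, 2u));  /* prints 1 */
    return 0;
}
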
43869diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
43870index b170bdf..3c76427 100644
43871--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
43872+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
43873@@ -41,7 +41,7 @@
43874 extern struct i2c_adapter *nforce2_smbus;
43875
43876 static struct i2c_adapter *s4985_adapter;
43877-static struct i2c_algorithm *s4985_algo;
43878+static i2c_algorithm_no_const *s4985_algo;
43879
43880 /* Wrapper access functions for multiplexed SMBus */
43881 static DEFINE_MUTEX(nforce2_lock);
43882diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
43883index 80b47e8..1a6040d9 100644
43884--- a/drivers/i2c/i2c-dev.c
43885+++ b/drivers/i2c/i2c-dev.c
43886@@ -277,7 +277,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
43887 break;
43888 }
43889
43890- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
43891+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
43892 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
43893 if (IS_ERR(rdwr_pa[i].buf)) {
43894 res = PTR_ERR(rdwr_pa[i].buf);
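
The __force_user cast belongs to grsecurity's stricter user/kernel pointer separation: rdwr_pa[i].buf carries a userspace address in a plain pointer field, and the cast both documents that and satisfies sparse's address-space checker. The kernel-style annotations, reduced to a sketch that ordinary compilers see as empty macros:

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

struct i2c_msg_like {
    char *buf;   /* actually holds a userspace address */
};

static char __user *user_ptr_of(struct i2c_msg_like *m)
{
    /* The forced cast marks the address-space change as deliberate,
     * mirroring the (u8 __force_user *) cast in the hunk above. */
    return (__force char __user *)m->buf;
}

int main(void)
{
    struct i2c_msg_like m = { 0 };
    return user_ptr_of(&m) != 0;   /* NULL in, NULL out */
}
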
43895diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
43896index 0b510ba..4fbb5085 100644
43897--- a/drivers/ide/ide-cd.c
43898+++ b/drivers/ide/ide-cd.c
43899@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
43900 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
43901 if ((unsigned long)buf & alignment
43902 || blk_rq_bytes(rq) & q->dma_pad_mask
43903- || object_is_on_stack(buf))
43904+ || object_starts_on_stack(buf))
43905 drive->dma = 0;
43906 }
43907 }
43908diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
43909index 4b1f375..770b95f 100644
43910--- a/drivers/iio/industrialio-core.c
43911+++ b/drivers/iio/industrialio-core.c
43912@@ -551,7 +551,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
43913 }
43914
43915 static
43916-int __iio_device_attr_init(struct device_attribute *dev_attr,
43917+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
43918 const char *postfix,
43919 struct iio_chan_spec const *chan,
43920 ssize_t (*readfunc)(struct device *dev,
43921diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
43922index c323917..6ddea8b 100644
43923--- a/drivers/infiniband/core/cm.c
43924+++ b/drivers/infiniband/core/cm.c
43925@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
43926
43927 struct cm_counter_group {
43928 struct kobject obj;
43929- atomic_long_t counter[CM_ATTR_COUNT];
43930+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
43931 };
43932
43933 struct cm_counter_attribute {
43934@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
43935 struct ib_mad_send_buf *msg = NULL;
43936 int ret;
43937
43938- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43939+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43940 counter[CM_REQ_COUNTER]);
43941
43942 /* Quick state check to discard duplicate REQs. */
43943@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
43944 if (!cm_id_priv)
43945 return;
43946
43947- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43948+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43949 counter[CM_REP_COUNTER]);
43950 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
43951 if (ret)
43952@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
43953 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
43954 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
43955 spin_unlock_irq(&cm_id_priv->lock);
43956- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43957+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43958 counter[CM_RTU_COUNTER]);
43959 goto out;
43960 }
43961@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
43962 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
43963 dreq_msg->local_comm_id);
43964 if (!cm_id_priv) {
43965- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43966+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43967 counter[CM_DREQ_COUNTER]);
43968 cm_issue_drep(work->port, work->mad_recv_wc);
43969 return -EINVAL;
43970@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
43971 case IB_CM_MRA_REP_RCVD:
43972 break;
43973 case IB_CM_TIMEWAIT:
43974- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43975+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43976 counter[CM_DREQ_COUNTER]);
43977 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
43978 goto unlock;
43979@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
43980 cm_free_msg(msg);
43981 goto deref;
43982 case IB_CM_DREQ_RCVD:
43983- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43984+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43985 counter[CM_DREQ_COUNTER]);
43986 goto unlock;
43987 default:
43988@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
43989 ib_modify_mad(cm_id_priv->av.port->mad_agent,
43990 cm_id_priv->msg, timeout)) {
43991 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
43992- atomic_long_inc(&work->port->
43993+ atomic_long_inc_unchecked(&work->port->
43994 counter_group[CM_RECV_DUPLICATES].
43995 counter[CM_MRA_COUNTER]);
43996 goto out;
43997@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
43998 break;
43999 case IB_CM_MRA_REQ_RCVD:
44000 case IB_CM_MRA_REP_RCVD:
44001- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44002+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44003 counter[CM_MRA_COUNTER]);
44004 /* fall through */
44005 default:
44006@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
44007 case IB_CM_LAP_IDLE:
44008 break;
44009 case IB_CM_MRA_LAP_SENT:
44010- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44011+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44012 counter[CM_LAP_COUNTER]);
44013 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
44014 goto unlock;
44015@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
44016 cm_free_msg(msg);
44017 goto deref;
44018 case IB_CM_LAP_RCVD:
44019- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44020+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44021 counter[CM_LAP_COUNTER]);
44022 goto unlock;
44023 default:
44024@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
44025 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
44026 if (cur_cm_id_priv) {
44027 spin_unlock_irq(&cm.lock);
44028- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44029+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44030 counter[CM_SIDR_REQ_COUNTER]);
44031 goto out; /* Duplicate message. */
44032 }
44033@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
44034 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
44035 msg->retries = 1;
44036
44037- atomic_long_add(1 + msg->retries,
44038+ atomic_long_add_unchecked(1 + msg->retries,
44039 &port->counter_group[CM_XMIT].counter[attr_index]);
44040 if (msg->retries)
44041- atomic_long_add(msg->retries,
44042+ atomic_long_add_unchecked(msg->retries,
44043 &port->counter_group[CM_XMIT_RETRIES].
44044 counter[attr_index]);
44045
44046@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
44047 }
44048
44049 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
44050- atomic_long_inc(&port->counter_group[CM_RECV].
44051+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
44052 counter[attr_id - CM_ATTR_ID_OFFSET]);
44053
44054 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
44055@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
44056 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
44057
44058 return sprintf(buf, "%ld\n",
44059- atomic_long_read(&group->counter[cm_attr->index]));
44060+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
44061 }
44062
44063 static const struct sysfs_ops cm_counter_ops = {
44064diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
44065index 9f5ad7c..588cd84 100644
44066--- a/drivers/infiniband/core/fmr_pool.c
44067+++ b/drivers/infiniband/core/fmr_pool.c
44068@@ -98,8 +98,8 @@ struct ib_fmr_pool {
44069
44070 struct task_struct *thread;
44071
44072- atomic_t req_ser;
44073- atomic_t flush_ser;
44074+ atomic_unchecked_t req_ser;
44075+ atomic_unchecked_t flush_ser;
44076
44077 wait_queue_head_t force_wait;
44078 };
44079@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
44080 struct ib_fmr_pool *pool = pool_ptr;
44081
44082 do {
44083- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
44084+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
44085 ib_fmr_batch_release(pool);
44086
44087- atomic_inc(&pool->flush_ser);
44088+ atomic_inc_unchecked(&pool->flush_ser);
44089 wake_up_interruptible(&pool->force_wait);
44090
44091 if (pool->flush_function)
44092@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
44093 }
44094
44095 set_current_state(TASK_INTERRUPTIBLE);
44096- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
44097+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
44098 !kthread_should_stop())
44099 schedule();
44100 __set_current_state(TASK_RUNNING);
44101@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
44102 pool->dirty_watermark = params->dirty_watermark;
44103 pool->dirty_len = 0;
44104 spin_lock_init(&pool->pool_lock);
44105- atomic_set(&pool->req_ser, 0);
44106- atomic_set(&pool->flush_ser, 0);
44107+ atomic_set_unchecked(&pool->req_ser, 0);
44108+ atomic_set_unchecked(&pool->flush_ser, 0);
44109 init_waitqueue_head(&pool->force_wait);
44110
44111 pool->thread = kthread_run(ib_fmr_cleanup_thread,
44112@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
44113 }
44114 spin_unlock_irq(&pool->pool_lock);
44115
44116- serial = atomic_inc_return(&pool->req_ser);
44117+ serial = atomic_inc_return_unchecked(&pool->req_ser);
44118 wake_up_process(pool->thread);
44119
44120 if (wait_event_interruptible(pool->force_wait,
44121- atomic_read(&pool->flush_ser) - serial >= 0))
44122+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
44123 return -EINTR;
44124
44125 return 0;
44126@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
44127 } else {
44128 list_add_tail(&fmr->list, &pool->dirty_list);
44129 if (++pool->dirty_len >= pool->dirty_watermark) {
44130- atomic_inc(&pool->req_ser);
44131+ atomic_inc_unchecked(&pool->req_ser);
44132 wake_up_process(pool->thread);
44133 }
44134 }
44135diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
44136index ec7a298..8742e59 100644
44137--- a/drivers/infiniband/hw/cxgb4/mem.c
44138+++ b/drivers/infiniband/hw/cxgb4/mem.c
44139@@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
44140 int err;
44141 struct fw_ri_tpte tpt;
44142 u32 stag_idx;
44143- static atomic_t key;
44144+ static atomic_unchecked_t key;
44145
44146 if (c4iw_fatal_error(rdev))
44147 return -EIO;
44148@@ -270,7 +270,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
44149 if (rdev->stats.stag.cur > rdev->stats.stag.max)
44150 rdev->stats.stag.max = rdev->stats.stag.cur;
44151 mutex_unlock(&rdev->stats.lock);
44152- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
44153+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
44154 }
44155 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
44156 __func__, stag_state, type, pdid, stag_idx);
44157diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
44158index 79b3dbc..96e5fcc 100644
44159--- a/drivers/infiniband/hw/ipath/ipath_rc.c
44160+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
44161@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
44162 struct ib_atomic_eth *ateth;
44163 struct ipath_ack_entry *e;
44164 u64 vaddr;
44165- atomic64_t *maddr;
44166+ atomic64_unchecked_t *maddr;
44167 u64 sdata;
44168 u32 rkey;
44169 u8 next;
44170@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
44171 IB_ACCESS_REMOTE_ATOMIC)))
44172 goto nack_acc_unlck;
44173 /* Perform atomic OP and save result. */
44174- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
44175+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
44176 sdata = be64_to_cpu(ateth->swap_data);
44177 e = &qp->s_ack_queue[qp->r_head_ack_queue];
44178 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
44179- (u64) atomic64_add_return(sdata, maddr) - sdata :
44180+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
44181 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
44182 be64_to_cpu(ateth->compare_data),
44183 sdata);
44184diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
44185index 1f95bba..9530f87 100644
44186--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
44187+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
44188@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
44189 unsigned long flags;
44190 struct ib_wc wc;
44191 u64 sdata;
44192- atomic64_t *maddr;
44193+ atomic64_unchecked_t *maddr;
44194 enum ib_wc_status send_status;
44195
44196 /*
44197@@ -382,11 +382,11 @@ again:
44198 IB_ACCESS_REMOTE_ATOMIC)))
44199 goto acc_err;
44200 /* Perform atomic OP and save result. */
44201- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
44202+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
44203 sdata = wqe->wr.wr.atomic.compare_add;
44204 *(u64 *) sqp->s_sge.sge.vaddr =
44205 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
44206- (u64) atomic64_add_return(sdata, maddr) - sdata :
44207+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
44208 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
44209 sdata, wqe->wr.wr.atomic.swap);
44210 goto send_comp;
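
A subtlety in the atomic FETCH_ADD paths above: the InfiniBand semantics require returning the value before the add, but the kernel helper is add_return, which yields the value after it, hence the trailing subtraction of sdata. The identity, checked in a userspace sketch:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
    atomic_long target = 100;
    long sdata = 7;

    /* add_return semantics: the new value after the addition... */
    long newv = atomic_fetch_add(&target, sdata) + sdata;
    /* ...so subtracting the addend recovers the pre-add value that
     * FETCH_ADD must hand back to the requester. */
    long oldv = newv - sdata;

    printf("old=%ld new=%ld\n", oldv, newv);   /* old=100 new=107 */
    return 0;
}
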
44211diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
44212index 287ad05..5ae7b44d 100644
44213--- a/drivers/infiniband/hw/mlx4/mad.c
44214+++ b/drivers/infiniband/hw/mlx4/mad.c
44215@@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
44216
44217 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
44218 {
44219- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
44220+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
44221 cpu_to_be64(0xff00000000000000LL);
44222 }
44223
44224diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
44225index ed327e6..ca1739e0 100644
44226--- a/drivers/infiniband/hw/mlx4/mcg.c
44227+++ b/drivers/infiniband/hw/mlx4/mcg.c
44228@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
44229 {
44230 char name[20];
44231
44232- atomic_set(&ctx->tid, 0);
44233+ atomic_set_unchecked(&ctx->tid, 0);
44234 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
44235 ctx->mcg_wq = create_singlethread_workqueue(name);
44236 if (!ctx->mcg_wq)
44237diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
44238index 369da3c..223e6e9 100644
44239--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
44240+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
44241@@ -426,7 +426,7 @@ struct mlx4_ib_demux_ctx {
44242 struct list_head mcg_mgid0_list;
44243 struct workqueue_struct *mcg_wq;
44244 struct mlx4_ib_demux_pv_ctx **tun;
44245- atomic_t tid;
44246+ atomic_unchecked_t tid;
44247 int flushing; /* flushing the work queue */
44248 };
44249
44250diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
44251index 9d3e5c1..6f166df 100644
44252--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
44253+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
44254@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
44255 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
44256 }
44257
44258-int mthca_QUERY_FW(struct mthca_dev *dev)
44259+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
44260 {
44261 struct mthca_mailbox *mailbox;
44262 u32 *outbox;
44263@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44264 CMD_TIME_CLASS_B);
44265 }
44266
44267-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44268+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44269 int num_mtt)
44270 {
44271 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
44272@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
44273 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
44274 }
44275
44276-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44277+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44278 int eq_num)
44279 {
44280 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
44281@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
44282 CMD_TIME_CLASS_B);
44283 }
44284
44285-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
44286+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
44287 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
44288 void *in_mad, void *response_mad)
44289 {
44290diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
44291index ded76c1..0cf0a08 100644
44292--- a/drivers/infiniband/hw/mthca/mthca_main.c
44293+++ b/drivers/infiniband/hw/mthca/mthca_main.c
44294@@ -692,7 +692,7 @@ err_close:
44295 return err;
44296 }
44297
44298-static int mthca_setup_hca(struct mthca_dev *dev)
44299+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
44300 {
44301 int err;
44302
44303diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
44304index ed9a989..6aa5dc2 100644
44305--- a/drivers/infiniband/hw/mthca/mthca_mr.c
44306+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
44307@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
44308 * through the bitmaps)
44309 */
44310
44311-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
44312+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
44313 {
44314 int o;
44315 int m;
44316@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
44317 return key;
44318 }
44319
44320-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
44321+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
44322 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
44323 {
44324 struct mthca_mailbox *mailbox;
44325@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
44326 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
44327 }
44328
44329-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
44330+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
44331 u64 *buffer_list, int buffer_size_shift,
44332 int list_len, u64 iova, u64 total_size,
44333 u32 access, struct mthca_mr *mr)
44334diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
44335index 415f8e1..e34214e 100644
44336--- a/drivers/infiniband/hw/mthca/mthca_provider.c
44337+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
44338@@ -764,7 +764,7 @@ unlock:
44339 return 0;
44340 }
44341
44342-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
44343+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
44344 {
44345 struct mthca_dev *dev = to_mdev(ibcq->device);
44346 struct mthca_cq *cq = to_mcq(ibcq);
44347diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
44348index 3b2a6dc..bce26ff 100644
44349--- a/drivers/infiniband/hw/nes/nes.c
44350+++ b/drivers/infiniband/hw/nes/nes.c
44351@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
44352 LIST_HEAD(nes_adapter_list);
44353 static LIST_HEAD(nes_dev_list);
44354
44355-atomic_t qps_destroyed;
44356+atomic_unchecked_t qps_destroyed;
44357
44358 static unsigned int ee_flsh_adapter;
44359 static unsigned int sysfs_nonidx_addr;
44360@@ -278,7 +278,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
44361 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
44362 struct nes_adapter *nesadapter = nesdev->nesadapter;
44363
44364- atomic_inc(&qps_destroyed);
44365+ atomic_inc_unchecked(&qps_destroyed);
44366
44367 /* Free the control structures */
44368
44369diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
44370index bd9d132..70d84f4 100644
44371--- a/drivers/infiniband/hw/nes/nes.h
44372+++ b/drivers/infiniband/hw/nes/nes.h
44373@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
44374 extern unsigned int wqm_quanta;
44375 extern struct list_head nes_adapter_list;
44376
44377-extern atomic_t cm_connects;
44378-extern atomic_t cm_accepts;
44379-extern atomic_t cm_disconnects;
44380-extern atomic_t cm_closes;
44381-extern atomic_t cm_connecteds;
44382-extern atomic_t cm_connect_reqs;
44383-extern atomic_t cm_rejects;
44384-extern atomic_t mod_qp_timouts;
44385-extern atomic_t qps_created;
44386-extern atomic_t qps_destroyed;
44387-extern atomic_t sw_qps_destroyed;
44388+extern atomic_unchecked_t cm_connects;
44389+extern atomic_unchecked_t cm_accepts;
44390+extern atomic_unchecked_t cm_disconnects;
44391+extern atomic_unchecked_t cm_closes;
44392+extern atomic_unchecked_t cm_connecteds;
44393+extern atomic_unchecked_t cm_connect_reqs;
44394+extern atomic_unchecked_t cm_rejects;
44395+extern atomic_unchecked_t mod_qp_timouts;
44396+extern atomic_unchecked_t qps_created;
44397+extern atomic_unchecked_t qps_destroyed;
44398+extern atomic_unchecked_t sw_qps_destroyed;
44399 extern u32 mh_detected;
44400 extern u32 mh_pauses_sent;
44401 extern u32 cm_packets_sent;
44402@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
44403 extern u32 cm_packets_received;
44404 extern u32 cm_packets_dropped;
44405 extern u32 cm_packets_retrans;
44406-extern atomic_t cm_listens_created;
44407-extern atomic_t cm_listens_destroyed;
44408+extern atomic_unchecked_t cm_listens_created;
44409+extern atomic_unchecked_t cm_listens_destroyed;
44410 extern u32 cm_backlog_drops;
44411-extern atomic_t cm_loopbacks;
44412-extern atomic_t cm_nodes_created;
44413-extern atomic_t cm_nodes_destroyed;
44414-extern atomic_t cm_accel_dropped_pkts;
44415-extern atomic_t cm_resets_recvd;
44416-extern atomic_t pau_qps_created;
44417-extern atomic_t pau_qps_destroyed;
44418+extern atomic_unchecked_t cm_loopbacks;
44419+extern atomic_unchecked_t cm_nodes_created;
44420+extern atomic_unchecked_t cm_nodes_destroyed;
44421+extern atomic_unchecked_t cm_accel_dropped_pkts;
44422+extern atomic_unchecked_t cm_resets_recvd;
44423+extern atomic_unchecked_t pau_qps_created;
44424+extern atomic_unchecked_t pau_qps_destroyed;
44425
44426 extern u32 int_mod_timer_init;
44427 extern u32 int_mod_cq_depth_256;
44428diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
44429index 6f09a72..cf4399d 100644
44430--- a/drivers/infiniband/hw/nes/nes_cm.c
44431+++ b/drivers/infiniband/hw/nes/nes_cm.c
44432@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
44433 u32 cm_packets_retrans;
44434 u32 cm_packets_created;
44435 u32 cm_packets_received;
44436-atomic_t cm_listens_created;
44437-atomic_t cm_listens_destroyed;
44438+atomic_unchecked_t cm_listens_created;
44439+atomic_unchecked_t cm_listens_destroyed;
44440 u32 cm_backlog_drops;
44441-atomic_t cm_loopbacks;
44442-atomic_t cm_nodes_created;
44443-atomic_t cm_nodes_destroyed;
44444-atomic_t cm_accel_dropped_pkts;
44445-atomic_t cm_resets_recvd;
44446+atomic_unchecked_t cm_loopbacks;
44447+atomic_unchecked_t cm_nodes_created;
44448+atomic_unchecked_t cm_nodes_destroyed;
44449+atomic_unchecked_t cm_accel_dropped_pkts;
44450+atomic_unchecked_t cm_resets_recvd;
44451
44452 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
44453 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
44454@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
44455 /* instance of function pointers for client API */
44456 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
44457 static struct nes_cm_ops nes_cm_api = {
44458- mini_cm_accelerated,
44459- mini_cm_listen,
44460- mini_cm_del_listen,
44461- mini_cm_connect,
44462- mini_cm_close,
44463- mini_cm_accept,
44464- mini_cm_reject,
44465- mini_cm_recv_pkt,
44466- mini_cm_dealloc_core,
44467- mini_cm_get,
44468- mini_cm_set
44469+ .accelerated = mini_cm_accelerated,
44470+ .listen = mini_cm_listen,
44471+ .stop_listener = mini_cm_del_listen,
44472+ .connect = mini_cm_connect,
44473+ .close = mini_cm_close,
44474+ .accept = mini_cm_accept,
44475+ .reject = mini_cm_reject,
44476+ .recv_pkt = mini_cm_recv_pkt,
44477+ .destroy_cm_core = mini_cm_dealloc_core,
44478+ .get = mini_cm_get,
44479+ .set = mini_cm_set
44480 };
44481
44482 static struct nes_cm_core *g_cm_core;
44483
44484-atomic_t cm_connects;
44485-atomic_t cm_accepts;
44486-atomic_t cm_disconnects;
44487-atomic_t cm_closes;
44488-atomic_t cm_connecteds;
44489-atomic_t cm_connect_reqs;
44490-atomic_t cm_rejects;
44491+atomic_unchecked_t cm_connects;
44492+atomic_unchecked_t cm_accepts;
44493+atomic_unchecked_t cm_disconnects;
44494+atomic_unchecked_t cm_closes;
44495+atomic_unchecked_t cm_connecteds;
44496+atomic_unchecked_t cm_connect_reqs;
44497+atomic_unchecked_t cm_rejects;
44498
44499 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
44500 {
44501@@ -1436,7 +1436,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
44502 kfree(listener);
44503 listener = NULL;
44504 ret = 0;
44505- atomic_inc(&cm_listens_destroyed);
44506+ atomic_inc_unchecked(&cm_listens_destroyed);
44507 } else {
44508 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
44509 }
44510@@ -1637,7 +1637,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
44511 cm_node->rem_mac);
44512
44513 add_hte_node(cm_core, cm_node);
44514- atomic_inc(&cm_nodes_created);
44515+ atomic_inc_unchecked(&cm_nodes_created);
44516
44517 return cm_node;
44518 }
44519@@ -1698,7 +1698,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
44520 }
44521
44522 atomic_dec(&cm_core->node_cnt);
44523- atomic_inc(&cm_nodes_destroyed);
44524+ atomic_inc_unchecked(&cm_nodes_destroyed);
44525 nesqp = cm_node->nesqp;
44526 if (nesqp) {
44527 nesqp->cm_node = NULL;
44528@@ -1762,7 +1762,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
44529
44530 static void drop_packet(struct sk_buff *skb)
44531 {
44532- atomic_inc(&cm_accel_dropped_pkts);
44533+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
44534 dev_kfree_skb_any(skb);
44535 }
44536
44537@@ -1825,7 +1825,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
44538 {
44539
44540 int reset = 0; /* whether to send reset in case of err.. */
44541- atomic_inc(&cm_resets_recvd);
44542+ atomic_inc_unchecked(&cm_resets_recvd);
44543 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
44544 " refcnt=%d\n", cm_node, cm_node->state,
44545 atomic_read(&cm_node->ref_count));
44546@@ -2492,7 +2492,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
44547 rem_ref_cm_node(cm_node->cm_core, cm_node);
44548 return NULL;
44549 }
44550- atomic_inc(&cm_loopbacks);
44551+ atomic_inc_unchecked(&cm_loopbacks);
44552 loopbackremotenode->loopbackpartner = cm_node;
44553 loopbackremotenode->tcp_cntxt.rcv_wscale =
44554 NES_CM_DEFAULT_RCV_WND_SCALE;
44555@@ -2773,7 +2773,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
44556 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
44557 else {
44558 rem_ref_cm_node(cm_core, cm_node);
44559- atomic_inc(&cm_accel_dropped_pkts);
44560+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
44561 dev_kfree_skb_any(skb);
44562 }
44563 break;
44564@@ -3081,7 +3081,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
44565
44566 if ((cm_id) && (cm_id->event_handler)) {
44567 if (issue_disconn) {
44568- atomic_inc(&cm_disconnects);
44569+ atomic_inc_unchecked(&cm_disconnects);
44570 cm_event.event = IW_CM_EVENT_DISCONNECT;
44571 cm_event.status = disconn_status;
44572 cm_event.local_addr = cm_id->local_addr;
44573@@ -3103,7 +3103,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
44574 }
44575
44576 if (issue_close) {
44577- atomic_inc(&cm_closes);
44578+ atomic_inc_unchecked(&cm_closes);
44579 nes_disconnect(nesqp, 1);
44580
44581 cm_id->provider_data = nesqp;
44582@@ -3241,7 +3241,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
44583
44584 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
44585 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
44586- atomic_inc(&cm_accepts);
44587+ atomic_inc_unchecked(&cm_accepts);
44588
44589 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
44590 netdev_refcnt_read(nesvnic->netdev));
44591@@ -3439,7 +3439,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
44592 struct nes_cm_core *cm_core;
44593 u8 *start_buff;
44594
44595- atomic_inc(&cm_rejects);
44596+ atomic_inc_unchecked(&cm_rejects);
44597 cm_node = (struct nes_cm_node *)cm_id->provider_data;
44598 loopback = cm_node->loopbackpartner;
44599 cm_core = cm_node->cm_core;
44600@@ -3504,7 +3504,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
44601 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
44602 ntohs(laddr->sin_port));
44603
44604- atomic_inc(&cm_connects);
44605+ atomic_inc_unchecked(&cm_connects);
44606 nesqp->active_conn = 1;
44607
44608 /* cache the cm_id in the qp */
44609@@ -3649,7 +3649,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
44610 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
44611 return err;
44612 }
44613- atomic_inc(&cm_listens_created);
44614+ atomic_inc_unchecked(&cm_listens_created);
44615 }
44616
44617 cm_id->add_ref(cm_id);
44618@@ -3756,7 +3756,7 @@ static void cm_event_connected(struct nes_cm_event *event)
44619
44620 if (nesqp->destroyed)
44621 return;
44622- atomic_inc(&cm_connecteds);
44623+ atomic_inc_unchecked(&cm_connecteds);
44624 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
44625 " local port 0x%04X. jiffies = %lu.\n",
44626 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
44627@@ -3941,7 +3941,7 @@ static void cm_event_reset(struct nes_cm_event *event)
44628
44629 cm_id->add_ref(cm_id);
44630 ret = cm_id->event_handler(cm_id, &cm_event);
44631- atomic_inc(&cm_closes);
44632+ atomic_inc_unchecked(&cm_closes);
44633 cm_event.event = IW_CM_EVENT_CLOSE;
44634 cm_event.status = 0;
44635 cm_event.provider_data = cm_id->provider_data;
44636@@ -3981,7 +3981,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
44637 return;
44638 cm_id = cm_node->cm_id;
44639
44640- atomic_inc(&cm_connect_reqs);
44641+ atomic_inc_unchecked(&cm_connect_reqs);
44642 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
44643 cm_node, cm_id, jiffies);
44644
44645@@ -4030,7 +4030,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
44646 return;
44647 cm_id = cm_node->cm_id;
44648
44649- atomic_inc(&cm_connect_reqs);
44650+ atomic_inc_unchecked(&cm_connect_reqs);
44651 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
44652 cm_node, cm_id, jiffies);
44653
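
The hunks above (and the nes_mgt.c/nes_nic.c/nes_verbs.c ones that follow) convert the nes driver's connection-manager statistics from atomic_t to atomic_unchecked_t. Under PaX's REFCOUNT hardening, the ordinary atomic ops trap on signed overflow to catch reference-count bugs; counters that are pure statistics, and may legitimately wrap, opt out via the _unchecked variants. A minimal sketch of the distinction, assuming the REFCOUNT-off fallback simply aliases the plain atomics (the real definitions live elsewhere in this patch):

/* --- illustrative sketch, not part of the patch --- */
typedef struct { int counter; } atomic_unchecked_t;

/* same memory effect as atomic_inc(), but never instrumented with the
 * REFCOUNT overflow trap, so a wrapping statistic cannot false-positive */
static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}
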
44654diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
44655index 4166452..fc952c3 100644
44656--- a/drivers/infiniband/hw/nes/nes_mgt.c
44657+++ b/drivers/infiniband/hw/nes/nes_mgt.c
44658@@ -40,8 +40,8 @@
44659 #include "nes.h"
44660 #include "nes_mgt.h"
44661
44662-atomic_t pau_qps_created;
44663-atomic_t pau_qps_destroyed;
44664+atomic_unchecked_t pau_qps_created;
44665+atomic_unchecked_t pau_qps_destroyed;
44666
44667 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
44668 {
44669@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
44670 {
44671 struct sk_buff *skb;
44672 unsigned long flags;
44673- atomic_inc(&pau_qps_destroyed);
44674+ atomic_inc_unchecked(&pau_qps_destroyed);
44675
44676 /* Free packets that have not yet been forwarded */
44677 /* Lock is acquired by skb_dequeue when removing the skb */
44678@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
44679 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
44680 skb_queue_head_init(&nesqp->pau_list);
44681 spin_lock_init(&nesqp->pau_lock);
44682- atomic_inc(&pau_qps_created);
44683+ atomic_inc_unchecked(&pau_qps_created);
44684 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
44685 }
44686
44687diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
44688index 49eb511..a774366 100644
44689--- a/drivers/infiniband/hw/nes/nes_nic.c
44690+++ b/drivers/infiniband/hw/nes/nes_nic.c
44691@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
44692 target_stat_values[++index] = mh_detected;
44693 target_stat_values[++index] = mh_pauses_sent;
44694 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
44695- target_stat_values[++index] = atomic_read(&cm_connects);
44696- target_stat_values[++index] = atomic_read(&cm_accepts);
44697- target_stat_values[++index] = atomic_read(&cm_disconnects);
44698- target_stat_values[++index] = atomic_read(&cm_connecteds);
44699- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
44700- target_stat_values[++index] = atomic_read(&cm_rejects);
44701- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
44702- target_stat_values[++index] = atomic_read(&qps_created);
44703- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
44704- target_stat_values[++index] = atomic_read(&qps_destroyed);
44705- target_stat_values[++index] = atomic_read(&cm_closes);
44706+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
44707+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
44708+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
44709+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
44710+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
44711+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
44712+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
44713+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
44714+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
44715+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
44716+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
44717 target_stat_values[++index] = cm_packets_sent;
44718 target_stat_values[++index] = cm_packets_bounced;
44719 target_stat_values[++index] = cm_packets_created;
44720 target_stat_values[++index] = cm_packets_received;
44721 target_stat_values[++index] = cm_packets_dropped;
44722 target_stat_values[++index] = cm_packets_retrans;
44723- target_stat_values[++index] = atomic_read(&cm_listens_created);
44724- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
44725+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
44726+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
44727 target_stat_values[++index] = cm_backlog_drops;
44728- target_stat_values[++index] = atomic_read(&cm_loopbacks);
44729- target_stat_values[++index] = atomic_read(&cm_nodes_created);
44730- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
44731- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
44732- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
44733+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
44734+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
44735+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
44736+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
44737+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
44738 target_stat_values[++index] = nesadapter->free_4kpbl;
44739 target_stat_values[++index] = nesadapter->free_256pbl;
44740 target_stat_values[++index] = int_mod_timer_init;
44741 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
44742 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
44743 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
44744- target_stat_values[++index] = atomic_read(&pau_qps_created);
44745- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
44746+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
44747+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
44748 }
44749
44750 /**
44751diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
44752index 218dd35..97ce31d 100644
44753--- a/drivers/infiniband/hw/nes/nes_verbs.c
44754+++ b/drivers/infiniband/hw/nes/nes_verbs.c
44755@@ -46,9 +46,9 @@
44756
44757 #include <rdma/ib_umem.h>
44758
44759-atomic_t mod_qp_timouts;
44760-atomic_t qps_created;
44761-atomic_t sw_qps_destroyed;
44762+atomic_unchecked_t mod_qp_timouts;
44763+atomic_unchecked_t qps_created;
44764+atomic_unchecked_t sw_qps_destroyed;
44765
44766 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
44767
44768@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
44769 if (init_attr->create_flags)
44770 return ERR_PTR(-EINVAL);
44771
44772- atomic_inc(&qps_created);
44773+ atomic_inc_unchecked(&qps_created);
44774 switch (init_attr->qp_type) {
44775 case IB_QPT_RC:
44776 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
44777@@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
44778 struct iw_cm_event cm_event;
44779 int ret = 0;
44780
44781- atomic_inc(&sw_qps_destroyed);
44782+ atomic_inc_unchecked(&sw_qps_destroyed);
44783 nesqp->destroyed = 1;
44784
44785 /* Blow away the connection if it exists. */
44786diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
44787index c00ae09..04e91be 100644
44788--- a/drivers/infiniband/hw/qib/qib.h
44789+++ b/drivers/infiniband/hw/qib/qib.h
44790@@ -52,6 +52,7 @@
44791 #include <linux/kref.h>
44792 #include <linux/sched.h>
44793 #include <linux/kthread.h>
44794+#include <linux/slab.h>
44795
44796 #include "qib_common.h"
44797 #include "qib_verbs.h"
44798diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
44799index 24c41ba..102d71f 100644
44800--- a/drivers/input/gameport/gameport.c
44801+++ b/drivers/input/gameport/gameport.c
44802@@ -490,14 +490,14 @@ EXPORT_SYMBOL(gameport_set_phys);
44803 */
44804 static void gameport_init_port(struct gameport *gameport)
44805 {
44806- static atomic_t gameport_no = ATOMIC_INIT(0);
44807+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
44808
44809 __module_get(THIS_MODULE);
44810
44811 mutex_init(&gameport->drv_mutex);
44812 device_initialize(&gameport->dev);
44813 dev_set_name(&gameport->dev, "gameport%lu",
44814- (unsigned long)atomic_inc_return(&gameport_no) - 1);
44815+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
44816 gameport->dev.bus = &gameport_bus;
44817 gameport->dev.release = gameport_release_port;
44818 if (gameport->parent)
44819diff --git a/drivers/input/input.c b/drivers/input/input.c
44820index 29ca0bb..f4bc2e3 100644
44821--- a/drivers/input/input.c
44822+++ b/drivers/input/input.c
44823@@ -1774,7 +1774,7 @@ EXPORT_SYMBOL_GPL(input_class);
44824 */
44825 struct input_dev *input_allocate_device(void)
44826 {
44827- static atomic_t input_no = ATOMIC_INIT(0);
44828+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
44829 struct input_dev *dev;
44830
44831 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
44832@@ -1789,7 +1789,7 @@ struct input_dev *input_allocate_device(void)
44833 INIT_LIST_HEAD(&dev->node);
44834
44835 dev_set_name(&dev->dev, "input%ld",
44836- (unsigned long) atomic_inc_return(&input_no) - 1);
44837+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
44838
44839 __module_get(THIS_MODULE);
44840 }
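
gameport_init_port() above and input_allocate_device() here share one idiom: a static counter whose only job is to hand out instance numbers for device names (serio.c, serio_raw.c, xpad.c and ims-pcu.c below do the same). Wraparound is harmless for naming, which is why these counters are safe to demote to the unchecked type. A user-space analogue of the numbering scheme, where atomic_inc_return(...) - 1 yields 0-based names:

/* --- illustrative sketch, not part of the patch --- */
#include <stdio.h>
#include <stdatomic.h>

static atomic_ulong input_no;	/* stands in for the static counter */

static void name_device(char *buf, size_t len)
{
	/* fetch-then-add gives post-increment semantics: the first
	 * caller gets "input0", the next "input1", and so on */
	unsigned long n = atomic_fetch_add(&input_no, 1);
	snprintf(buf, len, "input%lu", n);
}

int main(void)
{
	char name[16];
	name_device(name, sizeof(name));
	printf("%s\n", name);	/* -> input0 */
	return 0;
}
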
44841diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
44842index 4a95b22..874c182 100644
44843--- a/drivers/input/joystick/sidewinder.c
44844+++ b/drivers/input/joystick/sidewinder.c
44845@@ -30,6 +30,7 @@
44846 #include <linux/kernel.h>
44847 #include <linux/module.h>
44848 #include <linux/slab.h>
44849+#include <linux/sched.h>
44850 #include <linux/input.h>
44851 #include <linux/gameport.h>
44852 #include <linux/jiffies.h>
44853diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
44854index 603fe0d..f63decc 100644
44855--- a/drivers/input/joystick/xpad.c
44856+++ b/drivers/input/joystick/xpad.c
44857@@ -737,7 +737,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
44858
44859 static int xpad_led_probe(struct usb_xpad *xpad)
44860 {
44861- static atomic_t led_seq = ATOMIC_INIT(0);
44862+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
44863 long led_no;
44864 struct xpad_led *led;
44865 struct led_classdev *led_cdev;
44866@@ -750,7 +750,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
44867 if (!led)
44868 return -ENOMEM;
44869
44870- led_no = (long)atomic_inc_return(&led_seq) - 1;
44871+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
44872
44873 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
44874 led->xpad = xpad;
44875diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
44876index 719410f..1896169 100644
44877--- a/drivers/input/misc/ims-pcu.c
44878+++ b/drivers/input/misc/ims-pcu.c
44879@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
44880
44881 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
44882 {
44883- static atomic_t device_no = ATOMIC_INIT(0);
44884+ static atomic_unchecked_t device_no = ATOMIC_INIT(0);
44885
44886 const struct ims_pcu_device_info *info;
44887 int error;
44888@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
44889 }
44890
44891 /* Device appears to be operable, complete initialization */
44892- pcu->device_no = atomic_inc_return(&device_no) - 1;
44893+ pcu->device_no = atomic_inc_return_unchecked(&device_no) - 1;
44894
44895 /*
44896 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
44897diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
44898index 2f0b39d..7370f13 100644
44899--- a/drivers/input/mouse/psmouse.h
44900+++ b/drivers/input/mouse/psmouse.h
44901@@ -116,7 +116,7 @@ struct psmouse_attribute {
44902 ssize_t (*set)(struct psmouse *psmouse, void *data,
44903 const char *buf, size_t count);
44904 bool protect;
44905-};
44906+} __do_const;
44907 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
44908
44909 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
44910diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
44911index b604564..3f14ae4 100644
44912--- a/drivers/input/mousedev.c
44913+++ b/drivers/input/mousedev.c
44914@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
44915
44916 spin_unlock_irq(&client->packet_lock);
44917
44918- if (copy_to_user(buffer, data, count))
44919+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
44920 return -EFAULT;
44921
44922 return count;
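
mousedev_read() copies out of a fixed local packet buffer, so the added count > sizeof(data) guard rejects oversized requests before the copy instead of trusting the caller-supplied length; the b1.c and icn.c hunks below add the mirror-image guard in front of copy_from_user(). The shape of the check, as a user-space sketch:

/* --- illustrative sketch, not part of the patch --- */
#include <errno.h>
#include <string.h>
#include <sys/types.h>

static ssize_t read_packet(char *dst, size_t count)
{
	char data[8];			/* stand-in for the fixed buffer */

	memset(data, 0, sizeof(data));	/* ...filled from the device... */
	if (count > sizeof(data))	/* bound the length first */
		return -EFAULT;
	memcpy(dst, data, count);	/* copy_to_user() in the driver */
	return (ssize_t)count;
}
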
44923diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
44924index b29134d..394deb0 100644
44925--- a/drivers/input/serio/serio.c
44926+++ b/drivers/input/serio/serio.c
44927@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
44928 */
44929 static void serio_init_port(struct serio *serio)
44930 {
44931- static atomic_t serio_no = ATOMIC_INIT(0);
44932+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
44933
44934 __module_get(THIS_MODULE);
44935
44936@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
44937 mutex_init(&serio->drv_mutex);
44938 device_initialize(&serio->dev);
44939 dev_set_name(&serio->dev, "serio%ld",
44940- (long)atomic_inc_return(&serio_no) - 1);
44941+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
44942 serio->dev.bus = &serio_bus;
44943 serio->dev.release = serio_release_port;
44944 serio->dev.groups = serio_device_attr_groups;
44945diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
44946index c9a02fe..0debc75 100644
44947--- a/drivers/input/serio/serio_raw.c
44948+++ b/drivers/input/serio/serio_raw.c
44949@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
44950
44951 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
44952 {
44953- static atomic_t serio_raw_no = ATOMIC_INIT(0);
44954+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(0);
44955 struct serio_raw *serio_raw;
44956 int err;
44957
44958@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
44959 }
44960
44961 snprintf(serio_raw->name, sizeof(serio_raw->name),
44962- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no) - 1);
44963+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no) - 1);
44964 kref_init(&serio_raw->kref);
44965 INIT_LIST_HEAD(&serio_raw->client_list);
44966 init_waitqueue_head(&serio_raw->wait);
44967diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
44968index e5555fc..937986d 100644
44969--- a/drivers/iommu/iommu.c
44970+++ b/drivers/iommu/iommu.c
44971@@ -588,7 +588,7 @@ static struct notifier_block iommu_bus_nb = {
44972 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
44973 {
44974 bus_register_notifier(bus, &iommu_bus_nb);
44975- bus_for_each_dev(bus, NULL, ops, add_iommu_group);
44976+ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
44977 }
44978
44979 /**
44980diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
44981index 33c4395..e06447e 100644
44982--- a/drivers/iommu/irq_remapping.c
44983+++ b/drivers/iommu/irq_remapping.c
44984@@ -354,7 +354,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
44985 void panic_if_irq_remap(const char *msg)
44986 {
44987 if (irq_remapping_enabled)
44988- panic(msg);
44989+ panic("%s", msg);
44990 }
44991
44992 static void ir_ack_apic_edge(struct irq_data *data)
44993@@ -375,10 +375,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
44994
44995 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
44996 {
44997- chip->irq_print_chip = ir_print_prefix;
44998- chip->irq_ack = ir_ack_apic_edge;
44999- chip->irq_eoi = ir_ack_apic_level;
45000- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
45001+ pax_open_kernel();
45002+ *(void **)&chip->irq_print_chip = ir_print_prefix;
45003+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
45004+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
45005+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
45006+ pax_close_kernel();
45007 }
45008
45009 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
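
Two distinct fixes above: panic(msg) becomes panic("%s", msg) because panic() takes a printf-style format, so a '%' in msg would otherwise be interpreted as a directive; and the irq_chip callback assignments are wrapped in pax_open_kernel()/pax_close_kernel(), which briefly lift write protection so the now read-only structure can still be patched (see the CR0.WP sketch after the lguest switcher below). The format-string half, reduced to user space:

/* --- illustrative sketch, not part of the patch --- */
#include <stdio.h>

void log_msg(const char *msg)
{
	/* printf(msg);      BAD: '%' directives in msg are interpreted,
	 *                   leaking memory or, via %n, writing to it */
	printf("%s\n", msg); /* GOOD: msg is treated purely as data */
}
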
45010diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
45011index 7c131cf..035129b 100644
45012--- a/drivers/irqchip/irq-gic.c
45013+++ b/drivers/irqchip/irq-gic.c
45014@@ -85,7 +85,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
45015 * Supported arch specific GIC irq extension.
45016 * Default make them NULL.
45017 */
45018-struct irq_chip gic_arch_extn = {
45019+irq_chip_no_const gic_arch_extn = {
45020 .irq_eoi = NULL,
45021 .irq_mask = NULL,
45022 .irq_unmask = NULL,
45023@@ -337,7 +337,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
45024 chained_irq_exit(chip, desc);
45025 }
45026
45027-static struct irq_chip gic_chip = {
45028+static irq_chip_no_const gic_chip __read_only = {
45029 .name = "GIC",
45030 .irq_mask = gic_mask_irq,
45031 .irq_unmask = gic_unmask_irq,
45032diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
45033index 8777065..a4a9967 100644
45034--- a/drivers/irqchip/irq-renesas-irqc.c
45035+++ b/drivers/irqchip/irq-renesas-irqc.c
45036@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
45037 struct irqc_priv *p;
45038 struct resource *io;
45039 struct resource *irq;
45040- struct irq_chip *irq_chip;
45041+ irq_chip_no_const *irq_chip;
45042 const char *name = dev_name(&pdev->dev);
45043 int ret;
45044 int k;
45045diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
45046index f9a87ed..3fdd854 100644
45047--- a/drivers/isdn/capi/capi.c
45048+++ b/drivers/isdn/capi/capi.c
45049@@ -81,8 +81,8 @@ struct capiminor {
45050
45051 struct capi20_appl *ap;
45052 u32 ncci;
45053- atomic_t datahandle;
45054- atomic_t msgid;
45055+ atomic_unchecked_t datahandle;
45056+ atomic_unchecked_t msgid;
45057
45058 struct tty_port port;
45059 int ttyinstop;
45060@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
45061 capimsg_setu16(s, 2, mp->ap->applid);
45062 capimsg_setu8 (s, 4, CAPI_DATA_B3);
45063 capimsg_setu8 (s, 5, CAPI_RESP);
45064- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
45065+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
45066 capimsg_setu32(s, 8, mp->ncci);
45067 capimsg_setu16(s, 12, datahandle);
45068 }
45069@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
45070 mp->outbytes -= len;
45071 spin_unlock_bh(&mp->outlock);
45072
45073- datahandle = atomic_inc_return(&mp->datahandle);
45074+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
45075 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
45076 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
45077 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
45078 capimsg_setu16(skb->data, 2, mp->ap->applid);
45079 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
45080 capimsg_setu8 (skb->data, 5, CAPI_REQ);
45081- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
45082+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
45083 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
45084 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
45085 capimsg_setu16(skb->data, 16, len); /* Data length */
45086diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
45087index b7ae0a0..04590fa 100644
45088--- a/drivers/isdn/gigaset/bas-gigaset.c
45089+++ b/drivers/isdn/gigaset/bas-gigaset.c
45090@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
45091
45092
45093 static const struct gigaset_ops gigops = {
45094- gigaset_write_cmd,
45095- gigaset_write_room,
45096- gigaset_chars_in_buffer,
45097- gigaset_brkchars,
45098- gigaset_init_bchannel,
45099- gigaset_close_bchannel,
45100- gigaset_initbcshw,
45101- gigaset_freebcshw,
45102- gigaset_reinitbcshw,
45103- gigaset_initcshw,
45104- gigaset_freecshw,
45105- gigaset_set_modem_ctrl,
45106- gigaset_baud_rate,
45107- gigaset_set_line_ctrl,
45108- gigaset_isoc_send_skb,
45109- gigaset_isoc_input,
45110+ .write_cmd = gigaset_write_cmd,
45111+ .write_room = gigaset_write_room,
45112+ .chars_in_buffer = gigaset_chars_in_buffer,
45113+ .brkchars = gigaset_brkchars,
45114+ .init_bchannel = gigaset_init_bchannel,
45115+ .close_bchannel = gigaset_close_bchannel,
45116+ .initbcshw = gigaset_initbcshw,
45117+ .freebcshw = gigaset_freebcshw,
45118+ .reinitbcshw = gigaset_reinitbcshw,
45119+ .initcshw = gigaset_initcshw,
45120+ .freecshw = gigaset_freecshw,
45121+ .set_modem_ctrl = gigaset_set_modem_ctrl,
45122+ .baud_rate = gigaset_baud_rate,
45123+ .set_line_ctrl = gigaset_set_line_ctrl,
45124+ .send_skb = gigaset_isoc_send_skb,
45125+ .handle_input = gigaset_isoc_input,
45126 };
45127
45128 /* bas_gigaset_init
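
This gigaset_ops table (and its twins in ser-gigaset.c and usb-gigaset.c below) moves from positional to designated initializers. Positional initialization silently mis-binds every later callback if a member is ever added or reordered in the struct; naming each field makes the binding explicit and compiler-checked. Minimal illustration with a made-up ops struct:

/* --- illustrative sketch, not part of the patch --- */
struct ops {
	int  (*open)(void);
	void (*close)(void);
};

static int  my_open(void)  { return 0; }
static void my_close(void) { }

/* fragile: correctness depends on member order */
static const struct ops positional = { my_open, my_close };

/* robust: order-independent and self-documenting */
static const struct ops designated = {
	.open  = my_open,
	.close = my_close,
};
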
45129diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
45130index 600c79b..3752bab 100644
45131--- a/drivers/isdn/gigaset/interface.c
45132+++ b/drivers/isdn/gigaset/interface.c
45133@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
45134 }
45135 tty->driver_data = cs;
45136
45137- ++cs->port.count;
45138+ atomic_inc(&cs->port.count);
45139
45140- if (cs->port.count == 1) {
45141+ if (atomic_read(&cs->port.count) == 1) {
45142 tty_port_tty_set(&cs->port, tty);
45143 cs->port.low_latency = 1;
45144 }
45145@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
45146
45147 if (!cs->connected)
45148 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
45149- else if (!cs->port.count)
45150+ else if (!atomic_read(&cs->port.count))
45151 dev_warn(cs->dev, "%s: device not opened\n", __func__);
45152- else if (!--cs->port.count)
45153+ else if (!atomic_dec_return(&cs->port.count))
45154 tty_port_tty_set(&cs->port, NULL);
45155
45156 mutex_unlock(&cs->mutex);
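
if_open()/if_close() move the port.count accesses to atomic ops; elsewhere in this patch struct tty_port's count field is presumably converted to atomic_t, closing the open/close races a plain int permits (isdn_tty.c below gets the same treatment). A user-space sketch of the first-open/last-close logic:

/* --- illustrative sketch, not part of the patch --- */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int port_count;

bool port_open(void)	/* true for the first opener */
{
	/* the driver does atomic_inc() then atomic_read() == 1 */
	return atomic_fetch_add(&port_count, 1) + 1 == 1;
}

bool port_close(void)	/* true for the last closer */
{
	/* !atomic_dec_return(&count) in the driver */
	return atomic_fetch_sub(&port_count, 1) - 1 == 0;
}
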
45157diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
45158index 8c91fd5..14f13ce 100644
45159--- a/drivers/isdn/gigaset/ser-gigaset.c
45160+++ b/drivers/isdn/gigaset/ser-gigaset.c
45161@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
45162 }
45163
45164 static const struct gigaset_ops ops = {
45165- gigaset_write_cmd,
45166- gigaset_write_room,
45167- gigaset_chars_in_buffer,
45168- gigaset_brkchars,
45169- gigaset_init_bchannel,
45170- gigaset_close_bchannel,
45171- gigaset_initbcshw,
45172- gigaset_freebcshw,
45173- gigaset_reinitbcshw,
45174- gigaset_initcshw,
45175- gigaset_freecshw,
45176- gigaset_set_modem_ctrl,
45177- gigaset_baud_rate,
45178- gigaset_set_line_ctrl,
45179- gigaset_m10x_send_skb, /* asyncdata.c */
45180- gigaset_m10x_input, /* asyncdata.c */
45181+ .write_cmd = gigaset_write_cmd,
45182+ .write_room = gigaset_write_room,
45183+ .chars_in_buffer = gigaset_chars_in_buffer,
45184+ .brkchars = gigaset_brkchars,
45185+ .init_bchannel = gigaset_init_bchannel,
45186+ .close_bchannel = gigaset_close_bchannel,
45187+ .initbcshw = gigaset_initbcshw,
45188+ .freebcshw = gigaset_freebcshw,
45189+ .reinitbcshw = gigaset_reinitbcshw,
45190+ .initcshw = gigaset_initcshw,
45191+ .freecshw = gigaset_freecshw,
45192+ .set_modem_ctrl = gigaset_set_modem_ctrl,
45193+ .baud_rate = gigaset_baud_rate,
45194+ .set_line_ctrl = gigaset_set_line_ctrl,
45195+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
45196+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
45197 };
45198
45199
45200diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
45201index d0a41cb..b953e50 100644
45202--- a/drivers/isdn/gigaset/usb-gigaset.c
45203+++ b/drivers/isdn/gigaset/usb-gigaset.c
45204@@ -547,7 +547,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
45205 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
45206 memcpy(cs->hw.usb->bchars, buf, 6);
45207 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
45208- 0, 0, &buf, 6, 2000);
45209+ 0, 0, buf, 6, 2000);
45210 }
45211
45212 static void gigaset_freebcshw(struct bc_state *bcs)
45213@@ -869,22 +869,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
45214 }
45215
45216 static const struct gigaset_ops ops = {
45217- gigaset_write_cmd,
45218- gigaset_write_room,
45219- gigaset_chars_in_buffer,
45220- gigaset_brkchars,
45221- gigaset_init_bchannel,
45222- gigaset_close_bchannel,
45223- gigaset_initbcshw,
45224- gigaset_freebcshw,
45225- gigaset_reinitbcshw,
45226- gigaset_initcshw,
45227- gigaset_freecshw,
45228- gigaset_set_modem_ctrl,
45229- gigaset_baud_rate,
45230- gigaset_set_line_ctrl,
45231- gigaset_m10x_send_skb,
45232- gigaset_m10x_input,
45233+ .write_cmd = gigaset_write_cmd,
45234+ .write_room = gigaset_write_room,
45235+ .chars_in_buffer = gigaset_chars_in_buffer,
45236+ .brkchars = gigaset_brkchars,
45237+ .init_bchannel = gigaset_init_bchannel,
45238+ .close_bchannel = gigaset_close_bchannel,
45239+ .initbcshw = gigaset_initbcshw,
45240+ .freebcshw = gigaset_freebcshw,
45241+ .reinitbcshw = gigaset_reinitbcshw,
45242+ .initcshw = gigaset_initcshw,
45243+ .freecshw = gigaset_freecshw,
45244+ .set_modem_ctrl = gigaset_set_modem_ctrl,
45245+ .baud_rate = gigaset_baud_rate,
45246+ .set_line_ctrl = gigaset_set_line_ctrl,
45247+ .send_skb = gigaset_m10x_send_skb,
45248+ .handle_input = gigaset_m10x_input,
45249 };
45250
45251 /*
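
The first usb-gigaset.c hunk is a plain bug fix rather than hardening: buf is an array parameter, which decays to a pointer, so &buf is the address of that pointer variable and usb_control_msg() was sending 6 bytes of stack instead of the break characters. Reduced to its essence:

/* --- illustrative sketch, not part of the patch --- */
#include <stdio.h>
#include <string.h>

static void send6(const void *p)	/* stand-in for usb_control_msg() */
{
	unsigned char out[6];

	memcpy(out, p, sizeof(out));
	printf("first byte: %02x\n", out[0]);
}

void brkchars(const unsigned char buf[6])
{
	/* send6(&buf);	BAD: &buf is const unsigned char **, so this
	 *		reads the pointer's own bytes off the stack */
	send6(buf);	/* GOOD: the actual six break characters */
}
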
45252diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
45253index 4d9b195..455075c 100644
45254--- a/drivers/isdn/hardware/avm/b1.c
45255+++ b/drivers/isdn/hardware/avm/b1.c
45256@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
45257 }
45258 if (left) {
45259 if (t4file->user) {
45260- if (copy_from_user(buf, dp, left))
45261+ if (left > sizeof buf || copy_from_user(buf, dp, left))
45262 return -EFAULT;
45263 } else {
45264 memcpy(buf, dp, left);
45265@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
45266 }
45267 if (left) {
45268 if (config->user) {
45269- if (copy_from_user(buf, dp, left))
45270+ if (left > sizeof buf || copy_from_user(buf, dp, left))
45271 return -EFAULT;
45272 } else {
45273 memcpy(buf, dp, left);
45274diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
45275index 9b856e1..fa03c92 100644
45276--- a/drivers/isdn/i4l/isdn_common.c
45277+++ b/drivers/isdn/i4l/isdn_common.c
45278@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
45279 } else
45280 return -EINVAL;
45281 case IIOCDBGVAR:
45282+ if (!capable(CAP_SYS_RAWIO))
45283+ return -EPERM;
45284 if (arg) {
45285 if (copy_to_user(argp, &dev, sizeof(ulong)))
45286 return -EFAULT;
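
IIOCDBGVAR hands a kernel pointer to user space -- a KASLR-relevant infoleak -- so the hunk gates it on CAP_SYS_RAWIO. A hedged kernel-context sketch of the pattern (ioctl plumbing simplified away):

/* --- illustrative sketch, not part of the patch; kernel context assumed --- */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

static long debug_ioctl(void __user *argp, unsigned long kernel_ptr)
{
	if (!capable(CAP_SYS_RAWIO))	/* privileged callers only */
		return -EPERM;
	if (copy_to_user(argp, &kernel_ptr, sizeof(kernel_ptr)))
		return -EFAULT;
	return 0;
}
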
45287diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
45288index 91d5730..336523e 100644
45289--- a/drivers/isdn/i4l/isdn_concap.c
45290+++ b/drivers/isdn/i4l/isdn_concap.c
45291@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
45292 }
45293
45294 struct concap_device_ops isdn_concap_reliable_dl_dops = {
45295- &isdn_concap_dl_data_req,
45296- &isdn_concap_dl_connect_req,
45297- &isdn_concap_dl_disconn_req
45298+ .data_req = &isdn_concap_dl_data_req,
45299+ .connect_req = &isdn_concap_dl_connect_req,
45300+ .disconn_req = &isdn_concap_dl_disconn_req
45301 };
45302
45303 /* The following should better go into a dedicated source file such that
45304diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
45305index 62f0688..38ceac5 100644
45306--- a/drivers/isdn/i4l/isdn_ppp.c
45307+++ b/drivers/isdn/i4l/isdn_ppp.c
45308@@ -378,15 +378,10 @@ isdn_ppp_release(int min, struct file *file)
45309 is->slcomp = NULL;
45310 #endif
45311 #ifdef CONFIG_IPPP_FILTER
45312- if (is->pass_filter) {
45313- sk_unattached_filter_destroy(is->pass_filter);
45314- is->pass_filter = NULL;
45315- }
45316-
45317- if (is->active_filter) {
45318- sk_unattached_filter_destroy(is->active_filter);
45319- is->active_filter = NULL;
45320- }
45321+ kfree(is->pass_filter);
45322+ is->pass_filter = NULL;
45323+ kfree(is->active_filter);
45324+ is->active_filter = NULL;
45325 #endif
45326
45327 /* TODO: if this was the previous master: link the stuff to the new master */
45328@@ -442,7 +437,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
45329 {
45330 struct sock_fprog uprog;
45331 struct sock_filter *code = NULL;
45332- int len;
45333+ int len, err;
45334
45335 if (copy_from_user(&uprog, arg, sizeof(uprog)))
45336 return -EFAULT;
45337@@ -458,6 +453,12 @@ static int get_filter(void __user *arg, struct sock_filter **p)
45338 if (IS_ERR(code))
45339 return PTR_ERR(code);
45340
45341+ err = sk_chk_filter(code, uprog.len);
45342+ if (err) {
45343+ kfree(code);
45344+ return err;
45345+ }
45346+
45347 *p = code;
45348 return uprog.len;
45349 }
45350@@ -628,53 +629,25 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
45351 #ifdef CONFIG_IPPP_FILTER
45352 case PPPIOCSPASS:
45353 {
45354- struct sock_fprog_kern fprog;
45355 struct sock_filter *code;
45356- int err, len = get_filter(argp, &code);
45357-
45358+ int len = get_filter(argp, &code);
45359 if (len < 0)
45360 return len;
45361-
45362- fprog.len = len;
45363- fprog.filter = code;
45364-
45365- if (is->pass_filter) {
45366- sk_unattached_filter_destroy(is->pass_filter);
45367- is->pass_filter = NULL;
45368- }
45369- if (fprog.filter != NULL)
45370- err = sk_unattached_filter_create(&is->pass_filter,
45371- &fprog);
45372- else
45373- err = 0;
45374- kfree(code);
45375-
45376- return err;
45377+ kfree(is->pass_filter);
45378+ is->pass_filter = code;
45379+ is->pass_len = len;
45380+ break;
45381 }
45382 case PPPIOCSACTIVE:
45383 {
45384- struct sock_fprog_kern fprog;
45385 struct sock_filter *code;
45386- int err, len = get_filter(argp, &code);
45387-
45388+ int len = get_filter(argp, &code);
45389 if (len < 0)
45390 return len;
45391-
45392- fprog.len = len;
45393- fprog.filter = code;
45394-
45395- if (is->active_filter) {
45396- sk_unattached_filter_destroy(is->active_filter);
45397- is->active_filter = NULL;
45398- }
45399- if (fprog.filter != NULL)
45400- err = sk_unattached_filter_create(&is->active_filter,
45401- &fprog);
45402- else
45403- err = 0;
45404- kfree(code);
45405-
45406- return err;
45407+ kfree(is->active_filter);
45408+ is->active_filter = code;
45409+ is->active_len = len;
45410+ break;
45411 }
45412 #endif /* CONFIG_IPPP_FILTER */
45413 default:
45414@@ -1174,14 +1147,14 @@ isdn_ppp_push_higher(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *
45415 }
45416
45417 if (is->pass_filter
45418- && SK_RUN_FILTER(is->pass_filter, skb) == 0) {
45419+ && sk_run_filter(skb, is->pass_filter) == 0) {
45420 if (is->debug & 0x2)
45421 printk(KERN_DEBUG "IPPP: inbound frame filtered.\n");
45422 kfree_skb(skb);
45423 return;
45424 }
45425 if (!(is->active_filter
45426- && SK_RUN_FILTER(is->active_filter, skb) == 0)) {
45427+ && sk_run_filter(skb, is->active_filter) == 0)) {
45428 if (is->debug & 0x2)
45429 printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
45430 lp->huptimer = 0;
45431@@ -1320,14 +1293,14 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
45432 }
45433
45434 if (ipt->pass_filter
45435- && SK_RUN_FILTER(ipt->pass_filter, skb) == 0) {
45436+ && sk_run_filter(skb, ipt->pass_filter) == 0) {
45437 if (ipt->debug & 0x4)
45438 printk(KERN_DEBUG "IPPP: outbound frame filtered.\n");
45439 kfree_skb(skb);
45440 goto unlock;
45441 }
45442 if (!(ipt->active_filter
45443- && SK_RUN_FILTER(ipt->active_filter, skb) == 0)) {
45444+ && sk_run_filter(skb, ipt->active_filter) == 0)) {
45445 if (ipt->debug & 0x4)
45446 printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
45447 lp->huptimer = 0;
45448@@ -1517,9 +1490,9 @@ int isdn_ppp_autodial_filter(struct sk_buff *skb, isdn_net_local *lp)
45449 }
45450
45451 drop |= is->pass_filter
45452- && SK_RUN_FILTER(is->pass_filter, skb) == 0;
45453+ && sk_run_filter(skb, is->pass_filter) == 0;
45454 drop |= is->active_filter
45455- && SK_RUN_FILTER(is->active_filter, skb) == 0;
45456+ && sk_run_filter(skb, is->active_filter) == 0;
45457
45458 skb_push(skb, IPPP_MAX_HEADER - 4);
45459 return drop;
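
The isdn_ppp.c changes step the driver back from the sk_unattached_filter_*() API to storing the raw sock_filter program and running it with sk_run_filter() (assuming this tree still provides that interface), and -- the security-relevant part -- get_filter() now rejects malformed programs with sk_chk_filter() at ioctl time instead of trusting them at packet time. The validate-then-publish shape:

/* --- illustrative sketch, not part of the patch; kernel context assumed --- */
#include <linux/filter.h>
#include <linux/slab.h>

static int install_filter(struct sock_filter *code, int len,
			  struct sock_filter **slot, int *slot_len)
{
	int err = sk_chk_filter(code, len);	/* validate the program */

	if (err) {
		kfree(code);			/* reject: free our copy */
		return err;
	}
	kfree(*slot);				/* drop any old program */
	*slot = code;				/* publish the checked one */
	*slot_len = len;
	return 0;
}
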
45460diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
45461index 3c5f249..5fac4d0 100644
45462--- a/drivers/isdn/i4l/isdn_tty.c
45463+++ b/drivers/isdn/i4l/isdn_tty.c
45464@@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
45465
45466 #ifdef ISDN_DEBUG_MODEM_OPEN
45467 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
45468- port->count);
45469+ atomic_read(&port->count));
45470 #endif
45471- port->count++;
45472+ atomic_inc(&port->count);
45473 port->tty = tty;
45474 /*
45475 * Start up serial port
45476@@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
45477 #endif
45478 return;
45479 }
45480- if ((tty->count == 1) && (port->count != 1)) {
45481+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
45482 /*
45483 * Uh, oh. tty->count is 1, which means that the tty
45484 * structure will be freed. Info->count should always
45485@@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
45486 * serial port won't be shutdown.
45487 */
45488 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
45489- "info->count is %d\n", port->count);
45490- port->count = 1;
45491+ "info->count is %d\n", atomic_read(&port->count));
45492+ atomic_set(&port->count, 1);
45493 }
45494- if (--port->count < 0) {
45495+ if (atomic_dec_return(&port->count) < 0) {
45496 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
45497- info->line, port->count);
45498- port->count = 0;
45499+ info->line, atomic_read(&port->count));
45500+ atomic_set(&port->count, 0);
45501 }
45502- if (port->count) {
45503+ if (atomic_read(&port->count)) {
45504 #ifdef ISDN_DEBUG_MODEM_OPEN
45505 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
45506 #endif
45507@@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty)
45508 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
45509 return;
45510 isdn_tty_shutdown(info);
45511- port->count = 0;
45512+ atomic_set(&port->count, 0);
45513 port->flags &= ~ASYNC_NORMAL_ACTIVE;
45514 port->tty = NULL;
45515 wake_up_interruptible(&port->open_wait);
45516@@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
45517 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
45518 modem_info *info = &dev->mdm.info[i];
45519
45520- if (info->port.count == 0)
45521+ if (atomic_read(&info->port.count) == 0)
45522 continue;
45523 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
45524 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
45525diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
45526index e2d4e58..40cd045 100644
45527--- a/drivers/isdn/i4l/isdn_x25iface.c
45528+++ b/drivers/isdn/i4l/isdn_x25iface.c
45529@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
45530
45531
45532 static struct concap_proto_ops ix25_pops = {
45533- &isdn_x25iface_proto_new,
45534- &isdn_x25iface_proto_del,
45535- &isdn_x25iface_proto_restart,
45536- &isdn_x25iface_proto_close,
45537- &isdn_x25iface_xmit,
45538- &isdn_x25iface_receive,
45539- &isdn_x25iface_connect_ind,
45540- &isdn_x25iface_disconn_ind
45541+ .proto_new = &isdn_x25iface_proto_new,
45542+ .proto_del = &isdn_x25iface_proto_del,
45543+ .restart = &isdn_x25iface_proto_restart,
45544+ .close = &isdn_x25iface_proto_close,
45545+ .encap_and_xmit = &isdn_x25iface_xmit,
45546+ .data_ind = &isdn_x25iface_receive,
45547+ .connect_ind = &isdn_x25iface_connect_ind,
45548+ .disconn_ind = &isdn_x25iface_disconn_ind
45549 };
45550
45551 /* error message helper function */
45552diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
45553index 6a7447c..cae33fe 100644
45554--- a/drivers/isdn/icn/icn.c
45555+++ b/drivers/isdn/icn/icn.c
45556@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
45557 if (count > len)
45558 count = len;
45559 if (user) {
45560- if (copy_from_user(msg, buf, count))
45561+ if (count > sizeof msg || copy_from_user(msg, buf, count))
45562 return -EFAULT;
45563 } else
45564 memcpy(msg, buf, count);
45565diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
45566index a4f05c5..1433bc5 100644
45567--- a/drivers/isdn/mISDN/dsp_cmx.c
45568+++ b/drivers/isdn/mISDN/dsp_cmx.c
45569@@ -1628,7 +1628,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
45570 static u16 dsp_count; /* last sample count */
45571 static int dsp_count_valid; /* if we have last sample count */
45572
45573-void
45574+void __intentional_overflow(-1)
45575 dsp_cmx_send(void *arg)
45576 {
45577 struct dsp_conf *conf;
45578diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
45579index f58a354..fbae176 100644
45580--- a/drivers/leds/leds-clevo-mail.c
45581+++ b/drivers/leds/leds-clevo-mail.c
45582@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
45583 * detected as working, but in reality it is not) as low as
45584 * possible.
45585 */
45586-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
45587+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
45588 {
45589 .callback = clevo_mail_led_dmi_callback,
45590 .ident = "Clevo D410J",
45591diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
45592index 2eb3ef6..295891f 100644
45593--- a/drivers/leds/leds-ss4200.c
45594+++ b/drivers/leds/leds-ss4200.c
45595@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
45596 * detected as working, but in reality it is not) as low as
45597 * possible.
45598 */
45599-static struct dmi_system_id nas_led_whitelist[] __initdata = {
45600+static struct dmi_system_id nas_led_whitelist[] __initconst = {
45601 {
45602 .callback = ss4200_led_dmi_callback,
45603 .ident = "Intel SS4200-E",
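
Both LED DMI tables move from __initdata to __initconst. The two sections are equally discarded once boot finishes, but __initconst lands in the read-only init area, so the tables are write-protected for the short time they exist. The usual pattern (note that __initconst expects a const object):

/* --- illustrative sketch, not part of the patch; kernel context assumed --- */
#include <linux/init.h>

/* const + __initconst: read-only while present, freed after boot */
static const int boot_params[] __initconst = { 10, 20, 30 };

static int __init use_params(void)
{
	return boot_params[0];	/* only reachable from __init code */
}
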
45604diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
45605index 0bf1e4e..b4bf44e 100644
45606--- a/drivers/lguest/core.c
45607+++ b/drivers/lguest/core.c
45608@@ -97,9 +97,17 @@ static __init int map_switcher(void)
45609 * The end address needs +1 because __get_vm_area allocates an
45610 * extra guard page, so we need space for that.
45611 */
45612+
45613+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
45614+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
45615+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
45616+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
45617+#else
45618 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
45619 VM_ALLOC, switcher_addr, switcher_addr
45620 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
45621+#endif
45622+
45623 if (!switcher_vma) {
45624 err = -ENOMEM;
45625 printk("lguest: could not map switcher pages high\n");
45626@@ -124,7 +132,7 @@ static __init int map_switcher(void)
45627 * Now the Switcher is mapped at the right address, we can't fail!
45628 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
45629 */
45630- memcpy(switcher_vma->addr, start_switcher_text,
45631+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
45632 end_switcher_text - start_switcher_text);
45633
45634 printk(KERN_INFO "lguest: mapped switcher at %p\n",
45635diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
45636index e8b55c3..3514c37 100644
45637--- a/drivers/lguest/page_tables.c
45638+++ b/drivers/lguest/page_tables.c
45639@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
45640 /*:*/
45641
45642 #ifdef CONFIG_X86_PAE
45643-static void release_pmd(pmd_t *spmd)
45644+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
45645 {
45646 /* If the entry's not present, there's nothing to release. */
45647 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
45648diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
45649index 922a1ac..9dd0c2a 100644
45650--- a/drivers/lguest/x86/core.c
45651+++ b/drivers/lguest/x86/core.c
45652@@ -59,7 +59,7 @@ static struct {
45653 /* Offset from where switcher.S was compiled to where we've copied it */
45654 static unsigned long switcher_offset(void)
45655 {
45656- return switcher_addr - (unsigned long)start_switcher_text;
45657+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
45658 }
45659
45660 /* This cpu's struct lguest_pages (after the Switcher text page) */
45661@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
45662 * These copies are pretty cheap, so we do them unconditionally: */
45663 /* Save the current Host top-level page directory.
45664 */
45665+
45666+#ifdef CONFIG_PAX_PER_CPU_PGD
45667+ pages->state.host_cr3 = read_cr3();
45668+#else
45669 pages->state.host_cr3 = __pa(current->mm->pgd);
45670+#endif
45671+
45672 /*
45673 * Set up the Guest's page tables to see this CPU's pages (and no
45674 * other CPU's pages).
45675@@ -477,7 +483,7 @@ void __init lguest_arch_host_init(void)
45676 * compiled-in switcher code and the high-mapped copy we just made.
45677 */
45678 for (i = 0; i < IDT_ENTRIES; i++)
45679- default_idt_entries[i] += switcher_offset();
45680+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
45681
45682 /*
45683 * Set up the Switcher's per-cpu areas.
45684@@ -560,7 +566,7 @@ void __init lguest_arch_host_init(void)
45685 * it will be undisturbed when we switch. To change %cs and jump we
45686 * need this structure to feed to Intel's "lcall" instruction.
45687 */
45688- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
45689+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
45690 lguest_entry.segment = LGUEST_CS;
45691
45692 /*
45693diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
45694index 40634b0..4f5855e 100644
45695--- a/drivers/lguest/x86/switcher_32.S
45696+++ b/drivers/lguest/x86/switcher_32.S
45697@@ -87,6 +87,7 @@
45698 #include <asm/page.h>
45699 #include <asm/segment.h>
45700 #include <asm/lguest.h>
45701+#include <asm/processor-flags.h>
45702
45703 // We mark the start of the code to copy
45704 // It's placed in .text tho it's never run here
45705@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
45706 // Changes type when we load it: damn Intel!
45707 // For after we switch over our page tables
45708 // That entry will be read-only: we'd crash.
45709+
45710+#ifdef CONFIG_PAX_KERNEXEC
45711+ mov %cr0, %edx
45712+ xor $X86_CR0_WP, %edx
45713+ mov %edx, %cr0
45714+#endif
45715+
45716 movl $(GDT_ENTRY_TSS*8), %edx
45717 ltr %dx
45718
45719@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
45720 // Let's clear it again for our return.
45721 // The GDT descriptor of the Host
45722 // Points to the table after two "size" bytes
45723- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
45724+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
45725 // Clear "used" from type field (byte 5, bit 2)
45726- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
45727+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
45728+
45729+#ifdef CONFIG_PAX_KERNEXEC
45730+ mov %cr0, %eax
45731+ xor $X86_CR0_WP, %eax
45732+ mov %eax, %cr0
45733+#endif
45734
45735 // Once our page table's switched, the Guest is live!
45736 // The Host fades as we run this final step.
45737@@ -295,13 +309,12 @@ deliver_to_host:
45738 // I consulted gcc, and it gave
45739 // These instructions, which I gladly credit:
45740 leal (%edx,%ebx,8), %eax
45741- movzwl (%eax),%edx
45742- movl 4(%eax), %eax
45743- xorw %ax, %ax
45744- orl %eax, %edx
45745+ movl 4(%eax), %edx
45746+ movw (%eax), %dx
45747 // Now the address of the handler's in %edx
45748 // We call it now: its "iret" drops us home.
45749- jmp *%edx
45750+ ljmp $__KERNEL_CS, $1f
45751+1: jmp *%edx
45752
45753 // Every interrupt can come to us here
45754 // But we must truly tell each apart.
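
The switcher_32.S hunks bracket the TSS-descriptor write with a CR0.WP toggle under CONFIG_PAX_KERNEXEC, for the same reason pax_open_kernel() appeared in irq_remapping.c above: KERNEXEC maps such data read-only, and clearing WP lets ring 0 write through read-only pages. (The final hunk also rebuilds the handler address with movl/movw and re-enters via ljmp so the indirect jump runs with the proper kernel %cs.) A minimal C rendering of the WP toggle, ring 0 only:

/* --- illustrative sketch, not part of the patch; ring 0 only --- */
#include <stdint.h>

#define X86_CR0_WP (1UL << 16)	/* write-protect enforcement bit */

static inline unsigned long read_cr0(void)
{
	unsigned long v;

	asm volatile("mov %%cr0, %0" : "=r"(v));
	return v;
}

static inline void write_cr0(unsigned long v)
{
	asm volatile("mov %0, %%cr0" : : "r"(v) : "memory");
}

/* write one word through an otherwise read-only mapping */
static void patch_ro_word(volatile uint32_t *p, uint32_t val)
{
	unsigned long cr0 = read_cr0();

	write_cr0(cr0 & ~X86_CR0_WP);	/* WP off: writes allowed */
	*p = val;
	write_cr0(cr0 | X86_CR0_WP);	/* WP back on */
}
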
45755diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
45756index a08e3ee..df8ade2 100644
45757--- a/drivers/md/bcache/closure.h
45758+++ b/drivers/md/bcache/closure.h
45759@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
45760 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
45761 struct workqueue_struct *wq)
45762 {
45763- BUG_ON(object_is_on_stack(cl));
45764+ BUG_ON(object_starts_on_stack(cl));
45765 closure_set_ip(cl);
45766 cl->fn = fn;
45767 cl->wq = wq;
45768diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
45769index 67f8b31..9418f2b 100644
45770--- a/drivers/md/bitmap.c
45771+++ b/drivers/md/bitmap.c
45772@@ -1775,7 +1775,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
45773 chunk_kb ? "KB" : "B");
45774 if (bitmap->storage.file) {
45775 seq_printf(seq, ", file: ");
45776- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
45777+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
45778 }
45779
45780 seq_printf(seq, "\n");
45781diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
45782index 5152142..623d141 100644
45783--- a/drivers/md/dm-ioctl.c
45784+++ b/drivers/md/dm-ioctl.c
45785@@ -1769,7 +1769,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
45786 cmd == DM_LIST_VERSIONS_CMD)
45787 return 0;
45788
45789- if ((cmd == DM_DEV_CREATE_CMD)) {
45790+ if (cmd == DM_DEV_CREATE_CMD) {
45791 if (!*param->name) {
45792 DMWARN("name not supplied when creating device");
45793 return -EINVAL;
45794diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
45795index 7dfdb5c..4caada6 100644
45796--- a/drivers/md/dm-raid1.c
45797+++ b/drivers/md/dm-raid1.c
45798@@ -40,7 +40,7 @@ enum dm_raid1_error {
45799
45800 struct mirror {
45801 struct mirror_set *ms;
45802- atomic_t error_count;
45803+ atomic_unchecked_t error_count;
45804 unsigned long error_type;
45805 struct dm_dev *dev;
45806 sector_t offset;
45807@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
45808 struct mirror *m;
45809
45810 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
45811- if (!atomic_read(&m->error_count))
45812+ if (!atomic_read_unchecked(&m->error_count))
45813 return m;
45814
45815 return NULL;
45816@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
45817 * simple way to tell if a device has encountered
45818 * errors.
45819 */
45820- atomic_inc(&m->error_count);
45821+ atomic_inc_unchecked(&m->error_count);
45822
45823 if (test_and_set_bit(error_type, &m->error_type))
45824 return;
45825@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
45826 struct mirror *m = get_default_mirror(ms);
45827
45828 do {
45829- if (likely(!atomic_read(&m->error_count)))
45830+ if (likely(!atomic_read_unchecked(&m->error_count)))
45831 return m;
45832
45833 if (m-- == ms->mirror)
45834@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
45835 {
45836 struct mirror *default_mirror = get_default_mirror(m->ms);
45837
45838- return !atomic_read(&default_mirror->error_count);
45839+ return !atomic_read_unchecked(&default_mirror->error_count);
45840 }
45841
45842 static int mirror_available(struct mirror_set *ms, struct bio *bio)
45843@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
45844 */
45845 if (likely(region_in_sync(ms, region, 1)))
45846 m = choose_mirror(ms, bio->bi_iter.bi_sector);
45847- else if (m && atomic_read(&m->error_count))
45848+ else if (m && atomic_read_unchecked(&m->error_count))
45849 m = NULL;
45850
45851 if (likely(m))
45852@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
45853 }
45854
45855 ms->mirror[mirror].ms = ms;
45856- atomic_set(&(ms->mirror[mirror].error_count), 0);
45857+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
45858 ms->mirror[mirror].error_type = 0;
45859 ms->mirror[mirror].offset = offset;
45860
45861@@ -1342,7 +1342,7 @@ static void mirror_resume(struct dm_target *ti)
45862 */
45863 static char device_status_char(struct mirror *m)
45864 {
45865- if (!atomic_read(&(m->error_count)))
45866+ if (!atomic_read_unchecked(&(m->error_count)))
45867 return 'A';
45868
45869 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
45870diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
45871index 28a9012..9c0f6a5 100644
45872--- a/drivers/md/dm-stats.c
45873+++ b/drivers/md/dm-stats.c
45874@@ -382,7 +382,7 @@ do_sync_free:
45875 synchronize_rcu_expedited();
45876 dm_stat_free(&s->rcu_head);
45877 } else {
45878- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
45879+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
45880 call_rcu(&s->rcu_head, dm_stat_free);
45881 }
45882 return 0;
45883@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
45884 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
45885 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
45886 ));
45887- ACCESS_ONCE(last->last_sector) = end_sector;
45888- ACCESS_ONCE(last->last_rw) = bi_rw;
45889+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
45890+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
45891 }
45892
45893 rcu_read_lock();
45894diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
45895index d1600d2..4c3af3a 100644
45896--- a/drivers/md/dm-stripe.c
45897+++ b/drivers/md/dm-stripe.c
45898@@ -21,7 +21,7 @@ struct stripe {
45899 struct dm_dev *dev;
45900 sector_t physical_start;
45901
45902- atomic_t error_count;
45903+ atomic_unchecked_t error_count;
45904 };
45905
45906 struct stripe_c {
45907@@ -186,7 +186,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
45908 kfree(sc);
45909 return r;
45910 }
45911- atomic_set(&(sc->stripe[i].error_count), 0);
45912+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
45913 }
45914
45915 ti->private = sc;
45916@@ -330,7 +330,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
45917 DMEMIT("%d ", sc->stripes);
45918 for (i = 0; i < sc->stripes; i++) {
45919 DMEMIT("%s ", sc->stripe[i].dev->name);
45920- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
45921+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
45922 'D' : 'A';
45923 }
45924 buffer[i] = '\0';
45925@@ -375,8 +375,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
45926 */
45927 for (i = 0; i < sc->stripes; i++)
45928 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
45929- atomic_inc(&(sc->stripe[i].error_count));
45930- if (atomic_read(&(sc->stripe[i].error_count)) <
45931+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
45932+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
45933 DM_IO_ERROR_THRESHOLD)
45934 schedule_work(&sc->trigger_event);
45935 }
45936diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
45937index 5f59f1e..01bd02e 100644
45938--- a/drivers/md/dm-table.c
45939+++ b/drivers/md/dm-table.c
45940@@ -274,7 +274,7 @@ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
45941 static int open_dev(struct dm_dev_internal *d, dev_t dev,
45942 struct mapped_device *md)
45943 {
45944- static char *_claim_ptr = "I belong to device-mapper";
45945+ static char _claim_ptr[] = "I belong to device-mapper";
45946 struct block_device *bdev;
45947
45948 int r;
45949@@ -342,7 +342,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
45950 if (!dev_size)
45951 return 0;
45952
45953- if ((start >= dev_size) || (start + len > dev_size)) {
45954+ if ((start >= dev_size) || (len > dev_size - start)) {
45955 DMWARN("%s: %s too small for target: "
45956 "start=%llu, len=%llu, dev_size=%llu",
45957 dm_device_name(ti->table->md), bdevname(bdev, b),
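
device_area_is_invalid() swaps "start + len > dev_size" for "len > dev_size - start". The original sum can wrap for a huge len and slip past the check; once start >= dev_size has been ruled out, the rewritten form computes the remaining room first and cannot overflow. The same logic in isolation:

/* --- illustrative sketch, not part of the patch --- */
#include <stdbool.h>
#include <stdint.h>

/* true if [start, start + len) does not fit inside [0, dev_size) */
static bool range_invalid(uint64_t start, uint64_t len, uint64_t dev_size)
{
	/* the first clause guards the subtraction; the second is the
	 * wrap-proof form of start + len > dev_size */
	return start >= dev_size || len > dev_size - start;
}
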
45958diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
45959index e9d33ad..dae9880d 100644
45960--- a/drivers/md/dm-thin-metadata.c
45961+++ b/drivers/md/dm-thin-metadata.c
45962@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
45963 {
45964 pmd->info.tm = pmd->tm;
45965 pmd->info.levels = 2;
45966- pmd->info.value_type.context = pmd->data_sm;
45967+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
45968 pmd->info.value_type.size = sizeof(__le64);
45969 pmd->info.value_type.inc = data_block_inc;
45970 pmd->info.value_type.dec = data_block_dec;
45971@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
45972
45973 pmd->bl_info.tm = pmd->tm;
45974 pmd->bl_info.levels = 1;
45975- pmd->bl_info.value_type.context = pmd->data_sm;
45976+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
45977 pmd->bl_info.value_type.size = sizeof(__le64);
45978 pmd->bl_info.value_type.inc = data_block_inc;
45979 pmd->bl_info.value_type.dec = data_block_dec;
45980diff --git a/drivers/md/dm.c b/drivers/md/dm.c
45981index 32b958d..34011e8 100644
45982--- a/drivers/md/dm.c
45983+++ b/drivers/md/dm.c
45984@@ -180,9 +180,9 @@ struct mapped_device {
45985 /*
45986 * Event handling.
45987 */
45988- atomic_t event_nr;
45989+ atomic_unchecked_t event_nr;
45990 wait_queue_head_t eventq;
45991- atomic_t uevent_seq;
45992+ atomic_unchecked_t uevent_seq;
45993 struct list_head uevent_list;
45994 spinlock_t uevent_lock; /* Protect access to uevent_list */
45995
45996@@ -1952,8 +1952,8 @@ static struct mapped_device *alloc_dev(int minor)
45997 spin_lock_init(&md->deferred_lock);
45998 atomic_set(&md->holders, 1);
45999 atomic_set(&md->open_count, 0);
46000- atomic_set(&md->event_nr, 0);
46001- atomic_set(&md->uevent_seq, 0);
46002+ atomic_set_unchecked(&md->event_nr, 0);
46003+ atomic_set_unchecked(&md->uevent_seq, 0);
46004 INIT_LIST_HEAD(&md->uevent_list);
46005 spin_lock_init(&md->uevent_lock);
46006
46007@@ -2107,7 +2107,7 @@ static void event_callback(void *context)
46008
46009 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
46010
46011- atomic_inc(&md->event_nr);
46012+ atomic_inc_unchecked(&md->event_nr);
46013 wake_up(&md->eventq);
46014 }
46015
46016@@ -2800,18 +2800,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
46017
46018 uint32_t dm_next_uevent_seq(struct mapped_device *md)
46019 {
46020- return atomic_add_return(1, &md->uevent_seq);
46021+ return atomic_add_return_unchecked(1, &md->uevent_seq);
46022 }
46023
46024 uint32_t dm_get_event_nr(struct mapped_device *md)
46025 {
46026- return atomic_read(&md->event_nr);
46027+ return atomic_read_unchecked(&md->event_nr);
46028 }
46029
46030 int dm_wait_event(struct mapped_device *md, int event_nr)
46031 {
46032 return wait_event_interruptible(md->eventq,
46033- (event_nr != atomic_read(&md->event_nr)));
46034+ (event_nr != atomic_read_unchecked(&md->event_nr)));
46035 }
46036
46037 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
46038diff --git a/drivers/md/md.c b/drivers/md/md.c
46039index 32fc19c..cb6eba3 100644
46040--- a/drivers/md/md.c
46041+++ b/drivers/md/md.c
46042@@ -194,10 +194,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
46043 * start build, activate spare
46044 */
46045 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
46046-static atomic_t md_event_count;
46047+static atomic_unchecked_t md_event_count;
46048 void md_new_event(struct mddev *mddev)
46049 {
46050- atomic_inc(&md_event_count);
46051+ atomic_inc_unchecked(&md_event_count);
46052 wake_up(&md_event_waiters);
46053 }
46054 EXPORT_SYMBOL_GPL(md_new_event);
46055@@ -207,7 +207,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
46056 */
46057 static void md_new_event_inintr(struct mddev *mddev)
46058 {
46059- atomic_inc(&md_event_count);
46060+ atomic_inc_unchecked(&md_event_count);
46061 wake_up(&md_event_waiters);
46062 }
46063
46064@@ -1462,7 +1462,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
46065 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
46066 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
46067 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
46068- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
46069+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
46070
46071 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
46072 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
46073@@ -1713,7 +1713,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
46074 else
46075 sb->resync_offset = cpu_to_le64(0);
46076
46077- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
46078+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
46079
46080 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
46081 sb->size = cpu_to_le64(mddev->dev_sectors);
46082@@ -2725,7 +2725,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
46083 static ssize_t
46084 errors_show(struct md_rdev *rdev, char *page)
46085 {
46086- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
46087+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
46088 }
46089
46090 static ssize_t
46091@@ -2734,7 +2734,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
46092 char *e;
46093 unsigned long n = simple_strtoul(buf, &e, 10);
46094 if (*buf && (*e == 0 || *e == '\n')) {
46095- atomic_set(&rdev->corrected_errors, n);
46096+ atomic_set_unchecked(&rdev->corrected_errors, n);
46097 return len;
46098 }
46099 return -EINVAL;
46100@@ -3183,8 +3183,8 @@ int md_rdev_init(struct md_rdev *rdev)
46101 rdev->sb_loaded = 0;
46102 rdev->bb_page = NULL;
46103 atomic_set(&rdev->nr_pending, 0);
46104- atomic_set(&rdev->read_errors, 0);
46105- atomic_set(&rdev->corrected_errors, 0);
46106+ atomic_set_unchecked(&rdev->read_errors, 0);
46107+ atomic_set_unchecked(&rdev->corrected_errors, 0);
46108
46109 INIT_LIST_HEAD(&rdev->same_set);
46110 init_waitqueue_head(&rdev->blocked_wait);
46111@@ -7068,7 +7068,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
46112
46113 spin_unlock(&pers_lock);
46114 seq_printf(seq, "\n");
46115- seq->poll_event = atomic_read(&md_event_count);
46116+ seq->poll_event = atomic_read_unchecked(&md_event_count);
46117 return 0;
46118 }
46119 if (v == (void*)2) {
46120@@ -7171,7 +7171,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
46121 return error;
46122
46123 seq = file->private_data;
46124- seq->poll_event = atomic_read(&md_event_count);
46125+ seq->poll_event = atomic_read_unchecked(&md_event_count);
46126 return error;
46127 }
46128
46129@@ -7188,7 +7188,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
46130 /* always allow read */
46131 mask = POLLIN | POLLRDNORM;
46132
46133- if (seq->poll_event != atomic_read(&md_event_count))
46134+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
46135 mask |= POLLERR | POLLPRI;
46136 return mask;
46137 }
46138@@ -7232,7 +7232,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
46139 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
46140 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
46141 (int)part_stat_read(&disk->part0, sectors[1]) -
46142- atomic_read(&disk->sync_io);
46143+ atomic_read_unchecked(&disk->sync_io);
46144 /* sync IO will cause sync_io to increase before the disk_stats
46145 * as sync_io is counted when a request starts, and
46146 * disk_stats is counted when it completes.
46147diff --git a/drivers/md/md.h b/drivers/md/md.h
46148index a49d991..3582bb7 100644
46149--- a/drivers/md/md.h
46150+++ b/drivers/md/md.h
46151@@ -94,13 +94,13 @@ struct md_rdev {
46152 * only maintained for arrays that
46153 * support hot removal
46154 */
46155- atomic_t read_errors; /* number of consecutive read errors that
46156+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
46157 * we have tried to ignore.
46158 */
46159 struct timespec last_read_error; /* monotonic time since our
46160 * last read error
46161 */
46162- atomic_t corrected_errors; /* number of corrected read errors,
46163+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
46164 * for reporting to userspace and storing
46165 * in superblock.
46166 */
46167@@ -449,7 +449,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
46168
46169 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
46170 {
46171- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
46172+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
46173 }
46174
46175 struct md_personality
46176diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
46177index 786b689..ea8c956 100644
46178--- a/drivers/md/persistent-data/dm-space-map-metadata.c
46179+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
46180@@ -679,7 +679,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
46181 * Flick into a mode where all blocks get allocated in the new area.
46182 */
46183 smm->begin = old_len;
46184- memcpy(sm, &bootstrap_ops, sizeof(*sm));
46185+ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
46186
46187 /*
46188 * Extend.
46189@@ -710,7 +710,7 @@ out:
46190 /*
46191 * Switch back to normal behaviour.
46192 */
46193- memcpy(sm, &ops, sizeof(*sm));
46194+ memcpy((void *)sm, &ops, sizeof(*sm));
46195 return r;
46196 }
46197
46198diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
46199index 3e6d115..ffecdeb 100644
46200--- a/drivers/md/persistent-data/dm-space-map.h
46201+++ b/drivers/md/persistent-data/dm-space-map.h
46202@@ -71,6 +71,7 @@ struct dm_space_map {
46203 dm_sm_threshold_fn fn,
46204 void *context);
46205 };
46206+typedef struct dm_space_map __no_const dm_space_map_no_const;
46207
46208 /*----------------------------------------------------------------*/
46209
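
The __no_const typedef added above works with grsecurity's constify gcc plugin: structures made up purely of function pointers become read-only by default, and dm-space-map-metadata.c is one of the few sites that must still rewrite such an object in place (the bootstrap_ops swap in the previous file), hence the casts through a non-const alias. A compile-time sketch of the pattern, with __no_const reduced to a no-op for illustration:

#include <stdio.h>
#include <string.h>

#define __no_const /* expands to a plugin attribute in the real patch */

struct dm_space_map {
        int (*new_block)(struct dm_space_map *sm);
};
typedef struct dm_space_map __no_const dm_space_map_no_const;

static int bootstrap_new_block(struct dm_space_map *sm) { (void)sm; return 0; }

static const struct dm_space_map bootstrap_ops = {
        .new_block = bootstrap_new_block,
};

/* the one legitimate writer casts away the plugin-enforced constness */
static void flick_to_bootstrap(dm_space_map_no_const *sm)
{
        memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
}

int main(void)
{
        struct dm_space_map sm = { 0 };

        flick_to_bootstrap((dm_space_map_no_const *)&sm);
        printf("ops installed: %d\n", sm.new_block(&sm));
        return 0;
}
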
46210diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
46211index 56e24c0..e1c8e1f 100644
46212--- a/drivers/md/raid1.c
46213+++ b/drivers/md/raid1.c
46214@@ -1931,7 +1931,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
46215 if (r1_sync_page_io(rdev, sect, s,
46216 bio->bi_io_vec[idx].bv_page,
46217 READ) != 0)
46218- atomic_add(s, &rdev->corrected_errors);
46219+ atomic_add_unchecked(s, &rdev->corrected_errors);
46220 }
46221 sectors -= s;
46222 sect += s;
46223@@ -2165,7 +2165,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
46224 test_bit(In_sync, &rdev->flags)) {
46225 if (r1_sync_page_io(rdev, sect, s,
46226 conf->tmppage, READ)) {
46227- atomic_add(s, &rdev->corrected_errors);
46228+ atomic_add_unchecked(s, &rdev->corrected_errors);
46229 printk(KERN_INFO
46230 "md/raid1:%s: read error corrected "
46231 "(%d sectors at %llu on %s)\n",
46232diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
46233index cb882aa..cb8aeca 100644
46234--- a/drivers/md/raid10.c
46235+++ b/drivers/md/raid10.c
46236@@ -1949,7 +1949,7 @@ static void end_sync_read(struct bio *bio, int error)
46237 /* The write handler will notice the lack of
46238 * R10BIO_Uptodate and record any errors etc
46239 */
46240- atomic_add(r10_bio->sectors,
46241+ atomic_add_unchecked(r10_bio->sectors,
46242 &conf->mirrors[d].rdev->corrected_errors);
46243
46244 /* for reconstruct, we always reschedule after a read.
46245@@ -2307,7 +2307,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
46246 {
46247 struct timespec cur_time_mon;
46248 unsigned long hours_since_last;
46249- unsigned int read_errors = atomic_read(&rdev->read_errors);
46250+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
46251
46252 ktime_get_ts(&cur_time_mon);
46253
46254@@ -2329,9 +2329,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
46255 * overflowing the shift of read_errors by hours_since_last.
46256 */
46257 if (hours_since_last >= 8 * sizeof(read_errors))
46258- atomic_set(&rdev->read_errors, 0);
46259+ atomic_set_unchecked(&rdev->read_errors, 0);
46260 else
46261- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
46262+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
46263 }
46264
46265 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
46266@@ -2385,8 +2385,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
46267 return;
46268
46269 check_decay_read_errors(mddev, rdev);
46270- atomic_inc(&rdev->read_errors);
46271- if (atomic_read(&rdev->read_errors) > max_read_errors) {
46272+ atomic_inc_unchecked(&rdev->read_errors);
46273+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
46274 char b[BDEVNAME_SIZE];
46275 bdevname(rdev->bdev, b);
46276
46277@@ -2394,7 +2394,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
46278 "md/raid10:%s: %s: Raid device exceeded "
46279 "read_error threshold [cur %d:max %d]\n",
46280 mdname(mddev), b,
46281- atomic_read(&rdev->read_errors), max_read_errors);
46282+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
46283 printk(KERN_NOTICE
46284 "md/raid10:%s: %s: Failing raid device\n",
46285 mdname(mddev), b);
46286@@ -2549,7 +2549,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
46287 sect +
46288 choose_data_offset(r10_bio, rdev)),
46289 bdevname(rdev->bdev, b));
46290- atomic_add(s, &rdev->corrected_errors);
46291+ atomic_add_unchecked(s, &rdev->corrected_errors);
46292 }
46293
46294 rdev_dec_pending(rdev, mddev);
46295@@ -2954,6 +2954,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
46296 */
46297 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
46298 end_reshape(conf);
46299+ close_sync(conf);
46300 return 0;
46301 }
46302
46303@@ -4411,7 +4412,7 @@ read_more:
46304 read_bio->bi_private = r10_bio;
46305 read_bio->bi_end_io = end_sync_read;
46306 read_bio->bi_rw = READ;
46307- read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
46308+ read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
46309 read_bio->bi_flags |= 1 << BIO_UPTODATE;
46310 read_bio->bi_vcnt = 0;
46311 read_bio->bi_iter.bi_size = 0;
46312diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
46313index 6234b2e..4990801 100644
46314--- a/drivers/md/raid5.c
46315+++ b/drivers/md/raid5.c
46316@@ -1731,6 +1731,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
46317 return 1;
46318 }
46319
46320+#ifdef CONFIG_GRKERNSEC_HIDESYM
46321+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
46322+#endif
46323+
46324 static int grow_stripes(struct r5conf *conf, int num)
46325 {
46326 struct kmem_cache *sc;
46327@@ -1742,7 +1746,11 @@ static int grow_stripes(struct r5conf *conf, int num)
46328 "raid%d-%s", conf->level, mdname(conf->mddev));
46329 else
46330 sprintf(conf->cache_name[0],
46331+#ifdef CONFIG_GRKERNSEC_HIDESYM
46332+ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
46333+#else
46334 "raid%d-%p", conf->level, conf->mddev);
46335+#endif
46336 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
46337
46338 conf->active_name = 0;
46339@@ -2018,21 +2026,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
46340 mdname(conf->mddev), STRIPE_SECTORS,
46341 (unsigned long long)s,
46342 bdevname(rdev->bdev, b));
46343- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
46344+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
46345 clear_bit(R5_ReadError, &sh->dev[i].flags);
46346 clear_bit(R5_ReWrite, &sh->dev[i].flags);
46347 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
46348 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
46349
46350- if (atomic_read(&rdev->read_errors))
46351- atomic_set(&rdev->read_errors, 0);
46352+ if (atomic_read_unchecked(&rdev->read_errors))
46353+ atomic_set_unchecked(&rdev->read_errors, 0);
46354 } else {
46355 const char *bdn = bdevname(rdev->bdev, b);
46356 int retry = 0;
46357 int set_bad = 0;
46358
46359 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
46360- atomic_inc(&rdev->read_errors);
46361+ atomic_inc_unchecked(&rdev->read_errors);
46362 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
46363 printk_ratelimited(
46364 KERN_WARNING
46365@@ -2060,7 +2068,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
46366 mdname(conf->mddev),
46367 (unsigned long long)s,
46368 bdn);
46369- } else if (atomic_read(&rdev->read_errors)
46370+ } else if (atomic_read_unchecked(&rdev->read_errors)
46371 > conf->max_nr_stripes)
46372 printk(KERN_WARNING
46373 "md/raid:%s: Too many read errors, failing device %s.\n",
46374@@ -3817,6 +3825,8 @@ static void handle_stripe(struct stripe_head *sh)
46375 set_bit(R5_Wantwrite, &dev->flags);
46376 if (prexor)
46377 continue;
46378+ if (s.failed > 1)
46379+ continue;
46380 if (!test_bit(R5_Insync, &dev->flags) ||
46381 ((i == sh->pd_idx || i == sh->qd_idx) &&
46382 s.failed == 0))
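
The raid5 hunk above is the GRKERNSEC_HIDESYM variant of a common change in this patch: slab-cache names built with %p leak kernel pointers into /proc/slabinfo, so a monotonically increasing id is printed instead. A userspace sketch of the before/after naming; the plain counter stands in for atomic_inc_return_unchecked():

#include <stdio.h>

static unsigned long raid5_cache_id;    /* atomic in the kernel version */

int main(void)
{
        char name[32];
        int level = 5;
        void *mddev = &raid5_cache_id;  /* stand-in for conf->mddev */

        snprintf(name, sizeof(name), "raid%d-%p", level, mddev);
        printf("leaks an address:  %s\n", name);

        snprintf(name, sizeof(name), "raid%d-%08lx", level, ++raid5_cache_id);
        printf("opaque identifier: %s\n", name);
        return 0;
}
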
46383diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
46384index 983db75..ef9248c 100644
46385--- a/drivers/media/dvb-core/dvbdev.c
46386+++ b/drivers/media/dvb-core/dvbdev.c
46387@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
46388 const struct dvb_device *template, void *priv, int type)
46389 {
46390 struct dvb_device *dvbdev;
46391- struct file_operations *dvbdevfops;
46392+ file_operations_no_const *dvbdevfops;
46393 struct device *clsdev;
46394 int minor;
46395 int id;
46396diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
46397index 539f4db..cdd403b 100644
46398--- a/drivers/media/dvb-frontends/af9033.h
46399+++ b/drivers/media/dvb-frontends/af9033.h
46400@@ -82,7 +82,7 @@ struct af9033_ops {
46401 int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
46402 int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
46403 int onoff);
46404-};
46405+} __no_const;
46406
46407
46408 #if IS_ENABLED(CONFIG_DVB_AF9033)
46409diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
46410index 9b6c3bb..baeb5c7 100644
46411--- a/drivers/media/dvb-frontends/dib3000.h
46412+++ b/drivers/media/dvb-frontends/dib3000.h
46413@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
46414 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
46415 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
46416 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
46417-};
46418+} __no_const;
46419
46420 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
46421 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
46422diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
46423index ed8cb90..5ef7f79 100644
46424--- a/drivers/media/pci/cx88/cx88-video.c
46425+++ b/drivers/media/pci/cx88/cx88-video.c
46426@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
46427
46428 /* ------------------------------------------------------------------ */
46429
46430-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46431-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46432-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46433+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46434+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46435+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46436
46437 module_param_array(video_nr, int, NULL, 0444);
46438 module_param_array(vbi_nr, int, NULL, 0444);
46439diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
46440index 802642d..5534900 100644
46441--- a/drivers/media/pci/ivtv/ivtv-driver.c
46442+++ b/drivers/media/pci/ivtv/ivtv-driver.c
46443@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
46444 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
46445
46446 /* ivtv instance counter */
46447-static atomic_t ivtv_instance = ATOMIC_INIT(0);
46448+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
46449
46450 /* Parameter declarations */
46451 static int cardtype[IVTV_MAX_CARDS];
46452diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
46453index 9a726ea..f5e9b52 100644
46454--- a/drivers/media/platform/omap/omap_vout.c
46455+++ b/drivers/media/platform/omap/omap_vout.c
46456@@ -63,7 +63,6 @@ enum omap_vout_channels {
46457 OMAP_VIDEO2,
46458 };
46459
46460-static struct videobuf_queue_ops video_vbq_ops;
46461 /* Variables configurable through module params*/
46462 static u32 video1_numbuffers = 3;
46463 static u32 video2_numbuffers = 3;
46464@@ -1015,6 +1014,12 @@ static int omap_vout_open(struct file *file)
46465 {
46466 struct videobuf_queue *q;
46467 struct omap_vout_device *vout = NULL;
46468+ static struct videobuf_queue_ops video_vbq_ops = {
46469+ .buf_setup = omap_vout_buffer_setup,
46470+ .buf_prepare = omap_vout_buffer_prepare,
46471+ .buf_release = omap_vout_buffer_release,
46472+ .buf_queue = omap_vout_buffer_queue,
46473+ };
46474
46475 vout = video_drvdata(file);
46476 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
46477@@ -1032,10 +1037,6 @@ static int omap_vout_open(struct file *file)
46478 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
46479
46480 q = &vout->vbq;
46481- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
46482- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
46483- video_vbq_ops.buf_release = omap_vout_buffer_release;
46484- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
46485 spin_lock_init(&vout->vbq_lock);
46486
46487 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
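
The omap_vout change above replaces a file-scope ops structure that was filled in at open() time with a function-local static initialized at compile time; with no runtime writers left, the constify plugin can place it in read-only memory. A sketch of the resulting shape, with the driver's callback names and a stub body:

#include <stdio.h>

struct videobuf_queue_ops_like {
        void (*buf_setup)(void);
};

static void omap_vout_buffer_setup(void) { puts("setup"); }

static void open_device(void)
{
        /* fully initialized at compile time, never written afterwards */
        static const struct videobuf_queue_ops_like video_vbq_ops = {
                .buf_setup = omap_vout_buffer_setup,
        };

        video_vbq_ops.buf_setup();
}

int main(void)
{
        open_device();
        return 0;
}
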
46488diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
46489index fb2acc5..a2fcbdc4 100644
46490--- a/drivers/media/platform/s5p-tv/mixer.h
46491+++ b/drivers/media/platform/s5p-tv/mixer.h
46492@@ -156,7 +156,7 @@ struct mxr_layer {
46493 /** layer index (unique identifier) */
46494 int idx;
46495 /** callbacks for layer methods */
46496- struct mxr_layer_ops ops;
46497+ struct mxr_layer_ops *ops;
46498 /** format array */
46499 const struct mxr_format **fmt_array;
46500 /** size of format array */
46501diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46502index 74344c7..a39e70e 100644
46503--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46504+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46505@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
46506 {
46507 struct mxr_layer *layer;
46508 int ret;
46509- struct mxr_layer_ops ops = {
46510+ static struct mxr_layer_ops ops = {
46511 .release = mxr_graph_layer_release,
46512 .buffer_set = mxr_graph_buffer_set,
46513 .stream_set = mxr_graph_stream_set,
46514diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
46515index b713403..53cb5ad 100644
46516--- a/drivers/media/platform/s5p-tv/mixer_reg.c
46517+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
46518@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
46519 layer->update_buf = next;
46520 }
46521
46522- layer->ops.buffer_set(layer, layer->update_buf);
46523+ layer->ops->buffer_set(layer, layer->update_buf);
46524
46525 if (done && done != layer->shadow_buf)
46526 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
46527diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
46528index 8a8dbc8..b74c62d 100644
46529--- a/drivers/media/platform/s5p-tv/mixer_video.c
46530+++ b/drivers/media/platform/s5p-tv/mixer_video.c
46531@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
46532 layer->geo.src.height = layer->geo.src.full_height;
46533
46534 mxr_geometry_dump(mdev, &layer->geo);
46535- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46536+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46537 mxr_geometry_dump(mdev, &layer->geo);
46538 }
46539
46540@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
46541 layer->geo.dst.full_width = mbus_fmt.width;
46542 layer->geo.dst.full_height = mbus_fmt.height;
46543 layer->geo.dst.field = mbus_fmt.field;
46544- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46545+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46546
46547 mxr_geometry_dump(mdev, &layer->geo);
46548 }
46549@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
46550 /* set source size to highest accepted value */
46551 geo->src.full_width = max(geo->dst.full_width, pix->width);
46552 geo->src.full_height = max(geo->dst.full_height, pix->height);
46553- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46554+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46555 mxr_geometry_dump(mdev, &layer->geo);
46556 /* set cropping to total visible screen */
46557 geo->src.width = pix->width;
46558@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
46559 geo->src.x_offset = 0;
46560 geo->src.y_offset = 0;
46561 /* assure consistency of geometry */
46562- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
46563+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
46564 mxr_geometry_dump(mdev, &layer->geo);
46565 /* set full size to lowest possible value */
46566 geo->src.full_width = 0;
46567 geo->src.full_height = 0;
46568- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46569+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46570 mxr_geometry_dump(mdev, &layer->geo);
46571
46572 /* returning results */
46573@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
46574 target->width = s->r.width;
46575 target->height = s->r.height;
46576
46577- layer->ops.fix_geometry(layer, stage, s->flags);
46578+ layer->ops->fix_geometry(layer, stage, s->flags);
46579
46580 /* retrieve update selection rectangle */
46581 res.left = target->x_offset;
46582@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
46583 mxr_output_get(mdev);
46584
46585 mxr_layer_update_output(layer);
46586- layer->ops.format_set(layer);
46587+ layer->ops->format_set(layer);
46588 /* enabling layer in hardware */
46589 spin_lock_irqsave(&layer->enq_slock, flags);
46590 layer->state = MXR_LAYER_STREAMING;
46591 spin_unlock_irqrestore(&layer->enq_slock, flags);
46592
46593- layer->ops.stream_set(layer, MXR_ENABLE);
46594+ layer->ops->stream_set(layer, MXR_ENABLE);
46595 mxr_streamer_get(mdev);
46596
46597 return 0;
46598@@ -1030,7 +1030,7 @@ static void stop_streaming(struct vb2_queue *vq)
46599 spin_unlock_irqrestore(&layer->enq_slock, flags);
46600
46601 /* disabling layer in hardware */
46602- layer->ops.stream_set(layer, MXR_DISABLE);
46603+ layer->ops->stream_set(layer, MXR_DISABLE);
46604 /* remove one streamer */
46605 mxr_streamer_put(mdev);
46606 /* allow changes in output configuration */
46607@@ -1068,8 +1068,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
46608
46609 void mxr_layer_release(struct mxr_layer *layer)
46610 {
46611- if (layer->ops.release)
46612- layer->ops.release(layer);
46613+ if (layer->ops->release)
46614+ layer->ops->release(layer);
46615 }
46616
46617 void mxr_base_layer_release(struct mxr_layer *layer)
46618@@ -1095,7 +1095,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
46619
46620 layer->mdev = mdev;
46621 layer->idx = idx;
46622- layer->ops = *ops;
46623+ layer->ops = ops;
46624
46625 spin_lock_init(&layer->enq_slock);
46626 INIT_LIST_HEAD(&layer->enq_list);
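
The s5p-tv mixer hunks convert mxr_layer from embedding a writable copy of mxr_layer_ops to holding a pointer to a static table, so every call site changes from layer->ops.fn() to layer->ops->fn() and the tables themselves can live in read-only memory. A minimal sketch of the conversion (the "_like" types are illustrative stand-ins):

#include <stdio.h>

struct mxr_layer_ops_like {
        void (*stream_set)(int enable);
};

struct mxr_layer_like {
        const struct mxr_layer_ops_like *ops;   /* was: struct ... ops; */
};

static void graph_stream_set(int enable) { printf("stream %d\n", enable); }

static const struct mxr_layer_ops_like graph_ops = {
        .stream_set = graph_stream_set,
};

int main(void)
{
        struct mxr_layer_like layer = { .ops = &graph_ops };

        layer.ops->stream_set(1);       /* layer->ops->stream_set() in the driver */
        return 0;
}
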
46627diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46628index c9388c4..ce71ece 100644
46629--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46630+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46631@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
46632 {
46633 struct mxr_layer *layer;
46634 int ret;
46635- struct mxr_layer_ops ops = {
46636+ static struct mxr_layer_ops ops = {
46637 .release = mxr_vp_layer_release,
46638 .buffer_set = mxr_vp_buffer_set,
46639 .stream_set = mxr_vp_stream_set,
46640diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
46641index d00bf3d..1301a0c 100644
46642--- a/drivers/media/platform/vivi.c
46643+++ b/drivers/media/platform/vivi.c
46644@@ -58,8 +58,8 @@ MODULE_AUTHOR("Mauro Carvalho Chehab, Ted Walther and John Sokol");
46645 MODULE_LICENSE("Dual BSD/GPL");
46646 MODULE_VERSION(VIVI_VERSION);
46647
46648-static unsigned video_nr = -1;
46649-module_param(video_nr, uint, 0644);
46650+static int video_nr = -1;
46651+module_param(video_nr, int, 0644);
46652 MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
46653
46654 static unsigned n_devs = 1;
46655diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
46656index d719e59..63f3470 100644
46657--- a/drivers/media/radio/radio-cadet.c
46658+++ b/drivers/media/radio/radio-cadet.c
46659@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
46660 unsigned char readbuf[RDS_BUFFER];
46661 int i = 0;
46662
46663+ if (count > RDS_BUFFER)
46664+ return -EFAULT;
46665 mutex_lock(&dev->lock);
46666 if (dev->rdsstat == 0)
46667 cadet_start_rds(dev);
46668@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
46669 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
46670 mutex_unlock(&dev->lock);
46671
46672- if (i && copy_to_user(data, readbuf, i))
46673- return -EFAULT;
46674+ if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
46675+ i = -EFAULT;
46676+
46677 return i;
46678 }
46679
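
The cadet_read change bounds a user-controlled count against the on-stack staging buffer before the copy loop runs, and re-checks the produced length against sizeof(readbuf) before copy_to_user. A userspace sketch of the same clamp-then-copy discipline; the RDS_BUFFER value is assumed here:

#include <stdio.h>
#include <string.h>

#define RDS_BUFFER 256          /* assumed; matches the driver's constant name */

static long cadet_read_like(char *dst, size_t count, const char *rds, size_t avail)
{
        char readbuf[RDS_BUFFER];
        size_t i;

        if (count > RDS_BUFFER)         /* reject oversized requests up front */
                return -14;             /* -EFAULT, as in the patch */

        for (i = 0; i < count && i < avail; i++)
                readbuf[i] = rds[i];

        if (i > sizeof(readbuf))        /* belt-and-braces, as in the patch */
                return -14;
        memcpy(dst, readbuf, i);        /* copy_to_user() in the kernel */
        return (long)i;
}

int main(void)
{
        char out[8];
        long n = cadet_read_like(out, sizeof(out), "rds-data", 8);

        printf("read %ld bytes\n", n);
        return 0;
}
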
46680diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
46681index 5236035..c622c74 100644
46682--- a/drivers/media/radio/radio-maxiradio.c
46683+++ b/drivers/media/radio/radio-maxiradio.c
46684@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
46685 /* TEA5757 pin mappings */
46686 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
46687
46688-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
46689+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
46690
46691 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
46692 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
46693diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
46694index 050b3bb..79f62b9 100644
46695--- a/drivers/media/radio/radio-shark.c
46696+++ b/drivers/media/radio/radio-shark.c
46697@@ -79,7 +79,7 @@ struct shark_device {
46698 u32 last_val;
46699 };
46700
46701-static atomic_t shark_instance = ATOMIC_INIT(0);
46702+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
46703
46704 static void shark_write_val(struct snd_tea575x *tea, u32 val)
46705 {
46706diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
46707index 8654e0d..0608a64 100644
46708--- a/drivers/media/radio/radio-shark2.c
46709+++ b/drivers/media/radio/radio-shark2.c
46710@@ -74,7 +74,7 @@ struct shark_device {
46711 u8 *transfer_buffer;
46712 };
46713
46714-static atomic_t shark_instance = ATOMIC_INIT(0);
46715+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
46716
46717 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
46718 {
46719diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
46720index 2fd9009..278cc1e 100644
46721--- a/drivers/media/radio/radio-si476x.c
46722+++ b/drivers/media/radio/radio-si476x.c
46723@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
46724 struct si476x_radio *radio;
46725 struct v4l2_ctrl *ctrl;
46726
46727- static atomic_t instance = ATOMIC_INIT(0);
46728+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
46729
46730 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
46731 if (!radio)
46732diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
46733index 9fd1527..8927230 100644
46734--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
46735+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
46736@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
46737
46738 static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
46739 {
46740- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
46741- char result[64];
46742- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
46743- sizeof(result), 0);
46744+ char *buf;
46745+ char *result;
46746+ int retval;
46747+
46748+ buf = kmalloc(2, GFP_KERNEL);
46749+ if (buf == NULL)
46750+ return -ENOMEM;
46751+ result = kmalloc(64, GFP_KERNEL);
46752+ if (result == NULL) {
46753+ kfree(buf);
46754+ return -ENOMEM;
46755+ }
46756+
46757+ buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
46758+ buf[1] = enable ? 1 : 0;
46759+
46760+ retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
46761+
46762+ kfree(buf);
46763+ kfree(result);
46764+ return retval;
46765 }
46766
46767 static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
46768 {
46769- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
46770- char state[3];
46771- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
46772+ char *buf;
46773+ char *state;
46774+ int retval;
46775+
46776+ buf = kmalloc(2, GFP_KERNEL);
46777+ if (buf == NULL)
46778+ return -ENOMEM;
46779+ state = kmalloc(3, GFP_KERNEL);
46780+ if (state == NULL) {
46781+ kfree(buf);
46782+ return -ENOMEM;
46783+ }
46784+
46785+ buf[0] = CINERGYT2_EP1_SLEEP_MODE;
46786+ buf[1] = enable ? 0 : 1;
46787+
46788+ retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
46789+
46790+ kfree(buf);
46791+ kfree(state);
46792+ return retval;
46793 }
46794
46795 static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
46796 {
46797- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
46798- char state[3];
46799+ char *query;
46800+ char *state;
46801 int ret;
46802+ query = kmalloc(1, GFP_KERNEL);
46803+ if (query == NULL)
46804+ return -ENOMEM;
46805+ state = kmalloc(3, GFP_KERNEL);
46806+ if (state == NULL) {
46807+ kfree(query);
46808+ return -ENOMEM;
46809+ }
46810+
46811+ query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
46812
46813 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
46814
46815- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
46816- sizeof(state), 0);
46817+ ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
46818 if (ret < 0) {
46819 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
46820 "state info\n");
46821@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
46822
46823 /* Copy this pointer as we are gonna need it in the release phase */
46824 cinergyt2_usb_device = adap->dev;
46825-
46826+ kfree(query);
46827+ kfree(state);
46828 return 0;
46829 }
46830
46831@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
46832 static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46833 {
46834 struct cinergyt2_state *st = d->priv;
46835- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
46836+ u8 *key, *cmd;
46837 int i;
46838
46839+ cmd = kmalloc(1, GFP_KERNEL);
46840+ if (cmd == NULL)
46841+ return -ENOMEM;
46842+ key = kzalloc(5, GFP_KERNEL);
46843+ if (key == NULL) {
46844+ kfree(cmd);
46845+ return -ENOMEM;
46846+ }
46847+
46848+ cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
46849+
46850 *state = REMOTE_NO_KEY_PRESSED;
46851
46852- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
46853+ dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
46854 if (key[4] == 0xff) {
46855 /* key repeat */
46856 st->rc_counter++;
46857@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46858 *event = d->last_event;
46859 deb_rc("repeat key, event %x\n",
46860 *event);
46861- return 0;
46862+ goto out;
46863 }
46864 }
46865 deb_rc("repeated key (non repeatable)\n");
46866 }
46867- return 0;
46868+ goto out;
46869 }
46870
46871 /* hack to pass checksum on the custom field */
46872@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46873
46874 deb_rc("key: %*ph\n", 5, key);
46875 }
46876+out:
46877+ kfree(cmd);
46878+ kfree(key);
46879 return 0;
46880 }
46881
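
The cinergyT2 rewrites above follow one recurring recipe in this patch: buffers handed to dvb_usb_generic_rw() move off the stack onto the heap, since USB transfer buffers must be DMA-safe and, under the stack hardening options, must not expose stack addresses. The shape of the conversion, sketched with userspace stand-ins:

#include <stdlib.h>
#include <string.h>

/* stand-in for dvb_usb_generic_rw(): just zeroes the reply */
static int generic_rw(char *wbuf, int wlen, char *rbuf, int rlen)
{
        (void)wbuf; (void)wlen;
        memset(rbuf, 0, (size_t)rlen);
        return 0;
}

static int streaming_ctrl(int enable)
{
        char *buf, *result;
        int retval;

        buf = malloc(2);                /* kmalloc(2, GFP_KERNEL) */
        if (buf == NULL)
                return -12;             /* -ENOMEM */
        result = malloc(64);
        if (result == NULL) {
                free(buf);
                return -12;
        }

        buf[0] = 0x02;                  /* the EP1 opcode byte */
        buf[1] = enable ? 1 : 0;

        retval = generic_rw(buf, 2, result, 64);

        free(buf);
        free(result);
        return retval;
}

int main(void)
{
        return streaming_ctrl(1) < 0;
}
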
46882diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46883index c890fe4..f9b2ae6 100644
46884--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46885+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46886@@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
46887 fe_status_t *status)
46888 {
46889 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46890- struct dvbt_get_status_msg result;
46891- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46892+ struct dvbt_get_status_msg *result;
46893+ u8 *cmd;
46894 int ret;
46895
46896- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
46897- sizeof(result), 0);
46898+ cmd = kmalloc(1, GFP_KERNEL);
46899+ if (cmd == NULL)
46900+ return -ENOMEM;
46901+ result = kmalloc(sizeof(*result), GFP_KERNEL);
46902+ if (result == NULL) {
46903+ kfree(cmd);
46904+ return -ENOMEM;
46905+ }
46906+
46907+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46908+
46909+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
46910+ sizeof(*result), 0);
46911 if (ret < 0)
46912- return ret;
46913+ goto out;
46914
46915 *status = 0;
46916
46917- if (0xffff - le16_to_cpu(result.gain) > 30)
46918+ if (0xffff - le16_to_cpu(result->gain) > 30)
46919 *status |= FE_HAS_SIGNAL;
46920- if (result.lock_bits & (1 << 6))
46921+ if (result->lock_bits & (1 << 6))
46922 *status |= FE_HAS_LOCK;
46923- if (result.lock_bits & (1 << 5))
46924+ if (result->lock_bits & (1 << 5))
46925 *status |= FE_HAS_SYNC;
46926- if (result.lock_bits & (1 << 4))
46927+ if (result->lock_bits & (1 << 4))
46928 *status |= FE_HAS_CARRIER;
46929- if (result.lock_bits & (1 << 1))
46930+ if (result->lock_bits & (1 << 1))
46931 *status |= FE_HAS_VITERBI;
46932
46933 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
46934 (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
46935 *status &= ~FE_HAS_LOCK;
46936
46937- return 0;
46938+out:
46939+ kfree(cmd);
46940+ kfree(result);
46941+ return ret;
46942 }
46943
46944 static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
46945 {
46946 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46947- struct dvbt_get_status_msg status;
46948- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46949+ struct dvbt_get_status_msg *status;
46950+ char *cmd;
46951 int ret;
46952
46953- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46954- sizeof(status), 0);
46955+ cmd = kmalloc(1, GFP_KERNEL);
46956+ if (cmd == NULL)
46957+ return -ENOMEM;
46958+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46959+ if (status == NULL) {
46960+ kfree(cmd);
46961+ return -ENOMEM;
46962+ }
46963+
46964+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46965+
46966+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46967+ sizeof(*status), 0);
46968 if (ret < 0)
46969- return ret;
46970+ goto out;
46971
46972- *ber = le32_to_cpu(status.viterbi_error_rate);
46973+ *ber = le32_to_cpu(status->viterbi_error_rate);
46974+out:
46975+ kfree(cmd);
46976+ kfree(status);
46977- return 0;
46978+ return ret;
46978 }
46979
46980 static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
46981 {
46982 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46983- struct dvbt_get_status_msg status;
46984- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46985+ struct dvbt_get_status_msg *status;
46986+ u8 *cmd;
46987 int ret;
46988
46989- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
46990- sizeof(status), 0);
46991+ cmd = kmalloc(1, GFP_KERNEL);
46992+ if (cmd == NULL)
46993+ return -ENOMEM;
46994+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46995+ if (status == NULL) {
46996+ kfree(cmd);
46997+ return -ENOMEM;
46998+ }
46999+
47000+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
47001+
47002+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
47003+ sizeof(*status), 0);
47004 if (ret < 0) {
47005 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
47006 ret);
47007- return ret;
47008+ goto out;
47009 }
47010- *unc = le32_to_cpu(status.uncorrected_block_count);
47011- return 0;
47012+ *unc = le32_to_cpu(status->uncorrected_block_count);
47013+
47014+out:
47015+ kfree(cmd);
47016+ kfree(status);
47017+ return ret;
47018 }
47019
47020 static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
47021 u16 *strength)
47022 {
47023 struct cinergyt2_fe_state *state = fe->demodulator_priv;
47024- struct dvbt_get_status_msg status;
47025- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
47026+ struct dvbt_get_status_msg *status;
47027+ char *cmd;
47028 int ret;
47029
47030- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
47031- sizeof(status), 0);
47032+ cmd = kmalloc(1, GFP_KERNEL);
47033+ if (cmd == NULL)
47034+ return -ENOMEM;
47035+ status = kmalloc(sizeof(*status), GFP_KERNEL);
47036+ if (status == NULL) {
47037+ kfree(cmd);
47038+ return -ENOMEM;
47039+ }
47040+
47041+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
47042+
47043+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
47044+ sizeof(*status), 0);
47045 if (ret < 0) {
47046 err("cinergyt2_fe_read_signal_strength() Failed!"
47047 " (Error=%d)\n", ret);
47048- return ret;
47049+ goto out;
47050 }
47051- *strength = (0xffff - le16_to_cpu(status.gain));
47052+ *strength = (0xffff - le16_to_cpu(status->gain));
47053+
47054+out:
47055+ kfree(cmd);
47056+ kfree(status);
47057- return 0;
47058+ return ret;
47058 }
47059
47060 static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
47061 {
47062 struct cinergyt2_fe_state *state = fe->demodulator_priv;
47063- struct dvbt_get_status_msg status;
47064- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
47065+ struct dvbt_get_status_msg *status;
47066+ char *cmd;
47067 int ret;
47068
47069- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
47070- sizeof(status), 0);
47071+ cmd = kmalloc(1, GFP_KERNEL);
47072+ if (cmd == NULL)
47073+ return -ENOMEM;
47074+ status = kmalloc(sizeof(*status), GFP_KERNEL);
47075+ if (status == NULL) {
47076+ kfree(cmd);
47077+ return -ENOMEM;
47078+ }
47079+
47080+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
47081+
47082+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
47083+ sizeof(*status), 0);
47084 if (ret < 0) {
47085 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
47086- return ret;
47087+ goto out;
47088 }
47089- *snr = (status.snr << 8) | status.snr;
47090- return 0;
47091+ *snr = (status->snr << 8) | status->snr;
47092+
47093+out:
47094+ kfree(cmd);
47095+ kfree(status);
47096+ return ret;
47097 }
47098
47099 static int cinergyt2_fe_init(struct dvb_frontend *fe)
47100@@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
47101 {
47102 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
47103 struct cinergyt2_fe_state *state = fe->demodulator_priv;
47104- struct dvbt_set_parameters_msg param;
47105- char result[2];
47106+ struct dvbt_set_parameters_msg *param;
47107+ char *result;
47108 int err;
47109
47110- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
47111- param.tps = cpu_to_le16(compute_tps(fep));
47112- param.freq = cpu_to_le32(fep->frequency / 1000);
47113- param.flags = 0;
47114+ result = kmalloc(2, GFP_KERNEL);
47115+ if (result == NULL)
47116+ return -ENOMEM;
47117+ param = kmalloc(sizeof(*param), GFP_KERNEL);
47118+ if (param == NULL) {
47119+ kfree(result);
47120+ return -ENOMEM;
47121+ }
47122+
47123+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
47124+ param->tps = cpu_to_le16(compute_tps(fep));
47125+ param->freq = cpu_to_le32(fep->frequency / 1000);
47126+ param->flags = 0;
47127
47128 switch (fep->bandwidth_hz) {
47129 default:
47130 case 8000000:
47131- param.bandwidth = 8;
47132+ param->bandwidth = 8;
47133 break;
47134 case 7000000:
47135- param.bandwidth = 7;
47136+ param->bandwidth = 7;
47137 break;
47138 case 6000000:
47139- param.bandwidth = 6;
47140+ param->bandwidth = 6;
47141 break;
47142 }
47143
47144 err = dvb_usb_generic_rw(state->d,
47145- (char *)&param, sizeof(param),
47146- result, sizeof(result), 0);
47147+ (char *)param, sizeof(*param),
47148+ result, 2, 0);
47149 if (err < 0)
47150 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
47151
47152- return (err < 0) ? err : 0;
47153+ kfree(result);
47154+ kfree(param);
47155+ return (err < 0) ? err : 0;
47156 }
47157
47158 static void cinergyt2_fe_release(struct dvb_frontend *fe)
47159diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
47160index a1c641e..3007da9 100644
47161--- a/drivers/media/usb/dvb-usb/cxusb.c
47162+++ b/drivers/media/usb/dvb-usb/cxusb.c
47163@@ -1112,7 +1112,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
47164
47165 struct dib0700_adapter_state {
47166 int (*set_param_save) (struct dvb_frontend *);
47167-};
47168+} __no_const;
47169
47170 static int dib7070_set_param_override(struct dvb_frontend *fe)
47171 {
47172diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
47173index 733a7ff..f8b52e3 100644
47174--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
47175+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
47176@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
47177
47178 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
47179 {
47180- struct hexline hx;
47181- u8 reset;
47182+ struct hexline *hx;
47183+ u8 *reset;
47184 int ret,pos=0;
47185
47186+ reset = kmalloc(1, GFP_KERNEL);
47187+ if (reset == NULL)
47188+ return -ENOMEM;
47189+
47190+ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
47191+ if (hx == NULL) {
47192+ kfree(reset);
47193+ return -ENOMEM;
47194+ }
47195+
47196 /* stop the CPU */
47197- reset = 1;
47198- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
47199+ reset[0] = 1;
47200+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
47201 err("could not stop the USB controller CPU.");
47202
47203- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
47204- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
47205- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
47206+ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
47207+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
47208+ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
47209
47210- if (ret != hx.len) {
47211+ if (ret != hx->len) {
47212 err("error while transferring firmware "
47213 "(transferred size: %d, block size: %d)",
47214- ret,hx.len);
47215+ ret,hx->len);
47216 ret = -EINVAL;
47217 break;
47218 }
47219 }
47220 if (ret < 0) {
47221 err("firmware download failed at %d with %d",pos,ret);
47222+ kfree(reset);
47223+ kfree(hx);
47224 return ret;
47225 }
47226
47227 if (ret == 0) {
47228 /* restart the CPU */
47229- reset = 0;
47230- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
47231+ reset[0] = 0;
47232+ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
47233 err("could not restart the USB controller CPU.");
47234 ret = -EINVAL;
47235 }
47236 } else
47237 ret = -EIO;
47238
47239+ kfree(reset);
47240+ kfree(hx);
47241+
47242 return ret;
47243 }
47244 EXPORT_SYMBOL(usb_cypress_load_firmware);
47245diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
47246index ae0f56a..ec71784 100644
47247--- a/drivers/media/usb/dvb-usb/dw2102.c
47248+++ b/drivers/media/usb/dvb-usb/dw2102.c
47249@@ -118,7 +118,7 @@ struct su3000_state {
47250
47251 struct s6x0_state {
47252 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
47253-};
47254+} __no_const;
47255
47256 /* debug */
47257 static int dvb_usb_dw2102_debug;
47258diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
47259index d947e03..87fef42 100644
47260--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
47261+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
47262@@ -87,8 +87,11 @@ struct technisat_usb2_state {
47263 static int technisat_usb2_i2c_access(struct usb_device *udev,
47264 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
47265 {
47266- u8 b[64];
47267- int ret, actual_length;
47268+ u8 *b = kmalloc(64, GFP_KERNEL);
47269+ int ret, actual_length, error = 0;
47270+
47271+ if (b == NULL)
47272+ return -ENOMEM;
47273
47274 deb_i2c("i2c-access: %02x, tx: ", device_addr);
47275 debug_dump(tx, txlen, deb_i2c);
47276@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47277
47278 if (ret < 0) {
47279 err("i2c-error: out failed %02x = %d", device_addr, ret);
47280- return -ENODEV;
47281+ error = -ENODEV;
47282+ goto out;
47283 }
47284
47285 ret = usb_bulk_msg(udev,
47286@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47287 b, 64, &actual_length, 1000);
47288 if (ret < 0) {
47289 err("i2c-error: in failed %02x = %d", device_addr, ret);
47290- return -ENODEV;
47291+ error = -ENODEV;
47292+ goto out;
47293 }
47294
47295 if (b[0] != I2C_STATUS_OK) {
47296@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47297 /* handle tuner-i2c-nak */
47298 if (!(b[0] == I2C_STATUS_NAK &&
47299 device_addr == 0x60
47300- /* && device_is_technisat_usb2 */))
47301- return -ENODEV;
47302+ /* && device_is_technisat_usb2 */)) {
47303+ error = -ENODEV;
47304+ goto out;
47305+ }
47306 }
47307
47308 deb_i2c("status: %d, ", b[0]);
47309@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47310
47311 deb_i2c("\n");
47312
47313- return 0;
47314+out:
47315+ kfree(b);
47316+ return error;
47317 }
47318
47319 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
47320@@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
47321 {
47322 int ret;
47323
47324- u8 led[8] = {
47325- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
47326- 0
47327- };
47328+ u8 *led;
47329 
47330 if (disable_led_control && state != TECH_LED_OFF)
47331 return 0;
47332 
47333+ led = kzalloc(8, GFP_KERNEL);
47334+ if (led == NULL)
47335+ return -ENOMEM;
47336+
47337+ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
47338 switch (state) {
47339 case TECH_LED_ON:
47340 led[1] = 0x82;
47341@@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
47342 red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
47343 USB_TYPE_VENDOR | USB_DIR_OUT,
47344 0, 0,
47345- led, sizeof(led), 500);
47346+ led, 8, 500);
47347
47348 mutex_unlock(&d->i2c_mutex);
47349+
47350+ kfree(led);
47351+
47352 return ret;
47353 }
47354
47355 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
47356 {
47357 int ret;
47358- u8 b = 0;
47359+ u8 *b;
47360 
47361 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
47362 return -EAGAIN;
47363+
47364+ b = kzalloc(1, GFP_KERNEL);
47365+ if (b == NULL) { mutex_unlock(&d->i2c_mutex); return -ENOMEM; }
47366@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
47367 SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
47368 USB_TYPE_VENDOR | USB_DIR_OUT,
47369 (red << 8) | green, 0,
47370- &b, 1, 500);
47371+ b, 1, 500);
47372
47373 mutex_unlock(&d->i2c_mutex);
47374
47375+ kfree(b);
47376+
47377 return ret;
47378 }
47379
47380@@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47381 struct dvb_usb_device_description **desc, int *cold)
47382 {
47383 int ret;
47384- u8 version[3];
47385+ u8 *version;
47386
47387 /* first select the interface */
47388 if (usb_set_interface(udev, 0, 1) != 0)
47389@@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47390
47391 *cold = 0; /* by default do not download a firmware - just in case something is wrong */
47392
47393+ version = kmalloc(3, GFP_KERNEL);
47394+ if (version == NULL)
47395+ return 0;
47396 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
47397 GET_VERSION_INFO_VENDOR_REQUEST,
47398 USB_TYPE_VENDOR | USB_DIR_IN,
47399 0, 0,
47400- version, sizeof(version), 500);
47401+ version, 3, 500);
47402
47403 if (ret < 0)
47404 *cold = 1;
47405@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47406 *cold = 0;
47407 }
47408
47409+ kfree(version);
47410+
47411 return 0;
47412 }
47413
47414@@ -591,10 +615,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
47415
47416 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
47417 {
47418- u8 buf[62], *b;
47419+ u8 *buf, *b;
47420 int ret;
47421 struct ir_raw_event ev;
47422
47423+ buf = kmalloc(62, GFP_KERNEL);
47424+
47425+ if (buf == NULL)
47426+ return -ENOMEM;
47427+
47428 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
47429 buf[1] = 0x08;
47430 buf[2] = 0x8f;
47431@@ -617,16 +646,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
47432 GET_IR_DATA_VENDOR_REQUEST,
47433 USB_TYPE_VENDOR | USB_DIR_IN,
47434 0x8080, 0,
47435- buf, sizeof(buf), 500);
47436+ buf, 62, 500);
47437
47438 unlock:
47439 mutex_unlock(&d->i2c_mutex);
47440
47441- if (ret < 0)
47442+ if (ret < 0) {
47443+ kfree(buf);
47444 return ret;
47445+ }
47446
47447- if (ret == 1)
47448+ if (ret == 1) {
47449+ kfree(buf);
47450 return 0; /* no key pressed */
47451+ }
47452
47453 /* decoding */
47454 b = buf+1;
47455@@ -653,6 +686,8 @@ unlock:
47456
47457 ir_raw_event_handle(d->rc_dev);
47458
47459+ kfree(buf);
47460+
47461 return 1;
47462 }
47463
47464diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47465index 7e2411c..cef73ca 100644
47466--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47467+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47468@@ -328,7 +328,7 @@ struct v4l2_buffer32 {
47469 __u32 reserved;
47470 };
47471
47472-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
47473+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
47474 enum v4l2_memory memory)
47475 {
47476 void __user *up_pln;
47477@@ -357,7 +357,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
47478 return 0;
47479 }
47480
47481-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
47482+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
47483 enum v4l2_memory memory)
47484 {
47485 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
47486@@ -427,7 +427,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
47487 * by passing a very big num_planes value */
47488 uplane = compat_alloc_user_space(num_planes *
47489 sizeof(struct v4l2_plane));
47490- kp->m.planes = uplane;
47491+ kp->m.planes = (struct v4l2_plane __force_kernel *)uplane;
47492
47493 while (--num_planes >= 0) {
47494 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
47495@@ -498,7 +498,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
47496 if (num_planes == 0)
47497 return 0;
47498
47499- uplane = kp->m.planes;
47500+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
47501 if (get_user(p, &up->m.planes))
47502 return -EFAULT;
47503 uplane32 = compat_ptr(p);
47504@@ -552,7 +552,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
47505 get_user(kp->capability, &up->capability) ||
47506 get_user(kp->flags, &up->flags))
47507 return -EFAULT;
47508- kp->base = compat_ptr(tmp);
47509+ kp->base = (void __force_kernel *)compat_ptr(tmp);
47510 get_v4l2_pix_format(&kp->fmt, &up->fmt);
47511 return 0;
47512 }
47513@@ -658,7 +658,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
47514 n * sizeof(struct v4l2_ext_control32)))
47515 return -EFAULT;
47516 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
47517- kp->controls = kcontrols;
47518+ kp->controls = (struct v4l2_ext_control __force_kernel *)kcontrols;
47519 while (--n >= 0) {
47520 if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
47521 return -EFAULT;
47522@@ -680,7 +680,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
47523 static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
47524 {
47525 struct v4l2_ext_control32 __user *ucontrols;
47526- struct v4l2_ext_control __user *kcontrols = kp->controls;
47527+ struct v4l2_ext_control __user *kcontrols = (struct v4l2_ext_control __force_user *)kp->controls;
47528 int n = kp->count;
47529 compat_caddr_t p;
47530
47531@@ -774,7 +774,7 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
47532 put_user(kp->start_block, &up->start_block) ||
47533 put_user(kp->blocks, &up->blocks) ||
47534 put_user(tmp, &up->edid) ||
47535- copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
47536+ copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
47537 return -EFAULT;
47538 return 0;
47539 }
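
The v4l2-compat-ioctl32 hunks tighten sparse address-space annotations: pointers produced by compat_alloc_user_space() are user addresses, so storing them in kernel-typed fields (and reading them back) goes through explicit __force_user/__force_kernel casts instead of silent mixing. These macros only bite under sparse; a plain compiler sees them as empty, as this sketch assumes:

#include <stdio.h>

#ifdef __CHECKER__                      /* sparse defines this */
# define __user         __attribute__((noderef, address_space(1)))
# define __force        __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user    __force __user  /* grsecurity shorthand */

struct plane { int n; };

struct buffer_like {
        struct plane *planes;           /* kernel-typed field */
};

int main(void)
{
        struct plane p = { 3 };
        struct buffer_like b;

        /* the address-space crossing is made explicit, not implicit */
        b.planes = (struct plane *)(struct plane __force_user *)&p;
        printf("planes=%d\n", b.planes->n);
        return 0;
}
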
47540diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
47541index 55c6832..a91c7a6 100644
47542--- a/drivers/media/v4l2-core/v4l2-ctrls.c
47543+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
47544@@ -1431,8 +1431,8 @@ static int validate_new(const struct v4l2_ctrl *ctrl,
47545 return 0;
47546
47547 case V4L2_CTRL_TYPE_STRING:
47548- len = strlen(c->string);
47549- if (len < ctrl->minimum)
47550+ len = strlen_user(c->string);
47551+ if (!len || len < ctrl->minimum)
47552 return -ERANGE;
47553 if ((len - ctrl->minimum) % ctrl->step)
47554 return -ERANGE;
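
The v4l2-ctrls change swaps strlen() for strlen_user() because c->string is a userspace pointer at this point: strlen_user() walks the string with access checks and returns the length including the terminating NUL, or 0 if the memory is unreadable, which is why the added !len test doubles as the fault check. A sketch of that contract, with a stub standing in for the real arch helper:

#include <stdio.h>
#include <string.h>

/* stub with strlen_user() semantics: 0 on fault, strlen()+1 otherwise */
static size_t strlen_user_stub(const char *s)
{
        if (s == NULL)                  /* pretend NULL is an unmapped address */
                return 0;
        return strlen(s) + 1;
}

static int validate_string(const char *s, size_t minimum)
{
        size_t len = strlen_user_stub(s);

        if (!len || len < minimum)      /* fault or too short */
                return -34;             /* -ERANGE, as in the patch */
        return 0;
}

int main(void)
{
        printf("%d %d\n", validate_string("abc", 2), validate_string(NULL, 2));
        return 0;
}
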
47555diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
47556index 015f92a..59e311e 100644
47557--- a/drivers/media/v4l2-core/v4l2-device.c
47558+++ b/drivers/media/v4l2-core/v4l2-device.c
47559@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
47560 EXPORT_SYMBOL_GPL(v4l2_device_put);
47561
47562 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
47563- atomic_t *instance)
47564+ atomic_unchecked_t *instance)
47565 {
47566- int num = atomic_inc_return(instance) - 1;
47567+ int num = atomic_inc_return_unchecked(instance) - 1;
47568 int len = strlen(basename);
47569
47570 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
47571diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
47572index 16bffd8..3ab516a 100644
47573--- a/drivers/media/v4l2-core/v4l2-ioctl.c
47574+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
47575@@ -2003,7 +2003,8 @@ struct v4l2_ioctl_info {
47576 struct file *file, void *fh, void *p);
47577 } u;
47578 void (*debug)(const void *arg, bool write_only);
47579-};
47580+} __do_const;
47581+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
47582
47583 /* This control needs a priority check */
47584 #define INFO_FL_PRIO (1 << 0)
47585@@ -2186,7 +2187,7 @@ static long __video_do_ioctl(struct file *file,
47586 struct video_device *vfd = video_devdata(file);
47587 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
47588 bool write_only = false;
47589- struct v4l2_ioctl_info default_info;
47590+ v4l2_ioctl_info_no_const default_info;
47591 const struct v4l2_ioctl_info *info;
47592 void *fh = file->private_data;
47593 struct v4l2_fh *vfh = NULL;
47594@@ -2276,7 +2277,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47595 ret = -EINVAL;
47596 break;
47597 }
47598- *user_ptr = (void __user *)buf->m.planes;
47599+ *user_ptr = (void __force_user *)buf->m.planes;
47600 *kernel_ptr = (void **)&buf->m.planes;
47601 *array_size = sizeof(struct v4l2_plane) * buf->length;
47602 ret = 1;
47603@@ -2293,7 +2294,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47604 ret = -EINVAL;
47605 break;
47606 }
47607- *user_ptr = (void __user *)edid->edid;
47608+ *user_ptr = (void __force_user *)edid->edid;
47609 *kernel_ptr = (void **)&edid->edid;
47610 *array_size = edid->blocks * 128;
47611 ret = 1;
47612@@ -2311,7 +2312,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47613 ret = -EINVAL;
47614 break;
47615 }
47616- *user_ptr = (void __user *)ctrls->controls;
47617+ *user_ptr = (void __force_user *)ctrls->controls;
47618 *kernel_ptr = (void **)&ctrls->controls;
47619 *array_size = sizeof(struct v4l2_ext_control)
47620 * ctrls->count;
47621@@ -2412,7 +2413,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
47622 }
47623
47624 if (has_array_args) {
47625- *kernel_ptr = (void __force *)user_ptr;
47626+ *kernel_ptr = (void __force_kernel *)user_ptr;
47627 if (copy_to_user(user_ptr, mbuf, array_size))
47628 err = -EFAULT;
47629 goto out_array_args;
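
The __do_const/__no_const pair in the v4l2-ioctl.c diff above comes from the PaX constify GCC plugin: instances of a __do_const struct are forced into read-only memory, and the one local scratch entry that must stay writable is declared through a __no_const typedef. Modeled below with ordinary const; ioctl_info and do_noop are illustrative names:

    #include <stdio.h>

    struct ioctl_info {
        unsigned int ioctl;
        long (*func)(void *arg);
    };

    /* writable alias; in the patch this is the __no_const typedef */
    typedef struct ioctl_info ioctl_info_no_const;

    static long do_noop(void *arg) { (void)arg; return 0; }

    /* the table itself stays const -- __do_const pushes it into .rodata */
    static const struct ioctl_info table[] = {
        { 0x1234, do_noop },
    };

    int main(void)
    {
        ioctl_info_no_const default_info;   /* writable stack copy */

        default_info = table[0];            /* seed from the const table */
        default_info.ioctl = 0x5678;        /* legal: the copy is not const */
        printf("0x%x -> %ld\n", default_info.ioctl, default_info.func(NULL));
        return 0;
    }
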
47630diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
47631index ebc0af7..baed058 100644
47632--- a/drivers/message/fusion/mptbase.c
47633+++ b/drivers/message/fusion/mptbase.c
47634@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
47635 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
47636 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
47637
47638+#ifdef CONFIG_GRKERNSEC_HIDESYM
47639+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
47640+#else
47641 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
47642 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
47643+#endif
47644+
47645 /*
47646 * Rounding UP to nearest 4-kB boundary here...
47647 */
47648@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
47649 ioc->facts.GlobalCredits);
47650
47651 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
47652+#ifdef CONFIG_GRKERNSEC_HIDESYM
47653+ NULL, NULL);
47654+#else
47655 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
47656+#endif
47657 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
47658 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
47659 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
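
The CONFIG_GRKERNSEC_HIDESYM hunks above censor kernel addresses in /proc output so unprivileged readers cannot map out kernel memory; mainline later grew the %pK format specifier for a similar purpose. A userspace sketch of the same ifdef shape (compile with -DHIDESYM to censor):

    #include <stdio.h>

    static void show_frames(const void *frames, unsigned long dma)
    {
    #ifdef HIDESYM
        /* censored output: the line layout stays, the addresses do not leak */
        printf("RequestFrames @ %p (Dma @ %p)\n", (void *)0, (void *)0);
    #else
        printf("RequestFrames @ %p (Dma @ %p)\n", frames, (void *)dma);
    #endif
    }

    int main(void)
    {
        int dummy;

        show_frames(&dummy, 0xdeadbeefUL);
        return 0;
    }
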
47660diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
47661index 711fcb5..5da1fb0 100644
47662--- a/drivers/message/fusion/mptsas.c
47663+++ b/drivers/message/fusion/mptsas.c
47664@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
47665 return 0;
47666 }
47667
47668+static inline void
47669+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
47670+{
47671+ if (phy_info->port_details) {
47672+ phy_info->port_details->rphy = rphy;
47673+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
47674+ ioc->name, rphy));
47675+ }
47676+
47677+ if (rphy) {
47678+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
47679+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
47680+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
47681+ ioc->name, rphy, rphy->dev.release));
47682+ }
47683+}
47684+
47685 /* no mutex */
47686 static void
47687 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
47688@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
47689 return NULL;
47690 }
47691
47692-static inline void
47693-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
47694-{
47695- if (phy_info->port_details) {
47696- phy_info->port_details->rphy = rphy;
47697- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
47698- ioc->name, rphy));
47699- }
47700-
47701- if (rphy) {
47702- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
47703- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
47704- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
47705- ioc->name, rphy, rphy->dev.release));
47706- }
47707-}
47708-
47709 static inline struct sas_port *
47710 mptsas_get_port(struct mptsas_phyinfo *phy_info)
47711 {
47712diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
47713index 2a1c6f2..a04c6a2 100644
47714--- a/drivers/message/fusion/mptscsih.c
47715+++ b/drivers/message/fusion/mptscsih.c
47716@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
47717
47718 h = shost_priv(SChost);
47719
47720- if (h) {
47721- if (h->info_kbuf == NULL)
47722- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
47723- return h->info_kbuf;
47724- h->info_kbuf[0] = '\0';
47725+ if (!h)
47726+ return NULL;
47727
47728- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
47729- h->info_kbuf[size-1] = '\0';
47730- }
47731+ if (h->info_kbuf == NULL)
47732+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
47733+ return h->info_kbuf;
47734+ h->info_kbuf[0] = '\0';
47735+
47736+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
47737+ h->info_kbuf[size-1] = '\0';
47738
47739 return h->info_kbuf;
47740 }
47741diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
47742index b7d87cd..3fb36da 100644
47743--- a/drivers/message/i2o/i2o_proc.c
47744+++ b/drivers/message/i2o/i2o_proc.c
47745@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
47746 "Array Controller Device"
47747 };
47748
47749-static char *chtostr(char *tmp, u8 *chars, int n)
47750-{
47751- tmp[0] = 0;
47752- return strncat(tmp, (char *)chars, n);
47753-}
47754-
47755 static int i2o_report_query_status(struct seq_file *seq, int block_status,
47756 char *group)
47757 {
47758@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
47759 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
47760 {
47761 struct i2o_controller *c = (struct i2o_controller *)seq->private;
47762- static u32 work32[5];
47763- static u8 *work8 = (u8 *) work32;
47764- static u16 *work16 = (u16 *) work32;
47765+ u32 work32[5];
47766+ u8 *work8 = (u8 *) work32;
47767+ u16 *work16 = (u16 *) work32;
47768 int token;
47769 u32 hwcap;
47770
47771@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
47772 } *result;
47773
47774 i2o_exec_execute_ddm_table ddm_table;
47775- char tmp[28 + 1];
47776
47777 result = kmalloc(sizeof(*result), GFP_KERNEL);
47778 if (!result)
47779@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
47780
47781 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
47782 seq_printf(seq, "%-#8x", ddm_table.module_id);
47783- seq_printf(seq, "%-29s",
47784- chtostr(tmp, ddm_table.module_name_version, 28));
47785+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
47786 seq_printf(seq, "%9d ", ddm_table.data_size);
47787 seq_printf(seq, "%8d", ddm_table.code_size);
47788
47789@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
47790
47791 i2o_driver_result_table *result;
47792 i2o_driver_store_table *dst;
47793- char tmp[28 + 1];
47794
47795 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
47796 if (result == NULL)
47797@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
47798
47799 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
47800 seq_printf(seq, "%-#8x", dst->module_id);
47801- seq_printf(seq, "%-29s",
47802- chtostr(tmp, dst->module_name_version, 28));
47803- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
47804+ seq_printf(seq, "%-.28s", dst->module_name_version);
47805+ seq_printf(seq, "%-.8s", dst->date);
47806 seq_printf(seq, "%8d ", dst->module_size);
47807 seq_printf(seq, "%8d ", dst->mpb_size);
47808 seq_printf(seq, "0x%04x", dst->module_flags);
47809@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
47810 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
47811 {
47812 struct i2o_device *d = (struct i2o_device *)seq->private;
47813- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
47814+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
47815 // == (allow) 512d bytes (max)
47816- static u16 *work16 = (u16 *) work32;
47817+ u16 *work16 = (u16 *) work32;
47818 int token;
47819- char tmp[16 + 1];
47820
47821 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
47822
47823@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
47824 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
47825 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
47826 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
47827- seq_printf(seq, "Vendor info : %s\n",
47828- chtostr(tmp, (u8 *) (work32 + 2), 16));
47829- seq_printf(seq, "Product info : %s\n",
47830- chtostr(tmp, (u8 *) (work32 + 6), 16));
47831- seq_printf(seq, "Description : %s\n",
47832- chtostr(tmp, (u8 *) (work32 + 10), 16));
47833- seq_printf(seq, "Product rev. : %s\n",
47834- chtostr(tmp, (u8 *) (work32 + 14), 8));
47835+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
47836+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
47837+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
47838+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
47839
47840 seq_printf(seq, "Serial number : ");
47841 print_serial_number(seq, (u8 *) (work32 + 16),
47842@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
47843 u8 pad[256]; // allow up to 256 byte (max) serial number
47844 } result;
47845
47846- char tmp[24 + 1];
47847-
47848 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
47849
47850 if (token < 0) {
47851@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
47852 }
47853
47854 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
47855- seq_printf(seq, "Module name : %s\n",
47856- chtostr(tmp, result.module_name, 24));
47857- seq_printf(seq, "Module revision : %s\n",
47858- chtostr(tmp, result.module_rev, 8));
47859+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
47860+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
47861
47862 seq_printf(seq, "Serial number : ");
47863 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
47864@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
47865 u8 instance_number[4];
47866 } result;
47867
47868- char tmp[64 + 1];
47869-
47870 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
47871
47872 if (token < 0) {
47873@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
47874 return 0;
47875 }
47876
47877- seq_printf(seq, "Device name : %s\n",
47878- chtostr(tmp, result.device_name, 64));
47879- seq_printf(seq, "Service name : %s\n",
47880- chtostr(tmp, result.service_name, 64));
47881- seq_printf(seq, "Physical name : %s\n",
47882- chtostr(tmp, result.physical_location, 64));
47883- seq_printf(seq, "Instance number : %s\n",
47884- chtostr(tmp, result.instance_number, 4));
47885+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
47886+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
47887+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
47888+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
47889
47890 return 0;
47891 }
47892@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
47893 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
47894 {
47895 struct i2o_device *d = (struct i2o_device *)seq->private;
47896- static u32 work32[12];
47897- static u16 *work16 = (u16 *) work32;
47898- static u8 *work8 = (u8 *) work32;
47899+ u32 work32[12];
47900+ u16 *work16 = (u16 *) work32;
47901+ u8 *work8 = (u8 *) work32;
47902 int token;
47903
47904 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
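
Two independent fixes run through the i2o_proc.c diff above: the static scratch buffers in the seq_file handlers were shared by all concurrent readers of the proc file (a data race) and become ordinary automatics, and the chtostr() bounce buffer is replaced by printf's precision specifier, which reads at most N bytes and stops early at a NUL. The precision trick in isolation:

    #include <stdio.h>

    int main(void)
    {
        /* fixed-width field with no NUL terminator, like the i2o tables */
        char module_name[8] = { 'i', '2', 'o', '_', 'p', 'r', 'o', 'c' };

        printf("Module name : %.8s\n", module_name);  /* reads exactly 8 bytes */
        printf("Short name  : %.8s\n", "i2o");        /* stops at the NUL */
        return 0;
    }
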
47905diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
47906index 92752fb..a7494f6 100644
47907--- a/drivers/message/i2o/iop.c
47908+++ b/drivers/message/i2o/iop.c
47909@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
47910
47911 spin_lock_irqsave(&c->context_list_lock, flags);
47912
47913- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
47914- atomic_inc(&c->context_list_counter);
47915+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
47916+ atomic_inc_unchecked(&c->context_list_counter);
47917
47918- entry->context = atomic_read(&c->context_list_counter);
47919+ entry->context = atomic_read_unchecked(&c->context_list_counter);
47920
47921 list_add(&entry->list, &c->context_list);
47922
47923@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
47924
47925 #if BITS_PER_LONG == 64
47926 spin_lock_init(&c->context_list_lock);
47927- atomic_set(&c->context_list_counter, 0);
47928+ atomic_set_unchecked(&c->context_list_counter, 0);
47929 INIT_LIST_HEAD(&c->context_list);
47930 #endif
47931
47932diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
47933index d1a22aa..d0f7bf7 100644
47934--- a/drivers/mfd/ab8500-debugfs.c
47935+++ b/drivers/mfd/ab8500-debugfs.c
47936@@ -100,7 +100,7 @@ static int irq_last;
47937 static u32 *irq_count;
47938 static int num_irqs;
47939
47940-static struct device_attribute **dev_attr;
47941+static device_attribute_no_const **dev_attr;
47942 static char **event_name;
47943
47944 static u8 avg_sample = SAMPLE_16;
47945diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
47946index a83eed5..62a58a9 100644
47947--- a/drivers/mfd/max8925-i2c.c
47948+++ b/drivers/mfd/max8925-i2c.c
47949@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
47950 const struct i2c_device_id *id)
47951 {
47952 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
47953- static struct max8925_chip *chip;
47954+ struct max8925_chip *chip;
47955 struct device_node *node = client->dev.of_node;
47956
47957 if (node && !pdata) {
47958diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
47959index f9e42ea..614d240 100644
47960--- a/drivers/mfd/tps65910.c
47961+++ b/drivers/mfd/tps65910.c
47962@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
47963 struct tps65910_platform_data *pdata)
47964 {
47965 int ret = 0;
47966- static struct regmap_irq_chip *tps6591x_irqs_chip;
47967+ struct regmap_irq_chip *tps6591x_irqs_chip;
47968
47969 if (!irq) {
47970 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
47971diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
47972index 596b1f6..5b6ab74 100644
47973--- a/drivers/mfd/twl4030-irq.c
47974+++ b/drivers/mfd/twl4030-irq.c
47975@@ -34,6 +34,7 @@
47976 #include <linux/of.h>
47977 #include <linux/irqdomain.h>
47978 #include <linux/i2c/twl.h>
47979+#include <asm/pgtable.h>
47980
47981 #include "twl-core.h"
47982
47983@@ -725,10 +726,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
47984 * Install an irq handler for each of the SIH modules;
47985 * clone dummy irq_chip since PIH can't *do* anything
47986 */
47987- twl4030_irq_chip = dummy_irq_chip;
47988- twl4030_irq_chip.name = "twl4030";
47989+ pax_open_kernel();
47990+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
47991+ *(const char **)&twl4030_irq_chip.name = "twl4030";
47992
47993- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
47994+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
47995+ pax_close_kernel();
47996
47997 for (i = irq_base; i < irq_end; i++) {
47998 irq_set_chip_and_handler(i, &twl4030_irq_chip,
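
pax_open_kernel()/pax_close_kernel(), used in the twl4030 hunk above and in several later ones (c2port, sunxi_sid, mmci, sdhci), bracket a deliberate store into memory that PaX KERNEXEC/constification keeps read-only; on x86 this is done by toggling CR0.WP rather than editing page tables, and the `*(void **)&` casts strip the constness for just that store. A userspace model using mprotect(), with a 4096-byte page size assumed for the sketch:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        /* stand-in for a constified ops structure living in .rodata */
        char *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (page == MAP_FAILED)
            return 1;

        strcpy(page, "dummy");
        mprotect(page, 4096, PROT_READ);               /* sealed read-only */

        mprotect(page, 4096, PROT_READ | PROT_WRITE);  /* pax_open_kernel() */
        strcpy(page, "twl4030");                       /* the one-time patch */
        mprotect(page, 4096, PROT_READ);               /* pax_close_kernel() */

        printf("%s\n", page);                          /* twl4030 */
        return 0;
    }
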
47999diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
48000index 464419b..64bae8d 100644
48001--- a/drivers/misc/c2port/core.c
48002+++ b/drivers/misc/c2port/core.c
48003@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
48004 goto error_idr_alloc;
48005 c2dev->id = ret;
48006
48007- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
48008+ pax_open_kernel();
48009+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
48010+ pax_close_kernel();
48011
48012 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
48013 "c2port%d", c2dev->id);
48014diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
48015index 3f2b625..945e179 100644
48016--- a/drivers/misc/eeprom/sunxi_sid.c
48017+++ b/drivers/misc/eeprom/sunxi_sid.c
48018@@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
48019
48020 platform_set_drvdata(pdev, sid_data);
48021
48022- sid_bin_attr.size = sid_data->keysize;
48023+ pax_open_kernel();
48024+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
48025+ pax_close_kernel();
48026 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
48027 return -ENODEV;
48028
48029diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
48030index 36f5d52..32311c3 100644
48031--- a/drivers/misc/kgdbts.c
48032+++ b/drivers/misc/kgdbts.c
48033@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
48034 char before[BREAK_INSTR_SIZE];
48035 char after[BREAK_INSTR_SIZE];
48036
48037- probe_kernel_read(before, (char *)kgdbts_break_test,
48038+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
48039 BREAK_INSTR_SIZE);
48040 init_simple_test();
48041 ts.tst = plant_and_detach_test;
48042@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
48043 /* Activate test with initial breakpoint */
48044 if (!is_early)
48045 kgdb_breakpoint();
48046- probe_kernel_read(after, (char *)kgdbts_break_test,
48047+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
48048 BREAK_INSTR_SIZE);
48049 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
48050 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
48051diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
48052index 3ef4627..8d00486 100644
48053--- a/drivers/misc/lis3lv02d/lis3lv02d.c
48054+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
48055@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
48056 * the lid is closed. This leads to interrupts as soon as a little move
48057 * is done.
48058 */
48059- atomic_inc(&lis3->count);
48060+ atomic_inc_unchecked(&lis3->count);
48061
48062 wake_up_interruptible(&lis3->misc_wait);
48063 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
48064@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
48065 if (lis3->pm_dev)
48066 pm_runtime_get_sync(lis3->pm_dev);
48067
48068- atomic_set(&lis3->count, 0);
48069+ atomic_set_unchecked(&lis3->count, 0);
48070 return 0;
48071 }
48072
48073@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
48074 add_wait_queue(&lis3->misc_wait, &wait);
48075 while (true) {
48076 set_current_state(TASK_INTERRUPTIBLE);
48077- data = atomic_xchg(&lis3->count, 0);
48078+ data = atomic_xchg_unchecked(&lis3->count, 0);
48079 if (data)
48080 break;
48081
48082@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
48083 struct lis3lv02d, miscdev);
48084
48085 poll_wait(file, &lis3->misc_wait, wait);
48086- if (atomic_read(&lis3->count))
48087+ if (atomic_read_unchecked(&lis3->count))
48088 return POLLIN | POLLRDNORM;
48089 return 0;
48090 }
48091diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
48092index c439c82..1f20f57 100644
48093--- a/drivers/misc/lis3lv02d/lis3lv02d.h
48094+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
48095@@ -297,7 +297,7 @@ struct lis3lv02d {
48096 struct input_polled_dev *idev; /* input device */
48097 struct platform_device *pdev; /* platform device */
48098 struct regulator_bulk_data regulators[2];
48099- atomic_t count; /* interrupt count after last read */
48100+ atomic_unchecked_t count; /* interrupt count after last read */
48101 union axis_conversion ac; /* hw -> logical axis */
48102 int mapped_btns[3];
48103
48104diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
48105index 2f30bad..c4c13d0 100644
48106--- a/drivers/misc/sgi-gru/gruhandles.c
48107+++ b/drivers/misc/sgi-gru/gruhandles.c
48108@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
48109 unsigned long nsec;
48110
48111 nsec = CLKS2NSEC(clks);
48112- atomic_long_inc(&mcs_op_statistics[op].count);
48113- atomic_long_add(nsec, &mcs_op_statistics[op].total);
48114+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
48115+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
48116 if (mcs_op_statistics[op].max < nsec)
48117 mcs_op_statistics[op].max = nsec;
48118 }
48119diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
48120index 4f76359..cdfcb2e 100644
48121--- a/drivers/misc/sgi-gru/gruprocfs.c
48122+++ b/drivers/misc/sgi-gru/gruprocfs.c
48123@@ -32,9 +32,9 @@
48124
48125 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
48126
48127-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
48128+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
48129 {
48130- unsigned long val = atomic_long_read(v);
48131+ unsigned long val = atomic_long_read_unchecked(v);
48132
48133 seq_printf(s, "%16lu %s\n", val, id);
48134 }
48135@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
48136
48137 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
48138 for (op = 0; op < mcsop_last; op++) {
48139- count = atomic_long_read(&mcs_op_statistics[op].count);
48140- total = atomic_long_read(&mcs_op_statistics[op].total);
48141+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
48142+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
48143 max = mcs_op_statistics[op].max;
48144 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
48145 count ? total / count : 0, max);
48146diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
48147index 5c3ce24..4915ccb 100644
48148--- a/drivers/misc/sgi-gru/grutables.h
48149+++ b/drivers/misc/sgi-gru/grutables.h
48150@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
48151 * GRU statistics.
48152 */
48153 struct gru_stats_s {
48154- atomic_long_t vdata_alloc;
48155- atomic_long_t vdata_free;
48156- atomic_long_t gts_alloc;
48157- atomic_long_t gts_free;
48158- atomic_long_t gms_alloc;
48159- atomic_long_t gms_free;
48160- atomic_long_t gts_double_allocate;
48161- atomic_long_t assign_context;
48162- atomic_long_t assign_context_failed;
48163- atomic_long_t free_context;
48164- atomic_long_t load_user_context;
48165- atomic_long_t load_kernel_context;
48166- atomic_long_t lock_kernel_context;
48167- atomic_long_t unlock_kernel_context;
48168- atomic_long_t steal_user_context;
48169- atomic_long_t steal_kernel_context;
48170- atomic_long_t steal_context_failed;
48171- atomic_long_t nopfn;
48172- atomic_long_t asid_new;
48173- atomic_long_t asid_next;
48174- atomic_long_t asid_wrap;
48175- atomic_long_t asid_reuse;
48176- atomic_long_t intr;
48177- atomic_long_t intr_cbr;
48178- atomic_long_t intr_tfh;
48179- atomic_long_t intr_spurious;
48180- atomic_long_t intr_mm_lock_failed;
48181- atomic_long_t call_os;
48182- atomic_long_t call_os_wait_queue;
48183- atomic_long_t user_flush_tlb;
48184- atomic_long_t user_unload_context;
48185- atomic_long_t user_exception;
48186- atomic_long_t set_context_option;
48187- atomic_long_t check_context_retarget_intr;
48188- atomic_long_t check_context_unload;
48189- atomic_long_t tlb_dropin;
48190- atomic_long_t tlb_preload_page;
48191- atomic_long_t tlb_dropin_fail_no_asid;
48192- atomic_long_t tlb_dropin_fail_upm;
48193- atomic_long_t tlb_dropin_fail_invalid;
48194- atomic_long_t tlb_dropin_fail_range_active;
48195- atomic_long_t tlb_dropin_fail_idle;
48196- atomic_long_t tlb_dropin_fail_fmm;
48197- atomic_long_t tlb_dropin_fail_no_exception;
48198- atomic_long_t tfh_stale_on_fault;
48199- atomic_long_t mmu_invalidate_range;
48200- atomic_long_t mmu_invalidate_page;
48201- atomic_long_t flush_tlb;
48202- atomic_long_t flush_tlb_gru;
48203- atomic_long_t flush_tlb_gru_tgh;
48204- atomic_long_t flush_tlb_gru_zero_asid;
48205+ atomic_long_unchecked_t vdata_alloc;
48206+ atomic_long_unchecked_t vdata_free;
48207+ atomic_long_unchecked_t gts_alloc;
48208+ atomic_long_unchecked_t gts_free;
48209+ atomic_long_unchecked_t gms_alloc;
48210+ atomic_long_unchecked_t gms_free;
48211+ atomic_long_unchecked_t gts_double_allocate;
48212+ atomic_long_unchecked_t assign_context;
48213+ atomic_long_unchecked_t assign_context_failed;
48214+ atomic_long_unchecked_t free_context;
48215+ atomic_long_unchecked_t load_user_context;
48216+ atomic_long_unchecked_t load_kernel_context;
48217+ atomic_long_unchecked_t lock_kernel_context;
48218+ atomic_long_unchecked_t unlock_kernel_context;
48219+ atomic_long_unchecked_t steal_user_context;
48220+ atomic_long_unchecked_t steal_kernel_context;
48221+ atomic_long_unchecked_t steal_context_failed;
48222+ atomic_long_unchecked_t nopfn;
48223+ atomic_long_unchecked_t asid_new;
48224+ atomic_long_unchecked_t asid_next;
48225+ atomic_long_unchecked_t asid_wrap;
48226+ atomic_long_unchecked_t asid_reuse;
48227+ atomic_long_unchecked_t intr;
48228+ atomic_long_unchecked_t intr_cbr;
48229+ atomic_long_unchecked_t intr_tfh;
48230+ atomic_long_unchecked_t intr_spurious;
48231+ atomic_long_unchecked_t intr_mm_lock_failed;
48232+ atomic_long_unchecked_t call_os;
48233+ atomic_long_unchecked_t call_os_wait_queue;
48234+ atomic_long_unchecked_t user_flush_tlb;
48235+ atomic_long_unchecked_t user_unload_context;
48236+ atomic_long_unchecked_t user_exception;
48237+ atomic_long_unchecked_t set_context_option;
48238+ atomic_long_unchecked_t check_context_retarget_intr;
48239+ atomic_long_unchecked_t check_context_unload;
48240+ atomic_long_unchecked_t tlb_dropin;
48241+ atomic_long_unchecked_t tlb_preload_page;
48242+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
48243+ atomic_long_unchecked_t tlb_dropin_fail_upm;
48244+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
48245+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
48246+ atomic_long_unchecked_t tlb_dropin_fail_idle;
48247+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
48248+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
48249+ atomic_long_unchecked_t tfh_stale_on_fault;
48250+ atomic_long_unchecked_t mmu_invalidate_range;
48251+ atomic_long_unchecked_t mmu_invalidate_page;
48252+ atomic_long_unchecked_t flush_tlb;
48253+ atomic_long_unchecked_t flush_tlb_gru;
48254+ atomic_long_unchecked_t flush_tlb_gru_tgh;
48255+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
48256
48257- atomic_long_t copy_gpa;
48258- atomic_long_t read_gpa;
48259+ atomic_long_unchecked_t copy_gpa;
48260+ atomic_long_unchecked_t read_gpa;
48261
48262- atomic_long_t mesq_receive;
48263- atomic_long_t mesq_receive_none;
48264- atomic_long_t mesq_send;
48265- atomic_long_t mesq_send_failed;
48266- atomic_long_t mesq_noop;
48267- atomic_long_t mesq_send_unexpected_error;
48268- atomic_long_t mesq_send_lb_overflow;
48269- atomic_long_t mesq_send_qlimit_reached;
48270- atomic_long_t mesq_send_amo_nacked;
48271- atomic_long_t mesq_send_put_nacked;
48272- atomic_long_t mesq_page_overflow;
48273- atomic_long_t mesq_qf_locked;
48274- atomic_long_t mesq_qf_noop_not_full;
48275- atomic_long_t mesq_qf_switch_head_failed;
48276- atomic_long_t mesq_qf_unexpected_error;
48277- atomic_long_t mesq_noop_unexpected_error;
48278- atomic_long_t mesq_noop_lb_overflow;
48279- atomic_long_t mesq_noop_qlimit_reached;
48280- atomic_long_t mesq_noop_amo_nacked;
48281- atomic_long_t mesq_noop_put_nacked;
48282- atomic_long_t mesq_noop_page_overflow;
48283+ atomic_long_unchecked_t mesq_receive;
48284+ atomic_long_unchecked_t mesq_receive_none;
48285+ atomic_long_unchecked_t mesq_send;
48286+ atomic_long_unchecked_t mesq_send_failed;
48287+ atomic_long_unchecked_t mesq_noop;
48288+ atomic_long_unchecked_t mesq_send_unexpected_error;
48289+ atomic_long_unchecked_t mesq_send_lb_overflow;
48290+ atomic_long_unchecked_t mesq_send_qlimit_reached;
48291+ atomic_long_unchecked_t mesq_send_amo_nacked;
48292+ atomic_long_unchecked_t mesq_send_put_nacked;
48293+ atomic_long_unchecked_t mesq_page_overflow;
48294+ atomic_long_unchecked_t mesq_qf_locked;
48295+ atomic_long_unchecked_t mesq_qf_noop_not_full;
48296+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
48297+ atomic_long_unchecked_t mesq_qf_unexpected_error;
48298+ atomic_long_unchecked_t mesq_noop_unexpected_error;
48299+ atomic_long_unchecked_t mesq_noop_lb_overflow;
48300+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
48301+ atomic_long_unchecked_t mesq_noop_amo_nacked;
48302+ atomic_long_unchecked_t mesq_noop_put_nacked;
48303+ atomic_long_unchecked_t mesq_noop_page_overflow;
48304
48305 };
48306
48307@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
48308 tghop_invalidate, mcsop_last};
48309
48310 struct mcs_op_statistic {
48311- atomic_long_t count;
48312- atomic_long_t total;
48313+ atomic_long_unchecked_t count;
48314+ atomic_long_unchecked_t total;
48315 unsigned long max;
48316 };
48317
48318@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
48319
48320 #define STAT(id) do { \
48321 if (gru_options & OPT_STATS) \
48322- atomic_long_inc(&gru_stats.id); \
48323+ atomic_long_inc_unchecked(&gru_stats.id); \
48324 } while (0)
48325
48326 #ifdef CONFIG_SGI_GRU_DEBUG
48327diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
48328index c862cd4..0d176fe 100644
48329--- a/drivers/misc/sgi-xp/xp.h
48330+++ b/drivers/misc/sgi-xp/xp.h
48331@@ -288,7 +288,7 @@ struct xpc_interface {
48332 xpc_notify_func, void *);
48333 void (*received) (short, int, void *);
48334 enum xp_retval (*partid_to_nasids) (short, void *);
48335-};
48336+} __no_const;
48337
48338 extern struct xpc_interface xpc_interface;
48339
48340diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
48341index 01be66d..e3a0c7e 100644
48342--- a/drivers/misc/sgi-xp/xp_main.c
48343+++ b/drivers/misc/sgi-xp/xp_main.c
48344@@ -78,13 +78,13 @@ xpc_notloaded(void)
48345 }
48346
48347 struct xpc_interface xpc_interface = {
48348- (void (*)(int))xpc_notloaded,
48349- (void (*)(int))xpc_notloaded,
48350- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
48351- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
48352+ .connect = (void (*)(int))xpc_notloaded,
48353+ .disconnect = (void (*)(int))xpc_notloaded,
48354+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
48355+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
48356 void *))xpc_notloaded,
48357- (void (*)(short, int, void *))xpc_notloaded,
48358- (enum xp_retval(*)(short, void *))xpc_notloaded
48359+ .received = (void (*)(short, int, void *))xpc_notloaded,
48360+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
48361 };
48362 EXPORT_SYMBOL_GPL(xpc_interface);
48363
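
The xp_main.c hunk above converts positional initializers to designated ones: each function pointer is bound by field name, so the table survives struct reordering and documents itself at the init site. A runnable sketch; iface, not_loaded and xpc_iface are illustrative:

    #include <stdio.h>

    struct iface {
        void (*connect)(int);
        void (*disconnect)(int);
    };

    static void not_loaded(int ch) { printf("channel %d: not loaded\n", ch); }

    static struct iface xpc_iface = {
        .connect    = not_loaded,   /* bound by name, order-independent */
        .disconnect = not_loaded,
    };

    int main(void)
    {
        xpc_iface.connect(3);
        return 0;
    }
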
48364diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
48365index b94d5f7..7f494c5 100644
48366--- a/drivers/misc/sgi-xp/xpc.h
48367+++ b/drivers/misc/sgi-xp/xpc.h
48368@@ -835,6 +835,7 @@ struct xpc_arch_operations {
48369 void (*received_payload) (struct xpc_channel *, void *);
48370 void (*notify_senders_of_disconnect) (struct xpc_channel *);
48371 };
48372+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
48373
48374 /* struct xpc_partition act_state values (for XPC HB) */
48375
48376@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
48377 /* found in xpc_main.c */
48378 extern struct device *xpc_part;
48379 extern struct device *xpc_chan;
48380-extern struct xpc_arch_operations xpc_arch_ops;
48381+extern xpc_arch_operations_no_const xpc_arch_ops;
48382 extern int xpc_disengage_timelimit;
48383 extern int xpc_disengage_timedout;
48384 extern int xpc_activate_IRQ_rcvd;
48385diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
48386index 82dc574..8539ab2 100644
48387--- a/drivers/misc/sgi-xp/xpc_main.c
48388+++ b/drivers/misc/sgi-xp/xpc_main.c
48389@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
48390 .notifier_call = xpc_system_die,
48391 };
48392
48393-struct xpc_arch_operations xpc_arch_ops;
48394+xpc_arch_operations_no_const xpc_arch_ops;
48395
48396 /*
48397 * Timer function to enforce the timelimit on the partition disengage.
48398@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
48399
48400 if (((die_args->trapnr == X86_TRAP_MF) ||
48401 (die_args->trapnr == X86_TRAP_XF)) &&
48402- !user_mode_vm(die_args->regs))
48403+ !user_mode(die_args->regs))
48404 xpc_die_deactivate();
48405
48406 break;
48407diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
48408index 452782b..0c10e40 100644
48409--- a/drivers/mmc/card/block.c
48410+++ b/drivers/mmc/card/block.c
48411@@ -574,7 +574,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
48412 if (idata->ic.postsleep_min_us)
48413 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
48414
48415- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
48416+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
48417 err = -EFAULT;
48418 goto cmd_rel_host;
48419 }
48420diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
48421index f51b5ba..86614a7 100644
48422--- a/drivers/mmc/core/mmc_ops.c
48423+++ b/drivers/mmc/core/mmc_ops.c
48424@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
48425 void *data_buf;
48426 int is_on_stack;
48427
48428- is_on_stack = object_is_on_stack(buf);
48429+ is_on_stack = object_starts_on_stack(buf);
48430 if (is_on_stack) {
48431 /*
48432 * dma onto stack is unsafe/nonportable, but callers to this
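
The object_is_on_stack() to object_starts_on_stack() rename above (repeated for gpmi-nand below) appears to reflect that the helper only tests whether the object's start address falls within the current task's stack, saying nothing about its extent, which is the property these DMA-bounce decisions actually rely on. A model with illustrative sizes:

    #include <stdint.h>
    #include <stdio.h>

    #define THREAD_SIZE 8192UL   /* illustrative task-stack size */

    /* tests the start address only -- nothing about the object's extent */
    static int object_starts_on_stack_model(uintptr_t obj, uintptr_t stack)
    {
        return obj >= stack && obj < stack + THREAD_SIZE;
    }

    int main(void)
    {
        char stack[THREAD_SIZE];
        uintptr_t base = (uintptr_t)stack;

        printf("%d %d\n",
               object_starts_on_stack_model(base + 100, base),                /* 1 */
               object_starts_on_stack_model(base + THREAD_SIZE + 100, base)); /* 0 */
        return 0;
    }
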
48433diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
48434index 738fa24..1568451 100644
48435--- a/drivers/mmc/host/dw_mmc.h
48436+++ b/drivers/mmc/host/dw_mmc.h
48437@@ -257,5 +257,5 @@ struct dw_mci_drv_data {
48438 int (*parse_dt)(struct dw_mci *host);
48439 int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
48440 struct dw_mci_tuning_data *tuning_data);
48441-};
48442+} __do_const;
48443 #endif /* _DW_MMC_H_ */
48444diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
48445index 249ab80..9314ce1 100644
48446--- a/drivers/mmc/host/mmci.c
48447+++ b/drivers/mmc/host/mmci.c
48448@@ -1507,7 +1507,9 @@ static int mmci_probe(struct amba_device *dev,
48449 mmc->caps |= MMC_CAP_CMD23;
48450
48451 if (variant->busy_detect) {
48452- mmci_ops.card_busy = mmci_card_busy;
48453+ pax_open_kernel();
48454+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
48455+ pax_close_kernel();
48456 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
48457 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
48458 mmc->max_busy_timeout = 0;
48459diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
48460index ccec0e3..199f9ce 100644
48461--- a/drivers/mmc/host/sdhci-esdhc-imx.c
48462+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
48463@@ -1034,9 +1034,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
48464 host->mmc->caps |= MMC_CAP_1_8V_DDR;
48465 }
48466
48467- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
48468- sdhci_esdhc_ops.platform_execute_tuning =
48469+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
48470+ pax_open_kernel();
48471+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
48472 esdhc_executing_tuning;
48473+ pax_close_kernel();
48474+ }
48475
48476 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
48477 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
48478diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
48479index fa5954a..56840e5 100644
48480--- a/drivers/mmc/host/sdhci-s3c.c
48481+++ b/drivers/mmc/host/sdhci-s3c.c
48482@@ -584,9 +584,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
48483 * we can use overriding functions instead of default.
48484 */
48485 if (sc->no_divider) {
48486- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
48487- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
48488- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
48489+ pax_open_kernel();
48490+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
48491+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
48492+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
48493+ pax_close_kernel();
48494 }
48495
48496 /* It supports additional host capabilities if needed */
48497diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
48498index 423666b..81ff5eb 100644
48499--- a/drivers/mtd/chips/cfi_cmdset_0020.c
48500+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
48501@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
48502 size_t totlen = 0, thislen;
48503 int ret = 0;
48504 size_t buflen = 0;
48505- static char *buffer;
48506+ char *buffer;
48507
48508 if (!ECCBUF_SIZE) {
48509 /* We should fall back to a general writev implementation.
48510diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
48511index 9f2012a..a81c720 100644
48512--- a/drivers/mtd/nand/denali.c
48513+++ b/drivers/mtd/nand/denali.c
48514@@ -24,6 +24,7 @@
48515 #include <linux/slab.h>
48516 #include <linux/mtd/mtd.h>
48517 #include <linux/module.h>
48518+#include <linux/slab.h>
48519
48520 #include "denali.h"
48521
48522diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48523index f638cd8..2cbf586 100644
48524--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48525+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48526@@ -387,7 +387,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
48527
48528 /* first try to map the upper buffer directly */
48529 if (virt_addr_valid(this->upper_buf) &&
48530- !object_is_on_stack(this->upper_buf)) {
48531+ !object_starts_on_stack(this->upper_buf)) {
48532 sg_init_one(sgl, this->upper_buf, this->upper_len);
48533 ret = dma_map_sg(this->dev, sgl, 1, dr);
48534 if (ret == 0)
48535diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
48536index 51b9d6a..52af9a7 100644
48537--- a/drivers/mtd/nftlmount.c
48538+++ b/drivers/mtd/nftlmount.c
48539@@ -24,6 +24,7 @@
48540 #include <asm/errno.h>
48541 #include <linux/delay.h>
48542 #include <linux/slab.h>
48543+#include <linux/sched.h>
48544 #include <linux/mtd/mtd.h>
48545 #include <linux/mtd/nand.h>
48546 #include <linux/mtd/nftl.h>
48547diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
48548index cf49c22..971b133 100644
48549--- a/drivers/mtd/sm_ftl.c
48550+++ b/drivers/mtd/sm_ftl.c
48551@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
48552 #define SM_CIS_VENDOR_OFFSET 0x59
48553 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
48554 {
48555- struct attribute_group *attr_group;
48556+ attribute_group_no_const *attr_group;
48557 struct attribute **attributes;
48558 struct sm_sysfs_attribute *vendor_attribute;
48559 char *vendor;
48560diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
48561index 5ab3c18..5c3a836 100644
48562--- a/drivers/net/bonding/bond_netlink.c
48563+++ b/drivers/net/bonding/bond_netlink.c
48564@@ -542,7 +542,7 @@ nla_put_failure:
48565 return -EMSGSIZE;
48566 }
48567
48568-struct rtnl_link_ops bond_link_ops __read_mostly = {
48569+struct rtnl_link_ops bond_link_ops = {
48570 .kind = "bond",
48571 .priv_size = sizeof(struct bonding),
48572 .setup = bond_setup,
48573diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
48574index 4168822..f38eeddf 100644
48575--- a/drivers/net/can/Kconfig
48576+++ b/drivers/net/can/Kconfig
48577@@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3
48578
48579 config CAN_FLEXCAN
48580 tristate "Support for Freescale FLEXCAN based chips"
48581- depends on ARM || PPC
48582+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
48583 ---help---
48584 Say Y here if you want to support for Freescale FlexCAN.
48585
48586diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
48587index 1d162cc..b546a75 100644
48588--- a/drivers/net/ethernet/8390/ax88796.c
48589+++ b/drivers/net/ethernet/8390/ax88796.c
48590@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
48591 if (ax->plat->reg_offsets)
48592 ei_local->reg_offset = ax->plat->reg_offsets;
48593 else {
48594+ resource_size_t _mem_size = mem_size;
48595+ do_div(_mem_size, 0x18);
48596 ei_local->reg_offset = ax->reg_offsets;
48597 for (ret = 0; ret < 0x18; ret++)
48598- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
48599+ ax->reg_offsets[ret] = _mem_size * ret;
48600 }
48601
48602 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
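
The ax88796.c hunk above hoists a 64-bit division out of the loop: resource_size_t can be 64 bits wide even on 32-bit machines, where a plain `/` would pull in a libgcc helper, and do_div(n, base) divides the 64-bit lvalue in place and returns the remainder. A userspace model of those semantics; do_div_model is a stand-in built on a GCC statement expression, as the kernel macro is:

    #include <stdint.h>
    #include <stdio.h>

    /* in-place 64-bit divide returning the remainder, like the kernel macro */
    #define do_div_model(n, base) ({                   \
        uint32_t __rem = (uint32_t)((n) % (base));     \
        (n) /= (base);                                 \
        __rem;                                         \
    })

    int main(void)
    {
        uint64_t mem_size = 0x1000;   /* e.g. a 4 KiB register window */
        uint32_t rem = do_div_model(mem_size, 0x18);
        int i;

        /* mem_size now holds the per-register stride, computed once */
        printf("stride=%llu rem=%u\n", (unsigned long long)mem_size, rem);
        for (i = 0; i < 3; i++)
            printf("reg_offset[%d] = %llu\n", i,
                   (unsigned long long)(mem_size * i));
        return 0;
    }
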
48603diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
48604index 7330681..7e9e463 100644
48605--- a/drivers/net/ethernet/altera/altera_tse_main.c
48606+++ b/drivers/net/ethernet/altera/altera_tse_main.c
48607@@ -1182,7 +1182,7 @@ static int tse_shutdown(struct net_device *dev)
48608 return 0;
48609 }
48610
48611-static struct net_device_ops altera_tse_netdev_ops = {
48612+static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
48613 .ndo_open = tse_open,
48614 .ndo_stop = tse_shutdown,
48615 .ndo_start_xmit = tse_start_xmit,
48616@@ -1439,11 +1439,13 @@ static int altera_tse_probe(struct platform_device *pdev)
48617 ndev->netdev_ops = &altera_tse_netdev_ops;
48618 altera_tse_set_ethtool_ops(ndev);
48619
48620+ pax_open_kernel();
48621 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
48622
48623 if (priv->hash_filter)
48624 altera_tse_netdev_ops.ndo_set_rx_mode =
48625 tse_set_rx_mode_hashfilter;
48626+ pax_close_kernel();
48627
48628 /* Scatter/gather IO is not supported,
48629 * so it is turned off
48630diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48631index bf462ee8..18b8375 100644
48632--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48633+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48634@@ -986,14 +986,14 @@ do { \
48635 * operations, everything works on mask values.
48636 */
48637 #define XMDIO_READ(_pdata, _mmd, _reg) \
48638- ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
48639+ ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \
48640 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
48641
48642 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
48643 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
48644
48645 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
48646- ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
48647+ ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \
48648 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
48649
48650 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
48651diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
48652index 6bb76d5..ded47a8 100644
48653--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
48654+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
48655@@ -273,7 +273,7 @@ static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer,
48656 struct xgbe_prv_data *pdata = filp->private_data;
48657 unsigned int value;
48658
48659- value = pdata->hw_if.read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
48660+ value = pdata->hw_if->read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
48661 pdata->debugfs_xpcs_reg);
48662
48663 return xgbe_common_read(buffer, count, ppos, value);
48664@@ -291,7 +291,7 @@ static ssize_t xpcs_reg_value_write(struct file *filp,
48665 if (len < 0)
48666 return len;
48667
48668- pdata->hw_if.write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
48669+ pdata->hw_if->write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
48670 pdata->debugfs_xpcs_reg, value);
48671
48672 return len;
48673diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48674index 6f1c859..e96ac1a 100644
48675--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48676+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48677@@ -236,7 +236,7 @@ err_ring:
48678
48679 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
48680 {
48681- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48682+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48683 struct xgbe_channel *channel;
48684 struct xgbe_ring *ring;
48685 struct xgbe_ring_data *rdata;
48686@@ -277,7 +277,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
48687
48688 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
48689 {
48690- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48691+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48692 struct xgbe_channel *channel;
48693 struct xgbe_ring *ring;
48694 struct xgbe_ring_desc *rdesc;
48695@@ -496,7 +496,7 @@ err_out:
48696 static void xgbe_realloc_skb(struct xgbe_channel *channel)
48697 {
48698 struct xgbe_prv_data *pdata = channel->pdata;
48699- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48700+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48701 struct xgbe_ring *ring = channel->rx_ring;
48702 struct xgbe_ring_data *rdata;
48703 struct sk_buff *skb = NULL;
48704@@ -540,17 +540,12 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel)
48705 DBGPR("<--xgbe_realloc_skb\n");
48706 }
48707
48708-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
48709-{
48710- DBGPR("-->xgbe_init_function_ptrs_desc\n");
48711-
48712- desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
48713- desc_if->free_ring_resources = xgbe_free_ring_resources;
48714- desc_if->map_tx_skb = xgbe_map_tx_skb;
48715- desc_if->realloc_skb = xgbe_realloc_skb;
48716- desc_if->unmap_skb = xgbe_unmap_skb;
48717- desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
48718- desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
48719-
48720- DBGPR("<--xgbe_init_function_ptrs_desc\n");
48721-}
48722+const struct xgbe_desc_if default_xgbe_desc_if = {
48723+ .alloc_ring_resources = xgbe_alloc_ring_resources,
48724+ .free_ring_resources = xgbe_free_ring_resources,
48725+ .map_tx_skb = xgbe_map_tx_skb,
48726+ .realloc_skb = xgbe_realloc_skb,
48727+ .unmap_skb = xgbe_unmap_skb,
48728+ .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
48729+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
48730+};
48731diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48732index 002293b..5ced1dd 100644
48733--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48734+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48735@@ -2030,7 +2030,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
48736
48737 static int xgbe_init(struct xgbe_prv_data *pdata)
48738 {
48739- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48740+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48741 int ret;
48742
48743 DBGPR("-->xgbe_init\n");
48744@@ -2096,87 +2096,82 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
48745 return 0;
48746 }
48747
48748-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
48749-{
48750- DBGPR("-->xgbe_init_function_ptrs\n");
48751-
48752- hw_if->tx_complete = xgbe_tx_complete;
48753-
48754- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
48755- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
48756- hw_if->set_addn_mac_addrs = xgbe_set_addn_mac_addrs;
48757- hw_if->set_mac_address = xgbe_set_mac_address;
48758-
48759- hw_if->enable_rx_csum = xgbe_enable_rx_csum;
48760- hw_if->disable_rx_csum = xgbe_disable_rx_csum;
48761-
48762- hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
48763- hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
48764-
48765- hw_if->read_mmd_regs = xgbe_read_mmd_regs;
48766- hw_if->write_mmd_regs = xgbe_write_mmd_regs;
48767-
48768- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
48769- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
48770- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
48771-
48772- hw_if->enable_tx = xgbe_enable_tx;
48773- hw_if->disable_tx = xgbe_disable_tx;
48774- hw_if->enable_rx = xgbe_enable_rx;
48775- hw_if->disable_rx = xgbe_disable_rx;
48776-
48777- hw_if->powerup_tx = xgbe_powerup_tx;
48778- hw_if->powerdown_tx = xgbe_powerdown_tx;
48779- hw_if->powerup_rx = xgbe_powerup_rx;
48780- hw_if->powerdown_rx = xgbe_powerdown_rx;
48781-
48782- hw_if->pre_xmit = xgbe_pre_xmit;
48783- hw_if->dev_read = xgbe_dev_read;
48784- hw_if->enable_int = xgbe_enable_int;
48785- hw_if->disable_int = xgbe_disable_int;
48786- hw_if->init = xgbe_init;
48787- hw_if->exit = xgbe_exit;
48788+const struct xgbe_hw_if default_xgbe_hw_if = {
48789+ .tx_complete = xgbe_tx_complete,
48790+
48791+ .set_promiscuous_mode = xgbe_set_promiscuous_mode,
48792+ .set_all_multicast_mode = xgbe_set_all_multicast_mode,
48793+ .set_addn_mac_addrs = xgbe_set_addn_mac_addrs,
48794+ .set_mac_address = xgbe_set_mac_address,
48795+
48796+ .enable_rx_csum = xgbe_enable_rx_csum,
48797+ .disable_rx_csum = xgbe_disable_rx_csum,
48798+
48799+ .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
48800+ .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
48801+
48802+ .read_mmd_regs = xgbe_read_mmd_regs,
48803+ .write_mmd_regs = xgbe_write_mmd_regs,
48804+
48805+ .set_gmii_speed = xgbe_set_gmii_speed,
48806+ .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
48807+ .set_xgmii_speed = xgbe_set_xgmii_speed,
48808+
48809+ .enable_tx = xgbe_enable_tx,
48810+ .disable_tx = xgbe_disable_tx,
48811+ .enable_rx = xgbe_enable_rx,
48812+ .disable_rx = xgbe_disable_rx,
48813+
48814+ .powerup_tx = xgbe_powerup_tx,
48815+ .powerdown_tx = xgbe_powerdown_tx,
48816+ .powerup_rx = xgbe_powerup_rx,
48817+ .powerdown_rx = xgbe_powerdown_rx,
48818+
48819+ .pre_xmit = xgbe_pre_xmit,
48820+ .dev_read = xgbe_dev_read,
48821+ .enable_int = xgbe_enable_int,
48822+ .disable_int = xgbe_disable_int,
48823+ .init = xgbe_init,
48824+ .exit = xgbe_exit,
48825
48826 /* Descriptor related Sequences have to be initialized here */
48827- hw_if->tx_desc_init = xgbe_tx_desc_init;
48828- hw_if->rx_desc_init = xgbe_rx_desc_init;
48829- hw_if->tx_desc_reset = xgbe_tx_desc_reset;
48830- hw_if->rx_desc_reset = xgbe_rx_desc_reset;
48831- hw_if->is_last_desc = xgbe_is_last_desc;
48832- hw_if->is_context_desc = xgbe_is_context_desc;
48833+ .tx_desc_init = xgbe_tx_desc_init,
48834+ .rx_desc_init = xgbe_rx_desc_init,
48835+ .tx_desc_reset = xgbe_tx_desc_reset,
48836+ .rx_desc_reset = xgbe_rx_desc_reset,
48837+ .is_last_desc = xgbe_is_last_desc,
48838+ .is_context_desc = xgbe_is_context_desc,
48839
48840 /* For FLOW ctrl */
48841- hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
48842- hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
48843+ .config_tx_flow_control = xgbe_config_tx_flow_control,
48844+ .config_rx_flow_control = xgbe_config_rx_flow_control,
48845
48846 /* For RX coalescing */
48847- hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
48848- hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
48849- hw_if->usec_to_riwt = xgbe_usec_to_riwt;
48850- hw_if->riwt_to_usec = xgbe_riwt_to_usec;
48851+ .config_rx_coalesce = xgbe_config_rx_coalesce,
48852+ .config_tx_coalesce = xgbe_config_tx_coalesce,
48853+ .usec_to_riwt = xgbe_usec_to_riwt,
48854+ .riwt_to_usec = xgbe_riwt_to_usec,
48855
48856 /* For RX and TX threshold config */
48857- hw_if->config_rx_threshold = xgbe_config_rx_threshold;
48858- hw_if->config_tx_threshold = xgbe_config_tx_threshold;
48859+ .config_rx_threshold = xgbe_config_rx_threshold,
48860+ .config_tx_threshold = xgbe_config_tx_threshold,
48861
48862 /* For RX and TX Store and Forward Mode config */
48863- hw_if->config_rsf_mode = xgbe_config_rsf_mode;
48864- hw_if->config_tsf_mode = xgbe_config_tsf_mode;
48865+ .config_rsf_mode = xgbe_config_rsf_mode,
48866+ .config_tsf_mode = xgbe_config_tsf_mode,
48867
48868 /* For TX DMA Operating on Second Frame config */
48869- hw_if->config_osp_mode = xgbe_config_osp_mode;
48870+ .config_osp_mode = xgbe_config_osp_mode,
48871
48872 /* For RX and TX PBL config */
48873- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
48874- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
48875- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
48876- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
48877- hw_if->config_pblx8 = xgbe_config_pblx8;
48878+ .config_rx_pbl_val = xgbe_config_rx_pbl_val,
48879+ .get_rx_pbl_val = xgbe_get_rx_pbl_val,
48880+ .config_tx_pbl_val = xgbe_config_tx_pbl_val,
48881+ .get_tx_pbl_val = xgbe_get_tx_pbl_val,
48882+ .config_pblx8 = xgbe_config_pblx8,
48883
48884 /* For MMC statistics support */
48885- hw_if->tx_mmc_int = xgbe_tx_mmc_int;
48886- hw_if->rx_mmc_int = xgbe_rx_mmc_int;
48887- hw_if->read_mmc_stats = xgbe_read_mmc_stats;
48888-
48889- DBGPR("<--xgbe_init_function_ptrs\n");
48890-}
48891+ .tx_mmc_int = xgbe_tx_mmc_int,
48892+ .rx_mmc_int = xgbe_rx_mmc_int,
48893+ .read_mmc_stats = xgbe_read_mmc_stats,
48894+};
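
The xgbe diffs above replace an init-time "fill in the function pointers" routine with a single const table, and pdata->hw_if / pdata->desc_if become pointers to it; that is why every `&pdata->hw_if` through the rest of the driver turns into `pdata->hw_if`, and it lets the whole dispatch surface live in read-only memory. A sketch of the shape; hw_if, prv_data and default_hw_if are illustrative:

    #include <stdio.h>

    struct hw_if {
        int  (*init)(void);
        void (*exit)(void);
    };

    static int  my_init(void) { puts("init"); return 0; }
    static void my_exit(void) { puts("exit"); }

    /* one const instance replaces the runtime init-function-pointers routine */
    static const struct hw_if default_hw_if = {
        .init = my_init,
        .exit = my_exit,
    };

    struct prv_data {
        const struct hw_if *hw_if;   /* pointer, formerly an embedded struct */
    };

    int main(void)
    {
        struct prv_data pdata = { .hw_if = &default_hw_if };

        pdata.hw_if->init();         /* was: pdata.hw_if.init() */
        pdata.hw_if->exit();
        return 0;
    }
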
48895diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48896index cfe3d93..07a78ae 100644
48897--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48898+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48899@@ -153,7 +153,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
48900
48901 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
48902 {
48903- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48904+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48905 struct xgbe_channel *channel;
48906 unsigned int i;
48907
48908@@ -170,7 +170,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
48909
48910 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
48911 {
48912- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48913+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48914 struct xgbe_channel *channel;
48915 unsigned int i;
48916
48917@@ -188,7 +188,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
48918 static irqreturn_t xgbe_isr(int irq, void *data)
48919 {
48920 struct xgbe_prv_data *pdata = data;
48921- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48922+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48923 struct xgbe_channel *channel;
48924 unsigned int dma_isr, dma_ch_isr;
48925 unsigned int mac_isr;
48926@@ -403,7 +403,7 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata)
48927
48928 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
48929 {
48930- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48931+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48932
48933 DBGPR("-->xgbe_init_tx_coalesce\n");
48934
48935@@ -417,7 +417,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
48936
48937 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
48938 {
48939- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48940+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48941
48942 DBGPR("-->xgbe_init_rx_coalesce\n");
48943
48944@@ -431,7 +431,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
48945
48946 static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
48947 {
48948- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48949+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48950 struct xgbe_channel *channel;
48951 struct xgbe_ring *ring;
48952 struct xgbe_ring_data *rdata;
48953@@ -456,7 +456,7 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
48954
48955 static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
48956 {
48957- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48958+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48959 struct xgbe_channel *channel;
48960 struct xgbe_ring *ring;
48961 struct xgbe_ring_data *rdata;
48962@@ -482,7 +482,7 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
48963 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
48964 {
48965 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48966- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48967+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48968 unsigned long flags;
48969
48970 DBGPR("-->xgbe_powerdown\n");
48971@@ -520,7 +520,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
48972 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
48973 {
48974 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48975- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48976+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48977 unsigned long flags;
48978
48979 DBGPR("-->xgbe_powerup\n");
48980@@ -557,7 +557,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
48981
48982 static int xgbe_start(struct xgbe_prv_data *pdata)
48983 {
48984- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48985+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48986 struct net_device *netdev = pdata->netdev;
48987
48988 DBGPR("-->xgbe_start\n");
48989@@ -583,7 +583,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
48990
48991 static void xgbe_stop(struct xgbe_prv_data *pdata)
48992 {
48993- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48994+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48995 struct net_device *netdev = pdata->netdev;
48996
48997 DBGPR("-->xgbe_stop\n");
48998@@ -603,7 +603,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
48999
49000 static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
49001 {
49002- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49003+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49004
49005 DBGPR("-->xgbe_restart_dev\n");
49006
49007@@ -741,8 +741,8 @@ static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb,
49008 static int xgbe_open(struct net_device *netdev)
49009 {
49010 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49011- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49012- struct xgbe_desc_if *desc_if = &pdata->desc_if;
49013+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49014+ struct xgbe_desc_if *desc_if = pdata->desc_if;
49015 int ret;
49016
49017 DBGPR("-->xgbe_open\n");
49018@@ -804,8 +804,8 @@ err_clk:
49019 static int xgbe_close(struct net_device *netdev)
49020 {
49021 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49022- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49023- struct xgbe_desc_if *desc_if = &pdata->desc_if;
49024+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49025+ struct xgbe_desc_if *desc_if = pdata->desc_if;
49026
49027 DBGPR("-->xgbe_close\n");
49028
49029@@ -835,8 +835,8 @@ static int xgbe_close(struct net_device *netdev)
49030 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
49031 {
49032 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49033- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49034- struct xgbe_desc_if *desc_if = &pdata->desc_if;
49035+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49036+ struct xgbe_desc_if *desc_if = pdata->desc_if;
49037 struct xgbe_channel *channel;
49038 struct xgbe_ring *ring;
49039 struct xgbe_packet_data *packet;
49040@@ -903,7 +903,7 @@ tx_netdev_return:
49041 static void xgbe_set_rx_mode(struct net_device *netdev)
49042 {
49043 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49044- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49045+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49046 unsigned int pr_mode, am_mode;
49047
49048 DBGPR("-->xgbe_set_rx_mode\n");
49049@@ -930,7 +930,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
49050 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
49051 {
49052 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49053- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49054+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49055 struct sockaddr *saddr = addr;
49056
49057 DBGPR("-->xgbe_set_mac_address\n");
49058@@ -976,7 +976,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
49059
49060 DBGPR("-->%s\n", __func__);
49061
49062- pdata->hw_if.read_mmc_stats(pdata);
49063+ pdata->hw_if->read_mmc_stats(pdata);
49064
49065 s->rx_packets = pstats->rxframecount_gb;
49066 s->rx_bytes = pstats->rxoctetcount_gb;
49067@@ -1020,7 +1020,7 @@ static int xgbe_set_features(struct net_device *netdev,
49068 netdev_features_t features)
49069 {
49070 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49071- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49072+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49073 unsigned int rxcsum_enabled, rxvlan_enabled;
49074
49075 rxcsum_enabled = !!(pdata->netdev_features & NETIF_F_RXCSUM);
49076@@ -1072,8 +1072,8 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
49077 static int xgbe_tx_poll(struct xgbe_channel *channel)
49078 {
49079 struct xgbe_prv_data *pdata = channel->pdata;
49080- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49081- struct xgbe_desc_if *desc_if = &pdata->desc_if;
49082+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49083+ struct xgbe_desc_if *desc_if = pdata->desc_if;
49084 struct xgbe_ring *ring = channel->tx_ring;
49085 struct xgbe_ring_data *rdata;
49086 struct xgbe_ring_desc *rdesc;
49087@@ -1124,8 +1124,8 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
49088 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
49089 {
49090 struct xgbe_prv_data *pdata = channel->pdata;
49091- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49092- struct xgbe_desc_if *desc_if = &pdata->desc_if;
49093+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49094+ struct xgbe_desc_if *desc_if = pdata->desc_if;
49095 struct xgbe_ring *ring = channel->rx_ring;
49096 struct xgbe_ring_data *rdata;
49097 struct xgbe_packet_data *packet;
49098diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
49099index 8909f2b..719e767 100644
49100--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
49101+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
49102@@ -202,7 +202,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
49103
49104 DBGPR("-->%s\n", __func__);
49105
49106- pdata->hw_if.read_mmc_stats(pdata);
49107+ pdata->hw_if->read_mmc_stats(pdata);
49108 for (i = 0; i < XGBE_STATS_COUNT; i++) {
49109 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
49110 *data++ = *(u64 *)stat;
49111@@ -387,7 +387,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
49112 struct ethtool_coalesce *ec)
49113 {
49114 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49115- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49116+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49117 unsigned int riwt;
49118
49119 DBGPR("-->xgbe_get_coalesce\n");
49120@@ -410,7 +410,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
49121 struct ethtool_coalesce *ec)
49122 {
49123 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49124- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49125+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49126 unsigned int rx_frames, rx_riwt, rx_usecs;
49127 unsigned int tx_frames, tx_usecs;
49128
49129diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
49130index 5a1891f..1b7888e 100644
49131--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
49132+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
49133@@ -210,12 +210,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
49134 DBGPR("<--xgbe_default_config\n");
49135 }
49136
49137-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
49138-{
49139- xgbe_init_function_ptrs_dev(&pdata->hw_if);
49140- xgbe_init_function_ptrs_desc(&pdata->desc_if);
49141-}
49142-
49143 static int xgbe_probe(struct platform_device *pdev)
49144 {
49145 struct xgbe_prv_data *pdata;
49146@@ -306,9 +300,8 @@ static int xgbe_probe(struct platform_device *pdev)
49147 netdev->base_addr = (unsigned long)pdata->xgmac_regs;
49148
49149 /* Set all the function pointers */
49150- xgbe_init_all_fptrs(pdata);
49151- hw_if = &pdata->hw_if;
49152- desc_if = &pdata->desc_if;
49153+ hw_if = pdata->hw_if = &default_xgbe_hw_if;
49154+ desc_if = pdata->desc_if = &default_xgbe_desc_if;
49155
49156 /* Issue software reset to device */
49157 hw_if->exit(pdata);
49158diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
49159index ea7a5d6..d10a742 100644
49160--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
49161+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
49162@@ -128,7 +128,7 @@
49163 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
49164 {
49165 struct xgbe_prv_data *pdata = mii->priv;
49166- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49167+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49168 int mmd_data;
49169
49170 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
49171@@ -145,7 +145,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
49172 u16 mmd_val)
49173 {
49174 struct xgbe_prv_data *pdata = mii->priv;
49175- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49176+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49177 int mmd_data = mmd_val;
49178
49179 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
49180@@ -161,7 +161,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
49181 static void xgbe_adjust_link(struct net_device *netdev)
49182 {
49183 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49184- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49185+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49186 struct phy_device *phydev = pdata->phydev;
49187 unsigned long flags;
49188 int new_state = 0;
49189diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
49190index ab06271..a560fa7 100644
49191--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
49192+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
49193@@ -527,8 +527,8 @@ struct xgbe_prv_data {
49194
49195 int irq_number;
49196
49197- struct xgbe_hw_if hw_if;
49198- struct xgbe_desc_if desc_if;
49199+ const struct xgbe_hw_if *hw_if;
49200+ const struct xgbe_desc_if *desc_if;
49201
49202 /* Rings for Tx/Rx on a DMA channel */
49203 struct xgbe_channel *channel;
49204@@ -611,6 +611,9 @@ struct xgbe_prv_data {
49205 #endif
49206 };
49207
49208+extern const struct xgbe_hw_if default_xgbe_hw_if;
49209+extern const struct xgbe_desc_if default_xgbe_desc_if;
49210+
49211 /* Function prototypes*/
49212
49213 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
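
The xgbe.h hunk is the other half of that conversion: the structures embedded in xgbe_prv_data become pointers to const, and the default tables are exported as extern const symbols. The probe path in xgbe-main.c above then points at the shared tables instead of copying function pointers into per-device storage. A compilable sketch of the wiring (types simplified, names hypothetical):

    struct hw_ops { int (*exit)(void *pdata); };

    static int hw_exit(void *pdata) { (void)pdata; return 0; }

    /* One shared, read-only table for the whole driver. */
    static const struct hw_ops default_hw_ops = { .exit = hw_exit };

    struct prv_data {
        const struct hw_ops *hw_if;   /* was: struct hw_ops hw_if; */
    };

    static int probe(struct prv_data *pdata)
    {
        const struct hw_ops *hw_if;

        /* Replaces the removed xgbe_init_all_fptrs() copy-in step. */
        hw_if = pdata->hw_if = &default_hw_ops;
        return hw_if->exit(pdata);
    }

    int main(void)
    {
        struct prv_data pdata;
        return probe(&pdata);
    }
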
49214diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
49215index 571427c..e9fe9e7 100644
49216--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
49217+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
49218@@ -1058,7 +1058,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
49219 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
49220 {
49221 /* RX_MODE controlling object */
49222- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
49223+ bnx2x_init_rx_mode_obj(bp);
49224
49225 /* multicast configuration controlling object */
49226 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
49227diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
49228index b193604..8873bfd 100644
49229--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
49230+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
49231@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
49232 return rc;
49233 }
49234
49235-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
49236- struct bnx2x_rx_mode_obj *o)
49237+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
49238 {
49239 if (CHIP_IS_E1x(bp)) {
49240- o->wait_comp = bnx2x_empty_rx_mode_wait;
49241- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
49242+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
49243+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
49244 } else {
49245- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
49246- o->config_rx_mode = bnx2x_set_rx_mode_e2;
49247+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
49248+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
49249 }
49250 }
49251
49252diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
49253index 718ecd2..2183b2f 100644
49254--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
49255+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
49256@@ -1340,8 +1340,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
49257
49258 /********************* RX MODE ****************/
49259
49260-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
49261- struct bnx2x_rx_mode_obj *o);
49262+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
49263
49264 /**
49265 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
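
Across the three bnx2x hunks, bnx2x_init_rx_mode_obj() loses its second parameter: the only object it ever initialized is the one embedded in struct bnx2x, so the function now writes bp->rx_mode_obj directly, which presumably makes the function-pointer assignment sites easier for the PaX plugins to reason about. A reduced sketch with stub types (hypothetical names):

    struct rx_mode_obj {
        int (*wait_comp)(void);
        int (*config_rx_mode)(void);
    };

    struct bnx2x_stub {
        int is_e1x;
        struct rx_mode_obj rx_mode_obj;
    };

    static int wait_e1x(void) { return 0; }
    static int cfg_e1x(void)  { return 0; }
    static int wait_e2(void)  { return 0; }
    static int cfg_e2(void)   { return 0; }

    /* The redundant 'o' parameter is gone: every caller passed
     * &bp->rx_mode_obj, so the embedded object is named directly. */
    static void init_rx_mode_obj(struct bnx2x_stub *bp)
    {
        if (bp->is_e1x) {
            bp->rx_mode_obj.wait_comp      = wait_e1x;
            bp->rx_mode_obj.config_rx_mode = cfg_e1x;
        } else {
            bp->rx_mode_obj.wait_comp      = wait_e2;
            bp->rx_mode_obj.config_rx_mode = cfg_e2;
        }
    }

    int main(void)
    {
        struct bnx2x_stub bp = { .is_e1x = 1 };
        init_rx_mode_obj(&bp);
        return bp.rx_mode_obj.wait_comp();
    }
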
49266diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
49267index 461acca..2b546ba 100644
49268--- a/drivers/net/ethernet/broadcom/tg3.h
49269+++ b/drivers/net/ethernet/broadcom/tg3.h
49270@@ -150,6 +150,7 @@
49271 #define CHIPREV_ID_5750_A0 0x4000
49272 #define CHIPREV_ID_5750_A1 0x4001
49273 #define CHIPREV_ID_5750_A3 0x4003
49274+#define CHIPREV_ID_5750_C1 0x4201
49275 #define CHIPREV_ID_5750_C2 0x4202
49276 #define CHIPREV_ID_5752_A0_HW 0x5000
49277 #define CHIPREV_ID_5752_A0 0x6000
49278diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
49279index 13f9636..228040f 100644
49280--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
49281+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
49282@@ -1690,10 +1690,10 @@ bna_cb_ioceth_reset(void *arg)
49283 }
49284
49285 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
49286- bna_cb_ioceth_enable,
49287- bna_cb_ioceth_disable,
49288- bna_cb_ioceth_hbfail,
49289- bna_cb_ioceth_reset
49290+ .enable_cbfn = bna_cb_ioceth_enable,
49291+ .disable_cbfn = bna_cb_ioceth_disable,
49292+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
49293+ .reset_cbfn = bna_cb_ioceth_reset
49294 };
49295
49296 static void bna_attr_init(struct bna_ioceth *ioceth)
49297diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
49298index 8cffcdf..aadf043 100644
49299--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
49300+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
49301@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
49302 */
49303 struct l2t_skb_cb {
49304 arp_failure_handler_func arp_failure_handler;
49305-};
49306+} __no_const;
49307
49308 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
49309
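
__no_const is a grsecurity annotation: the constify GCC plugin automatically makes structures consisting only of function pointers const, and __no_const opts a structure out when its members really are rewritten at run time, as the per-skb ARP failure handler is here. The same marker appears below on r8169's mdio_ops and r8152's rtl_ops. A sketch of how such code keeps building without the plugin (the empty fallback is an assumption, based on the usual grsecurity compiler.h definition):

    /* Outside a constify-enabled build, __no_const must expand to
     * nothing (assumed fallback; the plugin supplies the real one). */
    #ifndef __no_const
    #define __no_const
    #endif

    typedef void (*arp_failure_handler_func)(void *dev, void *skb);

    /* Contains only a function pointer, but is rewritten per skb, so
     * it must stay writable. */
    struct l2t_skb_cb {
        arp_failure_handler_func arp_failure_handler;
    } __no_const;

    int main(void)
    {
        struct l2t_skb_cb cb = { 0 };
        (void)cb;
        return 0;
    }
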
49310diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
49311index a83271c..cf00874 100644
49312--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
49313+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
49314@@ -2174,7 +2174,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
49315
49316 int i;
49317 struct adapter *ap = netdev2adap(dev);
49318- static const unsigned int *reg_ranges;
49319+ const unsigned int *reg_ranges;
49320 int arr_size = 0, buf_size = 0;
49321
49322 if (is_t4(ap->params.chip)) {
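
This hunk simply drops static from a function-local pointer. A static local is one shared slot for all callers, so two threads entering get_regs() concurrently could observe each other's half-updated value; an ordinary automatic variable is private to each call. The same fix is applied below in qlcnic_minidump.c and airo.c. A standalone illustration:

    #include <stdio.h>

    static const unsigned int t4_ranges[] = { 0x1000, 0x10ff };
    static const unsigned int t5_ranges[] = { 0x2000, 0x20ff };

    static void get_regs(int is_t4)
    {
        /* No 'static': per-call stack storage.  With 'static' this
         * pointer would be shared across all concurrent callers. */
        const unsigned int *reg_ranges;

        reg_ranges = is_t4 ? t4_ranges : t5_ranges;
        printf("range starts at %#x\n", reg_ranges[0]);
    }

    int main(void)
    {
        get_regs(1);
        get_regs(0);
        return 0;
    }
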
49323diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
49324index c05b66d..ed69872 100644
49325--- a/drivers/net/ethernet/dec/tulip/de4x5.c
49326+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
49327@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
49328 for (i=0; i<ETH_ALEN; i++) {
49329 tmp.addr[i] = dev->dev_addr[i];
49330 }
49331- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
49332+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
49333 break;
49334
49335 case DE4X5_SET_HWADDR: /* Set the hardware address */
49336@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
49337 spin_lock_irqsave(&lp->lock, flags);
49338 memcpy(&statbuf, &lp->pktStats, ioc->len);
49339 spin_unlock_irqrestore(&lp->lock, flags);
49340- if (copy_to_user(ioc->data, &statbuf, ioc->len))
49341+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
49342 return -EFAULT;
49343 break;
49344 }
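
Both de4x5 hunks add a bounds check on a user-supplied length before copy_to_user(): ioc->len arrives from userspace, and copying more bytes than the kernel buffer holds would leak adjacent stack memory to the caller. A userspace analogue of the check, with copy_out() standing in for copy_to_user():

    #include <stdio.h>
    #include <string.h>

    #define EFAULT 14

    /* Stand-in for copy_to_user(); returns 0 on success. */
    static int copy_out(void *dst, const void *src, size_t len)
    {
        memcpy(dst, src, len);
        return 0;
    }

    static int ioctl_get_hwaddr(void *user_buf, size_t user_len)
    {
        unsigned char addr[6] = { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 };

        /* Reject the user-controlled length first; without this a
         * large length would leak whatever sits next to 'addr'. */
        if (user_len > sizeof(addr) || copy_out(user_buf, addr, user_len))
            return -EFAULT;
        return 0;
    }

    int main(void)
    {
        unsigned char buf[64];
        printf("len 6:  %d\n", ioctl_get_hwaddr(buf, 6));   /* 0 */
        printf("len 64: %d\n", ioctl_get_hwaddr(buf, 64));  /* -14 */
        return 0;
    }
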
49345diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
49346index 1e187fb..d024547 100644
49347--- a/drivers/net/ethernet/emulex/benet/be_main.c
49348+++ b/drivers/net/ethernet/emulex/benet/be_main.c
49349@@ -533,7 +533,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
49350
49351 if (wrapped)
49352 newacc += 65536;
49353- ACCESS_ONCE(*acc) = newacc;
49354+ ACCESS_ONCE_RW(*acc) = newacc;
49355 }
49356
49357 static void populate_erx_stats(struct be_adapter *adapter,
49358diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
49359index c77fa4a..7fd42fc 100644
49360--- a/drivers/net/ethernet/faraday/ftgmac100.c
49361+++ b/drivers/net/ethernet/faraday/ftgmac100.c
49362@@ -30,6 +30,8 @@
49363 #include <linux/netdevice.h>
49364 #include <linux/phy.h>
49365 #include <linux/platform_device.h>
49366+#include <linux/interrupt.h>
49367+#include <linux/irqreturn.h>
49368 #include <net/ip.h>
49369
49370 #include "ftgmac100.h"
49371diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
49372index 4ff1adc..0ea6bf4 100644
49373--- a/drivers/net/ethernet/faraday/ftmac100.c
49374+++ b/drivers/net/ethernet/faraday/ftmac100.c
49375@@ -31,6 +31,8 @@
49376 #include <linux/module.h>
49377 #include <linux/netdevice.h>
49378 #include <linux/platform_device.h>
49379+#include <linux/interrupt.h>
49380+#include <linux/irqreturn.h>
49381
49382 #include "ftmac100.h"
49383
49384diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
49385index 101f439..59e7ec6 100644
49386--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
49387+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
49388@@ -401,7 +401,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
49389 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
49390
49391 /* Update the base adjustement value. */
49392- ACCESS_ONCE(pf->ptp_base_adj) = incval;
49393+ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
49394 smp_mb(); /* Force the above update. */
49395 }
49396
49397diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49398index 68f87ec..241dbe3 100644
49399--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49400+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49401@@ -792,7 +792,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
49402 }
49403
49404 /* update the base incval used to calculate frequency adjustment */
49405- ACCESS_ONCE(adapter->base_incval) = incval;
49406+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
49407 smp_mb();
49408
49409 /* need lock to prevent incorrect read while modifying cyclecounter */
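
The ACCESS_ONCE to ACCESS_ONCE_RW conversions in this patch (be2net and the two PTP hunks here, plus sfc/ptp.c further down) follow from grsecurity redefining the read-side macro with a const-qualified cast, so that write sites must state explicitly that they intend to store. A sketch with simplified macro definitions (the exact definitions are an assumption; they follow the usual grsecurity pattern):

    /* Assumed, simplified definitions: the read form is const, so a
     * write through plain ACCESS_ONCE() no longer compiles. */
    #define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

    static unsigned long base_incval;

    static void update_incval(unsigned long incval)
    {
        ACCESS_ONCE_RW(base_incval) = incval;   /* writes need _RW */
    }

    int main(void)
    {
        update_incval(42);
        return (int)ACCESS_ONCE(base_incval) - 42;   /* reads unchanged */
    }
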
49410diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
49411index 2bbd01f..e8baa64 100644
49412--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
49413+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
49414@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
49415 struct __vxge_hw_fifo *fifo;
49416 struct vxge_hw_fifo_config *config;
49417 u32 txdl_size, txdl_per_memblock;
49418- struct vxge_hw_mempool_cbs fifo_mp_callback;
49419+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
49420+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
49421+ };
49422+
49423 struct __vxge_hw_virtualpath *vpath;
49424
49425 if ((vp == NULL) || (attr == NULL)) {
49426@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
49427 goto exit;
49428 }
49429
49430- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
49431-
49432 fifo->mempool =
49433 __vxge_hw_mempool_create(vpath->hldev,
49434 fifo->config->memblock_size,
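
The vxge hunk turns a stack-allocated callbacks structure, previously filled in later in the function, into a static table with a designated initializer, so it is built at compile time and never written afterwards. Sketch:

    struct mempool_cbs {
        void (*item_func_alloc)(void *item);
    };

    static void fifo_item_alloc(void *item) { (void)item; }

    static const struct mempool_cbs *fifo_create(void)
    {
        /* 'static' + designated initializer: initialized once, never
         * mutated, eligible for read-only placement.  The old code
         * assigned the member on every call into a stack copy. */
        static const struct mempool_cbs fifo_mp_callback = {
            .item_func_alloc = fifo_item_alloc,
        };

        return &fifo_mp_callback;
    }

    int main(void)
    {
        fifo_create()->item_func_alloc(0);
        return 0;
    }
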
49435diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
49436index 73e6683..464e910 100644
49437--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
49438+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
49439@@ -120,6 +120,10 @@ static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
49440 int data);
49441 static void pch_gbe_set_multi(struct net_device *netdev);
49442
49443+static struct sock_filter ptp_filter[] = {
49444+ PTP_FILTER
49445+};
49446+
49447 static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
49448 {
49449 u8 *data = skb->data;
49450@@ -127,7 +131,7 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
49451 u16 *hi, *id;
49452 u32 lo;
49453
49454- if (ptp_classify_raw(skb) == PTP_CLASS_NONE)
49455+ if (sk_run_filter(skb, ptp_filter) == PTP_CLASS_NONE)
49456 return 0;
49457
49458 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
49459@@ -2631,6 +2635,11 @@ static int pch_gbe_probe(struct pci_dev *pdev,
49460
49461 adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
49462 PCI_DEVFN(12, 4));
49463+ if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
49464+ dev_err(&pdev->dev, "Bad ptp filter\n");
49465+ ret = -EINVAL;
49466+ goto err_free_netdev;
49467+ }
49468
49469 netdev->netdev_ops = &pch_gbe_netdev_ops;
49470 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
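
The pch_gbe hunks, and the matching cpts.c and ixp4xx_eth.c hunks below, revert the mainline ptp_classify_raw() helper to the older open-coded classifier: a static sock_filter array expanded from PTP_FILTER, validated once with ptp_filter_init() at probe time, then run per packet through sk_run_filter(). A kernel-side sketch of the probe step (not standalone; it only builds in a tree that still carries the old BPF API):

    #include <linux/filter.h>
    #include <linux/ptp_classify.h>

    /* One static classifier program shared by the whole driver. */
    static struct sock_filter ptp_filter[] = {
        PTP_FILTER
    };

    static int example_probe(void)
    {
        /* Validate the BPF program once, before any packet can be
         * classified with sk_run_filter(skb, ptp_filter). */
        if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter)))
            return -EINVAL;
        return 0;
    }
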
49471diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49472index f33559b..c7f50ac 100644
49473--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49474+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49475@@ -2176,7 +2176,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
49476 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
49477 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
49478 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
49479- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
49480+ pax_open_kernel();
49481+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
49482+ pax_close_kernel();
49483 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49484 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
49485 max_tx_rings = QLCNIC_MAX_TX_RINGS;
49486diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49487index be7d7a6..a8983f8 100644
49488--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49489+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49490@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
49491 case QLCNIC_NON_PRIV_FUNC:
49492 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
49493 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49494- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
49495+ pax_open_kernel();
49496+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
49497+ pax_close_kernel();
49498 break;
49499 case QLCNIC_PRIV_FUNC:
49500 ahw->op_mode = QLCNIC_PRIV_FUNC;
49501 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
49502- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
49503+ pax_open_kernel();
49504+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
49505+ pax_close_kernel();
49506 break;
49507 case QLCNIC_MGMT_FUNC:
49508 ahw->op_mode = QLCNIC_MGMT_FUNC;
49509 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49510- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
49511+ pax_open_kernel();
49512+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
49513+ pax_close_kernel();
49514 break;
49515 default:
49516 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
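
With the constify plugin active, nic_ops points at read-only data, so the one legitimate runtime assignment is bracketed with pax_open_kernel()/pax_close_kernel(), and the *(void **)& cast strips the const qualifier from the member. The macvlan_link_register() hunk below uses the same idiom for rtnl_link_ops. A kernel-side sketch (the qlcnic type names are assumed from the driver, and the pax_* helpers come from the PaX patch itself):

    /* Write one member of a constified ops table. */
    static void set_init_driver(struct qlcnic_nic_template *nic_ops,
                                int (*init_driver)(struct qlcnic_adapter *))
    {
        pax_open_kernel();                             /* lift write protection */
        *(void **)&nic_ops->init_driver = init_driver; /* cast away const */
        pax_close_kernel();                            /* restore protection */
    }
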
49517diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49518index e46fc39..abe135b 100644
49519--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49520+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49521@@ -1228,7 +1228,7 @@ flash_temp:
49522 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
49523 {
49524 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
49525- static const struct qlcnic_dump_operations *fw_dump_ops;
49526+ const struct qlcnic_dump_operations *fw_dump_ops;
49527 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
49528 u32 entry_offset, dump, no_entries, buf_offset = 0;
49529 int i, k, ops_cnt, ops_index, dump_size = 0;
49530diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
49531index 61623e9..ac97c27 100644
49532--- a/drivers/net/ethernet/realtek/r8169.c
49533+++ b/drivers/net/ethernet/realtek/r8169.c
49534@@ -759,22 +759,22 @@ struct rtl8169_private {
49535 struct mdio_ops {
49536 void (*write)(struct rtl8169_private *, int, int);
49537 int (*read)(struct rtl8169_private *, int);
49538- } mdio_ops;
49539+ } __no_const mdio_ops;
49540
49541 struct pll_power_ops {
49542 void (*down)(struct rtl8169_private *);
49543 void (*up)(struct rtl8169_private *);
49544- } pll_power_ops;
49545+ } __no_const pll_power_ops;
49546
49547 struct jumbo_ops {
49548 void (*enable)(struct rtl8169_private *);
49549 void (*disable)(struct rtl8169_private *);
49550- } jumbo_ops;
49551+ } __no_const jumbo_ops;
49552
49553 struct csi_ops {
49554 void (*write)(struct rtl8169_private *, int, int);
49555 u32 (*read)(struct rtl8169_private *, int);
49556- } csi_ops;
49557+ } __no_const csi_ops;
49558
49559 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
49560 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
49561diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
49562index 6b861e3..204ac86 100644
49563--- a/drivers/net/ethernet/sfc/ptp.c
49564+++ b/drivers/net/ethernet/sfc/ptp.c
49565@@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
49566 ptp->start.dma_addr);
49567
49568 /* Clear flag that signals MC ready */
49569- ACCESS_ONCE(*start) = 0;
49570+ ACCESS_ONCE_RW(*start) = 0;
49571 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
49572 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
49573 EFX_BUG_ON_PARANOID(rc);
49574diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49575index 50617c5..b13724c 100644
49576--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49577+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49578@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
49579
49580 writel(value, ioaddr + MMC_CNTRL);
49581
49582- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
49583- MMC_CNTRL, value);
49584+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
49585+// MMC_CNTRL, value);
49586 }
49587
49588 /* To mask all all interrupts.*/
49589diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
49590index 6b56f85..50e285f 100644
49591--- a/drivers/net/ethernet/ti/cpts.c
49592+++ b/drivers/net/ethernet/ti/cpts.c
49593@@ -33,6 +33,10 @@
49594
49595 #ifdef CONFIG_TI_CPTS
49596
49597+static struct sock_filter ptp_filter[] = {
49598+ PTP_FILTER
49599+};
49600+
49601 #define cpts_read32(c, r) __raw_readl(&c->reg->r)
49602 #define cpts_write32(c, v, r) __raw_writel(v, &c->reg->r)
49603
49604@@ -296,7 +300,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
49605 u64 ns = 0;
49606 struct cpts_event *event;
49607 struct list_head *this, *next;
49608- unsigned int class = ptp_classify_raw(skb);
49609+ unsigned int class = sk_run_filter(skb, ptp_filter);
49610 unsigned long flags;
49611 u16 seqid;
49612 u8 mtype;
49613@@ -367,6 +371,10 @@ int cpts_register(struct device *dev, struct cpts *cpts,
49614 int err, i;
49615 unsigned long flags;
49616
49617+ if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
49618+ pr_err("cpts: bad ptp filter\n");
49619+ return -EINVAL;
49620+ }
49621 cpts->info = cpts_info;
49622 cpts->clock = ptp_clock_register(&cpts->info, dev);
49623 if (IS_ERR(cpts->clock)) {
49624diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
49625index b81bc9f..3f43101 100644
49626--- a/drivers/net/ethernet/xscale/Kconfig
49627+++ b/drivers/net/ethernet/xscale/Kconfig
49628@@ -23,7 +23,6 @@ config IXP4XX_ETH
49629 tristate "Intel IXP4xx Ethernet support"
49630 depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
49631 select PHYLIB
49632- select NET_PTP_CLASSIFY
49633 ---help---
49634 Say Y here if you want to use built-in Ethernet ports
49635 on IXP4xx processor.
49636diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
49637index f7e0f0f..25283f1 100644
49638--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
49639+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
49640@@ -256,6 +256,10 @@ static int ports_open;
49641 static struct port *npe_port_tab[MAX_NPES];
49642 static struct dma_pool *dma_pool;
49643
49644+static struct sock_filter ptp_filter[] = {
49645+ PTP_FILTER
49646+};
49647+
49648 static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
49649 {
49650 u8 *data = skb->data;
49651@@ -263,7 +267,7 @@ static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
49652 u16 *hi, *id;
49653 u32 lo;
49654
49655- if (ptp_classify_raw(skb) != PTP_CLASS_V1_IPV4)
49656+ if (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)
49657 return 0;
49658
49659 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
49660@@ -1409,6 +1413,11 @@ static int eth_init_one(struct platform_device *pdev)
49661 char phy_id[MII_BUS_ID_SIZE + 3];
49662 int err;
49663
49664+ if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
49665+ pr_err("ixp4xx_eth: bad ptp filter\n");
49666+ return -EINVAL;
49667+ }
49668+
49669 if (!(dev = alloc_etherdev(sizeof(struct port))))
49670 return -ENOMEM;
49671
49672diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
49673index 6cc37c1..fdd9d77 100644
49674--- a/drivers/net/hyperv/hyperv_net.h
49675+++ b/drivers/net/hyperv/hyperv_net.h
49676@@ -170,7 +170,7 @@ struct rndis_device {
49677
49678 enum rndis_device_state state;
49679 bool link_state;
49680- atomic_t new_req_id;
49681+ atomic_unchecked_t new_req_id;
49682
49683 spinlock_t request_lock;
49684 struct list_head req_list;
49685diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
49686index 99c527a..6a2ce38 100644
49687--- a/drivers/net/hyperv/rndis_filter.c
49688+++ b/drivers/net/hyperv/rndis_filter.c
49689@@ -102,7 +102,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
49690 * template
49691 */
49692 set = &rndis_msg->msg.set_req;
49693- set->req_id = atomic_inc_return(&dev->new_req_id);
49694+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
49695
49696 /* Add to the request list */
49697 spin_lock_irqsave(&dev->request_lock, flags);
49698@@ -930,7 +930,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
49699
49700 /* Setup the rndis set */
49701 halt = &request->request_msg.msg.halt_req;
49702- halt->req_id = atomic_inc_return(&dev->new_req_id);
49703+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
49704
49705 /* Ignore return since this msg is optional. */
49706 rndis_filter_send_request(dev, request);
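
The atomic_t to atomic_unchecked_t conversions mark counters that may wrap harmlessly. PaX's REFCOUNT hardening makes the ordinary atomic_inc_return() trap on overflow to stop reference-count exploits; pure ID generators such as the RNDIS request ID here (and sierra_net's iface_counter below) opt out through the _unchecked variants. A userspace sketch of the distinction (the typedef mirrors the kernel's layout; the GCC builtin stands in for the kernel implementation):

    /* Wrapping is acceptable for an ID counter, so it is exempt from
     * the overflow trap that guards real reference counts. */
    typedef struct { int counter; } atomic_unchecked_t;

    static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
    {
        return __sync_add_and_fetch(&v->counter, 1);
    }

    static atomic_unchecked_t new_req_id;

    static unsigned int next_req_id(void)
    {
        return (unsigned int)atomic_inc_return_unchecked(&new_req_id);
    }

    int main(void)
    {
        return next_req_id() == 1 ? 0 : 1;
    }
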
49707diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
49708index 78f18be..1d19c62 100644
49709--- a/drivers/net/ieee802154/fakehard.c
49710+++ b/drivers/net/ieee802154/fakehard.c
49711@@ -364,7 +364,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
49712 phy->transmit_power = 0xbf;
49713
49714 dev->netdev_ops = &fake_ops;
49715- dev->ml_priv = &fake_mlme;
49716+ dev->ml_priv = (void *)&fake_mlme;
49717
49718 priv = netdev_priv(dev);
49719 priv->phy = phy;
49720diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
49721index ef8a5c2..76877d6 100644
49722--- a/drivers/net/macvlan.c
49723+++ b/drivers/net/macvlan.c
49724@@ -264,7 +264,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
49725 free_nskb:
49726 kfree_skb(nskb);
49727 err:
49728- atomic_long_inc(&skb->dev->rx_dropped);
49729+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
49730 }
49731
49732 /* called under rcu_read_lock() from netif_receive_skb */
49733@@ -1134,13 +1134,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
49734 int macvlan_link_register(struct rtnl_link_ops *ops)
49735 {
49736 /* common fields */
49737- ops->priv_size = sizeof(struct macvlan_dev);
49738- ops->validate = macvlan_validate;
49739- ops->maxtype = IFLA_MACVLAN_MAX;
49740- ops->policy = macvlan_policy;
49741- ops->changelink = macvlan_changelink;
49742- ops->get_size = macvlan_get_size;
49743- ops->fill_info = macvlan_fill_info;
49744+ pax_open_kernel();
49745+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
49746+ *(void **)&ops->validate = macvlan_validate;
49747+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
49748+ *(const void **)&ops->policy = macvlan_policy;
49749+ *(void **)&ops->changelink = macvlan_changelink;
49750+ *(void **)&ops->get_size = macvlan_get_size;
49751+ *(void **)&ops->fill_info = macvlan_fill_info;
49752+ pax_close_kernel();
49753
49754 return rtnl_link_register(ops);
49755 };
49756@@ -1220,7 +1222,7 @@ static int macvlan_device_event(struct notifier_block *unused,
49757 return NOTIFY_DONE;
49758 }
49759
49760-static struct notifier_block macvlan_notifier_block __read_mostly = {
49761+static struct notifier_block macvlan_notifier_block = {
49762 .notifier_call = macvlan_device_event,
49763 };
49764
49765diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
49766index 3381c4f..dea5fd5 100644
49767--- a/drivers/net/macvtap.c
49768+++ b/drivers/net/macvtap.c
49769@@ -1020,7 +1020,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
49770 }
49771
49772 ret = 0;
49773- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
49774+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
49775 put_user(q->flags, &ifr->ifr_flags))
49776 ret = -EFAULT;
49777 macvtap_put_vlan(vlan);
49778@@ -1190,7 +1190,7 @@ static int macvtap_device_event(struct notifier_block *unused,
49779 return NOTIFY_DONE;
49780 }
49781
49782-static struct notifier_block macvtap_notifier_block __read_mostly = {
49783+static struct notifier_block macvtap_notifier_block = {
49784 .notifier_call = macvtap_device_event,
49785 };
49786
49787diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
49788index 9408157..d53b924 100644
49789--- a/drivers/net/phy/dp83640.c
49790+++ b/drivers/net/phy/dp83640.c
49791@@ -27,7 +27,6 @@
49792 #include <linux/module.h>
49793 #include <linux/net_tstamp.h>
49794 #include <linux/netdevice.h>
49795-#include <linux/if_vlan.h>
49796 #include <linux/phy.h>
49797 #include <linux/ptp_classify.h>
49798 #include <linux/ptp_clock_kernel.h>
49799diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
49800index d5b77ef..72ff14b 100644
49801--- a/drivers/net/ppp/ppp_generic.c
49802+++ b/drivers/net/ppp/ppp_generic.c
49803@@ -143,8 +143,9 @@ struct ppp {
49804 struct sk_buff_head mrq; /* MP: receive reconstruction queue */
49805 #endif /* CONFIG_PPP_MULTILINK */
49806 #ifdef CONFIG_PPP_FILTER
49807- struct sk_filter *pass_filter; /* filter for packets to pass */
49808- struct sk_filter *active_filter;/* filter for pkts to reset idle */
49809+ struct sock_filter *pass_filter; /* filter for packets to pass */
49810+ struct sock_filter *active_filter;/* filter for pkts to reset idle */
49811+ unsigned pass_len, active_len;
49812 #endif /* CONFIG_PPP_FILTER */
49813 struct net *ppp_net; /* the net we belong to */
49814 struct ppp_link_stats stats64; /* 64 bit network stats */
49815@@ -539,7 +540,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
49816 {
49817 struct sock_fprog uprog;
49818 struct sock_filter *code = NULL;
49819- int len;
49820+ int len, err;
49821
49822 if (copy_from_user(&uprog, arg, sizeof(uprog)))
49823 return -EFAULT;
49824@@ -554,6 +555,12 @@ static int get_filter(void __user *arg, struct sock_filter **p)
49825 if (IS_ERR(code))
49826 return PTR_ERR(code);
49827
49828+ err = sk_chk_filter(code, uprog.len);
49829+ if (err) {
49830+ kfree(code);
49831+ return err;
49832+ }
49833+
49834 *p = code;
49835 return uprog.len;
49836 }
49837@@ -748,52 +755,28 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
49838 case PPPIOCSPASS:
49839 {
49840 struct sock_filter *code;
49841-
49842 err = get_filter(argp, &code);
49843 if (err >= 0) {
49844- struct sock_fprog_kern fprog = {
49845- .len = err,
49846- .filter = code,
49847- };
49848-
49849 ppp_lock(ppp);
49850- if (ppp->pass_filter) {
49851- sk_unattached_filter_destroy(ppp->pass_filter);
49852- ppp->pass_filter = NULL;
49853- }
49854- if (fprog.filter != NULL)
49855- err = sk_unattached_filter_create(&ppp->pass_filter,
49856- &fprog);
49857- else
49858- err = 0;
49859- kfree(code);
49860+ kfree(ppp->pass_filter);
49861+ ppp->pass_filter = code;
49862+ ppp->pass_len = err;
49863 ppp_unlock(ppp);
49864+ err = 0;
49865 }
49866 break;
49867 }
49868 case PPPIOCSACTIVE:
49869 {
49870 struct sock_filter *code;
49871-
49872 err = get_filter(argp, &code);
49873 if (err >= 0) {
49874- struct sock_fprog_kern fprog = {
49875- .len = err,
49876- .filter = code,
49877- };
49878-
49879 ppp_lock(ppp);
49880- if (ppp->active_filter) {
49881- sk_unattached_filter_destroy(ppp->active_filter);
49882- ppp->active_filter = NULL;
49883- }
49884- if (fprog.filter != NULL)
49885- err = sk_unattached_filter_create(&ppp->active_filter,
49886- &fprog);
49887- else
49888- err = 0;
49889- kfree(code);
49890+ kfree(ppp->active_filter);
49891+ ppp->active_filter = code;
49892+ ppp->active_len = err;
49893 ppp_unlock(ppp);
49894+ err = 0;
49895 }
49896 break;
49897 }
49898@@ -1201,7 +1184,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
49899 a four-byte PPP header on each packet */
49900 *skb_push(skb, 2) = 1;
49901 if (ppp->pass_filter &&
49902- SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
49903+ sk_run_filter(skb, ppp->pass_filter) == 0) {
49904 if (ppp->debug & 1)
49905 netdev_printk(KERN_DEBUG, ppp->dev,
49906 "PPP: outbound frame "
49907@@ -1211,7 +1194,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
49908 }
49909 /* if this packet passes the active filter, record the time */
49910 if (!(ppp->active_filter &&
49911- SK_RUN_FILTER(ppp->active_filter, skb) == 0))
49912+ sk_run_filter(skb, ppp->active_filter) == 0))
49913 ppp->last_xmit = jiffies;
49914 skb_pull(skb, 2);
49915 #else
49916@@ -1835,7 +1818,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
49917
49918 *skb_push(skb, 2) = 0;
49919 if (ppp->pass_filter &&
49920- SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
49921+ sk_run_filter(skb, ppp->pass_filter) == 0) {
49922 if (ppp->debug & 1)
49923 netdev_printk(KERN_DEBUG, ppp->dev,
49924 "PPP: inbound frame "
49925@@ -1844,7 +1827,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
49926 return;
49927 }
49928 if (!(ppp->active_filter &&
49929- SK_RUN_FILTER(ppp->active_filter, skb) == 0))
49930+ sk_run_filter(skb, ppp->active_filter) == 0))
49931 ppp->last_recv = jiffies;
49932 __skb_pull(skb, 2);
49933 } else
49934@@ -2689,10 +2672,6 @@ ppp_create_interface(struct net *net, int unit, int *retp)
49935 ppp->minseq = -1;
49936 skb_queue_head_init(&ppp->mrq);
49937 #endif /* CONFIG_PPP_MULTILINK */
49938-#ifdef CONFIG_PPP_FILTER
49939- ppp->pass_filter = NULL;
49940- ppp->active_filter = NULL;
49941-#endif /* CONFIG_PPP_FILTER */
49942
49943 /*
49944 * drum roll: don't forget to set
49945@@ -2823,15 +2802,10 @@ static void ppp_destroy_interface(struct ppp *ppp)
49946 skb_queue_purge(&ppp->mrq);
49947 #endif /* CONFIG_PPP_MULTILINK */
49948 #ifdef CONFIG_PPP_FILTER
49949- if (ppp->pass_filter) {
49950- sk_unattached_filter_destroy(ppp->pass_filter);
49951- ppp->pass_filter = NULL;
49952- }
49953-
49954- if (ppp->active_filter) {
49955- sk_unattached_filter_destroy(ppp->active_filter);
49956- ppp->active_filter = NULL;
49957- }
49958+ kfree(ppp->pass_filter);
49959+ ppp->pass_filter = NULL;
49960+ kfree(ppp->active_filter);
49961+ ppp->active_filter = NULL;
49962 #endif /* CONFIG_PPP_FILTER */
49963
49964 kfree_skb(ppp->xmit_pending);
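
The ppp_generic changes revert the sk_unattached_filter API to storing raw sock_filter programs, which makes the sk_chk_filter() call added to get_filter() the only validation a program receives before sk_run_filter() executes it per packet. A simplified kernel-side sketch of the patched helper (abbreviated; memdup_user() and the zero-length case follow the surrounding 3.16 code):

    static int get_filter(void __user *arg, struct sock_filter **p)
    {
        struct sock_fprog uprog;
        struct sock_filter *code;
        int err;

        if (copy_from_user(&uprog, arg, sizeof(uprog)))
            return -EFAULT;
        if (!uprog.len) {
            *p = NULL;
            return 0;
        }

        code = memdup_user(uprog.filter, uprog.len * sizeof(*code));
        if (IS_ERR(code))
            return PTR_ERR(code);

        /* The safety net the revert adds: never store a program that
         * has not passed the verifier. */
        err = sk_chk_filter(code, uprog.len);
        if (err) {
            kfree(code);
            return err;
        }

        *p = code;
        return uprog.len;
    }
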
49965diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
49966index 1252d9c..80e660b 100644
49967--- a/drivers/net/slip/slhc.c
49968+++ b/drivers/net/slip/slhc.c
49969@@ -488,7 +488,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
49970 register struct tcphdr *thp;
49971 register struct iphdr *ip;
49972 register struct cstate *cs;
49973- int len, hdrlen;
49974+ long len, hdrlen;
49975 unsigned char *cp = icp;
49976
49977 /* We've got a compressed packet; read the change byte */
49978diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
49979index b4958c7..277cb96 100644
49980--- a/drivers/net/team/team.c
49981+++ b/drivers/net/team/team.c
49982@@ -2868,7 +2868,7 @@ static int team_device_event(struct notifier_block *unused,
49983 return NOTIFY_DONE;
49984 }
49985
49986-static struct notifier_block team_notifier_block __read_mostly = {
49987+static struct notifier_block team_notifier_block = {
49988 .notifier_call = team_device_event,
49989 };
49990
49991diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
49992index a58dfeb..dbde341 100644
49993--- a/drivers/net/team/team_mode_loadbalance.c
49994+++ b/drivers/net/team/team_mode_loadbalance.c
49995@@ -49,7 +49,7 @@ struct lb_port_mapping {
49996 struct lb_priv_ex {
49997 struct team *team;
49998 struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE];
49999- struct sock_fprog_kern *orig_fprog;
50000+ struct sock_fprog *orig_fprog;
50001 struct {
50002 unsigned int refresh_interval; /* in tenths of second */
50003 struct delayed_work refresh_dw;
50004@@ -241,15 +241,15 @@ static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
50005 return 0;
50006 }
50007
50008-static int __fprog_create(struct sock_fprog_kern **pfprog, u32 data_len,
50009+static int __fprog_create(struct sock_fprog **pfprog, u32 data_len,
50010 const void *data)
50011 {
50012- struct sock_fprog_kern *fprog;
50013+ struct sock_fprog *fprog;
50014 struct sock_filter *filter = (struct sock_filter *) data;
50015
50016 if (data_len % sizeof(struct sock_filter))
50017 return -EINVAL;
50018- fprog = kmalloc(sizeof(*fprog), GFP_KERNEL);
50019+ fprog = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL);
50020 if (!fprog)
50021 return -ENOMEM;
50022 fprog->filter = kmemdup(filter, data_len, GFP_KERNEL);
50023@@ -262,7 +262,7 @@ static int __fprog_create(struct sock_fprog_kern **pfprog, u32 data_len,
50024 return 0;
50025 }
50026
50027-static void __fprog_destroy(struct sock_fprog_kern *fprog)
50028+static void __fprog_destroy(struct sock_fprog *fprog)
50029 {
50030 kfree(fprog->filter);
50031 kfree(fprog);
50032@@ -273,7 +273,7 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
50033 struct lb_priv *lb_priv = get_lb_priv(team);
50034 struct sk_filter *fp = NULL;
50035 struct sk_filter *orig_fp;
50036- struct sock_fprog_kern *fprog = NULL;
50037+ struct sock_fprog *fprog = NULL;
50038 int err;
50039
50040 if (ctx->data.bin_val.len) {
50041diff --git a/drivers/net/tun.c b/drivers/net/tun.c
50042index 98bad1f..f197d7a 100644
50043--- a/drivers/net/tun.c
50044+++ b/drivers/net/tun.c
50045@@ -1854,7 +1854,7 @@ unlock:
50046 }
50047
50048 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
50049- unsigned long arg, int ifreq_len)
50050+ unsigned long arg, size_t ifreq_len)
50051 {
50052 struct tun_file *tfile = file->private_data;
50053 struct tun_struct *tun;
50054@@ -1867,6 +1867,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
50055 unsigned int ifindex;
50056 int ret;
50057
50058+ if (ifreq_len > sizeof ifr)
50059+ return -EFAULT;
50060+
50061 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
50062 if (copy_from_user(&ifr, argp, ifreq_len))
50063 return -EFAULT;
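
__tun_chr_ioctl() now receives the caller-provided structure size as size_t and rejects anything larger than the kernel ifreq before copying, closing the same over-read class as the de4x5 fix above and removing the signed/unsigned hazard of an int length. A standalone analogue:

    #include <string.h>

    struct ifreq_stub { char ifr_name[16]; short ifr_flags; };

    /* size_t, not int: a negative length cannot slip past the bound
     * check via implicit conversion, and the explicit upper bound
     * keeps the copy inside 'ifr'. */
    static long tun_ioctl_stub(const void *argp, size_t ifreq_len)
    {
        struct ifreq_stub ifr;

        if (ifreq_len > sizeof(ifr))
            return -14;                  /* -EFAULT */
        memcpy(&ifr, argp, ifreq_len);   /* stands in for copy_from_user() */
        return 0;
    }

    int main(void)
    {
        struct ifreq_stub req = { "tun0", 0 };
        return tun_ioctl_stub(&req, sizeof(req)) == 0 ? 0 : 1;
    }
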
50064diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
50065index a4272ed..cdd69ff 100644
50066--- a/drivers/net/usb/hso.c
50067+++ b/drivers/net/usb/hso.c
50068@@ -71,7 +71,7 @@
50069 #include <asm/byteorder.h>
50070 #include <linux/serial_core.h>
50071 #include <linux/serial.h>
50072-
50073+#include <asm/local.h>
50074
50075 #define MOD_AUTHOR "Option Wireless"
50076 #define MOD_DESCRIPTION "USB High Speed Option driver"
50077@@ -1177,7 +1177,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
50078 struct urb *urb;
50079
50080 urb = serial->rx_urb[0];
50081- if (serial->port.count > 0) {
50082+ if (atomic_read(&serial->port.count) > 0) {
50083 count = put_rxbuf_data(urb, serial);
50084 if (count == -1)
50085 return;
50086@@ -1215,7 +1215,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
50087 DUMP1(urb->transfer_buffer, urb->actual_length);
50088
50089 /* Anyone listening? */
50090- if (serial->port.count == 0)
50091+ if (atomic_read(&serial->port.count) == 0)
50092 return;
50093
50094 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
50095@@ -1277,8 +1277,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
50096 tty_port_tty_set(&serial->port, tty);
50097
50098 /* check for port already opened, if not set the termios */
50099- serial->port.count++;
50100- if (serial->port.count == 1) {
50101+ if (atomic_inc_return(&serial->port.count) == 1) {
50102 serial->rx_state = RX_IDLE;
50103 /* Force default termio settings */
50104 _hso_serial_set_termios(tty, NULL);
50105@@ -1288,7 +1287,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
50106 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
50107 if (result) {
50108 hso_stop_serial_device(serial->parent);
50109- serial->port.count--;
50110+ atomic_dec(&serial->port.count);
50111 kref_put(&serial->parent->ref, hso_serial_ref_free);
50112 }
50113 } else {
50114@@ -1325,10 +1324,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
50115
50116 /* reset the rts and dtr */
50117 /* do the actual close */
50118- serial->port.count--;
50119+ atomic_dec(&serial->port.count);
50120
50121- if (serial->port.count <= 0) {
50122- serial->port.count = 0;
50123+ if (atomic_read(&serial->port.count) <= 0) {
50124+ atomic_set(&serial->port.count, 0);
50125 tty_port_tty_set(&serial->port, NULL);
50126 if (!usb_gone)
50127 hso_stop_serial_device(serial->parent);
50128@@ -1403,7 +1402,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
50129
50130 /* the actual setup */
50131 spin_lock_irqsave(&serial->serial_lock, flags);
50132- if (serial->port.count)
50133+ if (atomic_read(&serial->port.count))
50134 _hso_serial_set_termios(tty, old);
50135 else
50136 tty->termios = *old;
50137@@ -1872,7 +1871,7 @@ static void intr_callback(struct urb *urb)
50138 D1("Pending read interrupt on port %d\n", i);
50139 spin_lock(&serial->serial_lock);
50140 if (serial->rx_state == RX_IDLE &&
50141- serial->port.count > 0) {
50142+ atomic_read(&serial->port.count) > 0) {
50143 /* Setup and send a ctrl req read on
50144 * port i */
50145 if (!serial->rx_urb_filled[0]) {
50146@@ -3045,7 +3044,7 @@ static int hso_resume(struct usb_interface *iface)
50147 /* Start all serial ports */
50148 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
50149 if (serial_table[i] && (serial_table[i]->interface == iface)) {
50150- if (dev2ser(serial_table[i])->port.count) {
50151+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
50152 result =
50153 hso_start_serial_device(serial_table[i], GFP_NOIO);
50154 hso_kick_transmit(dev2ser(serial_table[i]));
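
The hso driver's tty port open count moves from a plain int to atomic_t: open, close, resume, and the URB callbacks all touch it, and atomic_inc_return() == 1 detects the first opener in one step instead of the racy increment-then-test pair. A C11 sketch of the idiom:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int port_count;

    static void serial_open(void)
    {
        /* Increment and test atomically: only the first opener
         * initializes the hardware. */
        if (atomic_fetch_add(&port_count, 1) + 1 == 1)
            puts("first open: start device");
    }

    static void serial_close(void)
    {
        if (atomic_fetch_sub(&port_count, 1) - 1 <= 0) {
            atomic_store(&port_count, 0);
            puts("last close: stop device");
        }
    }

    int main(void)
    {
        serial_open();
        serial_close();
        return 0;
    }
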
50155diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
50156index 3eab74c..fb6097c 100644
50157--- a/drivers/net/usb/r8152.c
50158+++ b/drivers/net/usb/r8152.c
50159@@ -567,7 +567,7 @@ struct r8152 {
50160 void (*up)(struct r8152 *);
50161 void (*down)(struct r8152 *);
50162 void (*unload)(struct r8152 *);
50163- } rtl_ops;
50164+ } __no_const rtl_ops;
50165
50166 int intr_interval;
50167 u32 saved_wolopts;
50168diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
50169index a2515887..6d13233 100644
50170--- a/drivers/net/usb/sierra_net.c
50171+++ b/drivers/net/usb/sierra_net.c
50172@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
50173 /* atomic counter partially included in MAC address to make sure 2 devices
50174 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
50175 */
50176-static atomic_t iface_counter = ATOMIC_INIT(0);
50177+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
50178
50179 /*
50180 * SYNC Timer Delay definition used to set the expiry time
50181@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
50182 dev->net->netdev_ops = &sierra_net_device_ops;
50183
50184 /* change MAC addr to include, ifacenum, and to be unique */
50185- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
50186+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
50187 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
50188
50189 /* we will have to manufacture ethernet headers, prepare template */
50190diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
50191index 7d9f84a..7f690da 100644
50192--- a/drivers/net/virtio_net.c
50193+++ b/drivers/net/virtio_net.c
50194@@ -47,7 +47,7 @@ module_param(gso, bool, 0444);
50195 #define RECEIVE_AVG_WEIGHT 64
50196
50197 /* Minimum alignment for mergeable packet buffers. */
50198-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
50199+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
50200
50201 #define VIRTNET_DRIVER_VERSION "1.0.0"
50202
50203diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
50204index 9f79192..838cf95 100644
50205--- a/drivers/net/vxlan.c
50206+++ b/drivers/net/vxlan.c
50207@@ -2838,7 +2838,7 @@ nla_put_failure:
50208 return -EMSGSIZE;
50209 }
50210
50211-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
50212+static struct rtnl_link_ops vxlan_link_ops = {
50213 .kind = "vxlan",
50214 .maxtype = IFLA_VXLAN_MAX,
50215 .policy = vxlan_policy,
50216@@ -2885,7 +2885,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
50217 return NOTIFY_DONE;
50218 }
50219
50220-static struct notifier_block vxlan_notifier_block __read_mostly = {
50221+static struct notifier_block vxlan_notifier_block = {
50222 .notifier_call = vxlan_lowerdev_event,
50223 };
50224
50225diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
50226index 5920c99..ff2e4a5 100644
50227--- a/drivers/net/wan/lmc/lmc_media.c
50228+++ b/drivers/net/wan/lmc/lmc_media.c
50229@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
50230 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
50231
50232 lmc_media_t lmc_ds3_media = {
50233- lmc_ds3_init, /* special media init stuff */
50234- lmc_ds3_default, /* reset to default state */
50235- lmc_ds3_set_status, /* reset status to state provided */
50236- lmc_dummy_set_1, /* set clock source */
50237- lmc_dummy_set2_1, /* set line speed */
50238- lmc_ds3_set_100ft, /* set cable length */
50239- lmc_ds3_set_scram, /* set scrambler */
50240- lmc_ds3_get_link_status, /* get link status */
50241- lmc_dummy_set_1, /* set link status */
50242- lmc_ds3_set_crc_length, /* set CRC length */
50243- lmc_dummy_set_1, /* set T1 or E1 circuit type */
50244- lmc_ds3_watchdog
50245+ .init = lmc_ds3_init, /* special media init stuff */
50246+ .defaults = lmc_ds3_default, /* reset to default state */
50247+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
50248+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
50249+ .set_speed = lmc_dummy_set2_1, /* set line speed */
50250+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
50251+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
50252+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
50253+ .set_link_status = lmc_dummy_set_1, /* set link status */
50254+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
50255+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
50256+ .watchdog = lmc_ds3_watchdog
50257 };
50258
50259 lmc_media_t lmc_hssi_media = {
50260- lmc_hssi_init, /* special media init stuff */
50261- lmc_hssi_default, /* reset to default state */
50262- lmc_hssi_set_status, /* reset status to state provided */
50263- lmc_hssi_set_clock, /* set clock source */
50264- lmc_dummy_set2_1, /* set line speed */
50265- lmc_dummy_set_1, /* set cable length */
50266- lmc_dummy_set_1, /* set scrambler */
50267- lmc_hssi_get_link_status, /* get link status */
50268- lmc_hssi_set_link_status, /* set link status */
50269- lmc_hssi_set_crc_length, /* set CRC length */
50270- lmc_dummy_set_1, /* set T1 or E1 circuit type */
50271- lmc_hssi_watchdog
50272+ .init = lmc_hssi_init, /* special media init stuff */
50273+ .defaults = lmc_hssi_default, /* reset to default state */
50274+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
50275+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
50276+ .set_speed = lmc_dummy_set2_1, /* set line speed */
50277+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
50278+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
50279+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
50280+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
50281+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
50282+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
50283+ .watchdog = lmc_hssi_watchdog
50284 };
50285
50286-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
50287- lmc_ssi_default, /* reset to default state */
50288- lmc_ssi_set_status, /* reset status to state provided */
50289- lmc_ssi_set_clock, /* set clock source */
50290- lmc_ssi_set_speed, /* set line speed */
50291- lmc_dummy_set_1, /* set cable length */
50292- lmc_dummy_set_1, /* set scrambler */
50293- lmc_ssi_get_link_status, /* get link status */
50294- lmc_ssi_set_link_status, /* set link status */
50295- lmc_ssi_set_crc_length, /* set CRC length */
50296- lmc_dummy_set_1, /* set T1 or E1 circuit type */
50297- lmc_ssi_watchdog
50298+lmc_media_t lmc_ssi_media = {
50299+ .init = lmc_ssi_init, /* special media init stuff */
50300+ .defaults = lmc_ssi_default, /* reset to default state */
50301+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
50302+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
50303+ .set_speed = lmc_ssi_set_speed, /* set line speed */
50304+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
50305+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
50306+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
50307+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
50308+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
50309+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
50310+ .watchdog = lmc_ssi_watchdog
50311 };
50312
50313 lmc_media_t lmc_t1_media = {
50314- lmc_t1_init, /* special media init stuff */
50315- lmc_t1_default, /* reset to default state */
50316- lmc_t1_set_status, /* reset status to state provided */
50317- lmc_t1_set_clock, /* set clock source */
50318- lmc_dummy_set2_1, /* set line speed */
50319- lmc_dummy_set_1, /* set cable length */
50320- lmc_dummy_set_1, /* set scrambler */
50321- lmc_t1_get_link_status, /* get link status */
50322- lmc_dummy_set_1, /* set link status */
50323- lmc_t1_set_crc_length, /* set CRC length */
50324- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
50325- lmc_t1_watchdog
50326+ .init = lmc_t1_init, /* special media init stuff */
50327+ .defaults = lmc_t1_default, /* reset to default state */
50328+ .set_status = lmc_t1_set_status, /* reset status to state provided */
50329+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
50330+ .set_speed = lmc_dummy_set2_1, /* set line speed */
50331+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
50332+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
50333+ .get_link_status = lmc_t1_get_link_status, /* get link status */
50334+ .set_link_status = lmc_dummy_set_1, /* set link status */
50335+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
50336+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
50337+ .watchdog = lmc_t1_watchdog
50338 };
50339
50340 static void
50341diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
50342index feacc3b..5bac0de 100644
50343--- a/drivers/net/wan/z85230.c
50344+++ b/drivers/net/wan/z85230.c
50345@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
50346
50347 struct z8530_irqhandler z8530_sync =
50348 {
50349- z8530_rx,
50350- z8530_tx,
50351- z8530_status
50352+ .rx = z8530_rx,
50353+ .tx = z8530_tx,
50354+ .status = z8530_status
50355 };
50356
50357 EXPORT_SYMBOL(z8530_sync);
50358@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
50359 }
50360
50361 static struct z8530_irqhandler z8530_dma_sync = {
50362- z8530_dma_rx,
50363- z8530_dma_tx,
50364- z8530_dma_status
50365+ .rx = z8530_dma_rx,
50366+ .tx = z8530_dma_tx,
50367+ .status = z8530_dma_status
50368 };
50369
50370 static struct z8530_irqhandler z8530_txdma_sync = {
50371- z8530_rx,
50372- z8530_dma_tx,
50373- z8530_dma_status
50374+ .rx = z8530_rx,
50375+ .tx = z8530_dma_tx,
50376+ .status = z8530_dma_status
50377 };
50378
50379 /**
50380@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
50381
50382 struct z8530_irqhandler z8530_nop=
50383 {
50384- z8530_rx_clear,
50385- z8530_tx_clear,
50386- z8530_status_clear
50387+ .rx = z8530_rx_clear,
50388+ .tx = z8530_tx_clear,
50389+ .status = z8530_status_clear
50390 };
50391
50392
50393diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
50394index 0b60295..b8bfa5b 100644
50395--- a/drivers/net/wimax/i2400m/rx.c
50396+++ b/drivers/net/wimax/i2400m/rx.c
50397@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
50398 if (i2400m->rx_roq == NULL)
50399 goto error_roq_alloc;
50400
50401- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
50402+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
50403 GFP_KERNEL);
50404 if (rd == NULL) {
50405 result = -ENOMEM;
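
Swapping the two kcalloc() arguments is behavior-neutral: kcalloc(n, size) computes an overflow-checked n * size, and multiplication commutes. The reordering only matters to static analysis; placing the element size first appears to suit the size_overflow instrumentation, though that rationale is inferred rather than stated in the patch. A standalone sketch of the overflow check that makes the swap safe:

    #include <stddef.h>
    #include <stdlib.h>

    /* calloc_checked is a hypothetical stand-in for kcalloc: refuse the
     * request if a * b would overflow size_t, else return a zeroed
     * block. Either argument order gives the same result. */
    static void *calloc_checked(size_t a, size_t b)
    {
        if (a != 0 && b > (size_t)-1 / a)
            return NULL;            /* a * b would overflow */
        return calloc(a, b);
    }
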
50406diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
50407index 64747d4..17c4cf3 100644
50408--- a/drivers/net/wireless/airo.c
50409+++ b/drivers/net/wireless/airo.c
50410@@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
50411 struct airo_info *ai = dev->ml_priv;
50412 int ridcode;
50413 int enabled;
50414- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
50415+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
50416 unsigned char *iobuf;
50417
50418 /* Only super-user can write RIDs */
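
Dropping static from the local pointer here is a real fix, not churn: a static local function pointer is a single writable slot in .data shared by every caller, which is both a race under concurrency and a classic overwrite target, while a plain automatic lives on each caller's stack. A minimal sketch of the distinction (writer_fn is a hypothetical stand-in for the driver's callback type):

    typedef int (*writer_fn)(const void *buf, int len);

    int dispatch(writer_fn writer, const void *buf, int len)
    {
        /* BAD: static writer_fn w; -- one shared, writable slot in .data */
        writer_fn w = writer;       /* per-call, lives on the stack */
        return w(buf, len);
    }
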
50419diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
50420index d48776e..373d049 100644
50421--- a/drivers/net/wireless/at76c50x-usb.c
50422+++ b/drivers/net/wireless/at76c50x-usb.c
50423@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
50424 }
50425
50426 /* Convert timeout from the DFU status to jiffies */
50427-static inline unsigned long at76_get_timeout(struct dfu_status *s)
50428+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
50429 {
50430 return msecs_to_jiffies((s->poll_timeout[2] << 16)
50431 | (s->poll_timeout[1] << 8)
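
__intentional_overflow(-1) is an annotation consumed by grsecurity's size_overflow GCC plugin: it marks this function's return value as deliberately allowed to overflow, so the plugin skips instrumenting it. When the plugin is absent the macro is assumed to expand to nothing, as in this sketch:

    /* Assumed fallback so the annotation is harmless without the plugin. */
    #ifndef __intentional_overflow
    #define __intentional_overflow(...)
    #endif

    static inline unsigned long __intentional_overflow(-1)
    reassemble_timeout(const unsigned char t[3])
    {
        /* Byte reassembly like this is what the annotation blesses. */
        return ((unsigned long)t[2] << 16) | (t[1] << 8) | t[0];
    }
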
50432diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
50433index e493db4..2c1853a 100644
50434--- a/drivers/net/wireless/ath/ath10k/htc.c
50435+++ b/drivers/net/wireless/ath/ath10k/htc.c
50436@@ -840,7 +840,10 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
50437 /* registered target arrival callback from the HIF layer */
50438 int ath10k_htc_init(struct ath10k *ar)
50439 {
50440- struct ath10k_hif_cb htc_callbacks;
50441+ static struct ath10k_hif_cb htc_callbacks = {
50442+ .rx_completion = ath10k_htc_rx_completion_handler,
50443+ .tx_completion = ath10k_htc_tx_completion_handler,
50444+ };
50445 struct ath10k_htc_ep *ep = NULL;
50446 struct ath10k_htc *htc = &ar->htc;
50447
50448@@ -850,8 +853,6 @@ int ath10k_htc_init(struct ath10k *ar)
50449 ath10k_htc_reset_endpoint_states(htc);
50450
50451 /* setup HIF layer callbacks */
50452- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
50453- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
50454 htc->ar = ar;
50455
50456 /* Get HIF default pipe for HTC message exchange */
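
This ath10k change replaces two runtime member assignments with one static, fully initialized object: the callback table is never observable half-filled, and a compile-time initializer is what allows a constification pass to move it to read-only memory (in the patch the const is implicit, added by the plugin). A standalone sketch:

    struct hif_cb {
        void (*rx_completion)(void);
        void (*tx_completion)(void);
    };

    static void rx_done(void) { }
    static void tx_done(void) { }

    /* Complete before any code runs; eligible for .rodata placement. */
    static const struct hif_cb callbacks = {
        .rx_completion = rx_done,
        .tx_completion = tx_done,
    };
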
50457diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
50458index 4716d33..a688310 100644
50459--- a/drivers/net/wireless/ath/ath10k/htc.h
50460+++ b/drivers/net/wireless/ath/ath10k/htc.h
50461@@ -271,13 +271,13 @@ enum ath10k_htc_ep_id {
50462
50463 struct ath10k_htc_ops {
50464 void (*target_send_suspend_complete)(struct ath10k *ar);
50465-};
50466+} __no_const;
50467
50468 struct ath10k_htc_ep_ops {
50469 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
50470 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
50471 void (*ep_tx_credits)(struct ath10k *);
50472-};
50473+} __no_const;
50474
50475 /* service connection information */
50476 struct ath10k_htc_svc_conn_req {
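
__no_const is the escape hatch for grsecurity's constify plugin, which by default makes structures consisting only of function pointers const; a type whose members genuinely are installed at runtime must opt out. Assuming the usual no-op fallback when the plugin is absent:

    /* Assumed fallback: without the plugin the attribute vanishes. */
    #ifndef __no_const
    #define __no_const
    #endif

    struct runtime_ops {
        void (*ep_tx_complete)(void);
        void (*ep_rx_complete)(void);
    } __no_const;   /* members are assigned per-endpoint at runtime */
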
50477diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
50478index 741b38d..b7ae41b 100644
50479--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
50480+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
50481@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50482 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
50483 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
50484
50485- ACCESS_ONCE(ads->ds_link) = i->link;
50486- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
50487+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
50488+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
50489
50490 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
50491 ctl6 = SM(i->keytype, AR_EncrType);
50492@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50493
50494 if ((i->is_first || i->is_last) &&
50495 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
50496- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
50497+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
50498 | set11nTries(i->rates, 1)
50499 | set11nTries(i->rates, 2)
50500 | set11nTries(i->rates, 3)
50501 | (i->dur_update ? AR_DurUpdateEna : 0)
50502 | SM(0, AR_BurstDur);
50503
50504- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
50505+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
50506 | set11nRate(i->rates, 1)
50507 | set11nRate(i->rates, 2)
50508 | set11nRate(i->rates, 3);
50509 } else {
50510- ACCESS_ONCE(ads->ds_ctl2) = 0;
50511- ACCESS_ONCE(ads->ds_ctl3) = 0;
50512+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
50513+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
50514 }
50515
50516 if (!i->is_first) {
50517- ACCESS_ONCE(ads->ds_ctl0) = 0;
50518- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
50519- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
50520+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
50521+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
50522+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
50523 return;
50524 }
50525
50526@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50527 break;
50528 }
50529
50530- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
50531+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
50532 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
50533 | SM(i->txpower, AR_XmitPower)
50534 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
50535@@ -289,19 +289,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50536 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
50537 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
50538
50539- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
50540- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
50541+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
50542+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
50543
50544 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
50545 return;
50546
50547- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
50548+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
50549 | set11nPktDurRTSCTS(i->rates, 1);
50550
50551- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
50552+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
50553 | set11nPktDurRTSCTS(i->rates, 3);
50554
50555- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
50556+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
50557 | set11nRateFlags(i->rates, 1)
50558 | set11nRateFlags(i->rates, 2)
50559 | set11nRateFlags(i->rates, 3)
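
Every descriptor store in this hunk moves from ACCESS_ONCE() to ACCESS_ONCE_RW(). Under this patch ACCESS_ONCE() is assumed to yield a const-qualified volatile lvalue, fine for reads but a compile error for writes once data can be constified, while the _RW variant keeps the writable form for fields the driver must legitimately update. The assumed shape of the pair (GNU C typeof, not verbatim):

    #define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

    /* ACCESS_ONCE(v) = 1;    rejected: assignment to a const lvalue */
    /* ACCESS_ONCE_RW(v) = 1; compiles: volatile, non-const store    */
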
50560diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50561index 729ffbf..49f50e3 100644
50562--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50563+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50564@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50565 (i->qcu << AR_TxQcuNum_S) | desc_len;
50566
50567 checksum += val;
50568- ACCESS_ONCE(ads->info) = val;
50569+ ACCESS_ONCE_RW(ads->info) = val;
50570
50571 checksum += i->link;
50572- ACCESS_ONCE(ads->link) = i->link;
50573+ ACCESS_ONCE_RW(ads->link) = i->link;
50574
50575 checksum += i->buf_addr[0];
50576- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
50577+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
50578 checksum += i->buf_addr[1];
50579- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
50580+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
50581 checksum += i->buf_addr[2];
50582- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
50583+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
50584 checksum += i->buf_addr[3];
50585- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
50586+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
50587
50588 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
50589- ACCESS_ONCE(ads->ctl3) = val;
50590+ ACCESS_ONCE_RW(ads->ctl3) = val;
50591 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
50592- ACCESS_ONCE(ads->ctl5) = val;
50593+ ACCESS_ONCE_RW(ads->ctl5) = val;
50594 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
50595- ACCESS_ONCE(ads->ctl7) = val;
50596+ ACCESS_ONCE_RW(ads->ctl7) = val;
50597 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
50598- ACCESS_ONCE(ads->ctl9) = val;
50599+ ACCESS_ONCE_RW(ads->ctl9) = val;
50600
50601 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
50602- ACCESS_ONCE(ads->ctl10) = checksum;
50603+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
50604
50605 if (i->is_first || i->is_last) {
50606- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
50607+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
50608 | set11nTries(i->rates, 1)
50609 | set11nTries(i->rates, 2)
50610 | set11nTries(i->rates, 3)
50611 | (i->dur_update ? AR_DurUpdateEna : 0)
50612 | SM(0, AR_BurstDur);
50613
50614- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
50615+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
50616 | set11nRate(i->rates, 1)
50617 | set11nRate(i->rates, 2)
50618 | set11nRate(i->rates, 3);
50619 } else {
50620- ACCESS_ONCE(ads->ctl13) = 0;
50621- ACCESS_ONCE(ads->ctl14) = 0;
50622+ ACCESS_ONCE_RW(ads->ctl13) = 0;
50623+ ACCESS_ONCE_RW(ads->ctl14) = 0;
50624 }
50625
50626 ads->ctl20 = 0;
50627@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50628
50629 ctl17 = SM(i->keytype, AR_EncrType);
50630 if (!i->is_first) {
50631- ACCESS_ONCE(ads->ctl11) = 0;
50632- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
50633- ACCESS_ONCE(ads->ctl15) = 0;
50634- ACCESS_ONCE(ads->ctl16) = 0;
50635- ACCESS_ONCE(ads->ctl17) = ctl17;
50636- ACCESS_ONCE(ads->ctl18) = 0;
50637- ACCESS_ONCE(ads->ctl19) = 0;
50638+ ACCESS_ONCE_RW(ads->ctl11) = 0;
50639+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
50640+ ACCESS_ONCE_RW(ads->ctl15) = 0;
50641+ ACCESS_ONCE_RW(ads->ctl16) = 0;
50642+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
50643+ ACCESS_ONCE_RW(ads->ctl18) = 0;
50644+ ACCESS_ONCE_RW(ads->ctl19) = 0;
50645 return;
50646 }
50647
50648- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
50649+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
50650 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
50651 | SM(i->txpower, AR_XmitPower)
50652 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
50653@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50654 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
50655 ctl12 |= SM(val, AR_PAPRDChainMask);
50656
50657- ACCESS_ONCE(ads->ctl12) = ctl12;
50658- ACCESS_ONCE(ads->ctl17) = ctl17;
50659+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
50660+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
50661
50662- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
50663+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
50664 | set11nPktDurRTSCTS(i->rates, 1);
50665
50666- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
50667+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
50668 | set11nPktDurRTSCTS(i->rates, 3);
50669
50670- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
50671+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
50672 | set11nRateFlags(i->rates, 1)
50673 | set11nRateFlags(i->rates, 2)
50674 | set11nRateFlags(i->rates, 3)
50675 | SM(i->rtscts_rate, AR_RTSCTSRate);
50676
50677- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
50678+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
50679 }
50680
50681 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
50682diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
50683index 0acd4b5..0591c91 100644
50684--- a/drivers/net/wireless/ath/ath9k/hw.h
50685+++ b/drivers/net/wireless/ath/ath9k/hw.h
50686@@ -629,7 +629,7 @@ struct ath_hw_private_ops {
50687
50688 /* ANI */
50689 void (*ani_cache_ini_regs)(struct ath_hw *ah);
50690-};
50691+} __no_const;
50692
50693 /**
50694 * struct ath_spec_scan - parameters for Atheros spectral scan
50695@@ -706,7 +706,7 @@ struct ath_hw_ops {
50696 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
50697 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
50698 #endif
50699-};
50700+} __no_const;
50701
50702 struct ath_nf_limits {
50703 s16 max;
50704diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
50705index 92190da..f3a4c4c 100644
50706--- a/drivers/net/wireless/b43/phy_lp.c
50707+++ b/drivers/net/wireless/b43/phy_lp.c
50708@@ -2514,7 +2514,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
50709 {
50710 struct ssb_bus *bus = dev->dev->sdev->bus;
50711
50712- static const struct b206x_channel *chandata = NULL;
50713+ const struct b206x_channel *chandata = NULL;
50714 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
50715 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
50716 u16 old_comm15, scale;
50717diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
50718index dc1d20c..f7a4f06 100644
50719--- a/drivers/net/wireless/iwlegacy/3945-mac.c
50720+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
50721@@ -3633,7 +3633,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
50722 */
50723 if (il3945_mod_params.disable_hw_scan) {
50724 D_INFO("Disabling hw_scan\n");
50725- il3945_mac_ops.hw_scan = NULL;
50726+ pax_open_kernel();
50727+ *(void **)&il3945_mac_ops.hw_scan = NULL;
50728+ pax_close_kernel();
50729 }
50730
50731 D_INFO("*** LOAD DRIVER ***\n");
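
The pax_open_kernel()/pax_close_kernel() pair seen here, and in many hunks below, brackets writes to data that KERNEXEC/constification keeps read-only; the *(void **)& cast strips the const the plugin added to the ops structure. A simplified x86 sketch of what the pair is assumed to do, toggling CR0.WP around the store (not the verbatim PaX implementation):

    /* Kernel-context sketch; read_cr0()/write_cr0() and X86_CR0_WP are
     * the kernel's real x86 helpers. */
    static inline void pax_open_kernel_sketch(void)
    {
        preempt_disable();
        write_cr0(read_cr0() & ~X86_CR0_WP); /* allow writes to RO pages */
    }

    static inline void pax_close_kernel_sketch(void)
    {
        write_cr0(read_cr0() | X86_CR0_WP);  /* restore write protection */
        preempt_enable();
    }
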
50732diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50733index 0ffb6ff..c0b7f0e 100644
50734--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50735+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50736@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
50737 {
50738 struct iwl_priv *priv = file->private_data;
50739 char buf[64];
50740- int buf_size;
50741+ size_t buf_size;
50742 u32 offset, len;
50743
50744 memset(buf, 0, sizeof(buf));
50745@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
50746 struct iwl_priv *priv = file->private_data;
50747
50748 char buf[8];
50749- int buf_size;
50750+ size_t buf_size;
50751 u32 reset_flag;
50752
50753 memset(buf, 0, sizeof(buf));
50754@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
50755 {
50756 struct iwl_priv *priv = file->private_data;
50757 char buf[8];
50758- int buf_size;
50759+ size_t buf_size;
50760 int ht40;
50761
50762 memset(buf, 0, sizeof(buf));
50763@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
50764 {
50765 struct iwl_priv *priv = file->private_data;
50766 char buf[8];
50767- int buf_size;
50768+ size_t buf_size;
50769 int value;
50770
50771 memset(buf, 0, sizeof(buf));
50772@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
50773 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
50774 DEBUGFS_READ_FILE_OPS(current_sleep_command);
50775
50776-static const char *fmt_value = " %-30s %10u\n";
50777-static const char *fmt_hex = " %-30s 0x%02X\n";
50778-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
50779-static const char *fmt_header =
50780+static const char fmt_value[] = " %-30s %10u\n";
50781+static const char fmt_hex[] = " %-30s 0x%02X\n";
50782+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
50783+static const char fmt_header[] =
50784 "%-32s current cumulative delta max\n";
50785
50786 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
50787@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
50788 {
50789 struct iwl_priv *priv = file->private_data;
50790 char buf[8];
50791- int buf_size;
50792+ size_t buf_size;
50793 int clear;
50794
50795 memset(buf, 0, sizeof(buf));
50796@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
50797 {
50798 struct iwl_priv *priv = file->private_data;
50799 char buf[8];
50800- int buf_size;
50801+ size_t buf_size;
50802 int trace;
50803
50804 memset(buf, 0, sizeof(buf));
50805@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
50806 {
50807 struct iwl_priv *priv = file->private_data;
50808 char buf[8];
50809- int buf_size;
50810+ size_t buf_size;
50811 int missed;
50812
50813 memset(buf, 0, sizeof(buf));
50814@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
50815
50816 struct iwl_priv *priv = file->private_data;
50817 char buf[8];
50818- int buf_size;
50819+ size_t buf_size;
50820 int plcp;
50821
50822 memset(buf, 0, sizeof(buf));
50823@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
50824
50825 struct iwl_priv *priv = file->private_data;
50826 char buf[8];
50827- int buf_size;
50828+ size_t buf_size;
50829 int flush;
50830
50831 memset(buf, 0, sizeof(buf));
50832@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
50833
50834 struct iwl_priv *priv = file->private_data;
50835 char buf[8];
50836- int buf_size;
50837+ size_t buf_size;
50838 int rts;
50839
50840 if (!priv->cfg->ht_params)
50841@@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
50842 {
50843 struct iwl_priv *priv = file->private_data;
50844 char buf[8];
50845- int buf_size;
50846+ size_t buf_size;
50847
50848 memset(buf, 0, sizeof(buf));
50849 buf_size = min(count, sizeof(buf) - 1);
50850@@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
50851 struct iwl_priv *priv = file->private_data;
50852 u32 event_log_flag;
50853 char buf[8];
50854- int buf_size;
50855+ size_t buf_size;
50856
50857 /* check that the interface is up */
50858 if (!iwl_is_ready(priv))
50859@@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
50860 struct iwl_priv *priv = file->private_data;
50861 char buf[8];
50862 u32 calib_disabled;
50863- int buf_size;
50864+ size_t buf_size;
50865
50866 memset(buf, 0, sizeof(buf));
50867 buf_size = min(count, sizeof(buf) - 1);
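
Each of these debugfs write handlers stores min(count, sizeof(buf) - 1) into buf_size. Since count is a size_t, keeping the result in an int truncates on 64-bit and trips the kernel's type-checked min(); widening buf_size to size_t keeps the size arithmetic unsigned end to end. A standalone sketch of the safe pattern:

    #include <stddef.h>
    #include <string.h>

    /* Keep sizes in size_t: an int copy of a size_t count can truncate
     * (count > INT_MAX) and then misbehave in signed comparisons.
     * dst_size must be at least 1. */
    static size_t bounded_copy(char *dst, size_t dst_size,
                               const char *src, size_t count)
    {
        size_t n = count < dst_size - 1 ? count : dst_size - 1;
        memcpy(dst, src, n);
        dst[n] = '\0';
        return n;
    }

The identical int-to-size_t change repeats in the pcie/trans.c hunks below.
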
50868diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
50869index 788085b..0bc852a 100644
50870--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
50871+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
50872@@ -1598,7 +1598,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
50873 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
50874
50875 char buf[8];
50876- int buf_size;
50877+ size_t buf_size;
50878 u32 reset_flag;
50879
50880 memset(buf, 0, sizeof(buf));
50881@@ -1619,7 +1619,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
50882 {
50883 struct iwl_trans *trans = file->private_data;
50884 char buf[8];
50885- int buf_size;
50886+ size_t buf_size;
50887 int csr;
50888
50889 memset(buf, 0, sizeof(buf));
50890diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
50891index a312c65..162b13a 100644
50892--- a/drivers/net/wireless/mac80211_hwsim.c
50893+++ b/drivers/net/wireless/mac80211_hwsim.c
50894@@ -2573,20 +2573,20 @@ static int __init init_mac80211_hwsim(void)
50895 if (channels < 1)
50896 return -EINVAL;
50897
50898- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
50899- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
50900- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
50901- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
50902- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
50903- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
50904- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
50905- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
50906- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
50907- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
50908- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
50909- mac80211_hwsim_assign_vif_chanctx;
50910- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
50911- mac80211_hwsim_unassign_vif_chanctx;
50912+ pax_open_kernel();
50913+ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
50914+ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
50915+ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
50916+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
50917+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
50918+ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
50919+ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
50920+ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
50921+ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
50922+ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
50923+ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
50924+ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
50925+ pax_close_kernel();
50926
50927 spin_lock_init(&hwsim_radio_lock);
50928 INIT_LIST_HEAD(&hwsim_radios);
50929diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
50930index d2a9a08..0cb175d 100644
50931--- a/drivers/net/wireless/rndis_wlan.c
50932+++ b/drivers/net/wireless/rndis_wlan.c
50933@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
50934
50935 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
50936
50937- if (rts_threshold < 0 || rts_threshold > 2347)
50938+ if (rts_threshold > 2347)
50939 rts_threshold = 2347;
50940
50941 tmp = cpu_to_le32(rts_threshold);
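
rts_threshold is a u32, so the deleted rts_threshold < 0 test was dead code: an unsigned value is never negative, and compilers warn about the comparison (-Wtype-limits). Only the upper bound carries meaning:

    #include <stdint.h>

    /* For an unsigned threshold only the upper bound can fail;
     * 2347 is the conventional 802.11 RTS maximum. */
    static uint32_t clamp_rts_threshold(uint32_t rts)
    {
        return rts > 2347 ? 2347 : rts;
    }
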
50942diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
50943index d13f25c..2573994 100644
50944--- a/drivers/net/wireless/rt2x00/rt2x00.h
50945+++ b/drivers/net/wireless/rt2x00/rt2x00.h
50946@@ -375,7 +375,7 @@ struct rt2x00_intf {
50947 * for hardware which doesn't support hardware
50948 * sequence counting.
50949 */
50950- atomic_t seqno;
50951+ atomic_unchecked_t seqno;
50952 };
50953
50954 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
50955diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
50956index 5642ccc..01f03eb 100644
50957--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
50958+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
50959@@ -250,9 +250,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
50960 * sequence counter given by mac80211.
50961 */
50962 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
50963- seqno = atomic_add_return(0x10, &intf->seqno);
50964+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
50965 else
50966- seqno = atomic_read(&intf->seqno);
50967+ seqno = atomic_read_unchecked(&intf->seqno);
50968
50969 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
50970 hdr->seq_ctrl |= cpu_to_le16(seqno);
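
atomic_t becomes atomic_unchecked_t here because PaX's REFCOUNT hardening instruments ordinary atomic_t operations to detect and saturate on overflow; an 802.11 sequence counter is supposed to wrap, so it must use the uninstrumented API. A simplified user-space stand-in for the unchecked add (the kernel implementation differs):

    /* Hypothetical sketch built on a GCC builtin; the add wraps
     * silently, which is exactly what a sequence counter wants. */
    typedef struct { int counter; } atomic_unchecked_sketch_t;

    static inline int add_return_unchecked(int i, atomic_unchecked_sketch_t *v)
    {
        return __sync_add_and_fetch(&v->counter, i);
    }

The oprofile hunks further down apply the same opt-out to pure statistics counters, where saturation would only distort the counts.
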
50971diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
50972index b661f896..ddf7d2b 100644
50973--- a/drivers/net/wireless/ti/wl1251/sdio.c
50974+++ b/drivers/net/wireless/ti/wl1251/sdio.c
50975@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
50976
50977 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
50978
50979- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
50980- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
50981+ pax_open_kernel();
50982+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
50983+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
50984+ pax_close_kernel();
50985
50986 wl1251_info("using dedicated interrupt line");
50987 } else {
50988- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
50989- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
50990+ pax_open_kernel();
50991+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
50992+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
50993+ pax_close_kernel();
50994
50995 wl1251_info("using SDIO interrupt");
50996 }
50997diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
50998index d50dfac..0a6f5be3 100644
50999--- a/drivers/net/wireless/ti/wl12xx/main.c
51000+++ b/drivers/net/wireless/ti/wl12xx/main.c
51001@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
51002 sizeof(wl->conf.mem));
51003
51004 /* read data preparation is only needed by wl127x */
51005- wl->ops->prepare_read = wl127x_prepare_read;
51006+ pax_open_kernel();
51007+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
51008+ pax_close_kernel();
51009
51010 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
51011 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
51012@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
51013 sizeof(wl->conf.mem));
51014
51015 /* read data preparation is only needed by wl127x */
51016- wl->ops->prepare_read = wl127x_prepare_read;
51017+ pax_open_kernel();
51018+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
51019+ pax_close_kernel();
51020
51021 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
51022 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
51023diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
51024index de5b4fa..7996ec6 100644
51025--- a/drivers/net/wireless/ti/wl18xx/main.c
51026+++ b/drivers/net/wireless/ti/wl18xx/main.c
51027@@ -1900,8 +1900,10 @@ static int wl18xx_setup(struct wl1271 *wl)
51028 }
51029
51030 if (!checksum_param) {
51031- wl18xx_ops.set_rx_csum = NULL;
51032- wl18xx_ops.init_vif = NULL;
51033+ pax_open_kernel();
51034+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
51035+ *(void **)&wl18xx_ops.init_vif = NULL;
51036+ pax_close_kernel();
51037 }
51038
51039 /* Enable 11a Band only if we have 5G antennas */
51040diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
51041index a912dc0..a8225ba 100644
51042--- a/drivers/net/wireless/zd1211rw/zd_usb.c
51043+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
51044@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
51045 {
51046 struct zd_usb *usb = urb->context;
51047 struct zd_usb_interrupt *intr = &usb->intr;
51048- int len;
51049+ unsigned int len;
51050 u16 int_num;
51051
51052 ZD_ASSERT(in_interrupt());
51053diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
51054index 683671a..4519fc2 100644
51055--- a/drivers/nfc/nfcwilink.c
51056+++ b/drivers/nfc/nfcwilink.c
51057@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
51058
51059 static int nfcwilink_probe(struct platform_device *pdev)
51060 {
51061- static struct nfcwilink *drv;
51062+ struct nfcwilink *drv;
51063 int rc;
51064 __u32 protocols;
51065
51066diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
51067index d93b2b6..ae50401 100644
51068--- a/drivers/oprofile/buffer_sync.c
51069+++ b/drivers/oprofile/buffer_sync.c
51070@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
51071 if (cookie == NO_COOKIE)
51072 offset = pc;
51073 if (cookie == INVALID_COOKIE) {
51074- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
51075+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
51076 offset = pc;
51077 }
51078 if (cookie != last_cookie) {
51079@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
51080 /* add userspace sample */
51081
51082 if (!mm) {
51083- atomic_inc(&oprofile_stats.sample_lost_no_mm);
51084+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
51085 return 0;
51086 }
51087
51088 cookie = lookup_dcookie(mm, s->eip, &offset);
51089
51090 if (cookie == INVALID_COOKIE) {
51091- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
51092+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
51093 return 0;
51094 }
51095
51096@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
51097 /* ignore backtraces if failed to add a sample */
51098 if (state == sb_bt_start) {
51099 state = sb_bt_ignore;
51100- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
51101+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
51102 }
51103 }
51104 release_mm(mm);
51105diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
51106index c0cc4e7..44d4e54 100644
51107--- a/drivers/oprofile/event_buffer.c
51108+++ b/drivers/oprofile/event_buffer.c
51109@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
51110 }
51111
51112 if (buffer_pos == buffer_size) {
51113- atomic_inc(&oprofile_stats.event_lost_overflow);
51114+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
51115 return;
51116 }
51117
51118diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
51119index ed2c3ec..deda85a 100644
51120--- a/drivers/oprofile/oprof.c
51121+++ b/drivers/oprofile/oprof.c
51122@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
51123 if (oprofile_ops.switch_events())
51124 return;
51125
51126- atomic_inc(&oprofile_stats.multiplex_counter);
51127+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
51128 start_switch_worker();
51129 }
51130
51131diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
51132index ee2cfce..7f8f699 100644
51133--- a/drivers/oprofile/oprofile_files.c
51134+++ b/drivers/oprofile/oprofile_files.c
51135@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
51136
51137 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
51138
51139-static ssize_t timeout_read(struct file *file, char __user *buf,
51140+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
51141 size_t count, loff_t *offset)
51142 {
51143 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
51144diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
51145index 59659ce..6c860a0 100644
51146--- a/drivers/oprofile/oprofile_stats.c
51147+++ b/drivers/oprofile/oprofile_stats.c
51148@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
51149 cpu_buf->sample_invalid_eip = 0;
51150 }
51151
51152- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
51153- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
51154- atomic_set(&oprofile_stats.event_lost_overflow, 0);
51155- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
51156- atomic_set(&oprofile_stats.multiplex_counter, 0);
51157+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
51158+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
51159+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
51160+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
51161+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
51162 }
51163
51164
51165diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
51166index 1fc622b..8c48fc3 100644
51167--- a/drivers/oprofile/oprofile_stats.h
51168+++ b/drivers/oprofile/oprofile_stats.h
51169@@ -13,11 +13,11 @@
51170 #include <linux/atomic.h>
51171
51172 struct oprofile_stat_struct {
51173- atomic_t sample_lost_no_mm;
51174- atomic_t sample_lost_no_mapping;
51175- atomic_t bt_lost_no_mapping;
51176- atomic_t event_lost_overflow;
51177- atomic_t multiplex_counter;
51178+ atomic_unchecked_t sample_lost_no_mm;
51179+ atomic_unchecked_t sample_lost_no_mapping;
51180+ atomic_unchecked_t bt_lost_no_mapping;
51181+ atomic_unchecked_t event_lost_overflow;
51182+ atomic_unchecked_t multiplex_counter;
51183 };
51184
51185 extern struct oprofile_stat_struct oprofile_stats;
51186diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
51187index 3f49345..c750d0b 100644
51188--- a/drivers/oprofile/oprofilefs.c
51189+++ b/drivers/oprofile/oprofilefs.c
51190@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
51191
51192 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
51193 {
51194- atomic_t *val = file->private_data;
51195- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
51196+ atomic_unchecked_t *val = file->private_data;
51197+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
51198 }
51199
51200
51201@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
51202
51203
51204 int oprofilefs_create_ro_atomic(struct dentry *root,
51205- char const *name, atomic_t *val)
51206+ char const *name, atomic_unchecked_t *val)
51207 {
51208 return __oprofilefs_create_file(root, name,
51209 &atomic_ro_fops, 0444, val);
51210diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
51211index 61be1d9..dec05d7 100644
51212--- a/drivers/oprofile/timer_int.c
51213+++ b/drivers/oprofile/timer_int.c
51214@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
51215 return NOTIFY_OK;
51216 }
51217
51218-static struct notifier_block __refdata oprofile_cpu_notifier = {
51219+static struct notifier_block oprofile_cpu_notifier = {
51220 .notifier_call = oprofile_cpu_notify,
51221 };
51222
51223diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
51224index 3b47080..6cd05dd 100644
51225--- a/drivers/parport/procfs.c
51226+++ b/drivers/parport/procfs.c
51227@@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
51228
51229 *ppos += len;
51230
51231- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
51232+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
51233 }
51234
51235 #ifdef CONFIG_PARPORT_1284
51236@@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
51237
51238 *ppos += len;
51239
51240- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
51241+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
51242 }
51243 #endif /* IEEE1284.3 support. */
51244
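
Both parport procfs hunks add the same guard: len is validated against the on-stack buffer before copy_to_user(), so an oversized length can no longer leak adjacent kernel stack. A standalone sketch of the guarded copy (memcpy stands in for copy_to_user()):

    #include <stddef.h>
    #include <string.h>

    /* Validate the length against the real buffer size before copying,
     * rather than trusting earlier arithmetic. */
    static int copy_out_guarded(char *dst, const char *buffer,
                                size_t buffer_size, size_t len)
    {
        if (len > buffer_size)
            return -1;          /* would read past 'buffer' */
        memcpy(dst, buffer, len);
        return 0;
    }
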
51245diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
51246index 8dcccff..35d701d 100644
51247--- a/drivers/pci/hotplug/acpiphp_ibm.c
51248+++ b/drivers/pci/hotplug/acpiphp_ibm.c
51249@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
51250 goto init_cleanup;
51251 }
51252
51253- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
51254+ pax_open_kernel();
51255+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
51256+ pax_close_kernel();
51257 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
51258
51259 return retval;
51260diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
51261index 04fcd78..39e83f1 100644
51262--- a/drivers/pci/hotplug/cpcihp_generic.c
51263+++ b/drivers/pci/hotplug/cpcihp_generic.c
51264@@ -73,7 +73,6 @@ static u16 port;
51265 static unsigned int enum_bit;
51266 static u8 enum_mask;
51267
51268-static struct cpci_hp_controller_ops generic_hpc_ops;
51269 static struct cpci_hp_controller generic_hpc;
51270
51271 static int __init validate_parameters(void)
51272@@ -139,6 +138,10 @@ static int query_enum(void)
51273 return ((value & enum_mask) == enum_mask);
51274 }
51275
51276+static struct cpci_hp_controller_ops generic_hpc_ops = {
51277+ .query_enum = query_enum,
51278+};
51279+
51280 static int __init cpcihp_generic_init(void)
51281 {
51282 int status;
51283@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
51284 pci_dev_put(dev);
51285
51286 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
51287- generic_hpc_ops.query_enum = query_enum;
51288 generic_hpc.ops = &generic_hpc_ops;
51289
51290 status = cpci_hp_register_controller(&generic_hpc);
51291diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
51292index 6757b3e..d3bad62 100644
51293--- a/drivers/pci/hotplug/cpcihp_zt5550.c
51294+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
51295@@ -59,7 +59,6 @@
51296 /* local variables */
51297 static bool debug;
51298 static bool poll;
51299-static struct cpci_hp_controller_ops zt5550_hpc_ops;
51300 static struct cpci_hp_controller zt5550_hpc;
51301
51302 /* Primary cPCI bus bridge device */
51303@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
51304 return 0;
51305 }
51306
51307+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
51308+ .query_enum = zt5550_hc_query_enum,
51309+};
51310+
51311 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
51312 {
51313 int status;
51314@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
51315 dbg("returned from zt5550_hc_config");
51316
51317 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
51318- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
51319 zt5550_hpc.ops = &zt5550_hpc_ops;
51320 if(!poll) {
51321 zt5550_hpc.irq = hc_dev->irq;
51322 zt5550_hpc.irq_flags = IRQF_SHARED;
51323 zt5550_hpc.dev_id = hc_dev;
51324
51325- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
51326- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
51327- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
51328+ pax_open_kernel();
51329+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
51330+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
51331+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
51332+ pax_close_kernel();
51333 } else {
51334 info("using ENUM# polling mode");
51335 }
51336diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
51337index 0968a9b..5a00edf 100644
51338--- a/drivers/pci/hotplug/cpqphp_nvram.c
51339+++ b/drivers/pci/hotplug/cpqphp_nvram.c
51340@@ -427,9 +427,13 @@ static u32 store_HRT (void __iomem *rom_start)
51341
51342 void compaq_nvram_init (void __iomem *rom_start)
51343 {
51344+
51345+#ifndef CONFIG_PAX_KERNEXEC
51346 if (rom_start) {
51347 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
51348 }
51349+#endif
51350+
51351 dbg("int15 entry = %p\n", compaq_int15_entry_point);
51352
51353 /* initialize our int15 lock */
51354diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
51355index 56d8486..f26113f 100644
51356--- a/drivers/pci/hotplug/pci_hotplug_core.c
51357+++ b/drivers/pci/hotplug/pci_hotplug_core.c
51358@@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
51359 return -EINVAL;
51360 }
51361
51362- slot->ops->owner = owner;
51363- slot->ops->mod_name = mod_name;
51364+ pax_open_kernel();
51365+ *(struct module **)&slot->ops->owner = owner;
51366+ *(const char **)&slot->ops->mod_name = mod_name;
51367+ pax_close_kernel();
51368
51369 mutex_lock(&pci_hp_mutex);
51370 /*
51371diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
51372index a2297db..7c7d161 100644
51373--- a/drivers/pci/hotplug/pciehp_core.c
51374+++ b/drivers/pci/hotplug/pciehp_core.c
51375@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
51376 struct slot *slot = ctrl->slot;
51377 struct hotplug_slot *hotplug = NULL;
51378 struct hotplug_slot_info *info = NULL;
51379- struct hotplug_slot_ops *ops = NULL;
51380+ hotplug_slot_ops_no_const *ops = NULL;
51381 char name[SLOT_NAME_SIZE];
51382 int retval = -ENOMEM;
51383
51384diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
51385index 13f3d30..363cb44 100644
51386--- a/drivers/pci/msi.c
51387+++ b/drivers/pci/msi.c
51388@@ -523,8 +523,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
51389 {
51390 struct attribute **msi_attrs;
51391 struct attribute *msi_attr;
51392- struct device_attribute *msi_dev_attr;
51393- struct attribute_group *msi_irq_group;
51394+ device_attribute_no_const *msi_dev_attr;
51395+ attribute_group_no_const *msi_irq_group;
51396 const struct attribute_group **msi_irq_groups;
51397 struct msi_desc *entry;
51398 int ret = -ENOMEM;
51399@@ -584,7 +584,7 @@ error_attrs:
51400 count = 0;
51401 msi_attr = msi_attrs[count];
51402 while (msi_attr) {
51403- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
51404+ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
51405 kfree(msi_attr->name);
51406 kfree(msi_dev_attr);
51407 ++count;
51408diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
51409index 9ff0a90..e819dda 100644
51410--- a/drivers/pci/pci-sysfs.c
51411+++ b/drivers/pci/pci-sysfs.c
51412@@ -1134,7 +1134,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
51413 {
51414 /* allocate attribute structure, piggyback attribute name */
51415 int name_len = write_combine ? 13 : 10;
51416- struct bin_attribute *res_attr;
51417+ bin_attribute_no_const *res_attr;
51418 int retval;
51419
51420 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
51421@@ -1311,7 +1311,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
51422 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
51423 {
51424 int retval;
51425- struct bin_attribute *attr;
51426+ bin_attribute_no_const *attr;
51427
51428 /* If the device has VPD, try to expose it in sysfs. */
51429 if (dev->vpd) {
51430@@ -1358,7 +1358,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
51431 {
51432 int retval;
51433 int rom_size = 0;
51434- struct bin_attribute *attr;
51435+ bin_attribute_no_const *attr;
51436
51437 if (!sysfs_initialized)
51438 return -EACCES;
51439diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
51440index 0601890..dc15007 100644
51441--- a/drivers/pci/pci.h
51442+++ b/drivers/pci/pci.h
51443@@ -91,7 +91,7 @@ struct pci_vpd_ops {
51444 struct pci_vpd {
51445 unsigned int len;
51446 const struct pci_vpd_ops *ops;
51447- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
51448+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
51449 };
51450
51451 int pci_vpd_pci22_init(struct pci_dev *dev);
51452diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
51453index e1e7026..d28dd33 100644
51454--- a/drivers/pci/pcie/aspm.c
51455+++ b/drivers/pci/pcie/aspm.c
51456@@ -27,9 +27,9 @@
51457 #define MODULE_PARAM_PREFIX "pcie_aspm."
51458
51459 /* Note: those are not register definitions */
51460-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
51461-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
51462-#define ASPM_STATE_L1 (4) /* L1 state */
51463+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
51464+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
51465+#define ASPM_STATE_L1 (4U) /* L1 state */
51466 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
51467 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
51468
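
Suffixing the ASPM state literals with U keeps every expression built from these masks unsigned; with plain int literals, mixed signed/unsigned arithmetic promotes through int, which is where sign-extension and comparison surprises live. A small standalone illustration:

    #include <stdio.h>

    #define MASK_SIGNED   (1 << 2)    /* int */
    #define MASK_UNSIGNED (1U << 2)   /* unsigned int */

    int main(void)
    {
        /* With the U suffix the whole comparison is unsigned, so -1
         * converts to UINT_MAX and the result flips; keeping masks
         * uniformly unsigned makes such promotions predictable. */
        printf("%d\n", -1 < MASK_SIGNED);     /* 1: signed compare  */
        printf("%d\n", -1 < MASK_UNSIGNED);   /* 0: -1 -> UINT_MAX  */
        return 0;
    }
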
51469diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
51470index e3cf8a2..be1baf0 100644
51471--- a/drivers/pci/probe.c
51472+++ b/drivers/pci/probe.c
51473@@ -176,7 +176,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
51474 struct pci_bus_region region, inverted_region;
51475 bool bar_too_big = false, bar_too_high = false, bar_invalid = false;
51476
51477- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
51478+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
51479
51480 /* No printks while decoding is disabled! */
51481 if (!dev->mmio_always_on) {
51482diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
51483index 3f155e7..0f4b1f0 100644
51484--- a/drivers/pci/proc.c
51485+++ b/drivers/pci/proc.c
51486@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
51487 static int __init pci_proc_init(void)
51488 {
51489 struct pci_dev *dev = NULL;
51490+
51491+#ifdef CONFIG_GRKERNSEC_PROC_ADD
51492+#ifdef CONFIG_GRKERNSEC_PROC_USER
51493+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
51494+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51495+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
51496+#endif
51497+#else
51498 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
51499+#endif
51500 proc_create("devices", 0, proc_bus_pci_dir,
51501 &proc_bus_pci_dev_operations);
51502 proc_initialized = 1;
51503diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
51504index 7f1a2e2..bc4b405 100644
51505--- a/drivers/platform/chrome/chromeos_laptop.c
51506+++ b/drivers/platform/chrome/chromeos_laptop.c
51507@@ -395,7 +395,7 @@ static struct chromeos_laptop cr48 = {
51508 .callback = chromeos_laptop_dmi_matched, \
51509 .driver_data = (void *)&board_
51510
51511-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
51512+static struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
51513 {
51514 .ident = "Samsung Series 5 550",
51515 .matches = {
51516diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
51517index 297b664..ab91e39 100644
51518--- a/drivers/platform/x86/alienware-wmi.c
51519+++ b/drivers/platform/x86/alienware-wmi.c
51520@@ -133,7 +133,7 @@ struct wmax_led_args {
51521 } __packed;
51522
51523 static struct platform_device *platform_device;
51524-static struct device_attribute *zone_dev_attrs;
51525+static device_attribute_no_const *zone_dev_attrs;
51526 static struct attribute **zone_attrs;
51527 static struct platform_zone *zone_data;
51528
51529@@ -144,7 +144,7 @@ static struct platform_driver platform_driver = {
51530 }
51531 };
51532
51533-static struct attribute_group zone_attribute_group = {
51534+static attribute_group_no_const zone_attribute_group = {
51535 .name = "rgb_zones",
51536 };
51537
51538diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
51539index 3c6cced..12e0771 100644
51540--- a/drivers/platform/x86/asus-wmi.c
51541+++ b/drivers/platform/x86/asus-wmi.c
51542@@ -1592,6 +1592,10 @@ static int show_dsts(struct seq_file *m, void *data)
51543 int err;
51544 u32 retval = -1;
51545
51546+#ifdef CONFIG_GRKERNSEC_KMEM
51547+ return -EPERM;
51548+#endif
51549+
51550 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
51551
51552 if (err < 0)
51553@@ -1608,6 +1612,10 @@ static int show_devs(struct seq_file *m, void *data)
51554 int err;
51555 u32 retval = -1;
51556
51557+#ifdef CONFIG_GRKERNSEC_KMEM
51558+ return -EPERM;
51559+#endif
51560+
51561 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
51562 &retval);
51563
51564@@ -1632,6 +1640,10 @@ static int show_call(struct seq_file *m, void *data)
51565 union acpi_object *obj;
51566 acpi_status status;
51567
51568+#ifdef CONFIG_GRKERNSEC_KMEM
51569+ return -EPERM;
51570+#endif
51571+
51572 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
51573 1, asus->debug.method_id,
51574 &input, &output);
51575diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
51576index 62f8030..c7f2a45 100644
51577--- a/drivers/platform/x86/msi-laptop.c
51578+++ b/drivers/platform/x86/msi-laptop.c
51579@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
51580
51581 if (!quirks->ec_read_only) {
51582 /* allow userland write sysfs file */
51583- dev_attr_bluetooth.store = store_bluetooth;
51584- dev_attr_wlan.store = store_wlan;
51585- dev_attr_threeg.store = store_threeg;
51586- dev_attr_bluetooth.attr.mode |= S_IWUSR;
51587- dev_attr_wlan.attr.mode |= S_IWUSR;
51588- dev_attr_threeg.attr.mode |= S_IWUSR;
51589+ pax_open_kernel();
51590+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
51591+ *(void **)&dev_attr_wlan.store = store_wlan;
51592+ *(void **)&dev_attr_threeg.store = store_threeg;
51593+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
51594+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
51595+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
51596+ pax_close_kernel();
51597 }
51598
51599 /* disable hardware control by fn key */
51600diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
51601index 70222f2..8c8ce66 100644
51602--- a/drivers/platform/x86/msi-wmi.c
51603+++ b/drivers/platform/x86/msi-wmi.c
51604@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
51605 static void msi_wmi_notify(u32 value, void *context)
51606 {
51607 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
51608- static struct key_entry *key;
51609+ struct key_entry *key;
51610 union acpi_object *obj;
51611 acpi_status status;
51612
51613diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
51614index 9c5a074..06c976a 100644
51615--- a/drivers/platform/x86/sony-laptop.c
51616+++ b/drivers/platform/x86/sony-laptop.c
51617@@ -2527,7 +2527,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
51618 }
51619
51620 /* High speed charging function */
51621-static struct device_attribute *hsc_handle;
51622+static device_attribute_no_const *hsc_handle;
51623
51624 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
51625 struct device_attribute *attr,
51626@@ -2601,7 +2601,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
51627 }
51628
51629 /* low battery function */
51630-static struct device_attribute *lowbatt_handle;
51631+static device_attribute_no_const *lowbatt_handle;
51632
51633 static ssize_t sony_nc_lowbatt_store(struct device *dev,
51634 struct device_attribute *attr,
51635@@ -2667,7 +2667,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
51636 }
51637
51638 /* fan speed function */
51639-static struct device_attribute *fan_handle, *hsf_handle;
51640+static device_attribute_no_const *fan_handle, *hsf_handle;
51641
51642 static ssize_t sony_nc_hsfan_store(struct device *dev,
51643 struct device_attribute *attr,
51644@@ -2774,7 +2774,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
51645 }
51646
51647 /* USB charge function */
51648-static struct device_attribute *uc_handle;
51649+static device_attribute_no_const *uc_handle;
51650
51651 static ssize_t sony_nc_usb_charge_store(struct device *dev,
51652 struct device_attribute *attr,
51653@@ -2848,7 +2848,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
51654 }
51655
51656 /* Panel ID function */
51657-static struct device_attribute *panel_handle;
51658+static device_attribute_no_const *panel_handle;
51659
51660 static ssize_t sony_nc_panelid_show(struct device *dev,
51661 struct device_attribute *attr, char *buffer)
51662@@ -2895,7 +2895,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
51663 }
51664
51665 /* smart connect function */
51666-static struct device_attribute *sc_handle;
51667+static device_attribute_no_const *sc_handle;
51668
51669 static ssize_t sony_nc_smart_conn_store(struct device *dev,
51670 struct device_attribute *attr,
51671diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
51672index d82f196..5458f34 100644
51673--- a/drivers/platform/x86/thinkpad_acpi.c
51674+++ b/drivers/platform/x86/thinkpad_acpi.c
51675@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
51676 return 0;
51677 }
51678
51679-void static hotkey_mask_warn_incomplete_mask(void)
51680+static void hotkey_mask_warn_incomplete_mask(void)
51681 {
51682 /* log only what the user can fix... */
51683 const u32 wantedmask = hotkey_driver_mask &
51684@@ -2438,10 +2438,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
51685 && !tp_features.bright_unkfw)
51686 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
51687 }
51688+}
51689
51690 #undef TPACPI_COMPARE_KEY
51691 #undef TPACPI_MAY_SEND_KEY
51692-}
51693
51694 /*
51695 * Polling driver
51696diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
51697index 438d4c7..ca8a2fb 100644
51698--- a/drivers/pnp/pnpbios/bioscalls.c
51699+++ b/drivers/pnp/pnpbios/bioscalls.c
51700@@ -59,7 +59,7 @@ do { \
51701 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
51702 } while(0)
51703
51704-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
51705+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
51706 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
51707
51708 /*
51709@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
51710
51711 cpu = get_cpu();
51712 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
51713+
51714+ pax_open_kernel();
51715 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
51716+ pax_close_kernel();
51717
51718 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
51719 spin_lock_irqsave(&pnp_bios_lock, flags);
51720@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
51721 :"memory");
51722 spin_unlock_irqrestore(&pnp_bios_lock, flags);
51723
51724+ pax_open_kernel();
51725 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
51726+ pax_close_kernel();
51727+
51728 put_cpu();
51729
51730 /* If we get here and this is set then the PnP BIOS faulted on us. */
51731@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
51732 return status;
51733 }
51734
51735-void pnpbios_calls_init(union pnp_bios_install_struct *header)
51736+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
51737 {
51738 int i;
51739
51740@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
51741 pnp_bios_callpoint.offset = header->fields.pm16offset;
51742 pnp_bios_callpoint.segment = PNP_CS16;
51743
51744+ pax_open_kernel();
51745+
51746 for_each_possible_cpu(i) {
51747 struct desc_struct *gdt = get_cpu_gdt_table(i);
51748 if (!gdt)
51749@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
51750 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
51751 (unsigned long)__va(header->fields.pm16dseg));
51752 }
51753+
51754+ pax_close_kernel();
51755 }
51756diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
51757index 0c52e2a..3421ab7 100644
51758--- a/drivers/power/pda_power.c
51759+++ b/drivers/power/pda_power.c
51760@@ -37,7 +37,11 @@ static int polling;
51761
51762 #if IS_ENABLED(CONFIG_USB_PHY)
51763 static struct usb_phy *transceiver;
51764-static struct notifier_block otg_nb;
51765+static int otg_handle_notification(struct notifier_block *nb,
51766+ unsigned long event, void *unused);
51767+static struct notifier_block otg_nb = {
51768+ .notifier_call = otg_handle_notification
51769+};
51770 #endif
51771
51772 static struct regulator *ac_draw;
51773@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
51774
51775 #if IS_ENABLED(CONFIG_USB_PHY)
51776 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
51777- otg_nb.notifier_call = otg_handle_notification;
51778 ret = usb_register_notifier(transceiver, &otg_nb);
51779 if (ret) {
51780 dev_err(dev, "failure to register otg notifier\n");
51781diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
51782index cc439fd..8fa30df 100644
51783--- a/drivers/power/power_supply.h
51784+++ b/drivers/power/power_supply.h
51785@@ -16,12 +16,12 @@ struct power_supply;
51786
51787 #ifdef CONFIG_SYSFS
51788
51789-extern void power_supply_init_attrs(struct device_type *dev_type);
51790+extern void power_supply_init_attrs(void);
51791 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
51792
51793 #else
51794
51795-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
51796+static inline void power_supply_init_attrs(void) {}
51797 #define power_supply_uevent NULL
51798
51799 #endif /* CONFIG_SYSFS */
51800diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
51801index 5a5a24e..f7a3754 100644
51802--- a/drivers/power/power_supply_core.c
51803+++ b/drivers/power/power_supply_core.c
51804@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
51805 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
51806 EXPORT_SYMBOL_GPL(power_supply_notifier);
51807
51808-static struct device_type power_supply_dev_type;
51809+extern const struct attribute_group *power_supply_attr_groups[];
51810+static struct device_type power_supply_dev_type = {
51811+ .groups = power_supply_attr_groups,
51812+};
51813
51814 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
51815 struct power_supply *supply)
51816@@ -639,7 +642,7 @@ static int __init power_supply_class_init(void)
51817 return PTR_ERR(power_supply_class);
51818
51819 power_supply_class->dev_uevent = power_supply_uevent;
51820- power_supply_init_attrs(&power_supply_dev_type);
51821+ power_supply_init_attrs();
51822
51823 return 0;
51824 }
51825diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
51826index 44420d1..967126e 100644
51827--- a/drivers/power/power_supply_sysfs.c
51828+++ b/drivers/power/power_supply_sysfs.c
51829@@ -230,17 +230,15 @@ static struct attribute_group power_supply_attr_group = {
51830 .is_visible = power_supply_attr_is_visible,
51831 };
51832
51833-static const struct attribute_group *power_supply_attr_groups[] = {
51834+const struct attribute_group *power_supply_attr_groups[] = {
51835 &power_supply_attr_group,
51836 NULL,
51837 };
51838
51839-void power_supply_init_attrs(struct device_type *dev_type)
51840+void power_supply_init_attrs(void)
51841 {
51842 int i;
51843
51844- dev_type->groups = power_supply_attr_groups;
51845-
51846 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
51847 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
51848 }
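
The three power_supply hunks are one refactor: the attribute-group array is exported, bound into the device_type through a designated initializer, and the now-redundant dev_type parameter is dropped from power_supply_init_attrs(). With the runtime write to dev_type->groups gone, the structure becomes effectively write-once. Reduced to essentials, with invented names:

#include <linux/device.h>

extern const struct attribute_group *my_attr_groups[];

/* .groups is wired at compile time instead of being patched during
 * class init, so my_dev_type needs no writable slot at runtime. */
static struct device_type my_dev_type = {
	.groups = my_attr_groups,
};
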
51849diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
51850index 84419af..268ede8 100644
51851--- a/drivers/powercap/powercap_sys.c
51852+++ b/drivers/powercap/powercap_sys.c
51853@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
51854 struct device_attribute name_attr;
51855 };
51856
51857+static ssize_t show_constraint_name(struct device *dev,
51858+ struct device_attribute *dev_attr,
51859+ char *buf);
51860+
51861 static struct powercap_constraint_attr
51862- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
51863+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
51864+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
51865+ .power_limit_attr = {
51866+ .attr = {
51867+ .name = NULL,
51868+ .mode = S_IWUSR | S_IRUGO
51869+ },
51870+ .show = show_constraint_power_limit_uw,
51871+ .store = store_constraint_power_limit_uw
51872+ },
51873+
51874+ .time_window_attr = {
51875+ .attr = {
51876+ .name = NULL,
51877+ .mode = S_IWUSR | S_IRUGO
51878+ },
51879+ .show = show_constraint_time_window_us,
51880+ .store = store_constraint_time_window_us
51881+ },
51882+
51883+ .max_power_attr = {
51884+ .attr = {
51885+ .name = NULL,
51886+ .mode = S_IRUGO
51887+ },
51888+ .show = show_constraint_max_power_uw,
51889+ .store = NULL
51890+ },
51891+
51892+ .min_power_attr = {
51893+ .attr = {
51894+ .name = NULL,
51895+ .mode = S_IRUGO
51896+ },
51897+ .show = show_constraint_min_power_uw,
51898+ .store = NULL
51899+ },
51900+
51901+ .max_time_window_attr = {
51902+ .attr = {
51903+ .name = NULL,
51904+ .mode = S_IRUGO
51905+ },
51906+ .show = show_constraint_max_time_window_us,
51907+ .store = NULL
51908+ },
51909+
51910+ .min_time_window_attr = {
51911+ .attr = {
51912+ .name = NULL,
51913+ .mode = S_IRUGO
51914+ },
51915+ .show = show_constraint_min_time_window_us,
51916+ .store = NULL
51917+ },
51918+
51919+ .name_attr = {
51920+ .attr = {
51921+ .name = NULL,
51922+ .mode = S_IRUGO
51923+ },
51924+ .show = show_constraint_name,
51925+ .store = NULL
51926+ }
51927+ }
51928+};
51929
51930 /* A list of powercap control_types */
51931 static LIST_HEAD(powercap_cntrl_list);
51932@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
51933 }
51934
51935 static int create_constraint_attribute(int id, const char *name,
51936- int mode,
51937- struct device_attribute *dev_attr,
51938- ssize_t (*show)(struct device *,
51939- struct device_attribute *, char *),
51940- ssize_t (*store)(struct device *,
51941- struct device_attribute *,
51942- const char *, size_t)
51943- )
51944+ struct device_attribute *dev_attr)
51945 {
51946+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
51947
51948- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
51949- id, name);
51950- if (!dev_attr->attr.name)
51951+ if (!name)
51952 return -ENOMEM;
51953- dev_attr->attr.mode = mode;
51954- dev_attr->show = show;
51955- dev_attr->store = store;
51956+
51957+ pax_open_kernel();
51958+ *(const char **)&dev_attr->attr.name = name;
51959+ pax_close_kernel();
51960
51961 return 0;
51962 }
51963@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
51964
51965 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
51966 ret = create_constraint_attribute(i, "power_limit_uw",
51967- S_IWUSR | S_IRUGO,
51968- &constraint_attrs[i].power_limit_attr,
51969- show_constraint_power_limit_uw,
51970- store_constraint_power_limit_uw);
51971+ &constraint_attrs[i].power_limit_attr);
51972 if (ret)
51973 goto err_alloc;
51974 ret = create_constraint_attribute(i, "time_window_us",
51975- S_IWUSR | S_IRUGO,
51976- &constraint_attrs[i].time_window_attr,
51977- show_constraint_time_window_us,
51978- store_constraint_time_window_us);
51979+ &constraint_attrs[i].time_window_attr);
51980 if (ret)
51981 goto err_alloc;
51982- ret = create_constraint_attribute(i, "name", S_IRUGO,
51983- &constraint_attrs[i].name_attr,
51984- show_constraint_name,
51985- NULL);
51986+ ret = create_constraint_attribute(i, "name",
51987+ &constraint_attrs[i].name_attr);
51988 if (ret)
51989 goto err_alloc;
51990- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
51991- &constraint_attrs[i].max_power_attr,
51992- show_constraint_max_power_uw,
51993- NULL);
51994+ ret = create_constraint_attribute(i, "max_power_uw",
51995+ &constraint_attrs[i].max_power_attr);
51996 if (ret)
51997 goto err_alloc;
51998- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
51999- &constraint_attrs[i].min_power_attr,
52000- show_constraint_min_power_uw,
52001- NULL);
52002+ ret = create_constraint_attribute(i, "min_power_uw",
52003+ &constraint_attrs[i].min_power_attr);
52004 if (ret)
52005 goto err_alloc;
52006 ret = create_constraint_attribute(i, "max_time_window_us",
52007- S_IRUGO,
52008- &constraint_attrs[i].max_time_window_attr,
52009- show_constraint_max_time_window_us,
52010- NULL);
52011+ &constraint_attrs[i].max_time_window_attr);
52012 if (ret)
52013 goto err_alloc;
52014 ret = create_constraint_attribute(i, "min_time_window_us",
52015- S_IRUGO,
52016- &constraint_attrs[i].min_time_window_attr,
52017- show_constraint_min_time_window_us,
52018- NULL);
52019+ &constraint_attrs[i].min_time_window_attr);
52020 if (ret)
52021 goto err_alloc;
52022
52023@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
52024 power_zone->zone_dev_attrs[count++] =
52025 &dev_attr_max_energy_range_uj.attr;
52026 if (power_zone->ops->get_energy_uj) {
52027+ pax_open_kernel();
52028 if (power_zone->ops->reset_energy_uj)
52029- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
52030+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
52031 else
52032- dev_attr_energy_uj.attr.mode = S_IRUGO;
52033+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
52034+ pax_close_kernel();
52035 power_zone->zone_dev_attrs[count++] =
52036 &dev_attr_energy_uj.attr;
52037 }
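
The big powercap block leans on a GNU C extension, range designators: [first ... last] = value seeds a whole slice of an array with one initializer, which is how every one of the MAX_CONSTRAINTS_PER_ZONE slots receives an identical compile-time attribute template. Only the kasprintf()-built name still has to be patched in at runtime, under a pax window and a const-defeating cast. The extension in isolation:

/* GNU extension: initialize a whole run of array elements at once. */
#define NSLOT 8

struct tmpl {
	const char *name;
	unsigned int mode;
};

static struct tmpl slots[NSLOT] = {
	[0 ... NSLOT - 1] = {
		.name = NULL,		/* filled in at runtime */
		.mode = 0444,		/* S_IRUGO */
	},
};
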
52038diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
52039index ee3de34..bec7285 100644
52040--- a/drivers/ptp/Kconfig
52041+++ b/drivers/ptp/Kconfig
52042@@ -8,7 +8,6 @@ config PTP_1588_CLOCK
52043 tristate "PTP clock support"
52044 depends on NET
52045 select PPS
52046- select NET_PTP_CLASSIFY
52047 help
52048 The IEEE 1588 standard defines a method to precisely
52049 synchronize distributed clocks over Ethernet networks. The
52050diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
52051index 9c5d414..c7900ce 100644
52052--- a/drivers/ptp/ptp_private.h
52053+++ b/drivers/ptp/ptp_private.h
52054@@ -51,7 +51,7 @@ struct ptp_clock {
52055 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
52056 wait_queue_head_t tsev_wq;
52057 int defunct; /* tells readers to go away when clock is being removed */
52058- struct device_attribute *pin_dev_attr;
52059+ device_attribute_no_const *pin_dev_attr;
52060 struct attribute **pin_attr;
52061 struct attribute_group pin_attr_group;
52062 };
52063diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
52064index 302e626..12579af 100644
52065--- a/drivers/ptp/ptp_sysfs.c
52066+++ b/drivers/ptp/ptp_sysfs.c
52067@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
52068 goto no_pin_attr;
52069
52070 for (i = 0; i < n_pins; i++) {
52071- struct device_attribute *da = &ptp->pin_dev_attr[i];
52072+ device_attribute_no_const *da = &ptp->pin_dev_attr[i];
52073 sysfs_attr_init(&da->attr);
52074 da->attr.name = info->pin_config[i].name;
52075 da->attr.mode = 0644;
52076diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
52077index 4c1f999..11078c9 100644
52078--- a/drivers/regulator/core.c
52079+++ b/drivers/regulator/core.c
52080@@ -3391,7 +3391,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
52081 {
52082 const struct regulation_constraints *constraints = NULL;
52083 const struct regulator_init_data *init_data;
52084- static atomic_t regulator_no = ATOMIC_INIT(0);
52085+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
52086 struct regulator_dev *rdev;
52087 struct device *dev;
52088 int ret, i;
52089@@ -3461,7 +3461,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
52090 rdev->dev.of_node = of_node_get(config->of_node);
52091 rdev->dev.parent = dev;
52092 dev_set_name(&rdev->dev, "regulator.%d",
52093- atomic_inc_return(&regulator_no) - 1);
52094+ atomic_inc_return_unchecked(&regulator_no) - 1);
52095 ret = device_register(&rdev->dev);
52096 if (ret != 0) {
52097 put_device(&rdev->dev);
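
regulator_no is the first of many counters in this section converted to atomic_unchecked_t (the fcoe ctlr/fcf IDs, scsi_host_next_hn, the libfc exchange statistics, the lpfc trace counters, and more). Under PaX's REFCOUNT hardening, the ordinary atomic_t operations trap on signed overflow to defeat reference-count-overflow exploits; the _unchecked variants opt out for counters whose wraparound is harmless, such as instance numbers and statistics. When the hardening is compiled out, the unchecked forms collapse back to the stock atomics, roughly along these lines (a sketch, not the patch's exact definitions):

#include <linux/atomic.h>

#ifndef CONFIG_PAX_REFCOUNT
/* Without REFCOUNT there is nothing to opt out of; alias everything
 * to the ordinary atomics. */
typedef atomic_t atomic_unchecked_t;
#define atomic_read_unchecked(v)		atomic_read(v)
#define atomic_set_unchecked(v, i)		atomic_set((v), (i))
#define atomic_inc_unchecked(v)			atomic_inc(v)
#define atomic_inc_return_unchecked(v)		atomic_inc_return(v)
#define atomic_add_return_unchecked(i, v)	atomic_add_return((i), (v))
#endif
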
52098diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
52099index 2fc4111..6aa88ca 100644
52100--- a/drivers/regulator/max8660.c
52101+++ b/drivers/regulator/max8660.c
52102@@ -424,8 +424,10 @@ static int max8660_probe(struct i2c_client *client,
52103 max8660->shadow_regs[MAX8660_OVER1] = 5;
52104 } else {
52105 /* Otherwise devices can be toggled via software */
52106- max8660_dcdc_ops.enable = max8660_dcdc_enable;
52107- max8660_dcdc_ops.disable = max8660_dcdc_disable;
52108+ pax_open_kernel();
52109+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
52110+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
52111+ pax_close_kernel();
52112 }
52113
52114 /*
52115diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
52116index dbedf17..18ff6b7 100644
52117--- a/drivers/regulator/max8973-regulator.c
52118+++ b/drivers/regulator/max8973-regulator.c
52119@@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
52120 if (!pdata || !pdata->enable_ext_control) {
52121 max->desc.enable_reg = MAX8973_VOUT;
52122 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
52123- max->ops.enable = regulator_enable_regmap;
52124- max->ops.disable = regulator_disable_regmap;
52125- max->ops.is_enabled = regulator_is_enabled_regmap;
52126+ pax_open_kernel();
52127+ *(void **)&max->ops.enable = regulator_enable_regmap;
52128+ *(void **)&max->ops.disable = regulator_disable_regmap;
52129+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
52130+ pax_close_kernel();
52131 }
52132
52133 if (pdata) {
52134diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
52135index f374fa5..26f0683 100644
52136--- a/drivers/regulator/mc13892-regulator.c
52137+++ b/drivers/regulator/mc13892-regulator.c
52138@@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
52139 }
52140 mc13xxx_unlock(mc13892);
52141
52142- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
52143+ pax_open_kernel();
52144+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
52145 = mc13892_vcam_set_mode;
52146- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
52147+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
52148 = mc13892_vcam_get_mode;
52149+ pax_close_kernel();
52150
52151 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
52152 ARRAY_SIZE(mc13892_regulators));
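
max8660, max8973 and mc13892 all show the companion idiom to constification. Once an ops structure is const, a legitimate one-time override needs both a PaX write window and a cast that defeats the qualifier, because a plain member assignment would no longer compile. In outline, with invented names:

struct my_ops {
	int (*enable)(void);
};

static int my_enable(void)
{
	return 0;
}

static void install_enable(const struct my_ops *ops)
{
	pax_open_kernel();			/* page made writable */
	*(void **)&ops->enable = my_enable;	/* cast silences the const */
	pax_close_kernel();			/* page read-only again */
}

The same cast-through-pointer trick reappears elsewhere in this section for the rtc nvram sizes, dev_attr_energy_uj.attr.mode and the lpfc transport hooks.
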
52153diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
52154index b0e4a3e..e5dc11e 100644
52155--- a/drivers/rtc/rtc-cmos.c
52156+++ b/drivers/rtc/rtc-cmos.c
52157@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
52158 hpet_rtc_timer_init();
52159
52160 /* export at least the first block of NVRAM */
52161- nvram.size = address_space - NVRAM_OFFSET;
52162+ pax_open_kernel();
52163+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
52164+ pax_close_kernel();
52165 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
52166 if (retval < 0) {
52167 dev_dbg(dev, "can't create nvram file? %d\n", retval);
52168diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
52169index d049393..bb20be0 100644
52170--- a/drivers/rtc/rtc-dev.c
52171+++ b/drivers/rtc/rtc-dev.c
52172@@ -16,6 +16,7 @@
52173 #include <linux/module.h>
52174 #include <linux/rtc.h>
52175 #include <linux/sched.h>
52176+#include <linux/grsecurity.h>
52177 #include "rtc-core.h"
52178
52179 static dev_t rtc_devt;
52180@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
52181 if (copy_from_user(&tm, uarg, sizeof(tm)))
52182 return -EFAULT;
52183
52184+ gr_log_timechange();
52185+
52186 return rtc_set_time(rtc, &tm);
52187
52188 case RTC_PIE_ON:
52189diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
52190index f03d5ba..8325bf6 100644
52191--- a/drivers/rtc/rtc-ds1307.c
52192+++ b/drivers/rtc/rtc-ds1307.c
52193@@ -107,7 +107,7 @@ struct ds1307 {
52194 u8 offset; /* register's offset */
52195 u8 regs[11];
52196 u16 nvram_offset;
52197- struct bin_attribute *nvram;
52198+ bin_attribute_no_const *nvram;
52199 enum ds_type type;
52200 unsigned long flags;
52201 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
52202diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
52203index 11880c1..b823aa4 100644
52204--- a/drivers/rtc/rtc-m48t59.c
52205+++ b/drivers/rtc/rtc-m48t59.c
52206@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
52207 if (IS_ERR(m48t59->rtc))
52208 return PTR_ERR(m48t59->rtc);
52209
52210- m48t59_nvram_attr.size = pdata->offset;
52211+ pax_open_kernel();
52212+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
52213+ pax_close_kernel();
52214
52215 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
52216 if (ret)
52217diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
52218index e693af6..2e525b6 100644
52219--- a/drivers/scsi/bfa/bfa_fcpim.h
52220+++ b/drivers/scsi/bfa/bfa_fcpim.h
52221@@ -36,7 +36,7 @@ struct bfa_iotag_s {
52222
52223 struct bfa_itn_s {
52224 bfa_isr_func_t isr;
52225-};
52226+} __no_const;
52227
52228 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
52229 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
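
__no_const is the flip side of the constify plugin: structures consisting only of function pointers are made implicitly const by the plugin, so a struct whose hooks really are installed at runtime, like bfa_itn_s's isr here and bfa_ioc_cbfn_s/bfa_ioc_hwif_s below, must be annotated to opt out. A sketch of the usage, assuming a no-op fallback when the plugin is absent:

#ifndef __no_const
#define __no_const	/* harmless without the constify plugin */
#endif

/* An all-function-pointer struct that must stay writable, because
 * the hook is chosen per-device after probe. */
struct my_itn {
	void (*isr)(void *bfa, void *msg);
} __no_const;
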
52230diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
52231index a3ab5cc..8143622 100644
52232--- a/drivers/scsi/bfa/bfa_fcs.c
52233+++ b/drivers/scsi/bfa/bfa_fcs.c
52234@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
52235 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
52236
52237 static struct bfa_fcs_mod_s fcs_modules[] = {
52238- { bfa_fcs_port_attach, NULL, NULL },
52239- { bfa_fcs_uf_attach, NULL, NULL },
52240- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
52241- bfa_fcs_fabric_modexit },
52242+ {
52243+ .attach = bfa_fcs_port_attach,
52244+ .modinit = NULL,
52245+ .modexit = NULL
52246+ },
52247+ {
52248+ .attach = bfa_fcs_uf_attach,
52249+ .modinit = NULL,
52250+ .modexit = NULL
52251+ },
52252+ {
52253+ .attach = bfa_fcs_fabric_attach,
52254+ .modinit = bfa_fcs_fabric_modinit,
52255+ .modexit = bfa_fcs_fabric_modexit
52256+ },
52257 };
52258
52259 /*
52260diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
52261index ff75ef8..2dfe00a 100644
52262--- a/drivers/scsi/bfa/bfa_fcs_lport.c
52263+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
52264@@ -89,15 +89,26 @@ static struct {
52265 void (*offline) (struct bfa_fcs_lport_s *port);
52266 } __port_action[] = {
52267 {
52268- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
52269- bfa_fcs_lport_unknown_offline}, {
52270- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
52271- bfa_fcs_lport_fab_offline}, {
52272- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
52273- bfa_fcs_lport_n2n_offline}, {
52274- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
52275- bfa_fcs_lport_loop_offline},
52276- };
52277+ .init = bfa_fcs_lport_unknown_init,
52278+ .online = bfa_fcs_lport_unknown_online,
52279+ .offline = bfa_fcs_lport_unknown_offline
52280+ },
52281+ {
52282+ .init = bfa_fcs_lport_fab_init,
52283+ .online = bfa_fcs_lport_fab_online,
52284+ .offline = bfa_fcs_lport_fab_offline
52285+ },
52286+ {
52287+ .init = bfa_fcs_lport_n2n_init,
52288+ .online = bfa_fcs_lport_n2n_online,
52289+ .offline = bfa_fcs_lport_n2n_offline
52290+ },
52291+ {
52292+ .init = bfa_fcs_lport_loop_init,
52293+ .online = bfa_fcs_lport_loop_online,
52294+ .offline = bfa_fcs_lport_loop_offline
52295+ },
52296+};
52297
52298 /*
52299 * fcs_port_sm FCS logical port state machine
52300diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
52301index 2e28392..9d865b6 100644
52302--- a/drivers/scsi/bfa/bfa_ioc.h
52303+++ b/drivers/scsi/bfa/bfa_ioc.h
52304@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
52305 bfa_ioc_disable_cbfn_t disable_cbfn;
52306 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
52307 bfa_ioc_reset_cbfn_t reset_cbfn;
52308-};
52309+} __no_const;
52310
52311 /*
52312 * IOC event notification mechanism.
52313@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
52314 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
52315 enum bfi_ioc_state fwstate);
52316 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
52317-};
52318+} __no_const;
52319
52320 /*
52321 * Queue element to wait for room in request queue. FIFO order is
52322diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
52323index a14c784..6de6790 100644
52324--- a/drivers/scsi/bfa/bfa_modules.h
52325+++ b/drivers/scsi/bfa/bfa_modules.h
52326@@ -78,12 +78,12 @@ enum {
52327 \
52328 extern struct bfa_module_s hal_mod_ ## __mod; \
52329 struct bfa_module_s hal_mod_ ## __mod = { \
52330- bfa_ ## __mod ## _meminfo, \
52331- bfa_ ## __mod ## _attach, \
52332- bfa_ ## __mod ## _detach, \
52333- bfa_ ## __mod ## _start, \
52334- bfa_ ## __mod ## _stop, \
52335- bfa_ ## __mod ## _iocdisable, \
52336+ .meminfo = bfa_ ## __mod ## _meminfo, \
52337+ .attach = bfa_ ## __mod ## _attach, \
52338+ .detach = bfa_ ## __mod ## _detach, \
52339+ .start = bfa_ ## __mod ## _start, \
52340+ .stop = bfa_ ## __mod ## _stop, \
52341+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
52342 }
52343
52344 #define BFA_CACHELINE_SZ (256)
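
The BFA_MODULE() rewrite combines two mechanisms: ## token pasting derives the six callback names from the module name, and designated initializers keep each callback bound to the right field even if bfa_module_s is ever reordered. The machinery in miniature:

struct toy_module {
	void (*start)(void);
	void (*stop)(void);
};

#define TOY_MODULE(mod)				\
	struct toy_module toy_mod_##mod = {	\
		.start = mod##_start,		\
		.stop  = mod##_stop,		\
	}

static void demo_start(void) { }
static void demo_stop(void) { }

TOY_MODULE(demo);	/* expands to toy_mod_demo = { .start = demo_start, ... } */
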
52345diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
52346index 045c4e1..13de803 100644
52347--- a/drivers/scsi/fcoe/fcoe_sysfs.c
52348+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
52349@@ -33,8 +33,8 @@
52350 */
52351 #include "libfcoe.h"
52352
52353-static atomic_t ctlr_num;
52354-static atomic_t fcf_num;
52355+static atomic_unchecked_t ctlr_num;
52356+static atomic_unchecked_t fcf_num;
52357
52358 /*
52359 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
52360@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
52361 if (!ctlr)
52362 goto out;
52363
52364- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
52365+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
52366 ctlr->f = f;
52367 ctlr->mode = FIP_CONN_TYPE_FABRIC;
52368 INIT_LIST_HEAD(&ctlr->fcfs);
52369@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
52370 fcf->dev.parent = &ctlr->dev;
52371 fcf->dev.bus = &fcoe_bus_type;
52372 fcf->dev.type = &fcoe_fcf_device_type;
52373- fcf->id = atomic_inc_return(&fcf_num) - 1;
52374+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
52375 fcf->state = FCOE_FCF_STATE_UNKNOWN;
52376
52377 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
52378@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
52379 {
52380 int error;
52381
52382- atomic_set(&ctlr_num, 0);
52383- atomic_set(&fcf_num, 0);
52384+ atomic_set_unchecked(&ctlr_num, 0);
52385+ atomic_set_unchecked(&fcf_num, 0);
52386
52387 error = bus_register(&fcoe_bus_type);
52388 if (error)
52389diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
52390index 3cbb57a..95e47a3 100644
52391--- a/drivers/scsi/hosts.c
52392+++ b/drivers/scsi/hosts.c
52393@@ -42,7 +42,7 @@
52394 #include "scsi_logging.h"
52395
52396
52397-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
52398+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
52399
52400
52401 static void scsi_host_cls_release(struct device *dev)
52402@@ -369,7 +369,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
52403 * subtract one because we increment first then return, but we need to
52404 * know what the next host number was before increment
52405 */
52406- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
52407+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
52408 shost->dma_channel = 0xff;
52409
52410 /* These three are default values which can be overridden */
52411diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
52412index 489e83b..193815b 100644
52413--- a/drivers/scsi/hpsa.c
52414+++ b/drivers/scsi/hpsa.c
52415@@ -701,10 +701,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
52416 unsigned long flags;
52417
52418 if (h->transMethod & CFGTBL_Trans_io_accel1)
52419- return h->access.command_completed(h, q);
52420+ return h->access->command_completed(h, q);
52421
52422 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
52423- return h->access.command_completed(h, q);
52424+ return h->access->command_completed(h, q);
52425
52426 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
52427 a = rq->head[rq->current_entry];
52428@@ -5455,7 +5455,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
52429 while (!list_empty(&h->reqQ)) {
52430 c = list_entry(h->reqQ.next, struct CommandList, list);
52431 /* can't do anything if fifo is full */
52432- if ((h->access.fifo_full(h))) {
52433+ if ((h->access->fifo_full(h))) {
52434 h->fifo_recently_full = 1;
52435 dev_warn(&h->pdev->dev, "fifo full\n");
52436 break;
52437@@ -5477,7 +5477,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
52438
52439 /* Tell the controller execute command */
52440 spin_unlock_irqrestore(&h->lock, *flags);
52441- h->access.submit_command(h, c);
52442+ h->access->submit_command(h, c);
52443 spin_lock_irqsave(&h->lock, *flags);
52444 }
52445 }
52446@@ -5493,17 +5493,17 @@ static void lock_and_start_io(struct ctlr_info *h)
52447
52448 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
52449 {
52450- return h->access.command_completed(h, q);
52451+ return h->access->command_completed(h, q);
52452 }
52453
52454 static inline bool interrupt_pending(struct ctlr_info *h)
52455 {
52456- return h->access.intr_pending(h);
52457+ return h->access->intr_pending(h);
52458 }
52459
52460 static inline long interrupt_not_for_us(struct ctlr_info *h)
52461 {
52462- return (h->access.intr_pending(h) == 0) ||
52463+ return (h->access->intr_pending(h) == 0) ||
52464 (h->interrupts_enabled == 0);
52465 }
52466
52467@@ -6459,7 +6459,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
52468 if (prod_index < 0)
52469 return -ENODEV;
52470 h->product_name = products[prod_index].product_name;
52471- h->access = *(products[prod_index].access);
52472+ h->access = products[prod_index].access;
52473
52474 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
52475 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
52476@@ -6781,7 +6781,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
52477 unsigned long flags;
52478 u32 lockup_detected;
52479
52480- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52481+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52482 spin_lock_irqsave(&h->lock, flags);
52483 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
52484 if (!lockup_detected) {
52485@@ -7022,7 +7022,7 @@ reinit_after_soft_reset:
52486 }
52487
52488 /* make sure the board interrupts are off */
52489- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52490+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52491
52492 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
52493 goto clean2;
52494@@ -7057,7 +7057,7 @@ reinit_after_soft_reset:
52495 * fake ones to scoop up any residual completions.
52496 */
52497 spin_lock_irqsave(&h->lock, flags);
52498- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52499+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52500 spin_unlock_irqrestore(&h->lock, flags);
52501 free_irqs(h);
52502 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
52503@@ -7076,9 +7076,9 @@ reinit_after_soft_reset:
52504 dev_info(&h->pdev->dev, "Board READY.\n");
52505 dev_info(&h->pdev->dev,
52506 "Waiting for stale completions to drain.\n");
52507- h->access.set_intr_mask(h, HPSA_INTR_ON);
52508+ h->access->set_intr_mask(h, HPSA_INTR_ON);
52509 msleep(10000);
52510- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52511+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52512
52513 rc = controller_reset_failed(h->cfgtable);
52514 if (rc)
52515@@ -7104,7 +7104,7 @@ reinit_after_soft_reset:
52516 h->drv_req_rescan = 0;
52517
52518 /* Turn the interrupts on so we can service requests */
52519- h->access.set_intr_mask(h, HPSA_INTR_ON);
52520+ h->access->set_intr_mask(h, HPSA_INTR_ON);
52521
52522 hpsa_hba_inquiry(h);
52523 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
52524@@ -7169,7 +7169,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
52525 * To write all data in the battery backed cache to disks
52526 */
52527 hpsa_flush_cache(h);
52528- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52529+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52530 hpsa_free_irqs_and_disable_msix(h);
52531 }
52532
52533@@ -7287,7 +7287,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52534 CFGTBL_Trans_enable_directed_msix |
52535 (trans_support & (CFGTBL_Trans_io_accel1 |
52536 CFGTBL_Trans_io_accel2));
52537- struct access_method access = SA5_performant_access;
52538+ struct access_method *access = &SA5_performant_access;
52539
52540 /* This is a bit complicated. There are 8 registers on
52541 * the controller which we write to to tell it 8 different
52542@@ -7329,7 +7329,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52543 * perform the superfluous readl() after each command submission.
52544 */
52545 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
52546- access = SA5_performant_access_no_read;
52547+ access = &SA5_performant_access_no_read;
52548
52549 /* Controller spec: zero out this buffer. */
52550 for (i = 0; i < h->nreply_queues; i++)
52551@@ -7359,12 +7359,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52552 * enable outbound interrupt coalescing in accelerator mode;
52553 */
52554 if (trans_support & CFGTBL_Trans_io_accel1) {
52555- access = SA5_ioaccel_mode1_access;
52556+ access = &SA5_ioaccel_mode1_access;
52557 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
52558 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
52559 } else {
52560 if (trans_support & CFGTBL_Trans_io_accel2) {
52561- access = SA5_ioaccel_mode2_access;
52562+ access = &SA5_ioaccel_mode2_access;
52563 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
52564 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
52565 }
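
The hpsa conversion is the bulkiest change in this stretch. ctlr_info previously embedded a copy of the selected access_method, so every controller carried its own writable duplicate of the ops table; holding a pointer instead lets all controllers share one table that can live in read-only memory, at the price of touching every call site to turn h->access.fn() into h->access->fn(). The trade-off, reduced:

struct access_ops {
	int (*submit)(void *cmd);
};

/* before: writable per-controller copy    after: one shared table */
struct ctlr_before { struct access_ops access; };
struct ctlr_after  { const struct access_ops *access; };

static int submit_after(struct ctlr_after *h, void *cmd)
{
	return h->access->submit(cmd);	/* '.' became '->' at each call site */
}
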
52566diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
52567index 24472ce..8782caf 100644
52568--- a/drivers/scsi/hpsa.h
52569+++ b/drivers/scsi/hpsa.h
52570@@ -127,7 +127,7 @@ struct ctlr_info {
52571 unsigned int msix_vector;
52572 unsigned int msi_vector;
52573 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
52574- struct access_method access;
52575+ struct access_method *access;
52576 char hba_mode_enabled;
52577
52578 /* queue and queue Info */
52579@@ -536,43 +536,43 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
52580 }
52581
52582 static struct access_method SA5_access = {
52583- SA5_submit_command,
52584- SA5_intr_mask,
52585- SA5_fifo_full,
52586- SA5_intr_pending,
52587- SA5_completed,
52588+ .submit_command = SA5_submit_command,
52589+ .set_intr_mask = SA5_intr_mask,
52590+ .fifo_full = SA5_fifo_full,
52591+ .intr_pending = SA5_intr_pending,
52592+ .command_completed = SA5_completed,
52593 };
52594
52595 static struct access_method SA5_ioaccel_mode1_access = {
52596- SA5_submit_command,
52597- SA5_performant_intr_mask,
52598- SA5_fifo_full,
52599- SA5_ioaccel_mode1_intr_pending,
52600- SA5_ioaccel_mode1_completed,
52601+ .submit_command = SA5_submit_command,
52602+ .set_intr_mask = SA5_performant_intr_mask,
52603+ .fifo_full = SA5_fifo_full,
52604+ .intr_pending = SA5_ioaccel_mode1_intr_pending,
52605+ .command_completed = SA5_ioaccel_mode1_completed,
52606 };
52607
52608 static struct access_method SA5_ioaccel_mode2_access = {
52609- SA5_submit_command_ioaccel2,
52610- SA5_performant_intr_mask,
52611- SA5_fifo_full,
52612- SA5_performant_intr_pending,
52613- SA5_performant_completed,
52614+ .submit_command = SA5_submit_command_ioaccel2,
52615+ .set_intr_mask = SA5_performant_intr_mask,
52616+ .fifo_full = SA5_fifo_full,
52617+ .intr_pending = SA5_performant_intr_pending,
52618+ .command_completed = SA5_performant_completed,
52619 };
52620
52621 static struct access_method SA5_performant_access = {
52622- SA5_submit_command,
52623- SA5_performant_intr_mask,
52624- SA5_fifo_full,
52625- SA5_performant_intr_pending,
52626- SA5_performant_completed,
52627+ .submit_command = SA5_submit_command,
52628+ .set_intr_mask = SA5_performant_intr_mask,
52629+ .fifo_full = SA5_fifo_full,
52630+ .intr_pending = SA5_performant_intr_pending,
52631+ .command_completed = SA5_performant_completed,
52632 };
52633
52634 static struct access_method SA5_performant_access_no_read = {
52635- SA5_submit_command_no_read,
52636- SA5_performant_intr_mask,
52637- SA5_fifo_full,
52638- SA5_performant_intr_pending,
52639- SA5_performant_completed,
52640+ .submit_command = SA5_submit_command_no_read,
52641+ .set_intr_mask = SA5_performant_intr_mask,
52642+ .fifo_full = SA5_fifo_full,
52643+ .intr_pending = SA5_performant_intr_pending,
52644+ .command_completed = SA5_performant_completed,
52645 };
52646
52647 struct board_type {
52648diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
52649index 1b3a094..068e683 100644
52650--- a/drivers/scsi/libfc/fc_exch.c
52651+++ b/drivers/scsi/libfc/fc_exch.c
52652@@ -101,12 +101,12 @@ struct fc_exch_mgr {
52653 u16 pool_max_index;
52654
52655 struct {
52656- atomic_t no_free_exch;
52657- atomic_t no_free_exch_xid;
52658- atomic_t xid_not_found;
52659- atomic_t xid_busy;
52660- atomic_t seq_not_found;
52661- atomic_t non_bls_resp;
52662+ atomic_unchecked_t no_free_exch;
52663+ atomic_unchecked_t no_free_exch_xid;
52664+ atomic_unchecked_t xid_not_found;
52665+ atomic_unchecked_t xid_busy;
52666+ atomic_unchecked_t seq_not_found;
52667+ atomic_unchecked_t non_bls_resp;
52668 } stats;
52669 };
52670
52671@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
52672 /* allocate memory for exchange */
52673 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
52674 if (!ep) {
52675- atomic_inc(&mp->stats.no_free_exch);
52676+ atomic_inc_unchecked(&mp->stats.no_free_exch);
52677 goto out;
52678 }
52679 memset(ep, 0, sizeof(*ep));
52680@@ -874,7 +874,7 @@ out:
52681 return ep;
52682 err:
52683 spin_unlock_bh(&pool->lock);
52684- atomic_inc(&mp->stats.no_free_exch_xid);
52685+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
52686 mempool_free(ep, mp->ep_pool);
52687 return NULL;
52688 }
52689@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52690 xid = ntohs(fh->fh_ox_id); /* we originated exch */
52691 ep = fc_exch_find(mp, xid);
52692 if (!ep) {
52693- atomic_inc(&mp->stats.xid_not_found);
52694+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52695 reject = FC_RJT_OX_ID;
52696 goto out;
52697 }
52698@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52699 ep = fc_exch_find(mp, xid);
52700 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
52701 if (ep) {
52702- atomic_inc(&mp->stats.xid_busy);
52703+ atomic_inc_unchecked(&mp->stats.xid_busy);
52704 reject = FC_RJT_RX_ID;
52705 goto rel;
52706 }
52707@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52708 }
52709 xid = ep->xid; /* get our XID */
52710 } else if (!ep) {
52711- atomic_inc(&mp->stats.xid_not_found);
52712+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52713 reject = FC_RJT_RX_ID; /* XID not found */
52714 goto out;
52715 }
52716@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52717 } else {
52718 sp = &ep->seq;
52719 if (sp->id != fh->fh_seq_id) {
52720- atomic_inc(&mp->stats.seq_not_found);
52721+ atomic_inc_unchecked(&mp->stats.seq_not_found);
52722 if (f_ctl & FC_FC_END_SEQ) {
52723 /*
52724 * Update sequence_id based on incoming last
52725@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52726
52727 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
52728 if (!ep) {
52729- atomic_inc(&mp->stats.xid_not_found);
52730+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52731 goto out;
52732 }
52733 if (ep->esb_stat & ESB_ST_COMPLETE) {
52734- atomic_inc(&mp->stats.xid_not_found);
52735+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52736 goto rel;
52737 }
52738 if (ep->rxid == FC_XID_UNKNOWN)
52739 ep->rxid = ntohs(fh->fh_rx_id);
52740 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
52741- atomic_inc(&mp->stats.xid_not_found);
52742+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52743 goto rel;
52744 }
52745 if (ep->did != ntoh24(fh->fh_s_id) &&
52746 ep->did != FC_FID_FLOGI) {
52747- atomic_inc(&mp->stats.xid_not_found);
52748+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52749 goto rel;
52750 }
52751 sof = fr_sof(fp);
52752@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52753 sp->ssb_stat |= SSB_ST_RESP;
52754 sp->id = fh->fh_seq_id;
52755 } else if (sp->id != fh->fh_seq_id) {
52756- atomic_inc(&mp->stats.seq_not_found);
52757+ atomic_inc_unchecked(&mp->stats.seq_not_found);
52758 goto rel;
52759 }
52760
52761@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52762 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
52763
52764 if (!sp)
52765- atomic_inc(&mp->stats.xid_not_found);
52766+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52767 else
52768- atomic_inc(&mp->stats.non_bls_resp);
52769+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
52770
52771 fc_frame_free(fp);
52772 }
52773@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
52774
52775 list_for_each_entry(ema, &lport->ema_list, ema_list) {
52776 mp = ema->mp;
52777- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
52778+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
52779 st->fc_no_free_exch_xid +=
52780- atomic_read(&mp->stats.no_free_exch_xid);
52781- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
52782- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
52783- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
52784- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
52785+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
52786+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
52787+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
52788+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
52789+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
52790 }
52791 }
52792 EXPORT_SYMBOL(fc_exch_update_stats);
52793diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
52794index 766098a..1c6c971 100644
52795--- a/drivers/scsi/libsas/sas_ata.c
52796+++ b/drivers/scsi/libsas/sas_ata.c
52797@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
52798 .postreset = ata_std_postreset,
52799 .error_handler = ata_std_error_handler,
52800 .post_internal_cmd = sas_ata_post_internal,
52801- .qc_defer = ata_std_qc_defer,
52802+ .qc_defer = ata_std_qc_defer,
52803 .qc_prep = ata_noop_qc_prep,
52804 .qc_issue = sas_ata_qc_issue,
52805 .qc_fill_rtf = sas_ata_qc_fill_rtf,
52806diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
52807index 434e903..5a4a79b 100644
52808--- a/drivers/scsi/lpfc/lpfc.h
52809+++ b/drivers/scsi/lpfc/lpfc.h
52810@@ -430,7 +430,7 @@ struct lpfc_vport {
52811 struct dentry *debug_nodelist;
52812 struct dentry *vport_debugfs_root;
52813 struct lpfc_debugfs_trc *disc_trc;
52814- atomic_t disc_trc_cnt;
52815+ atomic_unchecked_t disc_trc_cnt;
52816 #endif
52817 uint8_t stat_data_enabled;
52818 uint8_t stat_data_blocked;
52819@@ -880,8 +880,8 @@ struct lpfc_hba {
52820 struct timer_list fabric_block_timer;
52821 unsigned long bit_flags;
52822 #define FABRIC_COMANDS_BLOCKED 0
52823- atomic_t num_rsrc_err;
52824- atomic_t num_cmd_success;
52825+ atomic_unchecked_t num_rsrc_err;
52826+ atomic_unchecked_t num_cmd_success;
52827 unsigned long last_rsrc_error_time;
52828 unsigned long last_ramp_down_time;
52829 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
52830@@ -916,7 +916,7 @@ struct lpfc_hba {
52831
52832 struct dentry *debug_slow_ring_trc;
52833 struct lpfc_debugfs_trc *slow_ring_trc;
52834- atomic_t slow_ring_trc_cnt;
52835+ atomic_unchecked_t slow_ring_trc_cnt;
52836 /* iDiag debugfs sub-directory */
52837 struct dentry *idiag_root;
52838 struct dentry *idiag_pci_cfg;
52839diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
52840index b0aedce..89c6ca6 100644
52841--- a/drivers/scsi/lpfc/lpfc_debugfs.c
52842+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
52843@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
52844
52845 #include <linux/debugfs.h>
52846
52847-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
52848+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
52849 static unsigned long lpfc_debugfs_start_time = 0L;
52850
52851 /* iDiag */
52852@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
52853 lpfc_debugfs_enable = 0;
52854
52855 len = 0;
52856- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
52857+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
52858 (lpfc_debugfs_max_disc_trc - 1);
52859 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
52860 dtp = vport->disc_trc + i;
52861@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
52862 lpfc_debugfs_enable = 0;
52863
52864 len = 0;
52865- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
52866+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
52867 (lpfc_debugfs_max_slow_ring_trc - 1);
52868 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
52869 dtp = phba->slow_ring_trc + i;
52870@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
52871 !vport || !vport->disc_trc)
52872 return;
52873
52874- index = atomic_inc_return(&vport->disc_trc_cnt) &
52875+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
52876 (lpfc_debugfs_max_disc_trc - 1);
52877 dtp = vport->disc_trc + index;
52878 dtp->fmt = fmt;
52879 dtp->data1 = data1;
52880 dtp->data2 = data2;
52881 dtp->data3 = data3;
52882- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
52883+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
52884 dtp->jif = jiffies;
52885 #endif
52886 return;
52887@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
52888 !phba || !phba->slow_ring_trc)
52889 return;
52890
52891- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
52892+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
52893 (lpfc_debugfs_max_slow_ring_trc - 1);
52894 dtp = phba->slow_ring_trc + index;
52895 dtp->fmt = fmt;
52896 dtp->data1 = data1;
52897 dtp->data2 = data2;
52898 dtp->data3 = data3;
52899- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
52900+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
52901 dtp->jif = jiffies;
52902 #endif
52903 return;
52904@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
52905 "slow_ring buffer\n");
52906 goto debug_failed;
52907 }
52908- atomic_set(&phba->slow_ring_trc_cnt, 0);
52909+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
52910 memset(phba->slow_ring_trc, 0,
52911 (sizeof(struct lpfc_debugfs_trc) *
52912 lpfc_debugfs_max_slow_ring_trc));
52913@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
52914 "buffer\n");
52915 goto debug_failed;
52916 }
52917- atomic_set(&vport->disc_trc_cnt, 0);
52918+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
52919
52920 snprintf(name, sizeof(name), "discovery_trace");
52921 vport->debug_disc_trc =
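
Both lpfc trace buffers index with atomic_inc_return_unchecked(&cnt) & (max - 1), the classic power-of-two ring idiom; it is only correct because the driver adjusts lpfc_debugfs_max_disc_trc and lpfc_debugfs_max_slow_ring_trc to powers of two elsewhere. Counter wraparound is harmless here, which is precisely why these counters qualify for the unchecked atomics. The idiom by itself:

#define RING_SZ 256	/* must be a power of two */

static unsigned int ring_cnt;

static unsigned int ring_next(void)
{
	/* same result as (++ring_cnt) % RING_SZ, without the division,
	 * and still correct when ring_cnt eventually wraps */
	return ++ring_cnt & (RING_SZ - 1);
}
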
52922diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
52923index 06f9a5b..82812092 100644
52924--- a/drivers/scsi/lpfc/lpfc_init.c
52925+++ b/drivers/scsi/lpfc/lpfc_init.c
52926@@ -11296,8 +11296,10 @@ lpfc_init(void)
52927 "misc_register returned with status %d", error);
52928
52929 if (lpfc_enable_npiv) {
52930- lpfc_transport_functions.vport_create = lpfc_vport_create;
52931- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
52932+ pax_open_kernel();
52933+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
52934+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
52935+ pax_close_kernel();
52936 }
52937 lpfc_transport_template =
52938 fc_attach_transport(&lpfc_transport_functions);
52939diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
52940index 2df11da..e660a2c 100644
52941--- a/drivers/scsi/lpfc/lpfc_scsi.c
52942+++ b/drivers/scsi/lpfc/lpfc_scsi.c
52943@@ -382,7 +382,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
52944 uint32_t evt_posted;
52945
52946 spin_lock_irqsave(&phba->hbalock, flags);
52947- atomic_inc(&phba->num_rsrc_err);
52948+ atomic_inc_unchecked(&phba->num_rsrc_err);
52949 phba->last_rsrc_error_time = jiffies;
52950
52951 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
52952@@ -423,8 +423,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
52953 unsigned long num_rsrc_err, num_cmd_success;
52954 int i;
52955
52956- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
52957- num_cmd_success = atomic_read(&phba->num_cmd_success);
52958+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
52959+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
52960
52961 /*
52962 * The error and success command counters are global per
52963@@ -452,8 +452,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
52964 }
52965 }
52966 lpfc_destroy_vport_work_array(phba, vports);
52967- atomic_set(&phba->num_rsrc_err, 0);
52968- atomic_set(&phba->num_cmd_success, 0);
52969+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
52970+ atomic_set_unchecked(&phba->num_cmd_success, 0);
52971 }
52972
52973 /**
52974diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52975index 5055f92..376cd98 100644
52976--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52977+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52978@@ -1557,7 +1557,7 @@ _scsih_get_resync(struct device *dev)
52979 {
52980 struct scsi_device *sdev = to_scsi_device(dev);
52981 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
52982- static struct _raid_device *raid_device;
52983+ struct _raid_device *raid_device;
52984 unsigned long flags;
52985 Mpi2RaidVolPage0_t vol_pg0;
52986 Mpi2ConfigReply_t mpi_reply;
52987@@ -1609,7 +1609,7 @@ _scsih_get_state(struct device *dev)
52988 {
52989 struct scsi_device *sdev = to_scsi_device(dev);
52990 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
52991- static struct _raid_device *raid_device;
52992+ struct _raid_device *raid_device;
52993 unsigned long flags;
52994 Mpi2RaidVolPage0_t vol_pg0;
52995 Mpi2ConfigReply_t mpi_reply;
52996@@ -6631,7 +6631,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
52997 struct fw_event_work *fw_event)
52998 {
52999 Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
53000- static struct _raid_device *raid_device;
53001+ struct _raid_device *raid_device;
53002 unsigned long flags;
53003 u16 handle;
53004
53005@@ -7102,7 +7102,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
53006 u64 sas_address;
53007 struct _sas_device *sas_device;
53008 struct _sas_node *expander_device;
53009- static struct _raid_device *raid_device;
53010+ struct _raid_device *raid_device;
53011 u8 retry_count;
53012 unsigned long flags;
53013
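
The mpt2sas change is a plain bug fix rather than hardening: a static local is a single slot shared by every caller, so concurrent sysfs reads, or a read racing the firmware-event worker, could clobber each other's raid_device pointer between assignment and use. Dropping static gives each invocation its own stack slot. The hazard in miniature:

struct item { int id; };

int lookup_racy(struct item *hit)
{
	static struct item *cur;	/* one shared slot: writer A... */
	cur = hit;
	/* ...can be overwritten by writer B right here */
	return cur ? cur->id : -1;
}

int lookup_safe(struct item *hit)
{
	struct item *cur = hit;		/* per-call, lives on this stack */
	return cur ? cur->id : -1;
}
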
53014diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
53015index be8ce54..94ed33a 100644
53016--- a/drivers/scsi/pmcraid.c
53017+++ b/drivers/scsi/pmcraid.c
53018@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
53019 res->scsi_dev = scsi_dev;
53020 scsi_dev->hostdata = res;
53021 res->change_detected = 0;
53022- atomic_set(&res->read_failures, 0);
53023- atomic_set(&res->write_failures, 0);
53024+ atomic_set_unchecked(&res->read_failures, 0);
53025+ atomic_set_unchecked(&res->write_failures, 0);
53026 rc = 0;
53027 }
53028 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
53029@@ -2687,9 +2687,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
53030
53031 /* If this was a SCSI read/write command keep count of errors */
53032 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
53033- atomic_inc(&res->read_failures);
53034+ atomic_inc_unchecked(&res->read_failures);
53035 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
53036- atomic_inc(&res->write_failures);
53037+ atomic_inc_unchecked(&res->write_failures);
53038
53039 if (!RES_IS_GSCSI(res->cfg_entry) &&
53040 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
53041@@ -3545,7 +3545,7 @@ static int pmcraid_queuecommand_lck(
53042 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
53043 * hrrq_id assigned here in queuecommand
53044 */
53045- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
53046+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
53047 pinstance->num_hrrq;
53048 cmd->cmd_done = pmcraid_io_done;
53049
53050@@ -3857,7 +3857,7 @@ static long pmcraid_ioctl_passthrough(
53051 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
53052 * hrrq_id assigned here in queuecommand
53053 */
53054- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
53055+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
53056 pinstance->num_hrrq;
53057
53058 if (request_size) {
53059@@ -4495,7 +4495,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
53060
53061 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
53062 /* add resources only after host is added into system */
53063- if (!atomic_read(&pinstance->expose_resources))
53064+ if (!atomic_read_unchecked(&pinstance->expose_resources))
53065 return;
53066
53067 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
53068@@ -5322,8 +5322,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
53069 init_waitqueue_head(&pinstance->reset_wait_q);
53070
53071 atomic_set(&pinstance->outstanding_cmds, 0);
53072- atomic_set(&pinstance->last_message_id, 0);
53073- atomic_set(&pinstance->expose_resources, 0);
53074+ atomic_set_unchecked(&pinstance->last_message_id, 0);
53075+ atomic_set_unchecked(&pinstance->expose_resources, 0);
53076
53077 INIT_LIST_HEAD(&pinstance->free_res_q);
53078 INIT_LIST_HEAD(&pinstance->used_res_q);
53079@@ -6036,7 +6036,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
53080 /* Schedule worker thread to handle CCN and take care of adding and
53081 * removing devices to OS
53082 */
53083- atomic_set(&pinstance->expose_resources, 1);
53084+ atomic_set_unchecked(&pinstance->expose_resources, 1);
53085 schedule_work(&pinstance->worker_q);
53086 return rc;
53087
53088diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
53089index e1d150f..6c6df44 100644
53090--- a/drivers/scsi/pmcraid.h
53091+++ b/drivers/scsi/pmcraid.h
53092@@ -748,7 +748,7 @@ struct pmcraid_instance {
53093 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
53094
53095 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
53096- atomic_t last_message_id;
53097+ atomic_unchecked_t last_message_id;
53098
53099 /* configuration table */
53100 struct pmcraid_config_table *cfg_table;
53101@@ -777,7 +777,7 @@ struct pmcraid_instance {
53102 atomic_t outstanding_cmds;
53103
53104 /* should add/delete resources to mid-layer now ?*/
53105- atomic_t expose_resources;
53106+ atomic_unchecked_t expose_resources;
53107
53108
53109
53110@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
53111 struct pmcraid_config_table_entry_ext cfg_entry_ext;
53112 };
53113 struct scsi_device *scsi_dev; /* Link scsi_device structure */
53114- atomic_t read_failures; /* count of failed READ commands */
53115- atomic_t write_failures; /* count of failed WRITE commands */
53116+ atomic_unchecked_t read_failures; /* count of failed READ commands */
53117+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
53118
53119 /* To indicate add/delete/modify during CCN */
53120 u8 change_detected;
53121diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
53122index 16fe519..3b1ec82 100644
53123--- a/drivers/scsi/qla2xxx/qla_attr.c
53124+++ b/drivers/scsi/qla2xxx/qla_attr.c
53125@@ -2188,7 +2188,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
53126 return 0;
53127 }
53128
53129-struct fc_function_template qla2xxx_transport_functions = {
53130+fc_function_template_no_const qla2xxx_transport_functions = {
53131
53132 .show_host_node_name = 1,
53133 .show_host_port_name = 1,
53134@@ -2236,7 +2236,7 @@ struct fc_function_template qla2xxx_transport_functions = {
53135 .bsg_timeout = qla24xx_bsg_timeout,
53136 };
53137
53138-struct fc_function_template qla2xxx_transport_vport_functions = {
53139+fc_function_template_no_const qla2xxx_transport_vport_functions = {
53140
53141 .show_host_node_name = 1,
53142 .show_host_port_name = 1,
53143diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
53144index d48dea8..0845f78 100644
53145--- a/drivers/scsi/qla2xxx/qla_gbl.h
53146+++ b/drivers/scsi/qla2xxx/qla_gbl.h
53147@@ -569,8 +569,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
53148 struct device_attribute;
53149 extern struct device_attribute *qla2x00_host_attrs[];
53150 struct fc_function_template;
53151-extern struct fc_function_template qla2xxx_transport_functions;
53152-extern struct fc_function_template qla2xxx_transport_vport_functions;
53153+extern fc_function_template_no_const qla2xxx_transport_functions;
53154+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
53155 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
53156 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
53157 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
53158diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
53159index d96bfb5..d7afe90 100644
53160--- a/drivers/scsi/qla2xxx/qla_os.c
53161+++ b/drivers/scsi/qla2xxx/qla_os.c
53162@@ -1490,8 +1490,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
53163 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
53164 /* Ok, a 64bit DMA mask is applicable. */
53165 ha->flags.enable_64bit_addressing = 1;
53166- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
53167- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
53168+ pax_open_kernel();
53169+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
53170+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
53171+ pax_close_kernel();
53172 return;
53173 }
53174 }
53175diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
53176index 8f6d0fb..1b21097 100644
53177--- a/drivers/scsi/qla4xxx/ql4_def.h
53178+++ b/drivers/scsi/qla4xxx/ql4_def.h
53179@@ -305,7 +305,7 @@ struct ddb_entry {
53180 * (4000 only) */
53181 atomic_t relogin_timer; /* Max Time to wait for
53182 * relogin to complete */
53183- atomic_t relogin_retry_count; /* Num of times relogin has been
53184+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
53185 * retried */
53186 uint32_t default_time2wait; /* Default Min time between
53187 * relogins (+aens) */
53188diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
53189index 3202063..f9f0ff6 100644
53190--- a/drivers/scsi/qla4xxx/ql4_os.c
53191+++ b/drivers/scsi/qla4xxx/ql4_os.c
53192@@ -4494,12 +4494,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
53193 */
53194 if (!iscsi_is_session_online(cls_sess)) {
53195 /* Reset retry relogin timer */
53196- atomic_inc(&ddb_entry->relogin_retry_count);
53197+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
53198 DEBUG2(ql4_printk(KERN_INFO, ha,
53199 "%s: index[%d] relogin timed out-retrying"
53200 " relogin (%d), retry (%d)\n", __func__,
53201 ddb_entry->fw_ddb_index,
53202- atomic_read(&ddb_entry->relogin_retry_count),
53203+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
53204 ddb_entry->default_time2wait + 4));
53205 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
53206 atomic_set(&ddb_entry->retry_relogin_timer,
53207@@ -6607,7 +6607,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
53208
53209 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
53210 atomic_set(&ddb_entry->relogin_timer, 0);
53211- atomic_set(&ddb_entry->relogin_retry_count, 0);
53212+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
53213 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
53214 ddb_entry->default_relogin_timeout =
53215 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
53216diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
53217index 88d46fe..7351be5 100644
53218--- a/drivers/scsi/scsi.c
53219+++ b/drivers/scsi/scsi.c
53220@@ -640,7 +640,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
53221 struct Scsi_Host *host = cmd->device->host;
53222 int rtn = 0;
53223
53224- atomic_inc(&cmd->device->iorequest_cnt);
53225+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
53226
53227 /* check if the device is still usable */
53228 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
53229diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
53230index 3f50dfc..86af487 100644
53231--- a/drivers/scsi/scsi_lib.c
53232+++ b/drivers/scsi/scsi_lib.c
53233@@ -1423,7 +1423,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
53234 shost = sdev->host;
53235 scsi_init_cmd_errh(cmd);
53236 cmd->result = DID_NO_CONNECT << 16;
53237- atomic_inc(&cmd->device->iorequest_cnt);
53238+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
53239
53240 /*
53241 * SCSI request completion path will do scsi_device_unbusy(),
53242@@ -1449,9 +1449,9 @@ static void scsi_softirq_done(struct request *rq)
53243
53244 INIT_LIST_HEAD(&cmd->eh_entry);
53245
53246- atomic_inc(&cmd->device->iodone_cnt);
53247+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
53248 if (cmd->result)
53249- atomic_inc(&cmd->device->ioerr_cnt);
53250+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
53251
53252 disposition = scsi_decide_disposition(cmd);
53253 if (disposition != SUCCESS &&
53254diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
53255index 074e8cc..f612e5c 100644
53256--- a/drivers/scsi/scsi_sysfs.c
53257+++ b/drivers/scsi/scsi_sysfs.c
53258@@ -780,7 +780,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
53259 char *buf) \
53260 { \
53261 struct scsi_device *sdev = to_scsi_device(dev); \
53262- unsigned long long count = atomic_read(&sdev->field); \
53263+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
53264 return snprintf(buf, 20, "0x%llx\n", count); \
53265 } \
53266 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
53267diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
53268index e51add0..1e06a96 100644
53269--- a/drivers/scsi/scsi_tgt_lib.c
53270+++ b/drivers/scsi/scsi_tgt_lib.c
53271@@ -363,7 +363,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
53272 int err;
53273
53274 dprintk("%lx %u\n", uaddr, len);
53275- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
53276+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
53277 if (err) {
53278 /*
53279 * TODO: need to fixup sg_tablesize, max_segment_size,
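
The cast above fixes a sparse address-space warning rather than a runtime bug: blk_rq_map_user() expects a user pointer, and uaddr arrives as a plain unsigned long, so the correct cast is (void __user *), not (void *). A compilable sketch of how the annotation works; the attribute only takes effect under sparse's __CHECKER__, and map_user() here is a made-up stand-in:

#include <stdio.h>

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

static int map_user(void __user *ubuf, unsigned long len)
{
	/* real code would copy_from_user(); here we just report the request */
	printf("mapping %lu bytes at %#lx\n", len, (unsigned long)ubuf);
	return 0;
}

int main(void)
{
	unsigned long uaddr = 0x1000;	/* hypothetical user address */
	return map_user((void __user *)uaddr, 64);
}
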
53280diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
53281index 521f583..6b15966 100644
53282--- a/drivers/scsi/scsi_transport_fc.c
53283+++ b/drivers/scsi/scsi_transport_fc.c
53284@@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
53285 * Netlink Infrastructure
53286 */
53287
53288-static atomic_t fc_event_seq;
53289+static atomic_unchecked_t fc_event_seq;
53290
53291 /**
53292 * fc_get_event_number - Obtain the next sequential FC event number
53293@@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
53294 u32
53295 fc_get_event_number(void)
53296 {
53297- return atomic_add_return(1, &fc_event_seq);
53298+ return atomic_add_return_unchecked(1, &fc_event_seq);
53299 }
53300 EXPORT_SYMBOL(fc_get_event_number);
53301
53302@@ -655,7 +655,7 @@ static __init int fc_transport_init(void)
53303 {
53304 int error;
53305
53306- atomic_set(&fc_event_seq, 0);
53307+ atomic_set_unchecked(&fc_event_seq, 0);
53308
53309 error = transport_class_register(&fc_host_class);
53310 if (error)
53311@@ -845,7 +845,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
53312 char *cp;
53313
53314 *val = simple_strtoul(buf, &cp, 0);
53315- if ((*cp && (*cp != '\n')) || (*val < 0))
53316+ if (*cp && (*cp != '\n'))
53317 return -EINVAL;
53318 /*
53319 * Check for overflow; dev_loss_tmo is u32
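
Dropping the (*val < 0) test is sound because *val is an unsigned long: the comparison is always false, so it can only provoke a compiler warning, and the real range check against u32 follows right after this hunk. A two-line demonstration:

#include <stdio.h>

int main(void)
{
	unsigned long val = (unsigned long)-1;	/* "negative" input wraps to ULONG_MAX */
	printf("%d\n", val < 0);		/* unsigned compare: always prints 0 */
	return 0;
}
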
53320diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
53321index 0102a2d..cc3f8e9 100644
53322--- a/drivers/scsi/scsi_transport_iscsi.c
53323+++ b/drivers/scsi/scsi_transport_iscsi.c
53324@@ -79,7 +79,7 @@ struct iscsi_internal {
53325 struct transport_container session_cont;
53326 };
53327
53328-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
53329+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
53330 static struct workqueue_struct *iscsi_eh_timer_workq;
53331
53332 static DEFINE_IDA(iscsi_sess_ida);
53333@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
53334 int err;
53335
53336 ihost = shost->shost_data;
53337- session->sid = atomic_add_return(1, &iscsi_session_nr);
53338+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
53339
53340 if (target_id == ISCSI_MAX_TARGET) {
53341 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
53342@@ -4511,7 +4511,7 @@ static __init int iscsi_transport_init(void)
53343 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
53344 ISCSI_TRANSPORT_VERSION);
53345
53346- atomic_set(&iscsi_session_nr, 0);
53347+ atomic_set_unchecked(&iscsi_session_nr, 0);
53348
53349 err = class_register(&iscsi_transport_class);
53350 if (err)
53351diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
53352index 13e8983..d306a68 100644
53353--- a/drivers/scsi/scsi_transport_srp.c
53354+++ b/drivers/scsi/scsi_transport_srp.c
53355@@ -36,7 +36,7 @@
53356 #include "scsi_transport_srp_internal.h"
53357
53358 struct srp_host_attrs {
53359- atomic_t next_port_id;
53360+ atomic_unchecked_t next_port_id;
53361 };
53362 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
53363
53364@@ -101,7 +101,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
53365 struct Scsi_Host *shost = dev_to_shost(dev);
53366 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
53367
53368- atomic_set(&srp_host->next_port_id, 0);
53369+ atomic_set_unchecked(&srp_host->next_port_id, 0);
53370 return 0;
53371 }
53372
53373@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
53374 rport_fast_io_fail_timedout);
53375 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
53376
53377- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
53378+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
53379 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
53380
53381 transport_setup_device(&rport->dev);
53382diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
53383index 6825eda..be470c4 100644
53384--- a/drivers/scsi/sd.c
53385+++ b/drivers/scsi/sd.c
53386@@ -2954,7 +2954,7 @@ static int sd_probe(struct device *dev)
53387 sdkp->disk = gd;
53388 sdkp->index = index;
53389 atomic_set(&sdkp->openers, 0);
53390- atomic_set(&sdkp->device->ioerr_cnt, 0);
53391+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
53392
53393 if (!sdp->request_queue->rq_timeout) {
53394 if (sdp->type != TYPE_MOD)
53395diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
53396index 53268aab..17c2764 100644
53397--- a/drivers/scsi/sg.c
53398+++ b/drivers/scsi/sg.c
53399@@ -1102,7 +1102,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
53400 sdp->disk->disk_name,
53401 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
53402 NULL,
53403- (char *)arg);
53404+ (char __user *)arg);
53405 case BLKTRACESTART:
53406 return blk_trace_startstop(sdp->device->request_queue, 1);
53407 case BLKTRACESTOP:
53408diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
53409index d4f9670..d37b662 100644
53410--- a/drivers/spi/spi.c
53411+++ b/drivers/spi/spi.c
53412@@ -2204,7 +2204,7 @@ int spi_bus_unlock(struct spi_master *master)
53413 EXPORT_SYMBOL_GPL(spi_bus_unlock);
53414
53415 /* portable code must never pass more than 32 bytes */
53416-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
53417+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
53418
53419 static u8 *buf;
53420
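
The 32 -> 32UL change is about typing, not size: the kernel's max() macro rejects operands of different types, and SMP_CACHE_BYTES evidently expands to an unsigned long expression in this tree (an assumption inferred from the change itself), so a plain int literal no longer pairs with it. A userspace stand-in for the type-checked macro:

#include <stdio.h>

#define max(x, y) ({				\
	typeof(x) a_ = (x);			\
	typeof(y) b_ = (y);			\
	(void)(&a_ == &b_);	/* compiler diagnoses mismatched operand types */ \
	a_ > b_ ? a_ : b_; })

#define SMP_CACHE_BYTES 64UL	/* assumed unsigned long, as in the patched tree */

int main(void)
{
	printf("%lu\n", max(32UL, SMP_CACHE_BYTES));	/* bare 32 would trip the check */
	return 0;
}
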
53421diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
53422index c341ac1..bf9799f 100644
53423--- a/drivers/staging/android/timed_output.c
53424+++ b/drivers/staging/android/timed_output.c
53425@@ -25,7 +25,7 @@
53426 #include "timed_output.h"
53427
53428 static struct class *timed_output_class;
53429-static atomic_t device_count;
53430+static atomic_unchecked_t device_count;
53431
53432 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
53433 char *buf)
53434@@ -63,7 +63,7 @@ static int create_timed_output_class(void)
53435 timed_output_class = class_create(THIS_MODULE, "timed_output");
53436 if (IS_ERR(timed_output_class))
53437 return PTR_ERR(timed_output_class);
53438- atomic_set(&device_count, 0);
53439+ atomic_set_unchecked(&device_count, 0);
53440 timed_output_class->dev_groups = timed_output_groups;
53441 }
53442
53443@@ -81,7 +81,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
53444 if (ret < 0)
53445 return ret;
53446
53447- tdev->index = atomic_inc_return(&device_count);
53448+ tdev->index = atomic_inc_return_unchecked(&device_count);
53449 tdev->dev = device_create(timed_output_class, NULL,
53450 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
53451 if (IS_ERR(tdev->dev))
53452diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
53453index fe47cd3..19a1bd1 100644
53454--- a/drivers/staging/gdm724x/gdm_tty.c
53455+++ b/drivers/staging/gdm724x/gdm_tty.c
53456@@ -44,7 +44,7 @@
53457 #define gdm_tty_send_control(n, r, v, d, l) (\
53458 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
53459
53460-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
53461+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
53462
53463 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
53464 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
53465diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
53466index def8280..e3fd96a 100644
53467--- a/drivers/staging/imx-drm/imx-drm-core.c
53468+++ b/drivers/staging/imx-drm/imx-drm-core.c
53469@@ -355,7 +355,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
53470 if (imxdrm->pipes >= MAX_CRTC)
53471 return -EINVAL;
53472
53473- if (imxdrm->drm->open_count)
53474+ if (local_read(&imxdrm->drm->open_count))
53475 return -EBUSY;
53476
53477 imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
53478diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
53479index 3f8020c..649fded 100644
53480--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
53481+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
53482@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
53483 return 0;
53484 }
53485
53486-sfw_test_client_ops_t brw_test_client;
53487-void brw_init_test_client(void)
53488-{
53489- brw_test_client.tso_init = brw_client_init;
53490- brw_test_client.tso_fini = brw_client_fini;
53491- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
53492- brw_test_client.tso_done_rpc = brw_client_done_rpc;
53493+sfw_test_client_ops_t brw_test_client = {
53494+ .tso_init = brw_client_init,
53495+ .tso_fini = brw_client_fini,
53496+ .tso_prep_rpc = brw_client_prep_rpc,
53497+ .tso_done_rpc = brw_client_done_rpc,
53498 };
53499
53500 srpc_service_t brw_test_service;
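
Replacing brw_init_test_client() with a designated initializer makes brw_test_client fully initialized at compile time, which is what allows such ops objects to be constified and placed read-only elsewhere in this patch; it also lets the corresponding extern declarations and setup calls in framework.c below go away. A self-contained illustration with made-up names:

#include <stdio.h>

struct test_client_ops {
	void (*tso_init)(void);
	void (*tso_fini)(void);
};

static void my_init(void) { puts("init"); }
static void my_fini(void) { puts("fini"); }

/* compile-time initialization; no *_init_test_client()-style setup call needed */
static const struct test_client_ops brw_test_client = {
	.tso_init = my_init,
	.tso_fini = my_fini,
};

int main(void)
{
	brw_test_client.tso_init();
	brw_test_client.tso_fini();
	return 0;
}
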
53501diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
53502index 050723a..fa6fdf1 100644
53503--- a/drivers/staging/lustre/lnet/selftest/framework.c
53504+++ b/drivers/staging/lustre/lnet/selftest/framework.c
53505@@ -1635,12 +1635,10 @@ static srpc_service_t sfw_services[] =
53506
53507 extern sfw_test_client_ops_t ping_test_client;
53508 extern srpc_service_t ping_test_service;
53509-extern void ping_init_test_client(void);
53510 extern void ping_init_test_service(void);
53511
53512 extern sfw_test_client_ops_t brw_test_client;
53513 extern srpc_service_t brw_test_service;
53514-extern void brw_init_test_client(void);
53515 extern void brw_init_test_service(void);
53516
53517
53518@@ -1684,12 +1682,10 @@ sfw_startup (void)
53519 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
53520 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
53521
53522- brw_init_test_client();
53523 brw_init_test_service();
53524 rc = sfw_register_test(&brw_test_service, &brw_test_client);
53525 LASSERT (rc == 0);
53526
53527- ping_init_test_client();
53528 ping_init_test_service();
53529 rc = sfw_register_test(&ping_test_service, &ping_test_client);
53530 LASSERT (rc == 0);
53531diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
53532index 750cac4..e4d751f 100644
53533--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
53534+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
53535@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
53536 return 0;
53537 }
53538
53539-sfw_test_client_ops_t ping_test_client;
53540-void ping_init_test_client(void)
53541-{
53542- ping_test_client.tso_init = ping_client_init;
53543- ping_test_client.tso_fini = ping_client_fini;
53544- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
53545- ping_test_client.tso_done_rpc = ping_client_done_rpc;
53546-}
53547+sfw_test_client_ops_t ping_test_client = {
53548+ .tso_init = ping_client_init,
53549+ .tso_fini = ping_client_fini,
53550+ .tso_prep_rpc = ping_client_prep_rpc,
53551+ .tso_done_rpc = ping_client_done_rpc,
53552+};
53553
53554 srpc_service_t ping_test_service;
53555 void ping_init_test_service(void)
53556diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
53557index 0c6b784..c64235c 100644
53558--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
53559+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
53560@@ -1141,7 +1141,7 @@ struct ldlm_callback_suite {
53561 ldlm_completion_callback lcs_completion;
53562 ldlm_blocking_callback lcs_blocking;
53563 ldlm_glimpse_callback lcs_glimpse;
53564-};
53565+} __no_const;
53566
53567 /* ldlm_lockd.c */
53568 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
53569diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
53570index d5c4613..a341678 100644
53571--- a/drivers/staging/lustre/lustre/include/obd.h
53572+++ b/drivers/staging/lustre/lustre/include/obd.h
53573@@ -1439,7 +1439,7 @@ struct md_ops {
53574 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
53575 * wrapper function in include/linux/obd_class.h.
53576 */
53577-};
53578+} __no_const;
53579
53580 struct lsm_operations {
53581 void (*lsm_free)(struct lov_stripe_md *);
53582diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53583index 986bf38..eab2558f 100644
53584--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53585+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53586@@ -259,7 +259,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
53587 int added = (mode == LCK_NL);
53588 int overlaps = 0;
53589 int splitted = 0;
53590- const struct ldlm_callback_suite null_cbs = { NULL };
53591+ const struct ldlm_callback_suite null_cbs = { };
53592
53593 CDEBUG(D_DLMTRACE, "flags %#llx owner "LPU64" pid %u mode %u start "
53594 LPU64" end "LPU64"\n", *flags,
53595diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53596index e947b91..f408990 100644
53597--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53598+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53599@@ -217,7 +217,7 @@ DECLARE_PROC_HANDLER(proc_debug_mb)
53600 int LL_PROC_PROTO(proc_console_max_delay_cs)
53601 {
53602 int rc, max_delay_cs;
53603- ctl_table_t dummy = *table;
53604+ ctl_table_no_const dummy = *table;
53605 cfs_duration_t d;
53606
53607 dummy.data = &max_delay_cs;
53608@@ -248,7 +248,7 @@ int LL_PROC_PROTO(proc_console_max_delay_cs)
53609 int LL_PROC_PROTO(proc_console_min_delay_cs)
53610 {
53611 int rc, min_delay_cs;
53612- ctl_table_t dummy = *table;
53613+ ctl_table_no_const dummy = *table;
53614 cfs_duration_t d;
53615
53616 dummy.data = &min_delay_cs;
53617@@ -279,7 +279,7 @@ int LL_PROC_PROTO(proc_console_min_delay_cs)
53618 int LL_PROC_PROTO(proc_console_backoff)
53619 {
53620 int rc, backoff;
53621- ctl_table_t dummy = *table;
53622+ ctl_table_no_const dummy = *table;
53623
53624 dummy.data = &backoff;
53625 dummy.proc_handler = &proc_dointvec;
53626diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
53627index b16ee08..a3db5c6 100644
53628--- a/drivers/staging/lustre/lustre/libcfs/module.c
53629+++ b/drivers/staging/lustre/lustre/libcfs/module.c
53630@@ -314,11 +314,11 @@ out:
53631
53632
53633 struct cfs_psdev_ops libcfs_psdev_ops = {
53634- libcfs_psdev_open,
53635- libcfs_psdev_release,
53636- NULL,
53637- NULL,
53638- libcfs_ioctl
53639+ .p_open = libcfs_psdev_open,
53640+ .p_close = libcfs_psdev_release,
53641+ .p_read = NULL,
53642+ .p_write = NULL,
53643+ .p_ioctl = libcfs_ioctl
53644 };
53645
53646 extern int insert_proc(void);
53647diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
53648index ae6f61a..03c3d5d 100644
53649--- a/drivers/staging/lustre/lustre/llite/dir.c
53650+++ b/drivers/staging/lustre/lustre/llite/dir.c
53651@@ -660,7 +660,7 @@ int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
53652 int mode;
53653 int err;
53654
53655- mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask) | S_IFDIR;
53656+ mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current_umask()) | S_IFDIR;
53657 op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
53658 strlen(filename), mode, LUSTRE_OPC_MKDIR,
53659 lump);
53660diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
53661index f670469..03b7438 100644
53662--- a/drivers/staging/media/solo6x10/solo6x10-core.c
53663+++ b/drivers/staging/media/solo6x10/solo6x10-core.c
53664@@ -434,7 +434,7 @@ static void solo_device_release(struct device *dev)
53665
53666 static int solo_sysfs_init(struct solo_dev *solo_dev)
53667 {
53668- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
53669+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
53670 struct device *dev = &solo_dev->dev;
53671 const char *driver;
53672 int i;
53673diff --git a/drivers/staging/media/solo6x10/solo6x10-g723.c b/drivers/staging/media/solo6x10/solo6x10-g723.c
53674index 74f037b..5b5bb76 100644
53675--- a/drivers/staging/media/solo6x10/solo6x10-g723.c
53676+++ b/drivers/staging/media/solo6x10/solo6x10-g723.c
53677@@ -355,7 +355,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
53678
53679 int solo_g723_init(struct solo_dev *solo_dev)
53680 {
53681- static struct snd_device_ops ops = { NULL };
53682+ static struct snd_device_ops ops = { };
53683 struct snd_card *card;
53684 struct snd_kcontrol_new kctl;
53685 char name[32];
53686diff --git a/drivers/staging/media/solo6x10/solo6x10-p2m.c b/drivers/staging/media/solo6x10/solo6x10-p2m.c
53687index 7f2f247..d999137 100644
53688--- a/drivers/staging/media/solo6x10/solo6x10-p2m.c
53689+++ b/drivers/staging/media/solo6x10/solo6x10-p2m.c
53690@@ -77,7 +77,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
53691
53692 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
53693 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
53694- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
53695+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
53696 if (p2m_id < 0)
53697 p2m_id = -p2m_id;
53698 }
53699diff --git a/drivers/staging/media/solo6x10/solo6x10.h b/drivers/staging/media/solo6x10/solo6x10.h
53700index 8964f8b..36eb087 100644
53701--- a/drivers/staging/media/solo6x10/solo6x10.h
53702+++ b/drivers/staging/media/solo6x10/solo6x10.h
53703@@ -237,7 +237,7 @@ struct solo_dev {
53704
53705 /* P2M DMA Engine */
53706 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
53707- atomic_t p2m_count;
53708+ atomic_unchecked_t p2m_count;
53709 int p2m_jiffies;
53710 unsigned int p2m_timeouts;
53711
53712diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
53713index a0f4868..139f1fb 100644
53714--- a/drivers/staging/octeon/ethernet-rx.c
53715+++ b/drivers/staging/octeon/ethernet-rx.c
53716@@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
53717 /* Increment RX stats for virtual ports */
53718 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
53719 #ifdef CONFIG_64BIT
53720- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
53721- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
53722+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
53723+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
53724 #else
53725- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
53726- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
53727+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
53728+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
53729 #endif
53730 }
53731 netif_receive_skb(skb);
53732@@ -432,9 +432,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
53733 dev->name);
53734 */
53735 #ifdef CONFIG_64BIT
53736- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
53737+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
53738 #else
53739- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
53740+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
53741 #endif
53742 dev_kfree_skb_irq(skb);
53743 }
53744diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
53745index da9dd6b..8e3e0f5 100644
53746--- a/drivers/staging/octeon/ethernet.c
53747+++ b/drivers/staging/octeon/ethernet.c
53748@@ -247,11 +247,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
53749 * since the RX tasklet also increments it.
53750 */
53751 #ifdef CONFIG_64BIT
53752- atomic64_add(rx_status.dropped_packets,
53753- (atomic64_t *)&priv->stats.rx_dropped);
53754+ atomic64_add_unchecked(rx_status.dropped_packets,
53755+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
53756 #else
53757- atomic_add(rx_status.dropped_packets,
53758- (atomic_t *)&priv->stats.rx_dropped);
53759+ atomic_add_unchecked(rx_status.dropped_packets,
53760+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
53761 #endif
53762 }
53763
53764diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
53765index c59fccd..79f8fc2 100644
53766--- a/drivers/staging/rtl8188eu/include/hal_intf.h
53767+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
53768@@ -267,7 +267,7 @@ struct hal_ops {
53769 s32 (*c2h_handler)(struct adapter *padapter,
53770 struct c2h_evt_hdr *c2h_evt);
53771 c2h_id_filter c2h_id_filter_ccx;
53772-};
53773+} __no_const;
53774
53775 enum rt_eeprom_type {
53776 EEPROM_93C46,
53777diff --git a/drivers/staging/rtl8188eu/include/rtw_io.h b/drivers/staging/rtl8188eu/include/rtw_io.h
53778index e8790f8..b4a5980 100644
53779--- a/drivers/staging/rtl8188eu/include/rtw_io.h
53780+++ b/drivers/staging/rtl8188eu/include/rtw_io.h
53781@@ -124,7 +124,7 @@ struct _io_ops {
53782 u32 (*_write_scsi)(struct intf_hdl *pintfhdl, u32 cnt, u8 *pmem);
53783 void (*_read_port_cancel)(struct intf_hdl *pintfhdl);
53784 void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
53785-};
53786+} __no_const;
53787
53788 struct io_req {
53789 struct list_head list;
53790diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
53791index dc23395..cf7e9b1 100644
53792--- a/drivers/staging/rtl8712/rtl871x_io.h
53793+++ b/drivers/staging/rtl8712/rtl871x_io.h
53794@@ -108,7 +108,7 @@ struct _io_ops {
53795 u8 *pmem);
53796 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
53797 u8 *pmem);
53798-};
53799+} __no_const;
53800
53801 struct io_req {
53802 struct list_head list;
53803diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
53804index a863a98..d272795 100644
53805--- a/drivers/staging/usbip/vhci.h
53806+++ b/drivers/staging/usbip/vhci.h
53807@@ -83,7 +83,7 @@ struct vhci_hcd {
53808 unsigned resuming:1;
53809 unsigned long re_timeout;
53810
53811- atomic_t seqnum;
53812+ atomic_unchecked_t seqnum;
53813
53814 /*
53815 * NOTE:
53816diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
53817index 0007d30..c06a693 100644
53818--- a/drivers/staging/usbip/vhci_hcd.c
53819+++ b/drivers/staging/usbip/vhci_hcd.c
53820@@ -439,7 +439,7 @@ static void vhci_tx_urb(struct urb *urb)
53821
53822 spin_lock(&vdev->priv_lock);
53823
53824- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
53825+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
53826 if (priv->seqnum == 0xffff)
53827 dev_info(&urb->dev->dev, "seqnum max\n");
53828
53829@@ -686,7 +686,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
53830 return -ENOMEM;
53831 }
53832
53833- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
53834+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
53835 if (unlink->seqnum == 0xffff)
53836 pr_info("seqnum max\n");
53837
53838@@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hcd)
53839 vdev->rhport = rhport;
53840 }
53841
53842- atomic_set(&vhci->seqnum, 0);
53843+ atomic_set_unchecked(&vhci->seqnum, 0);
53844 spin_lock_init(&vhci->lock);
53845
53846 hcd->power_budget = 0; /* no limit */
53847diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
53848index d07fcb5..358e1e1 100644
53849--- a/drivers/staging/usbip/vhci_rx.c
53850+++ b/drivers/staging/usbip/vhci_rx.c
53851@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
53852 if (!urb) {
53853 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
53854 pr_info("max seqnum %d\n",
53855- atomic_read(&the_controller->seqnum));
53856+ atomic_read_unchecked(&the_controller->seqnum));
53857 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
53858 return;
53859 }
53860diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
53861index 317c2a8..ffeb4ef 100644
53862--- a/drivers/staging/vt6655/hostap.c
53863+++ b/drivers/staging/vt6655/hostap.c
53864@@ -68,14 +68,13 @@ static int msglevel = MSG_LEVEL_INFO;
53865 *
53866 */
53867
53868+static net_device_ops_no_const apdev_netdev_ops;
53869+
53870 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
53871 {
53872 PSDevice apdev_priv;
53873 struct net_device *dev = pDevice->dev;
53874 int ret;
53875- const struct net_device_ops apdev_netdev_ops = {
53876- .ndo_start_xmit = pDevice->tx_80211,
53877- };
53878
53879 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
53880
53881@@ -87,6 +86,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
53882 *apdev_priv = *pDevice;
53883 eth_hw_addr_inherit(pDevice->apdev, dev);
53884
53885+ /* only half broken now */
53886+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
53887 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
53888
53889 pDevice->apdev->type = ARPHRD_IEEE80211;
53890diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
53891index e7e9372..161f530 100644
53892--- a/drivers/target/sbp/sbp_target.c
53893+++ b/drivers/target/sbp/sbp_target.c
53894@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
53895
53896 #define SESSION_MAINTENANCE_INTERVAL HZ
53897
53898-static atomic_t login_id = ATOMIC_INIT(0);
53899+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
53900
53901 static void session_maintenance_work(struct work_struct *);
53902 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
53903@@ -444,7 +444,7 @@ static void sbp_management_request_login(
53904 login->lun = se_lun;
53905 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
53906 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
53907- login->login_id = atomic_inc_return(&login_id);
53908+ login->login_id = atomic_inc_return_unchecked(&login_id);
53909
53910 login->tgt_agt = sbp_target_agent_register(login);
53911 if (IS_ERR(login->tgt_agt)) {
53912diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
53913index 98da901..bb443e8 100644
53914--- a/drivers/target/target_core_device.c
53915+++ b/drivers/target/target_core_device.c
53916@@ -1525,7 +1525,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
53917 spin_lock_init(&dev->se_tmr_lock);
53918 spin_lock_init(&dev->qf_cmd_lock);
53919 sema_init(&dev->caw_sem, 1);
53920- atomic_set(&dev->dev_ordered_id, 0);
53921+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
53922 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
53923 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
53924 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
53925diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
53926index 7fa62fc..abdd041 100644
53927--- a/drivers/target/target_core_transport.c
53928+++ b/drivers/target/target_core_transport.c
53929@@ -1165,7 +1165,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
53930 * Used to determine when ORDERED commands should go from
53931 * Dormant to Active status.
53932 */
53933- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
53934+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
53935 smp_mb__after_atomic();
53936 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
53937 cmd->se_ordered_id, cmd->sam_task_attr,
53938diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
53939index 4b2b999..cad9fa5 100644
53940--- a/drivers/thermal/of-thermal.c
53941+++ b/drivers/thermal/of-thermal.c
53942@@ -30,6 +30,7 @@
53943 #include <linux/err.h>
53944 #include <linux/export.h>
53945 #include <linux/string.h>
53946+#include <linux/mm.h>
53947
53948 #include "thermal_core.h"
53949
53950@@ -341,8 +342,10 @@ thermal_zone_of_add_sensor(struct device_node *zone,
53951 tz->get_trend = get_trend;
53952 tz->sensor_data = data;
53953
53954- tzd->ops->get_temp = of_thermal_get_temp;
53955- tzd->ops->get_trend = of_thermal_get_trend;
53956+ pax_open_kernel();
53957+ *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
53958+ *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
53959+ pax_close_kernel();
53960 mutex_unlock(&tzd->lock);
53961
53962 return tzd;
53963@@ -461,8 +464,10 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
53964 return;
53965
53966 mutex_lock(&tzd->lock);
53967- tzd->ops->get_temp = NULL;
53968- tzd->ops->get_trend = NULL;
53969+ pax_open_kernel();
53970+ *(void **)&tzd->ops->get_temp = NULL;
53971+ *(void **)&tzd->ops->get_trend = NULL;
53972+ pax_close_kernel();
53973
53974 tz->get_temp = NULL;
53975 tz->get_trend = NULL;
53976diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
53977index a57bb5a..1f727d33 100644
53978--- a/drivers/tty/cyclades.c
53979+++ b/drivers/tty/cyclades.c
53980@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
53981 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
53982 info->port.count);
53983 #endif
53984- info->port.count++;
53985+ atomic_inc(&info->port.count);
53986 #ifdef CY_DEBUG_COUNT
53987 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
53988- current->pid, info->port.count);
53989+ current->pid, atomic_read(&info->port.count));
53990 #endif
53991
53992 /*
53993@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
53994 for (j = 0; j < cy_card[i].nports; j++) {
53995 info = &cy_card[i].ports[j];
53996
53997- if (info->port.count) {
53998+ if (atomic_read(&info->port.count)) {
53999 /* XXX is the ldisc num worth this? */
54000 struct tty_struct *tty;
54001 struct tty_ldisc *ld;
54002diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
54003index 4fcec1d..5a036f7 100644
54004--- a/drivers/tty/hvc/hvc_console.c
54005+++ b/drivers/tty/hvc/hvc_console.c
54006@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
54007
54008 spin_lock_irqsave(&hp->port.lock, flags);
54009 /* Check and then increment for fast path open. */
54010- if (hp->port.count++ > 0) {
54011+ if (atomic_inc_return(&hp->port.count) > 1) {
54012 spin_unlock_irqrestore(&hp->port.lock, flags);
54013 hvc_kick();
54014 return 0;
54015@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
54016
54017 spin_lock_irqsave(&hp->port.lock, flags);
54018
54019- if (--hp->port.count == 0) {
54020+ if (atomic_dec_return(&hp->port.count) == 0) {
54021 spin_unlock_irqrestore(&hp->port.lock, flags);
54022 /* We are done with the tty pointer now. */
54023 tty_port_tty_set(&hp->port, NULL);
54024@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
54025 */
54026 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
54027 } else {
54028- if (hp->port.count < 0)
54029+ if (atomic_read(&hp->port.count) < 0)
54030 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
54031- hp->vtermno, hp->port.count);
54032+ hp->vtermno, atomic_read(&hp->port.count));
54033 spin_unlock_irqrestore(&hp->port.lock, flags);
54034 }
54035 }
54036@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
54037 * open->hangup case this can be called after the final close so prevent
54038 * that from happening for now.
54039 */
54040- if (hp->port.count <= 0) {
54041+ if (atomic_read(&hp->port.count) <= 0) {
54042 spin_unlock_irqrestore(&hp->port.lock, flags);
54043 return;
54044 }
54045
54046- hp->port.count = 0;
54047+ atomic_set(&hp->port.count, 0);
54048 spin_unlock_irqrestore(&hp->port.lock, flags);
54049 tty_port_tty_set(&hp->port, NULL);
54050
54051@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
54052 return -EPIPE;
54053
54054 /* FIXME what's this (unprotected) check for? */
54055- if (hp->port.count <= 0)
54056+ if (atomic_read(&hp->port.count) <= 0)
54057 return -EIO;
54058
54059 spin_lock_irqsave(&hp->lock, flags);
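
Note how the open fast path changes shape above: atomic_inc_return() yields the value after the increment, so the old post-increment test hp->port.count++ > 0 (old value positive) becomes atomic_inc_return(&hp->port.count) > 1 (new value above one). A userspace sketch using a GCC builtin as a stand-in for the kernel primitive:

#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;

static int atomic_inc_return(atomic_t *v)
{
	return __sync_add_and_fetch(&v->counter, 1);	/* post-increment value */
}

int main(void)
{
	atomic_t count = { 0 };

	if (atomic_inc_return(&count) > 1)	/* equivalent to the old count++ > 0 */
		puts("already open");
	else
		puts("first open");
	return 0;
}
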
54060diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
54061index 81e939e..95ead10 100644
54062--- a/drivers/tty/hvc/hvcs.c
54063+++ b/drivers/tty/hvc/hvcs.c
54064@@ -83,6 +83,7 @@
54065 #include <asm/hvcserver.h>
54066 #include <asm/uaccess.h>
54067 #include <asm/vio.h>
54068+#include <asm/local.h>
54069
54070 /*
54071 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
54072@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
54073
54074 spin_lock_irqsave(&hvcsd->lock, flags);
54075
54076- if (hvcsd->port.count > 0) {
54077+ if (atomic_read(&hvcsd->port.count) > 0) {
54078 spin_unlock_irqrestore(&hvcsd->lock, flags);
54079 printk(KERN_INFO "HVCS: vterm state unchanged. "
54080 "The hvcs device node is still in use.\n");
54081@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
54082 }
54083 }
54084
54085- hvcsd->port.count = 0;
54086+ atomic_set(&hvcsd->port.count, 0);
54087 hvcsd->port.tty = tty;
54088 tty->driver_data = hvcsd;
54089
54090@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
54091 unsigned long flags;
54092
54093 spin_lock_irqsave(&hvcsd->lock, flags);
54094- hvcsd->port.count++;
54095+ atomic_inc(&hvcsd->port.count);
54096 hvcsd->todo_mask |= HVCS_SCHED_READ;
54097 spin_unlock_irqrestore(&hvcsd->lock, flags);
54098
54099@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
54100 hvcsd = tty->driver_data;
54101
54102 spin_lock_irqsave(&hvcsd->lock, flags);
54103- if (--hvcsd->port.count == 0) {
54104+ if (atomic_dec_and_test(&hvcsd->port.count)) {
54105
54106 vio_disable_interrupts(hvcsd->vdev);
54107
54108@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
54109
54110 free_irq(irq, hvcsd);
54111 return;
54112- } else if (hvcsd->port.count < 0) {
54113+ } else if (atomic_read(&hvcsd->port.count) < 0) {
54114 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
54115 " is missmanaged.\n",
54116- hvcsd->vdev->unit_address, hvcsd->port.count);
54117+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
54118 }
54119
54120 spin_unlock_irqrestore(&hvcsd->lock, flags);
54121@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
54122
54123 spin_lock_irqsave(&hvcsd->lock, flags);
54124 /* Preserve this so that we know how many kref refs to put */
54125- temp_open_count = hvcsd->port.count;
54126+ temp_open_count = atomic_read(&hvcsd->port.count);
54127
54128 /*
54129 * Don't kref put inside the spinlock because the destruction
54130@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
54131 tty->driver_data = NULL;
54132 hvcsd->port.tty = NULL;
54133
54134- hvcsd->port.count = 0;
54135+ atomic_set(&hvcsd->port.count, 0);
54136
54137 /* This will drop any buffered data on the floor which is OK in a hangup
54138 * scenario. */
54139@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
54140 * the middle of a write operation? This is a crummy place to do this
54141 * but we want to keep it all in the spinlock.
54142 */
54143- if (hvcsd->port.count <= 0) {
54144+ if (atomic_read(&hvcsd->port.count) <= 0) {
54145 spin_unlock_irqrestore(&hvcsd->lock, flags);
54146 return -ENODEV;
54147 }
54148@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
54149 {
54150 struct hvcs_struct *hvcsd = tty->driver_data;
54151
54152- if (!hvcsd || hvcsd->port.count <= 0)
54153+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
54154 return 0;
54155
54156 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
54157diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
54158index 4190199..06d5bfa 100644
54159--- a/drivers/tty/hvc/hvsi.c
54160+++ b/drivers/tty/hvc/hvsi.c
54161@@ -85,7 +85,7 @@ struct hvsi_struct {
54162 int n_outbuf;
54163 uint32_t vtermno;
54164 uint32_t virq;
54165- atomic_t seqno; /* HVSI packet sequence number */
54166+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
54167 uint16_t mctrl;
54168 uint8_t state; /* HVSI protocol state */
54169 uint8_t flags;
54170@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
54171
54172 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
54173 packet.hdr.len = sizeof(struct hvsi_query_response);
54174- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54175+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54176 packet.verb = VSV_SEND_VERSION_NUMBER;
54177 packet.u.version = HVSI_VERSION;
54178 packet.query_seqno = query_seqno+1;
54179@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
54180
54181 packet.hdr.type = VS_QUERY_PACKET_HEADER;
54182 packet.hdr.len = sizeof(struct hvsi_query);
54183- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54184+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54185 packet.verb = verb;
54186
54187 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
54188@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
54189 int wrote;
54190
54191 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
54192- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54193+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54194 packet.hdr.len = sizeof(struct hvsi_control);
54195 packet.verb = VSV_SET_MODEM_CTL;
54196 packet.mask = HVSI_TSDTR;
54197@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
54198 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
54199
54200 packet.hdr.type = VS_DATA_PACKET_HEADER;
54201- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54202+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54203 packet.hdr.len = count + sizeof(struct hvsi_header);
54204 memcpy(&packet.data, buf, count);
54205
54206@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
54207 struct hvsi_control packet __ALIGNED__;
54208
54209 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
54210- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54211+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54212 packet.hdr.len = 6;
54213 packet.verb = VSV_CLOSE_PROTOCOL;
54214
54215@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
54216
54217 tty_port_tty_set(&hp->port, tty);
54218 spin_lock_irqsave(&hp->lock, flags);
54219- hp->port.count++;
54220+ atomic_inc(&hp->port.count);
54221 atomic_set(&hp->seqno, 0);
54222 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
54223 spin_unlock_irqrestore(&hp->lock, flags);
54224@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
54225
54226 spin_lock_irqsave(&hp->lock, flags);
54227
54228- if (--hp->port.count == 0) {
54229+ if (atomic_dec_return(&hp->port.count) == 0) {
54230 tty_port_tty_set(&hp->port, NULL);
54231 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
54232
54233@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
54234
54235 spin_lock_irqsave(&hp->lock, flags);
54236 }
54237- } else if (hp->port.count < 0)
54238+ } else if (atomic_read(&hp->port.count) < 0)
54239 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
54240- hp - hvsi_ports, hp->port.count);
54241+ hp - hvsi_ports, atomic_read(&hp->port.count));
54242
54243 spin_unlock_irqrestore(&hp->lock, flags);
54244 }
54245@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
54246 tty_port_tty_set(&hp->port, NULL);
54247
54248 spin_lock_irqsave(&hp->lock, flags);
54249- hp->port.count = 0;
54250+ atomic_set(&hp->port.count, 0);
54251 hp->n_outbuf = 0;
54252 spin_unlock_irqrestore(&hp->lock, flags);
54253 }
54254diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
54255index 7ae6c29..05c6dba 100644
54256--- a/drivers/tty/hvc/hvsi_lib.c
54257+++ b/drivers/tty/hvc/hvsi_lib.c
54258@@ -8,7 +8,7 @@
54259
54260 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
54261 {
54262- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
54263+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
54264
54265 /* Assumes that always succeeds, works in practice */
54266 return pv->put_chars(pv->termno, (char *)packet, packet->len);
54267@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
54268
54269 /* Reset state */
54270 pv->established = 0;
54271- atomic_set(&pv->seqno, 0);
54272+ atomic_set_unchecked(&pv->seqno, 0);
54273
54274 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
54275
54276diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
54277index 17ee3bf..8d2520d 100644
54278--- a/drivers/tty/ipwireless/tty.c
54279+++ b/drivers/tty/ipwireless/tty.c
54280@@ -28,6 +28,7 @@
54281 #include <linux/tty_driver.h>
54282 #include <linux/tty_flip.h>
54283 #include <linux/uaccess.h>
54284+#include <asm/local.h>
54285
54286 #include "tty.h"
54287 #include "network.h"
54288@@ -98,10 +99,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
54289 mutex_unlock(&tty->ipw_tty_mutex);
54290 return -ENODEV;
54291 }
54292- if (tty->port.count == 0)
54293+ if (atomic_read(&tty->port.count) == 0)
54294 tty->tx_bytes_queued = 0;
54295
54296- tty->port.count++;
54297+ atomic_inc(&tty->port.count);
54298
54299 tty->port.tty = linux_tty;
54300 linux_tty->driver_data = tty;
54301@@ -117,9 +118,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
54302
54303 static void do_ipw_close(struct ipw_tty *tty)
54304 {
54305- tty->port.count--;
54306-
54307- if (tty->port.count == 0) {
54308+ if (atomic_dec_return(&tty->port.count) == 0) {
54309 struct tty_struct *linux_tty = tty->port.tty;
54310
54311 if (linux_tty != NULL) {
54312@@ -140,7 +139,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
54313 return;
54314
54315 mutex_lock(&tty->ipw_tty_mutex);
54316- if (tty->port.count == 0) {
54317+ if (atomic_read(&tty->port.count) == 0) {
54318 mutex_unlock(&tty->ipw_tty_mutex);
54319 return;
54320 }
54321@@ -163,7 +162,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
54322
54323 mutex_lock(&tty->ipw_tty_mutex);
54324
54325- if (!tty->port.count) {
54326+ if (!atomic_read(&tty->port.count)) {
54327 mutex_unlock(&tty->ipw_tty_mutex);
54328 return;
54329 }
54330@@ -202,7 +201,7 @@ static int ipw_write(struct tty_struct *linux_tty,
54331 return -ENODEV;
54332
54333 mutex_lock(&tty->ipw_tty_mutex);
54334- if (!tty->port.count) {
54335+ if (!atomic_read(&tty->port.count)) {
54336 mutex_unlock(&tty->ipw_tty_mutex);
54337 return -EINVAL;
54338 }
54339@@ -242,7 +241,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
54340 if (!tty)
54341 return -ENODEV;
54342
54343- if (!tty->port.count)
54344+ if (!atomic_read(&tty->port.count))
54345 return -EINVAL;
54346
54347 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
54348@@ -284,7 +283,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
54349 if (!tty)
54350 return 0;
54351
54352- if (!tty->port.count)
54353+ if (!atomic_read(&tty->port.count))
54354 return 0;
54355
54356 return tty->tx_bytes_queued;
54357@@ -365,7 +364,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
54358 if (!tty)
54359 return -ENODEV;
54360
54361- if (!tty->port.count)
54362+ if (!atomic_read(&tty->port.count))
54363 return -EINVAL;
54364
54365 return get_control_lines(tty);
54366@@ -381,7 +380,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
54367 if (!tty)
54368 return -ENODEV;
54369
54370- if (!tty->port.count)
54371+ if (!atomic_read(&tty->port.count))
54372 return -EINVAL;
54373
54374 return set_control_lines(tty, set, clear);
54375@@ -395,7 +394,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
54376 if (!tty)
54377 return -ENODEV;
54378
54379- if (!tty->port.count)
54380+ if (!atomic_read(&tty->port.count))
54381 return -EINVAL;
54382
54383 /* FIXME: Exactly how is the tty object locked here .. */
54384@@ -551,7 +550,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
54385 * are gone */
54386 mutex_lock(&ttyj->ipw_tty_mutex);
54387 }
54388- while (ttyj->port.count)
54389+ while (atomic_read(&ttyj->port.count))
54390 do_ipw_close(ttyj);
54391 ipwireless_disassociate_network_ttys(network,
54392 ttyj->channel_idx);
54393diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
54394index 1deaca4..c8582d4 100644
54395--- a/drivers/tty/moxa.c
54396+++ b/drivers/tty/moxa.c
54397@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
54398 }
54399
54400 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
54401- ch->port.count++;
54402+ atomic_inc(&ch->port.count);
54403 tty->driver_data = ch;
54404 tty_port_tty_set(&ch->port, tty);
54405 mutex_lock(&ch->port.mutex);
54406diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
54407index 2ebe47b..3205833 100644
54408--- a/drivers/tty/n_gsm.c
54409+++ b/drivers/tty/n_gsm.c
54410@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
54411 spin_lock_init(&dlci->lock);
54412 mutex_init(&dlci->mutex);
54413 dlci->fifo = &dlci->_fifo;
54414- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
54415+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
54416 kfree(dlci);
54417 return NULL;
54418 }
54419@@ -2954,7 +2954,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
54420 struct gsm_dlci *dlci = tty->driver_data;
54421 struct tty_port *port = &dlci->port;
54422
54423- port->count++;
54424+ atomic_inc(&port->count);
54425 tty_port_tty_set(port, tty);
54426
54427 dlci->modem_rx = 0;
54428diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
54429index f44f1ba..a8d5915 100644
54430--- a/drivers/tty/n_tty.c
54431+++ b/drivers/tty/n_tty.c
54432@@ -115,7 +115,7 @@ struct n_tty_data {
54433 int minimum_to_wake;
54434
54435 /* consumer-published */
54436- size_t read_tail;
54437+ size_t read_tail __intentional_overflow(-1);
54438 size_t line_start;
54439
54440 /* protected by output lock */
54441@@ -2517,6 +2517,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
54442 {
54443 *ops = tty_ldisc_N_TTY;
54444 ops->owner = NULL;
54445- ops->refcount = ops->flags = 0;
54446+ atomic_set(&ops->refcount, 0);
54447+ ops->flags = 0;
54448 }
54449 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
54450diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
54451index 25c9bc7..24077b7 100644
54452--- a/drivers/tty/pty.c
54453+++ b/drivers/tty/pty.c
54454@@ -790,8 +790,10 @@ static void __init unix98_pty_init(void)
54455 panic("Couldn't register Unix98 pts driver");
54456
54457 /* Now create the /dev/ptmx special device */
54458+ pax_open_kernel();
54459 tty_default_fops(&ptmx_fops);
54460- ptmx_fops.open = ptmx_open;
54461+ *(void **)&ptmx_fops.open = ptmx_open;
54462+ pax_close_kernel();
54463
54464 cdev_init(&ptmx_cdev, &ptmx_fops);
54465 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
54466diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
54467index 383c4c7..d408e21 100644
54468--- a/drivers/tty/rocket.c
54469+++ b/drivers/tty/rocket.c
54470@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
54471 tty->driver_data = info;
54472 tty_port_tty_set(port, tty);
54473
54474- if (port->count++ == 0) {
54475+ if (atomic_inc_return(&port->count) == 1) {
54476 atomic_inc(&rp_num_ports_open);
54477
54478 #ifdef ROCKET_DEBUG_OPEN
54479@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
54480 #endif
54481 }
54482 #ifdef ROCKET_DEBUG_OPEN
54483- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
54484+	printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
54485 #endif
54486
54487 /*
54488@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
54489 spin_unlock_irqrestore(&info->port.lock, flags);
54490 return;
54491 }
54492- if (info->port.count)
54493+ if (atomic_read(&info->port.count))
54494 atomic_dec(&rp_num_ports_open);
54495 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
54496 spin_unlock_irqrestore(&info->port.lock, flags);
54497diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
54498index 1274499..f541382 100644
54499--- a/drivers/tty/serial/ioc4_serial.c
54500+++ b/drivers/tty/serial/ioc4_serial.c
54501@@ -437,7 +437,7 @@ struct ioc4_soft {
54502 } is_intr_info[MAX_IOC4_INTR_ENTS];
54503
54504 /* Number of entries active in the above array */
54505- atomic_t is_num_intrs;
54506+ atomic_unchecked_t is_num_intrs;
54507 } is_intr_type[IOC4_NUM_INTR_TYPES];
54508
54509 /* is_ir_lock must be held while
54510@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
54511 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
54512 || (type == IOC4_OTHER_INTR_TYPE)));
54513
54514- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
54515+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
54516 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
54517
54518 /* Save off the lower level interrupt handler */
54519@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
54520
54521 soft = arg;
54522 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
54523- num_intrs = (int)atomic_read(
54524+ num_intrs = (int)atomic_read_unchecked(
54525 &soft->is_intr_type[intr_type].is_num_intrs);
54526
54527 this_mir = this_ir = pending_intrs(soft, intr_type);
54528diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
54529index cfadf29..8cf4595 100644
54530--- a/drivers/tty/serial/kgdb_nmi.c
54531+++ b/drivers/tty/serial/kgdb_nmi.c
54532@@ -51,7 +51,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
54533 * I/O utilities that messages sent to the console will automatically
54534 * be displayed on the dbg_io.
54535 */
54536- dbg_io_ops->is_console = true;
54537+ pax_open_kernel();
54538+ *(int *)&dbg_io_ops->is_console = true;
54539+ pax_close_kernel();
54540
54541 return 0;
54542 }
54543diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
54544index a260cde..6b2b5ce 100644
54545--- a/drivers/tty/serial/kgdboc.c
54546+++ b/drivers/tty/serial/kgdboc.c
54547@@ -24,8 +24,9 @@
54548 #define MAX_CONFIG_LEN 40
54549
54550 static struct kgdb_io kgdboc_io_ops;
54551+static struct kgdb_io kgdboc_io_ops_console;
54552
54553-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
54554+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
54555 static int configured = -1;
54556
54557 static char config[MAX_CONFIG_LEN];
54558@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
54559 kgdboc_unregister_kbd();
54560 if (configured == 1)
54561 kgdb_unregister_io_module(&kgdboc_io_ops);
54562+ else if (configured == 2)
54563+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
54564 }
54565
54566 static int configure_kgdboc(void)
54567@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
54568 int err;
54569 char *cptr = config;
54570 struct console *cons;
54571+ int is_console = 0;
54572
54573 err = kgdboc_option_setup(config);
54574 if (err || !strlen(config) || isspace(config[0]))
54575 goto noconfig;
54576
54577 err = -ENODEV;
54578- kgdboc_io_ops.is_console = 0;
54579 kgdb_tty_driver = NULL;
54580
54581 kgdboc_use_kms = 0;
54582@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
54583 int idx;
54584 if (cons->device && cons->device(cons, &idx) == p &&
54585 idx == tty_line) {
54586- kgdboc_io_ops.is_console = 1;
54587+ is_console = 1;
54588 break;
54589 }
54590 cons = cons->next;
54591@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
54592 kgdb_tty_line = tty_line;
54593
54594 do_register:
54595- err = kgdb_register_io_module(&kgdboc_io_ops);
54596+ if (is_console) {
54597+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
54598+ configured = 2;
54599+ } else {
54600+ err = kgdb_register_io_module(&kgdboc_io_ops);
54601+ configured = 1;
54602+ }
54603 if (err)
54604 goto noconfig;
54605
54606@@ -205,8 +214,6 @@ do_register:
54607 if (err)
54608 goto nmi_con_failed;
54609
54610- configured = 1;
54611-
54612 return 0;
54613
54614 nmi_con_failed:
54615@@ -223,7 +230,7 @@ noconfig:
54616 static int __init init_kgdboc(void)
54617 {
54618 /* Already configured? */
54619- if (configured == 1)
54620+ if (configured >= 1)
54621 return 0;
54622
54623 return configure_kgdboc();
54624@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
54625 if (config[len - 1] == '\n')
54626 config[len - 1] = '\0';
54627
54628- if (configured == 1)
54629+ if (configured >= 1)
54630 cleanup_kgdboc();
54631
54632 /* Go and configure with the new params. */
54633@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
54634 .post_exception = kgdboc_post_exp_handler,
54635 };
54636
54637+static struct kgdb_io kgdboc_io_ops_console = {
54638+ .name = "kgdboc",
54639+ .read_char = kgdboc_get_char,
54640+ .write_char = kgdboc_put_char,
54641+ .pre_exception = kgdboc_pre_exp_handler,
54642+ .post_exception = kgdboc_post_exp_handler,
54643+ .is_console = 1
54644+};
54645+
54646 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
54647 /* This is only available if kgdboc is a built in for early debugging */
54648 static int __init kgdboc_early_init(char *opt)
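
With kgdb_io ops constified, kgdboc can no longer flip is_console at runtime, so the patch builds two static objects and encodes which one was registered in configured (1 = plain, 2 = console). A compact standalone analogue of that selection, with all names made up:

#include <stdio.h>

struct io_ops { const char *name; int is_console; };

static const struct io_ops io_ops         = { "kgdboc", 0 };
static const struct io_ops io_ops_console = { "kgdboc", 1 };

static const struct io_ops *registered;
static int configured;	/* 1 = plain ops, 2 = console ops, mirroring the patch */

int main(void)
{
	int is_console = 1;	/* assume the tty was found on a registered console */

	registered = is_console ? &io_ops_console : &io_ops;
	configured = is_console ? 2 : 1;
	printf("%s is_console=%d configured=%d\n",
	       registered->name, registered->is_console, configured);
	return 0;
}
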
54649diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
54650index 72000a6..a190bc4 100644
54651--- a/drivers/tty/serial/msm_serial.c
54652+++ b/drivers/tty/serial/msm_serial.c
54653@@ -981,7 +981,7 @@ static struct uart_driver msm_uart_driver = {
54654 .cons = MSM_CONSOLE,
54655 };
54656
54657-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
54658+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
54659
54660 static const struct of_device_id msm_uartdm_table[] = {
54661 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
54662@@ -1000,7 +1000,7 @@ static int msm_serial_probe(struct platform_device *pdev)
54663 int irq;
54664
54665 if (pdev->id == -1)
54666- pdev->id = atomic_inc_return(&msm_uart_next_id) - 1;
54667+ pdev->id = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
54668
54669 if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
54670 return -ENXIO;
54671diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
54672index c1d3ebd..f618a93 100644
54673--- a/drivers/tty/serial/samsung.c
54674+++ b/drivers/tty/serial/samsung.c
54675@@ -486,11 +486,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
54676 }
54677 }
54678
54679+static int s3c64xx_serial_startup(struct uart_port *port);
54680 static int s3c24xx_serial_startup(struct uart_port *port)
54681 {
54682 struct s3c24xx_uart_port *ourport = to_ourport(port);
54683 int ret;
54684
54685+ /* Startup sequence is different for s3c64xx and higher SoCs */
54686+ if (s3c24xx_serial_has_interrupt_mask(port))
54687+ return s3c64xx_serial_startup(port);
54688+
54689 dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
54690 port, (unsigned long long)port->mapbase, port->membase);
54691
54692@@ -1164,10 +1169,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
54693 /* setup info for port */
54694 port->dev = &platdev->dev;
54695
54696- /* Startup sequence is different for s3c64xx and higher SoC's */
54697- if (s3c24xx_serial_has_interrupt_mask(port))
54698- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
54699-
54700 port->uartclk = 1;
54701
54702 if (cfg->uart_flags & UPF_CONS_FLOW) {
54703diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
54704index ef2fb36..238d80c 100644
54705--- a/drivers/tty/serial/serial_core.c
54706+++ b/drivers/tty/serial/serial_core.c
54707@@ -1336,7 +1336,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
54708
54709 pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
54710
54711- if (!port->count || tty_port_close_start(port, tty, filp) == 0)
54712+ if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
54713 return;
54714
54715 /*
54716@@ -1463,7 +1463,7 @@ static void uart_hangup(struct tty_struct *tty)
54717 uart_flush_buffer(tty);
54718 uart_shutdown(tty, state);
54719 spin_lock_irqsave(&port->lock, flags);
54720- port->count = 0;
54721+ atomic_set(&port->count, 0);
54722 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
54723 spin_unlock_irqrestore(&port->lock, flags);
54724 tty_port_tty_set(port, NULL);
54725@@ -1561,7 +1561,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
54726 goto end;
54727 }
54728
54729- port->count++;
54730+ atomic_inc(&port->count);
54731 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
54732 retval = -ENXIO;
54733 goto err_dec_count;
54734@@ -1601,7 +1601,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
54735 end:
54736 return retval;
54737 err_dec_count:
54738- port->count--;
54739+ atomic_dec(&port->count);
54740 mutex_unlock(&port->mutex);
54741 goto end;
54742 }
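The serial_core hunks are one piece of a tree-wide conversion of tty_port.count to atomic_t (see the tty_port.c hunks further down), so open, close, and hangup can adjust the reference count without relying on the port lock for the counter itself. The increment and the error-path decrement must stay paired, as in this sketch; port_is_dead() is a hypothetical stand-in for the UPF_DEAD check in uart_open():

/* Sketch of the paired open/error-path accounting. */
static int example_open(struct tty_port *port)
{
	atomic_inc(&port->count);
	if (port_is_dead(port)) {		/* hypothetical predicate */
		atomic_dec(&port->count);	/* undo before bailing out */
		return -ENXIO;
	}
	return 0;
}
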
54743diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
54744index d48e040..0f52764 100644
54745--- a/drivers/tty/synclink.c
54746+++ b/drivers/tty/synclink.c
54747@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
54748
54749 if (debug_level >= DEBUG_LEVEL_INFO)
54750 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
54751- __FILE__,__LINE__, info->device_name, info->port.count);
54752+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
54753
54754 if (tty_port_close_start(&info->port, tty, filp) == 0)
54755 goto cleanup;
54756@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
54757 cleanup:
54758 if (debug_level >= DEBUG_LEVEL_INFO)
54759 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
54760- tty->driver->name, info->port.count);
54761+ tty->driver->name, atomic_read(&info->port.count));
54762
54763 } /* end of mgsl_close() */
54764
54765@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
54766
54767 mgsl_flush_buffer(tty);
54768 shutdown(info);
54769-
54770- info->port.count = 0;
54771+
54772+ atomic_set(&info->port.count, 0);
54773 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54774 info->port.tty = NULL;
54775
54776@@ -3297,12 +3297,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54777
54778 if (debug_level >= DEBUG_LEVEL_INFO)
54779 printk("%s(%d):block_til_ready before block on %s count=%d\n",
54780- __FILE__,__LINE__, tty->driver->name, port->count );
54781+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54782
54783 spin_lock_irqsave(&info->irq_spinlock, flags);
54784 if (!tty_hung_up_p(filp)) {
54785 extra_count = true;
54786- port->count--;
54787+ atomic_dec(&port->count);
54788 }
54789 spin_unlock_irqrestore(&info->irq_spinlock, flags);
54790 port->blocked_open++;
54791@@ -3331,7 +3331,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54792
54793 if (debug_level >= DEBUG_LEVEL_INFO)
54794 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
54795- __FILE__,__LINE__, tty->driver->name, port->count );
54796+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54797
54798 tty_unlock(tty);
54799 schedule();
54800@@ -3343,12 +3343,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54801
54802 /* FIXME: Racy on hangup during close wait */
54803 if (extra_count)
54804- port->count++;
54805+ atomic_inc(&port->count);
54806 port->blocked_open--;
54807
54808 if (debug_level >= DEBUG_LEVEL_INFO)
54809 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
54810- __FILE__,__LINE__, tty->driver->name, port->count );
54811+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54812
54813 if (!retval)
54814 port->flags |= ASYNC_NORMAL_ACTIVE;
54815@@ -3400,7 +3400,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
54816
54817 if (debug_level >= DEBUG_LEVEL_INFO)
54818 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
54819- __FILE__,__LINE__,tty->driver->name, info->port.count);
54820+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
54821
54822 /* If port is closing, signal caller to try again */
54823 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
54824@@ -3419,10 +3419,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
54825 spin_unlock_irqrestore(&info->netlock, flags);
54826 goto cleanup;
54827 }
54828- info->port.count++;
54829+ atomic_inc(&info->port.count);
54830 spin_unlock_irqrestore(&info->netlock, flags);
54831
54832- if (info->port.count == 1) {
54833+ if (atomic_read(&info->port.count) == 1) {
54834 /* 1st open on this device, init hardware */
54835 retval = startup(info);
54836 if (retval < 0)
54837@@ -3446,8 +3446,8 @@ cleanup:
54838 if (retval) {
54839 if (tty->count == 1)
54840 info->port.tty = NULL; /* tty layer will release tty struct */
54841- if(info->port.count)
54842- info->port.count--;
54843+ if (atomic_read(&info->port.count))
54844+ atomic_dec(&info->port.count);
54845 }
54846
54847 return retval;
54848@@ -7665,7 +7665,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54849 unsigned short new_crctype;
54850
54851 /* return error if TTY interface open */
54852- if (info->port.count)
54853+ if (atomic_read(&info->port.count))
54854 return -EBUSY;
54855
54856 switch (encoding)
54857@@ -7760,7 +7760,7 @@ static int hdlcdev_open(struct net_device *dev)
54858
54859 /* arbitrate between network and tty opens */
54860 spin_lock_irqsave(&info->netlock, flags);
54861- if (info->port.count != 0 || info->netcount != 0) {
54862+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54863 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
54864 spin_unlock_irqrestore(&info->netlock, flags);
54865 return -EBUSY;
54866@@ -7846,7 +7846,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54867 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
54868
54869 /* return error if TTY interface open */
54870- if (info->port.count)
54871+ if (atomic_read(&info->port.count))
54872 return -EBUSY;
54873
54874 if (cmd != SIOCWANDEV)
54875diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
54876index c359a91..959fc26 100644
54877--- a/drivers/tty/synclink_gt.c
54878+++ b/drivers/tty/synclink_gt.c
54879@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
54880 tty->driver_data = info;
54881 info->port.tty = tty;
54882
54883- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
54884+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
54885
54886 /* If port is closing, signal caller to try again */
54887 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
54888@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
54889 mutex_unlock(&info->port.mutex);
54890 goto cleanup;
54891 }
54892- info->port.count++;
54893+ atomic_inc(&info->port.count);
54894 spin_unlock_irqrestore(&info->netlock, flags);
54895
54896- if (info->port.count == 1) {
54897+ if (atomic_read(&info->port.count) == 1) {
54898 /* 1st open on this device, init hardware */
54899 retval = startup(info);
54900 if (retval < 0) {
54901@@ -715,8 +715,8 @@ cleanup:
54902 if (retval) {
54903 if (tty->count == 1)
54904 info->port.tty = NULL; /* tty layer will release tty struct */
54905- if(info->port.count)
54906- info->port.count--;
54907+ if (atomic_read(&info->port.count))
54908+ atomic_dec(&info->port.count);
54909 }
54910
54911 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
54912@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54913
54914 if (sanity_check(info, tty->name, "close"))
54915 return;
54916- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
54917+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
54918
54919 if (tty_port_close_start(&info->port, tty, filp) == 0)
54920 goto cleanup;
54921@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54922 tty_port_close_end(&info->port, tty);
54923 info->port.tty = NULL;
54924 cleanup:
54925- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
54926+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
54927 }
54928
54929 static void hangup(struct tty_struct *tty)
54930@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
54931 shutdown(info);
54932
54933 spin_lock_irqsave(&info->port.lock, flags);
54934- info->port.count = 0;
54935+ atomic_set(&info->port.count, 0);
54936 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54937 info->port.tty = NULL;
54938 spin_unlock_irqrestore(&info->port.lock, flags);
54939@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54940 unsigned short new_crctype;
54941
54942 /* return error if TTY interface open */
54943- if (info->port.count)
54944+ if (atomic_read(&info->port.count))
54945 return -EBUSY;
54946
54947 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
54948@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
54949
54950 /* arbitrate between network and tty opens */
54951 spin_lock_irqsave(&info->netlock, flags);
54952- if (info->port.count != 0 || info->netcount != 0) {
54953+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54954 DBGINFO(("%s hdlc_open busy\n", dev->name));
54955 spin_unlock_irqrestore(&info->netlock, flags);
54956 return -EBUSY;
54957@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54958 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
54959
54960 /* return error if TTY interface open */
54961- if (info->port.count)
54962+ if (atomic_read(&info->port.count))
54963 return -EBUSY;
54964
54965 if (cmd != SIOCWANDEV)
54966@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
54967 if (port == NULL)
54968 continue;
54969 spin_lock(&port->lock);
54970- if ((port->port.count || port->netcount) &&
54971+ if ((atomic_read(&port->port.count) || port->netcount) &&
54972 port->pending_bh && !port->bh_running &&
54973 !port->bh_requested) {
54974 DBGISR(("%s bh queued\n", port->device_name));
54975@@ -3302,7 +3302,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54976 spin_lock_irqsave(&info->lock, flags);
54977 if (!tty_hung_up_p(filp)) {
54978 extra_count = true;
54979- port->count--;
54980+ atomic_dec(&port->count);
54981 }
54982 spin_unlock_irqrestore(&info->lock, flags);
54983 port->blocked_open++;
54984@@ -3339,7 +3339,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54985 remove_wait_queue(&port->open_wait, &wait);
54986
54987 if (extra_count)
54988- port->count++;
54989+ atomic_inc(&port->count);
54990 port->blocked_open--;
54991
54992 if (!retval)
54993diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
54994index 53ba853..3c30f6d 100644
54995--- a/drivers/tty/synclinkmp.c
54996+++ b/drivers/tty/synclinkmp.c
54997@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
54998
54999 if (debug_level >= DEBUG_LEVEL_INFO)
55000 printk("%s(%d):%s open(), old ref count = %d\n",
55001- __FILE__,__LINE__,tty->driver->name, info->port.count);
55002+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
55003
55004 /* If port is closing, signal caller to try again */
55005 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
55006@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
55007 spin_unlock_irqrestore(&info->netlock, flags);
55008 goto cleanup;
55009 }
55010- info->port.count++;
55011+ atomic_inc(&info->port.count);
55012 spin_unlock_irqrestore(&info->netlock, flags);
55013
55014- if (info->port.count == 1) {
55015+ if (atomic_read(&info->port.count) == 1) {
55016 /* 1st open on this device, init hardware */
55017 retval = startup(info);
55018 if (retval < 0)
55019@@ -796,8 +796,8 @@ cleanup:
55020 if (retval) {
55021 if (tty->count == 1)
55022 info->port.tty = NULL; /* tty layer will release tty struct */
55023- if(info->port.count)
55024- info->port.count--;
55025+ if (atomic_read(&info->port.count))
55026+ atomic_dec(&info->port.count);
55027 }
55028
55029 return retval;
55030@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
55031
55032 if (debug_level >= DEBUG_LEVEL_INFO)
55033 printk("%s(%d):%s close() entry, count=%d\n",
55034- __FILE__,__LINE__, info->device_name, info->port.count);
55035+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
55036
55037 if (tty_port_close_start(&info->port, tty, filp) == 0)
55038 goto cleanup;
55039@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
55040 cleanup:
55041 if (debug_level >= DEBUG_LEVEL_INFO)
55042 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
55043- tty->driver->name, info->port.count);
55044+ tty->driver->name, atomic_read(&info->port.count));
55045 }
55046
55047 /* Called by tty_hangup() when a hangup is signaled.
55048@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
55049 shutdown(info);
55050
55051 spin_lock_irqsave(&info->port.lock, flags);
55052- info->port.count = 0;
55053+ atomic_set(&info->port.count, 0);
55054 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
55055 info->port.tty = NULL;
55056 spin_unlock_irqrestore(&info->port.lock, flags);
55057@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
55058 unsigned short new_crctype;
55059
55060 /* return error if TTY interface open */
55061- if (info->port.count)
55062+ if (atomic_read(&info->port.count))
55063 return -EBUSY;
55064
55065 switch (encoding)
55066@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
55067
55068 /* arbitrate between network and tty opens */
55069 spin_lock_irqsave(&info->netlock, flags);
55070- if (info->port.count != 0 || info->netcount != 0) {
55071+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
55072 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
55073 spin_unlock_irqrestore(&info->netlock, flags);
55074 return -EBUSY;
55075@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
55076 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
55077
55078 /* return error if TTY interface open */
55079- if (info->port.count)
55080+ if (atomic_read(&info->port.count))
55081 return -EBUSY;
55082
55083 if (cmd != SIOCWANDEV)
55084@@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
55085 * do not request bottom half processing if the
55086 * device is not open in a normal mode.
55087 */
55088- if ( port && (port->port.count || port->netcount) &&
55089+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
55090 port->pending_bh && !port->bh_running &&
55091 !port->bh_requested ) {
55092 if ( debug_level >= DEBUG_LEVEL_ISR )
55093@@ -3319,12 +3319,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
55094
55095 if (debug_level >= DEBUG_LEVEL_INFO)
55096 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
55097- __FILE__,__LINE__, tty->driver->name, port->count );
55098+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
55099
55100 spin_lock_irqsave(&info->lock, flags);
55101 if (!tty_hung_up_p(filp)) {
55102 extra_count = true;
55103- port->count--;
55104+ atomic_dec(&port->count);
55105 }
55106 spin_unlock_irqrestore(&info->lock, flags);
55107 port->blocked_open++;
55108@@ -3353,7 +3353,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
55109
55110 if (debug_level >= DEBUG_LEVEL_INFO)
55111 printk("%s(%d):%s block_til_ready() count=%d\n",
55112- __FILE__,__LINE__, tty->driver->name, port->count );
55113+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
55114
55115 tty_unlock(tty);
55116 schedule();
55117@@ -3364,12 +3364,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
55118 remove_wait_queue(&port->open_wait, &wait);
55119
55120 if (extra_count)
55121- port->count++;
55122+ atomic_inc(&port->count);
55123 port->blocked_open--;
55124
55125 if (debug_level >= DEBUG_LEVEL_INFO)
55126 printk("%s(%d):%s block_til_ready() after, count=%d\n",
55127- __FILE__,__LINE__, tty->driver->name, port->count );
55128+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
55129
55130 if (!retval)
55131 port->flags |= ASYNC_NORMAL_ACTIVE;
55132diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
55133index 454b658..57b1430 100644
55134--- a/drivers/tty/sysrq.c
55135+++ b/drivers/tty/sysrq.c
55136@@ -1084,7 +1084,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
55137 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
55138 size_t count, loff_t *ppos)
55139 {
55140- if (count) {
55141+ if (count && capable(CAP_SYS_ADMIN)) {
55142 char c;
55143
55144 if (get_user(c, buf))
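The sysrq hunk above makes /proc/sysrq-trigger writes a no-op for callers without CAP_SYS_ADMIN while still reporting the bytes as consumed. A sketch of the handler's shape after the change; the dispatch call is a hypothetical stand-in:

/* Sketch; __handle_sysrq_key() stands in for the real dispatch. */
static ssize_t guarded_trigger_write(struct file *file,
				     const char __user *buf,
				     size_t count, loff_t *ppos)
{
	if (count && capable(CAP_SYS_ADMIN)) {
		char c;

		if (get_user(c, buf))
			return -EFAULT;
		__handle_sysrq_key(c);	/* hypothetical */
	}
	return count;			/* consumed either way */
}
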
55145diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
55146index 3411071..86f2cf2 100644
55147--- a/drivers/tty/tty_io.c
55148+++ b/drivers/tty/tty_io.c
55149@@ -3475,7 +3475,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
55150
55151 void tty_default_fops(struct file_operations *fops)
55152 {
55153- *fops = tty_fops;
55154+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
55155 }
55156
55157 /*
55158diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
55159index 2d822aa..a566234 100644
55160--- a/drivers/tty/tty_ldisc.c
55161+++ b/drivers/tty/tty_ldisc.c
55162@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
55163 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
55164 tty_ldiscs[disc] = new_ldisc;
55165 new_ldisc->num = disc;
55166- new_ldisc->refcount = 0;
55167+ atomic_set(&new_ldisc->refcount, 0);
55168 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
55169
55170 return ret;
55171@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
55172 return -EINVAL;
55173
55174 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
55175- if (tty_ldiscs[disc]->refcount)
55176+ if (atomic_read(&tty_ldiscs[disc]->refcount))
55177 ret = -EBUSY;
55178 else
55179 tty_ldiscs[disc] = NULL;
55180@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
55181 if (ldops) {
55182 ret = ERR_PTR(-EAGAIN);
55183 if (try_module_get(ldops->owner)) {
55184- ldops->refcount++;
55185+ atomic_inc(&ldops->refcount);
55186 ret = ldops;
55187 }
55188 }
55189@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
55190 unsigned long flags;
55191
55192 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
55193- ldops->refcount--;
55194+ atomic_dec(&ldops->refcount);
55195 module_put(ldops->owner);
55196 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
55197 }
55198diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
55199index 3f746c8..2f2fcaa 100644
55200--- a/drivers/tty/tty_port.c
55201+++ b/drivers/tty/tty_port.c
55202@@ -235,7 +235,7 @@ void tty_port_hangup(struct tty_port *port)
55203 unsigned long flags;
55204
55205 spin_lock_irqsave(&port->lock, flags);
55206- port->count = 0;
55207+ atomic_set(&port->count, 0);
55208 port->flags &= ~ASYNC_NORMAL_ACTIVE;
55209 tty = port->tty;
55210 if (tty)
55211@@ -393,7 +393,7 @@ int tty_port_block_til_ready(struct tty_port *port,
55212 /* The port lock protects the port counts */
55213 spin_lock_irqsave(&port->lock, flags);
55214 if (!tty_hung_up_p(filp))
55215- port->count--;
55216+ atomic_dec(&port->count);
55217 port->blocked_open++;
55218 spin_unlock_irqrestore(&port->lock, flags);
55219
55220@@ -435,7 +435,7 @@ int tty_port_block_til_ready(struct tty_port *port,
55221 we must not mess that up further */
55222 spin_lock_irqsave(&port->lock, flags);
55223 if (!tty_hung_up_p(filp))
55224- port->count++;
55225+ atomic_inc(&port->count);
55226 port->blocked_open--;
55227 if (retval == 0)
55228 port->flags |= ASYNC_NORMAL_ACTIVE;
55229@@ -469,19 +469,19 @@ int tty_port_close_start(struct tty_port *port,
55230 return 0;
55231 }
55232
55233- if (tty->count == 1 && port->count != 1) {
55234+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
55235 printk(KERN_WARNING
55236 "tty_port_close_start: tty->count = 1 port count = %d.\n",
55237- port->count);
55238- port->count = 1;
55239+ atomic_read(&port->count));
55240+ atomic_set(&port->count, 1);
55241 }
55242- if (--port->count < 0) {
55243+ if (atomic_dec_return(&port->count) < 0) {
55244 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
55245- port->count);
55246- port->count = 0;
55247+ atomic_read(&port->count));
55248+ atomic_set(&port->count, 0);
55249 }
55250
55251- if (port->count) {
55252+ if (atomic_read(&port->count)) {
55253 spin_unlock_irqrestore(&port->lock, flags);
55254 return 0;
55255 }
55256@@ -563,7 +563,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
55257 {
55258 spin_lock_irq(&port->lock);
55259 if (!tty_hung_up_p(filp))
55260- ++port->count;
55261+ atomic_inc(&port->count);
55262 spin_unlock_irq(&port->lock);
55263 tty_port_tty_set(port, tty);
55264
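In tty_port_close_start() above, the decrement-and-test collapses into a single atomic_dec_return(), so the underflow check no longer races with other writers between the read and the write. Roughly, as a sketch:

/* Sketch of the close-side accounting after the conversion. */
static bool port_last_close(struct tty_port *port)
{
	if (atomic_dec_return(&port->count) < 0) {
		pr_warn("tty_port: count underflow, clamping to 0\n");
		atomic_set(&port->count, 0);
	}
	return atomic_read(&port->count) == 0;
}
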
55265diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
55266index d0e3a44..5f8b754 100644
55267--- a/drivers/tty/vt/keyboard.c
55268+++ b/drivers/tty/vt/keyboard.c
55269@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
55270 kbd->kbdmode == VC_OFF) &&
55271 value != KVAL(K_SAK))
55272 return; /* SAK is allowed even in raw mode */
55273+
55274+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
55275+ {
55276+ void *func = fn_handler[value];
55277+ if (func == fn_show_state || func == fn_show_ptregs ||
55278+ func == fn_show_mem)
55279+ return;
55280+ }
55281+#endif
55282+
55283 fn_handler[value](vc);
55284 }
55285
55286@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
55287 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
55288 return -EFAULT;
55289
55290- if (!capable(CAP_SYS_TTY_CONFIG))
55291- perm = 0;
55292-
55293 switch (cmd) {
55294 case KDGKBENT:
55295 /* Ensure another thread doesn't free it under us */
55296@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
55297 spin_unlock_irqrestore(&kbd_event_lock, flags);
55298 return put_user(val, &user_kbe->kb_value);
55299 case KDSKBENT:
55300+ if (!capable(CAP_SYS_TTY_CONFIG))
55301+ perm = 0;
55302+
55303 if (!perm)
55304 return -EPERM;
55305 if (!i && v == K_NOSUCHMAP) {
55306@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
55307 int i, j, k;
55308 int ret;
55309
55310- if (!capable(CAP_SYS_TTY_CONFIG))
55311- perm = 0;
55312-
55313 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
55314 if (!kbs) {
55315 ret = -ENOMEM;
55316@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
55317 kfree(kbs);
55318 return ((p && *p) ? -EOVERFLOW : 0);
55319 case KDSKBSENT:
55320+ if (!capable(CAP_SYS_TTY_CONFIG))
55321+ perm = 0;
55322+
55323 if (!perm) {
55324 ret = -EPERM;
55325 goto reterr;
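Two things happen in the keyboard.c hunks: the console show-state, show-registers, and show-memory key handlers are suppressed when GRKERNSEC_PROC hides kernel state from unprivileged users, and the CAP_SYS_TTY_CONFIG check moves from the top of the ioctl handlers into the KDSKBENT/KDSKBSENT write cases, so merely reading a keymap no longer exercises the capability check. The dispatch filter, sketched:

/* Sketch of the k_spec() denylist; fn_show_* are the real handlers the
 * patch suppresses under CONFIG_GRKERNSEC_PROC*. */
static void k_spec_filtered(struct vc_data *vc, unsigned char value)
{
	void *func = fn_handler[value];

	if (func == fn_show_state || func == fn_show_ptregs ||
	    func == fn_show_mem)
		return;			/* would leak kernel state */
	fn_handler[value](vc);
}
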
55326diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
55327index a673e5b..36e5d32 100644
55328--- a/drivers/uio/uio.c
55329+++ b/drivers/uio/uio.c
55330@@ -25,6 +25,7 @@
55331 #include <linux/kobject.h>
55332 #include <linux/cdev.h>
55333 #include <linux/uio_driver.h>
55334+#include <asm/local.h>
55335
55336 #define UIO_MAX_DEVICES (1U << MINORBITS)
55337
55338@@ -32,7 +33,7 @@ struct uio_device {
55339 struct module *owner;
55340 struct device *dev;
55341 int minor;
55342- atomic_t event;
55343+ atomic_unchecked_t event;
55344 struct fasync_struct *async_queue;
55345 wait_queue_head_t wait;
55346 struct uio_info *info;
55347@@ -243,7 +244,7 @@ static ssize_t event_show(struct device *dev,
55348 struct device_attribute *attr, char *buf)
55349 {
55350 struct uio_device *idev = dev_get_drvdata(dev);
55351- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
55352+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
55353 }
55354 static DEVICE_ATTR_RO(event);
55355
55356@@ -405,7 +406,7 @@ void uio_event_notify(struct uio_info *info)
55357 {
55358 struct uio_device *idev = info->uio_dev;
55359
55360- atomic_inc(&idev->event);
55361+ atomic_inc_unchecked(&idev->event);
55362 wake_up_interruptible(&idev->wait);
55363 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
55364 }
55365@@ -458,7 +459,7 @@ static int uio_open(struct inode *inode, struct file *filep)
55366 }
55367
55368 listener->dev = idev;
55369- listener->event_count = atomic_read(&idev->event);
55370+ listener->event_count = atomic_read_unchecked(&idev->event);
55371 filep->private_data = listener;
55372
55373 if (idev->info->open) {
55374@@ -509,7 +510,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
55375 return -EIO;
55376
55377 poll_wait(filep, &idev->wait, wait);
55378- if (listener->event_count != atomic_read(&idev->event))
55379+ if (listener->event_count != atomic_read_unchecked(&idev->event))
55380 return POLLIN | POLLRDNORM;
55381 return 0;
55382 }
55383@@ -534,7 +535,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
55384 do {
55385 set_current_state(TASK_INTERRUPTIBLE);
55386
55387- event_count = atomic_read(&idev->event);
55388+ event_count = atomic_read_unchecked(&idev->event);
55389 if (event_count != listener->event_count) {
55390 if (copy_to_user(buf, &event_count, count))
55391 retval = -EFAULT;
55392@@ -591,9 +592,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
55393 static int uio_find_mem_index(struct vm_area_struct *vma)
55394 {
55395 struct uio_device *idev = vma->vm_private_data;
55396+ unsigned long size;
55397
55398 if (vma->vm_pgoff < MAX_UIO_MAPS) {
55399- if (idev->info->mem[vma->vm_pgoff].size == 0)
55400+ size = idev->info->mem[vma->vm_pgoff].size;
55401+ if (size == 0)
55402+ return -1;
55403+ if (vma->vm_end - vma->vm_start > size)
55404 return -1;
55405 return (int)vma->vm_pgoff;
55406 }
55407@@ -825,7 +830,7 @@ int __uio_register_device(struct module *owner,
55408 idev->owner = owner;
55409 idev->info = info;
55410 init_waitqueue_head(&idev->wait);
55411- atomic_set(&idev->event, 0);
55412+ atomic_set_unchecked(&idev->event, 0);
55413
55414 ret = uio_get_minor(idev);
55415 if (ret)
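The uio_find_mem_index() hunk adds a second rejection: besides an empty slot, a VMA longer than the backing region is refused, so a later remap cannot expose memory past the region's end. A sketch of the check, with a hypothetical region type standing in for the uio_mem array:

/* Sketch; struct mem_region is a hypothetical stand-in for the
 * uio_mem array indexed by vm_pgoff. */
static int find_mem_index(struct vm_area_struct *vma,
			  const struct mem_region *mem, unsigned int nr)
{
	unsigned long size;

	if (vma->vm_pgoff >= nr)
		return -1;
	size = mem[vma->vm_pgoff].size;
	if (size == 0 || vma->vm_end - vma->vm_start > size)
		return -1;
	return (int)vma->vm_pgoff;
}
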
55416diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
55417index 813d4d3..a71934f 100644
55418--- a/drivers/usb/atm/cxacru.c
55419+++ b/drivers/usb/atm/cxacru.c
55420@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
55421 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
55422 if (ret < 2)
55423 return -EINVAL;
55424- if (index < 0 || index > 0x7f)
55425+ if (index > 0x7f)
55426 return -EINVAL;
55427 pos += tmp;
55428
55429diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
55430index dada014..1d0d517 100644
55431--- a/drivers/usb/atm/usbatm.c
55432+++ b/drivers/usb/atm/usbatm.c
55433@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55434 if (printk_ratelimit())
55435 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
55436 __func__, vpi, vci);
55437- atomic_inc(&vcc->stats->rx_err);
55438+ atomic_inc_unchecked(&vcc->stats->rx_err);
55439 return;
55440 }
55441
55442@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55443 if (length > ATM_MAX_AAL5_PDU) {
55444 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
55445 __func__, length, vcc);
55446- atomic_inc(&vcc->stats->rx_err);
55447+ atomic_inc_unchecked(&vcc->stats->rx_err);
55448 goto out;
55449 }
55450
55451@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55452 if (sarb->len < pdu_length) {
55453 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
55454 __func__, pdu_length, sarb->len, vcc);
55455- atomic_inc(&vcc->stats->rx_err);
55456+ atomic_inc_unchecked(&vcc->stats->rx_err);
55457 goto out;
55458 }
55459
55460 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
55461 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
55462 __func__, vcc);
55463- atomic_inc(&vcc->stats->rx_err);
55464+ atomic_inc_unchecked(&vcc->stats->rx_err);
55465 goto out;
55466 }
55467
55468@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55469 if (printk_ratelimit())
55470 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
55471 __func__, length);
55472- atomic_inc(&vcc->stats->rx_drop);
55473+ atomic_inc_unchecked(&vcc->stats->rx_drop);
55474 goto out;
55475 }
55476
55477@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55478
55479 vcc->push(vcc, skb);
55480
55481- atomic_inc(&vcc->stats->rx);
55482+ atomic_inc_unchecked(&vcc->stats->rx);
55483 out:
55484 skb_trim(sarb, 0);
55485 }
55486@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
55487 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
55488
55489 usbatm_pop(vcc, skb);
55490- atomic_inc(&vcc->stats->tx);
55491+ atomic_inc_unchecked(&vcc->stats->tx);
55492
55493 skb = skb_dequeue(&instance->sndqueue);
55494 }
55495@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
55496 if (!left--)
55497 return sprintf(page,
55498 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
55499- atomic_read(&atm_dev->stats.aal5.tx),
55500- atomic_read(&atm_dev->stats.aal5.tx_err),
55501- atomic_read(&atm_dev->stats.aal5.rx),
55502- atomic_read(&atm_dev->stats.aal5.rx_err),
55503- atomic_read(&atm_dev->stats.aal5.rx_drop));
55504+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
55505+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
55506+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
55507+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
55508+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
55509
55510 if (!left--) {
55511 if (instance->disconnected)
55512diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
55513index 2a3bbdf..91d72cf 100644
55514--- a/drivers/usb/core/devices.c
55515+++ b/drivers/usb/core/devices.c
55516@@ -126,7 +126,7 @@ static const char format_endpt[] =
55517 * time it gets called.
55518 */
55519 static struct device_connect_event {
55520- atomic_t count;
55521+ atomic_unchecked_t count;
55522 wait_queue_head_t wait;
55523 } device_event = {
55524 .count = ATOMIC_INIT(1),
55525@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
55526
55527 void usbfs_conn_disc_event(void)
55528 {
55529- atomic_add(2, &device_event.count);
55530+ atomic_add_unchecked(2, &device_event.count);
55531 wake_up(&device_event.wait);
55532 }
55533
55534@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
55535
55536 poll_wait(file, &device_event.wait, wait);
55537
55538- event_count = atomic_read(&device_event.count);
55539+ event_count = atomic_read_unchecked(&device_event.count);
55540 if (file->f_version != event_count) {
55541 file->f_version = event_count;
55542 return POLLIN | POLLRDNORM;
55543diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
55544index 0b59731..46ee7d1 100644
55545--- a/drivers/usb/core/devio.c
55546+++ b/drivers/usb/core/devio.c
55547@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
55548 struct usb_dev_state *ps = file->private_data;
55549 struct usb_device *dev = ps->dev;
55550 ssize_t ret = 0;
55551- unsigned len;
55552+ size_t len;
55553 loff_t pos;
55554 int i;
55555
55556@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
55557 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
55558 struct usb_config_descriptor *config =
55559 (struct usb_config_descriptor *)dev->rawdescriptors[i];
55560- unsigned int length = le16_to_cpu(config->wTotalLength);
55561+ size_t length = le16_to_cpu(config->wTotalLength);
55562
55563 if (*ppos < pos + length) {
55564
55565 /* The descriptor may claim to be longer than it
55566 * really is. Here is the actual allocated length. */
55567- unsigned alloclen =
55568+ size_t alloclen =
55569 le16_to_cpu(dev->config[i].desc.wTotalLength);
55570
55571- len = length - (*ppos - pos);
55572+ len = length + pos - *ppos;
55573 if (len > nbytes)
55574 len = nbytes;
55575
55576 /* Simply don't write (skip over) unallocated parts */
55577 if (alloclen > (*ppos - pos)) {
55578- alloclen -= (*ppos - pos);
55579+ alloclen = alloclen + pos - *ppos;
55580 if (copy_to_user(buf,
55581 dev->rawdescriptors[i] + (*ppos - pos),
55582 min(len, alloclen))) {
55583diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
55584index bec31e2..b8091cd 100644
55585--- a/drivers/usb/core/hcd.c
55586+++ b/drivers/usb/core/hcd.c
55587@@ -1554,7 +1554,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
55588 */
55589 usb_get_urb(urb);
55590 atomic_inc(&urb->use_count);
55591- atomic_inc(&urb->dev->urbnum);
55592+ atomic_inc_unchecked(&urb->dev->urbnum);
55593 usbmon_urb_submit(&hcd->self, urb);
55594
55595 /* NOTE requirements on root-hub callers (usbfs and the hub
55596@@ -1581,7 +1581,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
55597 urb->hcpriv = NULL;
55598 INIT_LIST_HEAD(&urb->urb_list);
55599 atomic_dec(&urb->use_count);
55600- atomic_dec(&urb->dev->urbnum);
55601+ atomic_dec_unchecked(&urb->dev->urbnum);
55602 if (atomic_read(&urb->reject))
55603 wake_up(&usb_kill_urb_queue);
55604 usb_put_urb(urb);
55605diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
55606index 27f2171..e3dfc22 100644
55607--- a/drivers/usb/core/hub.c
55608+++ b/drivers/usb/core/hub.c
55609@@ -27,6 +27,7 @@
55610 #include <linux/freezer.h>
55611 #include <linux/random.h>
55612 #include <linux/pm_qos.h>
55613+#include <linux/grsecurity.h>
55614
55615 #include <asm/uaccess.h>
55616 #include <asm/byteorder.h>
55617@@ -4644,6 +4645,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
55618 goto done;
55619 return;
55620 }
55621+
55622+ if (gr_handle_new_usb())
55623+ goto done;
55624+
55625 if (hub_is_superspeed(hub->hdev))
55626 unit_load = 150;
55627 else
55628diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
55629index 0c8a7fc..c45b40a 100644
55630--- a/drivers/usb/core/message.c
55631+++ b/drivers/usb/core/message.c
55632@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
55633 * Return: If successful, the number of bytes transferred. Otherwise, a negative
55634 * error number.
55635 */
55636-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
55637+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
55638 __u8 requesttype, __u16 value, __u16 index, void *data,
55639 __u16 size, int timeout)
55640 {
55641@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
55642 * If successful, 0. Otherwise a negative error number. The number of actual
55643 * bytes transferred will be stored in the @actual_length parameter.
55644 */
55645-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
55646+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
55647 void *data, int len, int *actual_length, int timeout)
55648 {
55649 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
55650@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
55651 * bytes transferred will be stored in the @actual_length parameter.
55652 *
55653 */
55654-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
55655+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
55656 void *data, int len, int *actual_length, int timeout)
55657 {
55658 struct urb *urb;
55659diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
55660index 1236c60..d47a51c 100644
55661--- a/drivers/usb/core/sysfs.c
55662+++ b/drivers/usb/core/sysfs.c
55663@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
55664 struct usb_device *udev;
55665
55666 udev = to_usb_device(dev);
55667- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
55668+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
55669 }
55670 static DEVICE_ATTR_RO(urbnum);
55671
55672diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
55673index 4d11449..f4ccabf 100644
55674--- a/drivers/usb/core/usb.c
55675+++ b/drivers/usb/core/usb.c
55676@@ -433,7 +433,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
55677 set_dev_node(&dev->dev, dev_to_node(bus->controller));
55678 dev->state = USB_STATE_ATTACHED;
55679 dev->lpm_disable_count = 1;
55680- atomic_set(&dev->urbnum, 0);
55681+ atomic_set_unchecked(&dev->urbnum, 0);
55682
55683 INIT_LIST_HEAD(&dev->ep0.urb_list);
55684 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
55685diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
55686index dab7927..6f53afc 100644
55687--- a/drivers/usb/dwc3/gadget.c
55688+++ b/drivers/usb/dwc3/gadget.c
55689@@ -615,8 +615,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
55690 if (!usb_endpoint_xfer_isoc(desc))
55691 return 0;
55692
55693- memset(&trb_link, 0, sizeof(trb_link));
55694-
55695 /* Link TRB for ISOC. The HWO bit is never reset */
55696 trb_st_hw = &dep->trb_pool[0];
55697
55698diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
55699index 8cfc319..4868255 100644
55700--- a/drivers/usb/early/ehci-dbgp.c
55701+++ b/drivers/usb/early/ehci-dbgp.c
55702@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
55703
55704 #ifdef CONFIG_KGDB
55705 static struct kgdb_io kgdbdbgp_io_ops;
55706-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
55707+static struct kgdb_io kgdbdbgp_io_ops_console;
55708+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
55709 #else
55710 #define dbgp_kgdb_mode (0)
55711 #endif
55712@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
55713 .write_char = kgdbdbgp_write_char,
55714 };
55715
55716+static struct kgdb_io kgdbdbgp_io_ops_console = {
55717+ .name = "kgdbdbgp",
55718+ .read_char = kgdbdbgp_read_char,
55719+ .write_char = kgdbdbgp_write_char,
55720+ .is_console = 1
55721+};
55722+
55723 static int kgdbdbgp_wait_time;
55724
55725 static int __init kgdbdbgp_parse_config(char *str)
55726@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
55727 ptr++;
55728 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
55729 }
55730- kgdb_register_io_module(&kgdbdbgp_io_ops);
55731- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
55732+ if (early_dbgp_console.index != -1)
55733+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
55734+ else
55735+ kgdb_register_io_module(&kgdbdbgp_io_ops);
55736
55737 return 0;
55738 }
55739diff --git a/drivers/usb/gadget/f_uac1.c b/drivers/usb/gadget/f_uac1.c
55740index 2b4c82d..06a8ee6 100644
55741--- a/drivers/usb/gadget/f_uac1.c
55742+++ b/drivers/usb/gadget/f_uac1.c
55743@@ -13,6 +13,7 @@
55744 #include <linux/kernel.h>
55745 #include <linux/device.h>
55746 #include <linux/atomic.h>
55747+#include <linux/module.h>
55748
55749 #include "u_uac1.h"
55750
55751diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
55752index ad0aca8..8ff84865 100644
55753--- a/drivers/usb/gadget/u_serial.c
55754+++ b/drivers/usb/gadget/u_serial.c
55755@@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
55756 spin_lock_irq(&port->port_lock);
55757
55758 /* already open? Great. */
55759- if (port->port.count) {
55760+ if (atomic_read(&port->port.count)) {
55761 status = 0;
55762- port->port.count++;
55763+ atomic_inc(&port->port.count);
55764
55765 /* currently opening/closing? wait ... */
55766 } else if (port->openclose) {
55767@@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
55768 tty->driver_data = port;
55769 port->port.tty = tty;
55770
55771- port->port.count = 1;
55772+ atomic_set(&port->port.count, 1);
55773 port->openclose = false;
55774
55775 /* if connected, start the I/O stream */
55776@@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
55777
55778 spin_lock_irq(&port->port_lock);
55779
55780- if (port->port.count != 1) {
55781- if (port->port.count == 0)
55782+ if (atomic_read(&port->port.count) != 1) {
55783+ if (atomic_read(&port->port.count) == 0)
55784 WARN_ON(1);
55785 else
55786- --port->port.count;
55787+ atomic_dec(&port->port.count);
55788 goto exit;
55789 }
55790
55791@@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
55792 * and sleep if necessary
55793 */
55794 port->openclose = true;
55795- port->port.count = 0;
55796+ atomic_set(&port->port.count, 0);
55797
55798 gser = port->port_usb;
55799 if (gser && gser->disconnect)
55800@@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
55801 int cond;
55802
55803 spin_lock_irq(&port->port_lock);
55804- cond = (port->port.count == 0) && !port->openclose;
55805+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
55806 spin_unlock_irq(&port->port_lock);
55807 return cond;
55808 }
55809@@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
55810 /* if it's already open, start I/O ... and notify the serial
55811 * protocol about open/close status (connect/disconnect).
55812 */
55813- if (port->port.count) {
55814+ if (atomic_read(&port->port.count)) {
55815 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
55816 gs_start_io(port);
55817 if (gser->connect)
55818@@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
55819
55820 port->port_usb = NULL;
55821 gser->ioport = NULL;
55822- if (port->port.count > 0 || port->openclose) {
55823+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
55824 wake_up_interruptible(&port->drain_wait);
55825 if (port->port.tty)
55826 tty_hangup(port->port.tty);
55827@@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
55828
55829 /* finally, free any unused/unusable I/O buffers */
55830 spin_lock_irqsave(&port->port_lock, flags);
55831- if (port->port.count == 0 && !port->openclose)
55832+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
55833 gs_buf_free(&port->port_write_buf);
55834 gs_free_requests(gser->out, &port->read_pool, NULL);
55835 gs_free_requests(gser->out, &port->read_queue, NULL);
55836diff --git a/drivers/usb/gadget/u_uac1.c b/drivers/usb/gadget/u_uac1.c
55837index 7a55fea..cc0ed4f 100644
55838--- a/drivers/usb/gadget/u_uac1.c
55839+++ b/drivers/usb/gadget/u_uac1.c
55840@@ -16,6 +16,7 @@
55841 #include <linux/ctype.h>
55842 #include <linux/random.h>
55843 #include <linux/syscalls.h>
55844+#include <linux/module.h>
55845
55846 #include "u_uac1.h"
55847
55848diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
55849index 6130b75..3b60008 100644
55850--- a/drivers/usb/host/ehci-hub.c
55851+++ b/drivers/usb/host/ehci-hub.c
55852@@ -771,7 +771,7 @@ static struct urb *request_single_step_set_feature_urb(
55853 urb->transfer_flags = URB_DIR_IN;
55854 usb_get_urb(urb);
55855 atomic_inc(&urb->use_count);
55856- atomic_inc(&urb->dev->urbnum);
55857+ atomic_inc_unchecked(&urb->dev->urbnum);
55858 urb->setup_dma = dma_map_single(
55859 hcd->self.controller,
55860 urb->setup_packet,
55861@@ -838,7 +838,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
55862 urb->status = -EINPROGRESS;
55863 usb_get_urb(urb);
55864 atomic_inc(&urb->use_count);
55865- atomic_inc(&urb->dev->urbnum);
55866+ atomic_inc_unchecked(&urb->dev->urbnum);
55867 retval = submit_single_step_set_feature(hcd, urb, 0);
55868 if (!retval && !wait_for_completion_timeout(&done,
55869 msecs_to_jiffies(2000))) {
55870diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
55871index d0d8fad..668ef7b 100644
55872--- a/drivers/usb/host/hwa-hc.c
55873+++ b/drivers/usb/host/hwa-hc.c
55874@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
55875 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
55876 struct wahc *wa = &hwahc->wa;
55877 struct device *dev = &wa->usb_iface->dev;
55878- u8 mas_le[UWB_NUM_MAS/8];
55879+ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
55880+
55881+ if (mas_le == NULL)
55882+ return -ENOMEM;
55883
55884 /* Set the stream index */
55885 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
55886@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
55887 WUSB_REQ_SET_WUSB_MAS,
55888 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
55889 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
55890- mas_le, 32, USB_CTRL_SET_TIMEOUT);
55891+ mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
55892 if (result < 0)
55893 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
55894 out:
55895+ kfree(mas_le);
55896+
55897 return result;
55898 }
55899
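The hwa-hc hunk above replaces an on-stack buffer with a heap allocation: buffers handed to usb_control_msg() must be DMA-able, which stack memory is not guaranteed to be, and the hard-coded length 32 is replaced by the expression that sized the buffer. A condensed sketch (value/index arguments reduced to 0 here):

/* Sketch of the pattern; error paths condensed. */
static int send_mas(struct usb_device *udev, const u8 *src)
{
	u8 *buf = kmalloc(UWB_NUM_MAS / 8, GFP_KERNEL);
	int ret;

	if (buf == NULL)
		return -ENOMEM;
	memcpy(buf, src, UWB_NUM_MAS / 8);
	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			      WUSB_REQ_SET_WUSB_MAS,
			      USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
			      0, 0, buf, UWB_NUM_MAS / 8, USB_CTRL_SET_TIMEOUT);
	kfree(buf);
	return ret;
}
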
55900diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
55901index b3d245e..99549ed 100644
55902--- a/drivers/usb/misc/appledisplay.c
55903+++ b/drivers/usb/misc/appledisplay.c
55904@@ -84,7 +84,7 @@ struct appledisplay {
55905 struct mutex sysfslock; /* concurrent read and write */
55906 };
55907
55908-static atomic_t count_displays = ATOMIC_INIT(0);
55909+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
55910 static struct workqueue_struct *wq;
55911
55912 static void appledisplay_complete(struct urb *urb)
55913@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
55914
55915 /* Register backlight device */
55916 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
55917- atomic_inc_return(&count_displays) - 1);
55918+ atomic_inc_return_unchecked(&count_displays) - 1);
55919 memset(&props, 0, sizeof(struct backlight_properties));
55920 props.type = BACKLIGHT_RAW;
55921 props.max_brightness = 0xff;
55922diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
55923index 8d7fc48..01c4986 100644
55924--- a/drivers/usb/serial/console.c
55925+++ b/drivers/usb/serial/console.c
55926@@ -123,7 +123,7 @@ static int usb_console_setup(struct console *co, char *options)
55927
55928 info->port = port;
55929
55930- ++port->port.count;
55931+ atomic_inc(&port->port.count);
55932 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
55933 if (serial->type->set_termios) {
55934 /*
55935@@ -167,7 +167,7 @@ static int usb_console_setup(struct console *co, char *options)
55936 }
55937 /* Now that any required fake tty operations are completed restore
55938 * the tty port count */
55939- --port->port.count;
55940+ atomic_dec(&port->port.count);
55941 /* The console is special in terms of closing the device so
55942 * indicate this port is now acting as a system console. */
55943 port->port.console = 1;
55944@@ -180,7 +180,7 @@ static int usb_console_setup(struct console *co, char *options)
55945 free_tty:
55946 kfree(tty);
55947 reset_open_count:
55948- port->port.count = 0;
55949+ atomic_set(&port->port.count, 0);
55950 usb_autopm_put_interface(serial->interface);
55951 error_get_interface:
55952 usb_serial_put(serial);
55953@@ -191,7 +191,7 @@ static int usb_console_setup(struct console *co, char *options)
55954 static void usb_console_write(struct console *co,
55955 const char *buf, unsigned count)
55956 {
55957- static struct usbcons_info *info = &usbcons_info;
55958+ struct usbcons_info *info = &usbcons_info;
55959 struct usb_serial_port *port = info->port;
55960 struct usb_serial *serial;
55961 int retval = -ENODEV;
55962diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
55963index 307e339..6aa97cb 100644
55964--- a/drivers/usb/storage/usb.h
55965+++ b/drivers/usb/storage/usb.h
55966@@ -63,7 +63,7 @@ struct us_unusual_dev {
55967 __u8 useProtocol;
55968 __u8 useTransport;
55969 int (*initFunction)(struct us_data *);
55970-};
55971+} __do_const;
55972
55973
55974 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
55975diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
55976index f2a8d29..7bc3fe7 100644
55977--- a/drivers/usb/wusbcore/wa-hc.h
55978+++ b/drivers/usb/wusbcore/wa-hc.h
55979@@ -240,7 +240,7 @@ struct wahc {
55980 spinlock_t xfer_list_lock;
55981 struct work_struct xfer_enqueue_work;
55982 struct work_struct xfer_error_work;
55983- atomic_t xfer_id_count;
55984+ atomic_unchecked_t xfer_id_count;
55985
55986 kernel_ulong_t quirks;
55987 };
55988@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
55989 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
55990 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
55991 wa->dto_in_use = 0;
55992- atomic_set(&wa->xfer_id_count, 1);
55993+ atomic_set_unchecked(&wa->xfer_id_count, 1);
55994 /* init the buf in URBs */
55995 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
55996 usb_init_urb(&(wa->buf_in_urbs[index]));
55997diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
55998index 3e2e4ed..060c9b8 100644
55999--- a/drivers/usb/wusbcore/wa-xfer.c
56000+++ b/drivers/usb/wusbcore/wa-xfer.c
56001@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
56002 */
56003 static void wa_xfer_id_init(struct wa_xfer *xfer)
56004 {
56005- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
56006+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
56007 }
56008
56009 /* Return the xfer's ID. */
56010diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
56011index f018d8d..ccab63f 100644
56012--- a/drivers/vfio/vfio.c
56013+++ b/drivers/vfio/vfio.c
56014@@ -481,7 +481,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
56015 return 0;
56016
56017 /* TODO Prevent device auto probing */
56018- WARN("Device %s added to live group %d!\n", dev_name(dev),
56019+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
56020 iommu_group_id(group->iommu_group));
56021
56022 return 0;
56023diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
56024index 5174eba..451e6bc 100644
56025--- a/drivers/vhost/vringh.c
56026+++ b/drivers/vhost/vringh.c
56027@@ -530,17 +530,17 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
56028 /* Userspace access helpers: in this case, addresses are really userspace. */
56029 static inline int getu16_user(u16 *val, const u16 *p)
56030 {
56031- return get_user(*val, (__force u16 __user *)p);
56032+ return get_user(*val, (u16 __force_user *)p);
56033 }
56034
56035 static inline int putu16_user(u16 *p, u16 val)
56036 {
56037- return put_user(val, (__force u16 __user *)p);
56038+ return put_user(val, (u16 __force_user *)p);
56039 }
56040
56041 static inline int copydesc_user(void *dst, const void *src, size_t len)
56042 {
56043- return copy_from_user(dst, (__force void __user *)src, len) ?
56044+ return copy_from_user(dst, (void __force_user *)src, len) ?
56045 -EFAULT : 0;
56046 }
56047
56048@@ -548,19 +548,19 @@ static inline int putused_user(struct vring_used_elem *dst,
56049 const struct vring_used_elem *src,
56050 unsigned int num)
56051 {
56052- return copy_to_user((__force void __user *)dst, src,
56053+ return copy_to_user((void __force_user *)dst, src,
56054 sizeof(*dst) * num) ? -EFAULT : 0;
56055 }
56056
56057 static inline int xfer_from_user(void *src, void *dst, size_t len)
56058 {
56059- return copy_from_user(dst, (__force void __user *)src, len) ?
56060+ return copy_from_user(dst, (void __force_user *)src, len) ?
56061 -EFAULT : 0;
56062 }
56063
56064 static inline int xfer_to_user(void *dst, void *src, size_t len)
56065 {
56066- return copy_to_user((__force void __user *)dst, src, len) ?
56067+ return copy_to_user((void __force_user *)dst, src, len) ?
56068 -EFAULT : 0;
56069 }
56070
56071@@ -596,9 +596,9 @@ int vringh_init_user(struct vringh *vrh, u32 features,
56072 vrh->last_used_idx = 0;
56073 vrh->vring.num = num;
56074 /* vring expects kernel addresses, but only used via accessors. */
56075- vrh->vring.desc = (__force struct vring_desc *)desc;
56076- vrh->vring.avail = (__force struct vring_avail *)avail;
56077- vrh->vring.used = (__force struct vring_used *)used;
56078+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
56079+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
56080+ vrh->vring.used = (__force_kernel struct vring_used *)used;
56081 return 0;
56082 }
56083 EXPORT_SYMBOL(vringh_init_user);
56084@@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val, const u16 *p)
56085
56086 static inline int putu16_kern(u16 *p, u16 val)
56087 {
56088- ACCESS_ONCE(*p) = val;
56089+ ACCESS_ONCE_RW(*p) = val;
56090 return 0;
56091 }
56092
56093diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
56094index 84a110a..96312c3 100644
56095--- a/drivers/video/backlight/kb3886_bl.c
56096+++ b/drivers/video/backlight/kb3886_bl.c
56097@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
56098 static unsigned long kb3886bl_flags;
56099 #define KB3886BL_SUSPENDED 0x01
56100
56101-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
56102+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
56103 {
56104 .ident = "Sahara Touch-iT",
56105 .matches = {
56106diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
56107index 1b0b233..6f34c2c 100644
56108--- a/drivers/video/fbdev/arcfb.c
56109+++ b/drivers/video/fbdev/arcfb.c
56110@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
56111 return -ENOSPC;
56112
56113 err = 0;
56114- if ((count + p) > fbmemlength) {
56115+ if (count > (fbmemlength - p)) {
56116 count = fbmemlength - p;
56117 err = -ENOSPC;
56118 }
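The arcfb hunk rewrites the bounds check so the addition cannot wrap: with unsigned operands, "count + p" can overflow and compare small, while "count > fbmemlength - p" cannot, since p has already been checked against fbmemlength a few lines earlier in arcfb_write(). A standalone demonstration of the difference:

/* Standalone userspace demo of the overflow the hunk avoids. */
#include <stdio.h>

int main(void)
{
	unsigned long fbmemlength = 4096, p = 16;
	unsigned long count = (unsigned long)-8;	/* huge request */

	printf("wrapping check: %d\n", (count + p) > fbmemlength);   /* 0: bypassed */
	printf("safe check:     %d\n", count > (fbmemlength - p));   /* 1: caught */
	return 0;
}
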
56119diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
56120index 52108be..c7c110d 100644
56121--- a/drivers/video/fbdev/aty/aty128fb.c
56122+++ b/drivers/video/fbdev/aty/aty128fb.c
56123@@ -149,7 +149,7 @@ enum {
56124 };
56125
56126 /* Must match above enum */
56127-static char * const r128_family[] = {
56128+static const char * const r128_family[] = {
56129 "AGP",
56130 "PCI",
56131 "PRO AGP",
56132diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
56133index c3d0074..0b9077e 100644
56134--- a/drivers/video/fbdev/aty/atyfb_base.c
56135+++ b/drivers/video/fbdev/aty/atyfb_base.c
56136@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
56137 par->accel_flags = var->accel_flags; /* hack */
56138
56139 if (var->accel_flags) {
56140- info->fbops->fb_sync = atyfb_sync;
56141+ pax_open_kernel();
56142+ *(void **)&info->fbops->fb_sync = atyfb_sync;
56143+ pax_close_kernel();
56144 info->flags &= ~FBINFO_HWACCEL_DISABLED;
56145 } else {
56146- info->fbops->fb_sync = NULL;
56147+ pax_open_kernel();
56148+ *(void **)&info->fbops->fb_sync = NULL;
56149+ pax_close_kernel();
56150 info->flags |= FBINFO_HWACCEL_DISABLED;
56151 }
56152
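
[note on the atyfb hunk above] The pax_open_kernel()/pax_close_kernel() pairs seen here recur throughout the fbdev hunks that follow: under the constify plugin, ops structures such as struct fb_ops become read-only at runtime, so the few legitimate writers must briefly lift write protection and strip the implied const via the *(void **)& cast. A conceptual sketch of what the pair amounts to on x86 (the real PaX implementation is per-arch and differs in detail):

	static inline unsigned long open_kernel(void)
	{
		unsigned long cr0;

		preempt_disable();
		cr0 = read_cr0();
		write_cr0(cr0 & ~X86_CR0_WP);	/* allow writes to read-only pages */
		return cr0;
	}

	static inline void close_kernel(unsigned long cr0)
	{
		write_cr0(cr0);			/* restore CR0.WP */
		preempt_enable();
	}
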
56153diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
56154index 2fa0317..4983f2a 100644
56155--- a/drivers/video/fbdev/aty/mach64_cursor.c
56156+++ b/drivers/video/fbdev/aty/mach64_cursor.c
56157@@ -8,6 +8,7 @@
56158 #include "../core/fb_draw.h"
56159
56160 #include <asm/io.h>
56161+#include <asm/pgtable.h>
56162
56163 #ifdef __sparc__
56164 #include <asm/fbio.h>
56165@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
56166 info->sprite.buf_align = 16; /* and 64 lines tall. */
56167 info->sprite.flags = FB_PIXMAP_IO;
56168
56169- info->fbops->fb_cursor = atyfb_cursor;
56170+ pax_open_kernel();
56171+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
56172+ pax_close_kernel();
56173
56174 return 0;
56175 }
56176diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
56177index 900aa4e..6d49418 100644
56178--- a/drivers/video/fbdev/core/fb_defio.c
56179+++ b/drivers/video/fbdev/core/fb_defio.c
56180@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
56181
56182 BUG_ON(!fbdefio);
56183 mutex_init(&fbdefio->lock);
56184- info->fbops->fb_mmap = fb_deferred_io_mmap;
56185+ pax_open_kernel();
56186+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
56187+ pax_close_kernel();
56188 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
56189 INIT_LIST_HEAD(&fbdefio->pagelist);
56190 if (fbdefio->delay == 0) /* set a default of 1 s */
56191@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
56192 page->mapping = NULL;
56193 }
56194
56195- info->fbops->fb_mmap = NULL;
56196+ *(void **)&info->fbops->fb_mmap = NULL;
56197 mutex_destroy(&fbdefio->lock);
56198 }
56199 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
56200diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
56201index b5e85f6..290f8c7 100644
56202--- a/drivers/video/fbdev/core/fbmem.c
56203+++ b/drivers/video/fbdev/core/fbmem.c
56204@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
56205 __u32 data;
56206 int err;
56207
56208- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
56209+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
56210
56211 data = (__u32) (unsigned long) fix->smem_start;
56212 err |= put_user(data, &fix32->smem_start);
56213diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
56214index e23392e..8a77540 100644
56215--- a/drivers/video/fbdev/hyperv_fb.c
56216+++ b/drivers/video/fbdev/hyperv_fb.c
56217@@ -235,7 +235,7 @@ static uint screen_fb_size;
56218 static inline int synthvid_send(struct hv_device *hdev,
56219 struct synthvid_msg *msg)
56220 {
56221- static atomic64_t request_id = ATOMIC64_INIT(0);
56222+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
56223 int ret;
56224
56225 msg->pipe_hdr.type = PIPE_MSG_DATA;
56226@@ -243,7 +243,7 @@ static inline int synthvid_send(struct hv_device *hdev,
56227
56228 ret = vmbus_sendpacket(hdev->channel, msg,
56229 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
56230- atomic64_inc_return(&request_id),
56231+ atomic64_inc_return_unchecked(&request_id),
56232 VM_PKT_DATA_INBAND, 0);
56233
56234 if (ret)
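
[note on the hyperv_fb hunk above] The atomic64_unchecked_t conversion here, like the atomic_unchecked_t statistics counters in the udlfb hunks further down, supports PaX's REFCOUNT hardening: ordinary atomic ops gain an overflow trap so that reference-count overflows cannot be turned into use-after-free bugs, while counters that may legitimately wrap (request IDs, byte and cycle counts) are switched to _unchecked variants that keep plain wrapping semantics. A user-space sketch of the distinction (assumed semantics; the in-kernel versions are per-architecture assembly):

	typedef struct { int counter; } atomic_unchecked_t;

	static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
	{
		/* plain wrapping increment, no overflow trap; the checked
		 * atomic_inc_return() would fault on signed overflow under
		 * PAX_REFCOUNT */
		return __sync_add_and_fetch(&v->counter, 1);
	}
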
56235diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
56236index 7672d2e..b56437f 100644
56237--- a/drivers/video/fbdev/i810/i810_accel.c
56238+++ b/drivers/video/fbdev/i810/i810_accel.c
56239@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
56240 }
56241 }
56242 printk("ringbuffer lockup!!!\n");
56243+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
56244 i810_report_error(mmio);
56245 par->dev_flags |= LOCKUP;
56246 info->pixmap.scan_align = 1;
56247diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
56248index a01147f..5d896f8 100644
56249--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
56250+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
56251@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
56252
56253 #ifdef CONFIG_FB_MATROX_MYSTIQUE
56254 struct matrox_switch matrox_mystique = {
56255- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
56256+ .preinit = MGA1064_preinit,
56257+ .reset = MGA1064_reset,
56258+ .init = MGA1064_init,
56259+ .restore = MGA1064_restore,
56260 };
56261 EXPORT_SYMBOL(matrox_mystique);
56262 #endif
56263
56264 #ifdef CONFIG_FB_MATROX_G
56265 struct matrox_switch matrox_G100 = {
56266- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
56267+ .preinit = MGAG100_preinit,
56268+ .reset = MGAG100_reset,
56269+ .init = MGAG100_init,
56270+ .restore = MGAG100_restore,
56271 };
56272 EXPORT_SYMBOL(matrox_G100);
56273 #endif
56274diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
56275index 195ad7c..09743fc 100644
56276--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
56277+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
56278@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
56279 }
56280
56281 struct matrox_switch matrox_millennium = {
56282- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
56283+ .preinit = Ti3026_preinit,
56284+ .reset = Ti3026_reset,
56285+ .init = Ti3026_init,
56286+ .restore = Ti3026_restore
56287 };
56288 EXPORT_SYMBOL(matrox_millennium);
56289 #endif
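
[note on the matroxfb hunks above] Converting the matrox_switch initializers from positional to designated form is not cosmetic: grsecurity's structure-layout randomization can reorder members, and a positional initializer would then silently bind callbacks to the wrong slots, while designated initializers are order-independent. Hypothetical illustration:

	struct ops {
		int  (*init)(void);
		void (*reset)(void);
	};

	static int my_init(void) { return 0; }
	static void my_reset(void) { }

	/* fragile: depends on member order */
	static struct ops a = { my_init, my_reset };
	/* robust: survives reordering or randomization */
	static struct ops b = { .init = my_init, .reset = my_reset };
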
56290diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
56291index fe92eed..106e085 100644
56292--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
56293+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
56294@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
56295 struct mb862xxfb_par *par = info->par;
56296
56297 if (info->var.bits_per_pixel == 32) {
56298- info->fbops->fb_fillrect = cfb_fillrect;
56299- info->fbops->fb_copyarea = cfb_copyarea;
56300- info->fbops->fb_imageblit = cfb_imageblit;
56301+ pax_open_kernel();
56302+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
56303+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
56304+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
56305+ pax_close_kernel();
56306 } else {
56307 outreg(disp, GC_L0EM, 3);
56308- info->fbops->fb_fillrect = mb86290fb_fillrect;
56309- info->fbops->fb_copyarea = mb86290fb_copyarea;
56310- info->fbops->fb_imageblit = mb86290fb_imageblit;
56311+ pax_open_kernel();
56312+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
56313+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
56314+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
56315+ pax_close_kernel();
56316 }
56317 outreg(draw, GDC_REG_DRAW_BASE, 0);
56318 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
56319diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
56320index def0412..fed6529 100644
56321--- a/drivers/video/fbdev/nvidia/nvidia.c
56322+++ b/drivers/video/fbdev/nvidia/nvidia.c
56323@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
56324 info->fix.line_length = (info->var.xres_virtual *
56325 info->var.bits_per_pixel) >> 3;
56326 if (info->var.accel_flags) {
56327- info->fbops->fb_imageblit = nvidiafb_imageblit;
56328- info->fbops->fb_fillrect = nvidiafb_fillrect;
56329- info->fbops->fb_copyarea = nvidiafb_copyarea;
56330- info->fbops->fb_sync = nvidiafb_sync;
56331+ pax_open_kernel();
56332+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
56333+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
56334+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
56335+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
56336+ pax_close_kernel();
56337 info->pixmap.scan_align = 4;
56338 info->flags &= ~FBINFO_HWACCEL_DISABLED;
56339 info->flags |= FBINFO_READS_FAST;
56340 NVResetGraphics(info);
56341 } else {
56342- info->fbops->fb_imageblit = cfb_imageblit;
56343- info->fbops->fb_fillrect = cfb_fillrect;
56344- info->fbops->fb_copyarea = cfb_copyarea;
56345- info->fbops->fb_sync = NULL;
56346+ pax_open_kernel();
56347+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
56348+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
56349+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
56350+ *(void **)&info->fbops->fb_sync = NULL;
56351+ pax_close_kernel();
56352 info->pixmap.scan_align = 1;
56353 info->flags |= FBINFO_HWACCEL_DISABLED;
56354 info->flags &= ~FBINFO_READS_FAST;
56355@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
56356 info->pixmap.size = 8 * 1024;
56357 info->pixmap.flags = FB_PIXMAP_SYSTEM;
56358
56359- if (!hwcur)
56360- info->fbops->fb_cursor = NULL;
56361+ if (!hwcur) {
56362+ pax_open_kernel();
56363+ *(void **)&info->fbops->fb_cursor = NULL;
56364+ pax_close_kernel();
56365+ }
56366
56367 info->var.accel_flags = (!noaccel);
56368
56369diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
56370index 2412a0d..294215b 100644
56371--- a/drivers/video/fbdev/omap2/dss/display.c
56372+++ b/drivers/video/fbdev/omap2/dss/display.c
56373@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
56374 if (dssdev->name == NULL)
56375 dssdev->name = dssdev->alias;
56376
56377+ pax_open_kernel();
56378 if (drv && drv->get_resolution == NULL)
56379- drv->get_resolution = omapdss_default_get_resolution;
56380+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
56381 if (drv && drv->get_recommended_bpp == NULL)
56382- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
56383+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
56384 if (drv && drv->get_timings == NULL)
56385- drv->get_timings = omapdss_default_get_timings;
56386+ *(void **)&drv->get_timings = omapdss_default_get_timings;
56387+ pax_close_kernel();
56388
56389 mutex_lock(&panel_list_mutex);
56390 list_add_tail(&dssdev->panel_list, &panel_list);
56391diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
56392index 83433cb..71e9b98 100644
56393--- a/drivers/video/fbdev/s1d13xxxfb.c
56394+++ b/drivers/video/fbdev/s1d13xxxfb.c
56395@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
56396
56397 switch(prod_id) {
56398 case S1D13506_PROD_ID: /* activate acceleration */
56399- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
56400- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
56401+ pax_open_kernel();
56402+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
56403+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
56404+ pax_close_kernel();
56405 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
56406 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
56407 break;
56408diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
56409index 2bcc84a..29dd1ea 100644
56410--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
56411+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
56412@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
56413 }
56414
56415 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
56416- lcdc_sys_write_index,
56417- lcdc_sys_write_data,
56418- lcdc_sys_read_data,
56419+ .write_index = lcdc_sys_write_index,
56420+ .write_data = lcdc_sys_write_data,
56421+ .read_data = lcdc_sys_read_data,
56422 };
56423
56424 static int sh_mobile_lcdc_sginit(struct fb_info *info,
56425diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
56426index d513ed6..90b0de9 100644
56427--- a/drivers/video/fbdev/smscufx.c
56428+++ b/drivers/video/fbdev/smscufx.c
56429@@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
56430 fb_deferred_io_cleanup(info);
56431 kfree(info->fbdefio);
56432 info->fbdefio = NULL;
56433- info->fbops->fb_mmap = ufx_ops_mmap;
56434+ pax_open_kernel();
56435+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
56436+ pax_close_kernel();
56437 }
56438
56439 pr_debug("released /dev/fb%d user=%d count=%d",
56440diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
56441index 77b890e..458e666 100644
56442--- a/drivers/video/fbdev/udlfb.c
56443+++ b/drivers/video/fbdev/udlfb.c
56444@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
56445 dlfb_urb_completion(urb);
56446
56447 error:
56448- atomic_add(bytes_sent, &dev->bytes_sent);
56449- atomic_add(bytes_identical, &dev->bytes_identical);
56450- atomic_add(width*height*2, &dev->bytes_rendered);
56451+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56452+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56453+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
56454 end_cycles = get_cycles();
56455- atomic_add(((unsigned int) ((end_cycles - start_cycles)
56456+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56457 >> 10)), /* Kcycles */
56458 &dev->cpu_kcycles_used);
56459
56460@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
56461 dlfb_urb_completion(urb);
56462
56463 error:
56464- atomic_add(bytes_sent, &dev->bytes_sent);
56465- atomic_add(bytes_identical, &dev->bytes_identical);
56466- atomic_add(bytes_rendered, &dev->bytes_rendered);
56467+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56468+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56469+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
56470 end_cycles = get_cycles();
56471- atomic_add(((unsigned int) ((end_cycles - start_cycles)
56472+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56473 >> 10)), /* Kcycles */
56474 &dev->cpu_kcycles_used);
56475 }
56476@@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
56477 fb_deferred_io_cleanup(info);
56478 kfree(info->fbdefio);
56479 info->fbdefio = NULL;
56480- info->fbops->fb_mmap = dlfb_ops_mmap;
56481+ pax_open_kernel();
56482+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
56483+ pax_close_kernel();
56484 }
56485
56486 pr_warn("released /dev/fb%d user=%d count=%d\n",
56487@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
56488 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56489 struct dlfb_data *dev = fb_info->par;
56490 return snprintf(buf, PAGE_SIZE, "%u\n",
56491- atomic_read(&dev->bytes_rendered));
56492+ atomic_read_unchecked(&dev->bytes_rendered));
56493 }
56494
56495 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56496@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56497 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56498 struct dlfb_data *dev = fb_info->par;
56499 return snprintf(buf, PAGE_SIZE, "%u\n",
56500- atomic_read(&dev->bytes_identical));
56501+ atomic_read_unchecked(&dev->bytes_identical));
56502 }
56503
56504 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56505@@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56506 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56507 struct dlfb_data *dev = fb_info->par;
56508 return snprintf(buf, PAGE_SIZE, "%u\n",
56509- atomic_read(&dev->bytes_sent));
56510+ atomic_read_unchecked(&dev->bytes_sent));
56511 }
56512
56513 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56514@@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56515 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56516 struct dlfb_data *dev = fb_info->par;
56517 return snprintf(buf, PAGE_SIZE, "%u\n",
56518- atomic_read(&dev->cpu_kcycles_used));
56519+ atomic_read_unchecked(&dev->cpu_kcycles_used));
56520 }
56521
56522 static ssize_t edid_show(
56523@@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
56524 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56525 struct dlfb_data *dev = fb_info->par;
56526
56527- atomic_set(&dev->bytes_rendered, 0);
56528- atomic_set(&dev->bytes_identical, 0);
56529- atomic_set(&dev->bytes_sent, 0);
56530- atomic_set(&dev->cpu_kcycles_used, 0);
56531+ atomic_set_unchecked(&dev->bytes_rendered, 0);
56532+ atomic_set_unchecked(&dev->bytes_identical, 0);
56533+ atomic_set_unchecked(&dev->bytes_sent, 0);
56534+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
56535
56536 return count;
56537 }
56538diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
56539index 509d452..7c9d2de 100644
56540--- a/drivers/video/fbdev/uvesafb.c
56541+++ b/drivers/video/fbdev/uvesafb.c
56542@@ -19,6 +19,7 @@
56543 #include <linux/io.h>
56544 #include <linux/mutex.h>
56545 #include <linux/slab.h>
56546+#include <linux/moduleloader.h>
56547 #include <video/edid.h>
56548 #include <video/uvesafb.h>
56549 #ifdef CONFIG_X86
56550@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
56551 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
56552 par->pmi_setpal = par->ypan = 0;
56553 } else {
56554+
56555+#ifdef CONFIG_PAX_KERNEXEC
56556+#ifdef CONFIG_MODULES
56557+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
56558+#endif
56559+ if (!par->pmi_code) {
56560+ par->pmi_setpal = par->ypan = 0;
56561+ return 0;
56562+ }
56563+#endif
56564+
56565 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
56566 + task->t.regs.edi);
56567+
56568+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56569+ pax_open_kernel();
56570+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
56571+ pax_close_kernel();
56572+
56573+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
56574+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
56575+#else
56576 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
56577 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
56578+#endif
56579+
56580 printk(KERN_INFO "uvesafb: protected mode interface info at "
56581 "%04x:%04x\n",
56582 (u16)task->t.regs.es, (u16)task->t.regs.edi);
56583@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
56584 par->ypan = ypan;
56585
56586 if (par->pmi_setpal || par->ypan) {
56587+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
56588 if (__supported_pte_mask & _PAGE_NX) {
56589 par->pmi_setpal = par->ypan = 0;
56590 printk(KERN_WARNING "uvesafb: NX protection is active, "
56591 "better not use the PMI.\n");
56592- } else {
56593+ } else
56594+#endif
56595 uvesafb_vbe_getpmi(task, par);
56596- }
56597 }
56598 #else
56599 /* The protected mode interface is not available on non-x86. */
56600@@ -1453,8 +1477,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56601 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
56602
56603 /* Disable blanking if the user requested so. */
56604- if (!blank)
56605- info->fbops->fb_blank = NULL;
56606+ if (!blank) {
56607+ pax_open_kernel();
56608+ *(void **)&info->fbops->fb_blank = NULL;
56609+ pax_close_kernel();
56610+ }
56611
56612 /*
56613 * Find out how much IO memory is required for the mode with
56614@@ -1525,8 +1552,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56615 info->flags = FBINFO_FLAG_DEFAULT |
56616 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
56617
56618- if (!par->ypan)
56619- info->fbops->fb_pan_display = NULL;
56620+ if (!par->ypan) {
56621+ pax_open_kernel();
56622+ *(void **)&info->fbops->fb_pan_display = NULL;
56623+ pax_close_kernel();
56624+ }
56625 }
56626
56627 static void uvesafb_init_mtrr(struct fb_info *info)
56628@@ -1787,6 +1817,11 @@ out_mode:
56629 out:
56630 kfree(par->vbe_modes);
56631
56632+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56633+ if (par->pmi_code)
56634+ module_free_exec(NULL, par->pmi_code);
56635+#endif
56636+
56637 framebuffer_release(info);
56638 return err;
56639 }
56640@@ -1811,6 +1846,11 @@ static int uvesafb_remove(struct platform_device *dev)
56641 kfree(par->vbe_state_orig);
56642 kfree(par->vbe_state_saved);
56643
56644+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56645+ if (par->pmi_code)
56646+ module_free_exec(NULL, par->pmi_code);
56647+#endif
56648+
56649 framebuffer_release(info);
56650 }
56651 return 0;
56652diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
56653index 6170e7f..dd63031 100644
56654--- a/drivers/video/fbdev/vesafb.c
56655+++ b/drivers/video/fbdev/vesafb.c
56656@@ -9,6 +9,7 @@
56657 */
56658
56659 #include <linux/module.h>
56660+#include <linux/moduleloader.h>
56661 #include <linux/kernel.h>
56662 #include <linux/errno.h>
56663 #include <linux/string.h>
56664@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
56665 static int vram_total; /* Set total amount of memory */
56666 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
56667 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
56668-static void (*pmi_start)(void) __read_mostly;
56669-static void (*pmi_pal) (void) __read_mostly;
56670+static void (*pmi_start)(void) __read_only;
56671+static void (*pmi_pal) (void) __read_only;
56672 static int depth __read_mostly;
56673 static int vga_compat __read_mostly;
56674 /* --------------------------------------------------------------------- */
56675@@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
56676 unsigned int size_remap;
56677 unsigned int size_total;
56678 char *option = NULL;
56679+ void *pmi_code = NULL;
56680
56681 /* ignore error return of fb_get_options */
56682 fb_get_options("vesafb", &option);
56683@@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
56684 size_remap = size_total;
56685 vesafb_fix.smem_len = size_remap;
56686
56687-#ifndef __i386__
56688- screen_info.vesapm_seg = 0;
56689-#endif
56690-
56691 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
56692 printk(KERN_WARNING
56693 "vesafb: cannot reserve video memory at 0x%lx\n",
56694@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
56695 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
56696 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
56697
56698+#ifdef __i386__
56699+
56700+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56701+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
56702+ if (!pmi_code)
56703+#elif !defined(CONFIG_PAX_KERNEXEC)
56704+ if (0)
56705+#endif
56706+
56707+#endif
56708+ screen_info.vesapm_seg = 0;
56709+
56710 if (screen_info.vesapm_seg) {
56711- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
56712- screen_info.vesapm_seg,screen_info.vesapm_off);
56713+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
56714+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
56715 }
56716
56717 if (screen_info.vesapm_seg < 0xc000)
56718@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
56719
56720 if (ypan || pmi_setpal) {
56721 unsigned short *pmi_base;
56722+
56723 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
56724- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
56725- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
56726+
56727+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56728+ pax_open_kernel();
56729+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
56730+#else
56731+ pmi_code = pmi_base;
56732+#endif
56733+
56734+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
56735+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
56736+
56737+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56738+ pmi_start = ktva_ktla(pmi_start);
56739+ pmi_pal = ktva_ktla(pmi_pal);
56740+ pax_close_kernel();
56741+#endif
56742+
56743 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
56744 if (pmi_base[3]) {
56745 printk(KERN_INFO "vesafb: pmi: ports = ");
56746@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
56747 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
56748 (ypan ? FBINFO_HWACCEL_YPAN : 0);
56749
56750- if (!ypan)
56751- info->fbops->fb_pan_display = NULL;
56752+ if (!ypan) {
56753+ pax_open_kernel();
56754+ *(void **)&info->fbops->fb_pan_display = NULL;
56755+ pax_close_kernel();
56756+ }
56757
56758 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
56759 err = -ENOMEM;
56760@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
56761 fb_info(info, "%s frame buffer device\n", info->fix.id);
56762 return 0;
56763 err:
56764+
56765+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56766+ module_free_exec(NULL, pmi_code);
56767+#endif
56768+
56769 if (info->screen_base)
56770 iounmap(info->screen_base);
56771 framebuffer_release(info);
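
[note on the vesafb/uvesafb PMI hunks above] Per the VBE 3.0 spec, the protected-mode interface block begins with a table of 16-bit offsets into itself: entry 1 is the "set display start" routine and entry 2 is "set palette" (entry 3, if non-zero, points to the port/memory list the hunk prints). A hypothetical helper expressing the same lookup:

	#include <stdint.h>

	typedef void (*pmi_fn)(void);

	/* Resolve PMI entry 'idx' relative to the start of the (possibly
	 * copied) PMI code block. */
	static pmi_fn pmi_entry(uint16_t *pmi_code, unsigned int idx)
	{
		return (pmi_fn)((char *)pmi_code + pmi_code[idx]);
	}

Under KERNEXEC the block is first copied into executable module memory via module_alloc_exec() so the resolved entry points land in a region the kernel is still permitted to execute.
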
56772diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
56773index 88714ae..16c2e11 100644
56774--- a/drivers/video/fbdev/via/via_clock.h
56775+++ b/drivers/video/fbdev/via/via_clock.h
56776@@ -56,7 +56,7 @@ struct via_clock {
56777
56778 void (*set_engine_pll_state)(u8 state);
56779 void (*set_engine_pll)(struct via_pll_config config);
56780-};
56781+} __no_const;
56782
56783
56784 static inline u32 get_pll_internal_frequency(u32 ref_freq,
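
[note on the via_clock hunk above] __no_const exempts a structure from the constify plugin when its function pointers must genuinely be assigned at runtime, as the via_clock callbacks are. A sketch of how the annotation is plausibly wired up (assumed; the real definition lives in grsecurity's compiler headers):

	#ifdef CONSTIFY_PLUGIN
	# define __no_const __attribute__((no_const))
	#else
	# define __no_const
	#endif
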
56785diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
56786index 3c14e43..2630570 100644
56787--- a/drivers/video/logo/logo_linux_clut224.ppm
56788+++ b/drivers/video/logo/logo_linux_clut224.ppm
56789@@ -2,1603 +2,1123 @@ P3
56790 # Standard 224-color Linux logo
56791 80 80
56792 255
[1603 lines of raw PPM pixel data omitted: RGB triplets of the standard 80x80, 224-color Linux logo, all deleted by this hunk and replaced by a new 1123-line image]
57737- 10 10 10 26 26 26 66 66 66 82 82 82
57738- 2 2 6 22 22 22 18 18 18 2 2 6
57739-149 149 149 253 253 253 253 253 253 253 253 253
57740-253 253 253 253 253 253 253 253 253 253 253 253
57741-253 253 253 253 253 253 234 234 234 242 242 242
57742-253 253 253 253 253 253 253 253 253 253 253 253
57743-253 253 253 253 253 253 253 253 253 253 253 253
57744-253 253 253 253 253 253 253 253 253 253 253 253
57745-253 253 253 253 253 253 253 253 253 253 253 253
57746-253 253 253 253 253 253 206 206 206 2 2 6
57747- 2 2 6 2 2 6 2 2 6 38 38 38
57748- 2 2 6 2 2 6 2 2 6 2 2 6
57749- 6 6 6 86 86 86 46 46 46 14 14 14
57750- 0 0 0 0 0 0 0 0 0 0 0 0
57751- 0 0 0 0 0 0 0 0 0 0 0 0
57752- 0 0 0 0 0 0 0 0 0 0 0 0
57753- 0 0 0 0 0 0 0 0 0 0 0 0
57754- 0 0 0 0 0 0 0 0 0 0 0 0
57755- 0 0 0 0 0 0 0 0 0 0 0 0
57756- 0 0 0 0 0 0 0 0 0 6 6 6
57757- 18 18 18 46 46 46 86 86 86 18 18 18
57758- 2 2 6 34 34 34 10 10 10 6 6 6
57759-210 210 210 253 253 253 253 253 253 253 253 253
57760-253 253 253 253 253 253 253 253 253 253 253 253
57761-253 253 253 253 253 253 234 234 234 242 242 242
57762-253 253 253 253 253 253 253 253 253 253 253 253
57763-253 253 253 253 253 253 253 253 253 253 253 253
57764-253 253 253 253 253 253 253 253 253 253 253 253
57765-253 253 253 253 253 253 253 253 253 253 253 253
57766-253 253 253 253 253 253 221 221 221 6 6 6
57767- 2 2 6 2 2 6 6 6 6 30 30 30
57768- 2 2 6 2 2 6 2 2 6 2 2 6
57769- 2 2 6 82 82 82 54 54 54 18 18 18
57770- 6 6 6 0 0 0 0 0 0 0 0 0
57771- 0 0 0 0 0 0 0 0 0 0 0 0
57772- 0 0 0 0 0 0 0 0 0 0 0 0
57773- 0 0 0 0 0 0 0 0 0 0 0 0
57774- 0 0 0 0 0 0 0 0 0 0 0 0
57775- 0 0 0 0 0 0 0 0 0 0 0 0
57776- 0 0 0 0 0 0 0 0 0 10 10 10
57777- 26 26 26 66 66 66 62 62 62 2 2 6
57778- 2 2 6 38 38 38 10 10 10 26 26 26
57779-238 238 238 253 253 253 253 253 253 253 253 253
57780-253 253 253 253 253 253 253 253 253 253 253 253
57781-253 253 253 253 253 253 231 231 231 238 238 238
57782-253 253 253 253 253 253 253 253 253 253 253 253
57783-253 253 253 253 253 253 253 253 253 253 253 253
57784-253 253 253 253 253 253 253 253 253 253 253 253
57785-253 253 253 253 253 253 253 253 253 253 253 253
57786-253 253 253 253 253 253 231 231 231 6 6 6
57787- 2 2 6 2 2 6 10 10 10 30 30 30
57788- 2 2 6 2 2 6 2 2 6 2 2 6
57789- 2 2 6 66 66 66 58 58 58 22 22 22
57790- 6 6 6 0 0 0 0 0 0 0 0 0
57791- 0 0 0 0 0 0 0 0 0 0 0 0
57792- 0 0 0 0 0 0 0 0 0 0 0 0
57793- 0 0 0 0 0 0 0 0 0 0 0 0
57794- 0 0 0 0 0 0 0 0 0 0 0 0
57795- 0 0 0 0 0 0 0 0 0 0 0 0
57796- 0 0 0 0 0 0 0 0 0 10 10 10
57797- 38 38 38 78 78 78 6 6 6 2 2 6
57798- 2 2 6 46 46 46 14 14 14 42 42 42
57799-246 246 246 253 253 253 253 253 253 253 253 253
57800-253 253 253 253 253 253 253 253 253 253 253 253
57801-253 253 253 253 253 253 231 231 231 242 242 242
57802-253 253 253 253 253 253 253 253 253 253 253 253
57803-253 253 253 253 253 253 253 253 253 253 253 253
57804-253 253 253 253 253 253 253 253 253 253 253 253
57805-253 253 253 253 253 253 253 253 253 253 253 253
57806-253 253 253 253 253 253 234 234 234 10 10 10
57807- 2 2 6 2 2 6 22 22 22 14 14 14
57808- 2 2 6 2 2 6 2 2 6 2 2 6
57809- 2 2 6 66 66 66 62 62 62 22 22 22
57810- 6 6 6 0 0 0 0 0 0 0 0 0
57811- 0 0 0 0 0 0 0 0 0 0 0 0
57812- 0 0 0 0 0 0 0 0 0 0 0 0
57813- 0 0 0 0 0 0 0 0 0 0 0 0
57814- 0 0 0 0 0 0 0 0 0 0 0 0
57815- 0 0 0 0 0 0 0 0 0 0 0 0
57816- 0 0 0 0 0 0 6 6 6 18 18 18
57817- 50 50 50 74 74 74 2 2 6 2 2 6
57818- 14 14 14 70 70 70 34 34 34 62 62 62
57819-250 250 250 253 253 253 253 253 253 253 253 253
57820-253 253 253 253 253 253 253 253 253 253 253 253
57821-253 253 253 253 253 253 231 231 231 246 246 246
57822-253 253 253 253 253 253 253 253 253 253 253 253
57823-253 253 253 253 253 253 253 253 253 253 253 253
57824-253 253 253 253 253 253 253 253 253 253 253 253
57825-253 253 253 253 253 253 253 253 253 253 253 253
57826-253 253 253 253 253 253 234 234 234 14 14 14
57827- 2 2 6 2 2 6 30 30 30 2 2 6
57828- 2 2 6 2 2 6 2 2 6 2 2 6
57829- 2 2 6 66 66 66 62 62 62 22 22 22
57830- 6 6 6 0 0 0 0 0 0 0 0 0
57831- 0 0 0 0 0 0 0 0 0 0 0 0
57832- 0 0 0 0 0 0 0 0 0 0 0 0
57833- 0 0 0 0 0 0 0 0 0 0 0 0
57834- 0 0 0 0 0 0 0 0 0 0 0 0
57835- 0 0 0 0 0 0 0 0 0 0 0 0
57836- 0 0 0 0 0 0 6 6 6 18 18 18
57837- 54 54 54 62 62 62 2 2 6 2 2 6
57838- 2 2 6 30 30 30 46 46 46 70 70 70
57839-250 250 250 253 253 253 253 253 253 253 253 253
57840-253 253 253 253 253 253 253 253 253 253 253 253
57841-253 253 253 253 253 253 231 231 231 246 246 246
57842-253 253 253 253 253 253 253 253 253 253 253 253
57843-253 253 253 253 253 253 253 253 253 253 253 253
57844-253 253 253 253 253 253 253 253 253 253 253 253
57845-253 253 253 253 253 253 253 253 253 253 253 253
57846-253 253 253 253 253 253 226 226 226 10 10 10
57847- 2 2 6 6 6 6 30 30 30 2 2 6
57848- 2 2 6 2 2 6 2 2 6 2 2 6
57849- 2 2 6 66 66 66 58 58 58 22 22 22
57850- 6 6 6 0 0 0 0 0 0 0 0 0
57851- 0 0 0 0 0 0 0 0 0 0 0 0
57852- 0 0 0 0 0 0 0 0 0 0 0 0
57853- 0 0 0 0 0 0 0 0 0 0 0 0
57854- 0 0 0 0 0 0 0 0 0 0 0 0
57855- 0 0 0 0 0 0 0 0 0 0 0 0
57856- 0 0 0 0 0 0 6 6 6 22 22 22
57857- 58 58 58 62 62 62 2 2 6 2 2 6
57858- 2 2 6 2 2 6 30 30 30 78 78 78
57859-250 250 250 253 253 253 253 253 253 253 253 253
57860-253 253 253 253 253 253 253 253 253 253 253 253
57861-253 253 253 253 253 253 231 231 231 246 246 246
57862-253 253 253 253 253 253 253 253 253 253 253 253
57863-253 253 253 253 253 253 253 253 253 253 253 253
57864-253 253 253 253 253 253 253 253 253 253 253 253
57865-253 253 253 253 253 253 253 253 253 253 253 253
57866-253 253 253 253 253 253 206 206 206 2 2 6
57867- 22 22 22 34 34 34 18 14 6 22 22 22
57868- 26 26 26 18 18 18 6 6 6 2 2 6
57869- 2 2 6 82 82 82 54 54 54 18 18 18
57870- 6 6 6 0 0 0 0 0 0 0 0 0
57871- 0 0 0 0 0 0 0 0 0 0 0 0
57872- 0 0 0 0 0 0 0 0 0 0 0 0
57873- 0 0 0 0 0 0 0 0 0 0 0 0
57874- 0 0 0 0 0 0 0 0 0 0 0 0
57875- 0 0 0 0 0 0 0 0 0 0 0 0
57876- 0 0 0 0 0 0 6 6 6 26 26 26
57877- 62 62 62 106 106 106 74 54 14 185 133 11
57878-210 162 10 121 92 8 6 6 6 62 62 62
57879-238 238 238 253 253 253 253 253 253 253 253 253
57880-253 253 253 253 253 253 253 253 253 253 253 253
57881-253 253 253 253 253 253 231 231 231 246 246 246
57882-253 253 253 253 253 253 253 253 253 253 253 253
57883-253 253 253 253 253 253 253 253 253 253 253 253
57884-253 253 253 253 253 253 253 253 253 253 253 253
57885-253 253 253 253 253 253 253 253 253 253 253 253
57886-253 253 253 253 253 253 158 158 158 18 18 18
57887- 14 14 14 2 2 6 2 2 6 2 2 6
57888- 6 6 6 18 18 18 66 66 66 38 38 38
57889- 6 6 6 94 94 94 50 50 50 18 18 18
57890- 6 6 6 0 0 0 0 0 0 0 0 0
57891- 0 0 0 0 0 0 0 0 0 0 0 0
57892- 0 0 0 0 0 0 0 0 0 0 0 0
57893- 0 0 0 0 0 0 0 0 0 0 0 0
57894- 0 0 0 0 0 0 0 0 0 0 0 0
57895- 0 0 0 0 0 0 0 0 0 6 6 6
57896- 10 10 10 10 10 10 18 18 18 38 38 38
57897- 78 78 78 142 134 106 216 158 10 242 186 14
57898-246 190 14 246 190 14 156 118 10 10 10 10
57899- 90 90 90 238 238 238 253 253 253 253 253 253
57900-253 253 253 253 253 253 253 253 253 253 253 253
57901-253 253 253 253 253 253 231 231 231 250 250 250
57902-253 253 253 253 253 253 253 253 253 253 253 253
57903-253 253 253 253 253 253 253 253 253 253 253 253
57904-253 253 253 253 253 253 253 253 253 253 253 253
57905-253 253 253 253 253 253 253 253 253 246 230 190
57906-238 204 91 238 204 91 181 142 44 37 26 9
57907- 2 2 6 2 2 6 2 2 6 2 2 6
57908- 2 2 6 2 2 6 38 38 38 46 46 46
57909- 26 26 26 106 106 106 54 54 54 18 18 18
57910- 6 6 6 0 0 0 0 0 0 0 0 0
57911- 0 0 0 0 0 0 0 0 0 0 0 0
57912- 0 0 0 0 0 0 0 0 0 0 0 0
57913- 0 0 0 0 0 0 0 0 0 0 0 0
57914- 0 0 0 0 0 0 0 0 0 0 0 0
57915- 0 0 0 6 6 6 14 14 14 22 22 22
57916- 30 30 30 38 38 38 50 50 50 70 70 70
57917-106 106 106 190 142 34 226 170 11 242 186 14
57918-246 190 14 246 190 14 246 190 14 154 114 10
57919- 6 6 6 74 74 74 226 226 226 253 253 253
57920-253 253 253 253 253 253 253 253 253 253 253 253
57921-253 253 253 253 253 253 231 231 231 250 250 250
57922-253 253 253 253 253 253 253 253 253 253 253 253
57923-253 253 253 253 253 253 253 253 253 253 253 253
57924-253 253 253 253 253 253 253 253 253 253 253 253
57925-253 253 253 253 253 253 253 253 253 228 184 62
57926-241 196 14 241 208 19 232 195 16 38 30 10
57927- 2 2 6 2 2 6 2 2 6 2 2 6
57928- 2 2 6 6 6 6 30 30 30 26 26 26
57929-203 166 17 154 142 90 66 66 66 26 26 26
57930- 6 6 6 0 0 0 0 0 0 0 0 0
57931- 0 0 0 0 0 0 0 0 0 0 0 0
57932- 0 0 0 0 0 0 0 0 0 0 0 0
57933- 0 0 0 0 0 0 0 0 0 0 0 0
57934- 0 0 0 0 0 0 0 0 0 0 0 0
57935- 6 6 6 18 18 18 38 38 38 58 58 58
57936- 78 78 78 86 86 86 101 101 101 123 123 123
57937-175 146 61 210 150 10 234 174 13 246 186 14
57938-246 190 14 246 190 14 246 190 14 238 190 10
57939-102 78 10 2 2 6 46 46 46 198 198 198
57940-253 253 253 253 253 253 253 253 253 253 253 253
57941-253 253 253 253 253 253 234 234 234 242 242 242
57942-253 253 253 253 253 253 253 253 253 253 253 253
57943-253 253 253 253 253 253 253 253 253 253 253 253
57944-253 253 253 253 253 253 253 253 253 253 253 253
57945-253 253 253 253 253 253 253 253 253 224 178 62
57946-242 186 14 241 196 14 210 166 10 22 18 6
57947- 2 2 6 2 2 6 2 2 6 2 2 6
57948- 2 2 6 2 2 6 6 6 6 121 92 8
57949-238 202 15 232 195 16 82 82 82 34 34 34
57950- 10 10 10 0 0 0 0 0 0 0 0 0
57951- 0 0 0 0 0 0 0 0 0 0 0 0
57952- 0 0 0 0 0 0 0 0 0 0 0 0
57953- 0 0 0 0 0 0 0 0 0 0 0 0
57954- 0 0 0 0 0 0 0 0 0 0 0 0
57955- 14 14 14 38 38 38 70 70 70 154 122 46
57956-190 142 34 200 144 11 197 138 11 197 138 11
57957-213 154 11 226 170 11 242 186 14 246 190 14
57958-246 190 14 246 190 14 246 190 14 246 190 14
57959-225 175 15 46 32 6 2 2 6 22 22 22
57960-158 158 158 250 250 250 253 253 253 253 253 253
57961-253 253 253 253 253 253 253 253 253 253 253 253
57962-253 253 253 253 253 253 253 253 253 253 253 253
57963-253 253 253 253 253 253 253 253 253 253 253 253
57964-253 253 253 253 253 253 253 253 253 253 253 253
57965-253 253 253 250 250 250 242 242 242 224 178 62
57966-239 182 13 236 186 11 213 154 11 46 32 6
57967- 2 2 6 2 2 6 2 2 6 2 2 6
57968- 2 2 6 2 2 6 61 42 6 225 175 15
57969-238 190 10 236 186 11 112 100 78 42 42 42
57970- 14 14 14 0 0 0 0 0 0 0 0 0
57971- 0 0 0 0 0 0 0 0 0 0 0 0
57972- 0 0 0 0 0 0 0 0 0 0 0 0
57973- 0 0 0 0 0 0 0 0 0 0 0 0
57974- 0 0 0 0 0 0 0 0 0 6 6 6
57975- 22 22 22 54 54 54 154 122 46 213 154 11
57976-226 170 11 230 174 11 226 170 11 226 170 11
57977-236 178 12 242 186 14 246 190 14 246 190 14
57978-246 190 14 246 190 14 246 190 14 246 190 14
57979-241 196 14 184 144 12 10 10 10 2 2 6
57980- 6 6 6 116 116 116 242 242 242 253 253 253
57981-253 253 253 253 253 253 253 253 253 253 253 253
57982-253 253 253 253 253 253 253 253 253 253 253 253
57983-253 253 253 253 253 253 253 253 253 253 253 253
57984-253 253 253 253 253 253 253 253 253 253 253 253
57985-253 253 253 231 231 231 198 198 198 214 170 54
57986-236 178 12 236 178 12 210 150 10 137 92 6
57987- 18 14 6 2 2 6 2 2 6 2 2 6
57988- 6 6 6 70 47 6 200 144 11 236 178 12
57989-239 182 13 239 182 13 124 112 88 58 58 58
57990- 22 22 22 6 6 6 0 0 0 0 0 0
57991- 0 0 0 0 0 0 0 0 0 0 0 0
57992- 0 0 0 0 0 0 0 0 0 0 0 0
57993- 0 0 0 0 0 0 0 0 0 0 0 0
57994- 0 0 0 0 0 0 0 0 0 10 10 10
57995- 30 30 30 70 70 70 180 133 36 226 170 11
57996-239 182 13 242 186 14 242 186 14 246 186 14
57997-246 190 14 246 190 14 246 190 14 246 190 14
57998-246 190 14 246 190 14 246 190 14 246 190 14
57999-246 190 14 232 195 16 98 70 6 2 2 6
58000- 2 2 6 2 2 6 66 66 66 221 221 221
58001-253 253 253 253 253 253 253 253 253 253 253 253
58002-253 253 253 253 253 253 253 253 253 253 253 253
58003-253 253 253 253 253 253 253 253 253 253 253 253
58004-253 253 253 253 253 253 253 253 253 253 253 253
58005-253 253 253 206 206 206 198 198 198 214 166 58
58006-230 174 11 230 174 11 216 158 10 192 133 9
58007-163 110 8 116 81 8 102 78 10 116 81 8
58008-167 114 7 197 138 11 226 170 11 239 182 13
58009-242 186 14 242 186 14 162 146 94 78 78 78
58010- 34 34 34 14 14 14 6 6 6 0 0 0
58011- 0 0 0 0 0 0 0 0 0 0 0 0
58012- 0 0 0 0 0 0 0 0 0 0 0 0
58013- 0 0 0 0 0 0 0 0 0 0 0 0
58014- 0 0 0 0 0 0 0 0 0 6 6 6
58015- 30 30 30 78 78 78 190 142 34 226 170 11
58016-239 182 13 246 190 14 246 190 14 246 190 14
58017-246 190 14 246 190 14 246 190 14 246 190 14
58018-246 190 14 246 190 14 246 190 14 246 190 14
58019-246 190 14 241 196 14 203 166 17 22 18 6
58020- 2 2 6 2 2 6 2 2 6 38 38 38
58021-218 218 218 253 253 253 253 253 253 253 253 253
58022-253 253 253 253 253 253 253 253 253 253 253 253
58023-253 253 253 253 253 253 253 253 253 253 253 253
58024-253 253 253 253 253 253 253 253 253 253 253 253
58025-250 250 250 206 206 206 198 198 198 202 162 69
58026-226 170 11 236 178 12 224 166 10 210 150 10
58027-200 144 11 197 138 11 192 133 9 197 138 11
58028-210 150 10 226 170 11 242 186 14 246 190 14
58029-246 190 14 246 186 14 225 175 15 124 112 88
58030- 62 62 62 30 30 30 14 14 14 6 6 6
58031- 0 0 0 0 0 0 0 0 0 0 0 0
58032- 0 0 0 0 0 0 0 0 0 0 0 0
58033- 0 0 0 0 0 0 0 0 0 0 0 0
58034- 0 0 0 0 0 0 0 0 0 10 10 10
58035- 30 30 30 78 78 78 174 135 50 224 166 10
58036-239 182 13 246 190 14 246 190 14 246 190 14
58037-246 190 14 246 190 14 246 190 14 246 190 14
58038-246 190 14 246 190 14 246 190 14 246 190 14
58039-246 190 14 246 190 14 241 196 14 139 102 15
58040- 2 2 6 2 2 6 2 2 6 2 2 6
58041- 78 78 78 250 250 250 253 253 253 253 253 253
58042-253 253 253 253 253 253 253 253 253 253 253 253
58043-253 253 253 253 253 253 253 253 253 253 253 253
58044-253 253 253 253 253 253 253 253 253 253 253 253
58045-250 250 250 214 214 214 198 198 198 190 150 46
58046-219 162 10 236 178 12 234 174 13 224 166 10
58047-216 158 10 213 154 11 213 154 11 216 158 10
58048-226 170 11 239 182 13 246 190 14 246 190 14
58049-246 190 14 246 190 14 242 186 14 206 162 42
58050-101 101 101 58 58 58 30 30 30 14 14 14
58051- 6 6 6 0 0 0 0 0 0 0 0 0
58052- 0 0 0 0 0 0 0 0 0 0 0 0
58053- 0 0 0 0 0 0 0 0 0 0 0 0
58054- 0 0 0 0 0 0 0 0 0 10 10 10
58055- 30 30 30 74 74 74 174 135 50 216 158 10
58056-236 178 12 246 190 14 246 190 14 246 190 14
58057-246 190 14 246 190 14 246 190 14 246 190 14
58058-246 190 14 246 190 14 246 190 14 246 190 14
58059-246 190 14 246 190 14 241 196 14 226 184 13
58060- 61 42 6 2 2 6 2 2 6 2 2 6
58061- 22 22 22 238 238 238 253 253 253 253 253 253
58062-253 253 253 253 253 253 253 253 253 253 253 253
58063-253 253 253 253 253 253 253 253 253 253 253 253
58064-253 253 253 253 253 253 253 253 253 253 253 253
58065-253 253 253 226 226 226 187 187 187 180 133 36
58066-216 158 10 236 178 12 239 182 13 236 178 12
58067-230 174 11 226 170 11 226 170 11 230 174 11
58068-236 178 12 242 186 14 246 190 14 246 190 14
58069-246 190 14 246 190 14 246 186 14 239 182 13
58070-206 162 42 106 106 106 66 66 66 34 34 34
58071- 14 14 14 6 6 6 0 0 0 0 0 0
58072- 0 0 0 0 0 0 0 0 0 0 0 0
58073- 0 0 0 0 0 0 0 0 0 0 0 0
58074- 0 0 0 0 0 0 0 0 0 6 6 6
58075- 26 26 26 70 70 70 163 133 67 213 154 11
58076-236 178 12 246 190 14 246 190 14 246 190 14
58077-246 190 14 246 190 14 246 190 14 246 190 14
58078-246 190 14 246 190 14 246 190 14 246 190 14
58079-246 190 14 246 190 14 246 190 14 241 196 14
58080-190 146 13 18 14 6 2 2 6 2 2 6
58081- 46 46 46 246 246 246 253 253 253 253 253 253
58082-253 253 253 253 253 253 253 253 253 253 253 253
58083-253 253 253 253 253 253 253 253 253 253 253 253
58084-253 253 253 253 253 253 253 253 253 253 253 253
58085-253 253 253 221 221 221 86 86 86 156 107 11
58086-216 158 10 236 178 12 242 186 14 246 186 14
58087-242 186 14 239 182 13 239 182 13 242 186 14
58088-242 186 14 246 186 14 246 190 14 246 190 14
58089-246 190 14 246 190 14 246 190 14 246 190 14
58090-242 186 14 225 175 15 142 122 72 66 66 66
58091- 30 30 30 10 10 10 0 0 0 0 0 0
58092- 0 0 0 0 0 0 0 0 0 0 0 0
58093- 0 0 0 0 0 0 0 0 0 0 0 0
58094- 0 0 0 0 0 0 0 0 0 6 6 6
58095- 26 26 26 70 70 70 163 133 67 210 150 10
58096-236 178 12 246 190 14 246 190 14 246 190 14
58097-246 190 14 246 190 14 246 190 14 246 190 14
58098-246 190 14 246 190 14 246 190 14 246 190 14
58099-246 190 14 246 190 14 246 190 14 246 190 14
58100-232 195 16 121 92 8 34 34 34 106 106 106
58101-221 221 221 253 253 253 253 253 253 253 253 253
58102-253 253 253 253 253 253 253 253 253 253 253 253
58103-253 253 253 253 253 253 253 253 253 253 253 253
58104-253 253 253 253 253 253 253 253 253 253 253 253
58105-242 242 242 82 82 82 18 14 6 163 110 8
58106-216 158 10 236 178 12 242 186 14 246 190 14
58107-246 190 14 246 190 14 246 190 14 246 190 14
58108-246 190 14 246 190 14 246 190 14 246 190 14
58109-246 190 14 246 190 14 246 190 14 246 190 14
58110-246 190 14 246 190 14 242 186 14 163 133 67
58111- 46 46 46 18 18 18 6 6 6 0 0 0
58112- 0 0 0 0 0 0 0 0 0 0 0 0
58113- 0 0 0 0 0 0 0 0 0 0 0 0
58114- 0 0 0 0 0 0 0 0 0 10 10 10
58115- 30 30 30 78 78 78 163 133 67 210 150 10
58116-236 178 12 246 186 14 246 190 14 246 190 14
58117-246 190 14 246 190 14 246 190 14 246 190 14
58118-246 190 14 246 190 14 246 190 14 246 190 14
58119-246 190 14 246 190 14 246 190 14 246 190 14
58120-241 196 14 215 174 15 190 178 144 253 253 253
58121-253 253 253 253 253 253 253 253 253 253 253 253
58122-253 253 253 253 253 253 253 253 253 253 253 253
58123-253 253 253 253 253 253 253 253 253 253 253 253
58124-253 253 253 253 253 253 253 253 253 218 218 218
58125- 58 58 58 2 2 6 22 18 6 167 114 7
58126-216 158 10 236 178 12 246 186 14 246 190 14
58127-246 190 14 246 190 14 246 190 14 246 190 14
58128-246 190 14 246 190 14 246 190 14 246 190 14
58129-246 190 14 246 190 14 246 190 14 246 190 14
58130-246 190 14 246 186 14 242 186 14 190 150 46
58131- 54 54 54 22 22 22 6 6 6 0 0 0
58132- 0 0 0 0 0 0 0 0 0 0 0 0
58133- 0 0 0 0 0 0 0 0 0 0 0 0
58134- 0 0 0 0 0 0 0 0 0 14 14 14
58135- 38 38 38 86 86 86 180 133 36 213 154 11
58136-236 178 12 246 186 14 246 190 14 246 190 14
58137-246 190 14 246 190 14 246 190 14 246 190 14
58138-246 190 14 246 190 14 246 190 14 246 190 14
58139-246 190 14 246 190 14 246 190 14 246 190 14
58140-246 190 14 232 195 16 190 146 13 214 214 214
58141-253 253 253 253 253 253 253 253 253 253 253 253
58142-253 253 253 253 253 253 253 253 253 253 253 253
58143-253 253 253 253 253 253 253 253 253 253 253 253
58144-253 253 253 250 250 250 170 170 170 26 26 26
58145- 2 2 6 2 2 6 37 26 9 163 110 8
58146-219 162 10 239 182 13 246 186 14 246 190 14
58147-246 190 14 246 190 14 246 190 14 246 190 14
58148-246 190 14 246 190 14 246 190 14 246 190 14
58149-246 190 14 246 190 14 246 190 14 246 190 14
58150-246 186 14 236 178 12 224 166 10 142 122 72
58151- 46 46 46 18 18 18 6 6 6 0 0 0
58152- 0 0 0 0 0 0 0 0 0 0 0 0
58153- 0 0 0 0 0 0 0 0 0 0 0 0
58154- 0 0 0 0 0 0 6 6 6 18 18 18
58155- 50 50 50 109 106 95 192 133 9 224 166 10
58156-242 186 14 246 190 14 246 190 14 246 190 14
58157-246 190 14 246 190 14 246 190 14 246 190 14
58158-246 190 14 246 190 14 246 190 14 246 190 14
58159-246 190 14 246 190 14 246 190 14 246 190 14
58160-242 186 14 226 184 13 210 162 10 142 110 46
58161-226 226 226 253 253 253 253 253 253 253 253 253
58162-253 253 253 253 253 253 253 253 253 253 253 253
58163-253 253 253 253 253 253 253 253 253 253 253 253
58164-198 198 198 66 66 66 2 2 6 2 2 6
58165- 2 2 6 2 2 6 50 34 6 156 107 11
58166-219 162 10 239 182 13 246 186 14 246 190 14
58167-246 190 14 246 190 14 246 190 14 246 190 14
58168-246 190 14 246 190 14 246 190 14 246 190 14
58169-246 190 14 246 190 14 246 190 14 242 186 14
58170-234 174 13 213 154 11 154 122 46 66 66 66
58171- 30 30 30 10 10 10 0 0 0 0 0 0
58172- 0 0 0 0 0 0 0 0 0 0 0 0
58173- 0 0 0 0 0 0 0 0 0 0 0 0
58174- 0 0 0 0 0 0 6 6 6 22 22 22
58175- 58 58 58 154 121 60 206 145 10 234 174 13
58176-242 186 14 246 186 14 246 190 14 246 190 14
58177-246 190 14 246 190 14 246 190 14 246 190 14
58178-246 190 14 246 190 14 246 190 14 246 190 14
58179-246 190 14 246 190 14 246 190 14 246 190 14
58180-246 186 14 236 178 12 210 162 10 163 110 8
58181- 61 42 6 138 138 138 218 218 218 250 250 250
58182-253 253 253 253 253 253 253 253 253 250 250 250
58183-242 242 242 210 210 210 144 144 144 66 66 66
58184- 6 6 6 2 2 6 2 2 6 2 2 6
58185- 2 2 6 2 2 6 61 42 6 163 110 8
58186-216 158 10 236 178 12 246 190 14 246 190 14
58187-246 190 14 246 190 14 246 190 14 246 190 14
58188-246 190 14 246 190 14 246 190 14 246 190 14
58189-246 190 14 239 182 13 230 174 11 216 158 10
58190-190 142 34 124 112 88 70 70 70 38 38 38
58191- 18 18 18 6 6 6 0 0 0 0 0 0
58192- 0 0 0 0 0 0 0 0 0 0 0 0
58193- 0 0 0 0 0 0 0 0 0 0 0 0
58194- 0 0 0 0 0 0 6 6 6 22 22 22
58195- 62 62 62 168 124 44 206 145 10 224 166 10
58196-236 178 12 239 182 13 242 186 14 242 186 14
58197-246 186 14 246 190 14 246 190 14 246 190 14
58198-246 190 14 246 190 14 246 190 14 246 190 14
58199-246 190 14 246 190 14 246 190 14 246 190 14
58200-246 190 14 236 178 12 216 158 10 175 118 6
58201- 80 54 7 2 2 6 6 6 6 30 30 30
58202- 54 54 54 62 62 62 50 50 50 38 38 38
58203- 14 14 14 2 2 6 2 2 6 2 2 6
58204- 2 2 6 2 2 6 2 2 6 2 2 6
58205- 2 2 6 6 6 6 80 54 7 167 114 7
58206-213 154 11 236 178 12 246 190 14 246 190 14
58207-246 190 14 246 190 14 246 190 14 246 190 14
58208-246 190 14 242 186 14 239 182 13 239 182 13
58209-230 174 11 210 150 10 174 135 50 124 112 88
58210- 82 82 82 54 54 54 34 34 34 18 18 18
58211- 6 6 6 0 0 0 0 0 0 0 0 0
58212- 0 0 0 0 0 0 0 0 0 0 0 0
58213- 0 0 0 0 0 0 0 0 0 0 0 0
58214- 0 0 0 0 0 0 6 6 6 18 18 18
58215- 50 50 50 158 118 36 192 133 9 200 144 11
58216-216 158 10 219 162 10 224 166 10 226 170 11
58217-230 174 11 236 178 12 239 182 13 239 182 13
58218-242 186 14 246 186 14 246 190 14 246 190 14
58219-246 190 14 246 190 14 246 190 14 246 190 14
58220-246 186 14 230 174 11 210 150 10 163 110 8
58221-104 69 6 10 10 10 2 2 6 2 2 6
58222- 2 2 6 2 2 6 2 2 6 2 2 6
58223- 2 2 6 2 2 6 2 2 6 2 2 6
58224- 2 2 6 2 2 6 2 2 6 2 2 6
58225- 2 2 6 6 6 6 91 60 6 167 114 7
58226-206 145 10 230 174 11 242 186 14 246 190 14
58227-246 190 14 246 190 14 246 186 14 242 186 14
58228-239 182 13 230 174 11 224 166 10 213 154 11
58229-180 133 36 124 112 88 86 86 86 58 58 58
58230- 38 38 38 22 22 22 10 10 10 6 6 6
58231- 0 0 0 0 0 0 0 0 0 0 0 0
58232- 0 0 0 0 0 0 0 0 0 0 0 0
58233- 0 0 0 0 0 0 0 0 0 0 0 0
58234- 0 0 0 0 0 0 0 0 0 14 14 14
58235- 34 34 34 70 70 70 138 110 50 158 118 36
58236-167 114 7 180 123 7 192 133 9 197 138 11
58237-200 144 11 206 145 10 213 154 11 219 162 10
58238-224 166 10 230 174 11 239 182 13 242 186 14
58239-246 186 14 246 186 14 246 186 14 246 186 14
58240-239 182 13 216 158 10 185 133 11 152 99 6
58241-104 69 6 18 14 6 2 2 6 2 2 6
58242- 2 2 6 2 2 6 2 2 6 2 2 6
58243- 2 2 6 2 2 6 2 2 6 2 2 6
58244- 2 2 6 2 2 6 2 2 6 2 2 6
58245- 2 2 6 6 6 6 80 54 7 152 99 6
58246-192 133 9 219 162 10 236 178 12 239 182 13
58247-246 186 14 242 186 14 239 182 13 236 178 12
58248-224 166 10 206 145 10 192 133 9 154 121 60
58249- 94 94 94 62 62 62 42 42 42 22 22 22
58250- 14 14 14 6 6 6 0 0 0 0 0 0
58251- 0 0 0 0 0 0 0 0 0 0 0 0
58252- 0 0 0 0 0 0 0 0 0 0 0 0
58253- 0 0 0 0 0 0 0 0 0 0 0 0
58254- 0 0 0 0 0 0 0 0 0 6 6 6
58255- 18 18 18 34 34 34 58 58 58 78 78 78
58256-101 98 89 124 112 88 142 110 46 156 107 11
58257-163 110 8 167 114 7 175 118 6 180 123 7
58258-185 133 11 197 138 11 210 150 10 219 162 10
58259-226 170 11 236 178 12 236 178 12 234 174 13
58260-219 162 10 197 138 11 163 110 8 130 83 6
58261- 91 60 6 10 10 10 2 2 6 2 2 6
58262- 18 18 18 38 38 38 38 38 38 38 38 38
58263- 38 38 38 38 38 38 38 38 38 38 38 38
58264- 38 38 38 38 38 38 26 26 26 2 2 6
58265- 2 2 6 6 6 6 70 47 6 137 92 6
58266-175 118 6 200 144 11 219 162 10 230 174 11
58267-234 174 13 230 174 11 219 162 10 210 150 10
58268-192 133 9 163 110 8 124 112 88 82 82 82
58269- 50 50 50 30 30 30 14 14 14 6 6 6
58270- 0 0 0 0 0 0 0 0 0 0 0 0
58271- 0 0 0 0 0 0 0 0 0 0 0 0
58272- 0 0 0 0 0 0 0 0 0 0 0 0
58273- 0 0 0 0 0 0 0 0 0 0 0 0
58274- 0 0 0 0 0 0 0 0 0 0 0 0
58275- 6 6 6 14 14 14 22 22 22 34 34 34
58276- 42 42 42 58 58 58 74 74 74 86 86 86
58277-101 98 89 122 102 70 130 98 46 121 87 25
58278-137 92 6 152 99 6 163 110 8 180 123 7
58279-185 133 11 197 138 11 206 145 10 200 144 11
58280-180 123 7 156 107 11 130 83 6 104 69 6
58281- 50 34 6 54 54 54 110 110 110 101 98 89
58282- 86 86 86 82 82 82 78 78 78 78 78 78
58283- 78 78 78 78 78 78 78 78 78 78 78 78
58284- 78 78 78 82 82 82 86 86 86 94 94 94
58285-106 106 106 101 101 101 86 66 34 124 80 6
58286-156 107 11 180 123 7 192 133 9 200 144 11
58287-206 145 10 200 144 11 192 133 9 175 118 6
58288-139 102 15 109 106 95 70 70 70 42 42 42
58289- 22 22 22 10 10 10 0 0 0 0 0 0
58290- 0 0 0 0 0 0 0 0 0 0 0 0
58291- 0 0 0 0 0 0 0 0 0 0 0 0
58292- 0 0 0 0 0 0 0 0 0 0 0 0
58293- 0 0 0 0 0 0 0 0 0 0 0 0
58294- 0 0 0 0 0 0 0 0 0 0 0 0
58295- 0 0 0 0 0 0 6 6 6 10 10 10
58296- 14 14 14 22 22 22 30 30 30 38 38 38
58297- 50 50 50 62 62 62 74 74 74 90 90 90
58298-101 98 89 112 100 78 121 87 25 124 80 6
58299-137 92 6 152 99 6 152 99 6 152 99 6
58300-138 86 6 124 80 6 98 70 6 86 66 30
58301-101 98 89 82 82 82 58 58 58 46 46 46
58302- 38 38 38 34 34 34 34 34 34 34 34 34
58303- 34 34 34 34 34 34 34 34 34 34 34 34
58304- 34 34 34 34 34 34 38 38 38 42 42 42
58305- 54 54 54 82 82 82 94 86 76 91 60 6
58306-134 86 6 156 107 11 167 114 7 175 118 6
58307-175 118 6 167 114 7 152 99 6 121 87 25
58308-101 98 89 62 62 62 34 34 34 18 18 18
58309- 6 6 6 0 0 0 0 0 0 0 0 0
58310- 0 0 0 0 0 0 0 0 0 0 0 0
58311- 0 0 0 0 0 0 0 0 0 0 0 0
58312- 0 0 0 0 0 0 0 0 0 0 0 0
58313- 0 0 0 0 0 0 0 0 0 0 0 0
58314- 0 0 0 0 0 0 0 0 0 0 0 0
58315- 0 0 0 0 0 0 0 0 0 0 0 0
58316- 0 0 0 6 6 6 6 6 6 10 10 10
58317- 18 18 18 22 22 22 30 30 30 42 42 42
58318- 50 50 50 66 66 66 86 86 86 101 98 89
58319-106 86 58 98 70 6 104 69 6 104 69 6
58320-104 69 6 91 60 6 82 62 34 90 90 90
58321- 62 62 62 38 38 38 22 22 22 14 14 14
58322- 10 10 10 10 10 10 10 10 10 10 10 10
58323- 10 10 10 10 10 10 6 6 6 10 10 10
58324- 10 10 10 10 10 10 10 10 10 14 14 14
58325- 22 22 22 42 42 42 70 70 70 89 81 66
58326- 80 54 7 104 69 6 124 80 6 137 92 6
58327-134 86 6 116 81 8 100 82 52 86 86 86
58328- 58 58 58 30 30 30 14 14 14 6 6 6
58329- 0 0 0 0 0 0 0 0 0 0 0 0
58330- 0 0 0 0 0 0 0 0 0 0 0 0
58331- 0 0 0 0 0 0 0 0 0 0 0 0
58332- 0 0 0 0 0 0 0 0 0 0 0 0
58333- 0 0 0 0 0 0 0 0 0 0 0 0
58334- 0 0 0 0 0 0 0 0 0 0 0 0
58335- 0 0 0 0 0 0 0 0 0 0 0 0
58336- 0 0 0 0 0 0 0 0 0 0 0 0
58337- 0 0 0 6 6 6 10 10 10 14 14 14
58338- 18 18 18 26 26 26 38 38 38 54 54 54
58339- 70 70 70 86 86 86 94 86 76 89 81 66
58340- 89 81 66 86 86 86 74 74 74 50 50 50
58341- 30 30 30 14 14 14 6 6 6 0 0 0
58342- 0 0 0 0 0 0 0 0 0 0 0 0
58343- 0 0 0 0 0 0 0 0 0 0 0 0
58344- 0 0 0 0 0 0 0 0 0 0 0 0
58345- 6 6 6 18 18 18 34 34 34 58 58 58
58346- 82 82 82 89 81 66 89 81 66 89 81 66
58347- 94 86 66 94 86 76 74 74 74 50 50 50
58348- 26 26 26 14 14 14 6 6 6 0 0 0
58349- 0 0 0 0 0 0 0 0 0 0 0 0
58350- 0 0 0 0 0 0 0 0 0 0 0 0
58351- 0 0 0 0 0 0 0 0 0 0 0 0
58352- 0 0 0 0 0 0 0 0 0 0 0 0
58353- 0 0 0 0 0 0 0 0 0 0 0 0
58354- 0 0 0 0 0 0 0 0 0 0 0 0
58355- 0 0 0 0 0 0 0 0 0 0 0 0
58356- 0 0 0 0 0 0 0 0 0 0 0 0
58357- 0 0 0 0 0 0 0 0 0 0 0 0
58358- 6 6 6 6 6 6 14 14 14 18 18 18
58359- 30 30 30 38 38 38 46 46 46 54 54 54
58360- 50 50 50 42 42 42 30 30 30 18 18 18
58361- 10 10 10 0 0 0 0 0 0 0 0 0
58362- 0 0 0 0 0 0 0 0 0 0 0 0
58363- 0 0 0 0 0 0 0 0 0 0 0 0
58364- 0 0 0 0 0 0 0 0 0 0 0 0
58365- 0 0 0 6 6 6 14 14 14 26 26 26
58366- 38 38 38 50 50 50 58 58 58 58 58 58
58367- 54 54 54 42 42 42 30 30 30 18 18 18
58368- 10 10 10 0 0 0 0 0 0 0 0 0
58369- 0 0 0 0 0 0 0 0 0 0 0 0
58370- 0 0 0 0 0 0 0 0 0 0 0 0
58371- 0 0 0 0 0 0 0 0 0 0 0 0
58372- 0 0 0 0 0 0 0 0 0 0 0 0
58373- 0 0 0 0 0 0 0 0 0 0 0 0
58374- 0 0 0 0 0 0 0 0 0 0 0 0
58375- 0 0 0 0 0 0 0 0 0 0 0 0
58376- 0 0 0 0 0 0 0 0 0 0 0 0
58377- 0 0 0 0 0 0 0 0 0 0 0 0
58378- 0 0 0 0 0 0 0 0 0 6 6 6
58379- 6 6 6 10 10 10 14 14 14 18 18 18
58380- 18 18 18 14 14 14 10 10 10 6 6 6
58381- 0 0 0 0 0 0 0 0 0 0 0 0
58382- 0 0 0 0 0 0 0 0 0 0 0 0
58383- 0 0 0 0 0 0 0 0 0 0 0 0
58384- 0 0 0 0 0 0 0 0 0 0 0 0
58385- 0 0 0 0 0 0 0 0 0 6 6 6
58386- 14 14 14 18 18 18 22 22 22 22 22 22
58387- 18 18 18 14 14 14 10 10 10 6 6 6
58388- 0 0 0 0 0 0 0 0 0 0 0 0
58389- 0 0 0 0 0 0 0 0 0 0 0 0
58390- 0 0 0 0 0 0 0 0 0 0 0 0
58391- 0 0 0 0 0 0 0 0 0 0 0 0
58392- 0 0 0 0 0 0 0 0 0 0 0 0
58393+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58394+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58395+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58396+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58397+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58398+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58399+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58400+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58401+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58402+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58403+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58404+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58406+4 4 4 4 4 4
58407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58408+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58409+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58410+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58411+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58412+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58413+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58414+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58415+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58416+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58417+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58418+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58420+4 4 4 4 4 4
58421+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58422+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58423+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58424+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58425+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58426+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58427+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58428+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58430+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58431+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58432+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58434+4 4 4 4 4 4
58435+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58436+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58437+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58438+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58439+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58440+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58441+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58442+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58444+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58445+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58448+4 4 4 4 4 4
58449+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58450+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58451+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58452+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58453+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58454+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58455+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58456+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58458+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58459+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58460+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58462+4 4 4 4 4 4
58463+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58464+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58465+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58466+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58467+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58468+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58469+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58470+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58471+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58472+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58473+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58474+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58475+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58476+4 4 4 4 4 4
58477+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58478+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58479+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58480+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58481+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
58482+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
58483+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58484+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58486+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
58487+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58488+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
58489+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58490+4 4 4 4 4 4
58491+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58492+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58493+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58494+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58495+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
58496+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
58497+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58498+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58500+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
58501+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
58502+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
58503+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58504+4 4 4 4 4 4
58505+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58506+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58507+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58508+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58509+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
58510+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
58511+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58512+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58514+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
58515+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
58516+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
58517+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
58518+4 4 4 4 4 4
58519+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58520+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58521+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58522+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
58523+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
58524+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
58525+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
58526+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58527+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
58528+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
58529+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
58530+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
58531+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
58532+4 4 4 4 4 4
58533+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58534+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58535+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58536+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
58537+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
58538+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
58539+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
58540+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58541+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
58542+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
58543+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
58544+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
58545+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
58546+4 4 4 4 4 4
58547+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58548+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58549+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
58550+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
58551+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
58552+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
58553+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
58554+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
58555+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
58556+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
58557+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
58558+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
58559+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
58560+4 4 4 4 4 4
58561+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58562+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58563+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
58564+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
58565+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
58566+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
58567+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
58568+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
58569+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
58570+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
58571+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
58572+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
58573+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
58574+4 4 4 4 4 4
58575+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58576+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58577+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
58578+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
58579+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
58580+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
58581+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
58582+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
58583+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
58584+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
58585+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
58586+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
58587+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
58588+4 4 4 4 4 4
58589+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58590+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58591+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
58592+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
58593+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
58594+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
58595+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
58596+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
58597+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
58598+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
58599+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
58600+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
58601+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58602+4 4 4 4 4 4
58603+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58604+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58605+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
58606+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
58607+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
58608+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
58609+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
58610+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
58611+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
58612+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
58613+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
58614+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
58615+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
58616+4 4 4 4 4 4
58617+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58618+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
58619+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
58620+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
58621+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
58622+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
58623+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
58624+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
58625+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
58626+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
58627+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
58628+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
58629+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
58630+4 4 4 4 4 4
58631+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58632+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
58633+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
58634+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
58635+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
58636+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
58637+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
58638+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
58639+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
58640+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
58641+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
58642+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
58643+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
58644+0 0 0 4 4 4
58645+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
58646+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
58647+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
58648+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
58649+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
58650+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
58651+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
58652+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
58653+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
58654+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
58655+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
58656+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
58657+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
58658+2 0 0 0 0 0
58659+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
58660+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
58661+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
58662+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
58663+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
58664+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
58665+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
58666+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
58667+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
58668+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
58669+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
58670+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
58671+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
58672+37 38 37 0 0 0
58673+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
58674+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
58675+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
58676+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
58677+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
58678+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
58679+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
58680+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
58681+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
58682+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
58683+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
58684+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
58685+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
58686+85 115 134 4 0 0
58687+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
58688+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
58689+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
58690+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
58691+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
58692+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
58693+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
58694+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
58695+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
58696+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
58697+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
58698+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
58699+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
58700+60 73 81 4 0 0
58701+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
58702+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
58703+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
58704+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
58705+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
58706+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
58707+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
58708+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
58709+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
58710+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
58711+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
58712+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
58713+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
58714+16 19 21 4 0 0
58715+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
58716+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
58717+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
58718+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
58719+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
58720+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
58721+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
58722+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
58723+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
58724+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
58725+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
58726+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
58727+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
58728+4 0 0 4 3 3
58729+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
58730+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
58731+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
58732+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
58733+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
58734+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
58735+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
58736+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
58737+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
58738+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
58739+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
58740+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
58741+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
58742+3 2 2 4 4 4
58743+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
58744+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
58745+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
58746+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
58747+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
58748+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
58749+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
58750+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
58751+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
58752+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
58753+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
58754+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
58755+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
58756+4 4 4 4 4 4
58757+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
58758+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
58759+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
58760+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
58761+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
58762+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
58763+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
58764+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
58765+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
58766+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
58767+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
58768+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
58769+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
58770+4 4 4 4 4 4
58771+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
58772+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
58773+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
58774+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
58775+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
58776+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
58777+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
58778+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
58779+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
58780+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
58781+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
58782+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
58783+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
58784+5 5 5 5 5 5
58785+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
58786+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
58787+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
58788+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
58789+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
58790+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58791+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
58792+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
58793+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
58794+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
58795+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
58796+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
58797+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
58798+5 5 5 4 4 4
58799+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
58800+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
58801+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
58802+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
58803+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58804+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
58805+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
58806+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
58807+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
58808+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
58809+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
58810+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
58811+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58812+4 4 4 4 4 4
58813+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
58814+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
58815+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
58816+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
[patch lines 58817-59512: 696 further rows of decimal RGB colour triplets added by this hunk — the raw pixel data of the image file the patch replaces (PPM-style payload) — omitted here, as they contain no reviewable code.]
59513diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
59514index fef20db..d28b1ab 100644
59515--- a/drivers/xen/xenfs/xenstored.c
59516+++ b/drivers/xen/xenfs/xenstored.c
59517@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
59518 static int xsd_kva_open(struct inode *inode, struct file *file)
59519 {
59520 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
59521+#ifdef CONFIG_GRKERNSEC_HIDESYM
59522+ NULL);
59523+#else
59524 xen_store_interface);
59525+#endif
59526+
59527 if (!file->private_data)
59528 return -ENOMEM;
59529 return 0;
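
The hunk above makes xsd_kva_open() format NULL instead of the xen_store_interface pointer when CONFIG_GRKERNSEC_HIDESYM is enabled, so the kernel address never reaches userland through this file. A minimal userspace model of the idea (the HIDESYM macro and helper name below are illustrative, not from the patch):

#include <stdio.h>

/* Models CONFIG_GRKERNSEC_HIDESYM being enabled. */
#define HIDESYM 1

/* Format a pointer for export to userland: with hiding enabled, print
 * NULL so the real address never leaves the kernel. */
static void format_exported_pointer(char *buf, size_t len, void *p)
{
#if HIDESYM
	(void)p;
	snprintf(buf, len, "0x%p", (void *)NULL);
#else
	snprintf(buf, len, "0x%p", p);
#endif
}

int main(void)
{
	int secret;
	char buf[32];

	format_exported_pointer(buf, sizeof(buf), &secret);
	printf("exported: %s\n", buf);	/* "0x(nil)" or similar when hidden */
	return 0;
}
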
59530diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
59531index cc1cfae..41158ad 100644
59532--- a/fs/9p/vfs_addr.c
59533+++ b/fs/9p/vfs_addr.c
59534@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
59535
59536 retval = v9fs_file_write_internal(inode,
59537 v9inode->writeback_fid,
59538- (__force const char __user *)buffer,
59539+ (const char __force_user *)buffer,
59540 len, &offset, 0);
59541 if (retval > 0)
59542 retval = 0;
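
The 9p change rewrites "(__force const char __user *)" as "(const char __force_user *)", grsecurity's spelling that combines the two sparse annotations. Under sparse (__CHECKER__) these expand to address-space attributes; in a normal build they expand to nothing. A sketch of how such annotations are typically defined, modeled on the kernel's compiler headers (__force_user itself is the grsecurity-specific combination):

#include <stdio.h>

/* Sparse address-space annotations: active only under __CHECKER__. */
#ifdef __CHECKER__
# define __user   __attribute__((noderef, address_space(1)))
# define __force  __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user  __force __user

/* Stand-in for a write routine that expects a userland pointer. */
static long write_internal(const char __user *buf, unsigned long len)
{
	(void)buf;
	return (long)len;
}

int main(void)
{
	char kbuf[8] = "data";

	/* A kernel buffer deliberately passed where a user pointer is
	 * expected: the cast documents the address-space crossing. */
	printf("%ld\n", write_internal((const char __force_user *)kbuf, 4));
	return 0;
}
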
59543diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
59544index 7fa4f7a..a7ebf8c 100644
59545--- a/fs/9p/vfs_inode.c
59546+++ b/fs/9p/vfs_inode.c
59547@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
59548 void
59549 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
59550 {
59551- char *s = nd_get_link(nd);
59552+ const char *s = nd_get_link(nd);
59553
59554 p9_debug(P9_DEBUG_VFS, " %s %s\n",
59555 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
59556diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
59557index 370b24c..ff0be7b 100644
59558--- a/fs/Kconfig.binfmt
59559+++ b/fs/Kconfig.binfmt
59560@@ -103,7 +103,7 @@ config HAVE_AOUT
59561
59562 config BINFMT_AOUT
59563 tristate "Kernel support for a.out and ECOFF binaries"
59564- depends on HAVE_AOUT
59565+ depends on HAVE_AOUT && BROKEN
59566 ---help---
59567 A.out (Assembler.OUTput) is a set of formats for libraries and
59568 executables used in the earliest versions of UNIX. Linux used
59569diff --git a/fs/afs/inode.c b/fs/afs/inode.c
59570index 2946712..f737435 100644
59571--- a/fs/afs/inode.c
59572+++ b/fs/afs/inode.c
59573@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
59574 struct afs_vnode *vnode;
59575 struct super_block *sb;
59576 struct inode *inode;
59577- static atomic_t afs_autocell_ino;
59578+ static atomic_unchecked_t afs_autocell_ino;
59579
59580 _enter("{%x:%u},%*.*s,",
59581 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
59582@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
59583 data.fid.unique = 0;
59584 data.fid.vnode = 0;
59585
59586- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
59587+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
59588 afs_iget5_autocell_test, afs_iget5_set,
59589 &data);
59590 if (!inode) {
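
afs_autocell_ino is a plain counter used only to mint unique inode numbers, so wrapping it is harmless; switching it to atomic_unchecked_t / atomic_inc_return_unchecked() opts it out of PaX's REFCOUNT overflow detection, which would otherwise treat a wrap as a reference-count bug. A userspace model of the distinction (the saturation behaviour shown is a simplification of what the hardened kernel actually does):

#include <limits.h>
#include <stdio.h>

typedef struct { int counter; } atomic_t;                 /* checked   */
typedef struct { unsigned counter; } atomic_unchecked_t;  /* unchecked */

/* Checked increment: a REFCOUNT-hardened kernel traps or saturates on
 * overflow; here we just report and saturate. */
static int atomic_inc_return(atomic_t *v)
{
	if (v->counter == INT_MAX) {
		fprintf(stderr, "refcount overflow detected!\n");
		return v->counter;
	}
	return ++v->counter;
}

/* Unchecked increment: wraparound is deliberate and harmless, e.g. a
 * counter that only has to produce distinct inode numbers. */
static unsigned atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
	return ++v->counter;
}

int main(void)
{
	atomic_t ref = { 1 };
	atomic_unchecked_t ino = { 0 };

	printf("ref=%d first autocell ino=%u\n",
	       atomic_inc_return(&ref), atomic_inc_return_unchecked(&ino));
	return 0;
}
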
59591diff --git a/fs/aio.c b/fs/aio.c
59592index 1c9c5f0..c935d6e 100644
59593--- a/fs/aio.c
59594+++ b/fs/aio.c
59595@@ -141,6 +141,7 @@ struct kioctx {
59596
59597 struct {
59598 unsigned tail;
59599+ unsigned completed_events;
59600 spinlock_t completion_lock;
59601 } ____cacheline_aligned_in_smp;
59602
59603@@ -380,7 +381,7 @@ static int aio_setup_ring(struct kioctx *ctx)
59604 size += sizeof(struct io_event) * nr_events;
59605
59606 nr_pages = PFN_UP(size);
59607- if (nr_pages < 0)
59608+ if (nr_pages <= 0)
59609 return -EINVAL;
59610
59611 file = aio_private_file(ctx, nr_pages);
59612@@ -880,6 +881,68 @@ out:
59613 return ret;
59614 }
59615
59616+/* refill_reqs_available
59617+ * Updates the reqs_available reference counts used for tracking the
59618+ * number of free slots in the completion ring. This can be called
59619+ * from aio_complete() (to optimistically update reqs_available) or
59620+ * from aio_get_req() (the we're-out-of-events case). It must be
59621+ * called holding ctx->completion_lock.
59622+ */
59623+static void refill_reqs_available(struct kioctx *ctx, unsigned head,
59624+ unsigned tail)
59625+{
59626+ unsigned events_in_ring, completed;
59627+
59628+ /* Clamp head since userland can write to it. */
59629+ head %= ctx->nr_events;
59630+ if (head <= tail)
59631+ events_in_ring = tail - head;
59632+ else
59633+ events_in_ring = ctx->nr_events - (head - tail);
59634+
59635+ completed = ctx->completed_events;
59636+ if (events_in_ring < completed)
59637+ completed -= events_in_ring;
59638+ else
59639+ completed = 0;
59640+
59641+ if (!completed)
59642+ return;
59643+
59644+ ctx->completed_events -= completed;
59645+ put_reqs_available(ctx, completed);
59646+}
59647+
59648+/* user_refill_reqs_available
59649+ * Called to refill reqs_available when aio_get_req() runs out of
59650+ * space in the completion ring.
59651+ */
59652+static void user_refill_reqs_available(struct kioctx *ctx)
59653+{
59654+ spin_lock_irq(&ctx->completion_lock);
59655+ if (ctx->completed_events) {
59656+ struct aio_ring *ring;
59657+ unsigned head;
59658+
59659+ /* Access of ring->head may race with aio_read_events_ring()
59660+	 * here, but that's okay: whether we read the old version or
59661+	 * the new version, either will be valid. The important
59662+ * part is that head cannot pass tail since we prevent
59663+ * aio_complete() from updating tail by holding
59664+ * ctx->completion_lock. Even if head is invalid, the check
59665+ * against ctx->completed_events below will make sure we do the
59666+ * safe/right thing.
59667+ */
59668+ ring = kmap_atomic(ctx->ring_pages[0]);
59669+ head = ring->head;
59670+ kunmap_atomic(ring);
59671+
59672+ refill_reqs_available(ctx, head, ctx->tail);
59673+ }
59674+
59675+ spin_unlock_irq(&ctx->completion_lock);
59676+}
59677+
59678 /* aio_get_req
59679 * Allocate a slot for an aio request.
59680 * Returns NULL if no requests are free.
59681@@ -888,8 +951,11 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
59682 {
59683 struct kiocb *req;
59684
59685- if (!get_reqs_available(ctx))
59686- return NULL;
59687+ if (!get_reqs_available(ctx)) {
59688+ user_refill_reqs_available(ctx);
59689+ if (!get_reqs_available(ctx))
59690+ return NULL;
59691+ }
59692
59693 req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
59694 if (unlikely(!req))
59695@@ -948,8 +1014,8 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
59696 struct kioctx *ctx = iocb->ki_ctx;
59697 struct aio_ring *ring;
59698 struct io_event *ev_page, *event;
59699+ unsigned tail, pos, head;
59700 unsigned long flags;
59701- unsigned tail, pos;
59702
59703 /*
59704 * Special case handling for sync iocbs:
59705@@ -1010,10 +1076,14 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
59706 ctx->tail = tail;
59707
59708 ring = kmap_atomic(ctx->ring_pages[0]);
59709+ head = ring->head;
59710 ring->tail = tail;
59711 kunmap_atomic(ring);
59712 flush_dcache_page(ctx->ring_pages[0]);
59713
59714+ ctx->completed_events++;
59715+ if (ctx->completed_events > 1)
59716+ refill_reqs_available(ctx, head, tail);
59717 spin_unlock_irqrestore(&ctx->completion_lock, flags);
59718
59719 pr_debug("added to ring %p at [%u]\n", iocb, tail);
59720@@ -1028,7 +1098,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
59721
59722 /* everything turned out well, dispose of the aiocb. */
59723 kiocb_free(iocb);
59724- put_reqs_available(ctx, 1);
59725
59726 /*
59727 * We have to order our ring_info tail store above and test
59728@@ -1065,6 +1134,12 @@ static long aio_read_events_ring(struct kioctx *ctx,
59729 tail = ring->tail;
59730 kunmap_atomic(ring);
59731
59732+ /*
59733+	 * Ensure that once we've read the current tail pointer, we
59734+	 * also see the events that were stored up to the tail.
59735+ */
59736+ smp_rmb();
59737+
59738 pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
59739
59740 if (head == tail)
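
refill_reqs_available() above recovers completion-ring slots: it computes how many events are currently sitting in the ring from head and tail (with wraparound), subtracts that from the completed-events count, and returns the difference to reqs_available. A userspace model of the occupancy arithmetic (the struct below reduces the kioctx to just the three fields the calculation needs; names are stand-ins):

#include <stdio.h>

struct ring {
	unsigned nr_events;        /* ring capacity                         */
	unsigned completed_events; /* completions not yet returned as slots */
	unsigned reqs_available;   /* free request slots                    */
};

/* Port of the hunk's arithmetic: events still in the ring are
 * tail - head (mod nr_events); anything completed beyond that has been
 * consumed by userland, so its slot can be handed back. */
static void refill(struct ring *r, unsigned head, unsigned tail)
{
	unsigned events_in_ring, completed;

	head %= r->nr_events;	/* clamp: userland can scribble on head */
	if (head <= tail)
		events_in_ring = tail - head;
	else
		events_in_ring = r->nr_events - (head - tail);

	completed = r->completed_events;
	completed = events_in_ring < completed ? completed - events_in_ring : 0;
	if (!completed)
		return;

	r->completed_events -= completed;
	r->reqs_available += completed;	/* stands in for put_reqs_available() */
}

int main(void)
{
	struct ring r = { .nr_events = 128, .completed_events = 10,
			  .reqs_available = 0 };

	refill(&r, 6, 10);	/* 4 events still unread in the ring */
	printf("slots returned: %u\n", r.reqs_available);	/* prints 6 */
	return 0;
}
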
59741diff --git a/fs/attr.c b/fs/attr.c
59742index 6530ced..4a827e2 100644
59743--- a/fs/attr.c
59744+++ b/fs/attr.c
59745@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
59746 unsigned long limit;
59747
59748 limit = rlimit(RLIMIT_FSIZE);
59749+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
59750 if (limit != RLIM_INFINITY && offset > limit)
59751 goto out_sig;
59752 if (offset > inode->i_sb->s_maxbytes)
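
gr_learn_resource() is grsecurity's RBAC learning hook: placed just before the enforcement check, it records that the task actually needed RLIMIT_FSIZE of at least `offset`, so learning mode can later emit a policy with limits derived from real usage. A sketch of the record-then-enforce pattern, with a hypothetical learn_resource() recorder standing in for the grsecurity internals:

#include <stdio.h>
#include <sys/resource.h>

/* Hypothetical recorder: remember the high-water mark a task hit for
 * each rlimit, so a policy can later be generated from observed need. */
static unsigned long observed_max[RLIM_NLIMITS];

static void learn_resource(int res, unsigned long wanted)
{
	if (wanted > observed_max[res])
		observed_max[res] = wanted;
}

/* The pattern from the hunk: record the request first, then enforce. */
static int newsize_ok(unsigned long offset)
{
	struct rlimit rl;

	getrlimit(RLIMIT_FSIZE, &rl);
	learn_resource(RLIMIT_FSIZE, offset);	/* observe actual need */
	if (rl.rlim_cur != RLIM_INFINITY && offset > rl.rlim_cur)
		return -1;			/* would raise SIGXFSZ   */
	return 0;
}

int main(void)
{
	printf("ok=%d, learned max=%lu\n",
	       newsize_ok(4096), observed_max[RLIMIT_FSIZE]);
	return 0;
}
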
59753diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
59754index 116fd38..c04182da 100644
59755--- a/fs/autofs4/waitq.c
59756+++ b/fs/autofs4/waitq.c
59757@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
59758 {
59759 unsigned long sigpipe, flags;
59760 mm_segment_t fs;
59761- const char *data = (const char *)addr;
59762+ const char __user *data = (const char __force_user *)addr;
59763 ssize_t wr = 0;
59764
59765 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
59766@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
59767 return 1;
59768 }
59769
59770+#ifdef CONFIG_GRKERNSEC_HIDESYM
59771+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
59772+#endif
59773+
59774 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
59775 enum autofs_notify notify)
59776 {
59777@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
59778
59779 /* If this is a direct mount request create a dummy name */
59780 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
59781+#ifdef CONFIG_GRKERNSEC_HIDESYM
59782+ /* this name does get written to userland via autofs4_write() */
59783+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
59784+#else
59785 qstr.len = sprintf(name, "%p", dentry);
59786+#endif
59787 else {
59788 qstr.len = autofs4_getpath(sbi, dentry, &name);
59789 if (!qstr.len) {
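
For direct mounts autofs4 needs a unique temporary name, and upstream builds it from the dentry pointer via "%p"; since that string reaches userland through autofs4_write(), HIDESYM substitutes a monotonically increasing counter, which is just as unique but reveals nothing about kernel addresses. A userspace model:

#include <stdio.h>

/* Monotonic id source, standing in for atomic_inc_return_unchecked()
 * on autofs_dummy_name_id in the hunk above. */
static unsigned next_dummy_id(void)
{
	static unsigned id;
	return ++id;
}

/* Build a dummy name: an opaque counter instead of a kernel pointer. */
static int make_dummy_name(char *name, const void *dentry, int hidesym)
{
	if (hidesym)
		return sprintf(name, "%08x", next_dummy_id());
	return sprintf(name, "%p", dentry);	/* leaks an address */
}

int main(void)
{
	char name[32];
	int obj;

	make_dummy_name(name, &obj, 1);
	printf("dummy name: %s\n", name);	/* e.g. "00000001" */
	return 0;
}
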
59790diff --git a/fs/befs/endian.h b/fs/befs/endian.h
59791index 2722387..56059b5 100644
59792--- a/fs/befs/endian.h
59793+++ b/fs/befs/endian.h
59794@@ -11,7 +11,7 @@
59795
59796 #include <asm/byteorder.h>
59797
59798-static inline u64
59799+static inline u64 __intentional_overflow(-1)
59800 fs64_to_cpu(const struct super_block *sb, fs64 n)
59801 {
59802 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
59803@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
59804 return (__force fs64)cpu_to_be64(n);
59805 }
59806
59807-static inline u32
59808+static inline u32 __intentional_overflow(-1)
59809 fs32_to_cpu(const struct super_block *sb, fs32 n)
59810 {
59811 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
59812@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
59813 return (__force fs32)cpu_to_be32(n);
59814 }
59815
59816-static inline u16
59817+static inline u16 __intentional_overflow(-1)
59818 fs16_to_cpu(const struct super_block *sb, fs16 n)
59819 {
59820 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
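
The __intentional_overflow(-1) attribute is consumed by the size_overflow GCC plugin: byte-swapping legitimately "overflows" the value range the plugin tracks, so the annotation exempts these conversion helpers and avoids false positives at every call site. A userspace sketch of the kind of helper being annotated (the attribute is reduced to a no-op macro so the code compiles without the plugin; byte-order macros assume GCC/Clang):

#include <stdint.h>
#include <stdio.h>

/* Without the size_overflow plugin the annotation expands to nothing;
 * with it, it tells the plugin not to flag this function. */
#define __intentional_overflow(...)

static inline uint32_t swab32(uint32_t n)
{
	return (n >> 24) | ((n >> 8) & 0xff00) |
	       ((n << 8) & 0xff0000) | (n << 24);
}

/* Model of fs32_to_cpu(): convert an on-disk 32-bit value to host
 * order, honouring the filesystem's recorded byte order. */
static inline uint32_t __intentional_overflow(-1)
fs32_to_cpu(int fs_is_le, uint32_t n)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return fs_is_le ? n : swab32(n);
#else
	return fs_is_le ? swab32(n) : n;
#endif
}

int main(void)
{
	printf("0x%08x\n", fs32_to_cpu(1, 0x44332211u));
	return 0;
}
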
59821diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
59822index ca0ba15..0fa3257 100644
59823--- a/fs/binfmt_aout.c
59824+++ b/fs/binfmt_aout.c
59825@@ -16,6 +16,7 @@
59826 #include <linux/string.h>
59827 #include <linux/fs.h>
59828 #include <linux/file.h>
59829+#include <linux/security.h>
59830 #include <linux/stat.h>
59831 #include <linux/fcntl.h>
59832 #include <linux/ptrace.h>
59833@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
59834 #endif
59835 # define START_STACK(u) ((void __user *)u.start_stack)
59836
59837+ memset(&dump, 0, sizeof(dump));
59838+
59839 fs = get_fs();
59840 set_fs(KERNEL_DS);
59841 has_dumped = 1;
59842@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
59843
59844 /* If the size of the dump file exceeds the rlimit, then see what would happen
59845 if we wrote the stack, but not the data area. */
59846+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
59847 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
59848 dump.u_dsize = 0;
59849
59850 /* Make sure we have enough room to write the stack and data areas. */
59851+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
59852 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
59853 dump.u_ssize = 0;
59854
59855@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
59856 rlim = rlimit(RLIMIT_DATA);
59857 if (rlim >= RLIM_INFINITY)
59858 rlim = ~0;
59859+
59860+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
59861 if (ex.a_data + ex.a_bss > rlim)
59862 return -ENOMEM;
59863
59864@@ -264,6 +271,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
59865
59866 install_exec_creds(bprm);
59867
59868+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59869+ current->mm->pax_flags = 0UL;
59870+#endif
59871+
59872+#ifdef CONFIG_PAX_PAGEEXEC
59873+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
59874+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
59875+
59876+#ifdef CONFIG_PAX_EMUTRAMP
59877+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
59878+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
59879+#endif
59880+
59881+#ifdef CONFIG_PAX_MPROTECT
59882+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
59883+ current->mm->pax_flags |= MF_PAX_MPROTECT;
59884+#endif
59885+
59886+ }
59887+#endif
59888+
59889 if (N_MAGIC(ex) == OMAGIC) {
59890 unsigned long text_addr, map_size;
59891 loff_t pos;
59892@@ -321,7 +349,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
59893 }
59894
59895 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
59896- PROT_READ | PROT_WRITE | PROT_EXEC,
59897+ PROT_READ | PROT_WRITE,
59898 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
59899 fd_offset + ex.a_text);
59900 if (error != N_DATADDR(ex)) {
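
For a.out binaries there is no PT_PAX_FLAGS program header, so the hunk above derives the PaX flags from bits in the executable's N_FLAGS field: PAGEEXEC and MPROTECT default to on and a set F_PAX_* bit opts the binary out, while EMUTRAMP is opt-in. A userspace model of the decision table (the bit values below are illustrative, not the real F_PAX_* constants):

#include <stdio.h>

/* Illustrative opt-out/opt-in bits; the real F_PAX_* values live in
 * the patched a.out headers. */
#define F_PAX_PAGEEXEC	0x01	/* set => disable PAGEEXEC */
#define F_PAX_EMUTRAMP	0x02	/* set => enable EMUTRAMP  */
#define F_PAX_MPROTECT	0x04	/* set => disable MPROTECT */

#define MF_PAX_PAGEEXEC	0x01
#define MF_PAX_EMUTRAMP	0x02
#define MF_PAX_MPROTECT	0x04

/* Mirrors the hunk: protections on by default, flags opt out. */
static unsigned long aout_pax_flags(unsigned long n_flags)
{
	unsigned long pax = 0;

	if (!(n_flags & F_PAX_PAGEEXEC)) {
		pax |= MF_PAX_PAGEEXEC;
		if (n_flags & F_PAX_EMUTRAMP)
			pax |= MF_PAX_EMUTRAMP;
		if (!(n_flags & F_PAX_MPROTECT))
			pax |= MF_PAX_MPROTECT;
	}
	return pax;
}

int main(void)
{
	printf("flags=%#lx\n", aout_pax_flags(0));	/* all protections on */
	return 0;
}
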
59901diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
59902index 3892c1a..4e27c04 100644
59903--- a/fs/binfmt_elf.c
59904+++ b/fs/binfmt_elf.c
59905@@ -34,6 +34,7 @@
59906 #include <linux/utsname.h>
59907 #include <linux/coredump.h>
59908 #include <linux/sched.h>
59909+#include <linux/xattr.h>
59910 #include <asm/uaccess.h>
59911 #include <asm/param.h>
59912 #include <asm/page.h>
59913@@ -47,7 +48,7 @@
59914
59915 static int load_elf_binary(struct linux_binprm *bprm);
59916 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
59917- int, int, unsigned long);
59918+ int, int, unsigned long) __intentional_overflow(-1);
59919
59920 #ifdef CONFIG_USELIB
59921 static int load_elf_library(struct file *);
59922@@ -65,6 +66,14 @@ static int elf_core_dump(struct coredump_params *cprm);
59923 #define elf_core_dump NULL
59924 #endif
59925
59926+#ifdef CONFIG_PAX_MPROTECT
59927+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
59928+#endif
59929+
59930+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59931+static void elf_handle_mmap(struct file *file);
59932+#endif
59933+
59934 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
59935 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
59936 #else
59937@@ -84,6 +93,15 @@ static struct linux_binfmt elf_format = {
59938 .load_binary = load_elf_binary,
59939 .load_shlib = load_elf_library,
59940 .core_dump = elf_core_dump,
59941+
59942+#ifdef CONFIG_PAX_MPROTECT
59943+ .handle_mprotect= elf_handle_mprotect,
59944+#endif
59945+
59946+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59947+ .handle_mmap = elf_handle_mmap,
59948+#endif
59949+
59950 .min_coredump = ELF_EXEC_PAGESIZE,
59951 };
59952
59953@@ -91,6 +109,8 @@ static struct linux_binfmt elf_format = {
59954
59955 static int set_brk(unsigned long start, unsigned long end)
59956 {
59957+ unsigned long e = end;
59958+
59959 start = ELF_PAGEALIGN(start);
59960 end = ELF_PAGEALIGN(end);
59961 if (end > start) {
59962@@ -99,7 +119,7 @@ static int set_brk(unsigned long start, unsigned long end)
59963 if (BAD_ADDR(addr))
59964 return addr;
59965 }
59966- current->mm->start_brk = current->mm->brk = end;
59967+ current->mm->start_brk = current->mm->brk = e;
59968 return 0;
59969 }
59970
59971@@ -160,12 +180,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
59972 elf_addr_t __user *u_rand_bytes;
59973 const char *k_platform = ELF_PLATFORM;
59974 const char *k_base_platform = ELF_BASE_PLATFORM;
59975- unsigned char k_rand_bytes[16];
59976+ u32 k_rand_bytes[4];
59977 int items;
59978 elf_addr_t *elf_info;
59979 int ei_index = 0;
59980 const struct cred *cred = current_cred();
59981 struct vm_area_struct *vma;
59982+ unsigned long saved_auxv[AT_VECTOR_SIZE];
59983
59984 /*
59985 * In some cases (e.g. Hyper-Threading), we want to avoid L1
59986@@ -207,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
59987 * Generate 16 random bytes for userspace PRNG seeding.
59988 */
59989 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
59990- u_rand_bytes = (elf_addr_t __user *)
59991- STACK_ALLOC(p, sizeof(k_rand_bytes));
59992+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
59993+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
59994+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
59995+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
59996+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
59997+ u_rand_bytes = (elf_addr_t __user *) p;
59998 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
59999 return -EFAULT;
60000
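
Besides handing 16 random bytes to userland as AT_RANDOM, the hunk above folds each word back into the kernel's prandom state (XORed with fresh prandom_u32() output, so userland cannot reconstruct the seed), and replaces STACK_ALLOC with an explicit STACK_ROUND so the bytes land at an aligned stack slot. A small model of the rounding step, assuming the common downward-growing stack (some architectures differ):

#include <stdint.h>
#include <stdio.h>

/* Model of STACK_ROUND for a downward-growing stack: move the stack
 * pointer down far enough for the item, then align it. The 16-byte
 * alignment here is illustrative. */
static uintptr_t stack_round(uintptr_t sp, size_t items)
{
	return (sp - items) & ~(uintptr_t)15;
}

int main(void)
{
	uint32_t k_rand_bytes[4];	/* 16 bytes of AT_RANDOM material */
	uintptr_t p = 0x7ffffffff000u;

	p = stack_round(p, sizeof(k_rand_bytes));
	printf("AT_RANDOM bytes placed at %#lx\n", (unsigned long)p);
	return 0;
}
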
60001@@ -323,9 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
60002 return -EFAULT;
60003 current->mm->env_end = p;
60004
60005+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
60006+
60007 /* Put the elf_info on the stack in the right place. */
60008 sp = (elf_addr_t __user *)envp + 1;
60009- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
60010+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
60011 return -EFAULT;
60012 return 0;
60013 }
60014@@ -393,15 +420,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
60015 an ELF header */
60016
60017 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
60018- struct file *interpreter, unsigned long *interp_map_addr,
60019- unsigned long no_base)
60020+ struct file *interpreter, unsigned long no_base)
60021 {
60022 struct elf_phdr *elf_phdata;
60023 struct elf_phdr *eppnt;
60024- unsigned long load_addr = 0;
60025+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
60026 int load_addr_set = 0;
60027 unsigned long last_bss = 0, elf_bss = 0;
60028- unsigned long error = ~0UL;
60029+ unsigned long error = -EINVAL;
60030 unsigned long total_size;
60031 int retval, i, size;
60032
60033@@ -447,6 +473,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
60034 goto out_close;
60035 }
60036
60037+#ifdef CONFIG_PAX_SEGMEXEC
60038+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
60039+ pax_task_size = SEGMEXEC_TASK_SIZE;
60040+#endif
60041+
60042 eppnt = elf_phdata;
60043 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
60044 if (eppnt->p_type == PT_LOAD) {
60045@@ -470,8 +501,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
60046 map_addr = elf_map(interpreter, load_addr + vaddr,
60047 eppnt, elf_prot, elf_type, total_size);
60048 total_size = 0;
60049- if (!*interp_map_addr)
60050- *interp_map_addr = map_addr;
60051 error = map_addr;
60052 if (BAD_ADDR(map_addr))
60053 goto out_close;
60054@@ -490,8 +519,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
60055 k = load_addr + eppnt->p_vaddr;
60056 if (BAD_ADDR(k) ||
60057 eppnt->p_filesz > eppnt->p_memsz ||
60058- eppnt->p_memsz > TASK_SIZE ||
60059- TASK_SIZE - eppnt->p_memsz < k) {
60060+ eppnt->p_memsz > pax_task_size ||
60061+ pax_task_size - eppnt->p_memsz < k) {
60062 error = -ENOMEM;
60063 goto out_close;
60064 }
60065@@ -530,9 +559,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
60066 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
60067
60068 /* Map the last of the bss segment */
60069- error = vm_brk(elf_bss, last_bss - elf_bss);
60070- if (BAD_ADDR(error))
60071- goto out_close;
60072+ if (last_bss > elf_bss) {
60073+ error = vm_brk(elf_bss, last_bss - elf_bss);
60074+ if (BAD_ADDR(error))
60075+ goto out_close;
60076+ }
60077 }
60078
60079 error = load_addr;
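
The preceding hunk replaces an unconditional vm_brk() with one guarded by `last_bss > elf_bss`: when the interpreter's bss ends within the pages the file mapping already covers, there is nothing left to map, and issuing a zero-length (or, after unsigned subtraction, underflowing) brk request was the bug. A tiny model of the guard:

#include <stdio.h>

/* Only extend the mapping when the bss actually reaches past what the
 * file mapping (plus page-alignment padding) already covers. */
static void map_remaining_bss(unsigned long elf_bss, unsigned long last_bss)
{
	if (last_bss > elf_bss)
		printf("vm_brk(%#lx, %#lx)\n", elf_bss, last_bss - elf_bss);
	/* else: nothing to do — avoids a bogus zero/underflowed length */
}

int main(void)
{
	map_remaining_bss(0x1000, 0x3000);	/* maps two pages of bss */
	map_remaining_bss(0x3000, 0x3000);	/* no-op after the fix   */
	return 0;
}
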
60080@@ -543,6 +574,336 @@ out:
60081 return error;
60082 }
60083
60084+#ifdef CONFIG_PAX_PT_PAX_FLAGS
60085+#ifdef CONFIG_PAX_SOFTMODE
60086+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
60087+{
60088+ unsigned long pax_flags = 0UL;
60089+
60090+#ifdef CONFIG_PAX_PAGEEXEC
60091+ if (elf_phdata->p_flags & PF_PAGEEXEC)
60092+ pax_flags |= MF_PAX_PAGEEXEC;
60093+#endif
60094+
60095+#ifdef CONFIG_PAX_SEGMEXEC
60096+ if (elf_phdata->p_flags & PF_SEGMEXEC)
60097+ pax_flags |= MF_PAX_SEGMEXEC;
60098+#endif
60099+
60100+#ifdef CONFIG_PAX_EMUTRAMP
60101+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
60102+ pax_flags |= MF_PAX_EMUTRAMP;
60103+#endif
60104+
60105+#ifdef CONFIG_PAX_MPROTECT
60106+ if (elf_phdata->p_flags & PF_MPROTECT)
60107+ pax_flags |= MF_PAX_MPROTECT;
60108+#endif
60109+
60110+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
60111+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
60112+ pax_flags |= MF_PAX_RANDMMAP;
60113+#endif
60114+
60115+ return pax_flags;
60116+}
60117+#endif
60118+
60119+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
60120+{
60121+ unsigned long pax_flags = 0UL;
60122+
60123+#ifdef CONFIG_PAX_PAGEEXEC
60124+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
60125+ pax_flags |= MF_PAX_PAGEEXEC;
60126+#endif
60127+
60128+#ifdef CONFIG_PAX_SEGMEXEC
60129+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
60130+ pax_flags |= MF_PAX_SEGMEXEC;
60131+#endif
60132+
60133+#ifdef CONFIG_PAX_EMUTRAMP
60134+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
60135+ pax_flags |= MF_PAX_EMUTRAMP;
60136+#endif
60137+
60138+#ifdef CONFIG_PAX_MPROTECT
60139+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
60140+ pax_flags |= MF_PAX_MPROTECT;
60141+#endif
60142+
60143+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
60144+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
60145+ pax_flags |= MF_PAX_RANDMMAP;
60146+#endif
60147+
60148+ return pax_flags;
60149+}
60150+#endif
60151+
60152+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
60153+#ifdef CONFIG_PAX_SOFTMODE
60154+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
60155+{
60156+ unsigned long pax_flags = 0UL;
60157+
60158+#ifdef CONFIG_PAX_PAGEEXEC
60159+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
60160+ pax_flags |= MF_PAX_PAGEEXEC;
60161+#endif
60162+
60163+#ifdef CONFIG_PAX_SEGMEXEC
60164+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
60165+ pax_flags |= MF_PAX_SEGMEXEC;
60166+#endif
60167+
60168+#ifdef CONFIG_PAX_EMUTRAMP
60169+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
60170+ pax_flags |= MF_PAX_EMUTRAMP;
60171+#endif
60172+
60173+#ifdef CONFIG_PAX_MPROTECT
60174+ if (pax_flags_softmode & MF_PAX_MPROTECT)
60175+ pax_flags |= MF_PAX_MPROTECT;
60176+#endif
60177+
60178+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
60179+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
60180+ pax_flags |= MF_PAX_RANDMMAP;
60181+#endif
60182+
60183+ return pax_flags;
60184+}
60185+#endif
60186+
60187+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
60188+{
60189+ unsigned long pax_flags = 0UL;
60190+
60191+#ifdef CONFIG_PAX_PAGEEXEC
60192+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
60193+ pax_flags |= MF_PAX_PAGEEXEC;
60194+#endif
60195+
60196+#ifdef CONFIG_PAX_SEGMEXEC
60197+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
60198+ pax_flags |= MF_PAX_SEGMEXEC;
60199+#endif
60200+
60201+#ifdef CONFIG_PAX_EMUTRAMP
60202+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
60203+ pax_flags |= MF_PAX_EMUTRAMP;
60204+#endif
60205+
60206+#ifdef CONFIG_PAX_MPROTECT
60207+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
60208+ pax_flags |= MF_PAX_MPROTECT;
60209+#endif
60210+
60211+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
60212+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
60213+ pax_flags |= MF_PAX_RANDMMAP;
60214+#endif
60215+
60216+ return pax_flags;
60217+}
60218+#endif
60219+
60220+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60221+static unsigned long pax_parse_defaults(void)
60222+{
60223+ unsigned long pax_flags = 0UL;
60224+
60225+#ifdef CONFIG_PAX_SOFTMODE
60226+ if (pax_softmode)
60227+ return pax_flags;
60228+#endif
60229+
60230+#ifdef CONFIG_PAX_PAGEEXEC
60231+ pax_flags |= MF_PAX_PAGEEXEC;
60232+#endif
60233+
60234+#ifdef CONFIG_PAX_SEGMEXEC
60235+ pax_flags |= MF_PAX_SEGMEXEC;
60236+#endif
60237+
60238+#ifdef CONFIG_PAX_MPROTECT
60239+ pax_flags |= MF_PAX_MPROTECT;
60240+#endif
60241+
60242+#ifdef CONFIG_PAX_RANDMMAP
60243+ if (randomize_va_space)
60244+ pax_flags |= MF_PAX_RANDMMAP;
60245+#endif
60246+
60247+ return pax_flags;
60248+}
60249+
60250+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
60251+{
60252+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
60253+
60254+#ifdef CONFIG_PAX_EI_PAX
60255+
60256+#ifdef CONFIG_PAX_SOFTMODE
60257+ if (pax_softmode)
60258+ return pax_flags;
60259+#endif
60260+
60261+ pax_flags = 0UL;
60262+
60263+#ifdef CONFIG_PAX_PAGEEXEC
60264+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
60265+ pax_flags |= MF_PAX_PAGEEXEC;
60266+#endif
60267+
60268+#ifdef CONFIG_PAX_SEGMEXEC
60269+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
60270+ pax_flags |= MF_PAX_SEGMEXEC;
60271+#endif
60272+
60273+#ifdef CONFIG_PAX_EMUTRAMP
60274+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
60275+ pax_flags |= MF_PAX_EMUTRAMP;
60276+#endif
60277+
60278+#ifdef CONFIG_PAX_MPROTECT
60279+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
60280+ pax_flags |= MF_PAX_MPROTECT;
60281+#endif
60282+
60283+#ifdef CONFIG_PAX_ASLR
60284+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
60285+ pax_flags |= MF_PAX_RANDMMAP;
60286+#endif
60287+
60288+#endif
60289+
60290+ return pax_flags;
60291+
60292+}
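
[Illustrative sketch, not part of the patch.] pax_parse_ei_pax() reads the legacy PaX marking that old tools stored directly in otherwise-unused padding bytes of the ELF header's e_ident array. The bits have negative polarity: a set bit switches the corresponding protection off. A self-contained rendering of that convention — EI_PAX and the EF_PAX_* values below are assumptions mirroring the patch's scheme, not part of the standard ELF ABI:

#include <stdio.h>

#define EI_PAX          14      /* e_ident index claimed by legacy PaX marking */
#define EF_PAX_PAGEEXEC 1U      /* bit values are stand-ins here */
#define EF_PAX_SEGMEXEC 32U

int main(void)
{
        unsigned char e_ident[16] = {0};

        e_ident[EI_PAX] = EF_PAX_PAGEEXEC;      /* set bit = feature disabled */

        printf("PAGEEXEC %s\n",
               (e_ident[EI_PAX] & EF_PAX_PAGEEXEC) ? "off" : "on");
        printf("SEGMEXEC %s\n",
               (e_ident[EI_PAX] & EF_PAX_SEGMEXEC) ? "off" : "on");
        return 0;
}
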
60293+
60294+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
60295+{
60296+
60297+#ifdef CONFIG_PAX_PT_PAX_FLAGS
60298+ unsigned long i;
60299+
60300+ for (i = 0UL; i < elf_ex->e_phnum; i++)
60301+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
60302+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
60303+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
60304+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
60305+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
60306+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
60307+ return PAX_PARSE_FLAGS_FALLBACK;
60308+
60309+#ifdef CONFIG_PAX_SOFTMODE
60310+ if (pax_softmode)
60311+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
60312+ else
60313+#endif
60314+
60315+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
60316+ break;
60317+ }
60318+#endif
60319+
60320+ return PAX_PARSE_FLAGS_FALLBACK;
60321+}
60322+
60323+static unsigned long pax_parse_xattr_pax(struct file * const file)
60324+{
60325+
60326+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
60327+ ssize_t xattr_size, i;
60328+ unsigned char xattr_value[sizeof("pemrs") - 1];
60329+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
60330+
60331+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
60332+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
60333+ return PAX_PARSE_FLAGS_FALLBACK;
60334+
60335+ for (i = 0; i < xattr_size; i++)
60336+ switch (xattr_value[i]) {
60337+ default:
60338+ return PAX_PARSE_FLAGS_FALLBACK;
60339+
60340+#define parse_flag(option1, option2, flag) \
60341+ case option1: \
60342+ if (pax_flags_hardmode & MF_PAX_##flag) \
60343+ return PAX_PARSE_FLAGS_FALLBACK;\
60344+ pax_flags_hardmode |= MF_PAX_##flag; \
60345+ break; \
60346+ case option2: \
60347+ if (pax_flags_softmode & MF_PAX_##flag) \
60348+ return PAX_PARSE_FLAGS_FALLBACK;\
60349+ pax_flags_softmode |= MF_PAX_##flag; \
60350+ break;
60351+
60352+ parse_flag('p', 'P', PAGEEXEC);
60353+ parse_flag('e', 'E', EMUTRAMP);
60354+ parse_flag('m', 'M', MPROTECT);
60355+ parse_flag('r', 'R', RANDMMAP);
60356+ parse_flag('s', 'S', SEGMEXEC);
60357+
60358+#undef parse_flag
60359+ }
60360+
60361+ if (pax_flags_hardmode & pax_flags_softmode)
60362+ return PAX_PARSE_FLAGS_FALLBACK;
60363+
60364+#ifdef CONFIG_PAX_SOFTMODE
60365+ if (pax_softmode)
60366+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
60367+ else
60368+#endif
60369+
60370+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
60371+#else
60372+ return PAX_PARSE_FLAGS_FALLBACK;
60373+#endif
60374+
60375+}
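
[Illustrative sketch, not part of the patch.] pax_parse_xattr_pax() reads at most sizeof("pemrs")-1 bytes of the PaX extended attribute and feeds each character through the parse_flag() table above: a lower-case letter records the flag in the hard-mode mask, an upper-case letter in the soft-mode mask, and a duplicate letter — or the same flag landing in both masks — invalidates the whole marking. A rough user-space equivalent of that parse (the flag values are stand-ins):

#include <stdio.h>
#include <string.h>

enum { PAGEEXEC = 1, EMUTRAMP = 2, MPROTECT = 4, RANDMMAP = 8, SEGMEXEC = 16 };

static int parse_pax_xattr(const char *val, unsigned *hard, unsigned *soft)
{
        static const char tbl[] = "pemrs";

        *hard = *soft = 0;
        for (; *val; val++) {
                const char *p = strchr(tbl, *val | 0x20);
                unsigned flag, *mask;

                if (!p)
                        return -1;              /* unknown letter */
                flag = 1u << (p - tbl);
                mask = (*val >= 'a') ? hard : soft;
                if (*mask & flag)
                        return -1;              /* duplicate letter */
                *mask |= flag;
        }
        return (*hard & *soft) ? -1 : 0;        /* flag in both masks: reject */
}

int main(void)
{
        unsigned h, s;

        if (parse_pax_xattr("Pm", &h, &s) == 0)
                printf("hard=%x soft=%x\n", h, s);  /* hard=4 soft=1 */
        return 0;
}
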
60376+
60377+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
60378+{
60379+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
60380+
60381+ pax_flags = pax_parse_defaults();
60382+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
60383+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
60384+ xattr_pax_flags = pax_parse_xattr_pax(file);
60385+
60386+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
60387+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
60388+ pt_pax_flags != xattr_pax_flags)
60389+ return -EINVAL;
60390+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
60391+ pax_flags = xattr_pax_flags;
60392+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
60393+ pax_flags = pt_pax_flags;
60394+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
60395+ pax_flags = ei_pax_flags;
60396+
60397+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
60398+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
60399+ if ((__supported_pte_mask & _PAGE_NX))
60400+ pax_flags &= ~MF_PAX_SEGMEXEC;
60401+ else
60402+ pax_flags &= ~MF_PAX_PAGEEXEC;
60403+ }
60404+#endif
60405+
60406+ if (0 > pax_check_flags(&pax_flags))
60407+ return -EINVAL;
60408+
60409+ current->mm->pax_flags = pax_flags;
60410+ return 0;
60411+}
60412+#endif
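
[Illustrative sketch, not part of the patch.] pax_parse_pax_flags() reconciles the three marking sources with a fixed precedence: an xattr marking wins over PT_PAX_FLAGS, which wins over legacy EI_PAX, which wins over the compiled-in defaults; a disagreement between the two explicit sources (xattr and PT_PAX_FLAGS) aborts the exec with -EINVAL. A compact user-space rendering of just that decision, with ~0UL standing in for PAX_PARSE_FLAGS_FALLBACK:

#include <stdio.h>

#define FALLBACK (~0UL)   /* stand-in for PAX_PARSE_FLAGS_FALLBACK */

static unsigned long resolve(unsigned long defaults, unsigned long ei,
                             unsigned long pt, unsigned long xattr)
{
        if (pt != FALLBACK && xattr != FALLBACK && pt != xattr)
                return FALLBACK;          /* conflicting explicit markings */
        if (xattr != FALLBACK)
                return xattr;             /* xattr marking wins */
        if (pt != FALLBACK)
                return pt;                /* then PT_PAX_FLAGS */
        if (ei != FALLBACK)
                return ei;                /* then legacy EI_PAX */
        return defaults;                  /* otherwise kernel defaults */
}

int main(void)
{
        /* PT_PAX_FLAGS present, no xattr: the PT marking overrides defaults */
        printf("%#lx\n", resolve(0x5UL, FALLBACK, 0x3UL, FALLBACK));
        return 0;
}
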
60413+
60414 /*
60415 * These are the functions used to load ELF style executables and shared
60416 * libraries. There is no binary dependent code anywhere else.
60417@@ -556,6 +917,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
60418 {
60419 unsigned int random_variable = 0;
60420
60421+#ifdef CONFIG_PAX_RANDUSTACK
60422+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
60423+ return stack_top - current->mm->delta_stack;
60424+#endif
60425+
60426 if ((current->flags & PF_RANDOMIZE) &&
60427 !(current->personality & ADDR_NO_RANDOMIZE)) {
60428 random_variable = get_random_int() & STACK_RND_MASK;
60429@@ -574,7 +940,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
60430 unsigned long load_addr = 0, load_bias = 0;
60431 int load_addr_set = 0;
60432 char * elf_interpreter = NULL;
60433- unsigned long error;
60434+ unsigned long error = 0;
60435 struct elf_phdr *elf_ppnt, *elf_phdata;
60436 unsigned long elf_bss, elf_brk;
60437 int retval, i;
60438@@ -589,6 +955,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
60439 struct elfhdr elf_ex;
60440 struct elfhdr interp_elf_ex;
60441 } *loc;
60442+ unsigned long pax_task_size;
60443
60444 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
60445 if (!loc) {
60446@@ -726,6 +1093,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
60447 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
60448 may depend on the personality. */
60449 SET_PERSONALITY(loc->elf_ex);
60450+
60451+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60452+ current->mm->pax_flags = 0UL;
60453+#endif
60454+
60455+#ifdef CONFIG_PAX_DLRESOLVE
60456+ current->mm->call_dl_resolve = 0UL;
60457+#endif
60458+
60459+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
60460+ current->mm->call_syscall = 0UL;
60461+#endif
60462+
60463+#ifdef CONFIG_PAX_ASLR
60464+ current->mm->delta_mmap = 0UL;
60465+ current->mm->delta_stack = 0UL;
60466+#endif
60467+
60468+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60469+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
60470+ send_sig(SIGKILL, current, 0);
60471+ goto out_free_dentry;
60472+ }
60473+#endif
60474+
60475+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60476+ pax_set_initial_flags(bprm);
60477+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60478+ if (pax_set_initial_flags_func)
60479+ (pax_set_initial_flags_func)(bprm);
60480+#endif
60481+
60482+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
60483+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
60484+ current->mm->context.user_cs_limit = PAGE_SIZE;
60485+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
60486+ }
60487+#endif
60488+
60489+#ifdef CONFIG_PAX_SEGMEXEC
60490+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
60491+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
60492+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
60493+ pax_task_size = SEGMEXEC_TASK_SIZE;
60494+ current->mm->def_flags |= VM_NOHUGEPAGE;
60495+ } else
60496+#endif
60497+
60498+ pax_task_size = TASK_SIZE;
60499+
60500+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
60501+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
60502+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
60503+ put_cpu();
60504+ }
60505+#endif
60506+
60507+#ifdef CONFIG_PAX_ASLR
60508+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
60509+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
60510+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
60511+ }
60512+#endif
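
[Illustrative sketch, not part of the patch.] Under MF_PAX_RANDMMAP the per-exec ASLR deltas are drawn once here and consumed later by the mmap and stack-placement paths. Each delta is a page-aligned offset carrying PAX_DELTA_MMAP_LEN or PAX_DELTA_STACK_LEN bits of entropy; both lengths are per-architecture constants. A user-space illustration on a 64-bit target, with stand-in values:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT          12
#define PAX_DELTA_MMAP_LEN  28  /* stand-ins: both lengths are per-arch */
#define PAX_DELTA_STACK_LEN 24

int main(void)
{
        /* random() stands in for the kernel's pax_get_random_long() */
        unsigned long r = (unsigned long)random();
        unsigned long delta_mmap  = (r & ((1UL << PAX_DELTA_MMAP_LEN)  - 1)) << PAGE_SHIFT;
        unsigned long delta_stack = (r & ((1UL << PAX_DELTA_STACK_LEN) - 1)) << PAGE_SHIFT;

        printf("delta_mmap=%#lx delta_stack=%#lx\n", delta_mmap, delta_stack);
        return 0;
}
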
60513+
60514+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
60515+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
60516+ executable_stack = EXSTACK_DISABLE_X;
60517+ current->personality &= ~READ_IMPLIES_EXEC;
60518+ } else
60519+#endif
60520+
60521 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
60522 current->personality |= READ_IMPLIES_EXEC;
60523
60524@@ -815,6 +1253,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
60525 #else
60526 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
60527 #endif
60528+
60529+#ifdef CONFIG_PAX_RANDMMAP
60530+ /* PaX: randomize base address at the default exe base if requested */
60531+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
60532+#ifdef CONFIG_SPARC64
60533+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
60534+#else
60535+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
60536+#endif
60537+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
60538+ elf_flags |= MAP_FIXED;
60539+ }
60540+#endif
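
[Illustrative sketch, not part of the patch.] The block above randomizes the ET_DYN load base around PAX_ELF_ET_DYN_BASE instead of the stock ELF_ET_DYN_BASE and pins the choice with MAP_FIXED (SPARC64 shifts the random part one bit further). A user-space rendering of the bias computation, assuming 4 KiB pages and stand-in values for the per-arch constants:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT          12
#define ELF_PAGESTART(a)    ((a) & ~((1UL << PAGE_SHIFT) - 1))
#define PAX_DELTA_MMAP_LEN  28          /* stand-in; per-arch in reality */
#define PAX_ELF_ET_DYN_BASE 0x400000UL  /* stand-in base address */

int main(void)
{
        unsigned long vaddr = 0;        /* p_vaddr of the first PT_LOAD */
        unsigned long bias;

        /* random() stands in for pax_get_random_long() */
        bias = ((unsigned long)random() & ((1UL << PAX_DELTA_MMAP_LEN) - 1))
               << PAGE_SHIFT;
        bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + bias);
        printf("load_bias = %#lx\n", bias);
        return 0;
}
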
60541+
60542 }
60543
60544 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
60545@@ -847,9 +1299,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
60546 * allowed task size. Note that p_filesz must always be
60547 * <= p_memsz so it is only necessary to check p_memsz.
60548 */
60549- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
60550- elf_ppnt->p_memsz > TASK_SIZE ||
60551- TASK_SIZE - elf_ppnt->p_memsz < k) {
60552+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
60553+ elf_ppnt->p_memsz > pax_task_size ||
60554+ pax_task_size - elf_ppnt->p_memsz < k) {
60555 /* set_brk can never work. Avoid overflows. */
60556 send_sig(SIGKILL, current, 0);
60557 retval = -EINVAL;
60558@@ -888,17 +1340,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
60559 goto out_free_dentry;
60560 }
60561 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
60562- send_sig(SIGSEGV, current, 0);
60563- retval = -EFAULT; /* Nobody gets to see this, but.. */
60564- goto out_free_dentry;
60565+ /*
60566+ * This bss-zeroing can fail if the ELF
60567+ * file specifies odd protections. So
60568+ * we don't check the return value
60569+ */
60570 }
60571
60572+#ifdef CONFIG_PAX_RANDMMAP
60573+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
60574+ unsigned long start, size, flags;
60575+ vm_flags_t vm_flags;
60576+
60577+ start = ELF_PAGEALIGN(elf_brk);
60578+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
60579+ flags = MAP_FIXED | MAP_PRIVATE;
60580+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
60581+
60582+ down_write(&current->mm->mmap_sem);
60583+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
60584+ retval = -ENOMEM;
60585+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
60586+// if (current->personality & ADDR_NO_RANDOMIZE)
60587+// vm_flags |= VM_READ | VM_MAYREAD;
60588+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
60589+ retval = IS_ERR_VALUE(start) ? start : 0;
60590+ }
60591+ up_write(&current->mm->mmap_sem);
60592+ if (retval == 0)
60593+ retval = set_brk(start + size, start + size + PAGE_SIZE);
60594+ if (retval < 0) {
60595+ send_sig(SIGKILL, current, 0);
60596+ goto out_free_dentry;
60597+ }
60598+ }
60599+#endif
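
[Illustrative note, not part of the patch.] The block above reserves a randomly sized, non-expandable gap right after the brk so the heap start no longer sits at a predictable offset from the executable. The gap is PAGE_SIZE plus a random multiple of 16 bytes drawn from 22 bits of entropy; a quick check of the upper bound:

#include <stdio.h>

#define PAGE_SIZE 4096UL   /* assuming 4 KiB pages */

int main(void)
{
        /* gap = PAGE_SIZE + (22 random bits, scaled to 16-byte units) */
        unsigned long max_gap = PAGE_SIZE + (((1UL << 22) - 1UL) << 4);

        printf("max gap: %lu bytes (~%lu MiB)\n", max_gap, max_gap >> 20);
        return 0;
}
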
60600+
60601 if (elf_interpreter) {
60602- unsigned long interp_map_addr = 0;
60603-
60604 elf_entry = load_elf_interp(&loc->interp_elf_ex,
60605 interpreter,
60606- &interp_map_addr,
60607 load_bias);
60608 if (!IS_ERR((void *)elf_entry)) {
60609 /*
60610@@ -1130,7 +1610,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
60611 * Decide what to dump of a segment, part, all or none.
60612 */
60613 static unsigned long vma_dump_size(struct vm_area_struct *vma,
60614- unsigned long mm_flags)
60615+ unsigned long mm_flags, long signr)
60616 {
60617 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
60618
60619@@ -1168,7 +1648,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
60620 if (vma->vm_file == NULL)
60621 return 0;
60622
60623- if (FILTER(MAPPED_PRIVATE))
60624+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
60625 goto whole;
60626
60627 /*
60628@@ -1375,9 +1855,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
60629 {
60630 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
60631 int i = 0;
60632- do
60633+ do {
60634 i += 2;
60635- while (auxv[i - 2] != AT_NULL);
60636+ } while (auxv[i - 2] != AT_NULL);
60637 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
60638 }
60639
60640@@ -1386,7 +1866,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
60641 {
60642 mm_segment_t old_fs = get_fs();
60643 set_fs(KERNEL_DS);
60644- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
60645+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
60646 set_fs(old_fs);
60647 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
60648 }
60649@@ -2010,14 +2490,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
60650 }
60651
60652 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
60653- unsigned long mm_flags)
60654+ struct coredump_params *cprm)
60655 {
60656 struct vm_area_struct *vma;
60657 size_t size = 0;
60658
60659 for (vma = first_vma(current, gate_vma); vma != NULL;
60660 vma = next_vma(vma, gate_vma))
60661- size += vma_dump_size(vma, mm_flags);
60662+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
60663 return size;
60664 }
60665
60666@@ -2108,7 +2588,7 @@ static int elf_core_dump(struct coredump_params *cprm)
60667
60668 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
60669
60670- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
60671+ offset += elf_core_vma_data_size(gate_vma, cprm);
60672 offset += elf_core_extra_data_size();
60673 e_shoff = offset;
60674
60675@@ -2136,7 +2616,7 @@ static int elf_core_dump(struct coredump_params *cprm)
60676 phdr.p_offset = offset;
60677 phdr.p_vaddr = vma->vm_start;
60678 phdr.p_paddr = 0;
60679- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
60680+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
60681 phdr.p_memsz = vma->vm_end - vma->vm_start;
60682 offset += phdr.p_filesz;
60683 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
60684@@ -2169,7 +2649,7 @@ static int elf_core_dump(struct coredump_params *cprm)
60685 unsigned long addr;
60686 unsigned long end;
60687
60688- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
60689+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
60690
60691 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
60692 struct page *page;
60693@@ -2210,6 +2690,167 @@ out:
60694
60695 #endif /* CONFIG_ELF_CORE */
60696
60697+#ifdef CONFIG_PAX_MPROTECT
60698+/* PaX: non-PIC ELF libraries need relocations on their executable segments;
60699+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
60700+ * we'll remove VM_MAYWRITE for good on RELRO segments.
60701+ *
60702+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
60703+ * basis because we want to allow the common case and not the special ones.
60704+ */
60705+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
60706+{
60707+ struct elfhdr elf_h;
60708+ struct elf_phdr elf_p;
60709+ unsigned long i;
60710+ unsigned long oldflags;
60711+ bool is_textrel_rw, is_textrel_rx, is_relro;
60712+
60713+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
60714+ return;
60715+
60716+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
60717+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
60718+
60719+#ifdef CONFIG_PAX_ELFRELOCS
60720+ /* possible TEXTREL */
60721+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
60722+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
60723+#else
60724+ is_textrel_rw = false;
60725+ is_textrel_rx = false;
60726+#endif
60727+
60728+ /* possible RELRO */
60729+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
60730+
60731+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
60732+ return;
60733+
60734+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
60735+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
60736+
60737+#ifdef CONFIG_PAX_ETEXECRELOCS
60738+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
60739+#else
60740+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
60741+#endif
60742+
60743+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
60744+ !elf_check_arch(&elf_h) ||
60745+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
60746+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
60747+ return;
60748+
60749+ for (i = 0UL; i < elf_h.e_phnum; i++) {
60750+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
60751+ return;
60752+ switch (elf_p.p_type) {
60753+ case PT_DYNAMIC:
60754+ if (!is_textrel_rw && !is_textrel_rx)
60755+ continue;
60756+ i = 0UL;
60757+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
60758+ elf_dyn dyn;
60759+
60760+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
60761+ break;
60762+ if (dyn.d_tag == DT_NULL)
60763+ break;
60764+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
60765+ gr_log_textrel(vma);
60766+ if (is_textrel_rw)
60767+ vma->vm_flags |= VM_MAYWRITE;
60768+ else
60769+ /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
60770+ vma->vm_flags &= ~VM_MAYWRITE;
60771+ break;
60772+ }
60773+ i++;
60774+ }
60775+ is_textrel_rw = false;
60776+ is_textrel_rx = false;
60777+ continue;
60778+
60779+ case PT_GNU_RELRO:
60780+ if (!is_relro)
60781+ continue;
60782+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
60783+ vma->vm_flags &= ~VM_MAYWRITE;
60784+ is_relro = false;
60785+ continue;
60786+
60787+#ifdef CONFIG_PAX_PT_PAX_FLAGS
60788+ case PT_PAX_FLAGS: {
60789+ const char *msg_mprotect = "", *msg_emutramp = "";
60790+ char *buffer_lib, *buffer_exe;
60791+
60792+ if (elf_p.p_flags & PF_NOMPROTECT)
60793+ msg_mprotect = "MPROTECT disabled";
60794+
60795+#ifdef CONFIG_PAX_EMUTRAMP
60796+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
60797+ msg_emutramp = "EMUTRAMP enabled";
60798+#endif
60799+
60800+ if (!msg_mprotect[0] && !msg_emutramp[0])
60801+ continue;
60802+
60803+ if (!printk_ratelimit())
60804+ continue;
60805+
60806+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
60807+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
60808+ if (buffer_lib && buffer_exe) {
60809+ char *path_lib, *path_exe;
60810+
60811+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
60812+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
60813+
60814+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
60815+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
60816+
60817+ }
60818+ free_page((unsigned long)buffer_exe);
60819+ free_page((unsigned long)buffer_lib);
60820+ continue;
60821+ }
60822+#endif
60823+
60824+ }
60825+ }
60826+}
60827+#endif
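
[Illustrative sketch, not part of the patch.] elf_handle_mprotect() re-reads the program headers at mprotect() time to decide whether a suspicious permission flip is a legitimate TEXTREL fixup or a RELRO hardening pass. The same PT_DYNAMIC walk can be reproduced from user space: the sketch below scans a shared object for DT_TEXTREL/DF_TEXTREL, assuming a 64-bit native-endian ELF (the fallback path /lib/libfoo.so is hypothetical):

#include <elf.h>
#include <stdio.h>

int main(int argc, char **argv)
{
        Elf64_Ehdr eh; Elf64_Phdr ph; Elf64_Dyn dyn;
        FILE *f = fopen(argc > 1 ? argv[1] : "/lib/libfoo.so", "rb");
        unsigned i, j;

        if (!f || fread(&eh, sizeof(eh), 1, f) != 1)
                return 1;
        for (i = 0; i < eh.e_phnum; i++) {
                fseek(f, (long)(eh.e_phoff + i * sizeof(ph)), SEEK_SET);
                if (fread(&ph, sizeof(ph), 1, f) != 1 || ph.p_type != PT_DYNAMIC)
                        continue;
                /* walk the dynamic entries until DT_NULL, as the hunk above does */
                for (j = 0; (j + 1) * sizeof(dyn) <= ph.p_filesz; j++) {
                        fseek(f, (long)(ph.p_offset + j * sizeof(dyn)), SEEK_SET);
                        if (fread(&dyn, sizeof(dyn), 1, f) != 1 || dyn.d_tag == DT_NULL)
                                break;
                        if (dyn.d_tag == DT_TEXTREL ||
                            (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
                                puts("TEXTREL present");
                                return 0;
                        }
                }
        }
        puts("no TEXTREL");
        return 0;
}
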
60828+
60829+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
60830+
60831+extern int grsec_enable_log_rwxmaps;
60832+
60833+static void elf_handle_mmap(struct file *file)
60834+{
60835+ struct elfhdr elf_h;
60836+ struct elf_phdr elf_p;
60837+ unsigned long i;
60838+
60839+ if (!grsec_enable_log_rwxmaps)
60840+ return;
60841+
60842+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
60843+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
60844+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
60845+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
60846+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
60847+ return;
60848+
60849+ for (i = 0UL; i < elf_h.e_phnum; i++) {
60850+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
60851+ return;
60852+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
60853+ gr_log_ptgnustack(file);
60854+ }
60855+}
60856+#endif
60857+
60858 static int __init init_elf_binfmt(void)
60859 {
60860 register_binfmt(&elf_format);
60861diff --git a/fs/block_dev.c b/fs/block_dev.c
60862index 6d72746..536d1db 100644
60863--- a/fs/block_dev.c
60864+++ b/fs/block_dev.c
60865@@ -701,7 +701,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
60866 else if (bdev->bd_contains == bdev)
60867 return true; /* is a whole device which isn't held */
60868
60869- else if (whole->bd_holder == bd_may_claim)
60870+ else if (whole->bd_holder == (void *)bd_may_claim)
60871 return true; /* is a partition of a device that is being partitioned */
60872 else if (whole->bd_holder != NULL)
60873 return false; /* is a partition of a held device */
60874diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
60875index aeab453..48dbafc 100644
60876--- a/fs/btrfs/ctree.c
60877+++ b/fs/btrfs/ctree.c
60878@@ -1184,9 +1184,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
60879 free_extent_buffer(buf);
60880 add_root_to_dirty_list(root);
60881 } else {
60882- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
60883- parent_start = parent->start;
60884- else
60885+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
60886+ if (parent)
60887+ parent_start = parent->start;
60888+ else
60889+ parent_start = 0;
60890+ } else
60891 parent_start = 0;
60892
60893 WARN_ON(trans->transid != btrfs_header_generation(parent));
60894diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
60895index a2e90f8..5135e5f 100644
60896--- a/fs/btrfs/delayed-inode.c
60897+++ b/fs/btrfs/delayed-inode.c
60898@@ -462,7 +462,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
60899
60900 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
60901 {
60902- int seq = atomic_inc_return(&delayed_root->items_seq);
60903+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
60904 if ((atomic_dec_return(&delayed_root->items) <
60905 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
60906 waitqueue_active(&delayed_root->wait))
60907@@ -1412,7 +1412,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
60908
60909 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
60910 {
60911- int val = atomic_read(&delayed_root->items_seq);
60912+ int val = atomic_read_unchecked(&delayed_root->items_seq);
60913
60914 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
60915 return 1;
60916@@ -1436,7 +1436,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
60917 int seq;
60918 int ret;
60919
60920- seq = atomic_read(&delayed_root->items_seq);
60921+ seq = atomic_read_unchecked(&delayed_root->items_seq);
60922
60923 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
60924 if (ret)
60925diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
60926index f70119f..ab5894d 100644
60927--- a/fs/btrfs/delayed-inode.h
60928+++ b/fs/btrfs/delayed-inode.h
60929@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
60930 */
60931 struct list_head prepare_list;
60932 atomic_t items; /* for delayed items */
60933- atomic_t items_seq; /* for delayed items */
60934+ atomic_unchecked_t items_seq; /* for delayed items */
60935 int nodes; /* for delayed nodes */
60936 wait_queue_head_t wait;
60937 };
60938@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
60939 struct btrfs_delayed_root *delayed_root)
60940 {
60941 atomic_set(&delayed_root->items, 0);
60942- atomic_set(&delayed_root->items_seq, 0);
60943+ atomic_set_unchecked(&delayed_root->items_seq, 0);
60944 delayed_root->nodes = 0;
60945 spin_lock_init(&delayed_root->lock);
60946 init_waitqueue_head(&delayed_root->wait);
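
[Illustrative sketch, not part of the patch.] items_seq is a pure sequence counter, so it is switched to atomic_unchecked_t: under PaX's REFCOUNT hardening, plain atomic_t increments trap on signed overflow to stop reference-count exploits, and counters whose wraparound is harmless must opt out through the _unchecked API. A rough user-space illustration of the distinction (the real overflow check lives in each arch's atomic assembly):

#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

static int atomic_inc_return(atomic_t *v)
{
        int n = __sync_add_and_fetch(&v->counter, 1);

        if (n < 0) {    /* sign flip: treat as overflow, undo and complain */
                __sync_sub_and_fetch(&v->counter, 1);
                fprintf(stderr, "refcount overflow detected\n");
        }
        return n;
}

static int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
        return __sync_add_and_fetch(&v->counter, 1);    /* may wrap freely */
}

int main(void)
{
        atomic_t ref           = { 0x7fffffff };
        atomic_unchecked_t seq = { 0x7fffffff };

        atomic_inc_return(&ref);                            /* trips the check */
        printf("%d\n", atomic_inc_return_unchecked(&seq));  /* silently wraps */
        return 0;
}
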
60947diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
60948index 47aceb4..7d28b1c 100644
60949--- a/fs/btrfs/ioctl.c
60950+++ b/fs/btrfs/ioctl.c
60951@@ -3965,9 +3965,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
60952 for (i = 0; i < num_types; i++) {
60953 struct btrfs_space_info *tmp;
60954
60955+ /* Don't copy in more than we allocated */
60956 if (!slot_count)
60957 break;
60958
60959+ slot_count--;
60960+
60961 info = NULL;
60962 rcu_read_lock();
60963 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
60964@@ -3989,10 +3992,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
60965 memcpy(dest, &space, sizeof(space));
60966 dest++;
60967 space_args.total_spaces++;
60968- slot_count--;
60969 }
60970- if (!slot_count)
60971- break;
60972 }
60973 up_read(&info->groups_sem);
60974 }
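
[Illustrative sketch, not part of the patch.] The reordering above fixes a user-triggerable overrun: slot_count is now consumed at the top of the outer loop, before any copy, so the number of space-info records written can never exceed what the ioctl caller allocated. The pattern in miniature:

#include <stdio.h>

int main(void)
{
        int dest[3], nwritten = 0, slot_count = 3, src;

        for (src = 0; src < 10; src++) {
                if (!slot_count)
                        break;
                slot_count--;            /* claim the slot before any copy */
                dest[nwritten++] = src;  /* the copy can no longer overrun */
        }
        printf("wrote %d of 10 candidates\n", nwritten);  /* always <= 3 */
        return 0;
}
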
60975diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
60976index 8e16bca..6eabd9e 100644
60977--- a/fs/btrfs/super.c
60978+++ b/fs/btrfs/super.c
60979@@ -270,7 +270,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
60980 function, line, errstr);
60981 return;
60982 }
60983- ACCESS_ONCE(trans->transaction->aborted) = errno;
60984+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
60985 /* Wake up anybody who may be waiting on this transaction */
60986 wake_up(&root->fs_info->transaction_wait);
60987 wake_up(&root->fs_info->transaction_blocked_wait);
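
[Illustrative sketch, not part of the patch.] ACCESS_ONCE_RW exists because the constify plugin makes plain ACCESS_ONCE() read through a const-qualified pointer, turning stray writes into compile errors; intentional writes, like this transaction-abort store, use the _RW variant. Roughly, in user-space terms (a sketch, not the kernel's exact definitions):

#include <stdio.h>

#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

int main(void)
{
        int aborted = 0;

        ACCESS_ONCE_RW(aborted) = -5;          /* write: RW variant required */
        printf("%d\n", ACCESS_ONCE(aborted));  /* read: const variant is fine */
        return 0;
}
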
60988diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
60989index 7869936..7e153dc 100644
60990--- a/fs/btrfs/sysfs.c
60991+++ b/fs/btrfs/sysfs.c
60992@@ -475,7 +475,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
60993 for (set = 0; set < FEAT_MAX; set++) {
60994 int i;
60995 struct attribute *attrs[2];
60996- struct attribute_group agroup = {
60997+ attribute_group_no_const agroup = {
60998 .name = "features",
60999 .attrs = attrs,
61000 };
61001diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
61002index 7f5b41b..e589c13 100644
61003--- a/fs/btrfs/tree-log.h
61004+++ b/fs/btrfs/tree-log.h
61005@@ -41,7 +41,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
61006 static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
61007 struct btrfs_trans_handle *trans)
61008 {
61009- ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
61010+ ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
61011 }
61012
61013 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
61014diff --git a/fs/buffer.c b/fs/buffer.c
61015index eba6e4f..af1182c 100644
61016--- a/fs/buffer.c
61017+++ b/fs/buffer.c
61018@@ -3429,7 +3429,7 @@ void __init buffer_init(void)
61019 bh_cachep = kmem_cache_create("buffer_head",
61020 sizeof(struct buffer_head), 0,
61021 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
61022- SLAB_MEM_SPREAD),
61023+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
61024 NULL);
61025
61026 /*
61027diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
61028index d749731..dd333a6 100644
61029--- a/fs/cachefiles/bind.c
61030+++ b/fs/cachefiles/bind.c
61031@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
61032 args);
61033
61034 /* start by checking things over */
61035- ASSERT(cache->fstop_percent >= 0 &&
61036- cache->fstop_percent < cache->fcull_percent &&
61037+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
61038 cache->fcull_percent < cache->frun_percent &&
61039 cache->frun_percent < 100);
61040
61041- ASSERT(cache->bstop_percent >= 0 &&
61042- cache->bstop_percent < cache->bcull_percent &&
61043+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
61044 cache->bcull_percent < cache->brun_percent &&
61045 cache->brun_percent < 100);
61046
61047diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
61048index b078d30..db23012 100644
61049--- a/fs/cachefiles/daemon.c
61050+++ b/fs/cachefiles/daemon.c
61051@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
61052 if (n > buflen)
61053 return -EMSGSIZE;
61054
61055- if (copy_to_user(_buffer, buffer, n) != 0)
61056+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
61057 return -EFAULT;
61058
61059 return n;
61060@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
61061 if (test_bit(CACHEFILES_DEAD, &cache->flags))
61062 return -EIO;
61063
61064- if (datalen < 0 || datalen > PAGE_SIZE - 1)
61065+ if (datalen > PAGE_SIZE - 1)
61066 return -EOPNOTSUPP;
61067
61068 /* drag the command string into the kernel so we can parse it */
61069@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
61070 if (args[0] != '%' || args[1] != '\0')
61071 return -EINVAL;
61072
61073- if (fstop < 0 || fstop >= cache->fcull_percent)
61074+ if (fstop >= cache->fcull_percent)
61075 return cachefiles_daemon_range_error(cache, args);
61076
61077 cache->fstop_percent = fstop;
61078@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
61079 if (args[0] != '%' || args[1] != '\0')
61080 return -EINVAL;
61081
61082- if (bstop < 0 || bstop >= cache->bcull_percent)
61083+ if (bstop >= cache->bcull_percent)
61084 return cachefiles_daemon_range_error(cache, args);
61085
61086 cache->bstop_percent = bstop;
61087diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
61088index 3d50998..0550d67 100644
61089--- a/fs/cachefiles/internal.h
61090+++ b/fs/cachefiles/internal.h
61091@@ -66,7 +66,7 @@ struct cachefiles_cache {
61092 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
61093 struct rb_root active_nodes; /* active nodes (can't be culled) */
61094 rwlock_t active_lock; /* lock for active_nodes */
61095- atomic_t gravecounter; /* graveyard uniquifier */
61096+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
61097 unsigned frun_percent; /* when to stop culling (% files) */
61098 unsigned fcull_percent; /* when to start culling (% files) */
61099 unsigned fstop_percent; /* when to stop allocating (% files) */
61100@@ -178,19 +178,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
61101 * proc.c
61102 */
61103 #ifdef CONFIG_CACHEFILES_HISTOGRAM
61104-extern atomic_t cachefiles_lookup_histogram[HZ];
61105-extern atomic_t cachefiles_mkdir_histogram[HZ];
61106-extern atomic_t cachefiles_create_histogram[HZ];
61107+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
61108+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
61109+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
61110
61111 extern int __init cachefiles_proc_init(void);
61112 extern void cachefiles_proc_cleanup(void);
61113 static inline
61114-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
61115+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
61116 {
61117 unsigned long jif = jiffies - start_jif;
61118 if (jif >= HZ)
61119 jif = HZ - 1;
61120- atomic_inc(&histogram[jif]);
61121+ atomic_inc_unchecked(&histogram[jif]);
61122 }
61123
61124 #else
61125diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
61126index 5bf2b41..85b93f9 100644
61127--- a/fs/cachefiles/namei.c
61128+++ b/fs/cachefiles/namei.c
61129@@ -312,7 +312,7 @@ try_again:
61130 /* first step is to make up a grave dentry in the graveyard */
61131 sprintf(nbuffer, "%08x%08x",
61132 (uint32_t) get_seconds(),
61133- (uint32_t) atomic_inc_return(&cache->gravecounter));
61134+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
61135
61136 /* do the multiway lock magic */
61137 trap = lock_rename(cache->graveyard, dir);
61138diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
61139index eccd339..4c1d995 100644
61140--- a/fs/cachefiles/proc.c
61141+++ b/fs/cachefiles/proc.c
61142@@ -14,9 +14,9 @@
61143 #include <linux/seq_file.h>
61144 #include "internal.h"
61145
61146-atomic_t cachefiles_lookup_histogram[HZ];
61147-atomic_t cachefiles_mkdir_histogram[HZ];
61148-atomic_t cachefiles_create_histogram[HZ];
61149+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
61150+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
61151+atomic_unchecked_t cachefiles_create_histogram[HZ];
61152
61153 /*
61154 * display the latency histogram
61155@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
61156 return 0;
61157 default:
61158 index = (unsigned long) v - 3;
61159- x = atomic_read(&cachefiles_lookup_histogram[index]);
61160- y = atomic_read(&cachefiles_mkdir_histogram[index]);
61161- z = atomic_read(&cachefiles_create_histogram[index]);
61162+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
61163+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
61164+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
61165 if (x == 0 && y == 0 && z == 0)
61166 return 0;
61167
61168diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
61169index 4b1fb5c..0d2a699 100644
61170--- a/fs/cachefiles/rdwr.c
61171+++ b/fs/cachefiles/rdwr.c
61172@@ -943,7 +943,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
61173 old_fs = get_fs();
61174 set_fs(KERNEL_DS);
61175 ret = file->f_op->write(
61176- file, (const void __user *) data, len, &pos);
61177+ file, (const void __force_user *) data, len, &pos);
61178 set_fs(old_fs);
61179 kunmap(page);
61180 file_end_write(file);
61181diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
61182index c29d6ae..719b9bb 100644
61183--- a/fs/ceph/dir.c
61184+++ b/fs/ceph/dir.c
61185@@ -129,6 +129,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
61186 struct dentry *dentry, *last;
61187 struct ceph_dentry_info *di;
61188 int err = 0;
61189+ char d_name[DNAME_INLINE_LEN];
61190+ const unsigned char *name;
61191
61192 /* claim ref on last dentry we returned */
61193 last = fi->dentry;
61194@@ -192,7 +194,12 @@ more:
61195
61196 dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
61197 dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
61198- if (!dir_emit(ctx, dentry->d_name.name,
61199+ name = dentry->d_name.name;
61200+ if (name == dentry->d_iname) {
61201+ memcpy(d_name, name, dentry->d_name.len);
61202+ name = d_name;
61203+ }
61204+ if (!dir_emit(ctx, name,
61205 dentry->d_name.len,
61206 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
61207 dentry->d_inode->i_mode >> 12)) {
61208@@ -250,7 +257,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
61209 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
61210 struct ceph_mds_client *mdsc = fsc->mdsc;
61211 unsigned frag = fpos_frag(ctx->pos);
61212- int off = fpos_off(ctx->pos);
61213+ unsigned int off = fpos_off(ctx->pos);
61214 int err;
61215 u32 ftype;
61216 struct ceph_mds_reply_info_parsed *rinfo;
61217diff --git a/fs/ceph/super.c b/fs/ceph/super.c
61218index 06150fd..192061b 100644
61219--- a/fs/ceph/super.c
61220+++ b/fs/ceph/super.c
61221@@ -895,7 +895,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
61222 /*
61223 * construct our own bdi so we can control readahead, etc.
61224 */
61225-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
61226+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
61227
61228 static int ceph_register_bdi(struct super_block *sb,
61229 struct ceph_fs_client *fsc)
61230@@ -912,7 +912,7 @@ static int ceph_register_bdi(struct super_block *sb,
61231 default_backing_dev_info.ra_pages;
61232
61233 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
61234- atomic_long_inc_return(&bdi_seq));
61235+ atomic_long_inc_return_unchecked(&bdi_seq));
61236 if (!err)
61237 sb->s_bdi = &fsc->backing_dev_info;
61238 return err;
61239diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
61240index f3ac415..3d2420c 100644
61241--- a/fs/cifs/cifs_debug.c
61242+++ b/fs/cifs/cifs_debug.c
61243@@ -286,8 +286,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
61244
61245 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
61246 #ifdef CONFIG_CIFS_STATS2
61247- atomic_set(&totBufAllocCount, 0);
61248- atomic_set(&totSmBufAllocCount, 0);
61249+ atomic_set_unchecked(&totBufAllocCount, 0);
61250+ atomic_set_unchecked(&totSmBufAllocCount, 0);
61251 #endif /* CONFIG_CIFS_STATS2 */
61252 spin_lock(&cifs_tcp_ses_lock);
61253 list_for_each(tmp1, &cifs_tcp_ses_list) {
61254@@ -300,7 +300,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
61255 tcon = list_entry(tmp3,
61256 struct cifs_tcon,
61257 tcon_list);
61258- atomic_set(&tcon->num_smbs_sent, 0);
61259+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
61260 if (server->ops->clear_stats)
61261 server->ops->clear_stats(tcon);
61262 }
61263@@ -332,8 +332,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
61264 smBufAllocCount.counter, cifs_min_small);
61265 #ifdef CONFIG_CIFS_STATS2
61266 seq_printf(m, "Total Large %d Small %d Allocations\n",
61267- atomic_read(&totBufAllocCount),
61268- atomic_read(&totSmBufAllocCount));
61269+ atomic_read_unchecked(&totBufAllocCount),
61270+ atomic_read_unchecked(&totSmBufAllocCount));
61271 #endif /* CONFIG_CIFS_STATS2 */
61272
61273 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
61274@@ -362,7 +362,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
61275 if (tcon->need_reconnect)
61276 seq_puts(m, "\tDISCONNECTED ");
61277 seq_printf(m, "\nSMBs: %d",
61278- atomic_read(&tcon->num_smbs_sent));
61279+ atomic_read_unchecked(&tcon->num_smbs_sent));
61280 if (server->ops->print_stats)
61281 server->ops->print_stats(m, tcon);
61282 }
61283diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
61284index 8883980..c8ade72 100644
61285--- a/fs/cifs/cifsfs.c
61286+++ b/fs/cifs/cifsfs.c
61287@@ -1072,7 +1072,7 @@ cifs_init_request_bufs(void)
61288 */
61289 cifs_req_cachep = kmem_cache_create("cifs_request",
61290 CIFSMaxBufSize + max_hdr_size, 0,
61291- SLAB_HWCACHE_ALIGN, NULL);
61292+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
61293 if (cifs_req_cachep == NULL)
61294 return -ENOMEM;
61295
61296@@ -1099,7 +1099,7 @@ cifs_init_request_bufs(void)
61297 efficient to alloc 1 per page off the slab compared to 17K (5page)
61298 alloc of large cifs buffers even when page debugging is on */
61299 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
61300- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
61301+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
61302 NULL);
61303 if (cifs_sm_req_cachep == NULL) {
61304 mempool_destroy(cifs_req_poolp);
61305@@ -1184,8 +1184,8 @@ init_cifs(void)
61306 atomic_set(&bufAllocCount, 0);
61307 atomic_set(&smBufAllocCount, 0);
61308 #ifdef CONFIG_CIFS_STATS2
61309- atomic_set(&totBufAllocCount, 0);
61310- atomic_set(&totSmBufAllocCount, 0);
61311+ atomic_set_unchecked(&totBufAllocCount, 0);
61312+ atomic_set_unchecked(&totSmBufAllocCount, 0);
61313 #endif /* CONFIG_CIFS_STATS2 */
61314
61315 atomic_set(&midCount, 0);
61316diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
61317index de6aed8..a0a76fd 100644
61318--- a/fs/cifs/cifsglob.h
61319+++ b/fs/cifs/cifsglob.h
61320@@ -807,35 +807,35 @@ struct cifs_tcon {
61321 __u16 Flags; /* optional support bits */
61322 enum statusEnum tidStatus;
61323 #ifdef CONFIG_CIFS_STATS
61324- atomic_t num_smbs_sent;
61325+ atomic_unchecked_t num_smbs_sent;
61326 union {
61327 struct {
61328- atomic_t num_writes;
61329- atomic_t num_reads;
61330- atomic_t num_flushes;
61331- atomic_t num_oplock_brks;
61332- atomic_t num_opens;
61333- atomic_t num_closes;
61334- atomic_t num_deletes;
61335- atomic_t num_mkdirs;
61336- atomic_t num_posixopens;
61337- atomic_t num_posixmkdirs;
61338- atomic_t num_rmdirs;
61339- atomic_t num_renames;
61340- atomic_t num_t2renames;
61341- atomic_t num_ffirst;
61342- atomic_t num_fnext;
61343- atomic_t num_fclose;
61344- atomic_t num_hardlinks;
61345- atomic_t num_symlinks;
61346- atomic_t num_locks;
61347- atomic_t num_acl_get;
61348- atomic_t num_acl_set;
61349+ atomic_unchecked_t num_writes;
61350+ atomic_unchecked_t num_reads;
61351+ atomic_unchecked_t num_flushes;
61352+ atomic_unchecked_t num_oplock_brks;
61353+ atomic_unchecked_t num_opens;
61354+ atomic_unchecked_t num_closes;
61355+ atomic_unchecked_t num_deletes;
61356+ atomic_unchecked_t num_mkdirs;
61357+ atomic_unchecked_t num_posixopens;
61358+ atomic_unchecked_t num_posixmkdirs;
61359+ atomic_unchecked_t num_rmdirs;
61360+ atomic_unchecked_t num_renames;
61361+ atomic_unchecked_t num_t2renames;
61362+ atomic_unchecked_t num_ffirst;
61363+ atomic_unchecked_t num_fnext;
61364+ atomic_unchecked_t num_fclose;
61365+ atomic_unchecked_t num_hardlinks;
61366+ atomic_unchecked_t num_symlinks;
61367+ atomic_unchecked_t num_locks;
61368+ atomic_unchecked_t num_acl_get;
61369+ atomic_unchecked_t num_acl_set;
61370 } cifs_stats;
61371 #ifdef CONFIG_CIFS_SMB2
61372 struct {
61373- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
61374- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
61375+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
61376+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
61377 } smb2_stats;
61378 #endif /* CONFIG_CIFS_SMB2 */
61379 } stats;
61380@@ -1172,7 +1172,7 @@ convert_delimiter(char *path, char delim)
61381 }
61382
61383 #ifdef CONFIG_CIFS_STATS
61384-#define cifs_stats_inc atomic_inc
61385+#define cifs_stats_inc atomic_inc_unchecked
61386
61387 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
61388 unsigned int bytes)
61389@@ -1538,8 +1538,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
61390 /* Various Debug counters */
61391 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
61392 #ifdef CONFIG_CIFS_STATS2
61393-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
61394-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
61395+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
61396+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
61397 #endif
61398 GLOBAL_EXTERN atomic_t smBufAllocCount;
61399 GLOBAL_EXTERN atomic_t midCount;
61400diff --git a/fs/cifs/file.c b/fs/cifs/file.c
61401index e90a1e9..908699d 100644
61402--- a/fs/cifs/file.c
61403+++ b/fs/cifs/file.c
61404@@ -1900,10 +1900,14 @@ static int cifs_writepages(struct address_space *mapping,
61405 index = mapping->writeback_index; /* Start from prev offset */
61406 end = -1;
61407 } else {
61408- index = wbc->range_start >> PAGE_CACHE_SHIFT;
61409- end = wbc->range_end >> PAGE_CACHE_SHIFT;
61410- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
61411+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
61412 range_whole = true;
61413+ index = 0;
61414+ end = ULONG_MAX;
61415+ } else {
61416+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
61417+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
61418+ }
61419 scanned = true;
61420 }
61421 retry:
61422diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
61423index 3b0c62e..f7d090c 100644
61424--- a/fs/cifs/misc.c
61425+++ b/fs/cifs/misc.c
61426@@ -170,7 +170,7 @@ cifs_buf_get(void)
61427 memset(ret_buf, 0, buf_size + 3);
61428 atomic_inc(&bufAllocCount);
61429 #ifdef CONFIG_CIFS_STATS2
61430- atomic_inc(&totBufAllocCount);
61431+ atomic_inc_unchecked(&totBufAllocCount);
61432 #endif /* CONFIG_CIFS_STATS2 */
61433 }
61434
61435@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
61436 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
61437 atomic_inc(&smBufAllocCount);
61438 #ifdef CONFIG_CIFS_STATS2
61439- atomic_inc(&totSmBufAllocCount);
61440+ atomic_inc_unchecked(&totSmBufAllocCount);
61441 #endif /* CONFIG_CIFS_STATS2 */
61442
61443 }
61444diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
61445index d1fdfa8..94558f8 100644
61446--- a/fs/cifs/smb1ops.c
61447+++ b/fs/cifs/smb1ops.c
61448@@ -626,27 +626,27 @@ static void
61449 cifs_clear_stats(struct cifs_tcon *tcon)
61450 {
61451 #ifdef CONFIG_CIFS_STATS
61452- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
61453- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
61454- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
61455- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
61456- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
61457- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
61458- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
61459- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
61460- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
61461- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
61462- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
61463- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
61464- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
61465- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
61466- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
61467- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
61468- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
61469- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
61470- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
61471- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
61472- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
61473+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
61474+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
61475+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
61476+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
61477+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
61478+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
61479+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
61480+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
61481+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
61482+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
61483+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
61484+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
61485+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
61486+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
61487+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
61488+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
61489+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
61490+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
61491+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
61492+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
61493+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
61494 #endif
61495 }
61496
61497@@ -655,36 +655,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
61498 {
61499 #ifdef CONFIG_CIFS_STATS
61500 seq_printf(m, " Oplocks breaks: %d",
61501- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
61502+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
61503 seq_printf(m, "\nReads: %d Bytes: %llu",
61504- atomic_read(&tcon->stats.cifs_stats.num_reads),
61505+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
61506 (long long)(tcon->bytes_read));
61507 seq_printf(m, "\nWrites: %d Bytes: %llu",
61508- atomic_read(&tcon->stats.cifs_stats.num_writes),
61509+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
61510 (long long)(tcon->bytes_written));
61511 seq_printf(m, "\nFlushes: %d",
61512- atomic_read(&tcon->stats.cifs_stats.num_flushes));
61513+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
61514 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
61515- atomic_read(&tcon->stats.cifs_stats.num_locks),
61516- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
61517- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
61518+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
61519+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
61520+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
61521 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
61522- atomic_read(&tcon->stats.cifs_stats.num_opens),
61523- atomic_read(&tcon->stats.cifs_stats.num_closes),
61524- atomic_read(&tcon->stats.cifs_stats.num_deletes));
61525+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
61526+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
61527+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
61528 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
61529- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
61530- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
61531+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
61532+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
61533 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
61534- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
61535- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
61536+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
61537+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
61538 seq_printf(m, "\nRenames: %d T2 Renames %d",
61539- atomic_read(&tcon->stats.cifs_stats.num_renames),
61540- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
61541+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
61542+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
61543 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
61544- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
61545- atomic_read(&tcon->stats.cifs_stats.num_fnext),
61546- atomic_read(&tcon->stats.cifs_stats.num_fclose));
61547+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
61548+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
61549+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
61550 #endif
61551 }
61552
61553diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
61554index 787844b..8e7bc7d 100644
61555--- a/fs/cifs/smb2ops.c
61556+++ b/fs/cifs/smb2ops.c
61557@@ -364,8 +364,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
61558 #ifdef CONFIG_CIFS_STATS
61559 int i;
61560 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
61561- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
61562- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
61563+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
61564+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
61565 }
61566 #endif
61567 }
61568@@ -405,65 +405,65 @@ static void
61569 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
61570 {
61571 #ifdef CONFIG_CIFS_STATS
61572- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
61573- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
61574+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
61575+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
61576 seq_printf(m, "\nNegotiates: %d sent %d failed",
61577- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
61578- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
61579+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
61580+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
61581 seq_printf(m, "\nSessionSetups: %d sent %d failed",
61582- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
61583- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
61584+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
61585+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
61586 seq_printf(m, "\nLogoffs: %d sent %d failed",
61587- atomic_read(&sent[SMB2_LOGOFF_HE]),
61588- atomic_read(&failed[SMB2_LOGOFF_HE]));
61589+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
61590+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
61591 seq_printf(m, "\nTreeConnects: %d sent %d failed",
61592- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
61593- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
61594+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
61595+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
61596 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
61597- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
61598- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
61599+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
61600+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
61601 seq_printf(m, "\nCreates: %d sent %d failed",
61602- atomic_read(&sent[SMB2_CREATE_HE]),
61603- atomic_read(&failed[SMB2_CREATE_HE]));
61604+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
61605+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
61606 seq_printf(m, "\nCloses: %d sent %d failed",
61607- atomic_read(&sent[SMB2_CLOSE_HE]),
61608- atomic_read(&failed[SMB2_CLOSE_HE]));
61609+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
61610+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
61611 seq_printf(m, "\nFlushes: %d sent %d failed",
61612- atomic_read(&sent[SMB2_FLUSH_HE]),
61613- atomic_read(&failed[SMB2_FLUSH_HE]));
61614+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
61615+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
61616 seq_printf(m, "\nReads: %d sent %d failed",
61617- atomic_read(&sent[SMB2_READ_HE]),
61618- atomic_read(&failed[SMB2_READ_HE]));
61619+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
61620+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
61621 seq_printf(m, "\nWrites: %d sent %d failed",
61622- atomic_read(&sent[SMB2_WRITE_HE]),
61623- atomic_read(&failed[SMB2_WRITE_HE]));
61624+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
61625+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
61626 seq_printf(m, "\nLocks: %d sent %d failed",
61627- atomic_read(&sent[SMB2_LOCK_HE]),
61628- atomic_read(&failed[SMB2_LOCK_HE]));
61629+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
61630+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
61631 seq_printf(m, "\nIOCTLs: %d sent %d failed",
61632- atomic_read(&sent[SMB2_IOCTL_HE]),
61633- atomic_read(&failed[SMB2_IOCTL_HE]));
61634+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
61635+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
61636 seq_printf(m, "\nCancels: %d sent %d failed",
61637- atomic_read(&sent[SMB2_CANCEL_HE]),
61638- atomic_read(&failed[SMB2_CANCEL_HE]));
61639+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
61640+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
61641 seq_printf(m, "\nEchos: %d sent %d failed",
61642- atomic_read(&sent[SMB2_ECHO_HE]),
61643- atomic_read(&failed[SMB2_ECHO_HE]));
61644+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
61645+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
61646 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
61647- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
61648- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
61649+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
61650+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
61651 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
61652- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
61653- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
61654+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
61655+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
61656 seq_printf(m, "\nQueryInfos: %d sent %d failed",
61657- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
61658- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
61659+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
61660+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
61661 seq_printf(m, "\nSetInfos: %d sent %d failed",
61662- atomic_read(&sent[SMB2_SET_INFO_HE]),
61663- atomic_read(&failed[SMB2_SET_INFO_HE]));
61664+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
61665+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
61666 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
61667- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
61668- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
61669+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
61670+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
61671 #endif
61672 }
61673
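
The hunk above moves the SMB2 per-opcode sent/failed statistics from atomic_t to atomic_unchecked_t: under PAX_REFCOUNT only real reference counts keep overflow trapping, and counters that may legitimately wrap are opted out so they cannot trigger false positives. A minimal userspace sketch of that split, with stand-in types rather than the kernel API:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int counter; } atomic_t;
typedef struct { unsigned int counter; } atomic_unchecked_t;

/* checked increment: a refcount-style protection traps on overflow */
static void atomic_inc(atomic_t *v)
{
	if (v->counter == INT_MAX) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();
	}
	v->counter++;
}

/* unchecked increment: wrap-around is harmless for a statistics counter */
static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	v->counter++;
}

static unsigned int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return v->counter;
}

int main(void)
{
	atomic_t refs = { 1 };
	atomic_unchecked_t sent = { 0 };

	atomic_inc(&refs);
	atomic_inc_unchecked(&sent);
	printf("refs=%d sent=%u\n", refs.counter, atomic_read_unchecked(&sent));
	return 0;
}
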
61674diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
61675index b0b260d..c8927e1 100644
61676--- a/fs/cifs/smb2pdu.c
61677+++ b/fs/cifs/smb2pdu.c
61678@@ -2105,8 +2105,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
61679 default:
61680 cifs_dbg(VFS, "info level %u isn't supported\n",
61681 srch_inf->info_level);
61682- rc = -EINVAL;
61683- goto qdir_exit;
61684+ return -EINVAL;
61685 }
61686
61687 req->FileIndex = cpu_to_le32(index);
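
The SMB2_query_directory hunk replaces "goto qdir_exit" with a direct return for the unsupported-info-level case; returning at that point avoids running the shared exit path before the request/response state it tears down has been set up. A hypothetical sketch of that pattern (names invented, not the cifs code):

#include <stdio.h>
#include <stdlib.h>

static int handle_request(int info_level)
{
	char *rsp;			/* response buffer, not yet allocated */

	switch (info_level) {
	case 1:
		break;
	default:
		return -22;		/* -EINVAL: bail before any cleanup */
	}

	rsp = malloc(64);
	if (!rsp)
		return -12;		/* -ENOMEM */
	printf("handled level %d\n", info_level);
	free(rsp);			/* shared cleanup, valid only here */
	return 0;
}

int main(void)
{
	printf("%d\n", handle_request(2));	/* -22 */
	return 0;
}
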
61688diff --git a/fs/coda/cache.c b/fs/coda/cache.c
61689index 1da168c..8bc7ff6 100644
61690--- a/fs/coda/cache.c
61691+++ b/fs/coda/cache.c
61692@@ -24,7 +24,7 @@
61693 #include "coda_linux.h"
61694 #include "coda_cache.h"
61695
61696-static atomic_t permission_epoch = ATOMIC_INIT(0);
61697+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
61698
61699 /* replace or extend an acl cache hit */
61700 void coda_cache_enter(struct inode *inode, int mask)
61701@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
61702 struct coda_inode_info *cii = ITOC(inode);
61703
61704 spin_lock(&cii->c_lock);
61705- cii->c_cached_epoch = atomic_read(&permission_epoch);
61706+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
61707 if (!uid_eq(cii->c_uid, current_fsuid())) {
61708 cii->c_uid = current_fsuid();
61709 cii->c_cached_perm = mask;
61710@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
61711 {
61712 struct coda_inode_info *cii = ITOC(inode);
61713 spin_lock(&cii->c_lock);
61714- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
61715+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
61716 spin_unlock(&cii->c_lock);
61717 }
61718
61719 /* remove all acl caches */
61720 void coda_cache_clear_all(struct super_block *sb)
61721 {
61722- atomic_inc(&permission_epoch);
61723+ atomic_inc_unchecked(&permission_epoch);
61724 }
61725
61726
61727@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
61728 spin_lock(&cii->c_lock);
61729 hit = (mask & cii->c_cached_perm) == mask &&
61730 uid_eq(cii->c_uid, current_fsuid()) &&
61731- cii->c_cached_epoch == atomic_read(&permission_epoch);
61732+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
61733 spin_unlock(&cii->c_lock);
61734
61735 return hit;
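
coda's permission_epoch is a global generation counter: each cached ACL entry records the epoch it was filled under, and coda_cache_clear_all() invalidates every entry at once by bumping the counter. Because it is a generation tag rather than a reference count, the patch marks it unchecked. A userspace sketch of the epoch-cache idea:

#include <stdbool.h>
#include <stdio.h>

static unsigned int permission_epoch;	/* global generation counter */

struct cached_perm {
	unsigned int epoch;	/* generation the entry was filled under */
	int mask;		/* cached permission bits */
};

static void cache_enter(struct cached_perm *c, int mask)
{
	c->epoch = permission_epoch;
	c->mask = mask;
}

static bool cache_check(const struct cached_perm *c, int mask)
{
	return (mask & c->mask) == mask && c->epoch == permission_epoch;
}

static void cache_clear_all(void)
{
	permission_epoch++;	/* every previously cached entry now misses */
}

int main(void)
{
	struct cached_perm c = { 0, 0 };

	cache_enter(&c, 4);
	printf("hit: %d\n", cache_check(&c, 4));	/* 1 */
	cache_clear_all();
	printf("hit: %d\n", cache_check(&c, 4));	/* 0 */
	return 0;
}
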
61736diff --git a/fs/compat.c b/fs/compat.c
61737index 66d3d3c..9c10175 100644
61738--- a/fs/compat.c
61739+++ b/fs/compat.c
61740@@ -54,7 +54,7 @@
61741 #include <asm/ioctls.h>
61742 #include "internal.h"
61743
61744-int compat_log = 1;
61745+int compat_log = 0;
61746
61747 int compat_printk(const char *fmt, ...)
61748 {
61749@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
61750
61751 set_fs(KERNEL_DS);
61752 /* The __user pointer cast is valid because of the set_fs() */
61753- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
61754+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
61755 set_fs(oldfs);
61756 /* truncating is ok because it's a user address */
61757 if (!ret)
61758@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
61759 goto out;
61760
61761 ret = -EINVAL;
61762- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
61763+ if (nr_segs > UIO_MAXIOV)
61764 goto out;
61765 if (nr_segs > fast_segs) {
61766 ret = -ENOMEM;
61767@@ -850,6 +850,7 @@ struct compat_old_linux_dirent {
61768 struct compat_readdir_callback {
61769 struct dir_context ctx;
61770 struct compat_old_linux_dirent __user *dirent;
61771+ struct file * file;
61772 int result;
61773 };
61774
61775@@ -867,6 +868,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
61776 buf->result = -EOVERFLOW;
61777 return -EOVERFLOW;
61778 }
61779+
61780+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61781+ return 0;
61782+
61783 buf->result++;
61784 dirent = buf->dirent;
61785 if (!access_ok(VERIFY_WRITE, dirent,
61786@@ -898,6 +903,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
61787 if (!f.file)
61788 return -EBADF;
61789
61790+ buf.file = f.file;
61791 error = iterate_dir(f.file, &buf.ctx);
61792 if (buf.result)
61793 error = buf.result;
61794@@ -917,6 +923,7 @@ struct compat_getdents_callback {
61795 struct dir_context ctx;
61796 struct compat_linux_dirent __user *current_dir;
61797 struct compat_linux_dirent __user *previous;
61798+ struct file * file;
61799 int count;
61800 int error;
61801 };
61802@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
61803 buf->error = -EOVERFLOW;
61804 return -EOVERFLOW;
61805 }
61806+
61807+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61808+ return 0;
61809+
61810 dirent = buf->previous;
61811 if (dirent) {
61812 if (__put_user(offset, &dirent->d_off))
61813@@ -983,6 +994,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
61814 if (!f.file)
61815 return -EBADF;
61816
61817+ buf.file = f.file;
61818 error = iterate_dir(f.file, &buf.ctx);
61819 if (error >= 0)
61820 error = buf.error;
61821@@ -1003,6 +1015,7 @@ struct compat_getdents_callback64 {
61822 struct dir_context ctx;
61823 struct linux_dirent64 __user *current_dir;
61824 struct linux_dirent64 __user *previous;
61825+ struct file * file;
61826 int count;
61827 int error;
61828 };
61829@@ -1019,6 +1032,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
61830 buf->error = -EINVAL; /* only used if we fail.. */
61831 if (reclen > buf->count)
61832 return -EINVAL;
61833+
61834+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61835+ return 0;
61836+
61837 dirent = buf->previous;
61838
61839 if (dirent) {
61840@@ -1068,6 +1085,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
61841 if (!f.file)
61842 return -EBADF;
61843
61844+ buf.file = f.file;
61845 error = iterate_dir(f.file, &buf.ctx);
61846 if (error >= 0)
61847 error = buf.error;
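
All three compat readdir paths above get the same treatment: a struct file pointer is stashed in the callback context so gr_acl_handle_filldir() can veto individual entries, and a vetoed entry is skipped silently by returning 0. A sketch of threading extra context through a filldir-style callback (the policy hook here is a stand-in):

#include <stdio.h>
#include <string.h>

struct readdir_ctx {
	const char *file;	/* extra state threaded to the callback */
	int count;
};

/* stand-in for gr_acl_handle_filldir(): hide names beginning "secret" */
static int policy_allows(const char *file, const char *name)
{
	(void)file;		/* a real policy would consult the file */
	return strncmp(name, "secret", 6) != 0;
}

static int filldir(struct readdir_ctx *ctx, const char *name)
{
	if (!policy_allows(ctx->file, name))
		return 0;	/* skip silently, exactly as the hook does */
	ctx->count++;
	printf("%s\n", name);
	return 0;
}

int main(void)
{
	struct readdir_ctx ctx = { "/some/dir", 0 };
	const char *names[] = { "a", "secret1", "b" };

	for (unsigned int i = 0; i < 3; i++)
		filldir(&ctx, names[i]);
	printf("emitted %d entries\n", ctx.count);
	return 0;
}
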
61848diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
61849index 4d24d17..4f8c09e 100644
61850--- a/fs/compat_binfmt_elf.c
61851+++ b/fs/compat_binfmt_elf.c
61852@@ -30,11 +30,13 @@
61853 #undef elf_phdr
61854 #undef elf_shdr
61855 #undef elf_note
61856+#undef elf_dyn
61857 #undef elf_addr_t
61858 #define elfhdr elf32_hdr
61859 #define elf_phdr elf32_phdr
61860 #define elf_shdr elf32_shdr
61861 #define elf_note elf32_note
61862+#define elf_dyn Elf32_Dyn
61863 #define elf_addr_t Elf32_Addr
61864
61865 /*
61866diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
61867index e822890..fed89d9 100644
61868--- a/fs/compat_ioctl.c
61869+++ b/fs/compat_ioctl.c
61870@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
61871 return -EFAULT;
61872 if (__get_user(udata, &ss32->iomem_base))
61873 return -EFAULT;
61874- ss.iomem_base = compat_ptr(udata);
61875+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
61876 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
61877 __get_user(ss.port_high, &ss32->port_high))
61878 return -EFAULT;
61879@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
61880 for (i = 0; i < nmsgs; i++) {
61881 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
61882 return -EFAULT;
61883- if (get_user(datap, &umsgs[i].buf) ||
61884- put_user(compat_ptr(datap), &tmsgs[i].buf))
61885+ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
61886+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
61887 return -EFAULT;
61888 }
61889 return sys_ioctl(fd, cmd, (unsigned long)tdata);
61890@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
61891 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
61892 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
61893 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
61894- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
61895+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
61896 return -EFAULT;
61897
61898 return ioctl_preallocate(file, p);
61899@@ -1618,8 +1618,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
61900 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
61901 {
61902 unsigned int a, b;
61903- a = *(unsigned int *)p;
61904- b = *(unsigned int *)q;
61905+ a = *(const unsigned int *)p;
61906+ b = *(const unsigned int *)q;
61907 if (a > b)
61908 return 1;
61909 if (a < b)
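
The init_sys32_ioctl_cmp change is plain const-correctness: a sort comparator receives const void pointers and should read through const-qualified casts. The same comparator, exercised in userspace:

#include <stdio.h>
#include <stdlib.h>

static int init_sys32_ioctl_cmp(const void *p, const void *q)
{
	unsigned int a = *(const unsigned int *)p;	/* const-correct reads */
	unsigned int b = *(const unsigned int *)q;

	if (a > b)
		return 1;
	if (a < b)
		return -1;
	return 0;
}

int main(void)
{
	unsigned int cmds[] = { 3, 1, 2 };

	qsort(cmds, 3, sizeof(cmds[0]), init_sys32_ioctl_cmp);
	printf("%u %u %u\n", cmds[0], cmds[1], cmds[2]);
	return 0;
}
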
61910diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
61911index 668dcab..daebcd6 100644
61912--- a/fs/configfs/dir.c
61913+++ b/fs/configfs/dir.c
61914@@ -1548,7 +1548,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
61915 }
61916 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
61917 struct configfs_dirent *next;
61918- const char *name;
61919+ const unsigned char * name;
61920+ char d_name[sizeof(next->s_dentry->d_iname)];
61921 int len;
61922 struct inode *inode = NULL;
61923
61924@@ -1557,7 +1558,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
61925 continue;
61926
61927 name = configfs_get_name(next);
61928- len = strlen(name);
61929+ if (next->s_dentry && name == next->s_dentry->d_iname) {
61930+ len = next->s_dentry->d_name.len;
61931+ memcpy(d_name, name, len);
61932+ name = d_name;
61933+ } else
61934+ len = strlen(name);
61935
61936 /*
61937 * We'll have a dentry and an inode for
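
The configfs_readdir hunk copes with names that may be rewritten concurrently: when the name points into a dentry's inline d_iname, it snapshots the bytes into a local buffer using the recorded length instead of calling strlen() on a string a rename could be mutating underneath. A simplified model of that snapshot-before-use pattern:

#include <stdio.h>
#include <string.h>

#define DNAME_INLINE_LEN 32

struct dent {
	char d_iname[DNAME_INLINE_LEN];	/* inline name, mutable by rename */
	size_t d_len;			/* length published with the name */
};

static void emit_name(const struct dent *d)
{
	char d_name[DNAME_INLINE_LEN];
	size_t len = d->d_len;			/* assumes len < DNAME_INLINE_LEN */

	memcpy(d_name, d->d_iname, len);	/* bounded copy, no strlen() */
	d_name[len] = '\0';
	printf("%s (%zu)\n", d_name, len);
}

int main(void)
{
	struct dent d = { "target", 6 };

	emit_name(&d);
	return 0;
}
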
61938diff --git a/fs/coredump.c b/fs/coredump.c
61939index a93f7e6..d58bcbe 100644
61940--- a/fs/coredump.c
61941+++ b/fs/coredump.c
61942@@ -442,8 +442,8 @@ static void wait_for_dump_helpers(struct file *file)
61943 struct pipe_inode_info *pipe = file->private_data;
61944
61945 pipe_lock(pipe);
61946- pipe->readers++;
61947- pipe->writers--;
61948+ atomic_inc(&pipe->readers);
61949+ atomic_dec(&pipe->writers);
61950 wake_up_interruptible_sync(&pipe->wait);
61951 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
61952 pipe_unlock(pipe);
61953@@ -452,11 +452,11 @@ static void wait_for_dump_helpers(struct file *file)
61954 * We actually want wait_event_freezable() but then we need
61955 * to clear TIF_SIGPENDING and improve dump_interrupted().
61956 */
61957- wait_event_interruptible(pipe->wait, pipe->readers == 1);
61958+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
61959
61960 pipe_lock(pipe);
61961- pipe->readers--;
61962- pipe->writers++;
61963+ atomic_dec(&pipe->readers);
61964+ atomic_inc(&pipe->writers);
61965 pipe_unlock(pipe);
61966 }
61967
61968@@ -503,7 +503,9 @@ void do_coredump(const siginfo_t *siginfo)
61969 struct files_struct *displaced;
61970 bool need_nonrelative = false;
61971 bool core_dumped = false;
61972- static atomic_t core_dump_count = ATOMIC_INIT(0);
61973+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
61974+ long signr = siginfo->si_signo;
61975+ int dumpable;
61976 struct coredump_params cprm = {
61977 .siginfo = siginfo,
61978 .regs = signal_pt_regs(),
61979@@ -516,12 +518,17 @@ void do_coredump(const siginfo_t *siginfo)
61980 .mm_flags = mm->flags,
61981 };
61982
61983- audit_core_dumps(siginfo->si_signo);
61984+ audit_core_dumps(signr);
61985+
61986+ dumpable = __get_dumpable(cprm.mm_flags);
61987+
61988+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
61989+ gr_handle_brute_attach(dumpable);
61990
61991 binfmt = mm->binfmt;
61992 if (!binfmt || !binfmt->core_dump)
61993 goto fail;
61994- if (!__get_dumpable(cprm.mm_flags))
61995+ if (!dumpable)
61996 goto fail;
61997
61998 cred = prepare_creds();
61999@@ -540,7 +547,7 @@ void do_coredump(const siginfo_t *siginfo)
62000 need_nonrelative = true;
62001 }
62002
62003- retval = coredump_wait(siginfo->si_signo, &core_state);
62004+ retval = coredump_wait(signr, &core_state);
62005 if (retval < 0)
62006 goto fail_creds;
62007
62008@@ -583,7 +590,7 @@ void do_coredump(const siginfo_t *siginfo)
62009 }
62010 cprm.limit = RLIM_INFINITY;
62011
62012- dump_count = atomic_inc_return(&core_dump_count);
62013+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
62014 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
62015 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
62016 task_tgid_vnr(current), current->comm);
62017@@ -615,6 +622,8 @@ void do_coredump(const siginfo_t *siginfo)
62018 } else {
62019 struct inode *inode;
62020
62021+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
62022+
62023 if (cprm.limit < binfmt->min_coredump)
62024 goto fail_unlock;
62025
62026@@ -673,7 +682,7 @@ close_fail:
62027 filp_close(cprm.file, NULL);
62028 fail_dropcount:
62029 if (ispipe)
62030- atomic_dec(&core_dump_count);
62031+ atomic_dec_unchecked(&core_dump_count);
62032 fail_unlock:
62033 kfree(cn.corename);
62034 coredump_finish(mm, core_dumped);
62035@@ -694,6 +703,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
62036 struct file *file = cprm->file;
62037 loff_t pos = file->f_pos;
62038 ssize_t n;
62039+
62040+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
62041 if (cprm->written + nr > cprm->limit)
62042 return 0;
62043 while (nr) {
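
In wait_for_dump_helpers() the readers/writers counts are updated under pipe_lock, but the wait condition reads them without it, so the patch converts them to atomics to keep the lockless read tear-free. Sketched with C11 atomics (the kernel uses its own atomic_t, and the real code blocks on a waitqueue rather than polling):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int readers = 1;		/* the dumping process itself */

/* the wait condition reads the counter without any lock held */
static int dump_helpers_done(void)
{
	return atomic_load(&readers) == 1;
}

int main(void)
{
	atomic_fetch_add(&readers, 1);			/* helper attaches */
	printf("done? %d\n", dump_helpers_done());	/* 0 */
	atomic_fetch_sub(&readers, 1);			/* helper detaches */
	printf("done? %d\n", dump_helpers_done());	/* 1 */
	return 0;
}
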
62044diff --git a/fs/dcache.c b/fs/dcache.c
62045index 06f6585..65499d1 100644
62046--- a/fs/dcache.c
62047+++ b/fs/dcache.c
62048@@ -1445,7 +1445,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
62049 */
62050 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
62051 if (name->len > DNAME_INLINE_LEN-1) {
62052- dname = kmalloc(name->len + 1, GFP_KERNEL);
62053+ dname = kmalloc(round_up(name->len + 1, sizeof(unsigned long)), GFP_KERNEL);
62054 if (!dname) {
62055 kmem_cache_free(dentry_cache, dentry);
62056 return NULL;
62057@@ -2402,7 +2402,7 @@ void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
62058 }
62059 EXPORT_SYMBOL(dentry_update_name_case);
62060
62061-static void switch_names(struct dentry *dentry, struct dentry *target)
62062+static void switch_names(struct dentry *dentry, struct dentry *target, bool exchange)
62063 {
62064 if (dname_external(target)) {
62065 if (dname_external(dentry)) {
62066@@ -2430,7 +2430,7 @@ static void switch_names(struct dentry *dentry, struct dentry *target)
62067 target->d_name.len + 1);
62068 target->d_name.name = dentry->d_name.name;
62069 dentry->d_name.name = dentry->d_iname;
62070- } else {
62071+ } else if (exchange) {
62072 /*
62073 * Both are internal.
62074 */
62075@@ -2440,6 +2440,14 @@ static void switch_names(struct dentry *dentry, struct dentry *target)
62076 swap(((long *) &dentry->d_iname)[i],
62077 ((long *) &target->d_iname)[i]);
62078 }
62079+ } else {
62080+ /*
62081+ * Both are internal. Just copy target to dentry
62082+ */
62083+ memcpy(dentry->d_iname, target->d_name.name,
62084+ target->d_name.len + 1);
62085+ dentry->d_name.len = target->d_name.len;
62086+ return;
62087 }
62088 }
62089 swap(dentry->d_name.len, target->d_name.len);
62090@@ -2540,7 +2548,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
62091 list_del(&target->d_u.d_child);
62092
62093 /* Switch the names.. */
62094- switch_names(dentry, target);
62095+ switch_names(dentry, target, exchange);
62096 swap(dentry->d_name.hash, target->d_name.hash);
62097
62098 /* ... and switch the parents */
62099@@ -2679,7 +2687,7 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
62100
62101 dparent = dentry->d_parent;
62102
62103- switch_names(dentry, anon);
62104+ switch_names(dentry, anon, false);
62105 swap(dentry->d_name.hash, anon->d_name.hash);
62106
62107 dentry->d_parent = dentry;
62108@@ -3413,7 +3421,8 @@ void __init vfs_caches_init(unsigned long mempages)
62109 mempages -= reserve;
62110
62111 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
62112- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
62113+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
62114+ SLAB_NO_SANITIZE, NULL);
62115
62116 dcache_init();
62117 inode_init();
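
__d_alloc() now rounds the external-name allocation up to a multiple of sizeof(unsigned long); this pairs with the switch_names() change above, which may copy names a word at a time, so a byte-exact allocation could be read past its end. The rounding itself, sketched:

#include <stdio.h>

/* generic round-up; the kernel's round_up() requires a power-of-two step */
#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	size_t name_len = 21;	/* name plus NUL terminator = 22 bytes */
	size_t alloc = round_up(name_len + 1, sizeof(unsigned long));

	printf("allocate %zu bytes for %zu\n", alloc, name_len + 1);	/* 24 on LP64 */
	return 0;
}
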
62118diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
62119index 16a46b6..41696fd 100644
62120--- a/fs/debugfs/inode.c
62121+++ b/fs/debugfs/inode.c
62122@@ -416,7 +416,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
62123 */
62124 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
62125 {
62126+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
62127+ return __create_file(name, S_IFDIR | S_IRWXU,
62128+#else
62129 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
62130+#endif
62131 parent, NULL, NULL);
62132 }
62133 EXPORT_SYMBOL_GPL(debugfs_create_dir);
62134diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
62135index d4a9431..77f9b2e 100644
62136--- a/fs/ecryptfs/inode.c
62137+++ b/fs/ecryptfs/inode.c
62138@@ -673,7 +673,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
62139 old_fs = get_fs();
62140 set_fs(get_ds());
62141 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
62142- (char __user *)lower_buf,
62143+ (char __force_user *)lower_buf,
62144 PATH_MAX);
62145 set_fs(old_fs);
62146 if (rc < 0)
62147diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
62148index e4141f2..d8263e8 100644
62149--- a/fs/ecryptfs/miscdev.c
62150+++ b/fs/ecryptfs/miscdev.c
62151@@ -304,7 +304,7 @@ check_list:
62152 goto out_unlock_msg_ctx;
62153 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
62154 if (msg_ctx->msg) {
62155- if (copy_to_user(&buf[i], packet_length, packet_length_size))
62156+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
62157 goto out_unlock_msg_ctx;
62158 i += packet_length_size;
62159 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
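
The miscdev hunk adds an explicit upper bound before copy_to_user(): a computed packet_length_size must never exceed the on-stack source array it is copied from. The same guard in a userspace sketch (buffer names are stand-ins):

#include <stdio.h>
#include <string.h>

static int copy_packet_length(char *dst, size_t dst_len,
			      size_t packet_length_size)
{
	char packet_length[8] = "LEN";

	/* reject before memcpy can over-read the on-stack source */
	if (packet_length_size > sizeof(packet_length) ||
	    packet_length_size > dst_len)
		return -14;	/* -EFAULT */
	memcpy(dst, packet_length, packet_length_size);
	return 0;
}

int main(void)
{
	char buf[16];

	printf("%d\n", copy_packet_length(buf, sizeof(buf), 4));	/* 0 */
	printf("%d\n", copy_packet_length(buf, sizeof(buf), 64));	/* -14 */
	return 0;
}
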
62160diff --git a/fs/exec.c b/fs/exec.c
62161index a3d33fe..49e9bc9 100644
62162--- a/fs/exec.c
62163+++ b/fs/exec.c
62164@@ -56,8 +56,20 @@
62165 #include <linux/pipe_fs_i.h>
62166 #include <linux/oom.h>
62167 #include <linux/compat.h>
62168+#include <linux/random.h>
62169+#include <linux/seq_file.h>
62170+#include <linux/coredump.h>
62171+#include <linux/mman.h>
62172+
62173+#ifdef CONFIG_PAX_REFCOUNT
62174+#include <linux/kallsyms.h>
62175+#include <linux/kdebug.h>
62176+#endif
62177+
62178+#include <trace/events/fs.h>
62179
62180 #include <asm/uaccess.h>
62181+#include <asm/sections.h>
62182 #include <asm/mmu_context.h>
62183 #include <asm/tlb.h>
62184
62185@@ -66,19 +78,34 @@
62186
62187 #include <trace/events/sched.h>
62188
62189+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62190+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
62191+{
62192+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
62193+}
62194+#endif
62195+
62196+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
62197+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
62198+EXPORT_SYMBOL(pax_set_initial_flags_func);
62199+#endif
62200+
62201 int suid_dumpable = 0;
62202
62203 static LIST_HEAD(formats);
62204 static DEFINE_RWLOCK(binfmt_lock);
62205
62206+extern int gr_process_kernel_exec_ban(void);
62207+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
62208+
62209 void __register_binfmt(struct linux_binfmt * fmt, int insert)
62210 {
62211 BUG_ON(!fmt);
62212 if (WARN_ON(!fmt->load_binary))
62213 return;
62214 write_lock(&binfmt_lock);
62215- insert ? list_add(&fmt->lh, &formats) :
62216- list_add_tail(&fmt->lh, &formats);
62217+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
62218+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
62219 write_unlock(&binfmt_lock);
62220 }
62221
62222@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
62223 void unregister_binfmt(struct linux_binfmt * fmt)
62224 {
62225 write_lock(&binfmt_lock);
62226- list_del(&fmt->lh);
62227+ pax_list_del((struct list_head *)&fmt->lh);
62228 write_unlock(&binfmt_lock);
62229 }
62230
62231@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
62232 int write)
62233 {
62234 struct page *page;
62235- int ret;
62236
62237-#ifdef CONFIG_STACK_GROWSUP
62238- if (write) {
62239- ret = expand_downwards(bprm->vma, pos);
62240- if (ret < 0)
62241- return NULL;
62242- }
62243-#endif
62244- ret = get_user_pages(current, bprm->mm, pos,
62245- 1, write, 1, &page, NULL);
62246- if (ret <= 0)
62247+ if (0 > expand_downwards(bprm->vma, pos))
62248+ return NULL;
62249+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
62250 return NULL;
62251
62252 if (write) {
62253@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
62254 if (size <= ARG_MAX)
62255 return page;
62256
62257+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62258+ // only allow 512KB for argv+env on suid/sgid binaries
62259+ // to prevent easy ASLR exhaustion
62260+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
62261+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
62262+ (size > (512 * 1024))) {
62263+ put_page(page);
62264+ return NULL;
62265+ }
62266+#endif
62267+
62268 /*
62269 * Limit to 1/4-th the stack size for the argv+env strings.
62270 * This ensures that:
62271@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
62272 vma->vm_end = STACK_TOP_MAX;
62273 vma->vm_start = vma->vm_end - PAGE_SIZE;
62274 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
62275+
62276+#ifdef CONFIG_PAX_SEGMEXEC
62277+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
62278+#endif
62279+
62280 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
62281 INIT_LIST_HEAD(&vma->anon_vma_chain);
62282
62283@@ -279,6 +314,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
62284 mm->stack_vm = mm->total_vm = 1;
62285 up_write(&mm->mmap_sem);
62286 bprm->p = vma->vm_end - sizeof(void *);
62287+
62288+#ifdef CONFIG_PAX_RANDUSTACK
62289+ if (randomize_va_space)
62290+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
62291+#endif
62292+
62293 return 0;
62294 err:
62295 up_write(&mm->mmap_sem);
62296@@ -399,7 +440,7 @@ struct user_arg_ptr {
62297 } ptr;
62298 };
62299
62300-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
62301+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
62302 {
62303 const char __user *native;
62304
62305@@ -408,14 +449,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
62306 compat_uptr_t compat;
62307
62308 if (get_user(compat, argv.ptr.compat + nr))
62309- return ERR_PTR(-EFAULT);
62310+ return (const char __force_user *)ERR_PTR(-EFAULT);
62311
62312 return compat_ptr(compat);
62313 }
62314 #endif
62315
62316 if (get_user(native, argv.ptr.native + nr))
62317- return ERR_PTR(-EFAULT);
62318+ return (const char __force_user *)ERR_PTR(-EFAULT);
62319
62320 return native;
62321 }
62322@@ -434,7 +475,7 @@ static int count(struct user_arg_ptr argv, int max)
62323 if (!p)
62324 break;
62325
62326- if (IS_ERR(p))
62327+ if (IS_ERR((const char __force_kernel *)p))
62328 return -EFAULT;
62329
62330 if (i >= max)
62331@@ -469,7 +510,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
62332
62333 ret = -EFAULT;
62334 str = get_user_arg_ptr(argv, argc);
62335- if (IS_ERR(str))
62336+ if (IS_ERR((const char __force_kernel *)str))
62337 goto out;
62338
62339 len = strnlen_user(str, MAX_ARG_STRLEN);
62340@@ -551,7 +592,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
62341 int r;
62342 mm_segment_t oldfs = get_fs();
62343 struct user_arg_ptr argv = {
62344- .ptr.native = (const char __user *const __user *)__argv,
62345+ .ptr.native = (const char __user * const __force_user *)__argv,
62346 };
62347
62348 set_fs(KERNEL_DS);
62349@@ -586,7 +627,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
62350 unsigned long new_end = old_end - shift;
62351 struct mmu_gather tlb;
62352
62353- BUG_ON(new_start > new_end);
62354+ if (new_start >= new_end || new_start < mmap_min_addr)
62355+ return -ENOMEM;
62356
62357 /*
62358 * ensure there are no vmas between where we want to go
62359@@ -595,6 +637,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
62360 if (vma != find_vma(mm, new_start))
62361 return -EFAULT;
62362
62363+#ifdef CONFIG_PAX_SEGMEXEC
62364+ BUG_ON(pax_find_mirror_vma(vma));
62365+#endif
62366+
62367 /*
62368 * cover the whole range: [new_start, old_end)
62369 */
62370@@ -675,10 +721,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
62371 stack_top = arch_align_stack(stack_top);
62372 stack_top = PAGE_ALIGN(stack_top);
62373
62374- if (unlikely(stack_top < mmap_min_addr) ||
62375- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
62376- return -ENOMEM;
62377-
62378 stack_shift = vma->vm_end - stack_top;
62379
62380 bprm->p -= stack_shift;
62381@@ -690,8 +732,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
62382 bprm->exec -= stack_shift;
62383
62384 down_write(&mm->mmap_sem);
62385+
62386+ /* Move stack pages down in memory. */
62387+ if (stack_shift) {
62388+ ret = shift_arg_pages(vma, stack_shift);
62389+ if (ret)
62390+ goto out_unlock;
62391+ }
62392+
62393 vm_flags = VM_STACK_FLAGS;
62394
62395+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
62396+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
62397+ vm_flags &= ~VM_EXEC;
62398+
62399+#ifdef CONFIG_PAX_MPROTECT
62400+ if (mm->pax_flags & MF_PAX_MPROTECT)
62401+ vm_flags &= ~VM_MAYEXEC;
62402+#endif
62403+
62404+ }
62405+#endif
62406+
62407 /*
62408 * Adjust stack execute permissions; explicitly enable for
62409 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
62410@@ -710,13 +772,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
62411 goto out_unlock;
62412 BUG_ON(prev != vma);
62413
62414- /* Move stack pages down in memory. */
62415- if (stack_shift) {
62416- ret = shift_arg_pages(vma, stack_shift);
62417- if (ret)
62418- goto out_unlock;
62419- }
62420-
62421 /* mprotect_fixup is overkill to remove the temporary stack flags */
62422 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
62423
62424@@ -740,6 +795,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
62425 #endif
62426 current->mm->start_stack = bprm->p;
62427 ret = expand_stack(vma, stack_base);
62428+
62429+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
62430+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
62431+ unsigned long size;
62432+ vm_flags_t vm_flags;
62433+
62434+ size = STACK_TOP - vma->vm_end;
62435+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
62436+
62437+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
62438+
62439+#ifdef CONFIG_X86
62440+ if (!ret) {
62441+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
62442+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
62443+ }
62444+#endif
62445+
62446+ }
62447+#endif
62448+
62449 if (ret)
62450 ret = -EFAULT;
62451
62452@@ -775,6 +851,8 @@ static struct file *do_open_exec(struct filename *name)
62453
62454 fsnotify_open(file);
62455
62456+ trace_open_exec(name->name);
62457+
62458 err = deny_write_access(file);
62459 if (err)
62460 goto exit;
62461@@ -804,7 +882,7 @@ int kernel_read(struct file *file, loff_t offset,
62462 old_fs = get_fs();
62463 set_fs(get_ds());
62464 /* The cast to a user pointer is valid due to the set_fs() */
62465- result = vfs_read(file, (void __user *)addr, count, &pos);
62466+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
62467 set_fs(old_fs);
62468 return result;
62469 }
62470@@ -849,6 +927,7 @@ static int exec_mmap(struct mm_struct *mm)
62471 tsk->mm = mm;
62472 tsk->active_mm = mm;
62473 activate_mm(active_mm, mm);
62474+ populate_stack();
62475 tsk->mm->vmacache_seqnum = 0;
62476 vmacache_flush(tsk);
62477 task_unlock(tsk);
62478@@ -1247,7 +1326,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
62479 }
62480 rcu_read_unlock();
62481
62482- if (p->fs->users > n_fs)
62483+ if (atomic_read(&p->fs->users) > n_fs)
62484 bprm->unsafe |= LSM_UNSAFE_SHARE;
62485 else
62486 p->fs->in_exec = 1;
62487@@ -1423,6 +1502,31 @@ static int exec_binprm(struct linux_binprm *bprm)
62488 return ret;
62489 }
62490
62491+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62492+static DEFINE_PER_CPU(u64, exec_counter);
62493+static int __init init_exec_counters(void)
62494+{
62495+ unsigned int cpu;
62496+
62497+ for_each_possible_cpu(cpu) {
62498+ per_cpu(exec_counter, cpu) = (u64)cpu;
62499+ }
62500+
62501+ return 0;
62502+}
62503+early_initcall(init_exec_counters);
62504+static inline void increment_exec_counter(void)
62505+{
62506+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
62507+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
62508+}
62509+#else
62510+static inline void increment_exec_counter(void) {}
62511+#endif
62512+
62513+extern void gr_handle_exec_args(struct linux_binprm *bprm,
62514+ struct user_arg_ptr argv);
62515+
62516 /*
62517 * sys_execve() executes a new program.
62518 */
62519@@ -1430,6 +1534,11 @@ static int do_execve_common(struct filename *filename,
62520 struct user_arg_ptr argv,
62521 struct user_arg_ptr envp)
62522 {
62523+#ifdef CONFIG_GRKERNSEC
62524+ struct file *old_exec_file;
62525+ struct acl_subject_label *old_acl;
62526+ struct rlimit old_rlim[RLIM_NLIMITS];
62527+#endif
62528 struct linux_binprm *bprm;
62529 struct file *file;
62530 struct files_struct *displaced;
62531@@ -1438,6 +1547,8 @@ static int do_execve_common(struct filename *filename,
62532 if (IS_ERR(filename))
62533 return PTR_ERR(filename);
62534
62535+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
62536+
62537 /*
62538 * We move the actual failure in case of RLIMIT_NPROC excess from
62539 * set*uid() to execve() because too many poorly written programs
62540@@ -1475,11 +1586,21 @@ static int do_execve_common(struct filename *filename,
62541 if (IS_ERR(file))
62542 goto out_unmark;
62543
62544+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
62545+ retval = -EPERM;
62546+ goto out_unmark;
62547+ }
62548+
62549 sched_exec();
62550
62551 bprm->file = file;
62552 bprm->filename = bprm->interp = filename->name;
62553
62554+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
62555+ retval = -EACCES;
62556+ goto out_unmark;
62557+ }
62558+
62559 retval = bprm_mm_init(bprm);
62560 if (retval)
62561 goto out_unmark;
62562@@ -1496,24 +1617,70 @@ static int do_execve_common(struct filename *filename,
62563 if (retval < 0)
62564 goto out;
62565
62566+#ifdef CONFIG_GRKERNSEC
62567+ old_acl = current->acl;
62568+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
62569+ old_exec_file = current->exec_file;
62570+ get_file(file);
62571+ current->exec_file = file;
62572+#endif
62573+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62574+ /* limit suid stack to 8MB
62575+ * we saved the old limits above and will restore them if this exec fails
62576+ */
62577+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
62578+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
62579+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
62580+#endif
62581+
62582+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
62583+ retval = -EPERM;
62584+ goto out_fail;
62585+ }
62586+
62587+ if (!gr_tpe_allow(file)) {
62588+ retval = -EACCES;
62589+ goto out_fail;
62590+ }
62591+
62592+ if (gr_check_crash_exec(file)) {
62593+ retval = -EACCES;
62594+ goto out_fail;
62595+ }
62596+
62597+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
62598+ bprm->unsafe);
62599+ if (retval < 0)
62600+ goto out_fail;
62601+
62602 retval = copy_strings_kernel(1, &bprm->filename, bprm);
62603 if (retval < 0)
62604- goto out;
62605+ goto out_fail;
62606
62607 bprm->exec = bprm->p;
62608 retval = copy_strings(bprm->envc, envp, bprm);
62609 if (retval < 0)
62610- goto out;
62611+ goto out_fail;
62612
62613 retval = copy_strings(bprm->argc, argv, bprm);
62614 if (retval < 0)
62615- goto out;
62616+ goto out_fail;
62617+
62618+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
62619+
62620+ gr_handle_exec_args(bprm, argv);
62621
62622 retval = exec_binprm(bprm);
62623 if (retval < 0)
62624- goto out;
62625+ goto out_fail;
62626+#ifdef CONFIG_GRKERNSEC
62627+ if (old_exec_file)
62628+ fput(old_exec_file);
62629+#endif
62630
62631 /* execve succeeded */
62632+
62633+ increment_exec_counter();
62634 current->fs->in_exec = 0;
62635 current->in_execve = 0;
62636 acct_update_integrals(current);
62637@@ -1524,6 +1691,14 @@ static int do_execve_common(struct filename *filename,
62638 put_files_struct(displaced);
62639 return retval;
62640
62641+out_fail:
62642+#ifdef CONFIG_GRKERNSEC
62643+ current->acl = old_acl;
62644+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
62645+ fput(current->exec_file);
62646+ current->exec_file = old_exec_file;
62647+#endif
62648+
62649 out:
62650 if (bprm->mm) {
62651 acct_arg_size(bprm, 0);
62652@@ -1615,3 +1790,312 @@ COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
62653 return compat_do_execve(getname(filename), argv, envp);
62654 }
62655 #endif
62656+
62657+int pax_check_flags(unsigned long *flags)
62658+{
62659+ int retval = 0;
62660+
62661+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
62662+ if (*flags & MF_PAX_SEGMEXEC)
62663+ {
62664+ *flags &= ~MF_PAX_SEGMEXEC;
62665+ retval = -EINVAL;
62666+ }
62667+#endif
62668+
62669+ if ((*flags & MF_PAX_PAGEEXEC)
62670+
62671+#ifdef CONFIG_PAX_PAGEEXEC
62672+ && (*flags & MF_PAX_SEGMEXEC)
62673+#endif
62674+
62675+ )
62676+ {
62677+ *flags &= ~MF_PAX_PAGEEXEC;
62678+ retval = -EINVAL;
62679+ }
62680+
62681+ if ((*flags & MF_PAX_MPROTECT)
62682+
62683+#ifdef CONFIG_PAX_MPROTECT
62684+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
62685+#endif
62686+
62687+ )
62688+ {
62689+ *flags &= ~MF_PAX_MPROTECT;
62690+ retval = -EINVAL;
62691+ }
62692+
62693+ if ((*flags & MF_PAX_EMUTRAMP)
62694+
62695+#ifdef CONFIG_PAX_EMUTRAMP
62696+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
62697+#endif
62698+
62699+ )
62700+ {
62701+ *flags &= ~MF_PAX_EMUTRAMP;
62702+ retval = -EINVAL;
62703+ }
62704+
62705+ return retval;
62706+}
62707+
62708+EXPORT_SYMBOL(pax_check_flags);
62709+
62710+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
62711+char *pax_get_path(const struct path *path, char *buf, int buflen)
62712+{
62713+ char *pathname = d_path(path, buf, buflen);
62714+
62715+ if (IS_ERR(pathname))
62716+ goto toolong;
62717+
62718+ pathname = mangle_path(buf, pathname, "\t\n\\");
62719+ if (!pathname)
62720+ goto toolong;
62721+
62722+ *pathname = 0;
62723+ return buf;
62724+
62725+toolong:
62726+ return "<path too long>";
62727+}
62728+EXPORT_SYMBOL(pax_get_path);
62729+
62730+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
62731+{
62732+ struct task_struct *tsk = current;
62733+ struct mm_struct *mm = current->mm;
62734+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
62735+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
62736+ char *path_exec = NULL;
62737+ char *path_fault = NULL;
62738+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
62739+ siginfo_t info = { };
62740+
62741+ if (buffer_exec && buffer_fault) {
62742+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
62743+
62744+ down_read(&mm->mmap_sem);
62745+ vma = mm->mmap;
62746+ while (vma && (!vma_exec || !vma_fault)) {
62747+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
62748+ vma_exec = vma;
62749+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
62750+ vma_fault = vma;
62751+ vma = vma->vm_next;
62752+ }
62753+ if (vma_exec)
62754+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
62755+ if (vma_fault) {
62756+ start = vma_fault->vm_start;
62757+ end = vma_fault->vm_end;
62758+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
62759+ if (vma_fault->vm_file)
62760+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
62761+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
62762+ path_fault = "<heap>";
62763+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
62764+ path_fault = "<stack>";
62765+ else
62766+ path_fault = "<anonymous mapping>";
62767+ }
62768+ up_read(&mm->mmap_sem);
62769+ }
62770+ if (tsk->signal->curr_ip)
62771+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
62772+ else
62773+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
62774+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
62775+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
62776+ free_page((unsigned long)buffer_exec);
62777+ free_page((unsigned long)buffer_fault);
62778+ pax_report_insns(regs, pc, sp);
62779+ info.si_signo = SIGKILL;
62780+ info.si_errno = 0;
62781+ info.si_code = SI_KERNEL;
62782+ info.si_pid = 0;
62783+ info.si_uid = 0;
62784+ do_coredump(&info);
62785+}
62786+#endif
62787+
62788+#ifdef CONFIG_PAX_REFCOUNT
62789+void pax_report_refcount_overflow(struct pt_regs *regs)
62790+{
62791+ if (current->signal->curr_ip)
62792+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
62793+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
62794+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
62795+ else
62796+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
62797+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
62798+	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
62799+ preempt_disable();
62800+ show_regs(regs);
62801+ preempt_enable();
62802+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
62803+}
62804+#endif
62805+
62806+#ifdef CONFIG_PAX_USERCOPY
62807+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
62808+static noinline int check_stack_object(const void *obj, unsigned long len)
62809+{
62810+ const void * const stack = task_stack_page(current);
62811+ const void * const stackend = stack + THREAD_SIZE;
62812+
62813+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
62814+ const void *frame = NULL;
62815+ const void *oldframe;
62816+#endif
62817+
62818+ if (obj + len < obj)
62819+ return -1;
62820+
62821+ if (obj + len <= stack || stackend <= obj)
62822+ return 0;
62823+
62824+ if (obj < stack || stackend < obj + len)
62825+ return -1;
62826+
62827+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
62828+ oldframe = __builtin_frame_address(1);
62829+ if (oldframe)
62830+ frame = __builtin_frame_address(2);
62831+ /*
62832+ low ----------------------------------------------> high
62833+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
62834+ ^----------------^
62835+ allow copies only within here
62836+ */
62837+ while (stack <= frame && frame < stackend) {
62838+ /* if obj + len extends past the last frame, this
62839+ check won't pass and the next frame will be 0,
62840+ causing us to bail out and correctly report
62841+ the copy as invalid
62842+ */
62843+ if (obj + len <= frame)
62844+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
62845+ oldframe = frame;
62846+ frame = *(const void * const *)frame;
62847+ }
62848+ return -1;
62849+#else
62850+ return 1;
62851+#endif
62852+}
62853+
62854+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
62855+{
62856+ if (current->signal->curr_ip)
62857+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
62858+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
62859+ else
62860+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
62861+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
62862+ dump_stack();
62863+ gr_handle_kernel_exploit();
62864+ do_group_exit(SIGKILL);
62865+}
62866+#endif
62867+
62868+#ifdef CONFIG_PAX_USERCOPY
62869+
62870+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
62871+{
62872+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
62873+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
62874+#ifdef CONFIG_MODULES
62875+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
62876+#else
62877+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
62878+#endif
62879+
62880+#else
62881+ unsigned long textlow = (unsigned long)_stext;
62882+ unsigned long texthigh = (unsigned long)_etext;
62883+
62884+#ifdef CONFIG_X86_64
62885+ /* check against linear mapping as well */
62886+ if (high > (unsigned long)__va(__pa(textlow)) &&
62887+ low < (unsigned long)__va(__pa(texthigh)))
62888+ return true;
62889+#endif
62890+
62891+#endif
62892+
62893+ if (high <= textlow || low >= texthigh)
62894+ return false;
62895+ else
62896+ return true;
62897+}
62898+#endif
62899+
62900+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
62901+{
62902+#ifdef CONFIG_PAX_USERCOPY
62903+ const char *type;
62904+#endif
62905+
62906+#ifndef CONFIG_STACK_GROWSUP
62907+ unsigned long stackstart = (unsigned long)task_stack_page(current);
62908+ unsigned long currentsp = (unsigned long)&stackstart;
62909+ if (unlikely((currentsp < stackstart + 512 ||
62910+ currentsp >= stackstart + THREAD_SIZE) && !in_interrupt()))
62911+ BUG();
62912+#endif
62913+
62914+#ifndef CONFIG_PAX_USERCOPY_DEBUG
62915+ if (const_size)
62916+ return;
62917+#endif
62918+
62919+#ifdef CONFIG_PAX_USERCOPY
62920+ if (!n)
62921+ return;
62922+
62923+ type = check_heap_object(ptr, n);
62924+ if (!type) {
62925+ int ret = check_stack_object(ptr, n);
62926+ if (ret == 1 || ret == 2)
62927+ return;
62928+ if (ret == 0) {
62929+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
62930+ type = "<kernel text>";
62931+ else
62932+ return;
62933+ } else
62934+ type = "<process stack>";
62935+ }
62936+
62937+ pax_report_usercopy(ptr, n, to_user, type);
62938+#endif
62939+
62940+}
62941+EXPORT_SYMBOL(__check_object_size);
62942+
62943+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
62944+void pax_track_stack(void)
62945+{
62946+ unsigned long sp = (unsigned long)&sp;
62947+ if (sp < current_thread_info()->lowest_stack &&
62948+ sp > (unsigned long)task_stack_page(current))
62949+ current_thread_info()->lowest_stack = sp;
62950+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
62951+ BUG();
62952+}
62953+EXPORT_SYMBOL(pax_track_stack);
62954+#endif
62955+
62956+#ifdef CONFIG_PAX_SIZE_OVERFLOW
62957+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
62958+{
62959+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
62960+ dump_stack();
62961+ do_group_exit(SIGKILL);
62962+}
62963+EXPORT_SYMBOL(report_size_overflow);
62964+#endif
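
Most of the PAX_USERCOPY machinery added to exec.c hinges on check_stack_object() classifying a copy range against the current kernel stack: reject wrap-around, accept ranges fully inside, reject ranges straddling a boundary, and optionally tighten further to the active stack frames. A userspace sketch of the classification (frame walking omitted, constants invented):

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192

/* 0: not on this stack, 1: fully inside, -1: partial overlap (an error) */
static int check_stack_object(const void *stack, const void *obj,
			      unsigned long len)
{
	uintptr_t s = (uintptr_t)stack;
	uintptr_t send = s + THREAD_SIZE;
	uintptr_t o = (uintptr_t)obj;

	if (o + len < o)		/* length wraps the address space */
		return -1;
	if (o + len <= s || send <= o)	/* entirely off this stack */
		return 0;
	if (o < s || send < o + len)	/* straddles a stack boundary */
		return -1;
	return 1;
}

int main(void)
{
	static char stack[THREAD_SIZE];

	printf("%d\n", check_stack_object(stack, stack + 16, 32));		/* 1 */
	printf("%d\n", check_stack_object(stack, stack + THREAD_SIZE - 8, 32));	/* -1 */
	return 0;
}
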
62965diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
62966index 9f9992b..8b59411 100644
62967--- a/fs/ext2/balloc.c
62968+++ b/fs/ext2/balloc.c
62969@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
62970
62971 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
62972 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
62973- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
62974+ if (free_blocks < root_blocks + 1 &&
62975 !uid_eq(sbi->s_resuid, current_fsuid()) &&
62976 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
62977- !in_group_p (sbi->s_resgid))) {
62978+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
62979 return 0;
62980 }
62981 return 1;
62982diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
62983index 9142614..97484fa 100644
62984--- a/fs/ext2/xattr.c
62985+++ b/fs/ext2/xattr.c
62986@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
62987 struct buffer_head *bh = NULL;
62988 struct ext2_xattr_entry *entry;
62989 char *end;
62990- size_t rest = buffer_size;
62991+ size_t rest = buffer_size, total_size = 0;
62992 int error;
62993
62994 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
62995@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
62996 buffer += size;
62997 }
62998 rest -= size;
62999+ total_size += size;
63000 }
63001 }
63002- error = buffer_size - rest; /* total size */
63003+ error = total_size;
63004
63005 cleanup:
63006 brelse(bh);
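
The ext2/ext3/ext4 xattr-list hunks all make the same change: return an explicitly accumulated total_size instead of computing buffer_size - rest, which is fragile when the function is used as a size probe (NULL buffer, zero size) and rest is decremented in size_t arithmetic. A simplified model of the accumulate-and-return shape:

#include <stdio.h>

/* simplified: sizes[] stands for the handler-formatted entry lengths */
static long list_names(char *buffer, size_t buffer_size,
		       const size_t *sizes, int n)
{
	size_t rest = buffer_size, total_size = 0;

	for (int i = 0; i < n; i++) {
		size_t size = sizes[i];

		if (buffer) {
			if (size > rest)
				return -34;	/* -ERANGE */
			buffer += size;		/* handler writes the name */
			rest -= size;
		}
		total_size += size;		/* counted even when probing */
	}
	return (long)total_size;
}

int main(void)
{
	size_t sizes[] = { 5, 9, 3 };

	printf("need %ld bytes\n", list_names(NULL, 0, sizes, 3));	/* 17 */
	return 0;
}
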
63007diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
63008index 158b5d4..2432610 100644
63009--- a/fs/ext3/balloc.c
63010+++ b/fs/ext3/balloc.c
63011@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
63012
63013 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
63014 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
63015- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
63016+ if (free_blocks < root_blocks + 1 &&
63017 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
63018 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
63019- !in_group_p (sbi->s_resgid))) {
63020+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
63021 return 0;
63022 }
63023 return 1;
63024diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
63025index c6874be..f8a6ae8 100644
63026--- a/fs/ext3/xattr.c
63027+++ b/fs/ext3/xattr.c
63028@@ -330,7 +330,7 @@ static int
63029 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
63030 char *buffer, size_t buffer_size)
63031 {
63032- size_t rest = buffer_size;
63033+ size_t rest = buffer_size, total_size = 0;
63034
63035 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
63036 const struct xattr_handler *handler =
63037@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
63038 buffer += size;
63039 }
63040 rest -= size;
63041+ total_size += size;
63042 }
63043 }
63044- return buffer_size - rest;
63045+ return total_size;
63046 }
63047
63048 static int
63049diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
63050index fca3820..e1ea241 100644
63051--- a/fs/ext4/balloc.c
63052+++ b/fs/ext4/balloc.c
63053@@ -553,8 +553,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
63054 /* Hm, nope. Are (enough) root reserved clusters available? */
63055 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
63056 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
63057- capable(CAP_SYS_RESOURCE) ||
63058- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
63059+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
63060+ capable_nolog(CAP_SYS_RESOURCE)) {
63061
63062 if (free_clusters >= (nclusters + dirty_clusters +
63063 resv_clusters))
63064diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
63065index 1bbe7c3..c7442e2 100644
63066--- a/fs/ext4/ext4.h
63067+++ b/fs/ext4/ext4.h
63068@@ -1276,19 +1276,19 @@ struct ext4_sb_info {
63069 unsigned long s_mb_last_start;
63070
63071 /* stats for buddy allocator */
63072- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
63073- atomic_t s_bal_success; /* we found long enough chunks */
63074- atomic_t s_bal_allocated; /* in blocks */
63075- atomic_t s_bal_ex_scanned; /* total extents scanned */
63076- atomic_t s_bal_goals; /* goal hits */
63077- atomic_t s_bal_breaks; /* too long searches */
63078- atomic_t s_bal_2orders; /* 2^order hits */
63079+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
63080+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
63081+ atomic_unchecked_t s_bal_allocated; /* in blocks */
63082+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
63083+ atomic_unchecked_t s_bal_goals; /* goal hits */
63084+ atomic_unchecked_t s_bal_breaks; /* too long searches */
63085+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
63086 spinlock_t s_bal_lock;
63087 unsigned long s_mb_buddies_generated;
63088 unsigned long long s_mb_generation_time;
63089- atomic_t s_mb_lost_chunks;
63090- atomic_t s_mb_preallocated;
63091- atomic_t s_mb_discarded;
63092+ atomic_unchecked_t s_mb_lost_chunks;
63093+ atomic_unchecked_t s_mb_preallocated;
63094+ atomic_unchecked_t s_mb_discarded;
63095 atomic_t s_lock_busy;
63096
63097 /* locality groups */
63098diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
63099index c3e7418..f62cab3 100644
63100--- a/fs/ext4/mballoc.c
63101+++ b/fs/ext4/mballoc.c
63102@@ -1901,7 +1901,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
63103 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
63104
63105 if (EXT4_SB(sb)->s_mb_stats)
63106- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
63107+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
63108
63109 break;
63110 }
63111@@ -2211,7 +2211,7 @@ repeat:
63112 ac->ac_status = AC_STATUS_CONTINUE;
63113 ac->ac_flags |= EXT4_MB_HINT_FIRST;
63114 cr = 3;
63115- atomic_inc(&sbi->s_mb_lost_chunks);
63116+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
63117 goto repeat;
63118 }
63119 }
63120@@ -2717,25 +2717,25 @@ int ext4_mb_release(struct super_block *sb)
63121 if (sbi->s_mb_stats) {
63122 ext4_msg(sb, KERN_INFO,
63123 "mballoc: %u blocks %u reqs (%u success)",
63124- atomic_read(&sbi->s_bal_allocated),
63125- atomic_read(&sbi->s_bal_reqs),
63126- atomic_read(&sbi->s_bal_success));
63127+ atomic_read_unchecked(&sbi->s_bal_allocated),
63128+ atomic_read_unchecked(&sbi->s_bal_reqs),
63129+ atomic_read_unchecked(&sbi->s_bal_success));
63130 ext4_msg(sb, KERN_INFO,
63131 "mballoc: %u extents scanned, %u goal hits, "
63132 "%u 2^N hits, %u breaks, %u lost",
63133- atomic_read(&sbi->s_bal_ex_scanned),
63134- atomic_read(&sbi->s_bal_goals),
63135- atomic_read(&sbi->s_bal_2orders),
63136- atomic_read(&sbi->s_bal_breaks),
63137- atomic_read(&sbi->s_mb_lost_chunks));
63138+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
63139+ atomic_read_unchecked(&sbi->s_bal_goals),
63140+ atomic_read_unchecked(&sbi->s_bal_2orders),
63141+ atomic_read_unchecked(&sbi->s_bal_breaks),
63142+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
63143 ext4_msg(sb, KERN_INFO,
63144 "mballoc: %lu generated and it took %Lu",
63145 sbi->s_mb_buddies_generated,
63146 sbi->s_mb_generation_time);
63147 ext4_msg(sb, KERN_INFO,
63148 "mballoc: %u preallocated, %u discarded",
63149- atomic_read(&sbi->s_mb_preallocated),
63150- atomic_read(&sbi->s_mb_discarded));
63151+ atomic_read_unchecked(&sbi->s_mb_preallocated),
63152+ atomic_read_unchecked(&sbi->s_mb_discarded));
63153 }
63154
63155 free_percpu(sbi->s_locality_groups);
63156@@ -3191,16 +3191,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
63157 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
63158
63159 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
63160- atomic_inc(&sbi->s_bal_reqs);
63161- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
63162+ atomic_inc_unchecked(&sbi->s_bal_reqs);
63163+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
63164 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
63165- atomic_inc(&sbi->s_bal_success);
63166- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
63167+ atomic_inc_unchecked(&sbi->s_bal_success);
63168+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
63169 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
63170 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
63171- atomic_inc(&sbi->s_bal_goals);
63172+ atomic_inc_unchecked(&sbi->s_bal_goals);
63173 if (ac->ac_found > sbi->s_mb_max_to_scan)
63174- atomic_inc(&sbi->s_bal_breaks);
63175+ atomic_inc_unchecked(&sbi->s_bal_breaks);
63176 }
63177
63178 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
63179@@ -3627,7 +3627,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
63180 trace_ext4_mb_new_inode_pa(ac, pa);
63181
63182 ext4_mb_use_inode_pa(ac, pa);
63183- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
63184+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
63185
63186 ei = EXT4_I(ac->ac_inode);
63187 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
63188@@ -3687,7 +3687,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
63189 trace_ext4_mb_new_group_pa(ac, pa);
63190
63191 ext4_mb_use_group_pa(ac, pa);
63192- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
63193+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
63194
63195 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
63196 lg = ac->ac_lg;
63197@@ -3776,7 +3776,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
63198 * from the bitmap and continue.
63199 */
63200 }
63201- atomic_add(free, &sbi->s_mb_discarded);
63202+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
63203
63204 return err;
63205 }
63206@@ -3794,7 +3794,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
63207 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
63208 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
63209 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
63210- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
63211+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
63212 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
63213
63214 return 0;
63215diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
63216index 32bce84..112d969 100644
63217--- a/fs/ext4/mmp.c
63218+++ b/fs/ext4/mmp.c
63219@@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
63220 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
63221 const char *function, unsigned int line, const char *msg)
63222 {
63223- __ext4_warning(sb, function, line, msg);
63224+ __ext4_warning(sb, function, line, "%s", msg);
63225 __ext4_warning(sb, function, line,
63226 "MMP failure info: last update time: %llu, last update "
63227 "node: %s, last update device: %s\n",
63228diff --git a/fs/ext4/super.c b/fs/ext4/super.c
63229index beeb5c4..998c28d 100644
63230--- a/fs/ext4/super.c
63231+++ b/fs/ext4/super.c
63232@@ -1276,7 +1276,7 @@ static ext4_fsblk_t get_sb_block(void **data)
63233 }
63234
63235 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
63236-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
63237+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
63238 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
63239
63240 #ifdef CONFIG_QUOTA
63241@@ -2464,7 +2464,7 @@ struct ext4_attr {
63242 int offset;
63243 int deprecated_val;
63244 } u;
63245-};
63246+} __do_const;
63247
63248 static int parse_strtoull(const char *buf,
63249 unsigned long long max, unsigned long long *value)
63250diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
63251index e738733..9843a6c 100644
63252--- a/fs/ext4/xattr.c
63253+++ b/fs/ext4/xattr.c
63254@@ -386,7 +386,7 @@ static int
63255 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
63256 char *buffer, size_t buffer_size)
63257 {
63258- size_t rest = buffer_size;
63259+ size_t rest = buffer_size, total_size = 0;
63260
63261 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
63262 const struct xattr_handler *handler =
63263@@ -403,9 +403,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
63264 buffer += size;
63265 }
63266 rest -= size;
63267+ total_size += size;
63268 }
63269 }
63270- return buffer_size - rest;
63271+ return total_size;
63272 }
63273
63274 static int
63275diff --git a/fs/fcntl.c b/fs/fcntl.c
63276index 72c82f6..a18b263 100644
63277--- a/fs/fcntl.c
63278+++ b/fs/fcntl.c
63279@@ -106,6 +106,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
63280 if (err)
63281 return err;
63282
63283+ if (gr_handle_chroot_fowner(pid, type))
63284+ return -ENOENT;
63285+ if (gr_check_protected_task_fowner(pid, type))
63286+ return -EACCES;
63287+
63288 f_modown(filp, pid, type, force);
63289 return 0;
63290 }
63291diff --git a/fs/fhandle.c b/fs/fhandle.c
63292index 999ff5c..ac037c9 100644
63293--- a/fs/fhandle.c
63294+++ b/fs/fhandle.c
63295@@ -8,6 +8,7 @@
63296 #include <linux/fs_struct.h>
63297 #include <linux/fsnotify.h>
63298 #include <linux/personality.h>
63299+#include <linux/grsecurity.h>
63300 #include <asm/uaccess.h>
63301 #include "internal.h"
63302 #include "mount.h"
63303@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
63304 } else
63305 retval = 0;
63306 /* copy the mount id */
63307- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
63308- sizeof(*mnt_id)) ||
63309+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
63310 copy_to_user(ufh, handle,
63311 sizeof(struct file_handle) + handle_bytes))
63312 retval = -EFAULT;
63313@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
63314 * the directory. Ideally we would like CAP_DAC_SEARCH.
63315 * But we don't have that
63316 */
63317- if (!capable(CAP_DAC_READ_SEARCH)) {
63318+ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
63319 retval = -EPERM;
63320 goto out_err;
63321 }
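
Two independent changes in fhandle.c: do_sys_name_to_handle() now uses put_user() for the single scalar mnt_id instead of a general copy_to_user(), the idiomatic and cheaper form for one known-size value; and handle_to_path() additionally requires gr_chroot_fhandle() to permit open_by_handle_at() from inside a chroot, since a file handle can name an inode outside it. The scalar-copy idiom in isolation, as a kernel-style sketch:

/* sketch: put_user() returns 0 on success and -EFAULT on a bad pointer
 * (copy_to_user() instead returns the number of uncopied bytes) */
static int export_mnt_id(int __user *uptr, int mnt_id)
{
	return put_user(mnt_id, uptr);
}
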
63322diff --git a/fs/file.c b/fs/file.c
63323index 66923fe..2849783 100644
63324--- a/fs/file.c
63325+++ b/fs/file.c
63326@@ -16,6 +16,7 @@
63327 #include <linux/slab.h>
63328 #include <linux/vmalloc.h>
63329 #include <linux/file.h>
63330+#include <linux/security.h>
63331 #include <linux/fdtable.h>
63332 #include <linux/bitops.h>
63333 #include <linux/interrupt.h>
63334@@ -139,7 +140,7 @@ out:
63335 * Return <0 error code on error; 1 on successful completion.
63336 * The files->file_lock should be held on entry, and will be held on exit.
63337 */
63338-static int expand_fdtable(struct files_struct *files, int nr)
63339+static int expand_fdtable(struct files_struct *files, unsigned int nr)
63340 __releases(files->file_lock)
63341 __acquires(files->file_lock)
63342 {
63343@@ -184,7 +185,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
63344 * expanded and execution may have blocked.
63345 * The files->file_lock should be held on entry, and will be held on exit.
63346 */
63347-static int expand_files(struct files_struct *files, int nr)
63348+static int expand_files(struct files_struct *files, unsigned int nr)
63349 {
63350 struct fdtable *fdt;
63351
63352@@ -799,6 +800,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
63353 if (!file)
63354 return __close_fd(files, fd);
63355
63356+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
63357 if (fd >= rlimit(RLIMIT_NOFILE))
63358 return -EBADF;
63359
63360@@ -825,6 +827,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
63361 if (unlikely(oldfd == newfd))
63362 return -EINVAL;
63363
63364+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
63365 if (newfd >= rlimit(RLIMIT_NOFILE))
63366 return -EBADF;
63367
63368@@ -880,6 +883,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
63369 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
63370 {
63371 int err;
63372+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
63373 if (from >= rlimit(RLIMIT_NOFILE))
63374 return -EINVAL;
63375 err = alloc_fd(from, flags);
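
Three related tweaks in fs/file.c: expand_fdtable() and expand_files() take nr as unsigned int, matching the unsigned descriptor numbers their callers pass and removing any chance of a negative count slipping through a signed comparison, while the gr_learn_resource() calls record each RLIMIT_NOFILE probe for grsecurity's learning mode before the limit check. The signedness hazard in miniature:

#include <stdio.h>

int main(void)
{
	unsigned int fd = 0x80000000u;	/* hypothetical huge descriptor */
	int nr = (int)fd;		/* implementation-defined: negative here */
	unsigned int limit = 1024;

	printf("signed check:   %s\n", nr < (int)limit ? "passes" : "fails");
	printf("unsigned check: %s\n", fd < limit ? "passes" : "fails");
	return 0;
}
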
63376diff --git a/fs/filesystems.c b/fs/filesystems.c
63377index 5797d45..7d7d79a 100644
63378--- a/fs/filesystems.c
63379+++ b/fs/filesystems.c
63380@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
63381 int len = dot ? dot - name : strlen(name);
63382
63383 fs = __get_fs_type(name, len);
63384+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63385+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
63386+#else
63387 if (!fs && (request_module("fs-%.*s", len, name) == 0))
63388+#endif
63389 fs = __get_fs_type(name, len);
63390
63391 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
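
Under CONFIG_GRKERNSEC_MODHARDEN, filesystem module auto-loading goes through ___request_module(true, "grsec_modharden_fs", ...) so grsecurity can tell a user-triggered "fs-*" load apart from a root-initiated one and apply its module-hardening policy; the stock request_module() path is kept otherwise. Folded into one helper (autoload_fs is a made-up wrapper; the callees are those named in the hunk):

static struct file_system_type *autoload_fs(const char *name, int len)
{
#ifdef CONFIG_GRKERNSEC_MODHARDEN
	if (___request_module(true, "grsec_modharden_fs",
			      "fs-%.*s", len, name) == 0)
#else
	if (request_module("fs-%.*s", len, name) == 0)
#endif
		return __get_fs_type(name, len);
	return NULL;
}
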
63392diff --git a/fs/fs_struct.c b/fs/fs_struct.c
63393index 7dca743..543d620 100644
63394--- a/fs/fs_struct.c
63395+++ b/fs/fs_struct.c
63396@@ -4,6 +4,7 @@
63397 #include <linux/path.h>
63398 #include <linux/slab.h>
63399 #include <linux/fs_struct.h>
63400+#include <linux/grsecurity.h>
63401 #include "internal.h"
63402
63403 /*
63404@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
63405 write_seqcount_begin(&fs->seq);
63406 old_root = fs->root;
63407 fs->root = *path;
63408+ gr_set_chroot_entries(current, path);
63409 write_seqcount_end(&fs->seq);
63410 spin_unlock(&fs->lock);
63411 if (old_root.dentry)
63412@@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
63413 int hits = 0;
63414 spin_lock(&fs->lock);
63415 write_seqcount_begin(&fs->seq);
63416+ /* this root replacement is only done by pivot_root,
63417+ leave grsec's chroot tagging alone for this task
63418+ so that a pivoted root isn't treated as a chroot
63419+ */
63420 hits += replace_path(&fs->root, old_root, new_root);
63421 hits += replace_path(&fs->pwd, old_root, new_root);
63422 write_seqcount_end(&fs->seq);
63423@@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
63424 task_lock(tsk);
63425 spin_lock(&fs->lock);
63426 tsk->fs = NULL;
63427- kill = !--fs->users;
63428+ gr_clear_chroot_entries(tsk);
63429+ kill = !atomic_dec_return(&fs->users);
63430 spin_unlock(&fs->lock);
63431 task_unlock(tsk);
63432 if (kill)
63433@@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
63434 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
63435 /* We don't need to lock fs - think why ;-) */
63436 if (fs) {
63437- fs->users = 1;
63438+ atomic_set(&fs->users, 1);
63439 fs->in_exec = 0;
63440 spin_lock_init(&fs->lock);
63441 seqcount_init(&fs->seq);
63442@@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
63443 spin_lock(&old->lock);
63444 fs->root = old->root;
63445 path_get(&fs->root);
63446+ /* instead of calling gr_set_chroot_entries here,
63447+ we call it from every caller of this function
63448+ */
63449 fs->pwd = old->pwd;
63450 path_get(&fs->pwd);
63451 spin_unlock(&old->lock);
63452@@ -139,8 +149,9 @@ int unshare_fs_struct(void)
63453
63454 task_lock(current);
63455 spin_lock(&fs->lock);
63456- kill = !--fs->users;
63457+ kill = !atomic_dec_return(&fs->users);
63458 current->fs = new_fs;
63459+ gr_set_chroot_entries(current, &new_fs->root);
63460 spin_unlock(&fs->lock);
63461 task_unlock(current);
63462
63463@@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
63464
63465 int current_umask(void)
63466 {
63467- return current->fs->umask;
63468+ return current->fs->umask | gr_acl_umask();
63469 }
63470 EXPORT_SYMBOL(current_umask);
63471
63472 /* to be mentioned only in INIT_TASK */
63473 struct fs_struct init_fs = {
63474- .users = 1,
63475+ .users = ATOMIC_INIT(1),
63476 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
63477 .seq = SEQCNT_ZERO(init_fs.seq),
63478 .umask = 0022,
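
fs->users changes from a plain int mutated under fs->lock to an atomic_t, with init_fs initialized via ATOMIC_INIT(1), so the last-reference test becomes !atomic_dec_return(&fs->users); the decrement and the zero test must be a single atomic step, or two exiting tasks could both (or neither) observe zero. A runnable userspace model of that idiom, using GCC atomics and model names to avoid clashing with the real types:

typedef struct { volatile int counter; } atomic_t_model;

static int atomic_dec_return_model(atomic_t_model *v)
{
	/* returns the post-decrement value, like the kernel helper */
	return __atomic_sub_fetch(&v->counter, 1, __ATOMIC_SEQ_CST);
}

/* usage: if (!atomic_dec_return_model(&fs_users)) free_fs_struct(); */
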
63479diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
63480index aec01be..cf81ff9 100644
63481--- a/fs/fscache/cookie.c
63482+++ b/fs/fscache/cookie.c
63483@@ -19,7 +19,7 @@
63484
63485 struct kmem_cache *fscache_cookie_jar;
63486
63487-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
63488+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
63489
63490 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
63491 static int fscache_alloc_object(struct fscache_cache *cache,
63492@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
63493 parent ? (char *) parent->def->name : "<no-parent>",
63494 def->name, netfs_data, enable);
63495
63496- fscache_stat(&fscache_n_acquires);
63497+ fscache_stat_unchecked(&fscache_n_acquires);
63498
63499 /* if there's no parent cookie, then we don't create one here either */
63500 if (!parent) {
63501- fscache_stat(&fscache_n_acquires_null);
63502+ fscache_stat_unchecked(&fscache_n_acquires_null);
63503 _leave(" [no parent]");
63504 return NULL;
63505 }
63506@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
63507 /* allocate and initialise a cookie */
63508 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
63509 if (!cookie) {
63510- fscache_stat(&fscache_n_acquires_oom);
63511+ fscache_stat_unchecked(&fscache_n_acquires_oom);
63512 _leave(" [ENOMEM]");
63513 return NULL;
63514 }
63515@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
63516
63517 switch (cookie->def->type) {
63518 case FSCACHE_COOKIE_TYPE_INDEX:
63519- fscache_stat(&fscache_n_cookie_index);
63520+ fscache_stat_unchecked(&fscache_n_cookie_index);
63521 break;
63522 case FSCACHE_COOKIE_TYPE_DATAFILE:
63523- fscache_stat(&fscache_n_cookie_data);
63524+ fscache_stat_unchecked(&fscache_n_cookie_data);
63525 break;
63526 default:
63527- fscache_stat(&fscache_n_cookie_special);
63528+ fscache_stat_unchecked(&fscache_n_cookie_special);
63529 break;
63530 }
63531
63532@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
63533 } else {
63534 atomic_dec(&parent->n_children);
63535 __fscache_cookie_put(cookie);
63536- fscache_stat(&fscache_n_acquires_nobufs);
63537+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
63538 _leave(" = NULL");
63539 return NULL;
63540 }
63541@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
63542 }
63543 }
63544
63545- fscache_stat(&fscache_n_acquires_ok);
63546+ fscache_stat_unchecked(&fscache_n_acquires_ok);
63547 _leave(" = %p", cookie);
63548 return cookie;
63549 }
63550@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
63551 cache = fscache_select_cache_for_object(cookie->parent);
63552 if (!cache) {
63553 up_read(&fscache_addremove_sem);
63554- fscache_stat(&fscache_n_acquires_no_cache);
63555+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
63556 _leave(" = -ENOMEDIUM [no cache]");
63557 return -ENOMEDIUM;
63558 }
63559@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
63560 object = cache->ops->alloc_object(cache, cookie);
63561 fscache_stat_d(&fscache_n_cop_alloc_object);
63562 if (IS_ERR(object)) {
63563- fscache_stat(&fscache_n_object_no_alloc);
63564+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
63565 ret = PTR_ERR(object);
63566 goto error;
63567 }
63568
63569- fscache_stat(&fscache_n_object_alloc);
63570+ fscache_stat_unchecked(&fscache_n_object_alloc);
63571
63572- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
63573+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
63574
63575 _debug("ALLOC OBJ%x: %s {%lx}",
63576 object->debug_id, cookie->def->name, object->events);
63577@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
63578
63579 _enter("{%s}", cookie->def->name);
63580
63581- fscache_stat(&fscache_n_invalidates);
63582+ fscache_stat_unchecked(&fscache_n_invalidates);
63583
63584 /* Only permit invalidation of data files. Invalidating an index will
63585 * require the caller to release all its attachments to the tree rooted
63586@@ -477,10 +477,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
63587 {
63588 struct fscache_object *object;
63589
63590- fscache_stat(&fscache_n_updates);
63591+ fscache_stat_unchecked(&fscache_n_updates);
63592
63593 if (!cookie) {
63594- fscache_stat(&fscache_n_updates_null);
63595+ fscache_stat_unchecked(&fscache_n_updates_null);
63596 _leave(" [no cookie]");
63597 return;
63598 }
63599@@ -581,12 +581,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
63600 */
63601 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
63602 {
63603- fscache_stat(&fscache_n_relinquishes);
63604+ fscache_stat_unchecked(&fscache_n_relinquishes);
63605 if (retire)
63606- fscache_stat(&fscache_n_relinquishes_retire);
63607+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
63608
63609 if (!cookie) {
63610- fscache_stat(&fscache_n_relinquishes_null);
63611+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
63612 _leave(" [no cookie]");
63613 return;
63614 }
63615@@ -687,7 +687,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
63616 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
63617 goto inconsistent;
63618
63619- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
63620+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
63621
63622 __fscache_use_cookie(cookie);
63623 if (fscache_submit_op(object, op) < 0)
63624diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
63625index bc6c08f..09c0d96 100644
63626--- a/fs/fscache/internal.h
63627+++ b/fs/fscache/internal.h
63628@@ -139,8 +139,8 @@ extern void fscache_operation_gc(struct work_struct *);
63629 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
63630 extern int fscache_wait_for_operation_activation(struct fscache_object *,
63631 struct fscache_operation *,
63632- atomic_t *,
63633- atomic_t *,
63634+ atomic_unchecked_t *,
63635+ atomic_unchecked_t *,
63636 void (*)(struct fscache_operation *));
63637 extern void fscache_invalidate_writes(struct fscache_cookie *);
63638
63639@@ -159,101 +159,101 @@ extern void fscache_proc_cleanup(void);
63640 * stats.c
63641 */
63642 #ifdef CONFIG_FSCACHE_STATS
63643-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
63644-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
63645+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
63646+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
63647
63648-extern atomic_t fscache_n_op_pend;
63649-extern atomic_t fscache_n_op_run;
63650-extern atomic_t fscache_n_op_enqueue;
63651-extern atomic_t fscache_n_op_deferred_release;
63652-extern atomic_t fscache_n_op_release;
63653-extern atomic_t fscache_n_op_gc;
63654-extern atomic_t fscache_n_op_cancelled;
63655-extern atomic_t fscache_n_op_rejected;
63656+extern atomic_unchecked_t fscache_n_op_pend;
63657+extern atomic_unchecked_t fscache_n_op_run;
63658+extern atomic_unchecked_t fscache_n_op_enqueue;
63659+extern atomic_unchecked_t fscache_n_op_deferred_release;
63660+extern atomic_unchecked_t fscache_n_op_release;
63661+extern atomic_unchecked_t fscache_n_op_gc;
63662+extern atomic_unchecked_t fscache_n_op_cancelled;
63663+extern atomic_unchecked_t fscache_n_op_rejected;
63664
63665-extern atomic_t fscache_n_attr_changed;
63666-extern atomic_t fscache_n_attr_changed_ok;
63667-extern atomic_t fscache_n_attr_changed_nobufs;
63668-extern atomic_t fscache_n_attr_changed_nomem;
63669-extern atomic_t fscache_n_attr_changed_calls;
63670+extern atomic_unchecked_t fscache_n_attr_changed;
63671+extern atomic_unchecked_t fscache_n_attr_changed_ok;
63672+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
63673+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
63674+extern atomic_unchecked_t fscache_n_attr_changed_calls;
63675
63676-extern atomic_t fscache_n_allocs;
63677-extern atomic_t fscache_n_allocs_ok;
63678-extern atomic_t fscache_n_allocs_wait;
63679-extern atomic_t fscache_n_allocs_nobufs;
63680-extern atomic_t fscache_n_allocs_intr;
63681-extern atomic_t fscache_n_allocs_object_dead;
63682-extern atomic_t fscache_n_alloc_ops;
63683-extern atomic_t fscache_n_alloc_op_waits;
63684+extern atomic_unchecked_t fscache_n_allocs;
63685+extern atomic_unchecked_t fscache_n_allocs_ok;
63686+extern atomic_unchecked_t fscache_n_allocs_wait;
63687+extern atomic_unchecked_t fscache_n_allocs_nobufs;
63688+extern atomic_unchecked_t fscache_n_allocs_intr;
63689+extern atomic_unchecked_t fscache_n_allocs_object_dead;
63690+extern atomic_unchecked_t fscache_n_alloc_ops;
63691+extern atomic_unchecked_t fscache_n_alloc_op_waits;
63692
63693-extern atomic_t fscache_n_retrievals;
63694-extern atomic_t fscache_n_retrievals_ok;
63695-extern atomic_t fscache_n_retrievals_wait;
63696-extern atomic_t fscache_n_retrievals_nodata;
63697-extern atomic_t fscache_n_retrievals_nobufs;
63698-extern atomic_t fscache_n_retrievals_intr;
63699-extern atomic_t fscache_n_retrievals_nomem;
63700-extern atomic_t fscache_n_retrievals_object_dead;
63701-extern atomic_t fscache_n_retrieval_ops;
63702-extern atomic_t fscache_n_retrieval_op_waits;
63703+extern atomic_unchecked_t fscache_n_retrievals;
63704+extern atomic_unchecked_t fscache_n_retrievals_ok;
63705+extern atomic_unchecked_t fscache_n_retrievals_wait;
63706+extern atomic_unchecked_t fscache_n_retrievals_nodata;
63707+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
63708+extern atomic_unchecked_t fscache_n_retrievals_intr;
63709+extern atomic_unchecked_t fscache_n_retrievals_nomem;
63710+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
63711+extern atomic_unchecked_t fscache_n_retrieval_ops;
63712+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
63713
63714-extern atomic_t fscache_n_stores;
63715-extern atomic_t fscache_n_stores_ok;
63716-extern atomic_t fscache_n_stores_again;
63717-extern atomic_t fscache_n_stores_nobufs;
63718-extern atomic_t fscache_n_stores_oom;
63719-extern atomic_t fscache_n_store_ops;
63720-extern atomic_t fscache_n_store_calls;
63721-extern atomic_t fscache_n_store_pages;
63722-extern atomic_t fscache_n_store_radix_deletes;
63723-extern atomic_t fscache_n_store_pages_over_limit;
63724+extern atomic_unchecked_t fscache_n_stores;
63725+extern atomic_unchecked_t fscache_n_stores_ok;
63726+extern atomic_unchecked_t fscache_n_stores_again;
63727+extern atomic_unchecked_t fscache_n_stores_nobufs;
63728+extern atomic_unchecked_t fscache_n_stores_oom;
63729+extern atomic_unchecked_t fscache_n_store_ops;
63730+extern atomic_unchecked_t fscache_n_store_calls;
63731+extern atomic_unchecked_t fscache_n_store_pages;
63732+extern atomic_unchecked_t fscache_n_store_radix_deletes;
63733+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
63734
63735-extern atomic_t fscache_n_store_vmscan_not_storing;
63736-extern atomic_t fscache_n_store_vmscan_gone;
63737-extern atomic_t fscache_n_store_vmscan_busy;
63738-extern atomic_t fscache_n_store_vmscan_cancelled;
63739-extern atomic_t fscache_n_store_vmscan_wait;
63740+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
63741+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
63742+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
63743+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
63744+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
63745
63746-extern atomic_t fscache_n_marks;
63747-extern atomic_t fscache_n_uncaches;
63748+extern atomic_unchecked_t fscache_n_marks;
63749+extern atomic_unchecked_t fscache_n_uncaches;
63750
63751-extern atomic_t fscache_n_acquires;
63752-extern atomic_t fscache_n_acquires_null;
63753-extern atomic_t fscache_n_acquires_no_cache;
63754-extern atomic_t fscache_n_acquires_ok;
63755-extern atomic_t fscache_n_acquires_nobufs;
63756-extern atomic_t fscache_n_acquires_oom;
63757+extern atomic_unchecked_t fscache_n_acquires;
63758+extern atomic_unchecked_t fscache_n_acquires_null;
63759+extern atomic_unchecked_t fscache_n_acquires_no_cache;
63760+extern atomic_unchecked_t fscache_n_acquires_ok;
63761+extern atomic_unchecked_t fscache_n_acquires_nobufs;
63762+extern atomic_unchecked_t fscache_n_acquires_oom;
63763
63764-extern atomic_t fscache_n_invalidates;
63765-extern atomic_t fscache_n_invalidates_run;
63766+extern atomic_unchecked_t fscache_n_invalidates;
63767+extern atomic_unchecked_t fscache_n_invalidates_run;
63768
63769-extern atomic_t fscache_n_updates;
63770-extern atomic_t fscache_n_updates_null;
63771-extern atomic_t fscache_n_updates_run;
63772+extern atomic_unchecked_t fscache_n_updates;
63773+extern atomic_unchecked_t fscache_n_updates_null;
63774+extern atomic_unchecked_t fscache_n_updates_run;
63775
63776-extern atomic_t fscache_n_relinquishes;
63777-extern atomic_t fscache_n_relinquishes_null;
63778-extern atomic_t fscache_n_relinquishes_waitcrt;
63779-extern atomic_t fscache_n_relinquishes_retire;
63780+extern atomic_unchecked_t fscache_n_relinquishes;
63781+extern atomic_unchecked_t fscache_n_relinquishes_null;
63782+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
63783+extern atomic_unchecked_t fscache_n_relinquishes_retire;
63784
63785-extern atomic_t fscache_n_cookie_index;
63786-extern atomic_t fscache_n_cookie_data;
63787-extern atomic_t fscache_n_cookie_special;
63788+extern atomic_unchecked_t fscache_n_cookie_index;
63789+extern atomic_unchecked_t fscache_n_cookie_data;
63790+extern atomic_unchecked_t fscache_n_cookie_special;
63791
63792-extern atomic_t fscache_n_object_alloc;
63793-extern atomic_t fscache_n_object_no_alloc;
63794-extern atomic_t fscache_n_object_lookups;
63795-extern atomic_t fscache_n_object_lookups_negative;
63796-extern atomic_t fscache_n_object_lookups_positive;
63797-extern atomic_t fscache_n_object_lookups_timed_out;
63798-extern atomic_t fscache_n_object_created;
63799-extern atomic_t fscache_n_object_avail;
63800-extern atomic_t fscache_n_object_dead;
63801+extern atomic_unchecked_t fscache_n_object_alloc;
63802+extern atomic_unchecked_t fscache_n_object_no_alloc;
63803+extern atomic_unchecked_t fscache_n_object_lookups;
63804+extern atomic_unchecked_t fscache_n_object_lookups_negative;
63805+extern atomic_unchecked_t fscache_n_object_lookups_positive;
63806+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
63807+extern atomic_unchecked_t fscache_n_object_created;
63808+extern atomic_unchecked_t fscache_n_object_avail;
63809+extern atomic_unchecked_t fscache_n_object_dead;
63810
63811-extern atomic_t fscache_n_checkaux_none;
63812-extern atomic_t fscache_n_checkaux_okay;
63813-extern atomic_t fscache_n_checkaux_update;
63814-extern atomic_t fscache_n_checkaux_obsolete;
63815+extern atomic_unchecked_t fscache_n_checkaux_none;
63816+extern atomic_unchecked_t fscache_n_checkaux_okay;
63817+extern atomic_unchecked_t fscache_n_checkaux_update;
63818+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
63819
63820 extern atomic_t fscache_n_cop_alloc_object;
63821 extern atomic_t fscache_n_cop_lookup_object;
63822@@ -278,6 +278,11 @@ static inline void fscache_stat(atomic_t *stat)
63823 atomic_inc(stat);
63824 }
63825
63826+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
63827+{
63828+ atomic_inc_unchecked(stat);
63829+}
63830+
63831 static inline void fscache_stat_d(atomic_t *stat)
63832 {
63833 atomic_dec(stat);
63834@@ -290,6 +295,7 @@ extern const struct file_operations fscache_stats_fops;
63835
63836 #define __fscache_stat(stat) (NULL)
63837 #define fscache_stat(stat) do {} while (0)
63838+#define fscache_stat_unchecked(stat) do {} while (0)
63839 #define fscache_stat_d(stat) do {} while (0)
63840 #endif
63841
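
This header hunk carries the mechanism used by every fscache file below: each statistics counter flips to atomic_unchecked_t, fscache_stat() gains an _unchecked twin, and the !CONFIG_FSCACHE_STATS stubs get a matching no-op macro. Only the paired in-flight counters (fscache_n_cop_*), which are incremented and then decremented again through fscache_stat_d(), stay plain atomic_t and so keep REFCOUNT overflow detection. The resulting helper set, consolidated from the hunks above:

#ifdef CONFIG_FSCACHE_STATS
static inline void fscache_stat(atomic_t *stat)
{
	atomic_inc(stat);		/* overflow-checked under PaX */
}

static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
{
	atomic_inc_unchecked(stat);	/* wrapping; pure statistics */
}

static inline void fscache_stat_d(atomic_t *stat)
{
	atomic_dec(stat);		/* balanced with fscache_stat() */
}
#else
#define fscache_stat(stat)		do {} while (0)
#define fscache_stat_unchecked(stat)	do {} while (0)
#define fscache_stat_d(stat)		do {} while (0)
#endif
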
63842diff --git a/fs/fscache/object.c b/fs/fscache/object.c
63843index d3b4539..ed0c659 100644
63844--- a/fs/fscache/object.c
63845+++ b/fs/fscache/object.c
63846@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
63847 _debug("LOOKUP \"%s\" in \"%s\"",
63848 cookie->def->name, object->cache->tag->name);
63849
63850- fscache_stat(&fscache_n_object_lookups);
63851+ fscache_stat_unchecked(&fscache_n_object_lookups);
63852 fscache_stat(&fscache_n_cop_lookup_object);
63853 ret = object->cache->ops->lookup_object(object);
63854 fscache_stat_d(&fscache_n_cop_lookup_object);
63855@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
63856 if (ret == -ETIMEDOUT) {
63857 /* probably stuck behind another object, so move this one to
63858 * the back of the queue */
63859- fscache_stat(&fscache_n_object_lookups_timed_out);
63860+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
63861 _leave(" [timeout]");
63862 return NO_TRANSIT;
63863 }
63864@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
63865 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
63866
63867 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
63868- fscache_stat(&fscache_n_object_lookups_negative);
63869+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
63870
63871 /* Allow write requests to begin stacking up and read requests to begin
63872 * returning ENODATA.
63873@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
63874 /* if we were still looking up, then we must have a positive lookup
63875 * result, in which case there may be data available */
63876 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
63877- fscache_stat(&fscache_n_object_lookups_positive);
63878+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
63879
63880 /* We do (presumably) have data */
63881 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
63882@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
63883 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
63884 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
63885 } else {
63886- fscache_stat(&fscache_n_object_created);
63887+ fscache_stat_unchecked(&fscache_n_object_created);
63888 }
63889
63890 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
63891@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
63892 fscache_stat_d(&fscache_n_cop_lookup_complete);
63893
63894 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
63895- fscache_stat(&fscache_n_object_avail);
63896+ fscache_stat_unchecked(&fscache_n_object_avail);
63897
63898 _leave("");
63899 return transit_to(JUMPSTART_DEPS);
63900@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
63901
63902 /* this just shifts the object release to the work processor */
63903 fscache_put_object(object);
63904- fscache_stat(&fscache_n_object_dead);
63905+ fscache_stat_unchecked(&fscache_n_object_dead);
63906
63907 _leave("");
63908 return transit_to(OBJECT_DEAD);
63909@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
63910 enum fscache_checkaux result;
63911
63912 if (!object->cookie->def->check_aux) {
63913- fscache_stat(&fscache_n_checkaux_none);
63914+ fscache_stat_unchecked(&fscache_n_checkaux_none);
63915 return FSCACHE_CHECKAUX_OKAY;
63916 }
63917
63918@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
63919 switch (result) {
63920 /* entry okay as is */
63921 case FSCACHE_CHECKAUX_OKAY:
63922- fscache_stat(&fscache_n_checkaux_okay);
63923+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
63924 break;
63925
63926 /* entry requires update */
63927 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
63928- fscache_stat(&fscache_n_checkaux_update);
63929+ fscache_stat_unchecked(&fscache_n_checkaux_update);
63930 break;
63931
63932 /* entry requires deletion */
63933 case FSCACHE_CHECKAUX_OBSOLETE:
63934- fscache_stat(&fscache_n_checkaux_obsolete);
63935+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
63936 break;
63937
63938 default:
63939@@ -992,7 +992,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
63940 {
63941 const struct fscache_state *s;
63942
63943- fscache_stat(&fscache_n_invalidates_run);
63944+ fscache_stat_unchecked(&fscache_n_invalidates_run);
63945 fscache_stat(&fscache_n_cop_invalidate_object);
63946 s = _fscache_invalidate_object(object, event);
63947 fscache_stat_d(&fscache_n_cop_invalidate_object);
63948@@ -1007,7 +1007,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
63949 {
63950 _enter("{OBJ%x},%d", object->debug_id, event);
63951
63952- fscache_stat(&fscache_n_updates_run);
63953+ fscache_stat_unchecked(&fscache_n_updates_run);
63954 fscache_stat(&fscache_n_cop_update_object);
63955 object->cache->ops->update_object(object);
63956 fscache_stat_d(&fscache_n_cop_update_object);
63957diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
63958index e7b87a0..a85d47a 100644
63959--- a/fs/fscache/operation.c
63960+++ b/fs/fscache/operation.c
63961@@ -17,7 +17,7 @@
63962 #include <linux/slab.h>
63963 #include "internal.h"
63964
63965-atomic_t fscache_op_debug_id;
63966+atomic_unchecked_t fscache_op_debug_id;
63967 EXPORT_SYMBOL(fscache_op_debug_id);
63968
63969 /**
63970@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
63971 ASSERTCMP(atomic_read(&op->usage), >, 0);
63972 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
63973
63974- fscache_stat(&fscache_n_op_enqueue);
63975+ fscache_stat_unchecked(&fscache_n_op_enqueue);
63976 switch (op->flags & FSCACHE_OP_TYPE) {
63977 case FSCACHE_OP_ASYNC:
63978 _debug("queue async");
63979@@ -72,7 +72,7 @@ static void fscache_run_op(struct fscache_object *object,
63980 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
63981 if (op->processor)
63982 fscache_enqueue_operation(op);
63983- fscache_stat(&fscache_n_op_run);
63984+ fscache_stat_unchecked(&fscache_n_op_run);
63985 }
63986
63987 /*
63988@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
63989 if (object->n_in_progress > 0) {
63990 atomic_inc(&op->usage);
63991 list_add_tail(&op->pend_link, &object->pending_ops);
63992- fscache_stat(&fscache_n_op_pend);
63993+ fscache_stat_unchecked(&fscache_n_op_pend);
63994 } else if (!list_empty(&object->pending_ops)) {
63995 atomic_inc(&op->usage);
63996 list_add_tail(&op->pend_link, &object->pending_ops);
63997- fscache_stat(&fscache_n_op_pend);
63998+ fscache_stat_unchecked(&fscache_n_op_pend);
63999 fscache_start_operations(object);
64000 } else {
64001 ASSERTCMP(object->n_in_progress, ==, 0);
64002@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
64003 object->n_exclusive++; /* reads and writes must wait */
64004 atomic_inc(&op->usage);
64005 list_add_tail(&op->pend_link, &object->pending_ops);
64006- fscache_stat(&fscache_n_op_pend);
64007+ fscache_stat_unchecked(&fscache_n_op_pend);
64008 ret = 0;
64009 } else {
64010 /* If we're in any other state, there must have been an I/O
64011@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_object *object,
64012 if (object->n_exclusive > 0) {
64013 atomic_inc(&op->usage);
64014 list_add_tail(&op->pend_link, &object->pending_ops);
64015- fscache_stat(&fscache_n_op_pend);
64016+ fscache_stat_unchecked(&fscache_n_op_pend);
64017 } else if (!list_empty(&object->pending_ops)) {
64018 atomic_inc(&op->usage);
64019 list_add_tail(&op->pend_link, &object->pending_ops);
64020- fscache_stat(&fscache_n_op_pend);
64021+ fscache_stat_unchecked(&fscache_n_op_pend);
64022 fscache_start_operations(object);
64023 } else {
64024 ASSERTCMP(object->n_exclusive, ==, 0);
64025@@ -227,10 +227,10 @@ int fscache_submit_op(struct fscache_object *object,
64026 object->n_ops++;
64027 atomic_inc(&op->usage);
64028 list_add_tail(&op->pend_link, &object->pending_ops);
64029- fscache_stat(&fscache_n_op_pend);
64030+ fscache_stat_unchecked(&fscache_n_op_pend);
64031 ret = 0;
64032 } else if (fscache_object_is_dying(object)) {
64033- fscache_stat(&fscache_n_op_rejected);
64034+ fscache_stat_unchecked(&fscache_n_op_rejected);
64035 op->state = FSCACHE_OP_ST_CANCELLED;
64036 ret = -ENOBUFS;
64037 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
64038@@ -309,7 +309,7 @@ int fscache_cancel_op(struct fscache_operation *op,
64039 ret = -EBUSY;
64040 if (op->state == FSCACHE_OP_ST_PENDING) {
64041 ASSERT(!list_empty(&op->pend_link));
64042- fscache_stat(&fscache_n_op_cancelled);
64043+ fscache_stat_unchecked(&fscache_n_op_cancelled);
64044 list_del_init(&op->pend_link);
64045 if (do_cancel)
64046 do_cancel(op);
64047@@ -341,7 +341,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
64048 while (!list_empty(&object->pending_ops)) {
64049 op = list_entry(object->pending_ops.next,
64050 struct fscache_operation, pend_link);
64051- fscache_stat(&fscache_n_op_cancelled);
64052+ fscache_stat_unchecked(&fscache_n_op_cancelled);
64053 list_del_init(&op->pend_link);
64054
64055 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
64056@@ -413,7 +413,7 @@ void fscache_put_operation(struct fscache_operation *op)
64057 op->state, ==, FSCACHE_OP_ST_CANCELLED);
64058 op->state = FSCACHE_OP_ST_DEAD;
64059
64060- fscache_stat(&fscache_n_op_release);
64061+ fscache_stat_unchecked(&fscache_n_op_release);
64062
64063 if (op->release) {
64064 op->release(op);
64065@@ -432,7 +432,7 @@ void fscache_put_operation(struct fscache_operation *op)
64066 * lock, and defer it otherwise */
64067 if (!spin_trylock(&object->lock)) {
64068 _debug("defer put");
64069- fscache_stat(&fscache_n_op_deferred_release);
64070+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
64071
64072 cache = object->cache;
64073 spin_lock(&cache->op_gc_list_lock);
64074@@ -485,7 +485,7 @@ void fscache_operation_gc(struct work_struct *work)
64075
64076 _debug("GC DEFERRED REL OBJ%x OP%x",
64077 object->debug_id, op->debug_id);
64078- fscache_stat(&fscache_n_op_gc);
64079+ fscache_stat_unchecked(&fscache_n_op_gc);
64080
64081 ASSERTCMP(atomic_read(&op->usage), ==, 0);
64082 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
64083diff --git a/fs/fscache/page.c b/fs/fscache/page.c
64084index ed70714..67f4982 100644
64085--- a/fs/fscache/page.c
64086+++ b/fs/fscache/page.c
64087@@ -61,7 +61,7 @@ try_again:
64088 val = radix_tree_lookup(&cookie->stores, page->index);
64089 if (!val) {
64090 rcu_read_unlock();
64091- fscache_stat(&fscache_n_store_vmscan_not_storing);
64092+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
64093 __fscache_uncache_page(cookie, page);
64094 return true;
64095 }
64096@@ -91,11 +91,11 @@ try_again:
64097 spin_unlock(&cookie->stores_lock);
64098
64099 if (xpage) {
64100- fscache_stat(&fscache_n_store_vmscan_cancelled);
64101- fscache_stat(&fscache_n_store_radix_deletes);
64102+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
64103+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
64104 ASSERTCMP(xpage, ==, page);
64105 } else {
64106- fscache_stat(&fscache_n_store_vmscan_gone);
64107+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
64108 }
64109
64110 wake_up_bit(&cookie->flags, 0);
64111@@ -110,11 +110,11 @@ page_busy:
64112 * sleeping on memory allocation, so we may need to impose a timeout
64113 * too. */
64114 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
64115- fscache_stat(&fscache_n_store_vmscan_busy);
64116+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
64117 return false;
64118 }
64119
64120- fscache_stat(&fscache_n_store_vmscan_wait);
64121+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
64122 __fscache_wait_on_page_write(cookie, page);
64123 gfp &= ~__GFP_WAIT;
64124 goto try_again;
64125@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
64126 FSCACHE_COOKIE_STORING_TAG);
64127 if (!radix_tree_tag_get(&cookie->stores, page->index,
64128 FSCACHE_COOKIE_PENDING_TAG)) {
64129- fscache_stat(&fscache_n_store_radix_deletes);
64130+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
64131 xpage = radix_tree_delete(&cookie->stores, page->index);
64132 }
64133 spin_unlock(&cookie->stores_lock);
64134@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
64135
64136 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
64137
64138- fscache_stat(&fscache_n_attr_changed_calls);
64139+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
64140
64141 if (fscache_object_is_active(object)) {
64142 fscache_stat(&fscache_n_cop_attr_changed);
64143@@ -188,11 +188,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
64144
64145 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
64146
64147- fscache_stat(&fscache_n_attr_changed);
64148+ fscache_stat_unchecked(&fscache_n_attr_changed);
64149
64150 op = kzalloc(sizeof(*op), GFP_KERNEL);
64151 if (!op) {
64152- fscache_stat(&fscache_n_attr_changed_nomem);
64153+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
64154 _leave(" = -ENOMEM");
64155 return -ENOMEM;
64156 }
64157@@ -214,7 +214,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
64158 if (fscache_submit_exclusive_op(object, op) < 0)
64159 goto nobufs;
64160 spin_unlock(&cookie->lock);
64161- fscache_stat(&fscache_n_attr_changed_ok);
64162+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
64163 fscache_put_operation(op);
64164 _leave(" = 0");
64165 return 0;
64166@@ -225,7 +225,7 @@ nobufs:
64167 kfree(op);
64168 if (wake_cookie)
64169 __fscache_wake_unused_cookie(cookie);
64170- fscache_stat(&fscache_n_attr_changed_nobufs);
64171+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
64172 _leave(" = %d", -ENOBUFS);
64173 return -ENOBUFS;
64174 }
64175@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
64176 /* allocate a retrieval operation and attempt to submit it */
64177 op = kzalloc(sizeof(*op), GFP_NOIO);
64178 if (!op) {
64179- fscache_stat(&fscache_n_retrievals_nomem);
64180+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
64181 return NULL;
64182 }
64183
64184@@ -294,13 +294,13 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
64185 return 0;
64186 }
64187
64188- fscache_stat(&fscache_n_retrievals_wait);
64189+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
64190
64191 jif = jiffies;
64192 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
64193 fscache_wait_bit_interruptible,
64194 TASK_INTERRUPTIBLE) != 0) {
64195- fscache_stat(&fscache_n_retrievals_intr);
64196+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
64197 _leave(" = -ERESTARTSYS");
64198 return -ERESTARTSYS;
64199 }
64200@@ -329,8 +329,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
64201 */
64202 int fscache_wait_for_operation_activation(struct fscache_object *object,
64203 struct fscache_operation *op,
64204- atomic_t *stat_op_waits,
64205- atomic_t *stat_object_dead,
64206+ atomic_unchecked_t *stat_op_waits,
64207+ atomic_unchecked_t *stat_object_dead,
64208 void (*do_cancel)(struct fscache_operation *))
64209 {
64210 int ret;
64211@@ -340,7 +340,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
64212
64213 _debug(">>> WT");
64214 if (stat_op_waits)
64215- fscache_stat(stat_op_waits);
64216+ fscache_stat_unchecked(stat_op_waits);
64217 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
64218 fscache_wait_bit_interruptible,
64219 TASK_INTERRUPTIBLE) != 0) {
64220@@ -358,7 +358,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
64221 check_if_dead:
64222 if (op->state == FSCACHE_OP_ST_CANCELLED) {
64223 if (stat_object_dead)
64224- fscache_stat(stat_object_dead);
64225+ fscache_stat_unchecked(stat_object_dead);
64226 _leave(" = -ENOBUFS [cancelled]");
64227 return -ENOBUFS;
64228 }
64229@@ -366,7 +366,7 @@ check_if_dead:
64230 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
64231 fscache_cancel_op(op, do_cancel);
64232 if (stat_object_dead)
64233- fscache_stat(stat_object_dead);
64234+ fscache_stat_unchecked(stat_object_dead);
64235 return -ENOBUFS;
64236 }
64237 return 0;
64238@@ -394,7 +394,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
64239
64240 _enter("%p,%p,,,", cookie, page);
64241
64242- fscache_stat(&fscache_n_retrievals);
64243+ fscache_stat_unchecked(&fscache_n_retrievals);
64244
64245 if (hlist_empty(&cookie->backing_objects))
64246 goto nobufs;
64247@@ -436,7 +436,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
64248 goto nobufs_unlock_dec;
64249 spin_unlock(&cookie->lock);
64250
64251- fscache_stat(&fscache_n_retrieval_ops);
64252+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
64253
64254 /* pin the netfs read context in case we need to do the actual netfs
64255 * read because we've encountered a cache read failure */
64256@@ -467,15 +467,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
64257
64258 error:
64259 if (ret == -ENOMEM)
64260- fscache_stat(&fscache_n_retrievals_nomem);
64261+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
64262 else if (ret == -ERESTARTSYS)
64263- fscache_stat(&fscache_n_retrievals_intr);
64264+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
64265 else if (ret == -ENODATA)
64266- fscache_stat(&fscache_n_retrievals_nodata);
64267+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
64268 else if (ret < 0)
64269- fscache_stat(&fscache_n_retrievals_nobufs);
64270+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
64271 else
64272- fscache_stat(&fscache_n_retrievals_ok);
64273+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
64274
64275 fscache_put_retrieval(op);
64276 _leave(" = %d", ret);
64277@@ -490,7 +490,7 @@ nobufs_unlock:
64278 __fscache_wake_unused_cookie(cookie);
64279 kfree(op);
64280 nobufs:
64281- fscache_stat(&fscache_n_retrievals_nobufs);
64282+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
64283 _leave(" = -ENOBUFS");
64284 return -ENOBUFS;
64285 }
64286@@ -529,7 +529,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
64287
64288 _enter("%p,,%d,,,", cookie, *nr_pages);
64289
64290- fscache_stat(&fscache_n_retrievals);
64291+ fscache_stat_unchecked(&fscache_n_retrievals);
64292
64293 if (hlist_empty(&cookie->backing_objects))
64294 goto nobufs;
64295@@ -567,7 +567,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
64296 goto nobufs_unlock_dec;
64297 spin_unlock(&cookie->lock);
64298
64299- fscache_stat(&fscache_n_retrieval_ops);
64300+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
64301
64302 /* pin the netfs read context in case we need to do the actual netfs
64303 * read because we've encountered a cache read failure */
64304@@ -598,15 +598,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
64305
64306 error:
64307 if (ret == -ENOMEM)
64308- fscache_stat(&fscache_n_retrievals_nomem);
64309+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
64310 else if (ret == -ERESTARTSYS)
64311- fscache_stat(&fscache_n_retrievals_intr);
64312+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
64313 else if (ret == -ENODATA)
64314- fscache_stat(&fscache_n_retrievals_nodata);
64315+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
64316 else if (ret < 0)
64317- fscache_stat(&fscache_n_retrievals_nobufs);
64318+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
64319 else
64320- fscache_stat(&fscache_n_retrievals_ok);
64321+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
64322
64323 fscache_put_retrieval(op);
64324 _leave(" = %d", ret);
64325@@ -621,7 +621,7 @@ nobufs_unlock:
64326 if (wake_cookie)
64327 __fscache_wake_unused_cookie(cookie);
64328 nobufs:
64329- fscache_stat(&fscache_n_retrievals_nobufs);
64330+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
64331 _leave(" = -ENOBUFS");
64332 return -ENOBUFS;
64333 }
64334@@ -646,7 +646,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
64335
64336 _enter("%p,%p,,,", cookie, page);
64337
64338- fscache_stat(&fscache_n_allocs);
64339+ fscache_stat_unchecked(&fscache_n_allocs);
64340
64341 if (hlist_empty(&cookie->backing_objects))
64342 goto nobufs;
64343@@ -680,7 +680,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
64344 goto nobufs_unlock_dec;
64345 spin_unlock(&cookie->lock);
64346
64347- fscache_stat(&fscache_n_alloc_ops);
64348+ fscache_stat_unchecked(&fscache_n_alloc_ops);
64349
64350 ret = fscache_wait_for_operation_activation(
64351 object, &op->op,
64352@@ -697,11 +697,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
64353
64354 error:
64355 if (ret == -ERESTARTSYS)
64356- fscache_stat(&fscache_n_allocs_intr);
64357+ fscache_stat_unchecked(&fscache_n_allocs_intr);
64358 else if (ret < 0)
64359- fscache_stat(&fscache_n_allocs_nobufs);
64360+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
64361 else
64362- fscache_stat(&fscache_n_allocs_ok);
64363+ fscache_stat_unchecked(&fscache_n_allocs_ok);
64364
64365 fscache_put_retrieval(op);
64366 _leave(" = %d", ret);
64367@@ -715,7 +715,7 @@ nobufs_unlock:
64368 if (wake_cookie)
64369 __fscache_wake_unused_cookie(cookie);
64370 nobufs:
64371- fscache_stat(&fscache_n_allocs_nobufs);
64372+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
64373 _leave(" = -ENOBUFS");
64374 return -ENOBUFS;
64375 }
64376@@ -791,7 +791,7 @@ static void fscache_write_op(struct fscache_operation *_op)
64377
64378 spin_lock(&cookie->stores_lock);
64379
64380- fscache_stat(&fscache_n_store_calls);
64381+ fscache_stat_unchecked(&fscache_n_store_calls);
64382
64383 /* find a page to store */
64384 page = NULL;
64385@@ -802,7 +802,7 @@ static void fscache_write_op(struct fscache_operation *_op)
64386 page = results[0];
64387 _debug("gang %d [%lx]", n, page->index);
64388 if (page->index > op->store_limit) {
64389- fscache_stat(&fscache_n_store_pages_over_limit);
64390+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
64391 goto superseded;
64392 }
64393
64394@@ -814,7 +814,7 @@ static void fscache_write_op(struct fscache_operation *_op)
64395 spin_unlock(&cookie->stores_lock);
64396 spin_unlock(&object->lock);
64397
64398- fscache_stat(&fscache_n_store_pages);
64399+ fscache_stat_unchecked(&fscache_n_store_pages);
64400 fscache_stat(&fscache_n_cop_write_page);
64401 ret = object->cache->ops->write_page(op, page);
64402 fscache_stat_d(&fscache_n_cop_write_page);
64403@@ -918,7 +918,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
64404 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
64405 ASSERT(PageFsCache(page));
64406
64407- fscache_stat(&fscache_n_stores);
64408+ fscache_stat_unchecked(&fscache_n_stores);
64409
64410 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
64411 _leave(" = -ENOBUFS [invalidating]");
64412@@ -977,7 +977,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
64413 spin_unlock(&cookie->stores_lock);
64414 spin_unlock(&object->lock);
64415
64416- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
64417+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
64418 op->store_limit = object->store_limit;
64419
64420 __fscache_use_cookie(cookie);
64421@@ -986,8 +986,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
64422
64423 spin_unlock(&cookie->lock);
64424 radix_tree_preload_end();
64425- fscache_stat(&fscache_n_store_ops);
64426- fscache_stat(&fscache_n_stores_ok);
64427+ fscache_stat_unchecked(&fscache_n_store_ops);
64428+ fscache_stat_unchecked(&fscache_n_stores_ok);
64429
64430 /* the work queue now carries its own ref on the object */
64431 fscache_put_operation(&op->op);
64432@@ -995,14 +995,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
64433 return 0;
64434
64435 already_queued:
64436- fscache_stat(&fscache_n_stores_again);
64437+ fscache_stat_unchecked(&fscache_n_stores_again);
64438 already_pending:
64439 spin_unlock(&cookie->stores_lock);
64440 spin_unlock(&object->lock);
64441 spin_unlock(&cookie->lock);
64442 radix_tree_preload_end();
64443 kfree(op);
64444- fscache_stat(&fscache_n_stores_ok);
64445+ fscache_stat_unchecked(&fscache_n_stores_ok);
64446 _leave(" = 0");
64447 return 0;
64448
64449@@ -1024,14 +1024,14 @@ nobufs:
64450 kfree(op);
64451 if (wake_cookie)
64452 __fscache_wake_unused_cookie(cookie);
64453- fscache_stat(&fscache_n_stores_nobufs);
64454+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
64455 _leave(" = -ENOBUFS");
64456 return -ENOBUFS;
64457
64458 nomem_free:
64459 kfree(op);
64460 nomem:
64461- fscache_stat(&fscache_n_stores_oom);
64462+ fscache_stat_unchecked(&fscache_n_stores_oom);
64463 _leave(" = -ENOMEM");
64464 return -ENOMEM;
64465 }
64466@@ -1049,7 +1049,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
64467 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
64468 ASSERTCMP(page, !=, NULL);
64469
64470- fscache_stat(&fscache_n_uncaches);
64471+ fscache_stat_unchecked(&fscache_n_uncaches);
64472
64473 /* cache withdrawal may beat us to it */
64474 if (!PageFsCache(page))
64475@@ -1100,7 +1100,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
64476 struct fscache_cookie *cookie = op->op.object->cookie;
64477
64478 #ifdef CONFIG_FSCACHE_STATS
64479- atomic_inc(&fscache_n_marks);
64480+ atomic_inc_unchecked(&fscache_n_marks);
64481 #endif
64482
64483 _debug("- mark %p{%lx}", page, page->index);
64484diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
64485index 40d13c7..ddf52b9 100644
64486--- a/fs/fscache/stats.c
64487+++ b/fs/fscache/stats.c
64488@@ -18,99 +18,99 @@
64489 /*
64490 * operation counters
64491 */
64492-atomic_t fscache_n_op_pend;
64493-atomic_t fscache_n_op_run;
64494-atomic_t fscache_n_op_enqueue;
64495-atomic_t fscache_n_op_requeue;
64496-atomic_t fscache_n_op_deferred_release;
64497-atomic_t fscache_n_op_release;
64498-atomic_t fscache_n_op_gc;
64499-atomic_t fscache_n_op_cancelled;
64500-atomic_t fscache_n_op_rejected;
64501+atomic_unchecked_t fscache_n_op_pend;
64502+atomic_unchecked_t fscache_n_op_run;
64503+atomic_unchecked_t fscache_n_op_enqueue;
64504+atomic_unchecked_t fscache_n_op_requeue;
64505+atomic_unchecked_t fscache_n_op_deferred_release;
64506+atomic_unchecked_t fscache_n_op_release;
64507+atomic_unchecked_t fscache_n_op_gc;
64508+atomic_unchecked_t fscache_n_op_cancelled;
64509+atomic_unchecked_t fscache_n_op_rejected;
64510
64511-atomic_t fscache_n_attr_changed;
64512-atomic_t fscache_n_attr_changed_ok;
64513-atomic_t fscache_n_attr_changed_nobufs;
64514-atomic_t fscache_n_attr_changed_nomem;
64515-atomic_t fscache_n_attr_changed_calls;
64516+atomic_unchecked_t fscache_n_attr_changed;
64517+atomic_unchecked_t fscache_n_attr_changed_ok;
64518+atomic_unchecked_t fscache_n_attr_changed_nobufs;
64519+atomic_unchecked_t fscache_n_attr_changed_nomem;
64520+atomic_unchecked_t fscache_n_attr_changed_calls;
64521
64522-atomic_t fscache_n_allocs;
64523-atomic_t fscache_n_allocs_ok;
64524-atomic_t fscache_n_allocs_wait;
64525-atomic_t fscache_n_allocs_nobufs;
64526-atomic_t fscache_n_allocs_intr;
64527-atomic_t fscache_n_allocs_object_dead;
64528-atomic_t fscache_n_alloc_ops;
64529-atomic_t fscache_n_alloc_op_waits;
64530+atomic_unchecked_t fscache_n_allocs;
64531+atomic_unchecked_t fscache_n_allocs_ok;
64532+atomic_unchecked_t fscache_n_allocs_wait;
64533+atomic_unchecked_t fscache_n_allocs_nobufs;
64534+atomic_unchecked_t fscache_n_allocs_intr;
64535+atomic_unchecked_t fscache_n_allocs_object_dead;
64536+atomic_unchecked_t fscache_n_alloc_ops;
64537+atomic_unchecked_t fscache_n_alloc_op_waits;
64538
64539-atomic_t fscache_n_retrievals;
64540-atomic_t fscache_n_retrievals_ok;
64541-atomic_t fscache_n_retrievals_wait;
64542-atomic_t fscache_n_retrievals_nodata;
64543-atomic_t fscache_n_retrievals_nobufs;
64544-atomic_t fscache_n_retrievals_intr;
64545-atomic_t fscache_n_retrievals_nomem;
64546-atomic_t fscache_n_retrievals_object_dead;
64547-atomic_t fscache_n_retrieval_ops;
64548-atomic_t fscache_n_retrieval_op_waits;
64549+atomic_unchecked_t fscache_n_retrievals;
64550+atomic_unchecked_t fscache_n_retrievals_ok;
64551+atomic_unchecked_t fscache_n_retrievals_wait;
64552+atomic_unchecked_t fscache_n_retrievals_nodata;
64553+atomic_unchecked_t fscache_n_retrievals_nobufs;
64554+atomic_unchecked_t fscache_n_retrievals_intr;
64555+atomic_unchecked_t fscache_n_retrievals_nomem;
64556+atomic_unchecked_t fscache_n_retrievals_object_dead;
64557+atomic_unchecked_t fscache_n_retrieval_ops;
64558+atomic_unchecked_t fscache_n_retrieval_op_waits;
64559
64560-atomic_t fscache_n_stores;
64561-atomic_t fscache_n_stores_ok;
64562-atomic_t fscache_n_stores_again;
64563-atomic_t fscache_n_stores_nobufs;
64564-atomic_t fscache_n_stores_oom;
64565-atomic_t fscache_n_store_ops;
64566-atomic_t fscache_n_store_calls;
64567-atomic_t fscache_n_store_pages;
64568-atomic_t fscache_n_store_radix_deletes;
64569-atomic_t fscache_n_store_pages_over_limit;
64570+atomic_unchecked_t fscache_n_stores;
64571+atomic_unchecked_t fscache_n_stores_ok;
64572+atomic_unchecked_t fscache_n_stores_again;
64573+atomic_unchecked_t fscache_n_stores_nobufs;
64574+atomic_unchecked_t fscache_n_stores_oom;
64575+atomic_unchecked_t fscache_n_store_ops;
64576+atomic_unchecked_t fscache_n_store_calls;
64577+atomic_unchecked_t fscache_n_store_pages;
64578+atomic_unchecked_t fscache_n_store_radix_deletes;
64579+atomic_unchecked_t fscache_n_store_pages_over_limit;
64580
64581-atomic_t fscache_n_store_vmscan_not_storing;
64582-atomic_t fscache_n_store_vmscan_gone;
64583-atomic_t fscache_n_store_vmscan_busy;
64584-atomic_t fscache_n_store_vmscan_cancelled;
64585-atomic_t fscache_n_store_vmscan_wait;
64586+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
64587+atomic_unchecked_t fscache_n_store_vmscan_gone;
64588+atomic_unchecked_t fscache_n_store_vmscan_busy;
64589+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
64590+atomic_unchecked_t fscache_n_store_vmscan_wait;
64591
64592-atomic_t fscache_n_marks;
64593-atomic_t fscache_n_uncaches;
64594+atomic_unchecked_t fscache_n_marks;
64595+atomic_unchecked_t fscache_n_uncaches;
64596
64597-atomic_t fscache_n_acquires;
64598-atomic_t fscache_n_acquires_null;
64599-atomic_t fscache_n_acquires_no_cache;
64600-atomic_t fscache_n_acquires_ok;
64601-atomic_t fscache_n_acquires_nobufs;
64602-atomic_t fscache_n_acquires_oom;
64603+atomic_unchecked_t fscache_n_acquires;
64604+atomic_unchecked_t fscache_n_acquires_null;
64605+atomic_unchecked_t fscache_n_acquires_no_cache;
64606+atomic_unchecked_t fscache_n_acquires_ok;
64607+atomic_unchecked_t fscache_n_acquires_nobufs;
64608+atomic_unchecked_t fscache_n_acquires_oom;
64609
64610-atomic_t fscache_n_invalidates;
64611-atomic_t fscache_n_invalidates_run;
64612+atomic_unchecked_t fscache_n_invalidates;
64613+atomic_unchecked_t fscache_n_invalidates_run;
64614
64615-atomic_t fscache_n_updates;
64616-atomic_t fscache_n_updates_null;
64617-atomic_t fscache_n_updates_run;
64618+atomic_unchecked_t fscache_n_updates;
64619+atomic_unchecked_t fscache_n_updates_null;
64620+atomic_unchecked_t fscache_n_updates_run;
64621
64622-atomic_t fscache_n_relinquishes;
64623-atomic_t fscache_n_relinquishes_null;
64624-atomic_t fscache_n_relinquishes_waitcrt;
64625-atomic_t fscache_n_relinquishes_retire;
64626+atomic_unchecked_t fscache_n_relinquishes;
64627+atomic_unchecked_t fscache_n_relinquishes_null;
64628+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
64629+atomic_unchecked_t fscache_n_relinquishes_retire;
64630
64631-atomic_t fscache_n_cookie_index;
64632-atomic_t fscache_n_cookie_data;
64633-atomic_t fscache_n_cookie_special;
64634+atomic_unchecked_t fscache_n_cookie_index;
64635+atomic_unchecked_t fscache_n_cookie_data;
64636+atomic_unchecked_t fscache_n_cookie_special;
64637
64638-atomic_t fscache_n_object_alloc;
64639-atomic_t fscache_n_object_no_alloc;
64640-atomic_t fscache_n_object_lookups;
64641-atomic_t fscache_n_object_lookups_negative;
64642-atomic_t fscache_n_object_lookups_positive;
64643-atomic_t fscache_n_object_lookups_timed_out;
64644-atomic_t fscache_n_object_created;
64645-atomic_t fscache_n_object_avail;
64646-atomic_t fscache_n_object_dead;
64647+atomic_unchecked_t fscache_n_object_alloc;
64648+atomic_unchecked_t fscache_n_object_no_alloc;
64649+atomic_unchecked_t fscache_n_object_lookups;
64650+atomic_unchecked_t fscache_n_object_lookups_negative;
64651+atomic_unchecked_t fscache_n_object_lookups_positive;
64652+atomic_unchecked_t fscache_n_object_lookups_timed_out;
64653+atomic_unchecked_t fscache_n_object_created;
64654+atomic_unchecked_t fscache_n_object_avail;
64655+atomic_unchecked_t fscache_n_object_dead;
64656
64657-atomic_t fscache_n_checkaux_none;
64658-atomic_t fscache_n_checkaux_okay;
64659-atomic_t fscache_n_checkaux_update;
64660-atomic_t fscache_n_checkaux_obsolete;
64661+atomic_unchecked_t fscache_n_checkaux_none;
64662+atomic_unchecked_t fscache_n_checkaux_okay;
64663+atomic_unchecked_t fscache_n_checkaux_update;
64664+atomic_unchecked_t fscache_n_checkaux_obsolete;
64665
64666 atomic_t fscache_n_cop_alloc_object;
64667 atomic_t fscache_n_cop_lookup_object;
64668@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
64669 seq_puts(m, "FS-Cache statistics\n");
64670
64671 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
64672- atomic_read(&fscache_n_cookie_index),
64673- atomic_read(&fscache_n_cookie_data),
64674- atomic_read(&fscache_n_cookie_special));
64675+ atomic_read_unchecked(&fscache_n_cookie_index),
64676+ atomic_read_unchecked(&fscache_n_cookie_data),
64677+ atomic_read_unchecked(&fscache_n_cookie_special));
64678
64679 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
64680- atomic_read(&fscache_n_object_alloc),
64681- atomic_read(&fscache_n_object_no_alloc),
64682- atomic_read(&fscache_n_object_avail),
64683- atomic_read(&fscache_n_object_dead));
64684+ atomic_read_unchecked(&fscache_n_object_alloc),
64685+ atomic_read_unchecked(&fscache_n_object_no_alloc),
64686+ atomic_read_unchecked(&fscache_n_object_avail),
64687+ atomic_read_unchecked(&fscache_n_object_dead));
64688 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
64689- atomic_read(&fscache_n_checkaux_none),
64690- atomic_read(&fscache_n_checkaux_okay),
64691- atomic_read(&fscache_n_checkaux_update),
64692- atomic_read(&fscache_n_checkaux_obsolete));
64693+ atomic_read_unchecked(&fscache_n_checkaux_none),
64694+ atomic_read_unchecked(&fscache_n_checkaux_okay),
64695+ atomic_read_unchecked(&fscache_n_checkaux_update),
64696+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
64697
64698 seq_printf(m, "Pages : mrk=%u unc=%u\n",
64699- atomic_read(&fscache_n_marks),
64700- atomic_read(&fscache_n_uncaches));
64701+ atomic_read_unchecked(&fscache_n_marks),
64702+ atomic_read_unchecked(&fscache_n_uncaches));
64703
64704 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
64705 " oom=%u\n",
64706- atomic_read(&fscache_n_acquires),
64707- atomic_read(&fscache_n_acquires_null),
64708- atomic_read(&fscache_n_acquires_no_cache),
64709- atomic_read(&fscache_n_acquires_ok),
64710- atomic_read(&fscache_n_acquires_nobufs),
64711- atomic_read(&fscache_n_acquires_oom));
64712+ atomic_read_unchecked(&fscache_n_acquires),
64713+ atomic_read_unchecked(&fscache_n_acquires_null),
64714+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
64715+ atomic_read_unchecked(&fscache_n_acquires_ok),
64716+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
64717+ atomic_read_unchecked(&fscache_n_acquires_oom));
64718
64719 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
64720- atomic_read(&fscache_n_object_lookups),
64721- atomic_read(&fscache_n_object_lookups_negative),
64722- atomic_read(&fscache_n_object_lookups_positive),
64723- atomic_read(&fscache_n_object_created),
64724- atomic_read(&fscache_n_object_lookups_timed_out));
64725+ atomic_read_unchecked(&fscache_n_object_lookups),
64726+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
64727+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
64728+ atomic_read_unchecked(&fscache_n_object_created),
64729+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
64730
64731 seq_printf(m, "Invals : n=%u run=%u\n",
64732- atomic_read(&fscache_n_invalidates),
64733- atomic_read(&fscache_n_invalidates_run));
64734+ atomic_read_unchecked(&fscache_n_invalidates),
64735+ atomic_read_unchecked(&fscache_n_invalidates_run));
64736
64737 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
64738- atomic_read(&fscache_n_updates),
64739- atomic_read(&fscache_n_updates_null),
64740- atomic_read(&fscache_n_updates_run));
64741+ atomic_read_unchecked(&fscache_n_updates),
64742+ atomic_read_unchecked(&fscache_n_updates_null),
64743+ atomic_read_unchecked(&fscache_n_updates_run));
64744
64745 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
64746- atomic_read(&fscache_n_relinquishes),
64747- atomic_read(&fscache_n_relinquishes_null),
64748- atomic_read(&fscache_n_relinquishes_waitcrt),
64749- atomic_read(&fscache_n_relinquishes_retire));
64750+ atomic_read_unchecked(&fscache_n_relinquishes),
64751+ atomic_read_unchecked(&fscache_n_relinquishes_null),
64752+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
64753+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
64754
64755 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
64756- atomic_read(&fscache_n_attr_changed),
64757- atomic_read(&fscache_n_attr_changed_ok),
64758- atomic_read(&fscache_n_attr_changed_nobufs),
64759- atomic_read(&fscache_n_attr_changed_nomem),
64760- atomic_read(&fscache_n_attr_changed_calls));
64761+ atomic_read_unchecked(&fscache_n_attr_changed),
64762+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
64763+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
64764+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
64765+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
64766
64767 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
64768- atomic_read(&fscache_n_allocs),
64769- atomic_read(&fscache_n_allocs_ok),
64770- atomic_read(&fscache_n_allocs_wait),
64771- atomic_read(&fscache_n_allocs_nobufs),
64772- atomic_read(&fscache_n_allocs_intr));
64773+ atomic_read_unchecked(&fscache_n_allocs),
64774+ atomic_read_unchecked(&fscache_n_allocs_ok),
64775+ atomic_read_unchecked(&fscache_n_allocs_wait),
64776+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
64777+ atomic_read_unchecked(&fscache_n_allocs_intr));
64778 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
64779- atomic_read(&fscache_n_alloc_ops),
64780- atomic_read(&fscache_n_alloc_op_waits),
64781- atomic_read(&fscache_n_allocs_object_dead));
64782+ atomic_read_unchecked(&fscache_n_alloc_ops),
64783+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
64784+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
64785
64786 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
64787 " int=%u oom=%u\n",
64788- atomic_read(&fscache_n_retrievals),
64789- atomic_read(&fscache_n_retrievals_ok),
64790- atomic_read(&fscache_n_retrievals_wait),
64791- atomic_read(&fscache_n_retrievals_nodata),
64792- atomic_read(&fscache_n_retrievals_nobufs),
64793- atomic_read(&fscache_n_retrievals_intr),
64794- atomic_read(&fscache_n_retrievals_nomem));
64795+ atomic_read_unchecked(&fscache_n_retrievals),
64796+ atomic_read_unchecked(&fscache_n_retrievals_ok),
64797+ atomic_read_unchecked(&fscache_n_retrievals_wait),
64798+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
64799+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
64800+ atomic_read_unchecked(&fscache_n_retrievals_intr),
64801+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
64802 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
64803- atomic_read(&fscache_n_retrieval_ops),
64804- atomic_read(&fscache_n_retrieval_op_waits),
64805- atomic_read(&fscache_n_retrievals_object_dead));
64806+ atomic_read_unchecked(&fscache_n_retrieval_ops),
64807+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
64808+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
64809
64810 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
64811- atomic_read(&fscache_n_stores),
64812- atomic_read(&fscache_n_stores_ok),
64813- atomic_read(&fscache_n_stores_again),
64814- atomic_read(&fscache_n_stores_nobufs),
64815- atomic_read(&fscache_n_stores_oom));
64816+ atomic_read_unchecked(&fscache_n_stores),
64817+ atomic_read_unchecked(&fscache_n_stores_ok),
64818+ atomic_read_unchecked(&fscache_n_stores_again),
64819+ atomic_read_unchecked(&fscache_n_stores_nobufs),
64820+ atomic_read_unchecked(&fscache_n_stores_oom));
64821 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
64822- atomic_read(&fscache_n_store_ops),
64823- atomic_read(&fscache_n_store_calls),
64824- atomic_read(&fscache_n_store_pages),
64825- atomic_read(&fscache_n_store_radix_deletes),
64826- atomic_read(&fscache_n_store_pages_over_limit));
64827+ atomic_read_unchecked(&fscache_n_store_ops),
64828+ atomic_read_unchecked(&fscache_n_store_calls),
64829+ atomic_read_unchecked(&fscache_n_store_pages),
64830+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
64831+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
64832
64833 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
64834- atomic_read(&fscache_n_store_vmscan_not_storing),
64835- atomic_read(&fscache_n_store_vmscan_gone),
64836- atomic_read(&fscache_n_store_vmscan_busy),
64837- atomic_read(&fscache_n_store_vmscan_cancelled),
64838- atomic_read(&fscache_n_store_vmscan_wait));
64839+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
64840+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
64841+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
64842+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
64843+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
64844
64845 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
64846- atomic_read(&fscache_n_op_pend),
64847- atomic_read(&fscache_n_op_run),
64848- atomic_read(&fscache_n_op_enqueue),
64849- atomic_read(&fscache_n_op_cancelled),
64850- atomic_read(&fscache_n_op_rejected));
64851+ atomic_read_unchecked(&fscache_n_op_pend),
64852+ atomic_read_unchecked(&fscache_n_op_run),
64853+ atomic_read_unchecked(&fscache_n_op_enqueue),
64854+ atomic_read_unchecked(&fscache_n_op_cancelled),
64855+ atomic_read_unchecked(&fscache_n_op_rejected));
64856 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
64857- atomic_read(&fscache_n_op_deferred_release),
64858- atomic_read(&fscache_n_op_release),
64859- atomic_read(&fscache_n_op_gc));
64860+ atomic_read_unchecked(&fscache_n_op_deferred_release),
64861+ atomic_read_unchecked(&fscache_n_op_release),
64862+ atomic_read_unchecked(&fscache_n_op_gc));
64863
64864 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
64865 atomic_read(&fscache_n_cop_alloc_object),
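
The block of fscache conversions above is mechanical: under PaX's REFCOUNT hardening, plain atomic_t operations trap on signed overflow to stop reference-count exploits, so counters that are pure statistics (where wraparound is harmless) are switched to the *_unchecked variants to avoid false positives. A minimal standalone C sketch of that distinction; the wrapper names are modelled on the patch, not taken from the kernel:

/* checked vs. unchecked increments, userspace illustration only */
#include <stdio.h>
#include <limits.h>

static int checked_inc(int *v)			/* models atomic_inc() under REFCOUNT */
{
	if (*v == INT_MAX)
		return -1;			/* the kernel traps instead of wrapping */
	return ++*v;
}

static unsigned int unchecked_inc(unsigned int *v)	/* models atomic_inc_unchecked() */
{
	return ++*v;				/* wraparound is acceptable for a statistic */
}

int main(void)
{
	int refcount = INT_MAX;
	unsigned int stat = UINT_MAX;

	if (checked_inc(&refcount) < 0)
		puts("refcount overflow refused");	/* security-relevant counter */
	printf("stat wrapped to %u\n", unchecked_inc(&stat));	/* prints 0 */
	return 0;
}
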
64866diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
64867index 966ace8..030a03a 100644
64868--- a/fs/fuse/cuse.c
64869+++ b/fs/fuse/cuse.c
64870@@ -611,10 +611,12 @@ static int __init cuse_init(void)
64871 INIT_LIST_HEAD(&cuse_conntbl[i]);
64872
64873 /* inherit and extend fuse_dev_operations */
64874- cuse_channel_fops = fuse_dev_operations;
64875- cuse_channel_fops.owner = THIS_MODULE;
64876- cuse_channel_fops.open = cuse_channel_open;
64877- cuse_channel_fops.release = cuse_channel_release;
64878+ pax_open_kernel();
64879+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
64880+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
64881+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
64882+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
64883+ pax_close_kernel();
64884
64885 cuse_class = class_create(THIS_MODULE, "cuse");
64886 if (IS_ERR(cuse_class))
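
The cuse hunk above exists because grsecurity constifies file_operations tables: they live in read-only memory, so cuse can no longer assign the fields directly and must open a brief write window with pax_open_kernel()/pax_close_kernel(). A userspace analogy using POSIX mprotect(); the kernel mechanism (toggling write protection on the mapping) differs in detail, and the 4096-byte page alignment is an assumption of the demo:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct ops { int (*open)(void); };		/* stands in for file_operations */

static int my_open(void) { return 42; }

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	/* page-aligned table, conceptually kept read-only */
	static struct ops table __attribute__((aligned(4096)));

	mprotect(&table, pagesz, PROT_READ | PROT_WRITE);	/* ~ pax_open_kernel() */
	table.open = my_open;					/* patch the pointer */
	mprotect(&table, pagesz, PROT_READ);			/* ~ pax_close_kernel() */

	printf("open() -> %d\n", table.open());
	return 0;
}
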
64887diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
64888index ca88731..8e9c55d 100644
64889--- a/fs/fuse/dev.c
64890+++ b/fs/fuse/dev.c
64891@@ -1318,7 +1318,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
64892 ret = 0;
64893 pipe_lock(pipe);
64894
64895- if (!pipe->readers) {
64896+ if (!atomic_read(&pipe->readers)) {
64897 send_sig(SIGPIPE, current, 0);
64898 if (!ret)
64899 ret = -EPIPE;
64900@@ -1347,7 +1347,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
64901 page_nr++;
64902 ret += buf->len;
64903
64904- if (pipe->files)
64905+ if (atomic_read(&pipe->files))
64906 do_wakeup = 1;
64907 }
64908
64909diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
64910index 0c60482..025724f 100644
64911--- a/fs/fuse/dir.c
64912+++ b/fs/fuse/dir.c
64913@@ -1485,7 +1485,7 @@ static char *read_link(struct dentry *dentry)
64914 return link;
64915 }
64916
64917-static void free_link(char *link)
64918+static void free_link(const char *link)
64919 {
64920 if (!IS_ERR(link))
64921 free_page((unsigned long) link);
64922diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
64923index bb529f3..454c253 100644
64924--- a/fs/hostfs/hostfs_kern.c
64925+++ b/fs/hostfs/hostfs_kern.c
64926@@ -898,7 +898,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
64927
64928 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
64929 {
64930- char *s = nd_get_link(nd);
64931+ const char *s = nd_get_link(nd);
64932 if (!IS_ERR(s))
64933 __putname(s);
64934 }
64935diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
64936index 1e2872b..7aea000 100644
64937--- a/fs/hugetlbfs/inode.c
64938+++ b/fs/hugetlbfs/inode.c
64939@@ -154,6 +154,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
64940 struct mm_struct *mm = current->mm;
64941 struct vm_area_struct *vma;
64942 struct hstate *h = hstate_file(file);
64943+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
64944 struct vm_unmapped_area_info info;
64945
64946 if (len & ~huge_page_mask(h))
64947@@ -167,17 +168,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
64948 return addr;
64949 }
64950
64951+#ifdef CONFIG_PAX_RANDMMAP
64952+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64953+#endif
64954+
64955 if (addr) {
64956 addr = ALIGN(addr, huge_page_size(h));
64957 vma = find_vma(mm, addr);
64958- if (TASK_SIZE - len >= addr &&
64959- (!vma || addr + len <= vma->vm_start))
64960+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
64961 return addr;
64962 }
64963
64964 info.flags = 0;
64965 info.length = len;
64966 info.low_limit = TASK_UNMAPPED_BASE;
64967+
64968+#ifdef CONFIG_PAX_RANDMMAP
64969+ if (mm->pax_flags & MF_PAX_RANDMMAP)
64970+ info.low_limit += mm->delta_mmap;
64971+#endif
64972+
64973 info.high_limit = TASK_SIZE;
64974 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
64975 info.align_offset = 0;
64976@@ -919,7 +929,7 @@ static struct file_system_type hugetlbfs_fs_type = {
64977 };
64978 MODULE_ALIAS_FS("hugetlbfs");
64979
64980-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
64981+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
64982
64983 static int can_do_hugetlb_shm(void)
64984 {
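
The hugetlbfs hunk above is the standard PAX_RANDMMAP treatment: when randomization is active the caller's address hint is ignored and the bottom of the unmapped-area search window is raised by a per-process random delta (mm->delta_mmap). A simplified model of that control flow; the constants and the randomness source are illustrative only:

#include <stdio.h>
#include <stdlib.h>

#define TASK_UNMAPPED_BASE	0x40000000UL
#define HUGEPAGE_SIZE		0x200000UL	/* 2 MiB */

static unsigned long get_unmapped_area(unsigned long hint, int randmmap,
				       unsigned long delta_mmap)
{
	unsigned long low = TASK_UNMAPPED_BASE;

	if (!randmmap && hint)			/* hints honoured only without RANDMMAP */
		return hint & ~(HUGEPAGE_SIZE - 1);
	if (randmmap)
		low += delta_mmap;		/* randomised search floor */
	return low;
}

int main(void)
{
	/* rand() without srand() is deterministic; fine for a demo */
	unsigned long delta = ((unsigned long)rand() & 0xff) * HUGEPAGE_SIZE;

	printf("fixed : %#lx\n", get_unmapped_area(0, 0, 0));
	printf("random: %#lx\n", get_unmapped_area(0, 1, delta));
	return 0;
}
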
64985diff --git a/fs/inode.c b/fs/inode.c
64986index 6eecb7f..abec305 100644
64987--- a/fs/inode.c
64988+++ b/fs/inode.c
64989@@ -839,16 +839,20 @@ unsigned int get_next_ino(void)
64990 unsigned int *p = &get_cpu_var(last_ino);
64991 unsigned int res = *p;
64992
64993+start:
64994+
64995 #ifdef CONFIG_SMP
64996 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
64997- static atomic_t shared_last_ino;
64998- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
64999+ static atomic_unchecked_t shared_last_ino;
65000+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
65001
65002 res = next - LAST_INO_BATCH;
65003 }
65004 #endif
65005
65006- *p = ++res;
65007+ if (unlikely(!++res))
65008+ goto start; /* never zero */
65009+ *p = res;
65010 put_cpu_var(last_ino);
65011 return res;
65012 }
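
The get_next_ino() rewrite above guards the 32-bit wraparound: inode number 0 conventionally means "no inode", so on overflow the counter skips past zero instead of handing it out. Standalone model of the fast path (the per-cpu batching and the SMP branch are omitted):

#include <stdio.h>

static unsigned int last_ino = 0xffffffffu;	/* about to wrap */

static unsigned int next_ino(void)
{
	unsigned int res = last_ino;

	do {
		res++;
	} while (res == 0);			/* never hand out ino 0 */
	last_ino = res;
	return res;
}

int main(void)
{
	printf("%u\n", next_ino());		/* prints 1, not 0 */
	return 0;
}
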
65013diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
65014index 4a6cf28..d3a29d3 100644
65015--- a/fs/jffs2/erase.c
65016+++ b/fs/jffs2/erase.c
65017@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
65018 struct jffs2_unknown_node marker = {
65019 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
65020 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
65021- .totlen = cpu_to_je32(c->cleanmarker_size)
65022+ .totlen = cpu_to_je32(c->cleanmarker_size),
65023+ .hdr_crc = cpu_to_je32(0)
65024 };
65025
65026 jffs2_prealloc_raw_node_refs(c, jeb, 1);
65027diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
65028index a6597d6..41b30ec 100644
65029--- a/fs/jffs2/wbuf.c
65030+++ b/fs/jffs2/wbuf.c
65031@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
65032 {
65033 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
65034 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
65035- .totlen = constant_cpu_to_je32(8)
65036+ .totlen = constant_cpu_to_je32(8),
65037+ .hdr_crc = constant_cpu_to_je32(0)
65038 };
65039
65040 /*
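
A note on the two JFFS2 hunks above: in C, members omitted from a designated initializer are zero-initialized anyway, so the added .hdr_crc = cpu_to_je32(0) does not change the stored value; reading it as documentation of intent (or as appeasement of one of the patch's analysis plugins) is an inference, not something the patch states. Quick demonstration of the language rule:

#include <stdio.h>

struct node {
	unsigned short magic;
	unsigned int totlen;
	unsigned int hdr_crc;
};

int main(void)
{
	struct node n = { .magic = 0x1985 };	/* totlen, hdr_crc omitted */

	printf("totlen=%u hdr_crc=%u\n", n.totlen, n.hdr_crc);	/* both 0 */
	return 0;
}
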
65041diff --git a/fs/jfs/super.c b/fs/jfs/super.c
65042index adf8cb0..bb935fa 100644
65043--- a/fs/jfs/super.c
65044+++ b/fs/jfs/super.c
65045@@ -893,7 +893,7 @@ static int __init init_jfs_fs(void)
65046
65047 jfs_inode_cachep =
65048 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
65049- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
65050+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
65051 init_once);
65052 if (jfs_inode_cachep == NULL)
65053 return -ENOMEM;
65054diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
65055index a693f5b..82276a1 100644
65056--- a/fs/kernfs/dir.c
65057+++ b/fs/kernfs/dir.c
65058@@ -182,7 +182,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
65059 *
65060 * Returns 31 bit hash of ns + name (so it fits in an off_t )
65061 */
65062-static unsigned int kernfs_name_hash(const char *name, const void *ns)
65063+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
65064 {
65065 unsigned long hash = init_name_hash();
65066 unsigned int len = strlen(name);
65067diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
65068index d895b4b..0b8af77 100644
65069--- a/fs/kernfs/file.c
65070+++ b/fs/kernfs/file.c
65071@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
65072
65073 struct kernfs_open_node {
65074 atomic_t refcnt;
65075- atomic_t event;
65076+ atomic_unchecked_t event;
65077 wait_queue_head_t poll;
65078 struct list_head files; /* goes through kernfs_open_file.list */
65079 };
65080@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
65081 {
65082 struct kernfs_open_file *of = sf->private;
65083
65084- of->event = atomic_read(&of->kn->attr.open->event);
65085+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
65086
65087 return of->kn->attr.ops->seq_show(sf, v);
65088 }
65089@@ -375,12 +375,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
65090 return ret;
65091 }
65092
65093-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
65094- void *buf, int len, int write)
65095+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
65096+ void *buf, size_t len, int write)
65097 {
65098 struct file *file = vma->vm_file;
65099 struct kernfs_open_file *of = kernfs_of(file);
65100- int ret;
65101+ ssize_t ret;
65102
65103 if (!of->vm_ops)
65104 return -EINVAL;
65105@@ -581,7 +581,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
65106 return -ENOMEM;
65107
65108 atomic_set(&new_on->refcnt, 0);
65109- atomic_set(&new_on->event, 1);
65110+ atomic_set_unchecked(&new_on->event, 1);
65111 init_waitqueue_head(&new_on->poll);
65112 INIT_LIST_HEAD(&new_on->files);
65113 goto retry;
65114@@ -787,7 +787,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
65115
65116 kernfs_put_active(kn);
65117
65118- if (of->event != atomic_read(&on->event))
65119+ if (of->event != atomic_read_unchecked(&on->event))
65120 goto trigger;
65121
65122 return DEFAULT_POLLMASK;
65123@@ -818,7 +818,7 @@ repeat:
65124
65125 on = kn->attr.open;
65126 if (on) {
65127- atomic_inc(&on->event);
65128+ atomic_inc_unchecked(&on->event);
65129 wake_up_interruptible(&on->poll);
65130 }
65131
65132diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
65133index 8a19889..4c3069a 100644
65134--- a/fs/kernfs/symlink.c
65135+++ b/fs/kernfs/symlink.c
65136@@ -128,7 +128,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
65137 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
65138 void *cookie)
65139 {
65140- char *page = nd_get_link(nd);
65141+ const char *page = nd_get_link(nd);
65142 if (!IS_ERR(page))
65143 free_page((unsigned long)page);
65144 }
65145diff --git a/fs/libfs.c b/fs/libfs.c
65146index 88e3e00..979c262 100644
65147--- a/fs/libfs.c
65148+++ b/fs/libfs.c
65149@@ -160,6 +160,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
65150
65151 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
65152 struct dentry *next = list_entry(p, struct dentry, d_u.d_child);
65153+ char d_name[sizeof(next->d_iname)];
65154+ const unsigned char *name;
65155+
65156 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
65157 if (!simple_positive(next)) {
65158 spin_unlock(&next->d_lock);
65159@@ -168,7 +171,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
65160
65161 spin_unlock(&next->d_lock);
65162 spin_unlock(&dentry->d_lock);
65163- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
65164+ name = next->d_name.name;
65165+ if (name == next->d_iname) {
65166+ memcpy(d_name, name, next->d_name.len);
65167+ name = d_name;
65168+ }
65169+ if (!dir_emit(ctx, name, next->d_name.len,
65170 next->d_inode->i_ino, dt_type(next->d_inode)))
65171 return 0;
65172 spin_lock(&dentry->d_lock);
65173@@ -1027,7 +1035,7 @@ EXPORT_SYMBOL(noop_fsync);
65174 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
65175 void *cookie)
65176 {
65177- char *s = nd_get_link(nd);
65178+ const char *s = nd_get_link(nd);
65179 if (!IS_ERR(s))
65180 kfree(s);
65181 }
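
The dcache_readdir() change above defends names stored inline in the dentry (d_iname): once d_lock is dropped, a concurrent rename may rewrite that inline storage while dir_emit() is still reading it, so the name is snapshotted onto the stack first. A simplified single-threaded model of the copy (locking elided; DNAME_INLINE_LEN is illustrative):

#include <stdio.h>
#include <string.h>

#define DNAME_INLINE_LEN 32

struct dentry {
	const char *name;		/* points at iname for short names */
	unsigned int len;
	char iname[DNAME_INLINE_LEN];
};

static void emit(const struct dentry *d)
{
	char snap[DNAME_INLINE_LEN];
	const char *name = d->name;

	if (name == d->iname) {		/* inline name: copy before "unlocking" */
		memcpy(snap, name, d->len);
		name = snap;
	}
	/* ...the lock would be dropped here; 'name' now stays stable... */
	printf("%.*s\n", (int)d->len, name);
}

int main(void)
{
	struct dentry d = { .len = 5 };

	memcpy(d.iname, "hello", 6);
	d.name = d.iname;
	emit(&d);
	return 0;
}
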
65182diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
65183index acd3947..1f896e2 100644
65184--- a/fs/lockd/clntproc.c
65185+++ b/fs/lockd/clntproc.c
65186@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
65187 /*
65188 * Cookie counter for NLM requests
65189 */
65190-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
65191+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
65192
65193 void nlmclnt_next_cookie(struct nlm_cookie *c)
65194 {
65195- u32 cookie = atomic_inc_return(&nlm_cookie);
65196+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
65197
65198 memcpy(c->data, &cookie, 4);
65199 c->len=4;
65200diff --git a/fs/locks.c b/fs/locks.c
65201index 717fbc4..74628c3 100644
65202--- a/fs/locks.c
65203+++ b/fs/locks.c
65204@@ -2327,7 +2327,7 @@ void locks_remove_file(struct file *filp)
65205 locks_remove_posix(filp, (fl_owner_t)filp);
65206
65207 if (filp->f_op->flock) {
65208- struct file_lock fl = {
65209+ struct file_lock flock = {
65210 .fl_owner = (fl_owner_t)filp,
65211 .fl_pid = current->tgid,
65212 .fl_file = filp,
65213@@ -2335,9 +2335,9 @@ void locks_remove_file(struct file *filp)
65214 .fl_type = F_UNLCK,
65215 .fl_end = OFFSET_MAX,
65216 };
65217- filp->f_op->flock(filp, F_SETLKW, &fl);
65218- if (fl.fl_ops && fl.fl_ops->fl_release_private)
65219- fl.fl_ops->fl_release_private(&fl);
65220+ filp->f_op->flock(filp, F_SETLKW, &flock);
65221+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
65222+ flock.fl_ops->fl_release_private(&flock);
65223 }
65224
65225 spin_lock(&inode->i_lock);
65226diff --git a/fs/mount.h b/fs/mount.h
65227index d55297f..f5b28c5 100644
65228--- a/fs/mount.h
65229+++ b/fs/mount.h
65230@@ -11,7 +11,7 @@ struct mnt_namespace {
65231 u64 seq; /* Sequence number to prevent loops */
65232 wait_queue_head_t poll;
65233 u64 event;
65234-};
65235+} __randomize_layout;
65236
65237 struct mnt_pcp {
65238 int mnt_count;
65239@@ -57,7 +57,7 @@ struct mount {
65240 int mnt_expiry_mark; /* true if marked for expiry */
65241 int mnt_pinned;
65242 struct path mnt_ex_mountpoint;
65243-};
65244+} __randomize_layout;
65245
65246 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
65247
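
The __randomize_layout markers above hand these structures to grsecurity's structure-layout randomization GCC plugin, which shuffles field order at build time so an attacker cannot rely on fixed member offsets. The effect, illustrated with two hand-permuted layouts standing in for two randomized builds (the real shuffle is compiler-driven):

#include <stdio.h>
#include <stddef.h>

struct mnt_ns_a { long seq; void *root; int count; };	/* "build 1" layout */
struct mnt_ns_b { int count; long seq; void *root; };	/* "build 2" layout */

int main(void)
{
	printf("offsetof(seq): %zu vs %zu\n",
	       offsetof(struct mnt_ns_a, seq),
	       offsetof(struct mnt_ns_b, seq));
	return 0;
}
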
65248diff --git a/fs/namei.c b/fs/namei.c
65249index 9eb787e..5f520b67 100644
65250--- a/fs/namei.c
65251+++ b/fs/namei.c
65252@@ -330,17 +330,32 @@ int generic_permission(struct inode *inode, int mask)
65253 if (ret != -EACCES)
65254 return ret;
65255
65256+#ifdef CONFIG_GRKERNSEC
65257+ /* we'll block if we have to log due to a denied capability use */
65258+ if (mask & MAY_NOT_BLOCK)
65259+ return -ECHILD;
65260+#endif
65261+
65262 if (S_ISDIR(inode->i_mode)) {
65263 /* DACs are overridable for directories */
65264- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
65265- return 0;
65266 if (!(mask & MAY_WRITE))
65267- if (capable_wrt_inode_uidgid(inode,
65268- CAP_DAC_READ_SEARCH))
65269+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
65270+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
65271 return 0;
65272+ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
65273+ return 0;
65274 return -EACCES;
65275 }
65276 /*
65277+ * Searching includes executable on directories, else just read.
65278+ */
65279+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
65280+ if (mask == MAY_READ)
65281+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
65282+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
65283+ return 0;
65284+
65285+ /*
65286 * Read/write DACs are always overridable.
65287 * Executable DACs are overridable when there is
65288 * at least one exec bit set.
65289@@ -349,14 +364,6 @@ int generic_permission(struct inode *inode, int mask)
65290 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
65291 return 0;
65292
65293- /*
65294- * Searching includes executable on directories, else just read.
65295- */
65296- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
65297- if (mask == MAY_READ)
65298- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
65299- return 0;
65300-
65301 return -EACCES;
65302 }
65303 EXPORT_SYMBOL(generic_permission);
65304@@ -824,7 +831,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
65305 {
65306 struct dentry *dentry = link->dentry;
65307 int error;
65308- char *s;
65309+ const char *s;
65310
65311 BUG_ON(nd->flags & LOOKUP_RCU);
65312
65313@@ -845,6 +852,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
65314 if (error)
65315 goto out_put_nd_path;
65316
65317+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
65318+ dentry->d_inode, dentry, nd->path.mnt)) {
65319+ error = -EACCES;
65320+ goto out_put_nd_path;
65321+ }
65322+
65323 nd->last_type = LAST_BIND;
65324 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
65325 error = PTR_ERR(*p);
65326@@ -1596,6 +1609,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
65327 if (res)
65328 break;
65329 res = walk_component(nd, path, LOOKUP_FOLLOW);
65330+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
65331+ res = -EACCES;
65332 put_link(nd, &link, cookie);
65333 } while (res > 0);
65334
65335@@ -1669,7 +1684,7 @@ EXPORT_SYMBOL(full_name_hash);
65336 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
65337 {
65338 unsigned long a, b, adata, bdata, mask, hash, len;
65339- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
65340+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
65341
65342 hash = a = 0;
65343 len = -sizeof(unsigned long);
65344@@ -1953,6 +1968,8 @@ static int path_lookupat(int dfd, const char *name,
65345 if (err)
65346 break;
65347 err = lookup_last(nd, &path);
65348+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
65349+ err = -EACCES;
65350 put_link(nd, &link, cookie);
65351 }
65352 }
65353@@ -1960,6 +1977,13 @@ static int path_lookupat(int dfd, const char *name,
65354 if (!err)
65355 err = complete_walk(nd);
65356
65357+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
65358+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
65359+ path_put(&nd->path);
65360+ err = -ENOENT;
65361+ }
65362+ }
65363+
65364 if (!err && nd->flags & LOOKUP_DIRECTORY) {
65365 if (!d_can_lookup(nd->path.dentry)) {
65366 path_put(&nd->path);
65367@@ -1987,8 +2011,15 @@ static int filename_lookup(int dfd, struct filename *name,
65368 retval = path_lookupat(dfd, name->name,
65369 flags | LOOKUP_REVAL, nd);
65370
65371- if (likely(!retval))
65372+ if (likely(!retval)) {
65373 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
65374+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
65375+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
65376+ path_put(&nd->path);
65377+ return -ENOENT;
65378+ }
65379+ }
65380+ }
65381 return retval;
65382 }
65383
65384@@ -2570,6 +2601,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
65385 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
65386 return -EPERM;
65387
65388+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
65389+ return -EPERM;
65390+ if (gr_handle_rawio(inode))
65391+ return -EPERM;
65392+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
65393+ return -EACCES;
65394+
65395 return 0;
65396 }
65397
65398@@ -2801,7 +2839,7 @@ looked_up:
65399 * cleared otherwise prior to returning.
65400 */
65401 static int lookup_open(struct nameidata *nd, struct path *path,
65402- struct file *file,
65403+ struct path *link, struct file *file,
65404 const struct open_flags *op,
65405 bool got_write, int *opened)
65406 {
65407@@ -2836,6 +2874,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
65408 /* Negative dentry, just create the file */
65409 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
65410 umode_t mode = op->mode;
65411+
65412+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
65413+ error = -EACCES;
65414+ goto out_dput;
65415+ }
65416+
65417+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
65418+ error = -EACCES;
65419+ goto out_dput;
65420+ }
65421+
65422 if (!IS_POSIXACL(dir->d_inode))
65423 mode &= ~current_umask();
65424 /*
65425@@ -2857,6 +2906,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
65426 nd->flags & LOOKUP_EXCL);
65427 if (error)
65428 goto out_dput;
65429+ else
65430+ gr_handle_create(dentry, nd->path.mnt);
65431 }
65432 out_no_open:
65433 path->dentry = dentry;
65434@@ -2871,7 +2922,7 @@ out_dput:
65435 /*
65436 * Handle the last step of open()
65437 */
65438-static int do_last(struct nameidata *nd, struct path *path,
65439+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
65440 struct file *file, const struct open_flags *op,
65441 int *opened, struct filename *name)
65442 {
65443@@ -2921,6 +2972,15 @@ static int do_last(struct nameidata *nd, struct path *path,
65444 if (error)
65445 return error;
65446
65447+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
65448+ error = -ENOENT;
65449+ goto out;
65450+ }
65451+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
65452+ error = -EACCES;
65453+ goto out;
65454+ }
65455+
65456 audit_inode(name, dir, LOOKUP_PARENT);
65457 error = -EISDIR;
65458 /* trailing slashes? */
65459@@ -2940,7 +3000,7 @@ retry_lookup:
65460 */
65461 }
65462 mutex_lock(&dir->d_inode->i_mutex);
65463- error = lookup_open(nd, path, file, op, got_write, opened);
65464+ error = lookup_open(nd, path, link, file, op, got_write, opened);
65465 mutex_unlock(&dir->d_inode->i_mutex);
65466
65467 if (error <= 0) {
65468@@ -2964,11 +3024,28 @@ retry_lookup:
65469 goto finish_open_created;
65470 }
65471
65472+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
65473+ error = -ENOENT;
65474+ goto exit_dput;
65475+ }
65476+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
65477+ error = -EACCES;
65478+ goto exit_dput;
65479+ }
65480+
65481 /*
65482 * create/update audit record if it already exists.
65483 */
65484- if (d_is_positive(path->dentry))
65485+ if (d_is_positive(path->dentry)) {
65486+ /* only check if O_CREAT is specified, all other checks need to go
65487+ into may_open */
65488+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
65489+ error = -EACCES;
65490+ goto exit_dput;
65491+ }
65492+
65493 audit_inode(name, path->dentry, 0);
65494+ }
65495
65496 /*
65497 * If atomic_open() acquired write access it is dropped now due to
65498@@ -3009,6 +3086,11 @@ finish_lookup:
65499 }
65500 }
65501 BUG_ON(inode != path->dentry->d_inode);
65502+ /* if we're resolving a symlink to another symlink */
65503+ if (link && gr_handle_symlink_owner(link, inode)) {
65504+ error = -EACCES;
65505+ goto out;
65506+ }
65507 return 1;
65508 }
65509
65510@@ -3018,7 +3100,6 @@ finish_lookup:
65511 save_parent.dentry = nd->path.dentry;
65512 save_parent.mnt = mntget(path->mnt);
65513 nd->path.dentry = path->dentry;
65514-
65515 }
65516 nd->inode = inode;
65517 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
65518@@ -3028,7 +3109,18 @@ finish_open:
65519 path_put(&save_parent);
65520 return error;
65521 }
65522+
65523+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
65524+ error = -ENOENT;
65525+ goto out;
65526+ }
65527+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
65528+ error = -EACCES;
65529+ goto out;
65530+ }
65531+
65532 audit_inode(name, nd->path.dentry, 0);
65533+
65534 error = -EISDIR;
65535 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
65536 goto out;
65537@@ -3191,7 +3283,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
65538 if (unlikely(error))
65539 goto out;
65540
65541- error = do_last(nd, &path, file, op, &opened, pathname);
65542+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
65543 while (unlikely(error > 0)) { /* trailing symlink */
65544 struct path link = path;
65545 void *cookie;
65546@@ -3209,7 +3301,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
65547 error = follow_link(&link, nd, &cookie);
65548 if (unlikely(error))
65549 break;
65550- error = do_last(nd, &path, file, op, &opened, pathname);
65551+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
65552 put_link(nd, &link, cookie);
65553 }
65554 out:
65555@@ -3309,9 +3401,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
65556 goto unlock;
65557
65558 error = -EEXIST;
65559- if (d_is_positive(dentry))
65560+ if (d_is_positive(dentry)) {
65561+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
65562+ error = -ENOENT;
65563 goto fail;
65564-
65565+ }
65566 /*
65567 * Special case - lookup gave negative, but... we had foo/bar/
65568 * From the vfs_mknod() POV we just have a negative dentry -
65569@@ -3363,6 +3457,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
65570 }
65571 EXPORT_SYMBOL(user_path_create);
65572
65573+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
65574+{
65575+ struct filename *tmp = getname(pathname);
65576+ struct dentry *res;
65577+ if (IS_ERR(tmp))
65578+ return ERR_CAST(tmp);
65579+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
65580+ if (IS_ERR(res))
65581+ putname(tmp);
65582+ else
65583+ *to = tmp;
65584+ return res;
65585+}
65586+
65587 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
65588 {
65589 int error = may_create(dir, dentry);
65590@@ -3426,6 +3534,17 @@ retry:
65591
65592 if (!IS_POSIXACL(path.dentry->d_inode))
65593 mode &= ~current_umask();
65594+
65595+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
65596+ error = -EPERM;
65597+ goto out;
65598+ }
65599+
65600+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
65601+ error = -EACCES;
65602+ goto out;
65603+ }
65604+
65605 error = security_path_mknod(&path, dentry, mode, dev);
65606 if (error)
65607 goto out;
65608@@ -3442,6 +3561,8 @@ retry:
65609 break;
65610 }
65611 out:
65612+ if (!error)
65613+ gr_handle_create(dentry, path.mnt);
65614 done_path_create(&path, dentry);
65615 if (retry_estale(error, lookup_flags)) {
65616 lookup_flags |= LOOKUP_REVAL;
65617@@ -3495,9 +3616,16 @@ retry:
65618
65619 if (!IS_POSIXACL(path.dentry->d_inode))
65620 mode &= ~current_umask();
65621+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
65622+ error = -EACCES;
65623+ goto out;
65624+ }
65625 error = security_path_mkdir(&path, dentry, mode);
65626 if (!error)
65627 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
65628+ if (!error)
65629+ gr_handle_create(dentry, path.mnt);
65630+out:
65631 done_path_create(&path, dentry);
65632 if (retry_estale(error, lookup_flags)) {
65633 lookup_flags |= LOOKUP_REVAL;
65634@@ -3580,6 +3708,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
65635 struct filename *name;
65636 struct dentry *dentry;
65637 struct nameidata nd;
65638+ ino_t saved_ino = 0;
65639+ dev_t saved_dev = 0;
65640 unsigned int lookup_flags = 0;
65641 retry:
65642 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
65643@@ -3612,10 +3742,21 @@ retry:
65644 error = -ENOENT;
65645 goto exit3;
65646 }
65647+
65648+ saved_ino = dentry->d_inode->i_ino;
65649+ saved_dev = gr_get_dev_from_dentry(dentry);
65650+
65651+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
65652+ error = -EACCES;
65653+ goto exit3;
65654+ }
65655+
65656 error = security_path_rmdir(&nd.path, dentry);
65657 if (error)
65658 goto exit3;
65659 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
65660+ if (!error && (saved_dev || saved_ino))
65661+ gr_handle_delete(saved_ino, saved_dev);
65662 exit3:
65663 dput(dentry);
65664 exit2:
65665@@ -3706,6 +3847,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
65666 struct nameidata nd;
65667 struct inode *inode = NULL;
65668 struct inode *delegated_inode = NULL;
65669+ ino_t saved_ino = 0;
65670+ dev_t saved_dev = 0;
65671 unsigned int lookup_flags = 0;
65672 retry:
65673 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
65674@@ -3732,10 +3875,22 @@ retry_deleg:
65675 if (d_is_negative(dentry))
65676 goto slashes;
65677 ihold(inode);
65678+
65679+ if (inode->i_nlink <= 1) {
65680+ saved_ino = inode->i_ino;
65681+ saved_dev = gr_get_dev_from_dentry(dentry);
65682+ }
65683+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
65684+ error = -EACCES;
65685+ goto exit2;
65686+ }
65687+
65688 error = security_path_unlink(&nd.path, dentry);
65689 if (error)
65690 goto exit2;
65691 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
65692+ if (!error && (saved_ino || saved_dev))
65693+ gr_handle_delete(saved_ino, saved_dev);
65694 exit2:
65695 dput(dentry);
65696 }
65697@@ -3824,9 +3979,17 @@ retry:
65698 if (IS_ERR(dentry))
65699 goto out_putname;
65700
65701+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
65702+ error = -EACCES;
65703+ goto out;
65704+ }
65705+
65706 error = security_path_symlink(&path, dentry, from->name);
65707 if (!error)
65708 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
65709+ if (!error)
65710+ gr_handle_create(dentry, path.mnt);
65711+out:
65712 done_path_create(&path, dentry);
65713 if (retry_estale(error, lookup_flags)) {
65714 lookup_flags |= LOOKUP_REVAL;
65715@@ -3930,6 +4093,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
65716 struct dentry *new_dentry;
65717 struct path old_path, new_path;
65718 struct inode *delegated_inode = NULL;
65719+ struct filename *to = NULL;
65720 int how = 0;
65721 int error;
65722
65723@@ -3953,7 +4117,7 @@ retry:
65724 if (error)
65725 return error;
65726
65727- new_dentry = user_path_create(newdfd, newname, &new_path,
65728+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
65729 (how & LOOKUP_REVAL));
65730 error = PTR_ERR(new_dentry);
65731 if (IS_ERR(new_dentry))
65732@@ -3965,11 +4129,28 @@ retry:
65733 error = may_linkat(&old_path);
65734 if (unlikely(error))
65735 goto out_dput;
65736+
65737+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
65738+ old_path.dentry->d_inode,
65739+ old_path.dentry->d_inode->i_mode, to)) {
65740+ error = -EACCES;
65741+ goto out_dput;
65742+ }
65743+
65744+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
65745+ old_path.dentry, old_path.mnt, to)) {
65746+ error = -EACCES;
65747+ goto out_dput;
65748+ }
65749+
65750 error = security_path_link(old_path.dentry, &new_path, new_dentry);
65751 if (error)
65752 goto out_dput;
65753 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
65754+ if (!error)
65755+ gr_handle_create(new_dentry, new_path.mnt);
65756 out_dput:
65757+ putname(to);
65758 done_path_create(&new_path, new_dentry);
65759 if (delegated_inode) {
65760 error = break_deleg_wait(&delegated_inode);
65761@@ -4279,6 +4460,12 @@ retry_deleg:
65762 if (new_dentry == trap)
65763 goto exit5;
65764
65765+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
65766+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
65767+ to, flags);
65768+ if (error)
65769+ goto exit5;
65770+
65771 error = security_path_rename(&oldnd.path, old_dentry,
65772 &newnd.path, new_dentry, flags);
65773 if (error)
65774@@ -4286,6 +4473,9 @@ retry_deleg:
65775 error = vfs_rename(old_dir->d_inode, old_dentry,
65776 new_dir->d_inode, new_dentry,
65777 &delegated_inode, flags);
65778+ if (!error)
65779+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
65780+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0, flags);
65781 exit5:
65782 dput(new_dentry);
65783 exit4:
65784@@ -4328,14 +4518,24 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
65785
65786 int readlink_copy(char __user *buffer, int buflen, const char *link)
65787 {
65788+ char tmpbuf[64];
65789+ const char *newlink;
65790 int len = PTR_ERR(link);
65791+
65792 if (IS_ERR(link))
65793 goto out;
65794
65795 len = strlen(link);
65796 if (len > (unsigned) buflen)
65797 len = buflen;
65798- if (copy_to_user(buffer, link, len))
65799+
65800+ if (len < sizeof(tmpbuf)) {
65801+ memcpy(tmpbuf, link, len);
65802+ newlink = tmpbuf;
65803+ } else
65804+ newlink = link;
65805+
65806+ if (copy_to_user(buffer, newlink, len))
65807 len = -EFAULT;
65808 out:
65809 return len;
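
The readlink_copy() hunk above bounces link targets shorter than 64 bytes through a stack buffer before copy_to_user(). A plausible reading is that this keeps the usercopy source out of filesystem-internal object storage that the patch's usercopy hardening would otherwise have to whitelist; treat that rationale as an inference rather than documented fact. Userspace model of the data flow:

#include <stdio.h>
#include <string.h>

static long copy_link(char *ubuf, size_t buflen, const char *link)
{
	char tmpbuf[64];
	const char *src = link;
	size_t len = strlen(link);

	if (len > buflen)
		len = buflen;
	if (len < sizeof(tmpbuf)) {	/* short link: bounce via the stack */
		memcpy(tmpbuf, link, len);
		src = tmpbuf;
	}
	memcpy(ubuf, src, len);		/* stands in for copy_to_user() */
	return (long)len;
}

int main(void)
{
	char out[128];
	long n = copy_link(out, sizeof(out), "/tmp/target");

	printf("%.*s (%ld bytes)\n", (int)n, out, n);
	return 0;
}
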
65810diff --git a/fs/namespace.c b/fs/namespace.c
65811index 182bc41..72e3cf1 100644
65812--- a/fs/namespace.c
65813+++ b/fs/namespace.c
65814@@ -1348,6 +1348,9 @@ static int do_umount(struct mount *mnt, int flags)
65815 if (!(sb->s_flags & MS_RDONLY))
65816 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
65817 up_write(&sb->s_umount);
65818+
65819+ gr_log_remount(mnt->mnt_devname, retval);
65820+
65821 return retval;
65822 }
65823
65824@@ -1370,6 +1373,9 @@ static int do_umount(struct mount *mnt, int flags)
65825 }
65826 unlock_mount_hash();
65827 namespace_unlock();
65828+
65829+ gr_log_unmount(mnt->mnt_devname, retval);
65830+
65831 return retval;
65832 }
65833
65834@@ -1389,7 +1395,7 @@ static inline bool may_mount(void)
65835 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
65836 */
65837
65838-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
65839+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
65840 {
65841 struct path path;
65842 struct mount *mnt;
65843@@ -1431,7 +1437,7 @@ out:
65844 /*
65845 * The 2.0 compatible umount. No flags.
65846 */
65847-SYSCALL_DEFINE1(oldumount, char __user *, name)
65848+SYSCALL_DEFINE1(oldumount, const char __user *, name)
65849 {
65850 return sys_umount(name, 0);
65851 }
65852@@ -2440,6 +2446,16 @@ long do_mount(const char *dev_name, const char *dir_name,
65853 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
65854 MS_STRICTATIME);
65855
65856+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
65857+ retval = -EPERM;
65858+ goto dput_out;
65859+ }
65860+
65861+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
65862+ retval = -EPERM;
65863+ goto dput_out;
65864+ }
65865+
65866 if (flags & MS_REMOUNT)
65867 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
65868 data_page);
65869@@ -2454,6 +2470,9 @@ long do_mount(const char *dev_name, const char *dir_name,
65870 dev_name, data_page);
65871 dput_out:
65872 path_put(&path);
65873+
65874+ gr_log_mount(dev_name, dir_name, retval);
65875+
65876 return retval;
65877 }
65878
65879@@ -2471,7 +2490,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
65880 * number incrementing at 10Ghz will take 12,427 years to wrap which
65881 * is effectively never, so we can ignore the possibility.
65882 */
65883-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
65884+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
65885
65886 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
65887 {
65888@@ -2486,7 +2505,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
65889 kfree(new_ns);
65890 return ERR_PTR(ret);
65891 }
65892- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
65893+ new_ns->seq = atomic64_inc_return_unchecked(&mnt_ns_seq);
65894 atomic_set(&new_ns->count, 1);
65895 new_ns->root = NULL;
65896 INIT_LIST_HEAD(&new_ns->list);
65897@@ -2496,7 +2515,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
65898 return new_ns;
65899 }
65900
65901-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
65902+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
65903 struct user_namespace *user_ns, struct fs_struct *new_fs)
65904 {
65905 struct mnt_namespace *new_ns;
65906@@ -2617,8 +2636,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
65907 }
65908 EXPORT_SYMBOL(mount_subtree);
65909
65910-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
65911- char __user *, type, unsigned long, flags, void __user *, data)
65912+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
65913+ const char __user *, type, unsigned long, flags, void __user *, data)
65914 {
65915 int ret;
65916 char *kernel_type;
65917@@ -2731,6 +2750,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
65918 if (error)
65919 goto out2;
65920
65921+ if (gr_handle_chroot_pivot()) {
65922+ error = -EPERM;
65923+ goto out2;
65924+ }
65925+
65926 get_fs_root(current->fs, &root);
65927 old_mp = lock_mount(&old);
65928 error = PTR_ERR(old_mp);
65929@@ -2999,7 +3023,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
65930 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
65931 return -EPERM;
65932
65933- if (fs->users != 1)
65934+ if (atomic_read(&fs->users) != 1)
65935 return -EINVAL;
65936
65937 get_mnt_ns(mnt_ns);
65938diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
65939index f4ccfe6..a5cf064 100644
65940--- a/fs/nfs/callback_xdr.c
65941+++ b/fs/nfs/callback_xdr.c
65942@@ -51,7 +51,7 @@ struct callback_op {
65943 callback_decode_arg_t decode_args;
65944 callback_encode_res_t encode_res;
65945 long res_maxsize;
65946-};
65947+} __do_const;
65948
65949 static struct callback_op callback_ops[];
65950
65951diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
65952index 9927913..faffc5c 100644
65953--- a/fs/nfs/inode.c
65954+++ b/fs/nfs/inode.c
65955@@ -1219,16 +1219,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
65956 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
65957 }
65958
65959-static atomic_long_t nfs_attr_generation_counter;
65960+static atomic_long_unchecked_t nfs_attr_generation_counter;
65961
65962 static unsigned long nfs_read_attr_generation_counter(void)
65963 {
65964- return atomic_long_read(&nfs_attr_generation_counter);
65965+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
65966 }
65967
65968 unsigned long nfs_inc_attr_generation_counter(void)
65969 {
65970- return atomic_long_inc_return(&nfs_attr_generation_counter);
65971+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
65972 }
65973
65974 void nfs_fattr_init(struct nfs_fattr *fattr)
65975diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
65976index 8f029db..3688b84 100644
65977--- a/fs/nfsd/nfs4proc.c
65978+++ b/fs/nfsd/nfs4proc.c
65979@@ -1157,7 +1157,7 @@ struct nfsd4_operation {
65980 nfsd4op_rsize op_rsize_bop;
65981 stateid_getter op_get_currentstateid;
65982 stateid_setter op_set_currentstateid;
65983-};
65984+} __do_const;
65985
65986 static struct nfsd4_operation nfsd4_ops[];
65987
65988diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
65989index 944275c..6fc40a7 100644
65990--- a/fs/nfsd/nfs4xdr.c
65991+++ b/fs/nfsd/nfs4xdr.c
65992@@ -1539,7 +1539,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
65993
65994 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
65995
65996-static nfsd4_dec nfsd4_dec_ops[] = {
65997+static const nfsd4_dec nfsd4_dec_ops[] = {
65998 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
65999 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
66000 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
66001diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
66002index 6040da8..4348565 100644
66003--- a/fs/nfsd/nfscache.c
66004+++ b/fs/nfsd/nfscache.c
66005@@ -518,17 +518,20 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
66006 {
66007 struct svc_cacherep *rp = rqstp->rq_cacherep;
66008 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
66009- int len;
66010+ long len;
66011 size_t bufsize = 0;
66012
66013 if (!rp)
66014 return;
66015
66016- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
66017- len >>= 2;
66018+ if (statp) {
66019+ len = (char*)statp - (char*)resv->iov_base;
66020+ len = resv->iov_len - len;
66021+ len >>= 2;
66022+ }
66023
66024 /* Don't cache excessive amounts of data and XDR failures */
66025- if (!statp || len > (256 >> 2)) {
66026+ if (!statp || len > (256 >> 2) || len < 0) {
66027 nfsd_reply_cache_free(rp);
66028 return;
66029 }
66030@@ -536,7 +539,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
66031 switch (cachetype) {
66032 case RC_REPLSTAT:
66033 if (len != 1)
66034- printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
66035+ printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
66036 rp->c_replstat = *statp;
66037 break;
66038 case RC_REPLBUFF:
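
The nfsd_cache_update() fix above addresses two defects in the old length computation: the pointer arithmetic ran even for a NULL statp, and the length lived in a plain int whose negative values sailed past the len > (256 >> 2) check and got cached. The new code computes the length only for a valid statp, keeps it in a wider signed type, and rejects negatives. Standalone model:

#include <stdio.h>

/* returns the reply length in 32-bit XDR words, or -1 for "don't cache" */
static int cache_len(const char *iov_base, size_t iov_len, const char *statp)
{
	long len = 0;

	if (statp) {
		len = (long)(statp - iov_base);	/* bytes consumed before statp */
		len = (long)iov_len - len;	/* bytes from statp to the end */
		len >>= 2;			/* bytes -> XDR words */
	}
	if (!statp || len > (256 >> 2) || len < 0)
		return -1;
	return (int)len;
}

int main(void)
{
	char buf[64];

	printf("%d\n", cache_len(buf, sizeof(buf), buf + 8));	/* 14 */
	printf("%d\n", cache_len(buf, sizeof(buf), NULL));	/* -1 */
	return 0;
}
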
66039diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
66040index 140c496..e9cbf14 100644
66041--- a/fs/nfsd/vfs.c
66042+++ b/fs/nfsd/vfs.c
66043@@ -855,7 +855,7 @@ int nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
66044
66045 oldfs = get_fs();
66046 set_fs(KERNEL_DS);
66047- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
66048+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
66049 set_fs(oldfs);
66050 return nfsd_finish_read(file, count, host_err);
66051 }
66052@@ -943,7 +943,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
66053
66054 /* Write the data. */
66055 oldfs = get_fs(); set_fs(KERNEL_DS);
66056- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
66057+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
66058 set_fs(oldfs);
66059 if (host_err < 0)
66060 goto out_nfserr;
66061@@ -1482,7 +1482,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
66062 */
66063
66064 oldfs = get_fs(); set_fs(KERNEL_DS);
66065- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
66066+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
66067 set_fs(oldfs);
66068
66069 if (host_err < 0)
66070diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
66071index 52ccd34..7a6b202 100644
66072--- a/fs/nls/nls_base.c
66073+++ b/fs/nls/nls_base.c
66074@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
66075
66076 int __register_nls(struct nls_table *nls, struct module *owner)
66077 {
66078- struct nls_table ** tmp = &tables;
66079+ struct nls_table *tmp = tables;
66080
66081 if (nls->next)
66082 return -EBUSY;
66083
66084- nls->owner = owner;
66085+ pax_open_kernel();
66086+ *(void **)&nls->owner = owner;
66087+ pax_close_kernel();
66088 spin_lock(&nls_lock);
66089- while (*tmp) {
66090- if (nls == *tmp) {
66091+ while (tmp) {
66092+ if (nls == tmp) {
66093 spin_unlock(&nls_lock);
66094 return -EBUSY;
66095 }
66096- tmp = &(*tmp)->next;
66097+ tmp = tmp->next;
66098 }
66099- nls->next = tables;
66100+ pax_open_kernel();
66101+ *(struct nls_table **)&nls->next = tables;
66102+ pax_close_kernel();
66103 tables = nls;
66104 spin_unlock(&nls_lock);
66105 return 0;
66106@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
66107
66108 int unregister_nls(struct nls_table * nls)
66109 {
66110- struct nls_table ** tmp = &tables;
66111+ struct nls_table * const * tmp = &tables;
66112
66113 spin_lock(&nls_lock);
66114 while (*tmp) {
66115 if (nls == *tmp) {
66116- *tmp = nls->next;
66117+ pax_open_kernel();
66118+ *(struct nls_table **)tmp = nls->next;
66119+ pax_close_kernel();
66120 spin_unlock(&nls_lock);
66121 return 0;
66122 }
66123@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
66124 return -EINVAL;
66125 }
66126
66127-static struct nls_table *find_nls(char *charset)
66128+static struct nls_table *find_nls(const char *charset)
66129 {
66130 struct nls_table *nls;
66131 spin_lock(&nls_lock);
66132@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
66133 return nls;
66134 }
66135
66136-struct nls_table *load_nls(char *charset)
66137+struct nls_table *load_nls(const char *charset)
66138 {
66139 return try_then_request_module(find_nls(charset), "nls_%s", charset);
66140 }
66141diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
66142index 162b3f1..6076a7c 100644
66143--- a/fs/nls/nls_euc-jp.c
66144+++ b/fs/nls/nls_euc-jp.c
66145@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
66146 p_nls = load_nls("cp932");
66147
66148 if (p_nls) {
66149- table.charset2upper = p_nls->charset2upper;
66150- table.charset2lower = p_nls->charset2lower;
66151+ pax_open_kernel();
66152+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
66153+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
66154+ pax_close_kernel();
66155 return register_nls(&table);
66156 }
66157
66158diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
66159index a80a741..7b96e1b 100644
66160--- a/fs/nls/nls_koi8-ru.c
66161+++ b/fs/nls/nls_koi8-ru.c
66162@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
66163 p_nls = load_nls("koi8-u");
66164
66165 if (p_nls) {
66166- table.charset2upper = p_nls->charset2upper;
66167- table.charset2lower = p_nls->charset2lower;
66168+ pax_open_kernel();
66169+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
66170+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
66171+ pax_close_kernel();
66172 return register_nls(&table);
66173 }
66174
66175diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
66176index 3fdc8a3..5888623 100644
66177--- a/fs/notify/fanotify/fanotify_user.c
66178+++ b/fs/notify/fanotify/fanotify_user.c
66179@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
66180
66181 fd = fanotify_event_metadata.fd;
66182 ret = -EFAULT;
66183- if (copy_to_user(buf, &fanotify_event_metadata,
66184- fanotify_event_metadata.event_len))
66185+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
66186+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
66187 goto out_close_fd;
66188
66189 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
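
The fanotify hunk above adds the missing upper bound: event_len is a kernel-filled field, but bounding it against the size of the stack object actually being copied ensures that a corrupted or miscomputed length can never make copy_to_user() leak adjacent kernel stack. The shape of the check, modelled in userspace:

#include <stdio.h>
#include <string.h>

struct event_metadata {
	unsigned int event_len;
	int fd;
};

static int copy_event(char *ubuf, const struct event_metadata *ev)
{
	if (ev->event_len > sizeof(*ev))	/* reject over-long claims */
		return -1;
	memcpy(ubuf, ev, ev->event_len);	/* stands in for copy_to_user() */
	return 0;
}

int main(void)
{
	char buf[64];
	struct event_metadata ok  = { sizeof(ok), 3 };
	struct event_metadata bad = { 4096, 3 };

	printf("ok : %d\n", copy_event(buf, &ok));	/* 0  */
	printf("bad: %d\n", copy_event(buf, &bad));	/* -1 */
	return 0;
}
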
66190diff --git a/fs/notify/notification.c b/fs/notify/notification.c
66191index 1e58402..bb2d6f4 100644
66192--- a/fs/notify/notification.c
66193+++ b/fs/notify/notification.c
66194@@ -48,7 +48,7 @@
66195 #include <linux/fsnotify_backend.h>
66196 #include "fsnotify.h"
66197
66198-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
66199+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
66200
66201 /**
66202 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
66203@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
66204 */
66205 u32 fsnotify_get_cookie(void)
66206 {
66207- return atomic_inc_return(&fsnotify_sync_cookie);
66208+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
66209 }
66210 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
66211
66212diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
66213index 9e38daf..5727cae 100644
66214--- a/fs/ntfs/dir.c
66215+++ b/fs/ntfs/dir.c
66216@@ -1310,7 +1310,7 @@ find_next_index_buffer:
66217 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
66218 ~(s64)(ndir->itype.index.block_size - 1)));
66219 /* Bounds checks. */
66220- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
66221+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
66222 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
66223 "inode 0x%lx or driver bug.", vdir->i_ino);
66224 goto err_out;
66225diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
66226index 5c9e2c8..96e4ba0 100644
66227--- a/fs/ntfs/file.c
66228+++ b/fs/ntfs/file.c
66229@@ -1282,7 +1282,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
66230 char *addr;
66231 size_t total = 0;
66232 unsigned len;
66233- int left;
66234+ unsigned left;
66235
66236 do {
66237 len = PAGE_CACHE_SIZE - ofs;
66238diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
66239index 6c3296e..c0b99f0 100644
66240--- a/fs/ntfs/super.c
66241+++ b/fs/ntfs/super.c
66242@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
66243 if (!silent)
66244 ntfs_error(sb, "Primary boot sector is invalid.");
66245 } else if (!silent)
66246- ntfs_error(sb, read_err_str, "primary");
66247+ ntfs_error(sb, read_err_str, "%s", "primary");
66248 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
66249 if (bh_primary)
66250 brelse(bh_primary);
66251@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
66252 goto hotfix_primary_boot_sector;
66253 brelse(bh_backup);
66254 } else if (!silent)
66255- ntfs_error(sb, read_err_str, "backup");
66256+ ntfs_error(sb, read_err_str, "%s", "backup");
66257 /* Try to read NT3.51- backup boot sector. */
66258 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
66259 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
66260@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
66261 "sector.");
66262 brelse(bh_backup);
66263 } else if (!silent)
66264- ntfs_error(sb, read_err_str, "backup");
66265+ ntfs_error(sb, read_err_str, "%s", "backup");
66266 /* We failed. Cleanup and return. */
66267 if (bh_primary)
66268 brelse(bh_primary);
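
The three ntfs_error() changes are format-string hygiene: read_err_str reaches the call through a pointer rather than a literal, so the compiler (and the format hardening this tree enables) can no longer match conversions against arguments, and the call sites are rewritten to make the variadic arguments explicit. A minimal sketch of the underlying failure mode, with an illustrative logger standing in for ntfs_error():

    #include <stdio.h>
    #include <stdarg.h>

    /* Illustrative logger; stands in for ntfs_error() here. */
    static void log_msg(const char *fmt, ...)
    {
        va_list ap;
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
    }

    int main(void)
    {
        const char *text = "100%s complete";  /* imagine attacker-influenced text */
        /* BAD: non-literal format; the stray "%s" reads a bogus va_arg
         * -> undefined behavior.  log_msg(text); */
        /* GOOD: pin the format to a literal, pass the text as data. */
        log_msg("%s\n", text);
        return 0;
    }
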
66269diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
66270index 0440134..d52c93a 100644
66271--- a/fs/ocfs2/localalloc.c
66272+++ b/fs/ocfs2/localalloc.c
66273@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
66274 goto bail;
66275 }
66276
66277- atomic_inc(&osb->alloc_stats.moves);
66278+ atomic_inc_unchecked(&osb->alloc_stats.moves);
66279
66280 bail:
66281 if (handle)
66282diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
66283index bbec539..7b266d5 100644
66284--- a/fs/ocfs2/ocfs2.h
66285+++ b/fs/ocfs2/ocfs2.h
66286@@ -236,11 +236,11 @@ enum ocfs2_vol_state
66287
66288 struct ocfs2_alloc_stats
66289 {
66290- atomic_t moves;
66291- atomic_t local_data;
66292- atomic_t bitmap_data;
66293- atomic_t bg_allocs;
66294- atomic_t bg_extends;
66295+ atomic_unchecked_t moves;
66296+ atomic_unchecked_t local_data;
66297+ atomic_unchecked_t bitmap_data;
66298+ atomic_unchecked_t bg_allocs;
66299+ atomic_unchecked_t bg_extends;
66300 };
66301
66302 enum ocfs2_local_alloc_state
66303diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
66304index 0cb889a..6a26b24 100644
66305--- a/fs/ocfs2/suballoc.c
66306+++ b/fs/ocfs2/suballoc.c
66307@@ -867,7 +867,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
66308 mlog_errno(status);
66309 goto bail;
66310 }
66311- atomic_inc(&osb->alloc_stats.bg_extends);
66312+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
66313
66314 /* You should never ask for this much metadata */
66315 BUG_ON(bits_wanted >
66316@@ -2014,7 +2014,7 @@ int ocfs2_claim_metadata(handle_t *handle,
66317 mlog_errno(status);
66318 goto bail;
66319 }
66320- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66321+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66322
66323 *suballoc_loc = res.sr_bg_blkno;
66324 *suballoc_bit_start = res.sr_bit_offset;
66325@@ -2180,7 +2180,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
66326 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
66327 res->sr_bits);
66328
66329- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66330+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66331
66332 BUG_ON(res->sr_bits != 1);
66333
66334@@ -2222,7 +2222,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
66335 mlog_errno(status);
66336 goto bail;
66337 }
66338- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66339+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66340
66341 BUG_ON(res.sr_bits != 1);
66342
66343@@ -2326,7 +2326,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
66344 cluster_start,
66345 num_clusters);
66346 if (!status)
66347- atomic_inc(&osb->alloc_stats.local_data);
66348+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
66349 } else {
66350 if (min_clusters > (osb->bitmap_cpg - 1)) {
66351 /* The only paths asking for contiguousness
66352@@ -2352,7 +2352,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
66353 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
66354 res.sr_bg_blkno,
66355 res.sr_bit_offset);
66356- atomic_inc(&osb->alloc_stats.bitmap_data);
66357+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
66358 *num_clusters = res.sr_bits;
66359 }
66360 }
66361diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
66362index ddb662b..f701c83 100644
66363--- a/fs/ocfs2/super.c
66364+++ b/fs/ocfs2/super.c
66365@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
66366 "%10s => GlobalAllocs: %d LocalAllocs: %d "
66367 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
66368 "Stats",
66369- atomic_read(&osb->alloc_stats.bitmap_data),
66370- atomic_read(&osb->alloc_stats.local_data),
66371- atomic_read(&osb->alloc_stats.bg_allocs),
66372- atomic_read(&osb->alloc_stats.moves),
66373- atomic_read(&osb->alloc_stats.bg_extends));
66374+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
66375+ atomic_read_unchecked(&osb->alloc_stats.local_data),
66376+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
66377+ atomic_read_unchecked(&osb->alloc_stats.moves),
66378+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
66379
66380 out += snprintf(buf + out, len - out,
66381 "%10s => State: %u Descriptor: %llu Size: %u bits "
66382@@ -2100,11 +2100,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
66383
66384 mutex_init(&osb->system_file_mutex);
66385
66386- atomic_set(&osb->alloc_stats.moves, 0);
66387- atomic_set(&osb->alloc_stats.local_data, 0);
66388- atomic_set(&osb->alloc_stats.bitmap_data, 0);
66389- atomic_set(&osb->alloc_stats.bg_allocs, 0);
66390- atomic_set(&osb->alloc_stats.bg_extends, 0);
66391+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
66392+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
66393+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
66394+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
66395+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
66396
66397 /* Copy the blockcheck stats from the superblock probe */
66398 osb->osb_ecc_stats = *stats;
66399diff --git a/fs/open.c b/fs/open.c
66400index d6fd3ac..6ccf474 100644
66401--- a/fs/open.c
66402+++ b/fs/open.c
66403@@ -32,6 +32,8 @@
66404 #include <linux/dnotify.h>
66405 #include <linux/compat.h>
66406
66407+#define CREATE_TRACE_POINTS
66408+#include <trace/events/fs.h>
66409 #include "internal.h"
66410
66411 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
66412@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
66413 error = locks_verify_truncate(inode, NULL, length);
66414 if (!error)
66415 error = security_path_truncate(path);
66416+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
66417+ error = -EACCES;
66418 if (!error)
66419 error = do_truncate(path->dentry, length, 0, NULL);
66420
66421@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
66422 error = locks_verify_truncate(inode, f.file, length);
66423 if (!error)
66424 error = security_path_truncate(&f.file->f_path);
66425+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
66426+ error = -EACCES;
66427 if (!error)
66428 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
66429 sb_end_write(inode->i_sb);
66430@@ -380,6 +386,9 @@ retry:
66431 if (__mnt_is_readonly(path.mnt))
66432 res = -EROFS;
66433
66434+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
66435+ res = -EACCES;
66436+
66437 out_path_release:
66438 path_put(&path);
66439 if (retry_estale(res, lookup_flags)) {
66440@@ -411,6 +420,8 @@ retry:
66441 if (error)
66442 goto dput_and_out;
66443
66444+ gr_log_chdir(path.dentry, path.mnt);
66445+
66446 set_fs_pwd(current->fs, &path);
66447
66448 dput_and_out:
66449@@ -440,6 +451,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
66450 goto out_putf;
66451
66452 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
66453+
66454+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
66455+ error = -EPERM;
66456+
66457+ if (!error)
66458+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
66459+
66460 if (!error)
66461 set_fs_pwd(current->fs, &f.file->f_path);
66462 out_putf:
66463@@ -469,7 +487,13 @@ retry:
66464 if (error)
66465 goto dput_and_out;
66466
66467+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
66468+ goto dput_and_out;
66469+
66470 set_fs_root(current->fs, &path);
66471+
66472+ gr_handle_chroot_chdir(&path);
66473+
66474 error = 0;
66475 dput_and_out:
66476 path_put(&path);
66477@@ -493,6 +517,16 @@ static int chmod_common(struct path *path, umode_t mode)
66478 return error;
66479 retry_deleg:
66480 mutex_lock(&inode->i_mutex);
66481+
66482+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
66483+ error = -EACCES;
66484+ goto out_unlock;
66485+ }
66486+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
66487+ error = -EACCES;
66488+ goto out_unlock;
66489+ }
66490+
66491 error = security_path_chmod(path, mode);
66492 if (error)
66493 goto out_unlock;
66494@@ -558,6 +592,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
66495 uid = make_kuid(current_user_ns(), user);
66496 gid = make_kgid(current_user_ns(), group);
66497
66498+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
66499+ return -EACCES;
66500+
66501 newattrs.ia_valid = ATTR_CTIME;
66502 if (user != (uid_t) -1) {
66503 if (!uid_valid(uid))
66504@@ -983,6 +1020,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
66505 } else {
66506 fsnotify_open(f);
66507 fd_install(fd, f);
66508+ trace_do_sys_open(tmp->name, flags, mode);
66509 }
66510 }
66511 putname(tmp);
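
Two independent things happen in fs/open.c: the tracepoint (CREATE_TRACE_POINTS plus trace_do_sys_open(), whose event definition is presumably added elsewhere in this patch), and the gr_acl_handle_*() calls, which all follow one pattern: run after the DAC and LSM checks, and fold a deny into the same error variable so the existing cleanup paths are reused. A sketch of that layering, with gr_allow() and the two checks as illustrative stand-ins for the real hooks:

    #include <errno.h>
    #include <stdbool.h>

    static int dac_check(void)  { return 0; }     /* classic mode-bit check */
    static int lsm_check(void)  { return 0; }     /* security_path_*() hook */
    static bool gr_allow(void)  { return true; }  /* RBAC policy decision  */

    static int guarded_op(void)
    {
        int error = dac_check();
        if (!error)
            error = lsm_check();
        /* RBAC runs last and can only tighten: it never grants what
         * DAC/LSM already denied, it just layers -EACCES on top. */
        if (!error && !gr_allow())
            error = -EACCES;
        if (!error)
            /* ... perform the operation ... */ ;
        return error;
    }
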
66512diff --git a/fs/pipe.c b/fs/pipe.c
66513index 21981e5..3d5f55c 100644
66514--- a/fs/pipe.c
66515+++ b/fs/pipe.c
66516@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
66517
66518 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
66519 {
66520- if (pipe->files)
66521+ if (atomic_read(&pipe->files))
66522 mutex_lock_nested(&pipe->mutex, subclass);
66523 }
66524
66525@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
66526
66527 void pipe_unlock(struct pipe_inode_info *pipe)
66528 {
66529- if (pipe->files)
66530+ if (atomic_read(&pipe->files))
66531 mutex_unlock(&pipe->mutex);
66532 }
66533 EXPORT_SYMBOL(pipe_unlock);
66534@@ -292,9 +292,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
66535 }
66536 if (bufs) /* More to do? */
66537 continue;
66538- if (!pipe->writers)
66539+ if (!atomic_read(&pipe->writers))
66540 break;
66541- if (!pipe->waiting_writers) {
66542+ if (!atomic_read(&pipe->waiting_writers)) {
66543 /* syscall merging: Usually we must not sleep
66544 * if O_NONBLOCK is set, or if we got some data.
66545 * But if a writer sleeps in kernel space, then
66546@@ -351,7 +351,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
66547
66548 __pipe_lock(pipe);
66549
66550- if (!pipe->readers) {
66551+ if (!atomic_read(&pipe->readers)) {
66552 send_sig(SIGPIPE, current, 0);
66553 ret = -EPIPE;
66554 goto out;
66555@@ -387,7 +387,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
66556 for (;;) {
66557 int bufs;
66558
66559- if (!pipe->readers) {
66560+ if (!atomic_read(&pipe->readers)) {
66561 send_sig(SIGPIPE, current, 0);
66562 if (!ret)
66563 ret = -EPIPE;
66564@@ -455,9 +455,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
66565 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
66566 do_wakeup = 0;
66567 }
66568- pipe->waiting_writers++;
66569+ atomic_inc(&pipe->waiting_writers);
66570 pipe_wait(pipe);
66571- pipe->waiting_writers--;
66572+ atomic_dec(&pipe->waiting_writers);
66573 }
66574 out:
66575 __pipe_unlock(pipe);
66576@@ -512,7 +512,7 @@ pipe_poll(struct file *filp, poll_table *wait)
66577 mask = 0;
66578 if (filp->f_mode & FMODE_READ) {
66579 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
66580- if (!pipe->writers && filp->f_version != pipe->w_counter)
66581+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
66582 mask |= POLLHUP;
66583 }
66584
66585@@ -522,7 +522,7 @@ pipe_poll(struct file *filp, poll_table *wait)
66586 * Most Unices do not set POLLERR for FIFOs but on Linux they
66587 * behave exactly like pipes for poll().
66588 */
66589- if (!pipe->readers)
66590+ if (!atomic_read(&pipe->readers))
66591 mask |= POLLERR;
66592 }
66593
66594@@ -534,7 +534,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
66595 int kill = 0;
66596
66597 spin_lock(&inode->i_lock);
66598- if (!--pipe->files) {
66599+ if (atomic_dec_and_test(&pipe->files)) {
66600 inode->i_pipe = NULL;
66601 kill = 1;
66602 }
66603@@ -551,11 +551,11 @@ pipe_release(struct inode *inode, struct file *file)
66604
66605 __pipe_lock(pipe);
66606 if (file->f_mode & FMODE_READ)
66607- pipe->readers--;
66608+ atomic_dec(&pipe->readers);
66609 if (file->f_mode & FMODE_WRITE)
66610- pipe->writers--;
66611+ atomic_dec(&pipe->writers);
66612
66613- if (pipe->readers || pipe->writers) {
66614+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
66615 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
66616 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
66617 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
66618@@ -620,7 +620,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
66619 kfree(pipe);
66620 }
66621
66622-static struct vfsmount *pipe_mnt __read_mostly;
66623+struct vfsmount *pipe_mnt __read_mostly;
66624
66625 /*
66626 * pipefs_dname() is called from d_path().
66627@@ -650,8 +650,9 @@ static struct inode * get_pipe_inode(void)
66628 goto fail_iput;
66629
66630 inode->i_pipe = pipe;
66631- pipe->files = 2;
66632- pipe->readers = pipe->writers = 1;
66633+ atomic_set(&pipe->files, 2);
66634+ atomic_set(&pipe->readers, 1);
66635+ atomic_set(&pipe->writers, 1);
66636 inode->i_fop = &pipefifo_fops;
66637
66638 /*
66639@@ -830,17 +831,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
66640 spin_lock(&inode->i_lock);
66641 if (inode->i_pipe) {
66642 pipe = inode->i_pipe;
66643- pipe->files++;
66644+ atomic_inc(&pipe->files);
66645 spin_unlock(&inode->i_lock);
66646 } else {
66647 spin_unlock(&inode->i_lock);
66648 pipe = alloc_pipe_info();
66649 if (!pipe)
66650 return -ENOMEM;
66651- pipe->files = 1;
66652+ atomic_set(&pipe->files, 1);
66653 spin_lock(&inode->i_lock);
66654 if (unlikely(inode->i_pipe)) {
66655- inode->i_pipe->files++;
66656+ atomic_inc(&inode->i_pipe->files);
66657 spin_unlock(&inode->i_lock);
66658 free_pipe_info(pipe);
66659 pipe = inode->i_pipe;
66660@@ -865,10 +866,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
66661 * opened, even when there is no process writing the FIFO.
66662 */
66663 pipe->r_counter++;
66664- if (pipe->readers++ == 0)
66665+ if (atomic_inc_return(&pipe->readers) == 1)
66666 wake_up_partner(pipe);
66667
66668- if (!is_pipe && !pipe->writers) {
66669+ if (!is_pipe && !atomic_read(&pipe->writers)) {
66670 if ((filp->f_flags & O_NONBLOCK)) {
66671 /* suppress POLLHUP until we have
66672 * seen a writer */
66673@@ -887,14 +888,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
66674 * errno=ENXIO when there is no process reading the FIFO.
66675 */
66676 ret = -ENXIO;
66677- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
66678+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
66679 goto err;
66680
66681 pipe->w_counter++;
66682- if (!pipe->writers++)
66683+ if (atomic_inc_return(&pipe->writers) == 1)
66684 wake_up_partner(pipe);
66685
66686- if (!is_pipe && !pipe->readers) {
66687+ if (!is_pipe && !atomic_read(&pipe->readers)) {
66688 if (wait_for_partner(pipe, &pipe->r_counter))
66689 goto err_wr;
66690 }
66691@@ -908,11 +909,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
66692 * the process can at least talk to itself.
66693 */
66694
66695- pipe->readers++;
66696- pipe->writers++;
66697+ atomic_inc(&pipe->readers);
66698+ atomic_inc(&pipe->writers);
66699 pipe->r_counter++;
66700 pipe->w_counter++;
66701- if (pipe->readers == 1 || pipe->writers == 1)
66702+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
66703 wake_up_partner(pipe);
66704 break;
66705
66706@@ -926,13 +927,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
66707 return 0;
66708
66709 err_rd:
66710- if (!--pipe->readers)
66711+ if (atomic_dec_and_test(&pipe->readers))
66712 wake_up_interruptible(&pipe->wait);
66713 ret = -ERESTARTSYS;
66714 goto err;
66715
66716 err_wr:
66717- if (!--pipe->writers)
66718+ if (atomic_dec_and_test(&pipe->writers))
66719 wake_up_interruptible(&pipe->wait);
66720 ret = -ERESTARTSYS;
66721 goto err;
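
The pipe conversion is mechanical, but the boundary tests deserve a second look: a post-increment test like `if (pipe->readers++ == 0)` observes the value before the increment, while atomic_inc_return() yields the value after, so "was zero" becomes "became one"; likewise `if (!--pipe->writers)` maps to atomic_dec_and_test(). Note these counters become ordinary checked atomic_t, not the _unchecked variant, since a wrapping reader/writer count would be a real bug. A compile-checkable sketch of the equivalence using C11 atomics:

    #include <stdatomic.h>
    #include <assert.h>

    int main(void)
    {
        int plain = 0;
        atomic_int counter = 0;

        /* Pre-patch idiom: test the old value, then bump. */
        int was_first_plain = (plain++ == 0);

        /* Post-patch idiom: bump atomically, test the new value. */
        int was_first_atomic = (atomic_fetch_add(&counter, 1) + 1 == 1);

        assert(was_first_plain == was_first_atomic);  /* both mean "first opener" */
        return 0;
    }
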
66722diff --git a/fs/posix_acl.c b/fs/posix_acl.c
66723index 0855f77..6787d50 100644
66724--- a/fs/posix_acl.c
66725+++ b/fs/posix_acl.c
66726@@ -20,6 +20,7 @@
66727 #include <linux/xattr.h>
66728 #include <linux/export.h>
66729 #include <linux/user_namespace.h>
66730+#include <linux/grsecurity.h>
66731
66732 struct posix_acl **acl_by_type(struct inode *inode, int type)
66733 {
66734@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
66735 }
66736 }
66737 if (mode_p)
66738- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
66739+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
66740 return not_equiv;
66741 }
66742 EXPORT_SYMBOL(posix_acl_equiv_mode);
66743@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
66744 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
66745 }
66746
66747- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
66748+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
66749 return not_equiv;
66750 }
66751
66752@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
66753 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
66754 int err = -ENOMEM;
66755 if (clone) {
66756+ *mode_p &= ~gr_acl_umask();
66757+
66758 err = posix_acl_create_masq(clone, mode_p);
66759 if (err < 0) {
66760 posix_acl_release(clone);
66761@@ -659,11 +662,12 @@ struct posix_acl *
66762 posix_acl_from_xattr(struct user_namespace *user_ns,
66763 const void *value, size_t size)
66764 {
66765- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
66766- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
66767+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
66768+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
66769 int count;
66770 struct posix_acl *acl;
66771 struct posix_acl_entry *acl_e;
66772+ umode_t umask = gr_acl_umask();
66773
66774 if (!value)
66775 return NULL;
66776@@ -689,12 +693,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
66777
66778 switch(acl_e->e_tag) {
66779 case ACL_USER_OBJ:
66780+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
66781+ break;
66782 case ACL_GROUP_OBJ:
66783 case ACL_MASK:
66784+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
66785+ break;
66786 case ACL_OTHER:
66787+ acl_e->e_perm &= ~(umask & S_IRWXO);
66788 break;
66789
66790 case ACL_USER:
66791+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
66792 acl_e->e_uid =
66793 make_kuid(user_ns,
66794 le32_to_cpu(entry->e_id));
66795@@ -702,6 +712,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
66796 goto fail;
66797 break;
66798 case ACL_GROUP:
66799+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
66800 acl_e->e_gid =
66801 make_kgid(user_ns,
66802 le32_to_cpu(entry->e_id));
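
The masking added to posix_acl_from_xattr() applies the RBAC-supplied umask per ACL tag, and the shifts simply line the three umask groups up with the 3-bit e_perm field: S_IRWXU occupies bits 8..6, so `(umask & S_IRWXU) >> 6` lands in bits 2..0; S_IRWXG shifts by 3; S_IRWXO needs no shift. A worked example, assuming a umask of 027:

    #include <assert.h>
    #include <sys/stat.h>

    int main(void)
    {
        unsigned short umask_bits = 027;   /* deny group w, other rwx */
        unsigned short user_perm  = 07;    /* each ACL entry starts rwx */
        unsigned short group_perm = 07;
        unsigned short other_perm = 07;

        user_perm  &= ~((umask_bits & S_IRWXU) >> 6);  /* 027 & 0700 = 0 -> rwx kept */
        group_perm &= ~((umask_bits & S_IRWXG) >> 3);  /* 020 >> 3 = 02  -> r-x     */
        other_perm &= ~ (umask_bits & S_IRWXO);        /* 07             -> ---     */

        assert(user_perm == 07 && group_perm == 05 && other_perm == 0);
        return 0;
    }
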
66803diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
66804index 2183fcf..3c32a98 100644
66805--- a/fs/proc/Kconfig
66806+++ b/fs/proc/Kconfig
66807@@ -30,7 +30,7 @@ config PROC_FS
66808
66809 config PROC_KCORE
66810 bool "/proc/kcore support" if !ARM
66811- depends on PROC_FS && MMU
66812+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
66813 help
66814 Provides a virtual ELF core file of the live kernel. This can
66815 be read with gdb and other ELF tools. No modifications can be
66816@@ -38,8 +38,8 @@ config PROC_KCORE
66817
66818 config PROC_VMCORE
66819 bool "/proc/vmcore support"
66820- depends on PROC_FS && CRASH_DUMP
66821- default y
66822+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
66823+ default n
66824 help
66825 Exports the dump image of crashed kernel in ELF format.
66826
66827@@ -63,8 +63,8 @@ config PROC_SYSCTL
66828 limited in memory.
66829
66830 config PROC_PAGE_MONITOR
66831- default y
66832- depends on PROC_FS && MMU
66833+ default n
66834+ depends on PROC_FS && MMU && !GRKERNSEC
66835 bool "Enable /proc page monitoring" if EXPERT
66836 help
66837 Various /proc files exist to monitor process memory utilization:
66838diff --git a/fs/proc/array.c b/fs/proc/array.c
66839index 64db2bc..a8185d6 100644
66840--- a/fs/proc/array.c
66841+++ b/fs/proc/array.c
66842@@ -60,6 +60,7 @@
66843 #include <linux/tty.h>
66844 #include <linux/string.h>
66845 #include <linux/mman.h>
66846+#include <linux/grsecurity.h>
66847 #include <linux/proc_fs.h>
66848 #include <linux/ioport.h>
66849 #include <linux/uaccess.h>
66850@@ -356,6 +357,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
66851 seq_putc(m, '\n');
66852 }
66853
66854+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66855+static inline void task_pax(struct seq_file *m, struct task_struct *p)
66856+{
66857+ if (p->mm)
66858+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
66859+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
66860+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
66861+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
66862+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
66863+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
66864+ else
66865+ seq_printf(m, "PaX:\t-----\n");
66866+}
66867+#endif
66868+
66869 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
66870 struct pid *pid, struct task_struct *task)
66871 {
66872@@ -374,9 +390,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
66873 task_cpus_allowed(m, task);
66874 cpuset_task_status_allowed(m, task);
66875 task_context_switch_counts(m, task);
66876+
66877+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66878+ task_pax(m, task);
66879+#endif
66880+
66881+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
66882+ task_grsec_rbac(m, task);
66883+#endif
66884+
66885 return 0;
66886 }
66887
66888+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66889+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
66890+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
66891+ _mm->pax_flags & MF_PAX_SEGMEXEC))
66892+#endif
66893+
66894 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66895 struct pid *pid, struct task_struct *task, int whole)
66896 {
66897@@ -398,6 +429,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66898 char tcomm[sizeof(task->comm)];
66899 unsigned long flags;
66900
66901+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66902+ if (current->exec_id != m->exec_id) {
66903+ gr_log_badprocpid("stat");
66904+ return 0;
66905+ }
66906+#endif
66907+
66908 state = *get_task_state(task);
66909 vsize = eip = esp = 0;
66910 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
66911@@ -468,6 +506,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66912 gtime = task_gtime(task);
66913 }
66914
66915+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66916+ if (PAX_RAND_FLAGS(mm)) {
66917+ eip = 0;
66918+ esp = 0;
66919+ wchan = 0;
66920+ }
66921+#endif
66922+#ifdef CONFIG_GRKERNSEC_HIDESYM
66923+ wchan = 0;
66924+ eip = 0;
66925+ esp = 0;
66926+#endif
66927+
66928 /* scale priority and nice values from timeslices to -20..20 */
66929 /* to make it look like a "normal" Unix priority/nice value */
66930 priority = task_prio(task);
66931@@ -504,9 +555,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66932 seq_put_decimal_ull(m, ' ', vsize);
66933 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
66934 seq_put_decimal_ull(m, ' ', rsslim);
66935+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66936+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
66937+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
66938+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
66939+#else
66940 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
66941 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
66942 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
66943+#endif
66944 seq_put_decimal_ull(m, ' ', esp);
66945 seq_put_decimal_ull(m, ' ', eip);
66946 /* The signal information here is obsolete.
66947@@ -528,7 +585,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66948 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
66949 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
66950
66951- if (mm && permitted) {
66952+ if (mm && permitted
66953+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66954+ && !PAX_RAND_FLAGS(mm)
66955+#endif
66956+ ) {
66957 seq_put_decimal_ull(m, ' ', mm->start_data);
66958 seq_put_decimal_ull(m, ' ', mm->end_data);
66959 seq_put_decimal_ull(m, ' ', mm->start_brk);
66960@@ -566,8 +627,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
66961 struct pid *pid, struct task_struct *task)
66962 {
66963 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
66964- struct mm_struct *mm = get_task_mm(task);
66965+ struct mm_struct *mm;
66966
66967+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66968+ if (current->exec_id != m->exec_id) {
66969+ gr_log_badprocpid("statm");
66970+ return 0;
66971+ }
66972+#endif
66973+ mm = get_task_mm(task);
66974 if (mm) {
66975 size = task_statm(mm, &shared, &text, &data, &resident);
66976 mmput(mm);
66977@@ -590,6 +658,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
66978 return 0;
66979 }
66980
66981+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66982+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
66983+{
66984+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
66985+}
66986+#endif
66987+
66988 #ifdef CONFIG_CHECKPOINT_RESTORE
66989 static struct pid *
66990 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
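
Two separate leak controls appear in fs/proc/array.c: PAX_RAND_FLAGS() zeroes layout-revealing fields (eip/esp/wchan, start/end_code, start_stack) for tasks whose address space is randomized, unless the reader is the task itself, and the exec_id comparison catches a /proc/<pid>/stat file descriptor that was opened before the process exec'd into something more privileged. exec_id is a per-task generation counter the patch adds elsewhere; a minimal sketch of the idea:

    /* Sketch: reject a proc file handle that predates an exec. */
    struct task   { unsigned long exec_id; };
    struct handle { unsigned long exec_id_at_open; };

    static int read_stat(const struct handle *h, const struct task *current_task)
    {
        if (current_task->exec_id != h->exec_id_at_open) {
            /* The fd was inherited across exec; refuse rather than let
             * the pre-exec opener read post-exec (possibly setuid) state. */
            return 0;  /* as in the patch: report nothing, not an error */
        }
        /* ... emit the real contents ... */
        return 1;
    }
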
66991diff --git a/fs/proc/base.c b/fs/proc/base.c
66992index 2d696b0..b9da447 100644
66993--- a/fs/proc/base.c
66994+++ b/fs/proc/base.c
66995@@ -113,6 +113,14 @@ struct pid_entry {
66996 union proc_op op;
66997 };
66998
66999+struct getdents_callback {
67000+ struct linux_dirent __user * current_dir;
67001+ struct linux_dirent __user * previous;
67002+ struct file * file;
67003+ int count;
67004+ int error;
67005+};
67006+
67007 #define NOD(NAME, MODE, IOP, FOP, OP) { \
67008 .name = (NAME), \
67009 .len = sizeof(NAME) - 1, \
67010@@ -205,12 +213,28 @@ static int proc_pid_cmdline(struct task_struct *task, char *buffer)
67011 return get_cmdline(task, buffer, PAGE_SIZE);
67012 }
67013
67014+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67015+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
67016+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
67017+ _mm->pax_flags & MF_PAX_SEGMEXEC))
67018+#endif
67019+
67020 static int proc_pid_auxv(struct task_struct *task, char *buffer)
67021 {
67022 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
67023 int res = PTR_ERR(mm);
67024 if (mm && !IS_ERR(mm)) {
67025 unsigned int nwords = 0;
67026+
67027+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67028+ /* allow if we're currently ptracing this task */
67029+ if (PAX_RAND_FLAGS(mm) &&
67030+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
67031+ mmput(mm);
67032+ return 0;
67033+ }
67034+#endif
67035+
67036 do {
67037 nwords += 2;
67038 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
67039@@ -224,7 +248,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
67040 }
67041
67042
67043-#ifdef CONFIG_KALLSYMS
67044+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67045 /*
67046 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
67047 * Returns the resolved symbol. If that fails, simply return the address.
67048@@ -263,7 +287,7 @@ static void unlock_trace(struct task_struct *task)
67049 mutex_unlock(&task->signal->cred_guard_mutex);
67050 }
67051
67052-#ifdef CONFIG_STACKTRACE
67053+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67054
67055 #define MAX_STACK_TRACE_DEPTH 64
67056
67057@@ -486,7 +510,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
67058 return count;
67059 }
67060
67061-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
67062+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
67063 static int proc_pid_syscall(struct task_struct *task, char *buffer)
67064 {
67065 long nr;
67066@@ -515,7 +539,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
67067 /************************************************************************/
67068
67069 /* permission checks */
67070-static int proc_fd_access_allowed(struct inode *inode)
67071+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
67072 {
67073 struct task_struct *task;
67074 int allowed = 0;
67075@@ -525,7 +549,10 @@ static int proc_fd_access_allowed(struct inode *inode)
67076 */
67077 task = get_proc_task(inode);
67078 if (task) {
67079- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
67080+ if (log)
67081+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
67082+ else
67083+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
67084 put_task_struct(task);
67085 }
67086 return allowed;
67087@@ -556,10 +583,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
67088 struct task_struct *task,
67089 int hide_pid_min)
67090 {
67091+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
67092+ return false;
67093+
67094+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67095+ rcu_read_lock();
67096+ {
67097+ const struct cred *tmpcred = current_cred();
67098+ const struct cred *cred = __task_cred(task);
67099+
67100+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
67101+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67102+ || in_group_p(grsec_proc_gid)
67103+#endif
67104+ ) {
67105+ rcu_read_unlock();
67106+ return true;
67107+ }
67108+ }
67109+ rcu_read_unlock();
67110+
67111+ if (!pid->hide_pid)
67112+ return false;
67113+#endif
67114+
67115 if (pid->hide_pid < hide_pid_min)
67116 return true;
67117 if (in_group_p(pid->pid_gid))
67118 return true;
67119+
67120 return ptrace_may_access(task, PTRACE_MODE_READ);
67121 }
67122
67123@@ -577,7 +629,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
67124 put_task_struct(task);
67125
67126 if (!has_perms) {
67127+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67128+ {
67129+#else
67130 if (pid->hide_pid == 2) {
67131+#endif
67132 /*
67133 * Let's make getdents(), stat(), and open()
67134 * consistent with each other. If a process
67135@@ -675,6 +731,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
67136 if (!task)
67137 return -ESRCH;
67138
67139+ if (gr_acl_handle_procpidmem(task)) {
67140+ put_task_struct(task);
67141+ return -EPERM;
67142+ }
67143+
67144 mm = mm_access(task, mode);
67145 put_task_struct(task);
67146
67147@@ -690,6 +751,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
67148
67149 file->private_data = mm;
67150
67151+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67152+ file->f_version = current->exec_id;
67153+#endif
67154+
67155 return 0;
67156 }
67157
67158@@ -711,6 +776,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
67159 ssize_t copied;
67160 char *page;
67161
67162+#ifdef CONFIG_GRKERNSEC
67163+ if (write)
67164+ return -EPERM;
67165+#endif
67166+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67167+ if (file->f_version != current->exec_id) {
67168+ gr_log_badprocpid("mem");
67169+ return 0;
67170+ }
67171+#endif
67172+
67173 if (!mm)
67174 return 0;
67175
67176@@ -723,7 +799,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
67177 goto free;
67178
67179 while (count > 0) {
67180- int this_len = min_t(int, count, PAGE_SIZE);
67181+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
67182
67183 if (write && copy_from_user(page, buf, this_len)) {
67184 copied = -EFAULT;
67185@@ -815,6 +891,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
67186 if (!mm)
67187 return 0;
67188
67189+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67190+ if (file->f_version != current->exec_id) {
67191+ gr_log_badprocpid("environ");
67192+ return 0;
67193+ }
67194+#endif
67195+
67196 page = (char *)__get_free_page(GFP_TEMPORARY);
67197 if (!page)
67198 return -ENOMEM;
67199@@ -824,7 +907,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
67200 goto free;
67201 while (count > 0) {
67202 size_t this_len, max_len;
67203- int retval;
67204+ ssize_t retval;
67205
67206 if (src >= (mm->env_end - mm->env_start))
67207 break;
67208@@ -1438,7 +1521,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
67209 int error = -EACCES;
67210
67211 /* Are we allowed to snoop on the tasks file descriptors? */
67212- if (!proc_fd_access_allowed(inode))
67213+ if (!proc_fd_access_allowed(inode, 0))
67214 goto out;
67215
67216 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
67217@@ -1482,8 +1565,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
67218 struct path path;
67219
67220 /* Are we allowed to snoop on the tasks file descriptors? */
67221- if (!proc_fd_access_allowed(inode))
67222- goto out;
67223+ /* logging this is needed for learning on chromium to work properly,
67224+ but we don't want to flood the logs from 'ps', which does a readlink
67225+ on /proc/fd/2 of every task in the listing, nor do we want 'ps' to learn
67226+ CAP_SYS_PTRACE, as it's not necessary for its basic functionality
67227+ */
67228+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
67229+ if (!proc_fd_access_allowed(inode, 0))
67230+ goto out;
67231+ } else {
67232+ if (!proc_fd_access_allowed(inode, 1))
67233+ goto out;
67234+ }
67235
67236 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
67237 if (error)
67238@@ -1533,7 +1626,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
67239 rcu_read_lock();
67240 cred = __task_cred(task);
67241 inode->i_uid = cred->euid;
67242+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67243+ inode->i_gid = grsec_proc_gid;
67244+#else
67245 inode->i_gid = cred->egid;
67246+#endif
67247 rcu_read_unlock();
67248 }
67249 security_task_to_inode(task, inode);
67250@@ -1569,10 +1666,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
67251 return -ENOENT;
67252 }
67253 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
67254+#ifdef CONFIG_GRKERNSEC_PROC_USER
67255+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
67256+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67257+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
67258+#endif
67259 task_dumpable(task)) {
67260 cred = __task_cred(task);
67261 stat->uid = cred->euid;
67262+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67263+ stat->gid = grsec_proc_gid;
67264+#else
67265 stat->gid = cred->egid;
67266+#endif
67267 }
67268 }
67269 rcu_read_unlock();
67270@@ -1610,11 +1716,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
67271
67272 if (task) {
67273 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
67274+#ifdef CONFIG_GRKERNSEC_PROC_USER
67275+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
67276+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67277+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
67278+#endif
67279 task_dumpable(task)) {
67280 rcu_read_lock();
67281 cred = __task_cred(task);
67282 inode->i_uid = cred->euid;
67283+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67284+ inode->i_gid = grsec_proc_gid;
67285+#else
67286 inode->i_gid = cred->egid;
67287+#endif
67288 rcu_read_unlock();
67289 } else {
67290 inode->i_uid = GLOBAL_ROOT_UID;
67291@@ -2149,6 +2264,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
67292 if (!task)
67293 goto out_no_task;
67294
67295+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
67296+ goto out;
67297+
67298 /*
67299 * Yes, it does not scale. And it should not. Don't add
67300 * new entries into /proc/<tgid>/ without very good reasons.
67301@@ -2179,6 +2297,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
67302 if (!task)
67303 return -ENOENT;
67304
67305+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
67306+ goto out;
67307+
67308 if (!dir_emit_dots(file, ctx))
67309 goto out;
67310
67311@@ -2568,7 +2689,7 @@ static const struct pid_entry tgid_base_stuff[] = {
67312 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
67313 #endif
67314 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
67315-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
67316+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
67317 INF("syscall", S_IRUSR, proc_pid_syscall),
67318 #endif
67319 INF("cmdline", S_IRUGO, proc_pid_cmdline),
67320@@ -2593,10 +2714,10 @@ static const struct pid_entry tgid_base_stuff[] = {
67321 #ifdef CONFIG_SECURITY
67322 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
67323 #endif
67324-#ifdef CONFIG_KALLSYMS
67325+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67326 INF("wchan", S_IRUGO, proc_pid_wchan),
67327 #endif
67328-#ifdef CONFIG_STACKTRACE
67329+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67330 ONE("stack", S_IRUSR, proc_pid_stack),
67331 #endif
67332 #ifdef CONFIG_SCHEDSTATS
67333@@ -2630,6 +2751,9 @@ static const struct pid_entry tgid_base_stuff[] = {
67334 #ifdef CONFIG_HARDWALL
67335 INF("hardwall", S_IRUGO, proc_pid_hardwall),
67336 #endif
67337+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
67338+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
67339+#endif
67340 #ifdef CONFIG_USER_NS
67341 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
67342 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
67343@@ -2760,7 +2884,14 @@ static int proc_pid_instantiate(struct inode *dir,
67344 if (!inode)
67345 goto out;
67346
67347+#ifdef CONFIG_GRKERNSEC_PROC_USER
67348+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
67349+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67350+ inode->i_gid = grsec_proc_gid;
67351+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
67352+#else
67353 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
67354+#endif
67355 inode->i_op = &proc_tgid_base_inode_operations;
67356 inode->i_fop = &proc_tgid_base_operations;
67357 inode->i_flags|=S_IMMUTABLE;
67358@@ -2798,7 +2929,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
67359 if (!task)
67360 goto out;
67361
67362+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
67363+ goto out_put_task;
67364+
67365 result = proc_pid_instantiate(dir, dentry, task, NULL);
67366+out_put_task:
67367 put_task_struct(task);
67368 out:
67369 return ERR_PTR(result);
67370@@ -2904,7 +3039,7 @@ static const struct pid_entry tid_base_stuff[] = {
67371 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
67372 #endif
67373 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
67374-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
67375+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
67376 INF("syscall", S_IRUSR, proc_pid_syscall),
67377 #endif
67378 INF("cmdline", S_IRUGO, proc_pid_cmdline),
67379@@ -2931,10 +3066,10 @@ static const struct pid_entry tid_base_stuff[] = {
67380 #ifdef CONFIG_SECURITY
67381 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
67382 #endif
67383-#ifdef CONFIG_KALLSYMS
67384+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67385 INF("wchan", S_IRUGO, proc_pid_wchan),
67386 #endif
67387-#ifdef CONFIG_STACKTRACE
67388+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67389 ONE("stack", S_IRUSR, proc_pid_stack),
67390 #endif
67391 #ifdef CONFIG_SCHEDSTATS
67392diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
67393index cbd82df..c0407d2 100644
67394--- a/fs/proc/cmdline.c
67395+++ b/fs/proc/cmdline.c
67396@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
67397
67398 static int __init proc_cmdline_init(void)
67399 {
67400+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67401+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
67402+#else
67403 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
67404+#endif
67405 return 0;
67406 }
67407 fs_initcall(proc_cmdline_init);
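
proc_create_grsec() is defined elsewhere in this patch; judging from the call sites here and in devices.c and interrupts.c below, it behaves like proc_create() but registers the entry with restricted ownership/mode when GRKERNSEC_PROC_ADD is active. A hedged sketch of what such a wrapper could look like; the name, signature, and gid handling of the real helper are not shown in this section, so treat the details as assumptions:

    /* Hypothetical sketch of a proc_create_grsec()-style wrapper; the
     * real helper lives elsewhere in the patch and may differ. */
    #include <linux/proc_fs.h>
    #include <linux/stat.h>

    static struct proc_dir_entry *proc_create_restricted(
            const char *name, umode_t mode, struct proc_dir_entry *parent,
            const struct file_operations *fops)
    {
        /* mode 0 defaults to world-readable in proc_create(); force a
         * root-only mode instead so the entry isn't globally visible. */
        if (mode == 0)
            mode = S_IRUSR;
        return proc_create(name, mode, parent, fops);
    }
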
67408diff --git a/fs/proc/devices.c b/fs/proc/devices.c
67409index 50493ed..248166b 100644
67410--- a/fs/proc/devices.c
67411+++ b/fs/proc/devices.c
67412@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
67413
67414 static int __init proc_devices_init(void)
67415 {
67416+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67417+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
67418+#else
67419 proc_create("devices", 0, NULL, &proc_devinfo_operations);
67420+#endif
67421 return 0;
67422 }
67423 fs_initcall(proc_devices_init);
67424diff --git a/fs/proc/fd.c b/fs/proc/fd.c
67425index 0788d09..9cc1385 100644
67426--- a/fs/proc/fd.c
67427+++ b/fs/proc/fd.c
67428@@ -26,7 +26,8 @@ static int seq_show(struct seq_file *m, void *v)
67429 if (!task)
67430 return -ENOENT;
67431
67432- files = get_files_struct(task);
67433+ if (!gr_acl_handle_procpidmem(task))
67434+ files = get_files_struct(task);
67435 put_task_struct(task);
67436
67437 if (files) {
67438@@ -285,11 +286,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
67439 */
67440 int proc_fd_permission(struct inode *inode, int mask)
67441 {
67442+ struct task_struct *task;
67443 int rv = generic_permission(inode, mask);
67444- if (rv == 0)
67445- return 0;
67446+
67447 if (task_tgid(current) == proc_pid(inode))
67448 rv = 0;
67449+
67450+ task = get_proc_task(inode);
67451+ if (task == NULL)
67452+ return rv;
67453+
67454+ if (gr_acl_handle_procpidmem(task))
67455+ rv = -EACCES;
67456+
67457+ put_task_struct(task);
67458+
67459 return rv;
67460 }
67461
67462diff --git a/fs/proc/generic.c b/fs/proc/generic.c
67463index b7f268e..3bea6b7 100644
67464--- a/fs/proc/generic.c
67465+++ b/fs/proc/generic.c
67466@@ -23,6 +23,7 @@
67467 #include <linux/bitops.h>
67468 #include <linux/spinlock.h>
67469 #include <linux/completion.h>
67470+#include <linux/grsecurity.h>
67471 #include <asm/uaccess.h>
67472
67473 #include "internal.h"
67474@@ -207,6 +208,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
67475 return proc_lookup_de(PDE(dir), dir, dentry);
67476 }
67477
67478+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
67479+ unsigned int flags)
67480+{
67481+ if (gr_proc_is_restricted())
67482+ return ERR_PTR(-EACCES);
67483+
67484+ return proc_lookup_de(PDE(dir), dir, dentry);
67485+}
67486+
67487 /*
67488 * This returns non-zero if at EOF, so that the /proc
67489 * root directory can use this and check if it should
67490@@ -264,6 +274,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
67491 return proc_readdir_de(PDE(inode), file, ctx);
67492 }
67493
67494+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
67495+{
67496+ struct inode *inode = file_inode(file);
67497+
67498+ if (gr_proc_is_restricted())
67499+ return -EACCES;
67500+
67501+ return proc_readdir_de(PDE(inode), file, ctx);
67502+}
67503+
67504 /*
67505 * These are the generic /proc directory operations. They
67506 * use the in-memory "struct proc_dir_entry" tree to parse
67507@@ -275,6 +295,12 @@ static const struct file_operations proc_dir_operations = {
67508 .iterate = proc_readdir,
67509 };
67510
67511+static const struct file_operations proc_dir_restricted_operations = {
67512+ .llseek = generic_file_llseek,
67513+ .read = generic_read_dir,
67514+ .iterate = proc_readdir_restrict,
67515+};
67516+
67517 /*
67518 * proc directories can do almost nothing..
67519 */
67520@@ -284,6 +310,12 @@ static const struct inode_operations proc_dir_inode_operations = {
67521 .setattr = proc_notify_change,
67522 };
67523
67524+static const struct inode_operations proc_dir_restricted_inode_operations = {
67525+ .lookup = proc_lookup_restrict,
67526+ .getattr = proc_getattr,
67527+ .setattr = proc_notify_change,
67528+};
67529+
67530 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
67531 {
67532 struct proc_dir_entry *tmp;
67533@@ -294,8 +326,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
67534 return ret;
67535
67536 if (S_ISDIR(dp->mode)) {
67537- dp->proc_fops = &proc_dir_operations;
67538- dp->proc_iops = &proc_dir_inode_operations;
67539+ if (dp->restricted) {
67540+ dp->proc_fops = &proc_dir_restricted_operations;
67541+ dp->proc_iops = &proc_dir_restricted_inode_operations;
67542+ } else {
67543+ dp->proc_fops = &proc_dir_operations;
67544+ dp->proc_iops = &proc_dir_inode_operations;
67545+ }
67546 dir->nlink++;
67547 } else if (S_ISLNK(dp->mode)) {
67548 dp->proc_iops = &proc_link_inode_operations;
67549@@ -407,6 +444,27 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
67550 }
67551 EXPORT_SYMBOL_GPL(proc_mkdir_data);
67552
67553+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
67554+ struct proc_dir_entry *parent, void *data)
67555+{
67556+ struct proc_dir_entry *ent;
67557+
67558+ if (mode == 0)
67559+ mode = S_IRUGO | S_IXUGO;
67560+
67561+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
67562+ if (ent) {
67563+ ent->data = data;
67564+ ent->restricted = 1;
67565+ if (proc_register(parent, ent) < 0) {
67566+ kfree(ent);
67567+ ent = NULL;
67568+ }
67569+ }
67570+ return ent;
67571+}
67572+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
67573+
67574 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
67575 struct proc_dir_entry *parent)
67576 {
67577@@ -421,6 +479,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
67578 }
67579 EXPORT_SYMBOL(proc_mkdir);
67580
67581+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
67582+ struct proc_dir_entry *parent)
67583+{
67584+ return proc_mkdir_data_restrict(name, 0, parent, NULL);
67585+}
67586+EXPORT_SYMBOL(proc_mkdir_restrict);
67587+
67588 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
67589 struct proc_dir_entry *parent,
67590 const struct file_operations *proc_fops,
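
proc_mkdir_data_restrict() is the directory-level counterpart: it sets the new `restricted` flag on the proc_dir_entry so proc_register() wires in lookup/iterate operations that consult gr_proc_is_restricted() before delegating to the normal code. Usage is identical to proc_mkdir(); a sketch of how a subsystem would create such a directory, using only the helpers exported above:

    /* Sketch: creating a restricted proc directory with the new helper. */
    #include <linux/proc_fs.h>
    #include <linux/init.h>
    #include <linux/errno.h>

    static struct proc_dir_entry *example_dir;

    static int __init example_init(void)
    {
        /* Unprivileged readers get -EACCES from lookup/readdir when the
         * grsecurity proc restriction applies; others proceed as usual. */
        example_dir = proc_mkdir_restrict("example", NULL);
        return example_dir ? 0 : -ENOMEM;
    }
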
67591diff --git a/fs/proc/inode.c b/fs/proc/inode.c
67592index 0adbc02..bee4d0b 100644
67593--- a/fs/proc/inode.c
67594+++ b/fs/proc/inode.c
67595@@ -23,11 +23,17 @@
67596 #include <linux/slab.h>
67597 #include <linux/mount.h>
67598 #include <linux/magic.h>
67599+#include <linux/grsecurity.h>
67600
67601 #include <asm/uaccess.h>
67602
67603 #include "internal.h"
67604
67605+#ifdef CONFIG_PROC_SYSCTL
67606+extern const struct inode_operations proc_sys_inode_operations;
67607+extern const struct inode_operations proc_sys_dir_operations;
67608+#endif
67609+
67610 static void proc_evict_inode(struct inode *inode)
67611 {
67612 struct proc_dir_entry *de;
67613@@ -55,6 +61,13 @@ static void proc_evict_inode(struct inode *inode)
67614 ns = PROC_I(inode)->ns.ns;
67615 if (ns_ops && ns)
67616 ns_ops->put(ns);
67617+
67618+#ifdef CONFIG_PROC_SYSCTL
67619+ if (inode->i_op == &proc_sys_inode_operations ||
67620+ inode->i_op == &proc_sys_dir_operations)
67621+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
67622+#endif
67623+
67624 }
67625
67626 static struct kmem_cache * proc_inode_cachep;
67627@@ -413,7 +426,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
67628 if (de->mode) {
67629 inode->i_mode = de->mode;
67630 inode->i_uid = de->uid;
67631+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67632+ inode->i_gid = grsec_proc_gid;
67633+#else
67634 inode->i_gid = de->gid;
67635+#endif
67636 }
67637 if (de->size)
67638 inode->i_size = de->size;
67639diff --git a/fs/proc/internal.h b/fs/proc/internal.h
67640index 3ab6d14..b26174e 100644
67641--- a/fs/proc/internal.h
67642+++ b/fs/proc/internal.h
67643@@ -46,9 +46,10 @@ struct proc_dir_entry {
67644 struct completion *pde_unload_completion;
67645 struct list_head pde_openers; /* who did ->open, but not ->release */
67646 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
67647+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
67648 u8 namelen;
67649 char name[];
67650-};
67651+} __randomize_layout;
67652
67653 union proc_op {
67654 int (*proc_get_link)(struct dentry *, struct path *);
67655@@ -67,7 +68,7 @@ struct proc_inode {
67656 struct ctl_table *sysctl_entry;
67657 struct proc_ns ns;
67658 struct inode vfs_inode;
67659-};
67660+} __randomize_layout;
67661
67662 /*
67663 * General functions
67664@@ -155,6 +156,9 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
67665 struct pid *, struct task_struct *);
67666 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
67667 struct pid *, struct task_struct *);
67668+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
67669+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
67670+#endif
67671
67672 /*
67673 * base.c
67674@@ -181,9 +185,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
67675 extern spinlock_t proc_subdir_lock;
67676
67677 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
67678+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
67679 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
67680 struct dentry *);
67681 extern int proc_readdir(struct file *, struct dir_context *);
67682+extern int proc_readdir_restrict(struct file *, struct dir_context *);
67683 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
67684
67685 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
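
Tagging struct proc_dir_entry and struct proc_inode with __randomize_layout hands them to the RANDSTRUCT gcc plugin, which shuffles member order at build time so exploits cannot rely on fixed offsets; structs initialized positionally or shared with userspace have to be excluded. A sketch of how such an annotation degrades gracefully when the plugin is absent (the fallback definition is an assumption, not the patch's):

    /* Sketch: the attribute is a no-op unless the randstruct plugin defines it. */
    #ifndef __randomize_layout
    #define __randomize_layout   /* plugin absent: keep declared order */
    #endif

    struct example {
        void *ops;
        unsigned long flags;
        char name[16];
    } __randomize_layout;  /* member offsets become build-specific */
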
67686diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
67687index a352d57..cb94a5c 100644
67688--- a/fs/proc/interrupts.c
67689+++ b/fs/proc/interrupts.c
67690@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
67691
67692 static int __init proc_interrupts_init(void)
67693 {
67694+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67695+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
67696+#else
67697 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
67698+#endif
67699 return 0;
67700 }
67701 fs_initcall(proc_interrupts_init);
67702diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
67703index 39e6ef3..2f9cb5e 100644
67704--- a/fs/proc/kcore.c
67705+++ b/fs/proc/kcore.c
67706@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
67707 * the addresses in the elf_phdr on our list.
67708 */
67709 start = kc_offset_to_vaddr(*fpos - elf_buflen);
67710- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
67711+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
67712+ if (tsz > buflen)
67713 tsz = buflen;
67714-
67715+
67716 while (buflen) {
67717 struct kcore_list *m;
67718
67719@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
67720 kfree(elf_buf);
67721 } else {
67722 if (kern_addr_valid(start)) {
67723- unsigned long n;
67724+ char *elf_buf;
67725+ mm_segment_t oldfs;
67726
67727- n = copy_to_user(buffer, (char *)start, tsz);
67728- /*
67729- * We cannot distinguish between fault on source
67730- * and fault on destination. When this happens
67731- * we clear too and hope it will trigger the
67732- * EFAULT again.
67733- */
67734- if (n) {
67735- if (clear_user(buffer + tsz - n,
67736- n))
67737+ elf_buf = kmalloc(tsz, GFP_KERNEL);
67738+ if (!elf_buf)
67739+ return -ENOMEM;
67740+ oldfs = get_fs();
67741+ set_fs(KERNEL_DS);
67742+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
67743+ set_fs(oldfs);
67744+ if (copy_to_user(buffer, elf_buf, tsz)) {
67745+ kfree(elf_buf);
67746 return -EFAULT;
67747+ }
67748 }
67749+ set_fs(oldfs);
67750+ kfree(elf_buf);
67751 } else {
67752 if (clear_user(buffer, tsz))
67753 return -EFAULT;
67754@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
67755
67756 static int open_kcore(struct inode *inode, struct file *filp)
67757 {
67758+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
67759+ return -EPERM;
67760+#endif
67761 if (!capable(CAP_SYS_RAWIO))
67762 return -EPERM;
67763 if (kcore_need_update)
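
The read_kcore() rewrite (the first hunk merely splits an assignment out of the if-condition) stages the data through a kmalloc'd bounce buffer: under set_fs(KERNEL_DS), __copy_from_user() can fault gracefully on the source kernel address, and the subsequent copy_to_user() then runs from a known, exactly-sized heap object, which the hardened usercopy checks in this tree can validate. open_kcore() additionally refuses outright under PROC_ADD/HIDESYM. A plain-C sketch of the bounce-buffer shape, with kread/kwrite as illustrative stand-ins for the kernel primitives:

    #include <stdlib.h>
    #include <string.h>
    #include <errno.h>

    static int kread(void *dst, const void *src, size_t n)  { memcpy(dst, src, n); return 0; }
    static int kwrite(void *dst, const void *src, size_t n) { memcpy(dst, src, n); return 0; }

    static int copy_region(void *user_buf, const void *kaddr, size_t len)
    {
        /* Stage through a private buffer so the user-facing copy never
         * faults on the *source* side, and so a checked copy routine can
         * validate the destination object's exact size. */
        void *bounce = malloc(len);
        if (!bounce)
            return -ENOMEM;
        if (kread(bounce, kaddr, len) || kwrite(user_buf, bounce, len)) {
            free(bounce);
            return -EFAULT;
        }
        free(bounce);
        return 0;
    }
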
67764diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
67765index 7445af0..7c5113c 100644
67766--- a/fs/proc/meminfo.c
67767+++ b/fs/proc/meminfo.c
67768@@ -187,7 +187,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
67769 vmi.used >> 10,
67770 vmi.largest_chunk >> 10
67771 #ifdef CONFIG_MEMORY_FAILURE
67772- ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
67773+ ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
67774 #endif
67775 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
67776 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
67777diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
67778index d4a3574..b421ce9 100644
67779--- a/fs/proc/nommu.c
67780+++ b/fs/proc/nommu.c
67781@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
67782
67783 if (file) {
67784 seq_pad(m, ' ');
67785- seq_path(m, &file->f_path, "");
67786+ seq_path(m, &file->f_path, "\n\\");
67787 }
67788
67789 seq_putc(m, '\n');
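
The third argument to seq_path() is the set of characters to escape; "" escapes nothing, so a file or mount name containing a newline could inject a fabricated extra record into output that parsers assume is one record per line. Escaping "\n\\" closes that spoofing hole (the backslash itself must be escaped so the encoding stays unambiguous). A tiny sketch of the escaping idea:

    /* Sketch: newline-escaping keeps one record per line. */
    #include <stdio.h>

    static void emit_escaped(const char *s)
    {
        for (; *s; s++) {
            if (*s == '\n')      fputs("\\n", stdout);
            else if (*s == '\\') fputs("\\\\", stdout);
            else                 putchar(*s);
        }
        putchar('\n');
    }

    int main(void)
    {
        /* Without escaping, this name would fake a second record. */
        emit_escaped("evil\n/dev/fake  rw");
        return 0;
    }
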
67790diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
67791index 4677bb7..dad3045 100644
67792--- a/fs/proc/proc_net.c
67793+++ b/fs/proc/proc_net.c
67794@@ -23,9 +23,27 @@
67795 #include <linux/nsproxy.h>
67796 #include <net/net_namespace.h>
67797 #include <linux/seq_file.h>
67798+#include <linux/grsecurity.h>
67799
67800 #include "internal.h"
67801
67802+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
67803+static struct seq_operations *ipv6_seq_ops_addr;
67804+
67805+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
67806+{
67807+ ipv6_seq_ops_addr = addr;
67808+}
67809+
67810+void unregister_ipv6_seq_ops_addr(void)
67811+{
67812+ ipv6_seq_ops_addr = NULL;
67813+}
67814+
67815+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
67816+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
67817+#endif
67818+
67819 static inline struct net *PDE_NET(struct proc_dir_entry *pde)
67820 {
67821 return pde->parent->data;
67822@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
67823 return maybe_get_net(PDE_NET(PDE(inode)));
67824 }
67825
67826+extern const struct seq_operations dev_seq_ops;
67827+
67828 int seq_open_net(struct inode *ino, struct file *f,
67829 const struct seq_operations *ops, int size)
67830 {
67831@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
67832
67833 BUG_ON(size < sizeof(*p));
67834
67835+ /* only permit access to /proc/net/dev */
67836+ if (
67837+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
67838+ ops != ipv6_seq_ops_addr &&
67839+#endif
67840+ ops != &dev_seq_ops && gr_proc_is_restricted())
67841+ return -EACCES;
67842+
67843 net = get_proc_net(ino);
67844 if (net == NULL)
67845 return -ENXIO;
67846@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
67847 int err;
67848 struct net *net;
67849
67850+ if (gr_proc_is_restricted())
67851+ return -EACCES;
67852+
67853 err = -ENXIO;
67854 net = get_proc_net(inode);
67855 if (net == NULL)
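
The /proc/net restriction works by identity, not by name: seq_open_net() compares the caller's seq_operations pointer against a short whitelist (dev_seq_ops, plus the ipv6 address ops registered dynamically because ipv6 can be modular) and rejects everything else for restricted readers. Comparing ops pointers is a cheap way to answer "which file is this?" without plumbing a flag through every registration site. A sketch of the pointer-whitelist idea:

    /* Sketch: gate by ops identity rather than by file name. */
    #include <stddef.h>
    #include <stdbool.h>

    struct seq_ops { int (*show)(void *); };

    static const struct seq_ops dev_ops = { NULL };
    static const struct seq_ops *dyn_allowed;   /* registered by a module */

    static bool open_allowed(const struct seq_ops *ops, bool restricted)
    {
        if (!restricted)
            return true;
        /* Whitelist by address: only the known-safe tables pass. */
        return ops == &dev_ops || (dyn_allowed && ops == dyn_allowed);
    }
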
67856diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
67857index 7129046..6914844 100644
67858--- a/fs/proc/proc_sysctl.c
67859+++ b/fs/proc/proc_sysctl.c
67860@@ -11,13 +11,21 @@
67861 #include <linux/namei.h>
67862 #include <linux/mm.h>
67863 #include <linux/module.h>
67864+#include <linux/nsproxy.h>
67865+#ifdef CONFIG_GRKERNSEC
67866+#include <net/net_namespace.h>
67867+#endif
67868 #include "internal.h"
67869
67870+extern int gr_handle_chroot_sysctl(const int op);
67871+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
67872+ const int op);
67873+
67874 static const struct dentry_operations proc_sys_dentry_operations;
67875 static const struct file_operations proc_sys_file_operations;
67876-static const struct inode_operations proc_sys_inode_operations;
67877+const struct inode_operations proc_sys_inode_operations;
67878 static const struct file_operations proc_sys_dir_file_operations;
67879-static const struct inode_operations proc_sys_dir_operations;
67880+const struct inode_operations proc_sys_dir_operations;
67881
67882 void proc_sys_poll_notify(struct ctl_table_poll *poll)
67883 {
67884@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
67885
67886 err = NULL;
67887 d_set_d_op(dentry, &proc_sys_dentry_operations);
67888+
67889+ gr_handle_proc_create(dentry, inode);
67890+
67891 d_add(dentry, inode);
67892
67893 out:
67894@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
67895 struct inode *inode = file_inode(filp);
67896 struct ctl_table_header *head = grab_header(inode);
67897 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
67898+ int op = write ? MAY_WRITE : MAY_READ;
67899 ssize_t error;
67900 size_t res;
67901
67902@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
67903 * and won't be until we finish.
67904 */
67905 error = -EPERM;
67906- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
67907+ if (sysctl_perm(head, table, op))
67908 goto out;
67909
67910 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
67911@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
67912 if (!table->proc_handler)
67913 goto out;
67914
67915+#ifdef CONFIG_GRKERNSEC
67916+ error = -EPERM;
67917+ if (gr_handle_chroot_sysctl(op))
67918+ goto out;
67919+ dget(filp->f_path.dentry);
67920+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
67921+ dput(filp->f_path.dentry);
67922+ goto out;
67923+ }
67924+ dput(filp->f_path.dentry);
67925+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
67926+ goto out;
67927+ if (write) {
67928+ if (current->nsproxy->net_ns != table->extra2) {
67929+ if (!capable(CAP_SYS_ADMIN))
67930+ goto out;
67931+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
67932+ goto out;
67933+ }
67934+#endif
67935+
67936 /* careful: calling conventions are nasty here */
67937 res = count;
67938 error = table->proc_handler(table, write, buf, &res, ppos);
67939@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
67940 return false;
67941 } else {
67942 d_set_d_op(child, &proc_sys_dentry_operations);
67943+
67944+ gr_handle_proc_create(child, inode);
67945+
67946 d_add(child, inode);
67947 }
67948 } else {
67949@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
67950 if ((*pos)++ < ctx->pos)
67951 return true;
67952
67953+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
67954+ return 0;
67955+
67956 if (unlikely(S_ISLNK(table->mode)))
67957 res = proc_sys_link_fill_cache(file, ctx, head, table);
67958 else
67959@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
67960 if (IS_ERR(head))
67961 return PTR_ERR(head);
67962
67963+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
67964+ return -ENOENT;
67965+
67966 generic_fillattr(inode, stat);
67967 if (table)
67968 stat->mode = (stat->mode & S_IFMT) | table->mode;
67969@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
67970 .llseek = generic_file_llseek,
67971 };
67972
67973-static const struct inode_operations proc_sys_inode_operations = {
67974+const struct inode_operations proc_sys_inode_operations = {
67975 .permission = proc_sys_permission,
67976 .setattr = proc_sys_setattr,
67977 .getattr = proc_sys_getattr,
67978 };
67979
67980-static const struct inode_operations proc_sys_dir_operations = {
67981+const struct inode_operations proc_sys_dir_operations = {
67982 .lookup = proc_sys_lookup,
67983 .permission = proc_sys_permission,
67984 .setattr = proc_sys_setattr,
67985@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
67986 static struct ctl_dir *new_dir(struct ctl_table_set *set,
67987 const char *name, int namelen)
67988 {
67989- struct ctl_table *table;
67990+ ctl_table_no_const *table;
67991 struct ctl_dir *new;
67992 struct ctl_node *node;
67993 char *new_name;
67994@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
67995 return NULL;
67996
67997 node = (struct ctl_node *)(new + 1);
67998- table = (struct ctl_table *)(node + 1);
67999+ table = (ctl_table_no_const *)(node + 1);
68000 new_name = (char *)(table + 2);
68001 memcpy(new_name, name, namelen);
68002 new_name[namelen] = '\0';
68003@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
68004 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
68005 struct ctl_table_root *link_root)
68006 {
68007- struct ctl_table *link_table, *entry, *link;
68008+ ctl_table_no_const *link_table, *link;
68009+ struct ctl_table *entry;
68010 struct ctl_table_header *links;
68011 struct ctl_node *node;
68012 char *link_name;
68013@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
68014 return NULL;
68015
68016 node = (struct ctl_node *)(links + 1);
68017- link_table = (struct ctl_table *)(node + nr_entries);
68018+ link_table = (ctl_table_no_const *)(node + nr_entries);
68019 link_name = (char *)&link_table[nr_entries + 1];
68020
68021 for (link = link_table, entry = table; entry->procname; link++, entry++) {
68022@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
68023 struct ctl_table_header ***subheader, struct ctl_table_set *set,
68024 struct ctl_table *table)
68025 {
68026- struct ctl_table *ctl_table_arg = NULL;
68027- struct ctl_table *entry, *files;
68028+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
68029+ struct ctl_table *entry;
68030 int nr_files = 0;
68031 int nr_dirs = 0;
68032 int err = -ENOMEM;
68033@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
68034 nr_files++;
68035 }
68036
68037- files = table;
68038 /* If there are mixed files and directories we need a new table */
68039 if (nr_dirs && nr_files) {
68040- struct ctl_table *new;
68041+ ctl_table_no_const *new;
68042 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
68043 GFP_KERNEL);
68044 if (!files)
68045@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
68046 /* Register everything except a directory full of subdirectories */
68047 if (nr_files || !nr_dirs) {
68048 struct ctl_table_header *header;
68049- header = __register_sysctl_table(set, path, files);
68050+ header = __register_sysctl_table(set, path, files ? files : table);
68051 if (!header) {
68052 kfree(ctl_table_arg);
68053 goto out;
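
The proc_sysctl.c write path above now refuses modification unless the writer holds CAP_SYS_ADMIN (for tables outside its network namespace) or CAP_NET_ADMIN in the owning namespace, on top of the chroot and ACL hooks. A hedged userspace exercise of the patched handler; ip_forward is just a standard net sysctl chosen for illustration:

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* A net sysctl served by the patched proc_sys_call_handler(). */
		char buf[16];
		int fd = open("/proc/sys/net/ipv4/ip_forward", O_RDONLY);
		ssize_t n;

		if (fd < 0) {
			printf("open: %s\n", strerror(errno));
			return 1;
		}
		/* Reads pass the MAY_READ checks; writes additionally require
		 * CAP_NET_ADMIN in the owning namespace (or CAP_SYS_ADMIN from
		 * outside it) under the hunk above. */
		n = read(fd, buf, sizeof(buf) - 1);
		if (n < 0)
			printf("read: %s\n", strerror(errno));	/* EPERM/EACCES when denied */
		else {
			buf[n] = '\0';
			printf("ip_forward = %s", buf);
		}
		close(fd);
		return 0;
	}
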
68054diff --git a/fs/proc/root.c b/fs/proc/root.c
68055index 5dbadec..473af2f 100644
68056--- a/fs/proc/root.c
68057+++ b/fs/proc/root.c
68058@@ -185,7 +185,15 @@ void __init proc_root_init(void)
68059 proc_mkdir("openprom", NULL);
68060 #endif
68061 proc_tty_init();
68062+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68063+#ifdef CONFIG_GRKERNSEC_PROC_USER
68064+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
68065+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68066+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
68067+#endif
68068+#else
68069 proc_mkdir("bus", NULL);
68070+#endif
68071 proc_sys_init();
68072 }
68073
68074diff --git a/fs/proc/stat.c b/fs/proc/stat.c
68075index bf2d03f..f058f9c 100644
68076--- a/fs/proc/stat.c
68077+++ b/fs/proc/stat.c
68078@@ -11,6 +11,7 @@
68079 #include <linux/irqnr.h>
68080 #include <linux/cputime.h>
68081 #include <linux/tick.h>
68082+#include <linux/grsecurity.h>
68083
68084 #ifndef arch_irq_stat_cpu
68085 #define arch_irq_stat_cpu(cpu) 0
68086@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
68087 u64 sum_softirq = 0;
68088 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
68089 struct timespec boottime;
68090+ int unrestricted = 1;
68091+
68092+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68093+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68094+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
68095+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
68096+ && !in_group_p(grsec_proc_gid)
68097+#endif
68098+ )
68099+ unrestricted = 0;
68100+#endif
68101+#endif
68102
68103 user = nice = system = idle = iowait =
68104 irq = softirq = steal = 0;
68105@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
68106 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
68107 system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
68108 idle += get_idle_time(i);
68109- iowait += get_iowait_time(i);
68110- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
68111- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
68112- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
68113- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
68114- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
68115- sum += kstat_cpu_irqs_sum(i);
68116- sum += arch_irq_stat_cpu(i);
68117+ if (unrestricted) {
68118+ iowait += get_iowait_time(i);
68119+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
68120+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
68121+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
68122+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
68123+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
68124+ sum += kstat_cpu_irqs_sum(i);
68125+ sum += arch_irq_stat_cpu(i);
68126+ for (j = 0; j < NR_SOFTIRQS; j++) {
68127+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
68128
68129- for (j = 0; j < NR_SOFTIRQS; j++) {
68130- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
68131-
68132- per_softirq_sums[j] += softirq_stat;
68133- sum_softirq += softirq_stat;
68134+ per_softirq_sums[j] += softirq_stat;
68135+ sum_softirq += softirq_stat;
68136+ }
68137 }
68138 }
68139- sum += arch_irq_stat();
68140+ if (unrestricted)
68141+ sum += arch_irq_stat();
68142
68143 seq_puts(p, "cpu ");
68144 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
68145@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
68146 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
68147 system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
68148 idle = get_idle_time(i);
68149- iowait = get_iowait_time(i);
68150- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
68151- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
68152- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
68153- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
68154- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
68155+ if (unrestricted) {
68156+ iowait = get_iowait_time(i);
68157+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
68158+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
68159+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
68160+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
68161+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
68162+ }
68163 seq_printf(p, "cpu%d", i);
68164 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
68165 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
68166@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
68167
68168 /* sum again ? it could be updated? */
68169 for_each_irq_nr(j)
68170- seq_put_decimal_ull(p, ' ', kstat_irqs(j));
68171+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs(j) : 0ULL);
68172
68173 seq_printf(p,
68174 "\nctxt %llu\n"
68175@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
68176 "processes %lu\n"
68177 "procs_running %lu\n"
68178 "procs_blocked %lu\n",
68179- nr_context_switches(),
68180+ unrestricted ? nr_context_switches() : 0ULL,
68181 (unsigned long)jif,
68182- total_forks,
68183- nr_running(),
68184- nr_iowait());
68185+ unrestricted ? total_forks : 0UL,
68186+ unrestricted ? nr_running() : 0UL,
68187+ unrestricted ? nr_iowait() : 0UL);
68188
68189 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
68190
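
With the gating above, an unrestricted reader of /proc/stat sees the real counters while a restricted one sees zeros in the iowait/irq/softirq/steal/guest columns and in ctxt, processes, procs_running and procs_blocked. A small parser of the aggregate "cpu" line for checking which case applies (field layout per proc(5); values are USER_HZ ticks):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long user, nice, system, idle, iowait, irq, softirq, steal;
		FILE *f = fopen("/proc/stat", "r");

		if (!f)
			return 1;
		/* First line: counters summed across all CPUs, in USER_HZ ticks. */
		if (fscanf(f, "cpu %llu %llu %llu %llu %llu %llu %llu %llu",
			   &user, &nice, &system, &idle, &iowait, &irq,
			   &softirq, &steal) == 8)
			printf("idle=%llu iowait=%llu irq=%llu softirq=%llu\n",
			       idle, iowait, irq, softirq);
		fclose(f);
		return 0;
	}
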
68191diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
68192index cfa63ee..fce112e 100644
68193--- a/fs/proc/task_mmu.c
68194+++ b/fs/proc/task_mmu.c
68195@@ -13,12 +13,19 @@
68196 #include <linux/swap.h>
68197 #include <linux/swapops.h>
68198 #include <linux/mmu_notifier.h>
68199+#include <linux/grsecurity.h>
68200
68201 #include <asm/elf.h>
68202 #include <asm/uaccess.h>
68203 #include <asm/tlbflush.h>
68204 #include "internal.h"
68205
68206+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68207+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
68208+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
68209+ _mm->pax_flags & MF_PAX_SEGMEXEC))
68210+#endif
68211+
68212 void task_mem(struct seq_file *m, struct mm_struct *mm)
68213 {
68214 unsigned long data, text, lib, swap;
68215@@ -54,8 +61,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
68216 "VmExe:\t%8lu kB\n"
68217 "VmLib:\t%8lu kB\n"
68218 "VmPTE:\t%8lu kB\n"
68219- "VmSwap:\t%8lu kB\n",
68220- hiwater_vm << (PAGE_SHIFT-10),
68221+ "VmSwap:\t%8lu kB\n"
68222+
68223+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
68224+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
68225+#endif
68226+
68227+ ,hiwater_vm << (PAGE_SHIFT-10),
68228 total_vm << (PAGE_SHIFT-10),
68229 mm->locked_vm << (PAGE_SHIFT-10),
68230 mm->pinned_vm << (PAGE_SHIFT-10),
68231@@ -65,7 +77,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
68232 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
68233 (PTRS_PER_PTE * sizeof(pte_t) *
68234 atomic_long_read(&mm->nr_ptes)) >> 10,
68235- swap << (PAGE_SHIFT-10));
68236+ swap << (PAGE_SHIFT-10)
68237+
68238+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
68239+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68240+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
68241+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
68242+#else
68243+ , mm->context.user_cs_base
68244+ , mm->context.user_cs_limit
68245+#endif
68246+#endif
68247+
68248+ );
68249 }
68250
68251 unsigned long task_vsize(struct mm_struct *mm)
68252@@ -271,13 +295,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
68253 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
68254 }
68255
68256- /* We don't show the stack guard page in /proc/maps */
68257+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68258+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
68259+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
68260+#else
68261 start = vma->vm_start;
68262- if (stack_guard_page_start(vma, start))
68263- start += PAGE_SIZE;
68264 end = vma->vm_end;
68265- if (stack_guard_page_end(vma, end))
68266- end -= PAGE_SIZE;
68267+#endif
68268
68269 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
68270 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
68271@@ -287,7 +311,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
68272 flags & VM_WRITE ? 'w' : '-',
68273 flags & VM_EXEC ? 'x' : '-',
68274 flags & VM_MAYSHARE ? 's' : 'p',
68275+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68276+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
68277+#else
68278 pgoff,
68279+#endif
68280 MAJOR(dev), MINOR(dev), ino);
68281
68282 /*
68283@@ -296,7 +324,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
68284 */
68285 if (file) {
68286 seq_pad(m, ' ');
68287- seq_path(m, &file->f_path, "\n");
68288+ seq_path(m, &file->f_path, "\n\\");
68289 goto done;
68290 }
68291
68292@@ -328,8 +356,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
68293 * Thread stack in /proc/PID/task/TID/maps or
68294 * the main process stack.
68295 */
68296- if (!is_pid || (vma->vm_start <= mm->start_stack &&
68297- vma->vm_end >= mm->start_stack)) {
68298+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
68299+ (vma->vm_start <= mm->start_stack &&
68300+ vma->vm_end >= mm->start_stack)) {
68301 name = "[stack]";
68302 } else {
68303 /* Thread stack in /proc/PID/maps */
68304@@ -353,6 +382,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
68305 struct proc_maps_private *priv = m->private;
68306 struct task_struct *task = priv->task;
68307
68308+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68309+ if (current->exec_id != m->exec_id) {
68310+ gr_log_badprocpid("maps");
68311+ return 0;
68312+ }
68313+#endif
68314+
68315 show_map_vma(m, vma, is_pid);
68316
68317 if (m->count < m->size) /* vma is copied successfully */
68318@@ -593,12 +629,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
68319 .private = &mss,
68320 };
68321
68322+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68323+ if (current->exec_id != m->exec_id) {
68324+ gr_log_badprocpid("smaps");
68325+ return 0;
68326+ }
68327+#endif
68328 memset(&mss, 0, sizeof mss);
68329- mss.vma = vma;
68330- /* mmap_sem is held in m_start */
68331- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
68332- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
68333-
68334+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68335+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
68336+#endif
68337+ mss.vma = vma;
68338+ /* mmap_sem is held in m_start */
68339+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
68340+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
68341+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68342+ }
68343+#endif
68344 show_map_vma(m, vma, is_pid);
68345
68346 seq_printf(m,
68347@@ -616,7 +663,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
68348 "KernelPageSize: %8lu kB\n"
68349 "MMUPageSize: %8lu kB\n"
68350 "Locked: %8lu kB\n",
68351+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68352+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
68353+#else
68354 (vma->vm_end - vma->vm_start) >> 10,
68355+#endif
68356 mss.resident >> 10,
68357 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
68358 mss.shared_clean >> 10,
68359@@ -1398,6 +1449,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
68360 char buffer[64];
68361 int nid;
68362
68363+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68364+ if (current->exec_id != m->exec_id) {
68365+ gr_log_badprocpid("numa_maps");
68366+ return 0;
68367+ }
68368+#endif
68369+
68370 if (!mm)
68371 return 0;
68372
68373@@ -1415,11 +1473,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
68374 mpol_to_str(buffer, sizeof(buffer), pol);
68375 mpol_cond_put(pol);
68376
68377+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68378+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
68379+#else
68380 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
68381+#endif
68382
68383 if (file) {
68384 seq_puts(m, " file=");
68385- seq_path(m, &file->f_path, "\n\t= ");
68386+ seq_path(m, &file->f_path, "\n\t\\= ");
68387 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
68388 seq_puts(m, " heap");
68389 } else {
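
On a GRKERNSEC_PROC_MEMMAP kernel, the PAX_RAND_FLAGS() checks above zero the address, offset, and size columns when a randomized foreign task's maps are read; reading your own maps is unaffected because the macro excludes current->mm. A sketch that dumps the first lines of another task's maps (pid taken from argv, defaulting to 1; the caller still needs ordinary permission to read the target's maps at all):

	#include <stdio.h>

	int main(int argc, char **argv)
	{
		char path[64], line[256];
		FILE *f;
		int n = 0;

		snprintf(path, sizeof(path), "/proc/%s/maps",
			 argc > 1 ? argv[1] : "1");
		f = fopen(path, "r");
		if (!f) {
			perror("fopen");
			return 1;
		}
		/* Under the patch, a randomized target prints 00000000-00000000
		 * for the address range and offset columns. */
		while (n++ < 5 && fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}
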
68390diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
68391index 678455d..ebd3245 100644
68392--- a/fs/proc/task_nommu.c
68393+++ b/fs/proc/task_nommu.c
68394@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
68395 else
68396 bytes += kobjsize(mm);
68397
68398- if (current->fs && current->fs->users > 1)
68399+ if (current->fs && atomic_read(&current->fs->users) > 1)
68400 sbytes += kobjsize(current->fs);
68401 else
68402 bytes += kobjsize(current->fs);
68403@@ -161,7 +161,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
68404
68405 if (file) {
68406 seq_pad(m, ' ');
68407- seq_path(m, &file->f_path, "");
68408+ seq_path(m, &file->f_path, "\n\\");
68409 } else if (mm) {
68410 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
68411
68412diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
68413index 382aa89..6b03974 100644
68414--- a/fs/proc/vmcore.c
68415+++ b/fs/proc/vmcore.c
68416@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
68417 nr_bytes = count;
68418
68419 /* If pfn is not ram, return zeros for sparse dump files */
68420- if (pfn_is_ram(pfn) == 0)
68421- memset(buf, 0, nr_bytes);
68422- else {
68423+ if (pfn_is_ram(pfn) == 0) {
68424+ if (userbuf) {
68425+ if (clear_user((char __force_user *)buf, nr_bytes))
68426+ return -EFAULT;
68427+ } else
68428+ memset(buf, 0, nr_bytes);
68429+ } else {
68430 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
68431 offset, userbuf);
68432 if (tmp < 0)
68433@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
68434 static int copy_to(void *target, void *src, size_t size, int userbuf)
68435 {
68436 if (userbuf) {
68437- if (copy_to_user((char __user *) target, src, size))
68438+ if (copy_to_user((char __force_user *) target, src, size))
68439 return -EFAULT;
68440 } else {
68441 memcpy(target, src, size);
68442@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
68443 if (*fpos < m->offset + m->size) {
68444 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
68445 start = m->paddr + *fpos - m->offset;
68446- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
68447+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
68448 if (tmp < 0)
68449 return tmp;
68450 buflen -= tsz;
68451@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
68452 static ssize_t read_vmcore(struct file *file, char __user *buffer,
68453 size_t buflen, loff_t *fpos)
68454 {
68455- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
68456+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
68457 }
68458
68459 /*
68460diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
68461index b00fcc9..e0c6381 100644
68462--- a/fs/qnx6/qnx6.h
68463+++ b/fs/qnx6/qnx6.h
68464@@ -74,7 +74,7 @@ enum {
68465 BYTESEX_BE,
68466 };
68467
68468-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
68469+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
68470 {
68471 if (sbi->s_bytesex == BYTESEX_LE)
68472 return le64_to_cpu((__force __le64)n);
68473@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
68474 return (__force __fs64)cpu_to_be64(n);
68475 }
68476
68477-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
68478+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
68479 {
68480 if (sbi->s_bytesex == BYTESEX_LE)
68481 return le32_to_cpu((__force __le32)n);
68482diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
68483index 72d2917..c917c12 100644
68484--- a/fs/quota/netlink.c
68485+++ b/fs/quota/netlink.c
68486@@ -45,7 +45,7 @@ static struct genl_family quota_genl_family = {
68487 void quota_send_warning(struct kqid qid, dev_t dev,
68488 const char warntype)
68489 {
68490- static atomic_t seq;
68491+ static atomic_unchecked_t seq;
68492 struct sk_buff *skb;
68493 void *msg_head;
68494 int ret;
68495@@ -61,7 +61,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
68496 "VFS: Not enough memory to send quota warning.\n");
68497 return;
68498 }
68499- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
68500+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
68501 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
68502 if (!msg_head) {
68503 printk(KERN_ERR
68504diff --git a/fs/read_write.c b/fs/read_write.c
68505index 009d854..16ce214 100644
68506--- a/fs/read_write.c
68507+++ b/fs/read_write.c
68508@@ -495,7 +495,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
68509
68510 old_fs = get_fs();
68511 set_fs(get_ds());
68512- p = (__force const char __user *)buf;
68513+ p = (const char __force_user *)buf;
68514 if (count > MAX_RW_COUNT)
68515 count = MAX_RW_COUNT;
68516 if (file->f_op->write)
68517diff --git a/fs/readdir.c b/fs/readdir.c
68518index 33fd922..e0d6094 100644
68519--- a/fs/readdir.c
68520+++ b/fs/readdir.c
68521@@ -18,6 +18,7 @@
68522 #include <linux/security.h>
68523 #include <linux/syscalls.h>
68524 #include <linux/unistd.h>
68525+#include <linux/namei.h>
68526
68527 #include <asm/uaccess.h>
68528
68529@@ -71,6 +72,7 @@ struct old_linux_dirent {
68530 struct readdir_callback {
68531 struct dir_context ctx;
68532 struct old_linux_dirent __user * dirent;
68533+ struct file * file;
68534 int result;
68535 };
68536
68537@@ -88,6 +90,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
68538 buf->result = -EOVERFLOW;
68539 return -EOVERFLOW;
68540 }
68541+
68542+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68543+ return 0;
68544+
68545 buf->result++;
68546 dirent = buf->dirent;
68547 if (!access_ok(VERIFY_WRITE, dirent,
68548@@ -119,6 +125,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
68549 if (!f.file)
68550 return -EBADF;
68551
68552+ buf.file = f.file;
68553 error = iterate_dir(f.file, &buf.ctx);
68554 if (buf.result)
68555 error = buf.result;
68556@@ -144,6 +151,7 @@ struct getdents_callback {
68557 struct dir_context ctx;
68558 struct linux_dirent __user * current_dir;
68559 struct linux_dirent __user * previous;
68560+ struct file * file;
68561 int count;
68562 int error;
68563 };
68564@@ -165,6 +173,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
68565 buf->error = -EOVERFLOW;
68566 return -EOVERFLOW;
68567 }
68568+
68569+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68570+ return 0;
68571+
68572 dirent = buf->previous;
68573 if (dirent) {
68574 if (__put_user(offset, &dirent->d_off))
68575@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
68576 if (!f.file)
68577 return -EBADF;
68578
68579+ buf.file = f.file;
68580 error = iterate_dir(f.file, &buf.ctx);
68581 if (error >= 0)
68582 error = buf.error;
68583@@ -228,6 +241,7 @@ struct getdents_callback64 {
68584 struct dir_context ctx;
68585 struct linux_dirent64 __user * current_dir;
68586 struct linux_dirent64 __user * previous;
68587+ struct file *file;
68588 int count;
68589 int error;
68590 };
68591@@ -243,6 +257,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
68592 buf->error = -EINVAL; /* only used if we fail.. */
68593 if (reclen > buf->count)
68594 return -EINVAL;
68595+
68596+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68597+ return 0;
68598+
68599 dirent = buf->previous;
68600 if (dirent) {
68601 if (__put_user(offset, &dirent->d_off))
68602@@ -290,6 +308,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
68603 if (!f.file)
68604 return -EBADF;
68605
68606+ buf.file = f.file;
68607 error = iterate_dir(f.file, &buf.ctx);
68608 if (error >= 0)
68609 error = buf.error;
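
All three readdir paths above now stash the struct file in the callback so gr_acl_handle_filldir() can filter individual entries before they are copied to userspace; a hidden entry is simply skipped rather than turned into an error. The raw syscall this filtering sits behind can be driven directly (Linux-specific sketch using SYS_getdents64; the dirent layout is the standard kernel one):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	struct linux_dirent64 {
		unsigned long long d_ino;
		long long	   d_off;
		unsigned short	   d_reclen;
		unsigned char	   d_type;
		char		   d_name[];
	};

	int main(void)
	{
		char buf[4096];
		int fd = open("/proc", O_RDONLY | O_DIRECTORY);
		long nread;

		if (fd < 0)
			return 1;
		/* Each batch runs through filldir64(); entries the ACL hides
		 * never show up in buf. */
		while ((nread = syscall(SYS_getdents64, fd, buf, sizeof(buf))) > 0) {
			long off = 0;
			while (off < nread) {
				struct linux_dirent64 *d = (void *)(buf + off);
				puts(d->d_name);
				off += d->d_reclen;
			}
		}
		close(fd);
		return 0;
	}
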
68610diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
68611index 54fdf19..987862b 100644
68612--- a/fs/reiserfs/do_balan.c
68613+++ b/fs/reiserfs/do_balan.c
68614@@ -1872,7 +1872,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
68615 return;
68616 }
68617
68618- atomic_inc(&fs_generation(tb->tb_sb));
68619+ atomic_inc_unchecked(&fs_generation(tb->tb_sb));
68620 do_balance_starts(tb);
68621
68622 /*
68623diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
68624index cfaee91..b9d0d60 100644
68625--- a/fs/reiserfs/item_ops.c
68626+++ b/fs/reiserfs/item_ops.c
68627@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
68628 }
68629
68630 static struct item_operations errcatch_ops = {
68631- errcatch_bytes_number,
68632- errcatch_decrement_key,
68633- errcatch_is_left_mergeable,
68634- errcatch_print_item,
68635- errcatch_check_item,
68636+ .bytes_number = errcatch_bytes_number,
68637+ .decrement_key = errcatch_decrement_key,
68638+ .is_left_mergeable = errcatch_is_left_mergeable,
68639+ .print_item = errcatch_print_item,
68640+ .check_item = errcatch_check_item,
68641
68642- errcatch_create_vi,
68643- errcatch_check_left,
68644- errcatch_check_right,
68645- errcatch_part_size,
68646- errcatch_unit_num,
68647- errcatch_print_vi
68648+ .create_vi = errcatch_create_vi,
68649+ .check_left = errcatch_check_left,
68650+ .check_right = errcatch_check_right,
68651+ .part_size = errcatch_part_size,
68652+ .unit_num = errcatch_unit_num,
68653+ .print_vi = errcatch_print_vi
68654 };
68655
68656 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
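
The item_ops hunk above is purely a conversion from positional to C99 designated initializers, which keeps the table correct even if the struct's members are ever reordered and makes each binding self-documenting. The difference in miniature, using a hypothetical ops struct for illustration:

	#include <stdio.h>

	struct ops {
		int (*open)(void);
		int (*close)(void);
	};

	static int do_open(void)  { return puts("open"); }
	static int do_close(void) { return puts("close"); }

	/* Positional form: silently breaks if 'open' and 'close' swap places. */
	static struct ops positional = { do_open, do_close };

	/* Designated form: binds each function to its field by name. */
	static struct ops designated = {
		.open  = do_open,
		.close = do_close,
	};

	int main(void)
	{
		positional.open();
		designated.close();
		return 0;
	}
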
68657diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
68658index 02b0b7d..c85018b 100644
68659--- a/fs/reiserfs/procfs.c
68660+++ b/fs/reiserfs/procfs.c
68661@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
68662 "SMALL_TAILS " : "NO_TAILS ",
68663 replay_only(sb) ? "REPLAY_ONLY " : "",
68664 convert_reiserfs(sb) ? "CONV " : "",
68665- atomic_read(&r->s_generation_counter),
68666+ atomic_read_unchecked(&r->s_generation_counter),
68667 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
68668 SF(s_do_balance), SF(s_unneeded_left_neighbor),
68669 SF(s_good_search_by_key_reada), SF(s_bmaps),
68670diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
68671index bf53888..227f5ae 100644
68672--- a/fs/reiserfs/reiserfs.h
68673+++ b/fs/reiserfs/reiserfs.h
68674@@ -573,7 +573,7 @@ struct reiserfs_sb_info {
68675 /* Comment? -Hans */
68676 wait_queue_head_t s_wait;
68677 /* increased by one every time the tree gets re-balanced */
68678- atomic_t s_generation_counter;
68679+ atomic_unchecked_t s_generation_counter;
68680
68681 /* File system properties. Currently holds on-disk FS format */
68682 unsigned long s_properties;
68683@@ -2294,7 +2294,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
68684 #define REISERFS_USER_MEM 1 /* user memory mode */
68685
68686 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
68687-#define get_generation(s) atomic_read (&fs_generation(s))
68688+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
68689 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
68690 #define __fs_changed(gen,s) (gen != get_generation (s))
68691 #define fs_changed(gen,s) \
68692diff --git a/fs/select.c b/fs/select.c
68693index 467bb1c..cf9d65a 100644
68694--- a/fs/select.c
68695+++ b/fs/select.c
68696@@ -20,6 +20,7 @@
68697 #include <linux/export.h>
68698 #include <linux/slab.h>
68699 #include <linux/poll.h>
68700+#include <linux/security.h>
68701 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
68702 #include <linux/file.h>
68703 #include <linux/fdtable.h>
68704@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
68705 struct poll_list *walk = head;
68706 unsigned long todo = nfds;
68707
68708+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
68709 if (nfds > rlimit(RLIMIT_NOFILE))
68710 return -EINVAL;
68711
68712diff --git a/fs/seq_file.c b/fs/seq_file.c
68713index 3857b72..0b7281e 100644
68714--- a/fs/seq_file.c
68715+++ b/fs/seq_file.c
68716@@ -12,6 +12,8 @@
68717 #include <linux/slab.h>
68718 #include <linux/cred.h>
68719 #include <linux/mm.h>
68720+#include <linux/sched.h>
68721+#include <linux/grsecurity.h>
68722
68723 #include <asm/uaccess.h>
68724 #include <asm/page.h>
68725@@ -34,12 +36,7 @@ static void seq_set_overflow(struct seq_file *m)
68726
68727 static void *seq_buf_alloc(unsigned long size)
68728 {
68729- void *buf;
68730-
68731- buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
68732- if (!buf && size > PAGE_SIZE)
68733- buf = vmalloc(size);
68734- return buf;
68735+ return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
68736 }
68737
68738 /**
68739@@ -72,6 +69,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
68740 #ifdef CONFIG_USER_NS
68741 p->user_ns = file->f_cred->user_ns;
68742 #endif
68743+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68744+ p->exec_id = current->exec_id;
68745+#endif
68746
68747 /*
68748 * Wrappers around seq_open(e.g. swaps_open) need to be
68749@@ -94,6 +94,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
68750 }
68751 EXPORT_SYMBOL(seq_open);
68752
68753+
68754+int seq_open_restrict(struct file *file, const struct seq_operations *op)
68755+{
68756+ if (gr_proc_is_restricted())
68757+ return -EACCES;
68758+
68759+ return seq_open(file, op);
68760+}
68761+EXPORT_SYMBOL(seq_open_restrict);
68762+
68763 static int traverse(struct seq_file *m, loff_t offset)
68764 {
68765 loff_t pos = 0, index;
68766@@ -165,7 +175,7 @@ Eoverflow:
68767 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
68768 {
68769 struct seq_file *m = file->private_data;
68770- size_t copied = 0;
68771+ ssize_t copied = 0;
68772 loff_t pos;
68773 size_t n;
68774 void *p;
68775@@ -596,7 +606,7 @@ static void single_stop(struct seq_file *p, void *v)
68776 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
68777 void *data)
68778 {
68779- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
68780+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
68781 int res = -ENOMEM;
68782
68783 if (op) {
68784@@ -632,6 +642,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
68785 }
68786 EXPORT_SYMBOL(single_open_size);
68787
68788+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
68789+ void *data)
68790+{
68791+ if (gr_proc_is_restricted())
68792+ return -EACCES;
68793+
68794+ return single_open(file, show, data);
68795+}
68796+EXPORT_SYMBOL(single_open_restrict);
68797+
68798+
68799 int single_release(struct inode *inode, struct file *file)
68800 {
68801 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
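
The seq_read() hunk widens `copied` from size_t to ssize_t; the natural reading is that the variable can temporarily hold a negative errno, and in an unsigned type a later `copied < 0` test can never fire. A freestanding illustration of that signedness trap:

	#include <stdio.h>

	int main(void)
	{
		size_t  ucopied = (size_t)-14;	/* -EFAULT stored in an unsigned type */
		ssize_t scopied = -14;

		/* The unsigned comparison is always false, so the error is lost. */
		printf("size_t  < 0 ? %s\n", ucopied < 0 ? "yes" : "no");
		printf("ssize_t < 0 ? %s\n", scopied < 0 ? "yes" : "no");
		return 0;
	}
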
68802diff --git a/fs/splice.c b/fs/splice.c
68803index f5cb9ba..8ddb1e9 100644
68804--- a/fs/splice.c
68805+++ b/fs/splice.c
68806@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
68807 pipe_lock(pipe);
68808
68809 for (;;) {
68810- if (!pipe->readers) {
68811+ if (!atomic_read(&pipe->readers)) {
68812 send_sig(SIGPIPE, current, 0);
68813 if (!ret)
68814 ret = -EPIPE;
68815@@ -216,7 +216,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
68816 page_nr++;
68817 ret += buf->len;
68818
68819- if (pipe->files)
68820+ if (atomic_read(&pipe->files))
68821 do_wakeup = 1;
68822
68823 if (!--spd->nr_pages)
68824@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
68825 do_wakeup = 0;
68826 }
68827
68828- pipe->waiting_writers++;
68829+ atomic_inc(&pipe->waiting_writers);
68830 pipe_wait(pipe);
68831- pipe->waiting_writers--;
68832+ atomic_dec(&pipe->waiting_writers);
68833 }
68834
68835 pipe_unlock(pipe);
68836@@ -576,7 +576,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
68837 old_fs = get_fs();
68838 set_fs(get_ds());
68839 /* The cast to a user pointer is valid due to the set_fs() */
68840- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
68841+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
68842 set_fs(old_fs);
68843
68844 return res;
68845@@ -591,7 +591,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
68846 old_fs = get_fs();
68847 set_fs(get_ds());
68848 /* The cast to a user pointer is valid due to the set_fs() */
68849- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
68850+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
68851 set_fs(old_fs);
68852
68853 return res;
68854@@ -644,7 +644,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
68855 goto err;
68856
68857 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
68858- vec[i].iov_base = (void __user *) page_address(page);
68859+ vec[i].iov_base = (void __force_user *) page_address(page);
68860 vec[i].iov_len = this_len;
68861 spd.pages[i] = page;
68862 spd.nr_pages++;
68863@@ -783,7 +783,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
68864 ops->release(pipe, buf);
68865 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
68866 pipe->nrbufs--;
68867- if (pipe->files)
68868+ if (atomic_read(&pipe->files))
68869 sd->need_wakeup = true;
68870 }
68871
68872@@ -807,10 +807,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
68873 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
68874 {
68875 while (!pipe->nrbufs) {
68876- if (!pipe->writers)
68877+ if (!atomic_read(&pipe->writers))
68878 return 0;
68879
68880- if (!pipe->waiting_writers && sd->num_spliced)
68881+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
68882 return 0;
68883
68884 if (sd->flags & SPLICE_F_NONBLOCK)
68885@@ -1040,7 +1040,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
68886 ops->release(pipe, buf);
68887 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
68888 pipe->nrbufs--;
68889- if (pipe->files)
68890+ if (atomic_read(&pipe->files))
68891 sd.need_wakeup = true;
68892 } else {
68893 buf->offset += ret;
68894@@ -1200,7 +1200,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
68895 * out of the pipe right after the splice_to_pipe(). So set
68896 * PIPE_READERS appropriately.
68897 */
68898- pipe->readers = 1;
68899+ atomic_set(&pipe->readers, 1);
68900
68901 current->splice_pipe = pipe;
68902 }
68903@@ -1496,6 +1496,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
68904
68905 partial[buffers].offset = off;
68906 partial[buffers].len = plen;
68907+ partial[buffers].private = 0;
68908
68909 off = 0;
68910 len -= plen;
68911@@ -1732,9 +1733,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
68912 ret = -ERESTARTSYS;
68913 break;
68914 }
68915- if (!pipe->writers)
68916+ if (!atomic_read(&pipe->writers))
68917 break;
68918- if (!pipe->waiting_writers) {
68919+ if (!atomic_read(&pipe->waiting_writers)) {
68920 if (flags & SPLICE_F_NONBLOCK) {
68921 ret = -EAGAIN;
68922 break;
68923@@ -1766,7 +1767,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
68924 pipe_lock(pipe);
68925
68926 while (pipe->nrbufs >= pipe->buffers) {
68927- if (!pipe->readers) {
68928+ if (!atomic_read(&pipe->readers)) {
68929 send_sig(SIGPIPE, current, 0);
68930 ret = -EPIPE;
68931 break;
68932@@ -1779,9 +1780,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
68933 ret = -ERESTARTSYS;
68934 break;
68935 }
68936- pipe->waiting_writers++;
68937+ atomic_inc(&pipe->waiting_writers);
68938 pipe_wait(pipe);
68939- pipe->waiting_writers--;
68940+ atomic_dec(&pipe->waiting_writers);
68941 }
68942
68943 pipe_unlock(pipe);
68944@@ -1817,14 +1818,14 @@ retry:
68945 pipe_double_lock(ipipe, opipe);
68946
68947 do {
68948- if (!opipe->readers) {
68949+ if (!atomic_read(&opipe->readers)) {
68950 send_sig(SIGPIPE, current, 0);
68951 if (!ret)
68952 ret = -EPIPE;
68953 break;
68954 }
68955
68956- if (!ipipe->nrbufs && !ipipe->writers)
68957+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
68958 break;
68959
68960 /*
68961@@ -1921,7 +1922,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
68962 pipe_double_lock(ipipe, opipe);
68963
68964 do {
68965- if (!opipe->readers) {
68966+ if (!atomic_read(&opipe->readers)) {
68967 send_sig(SIGPIPE, current, 0);
68968 if (!ret)
68969 ret = -EPIPE;
68970@@ -1966,7 +1967,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
68971 * return EAGAIN if we have the potential of some data in the
68972 * future, otherwise just return 0
68973 */
68974- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
68975+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
68976 ret = -EAGAIN;
68977
68978 pipe_unlock(ipipe);
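
The splice hunks convert the pipe reader/writer bookkeeping to atomic_t so the counters can be examined without tearing. The same read/inc/dec pattern in portable C11, as a userspace analogue of atomic_read()/atomic_inc()/atomic_dec() (the kernel helpers differ in memory-ordering details):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int waiting_writers;

	int main(void)
	{
		atomic_fetch_add(&waiting_writers, 1);	/* atomic_inc() */
		printf("writers waiting: %d\n",
		       atomic_load(&waiting_writers));	/* atomic_read() */
		atomic_fetch_sub(&waiting_writers, 1);	/* atomic_dec() */
		printf("writers waiting: %d\n", atomic_load(&waiting_writers));
		return 0;
	}
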
68979diff --git a/fs/stat.c b/fs/stat.c
68980index ae0c3ce..9ee641c 100644
68981--- a/fs/stat.c
68982+++ b/fs/stat.c
68983@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
68984 stat->gid = inode->i_gid;
68985 stat->rdev = inode->i_rdev;
68986 stat->size = i_size_read(inode);
68987- stat->atime = inode->i_atime;
68988- stat->mtime = inode->i_mtime;
68989+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
68990+ stat->atime = inode->i_ctime;
68991+ stat->mtime = inode->i_ctime;
68992+ } else {
68993+ stat->atime = inode->i_atime;
68994+ stat->mtime = inode->i_mtime;
68995+ }
68996 stat->ctime = inode->i_ctime;
68997 stat->blksize = (1 << inode->i_blkbits);
68998 stat->blocks = inode->i_blocks;
68999@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
69000 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
69001 {
69002 struct inode *inode = path->dentry->d_inode;
69003+ int retval;
69004
69005- if (inode->i_op->getattr)
69006- return inode->i_op->getattr(path->mnt, path->dentry, stat);
69007+ if (inode->i_op->getattr) {
69008+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
69009+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
69010+ stat->atime = stat->ctime;
69011+ stat->mtime = stat->ctime;
69012+ }
69013+ return retval;
69014+ }
69015
69016 generic_fillattr(inode, stat);
69017 return 0;
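
With the is_sidechannel_device() checks above, an unprivileged stat() on such a device reports ctime in place of atime and mtime, so access patterns can no longer be timed through the inode. A reader that shows the three fields for comparison (path from argv; /dev/ptmx is only a plausible example, since which devices qualify is decided by is_sidechannel_device(), defined elsewhere in this patch):

	#include <stdio.h>
	#include <sys/stat.h>

	int main(int argc, char **argv)
	{
		struct stat st;
		const char *path = argc > 1 ? argv[1] : "/dev/ptmx";

		if (stat(path, &st)) {
			perror("stat");
			return 1;
		}
		/* On a patched kernel, a caller without CAP_MKNOD sees
		 * atime == mtime == ctime for side-channel devices. */
		printf("atime %ld\nmtime %ld\nctime %ld\n",
		       (long)st.st_atime, (long)st.st_mtime, (long)st.st_ctime);
		return 0;
	}
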
69018diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
69019index 0b45ff4..847de5b 100644
69020--- a/fs/sysfs/dir.c
69021+++ b/fs/sysfs/dir.c
69022@@ -41,9 +41,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
69023 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
69024 {
69025 struct kernfs_node *parent, *kn;
69026+ const char *name;
69027+ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
69028+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
69029+ const char *parent_name;
69030+#endif
69031
69032 BUG_ON(!kobj);
69033
69034+ name = kobject_name(kobj);
69035+
69036 if (kobj->parent)
69037 parent = kobj->parent->sd;
69038 else
69039@@ -52,11 +59,22 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
69040 if (!parent)
69041 return -ENOENT;
69042
69043- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
69044- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
69045+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
69046+ parent_name = parent->name;
69047+ mode = S_IRWXU;
69048+
69049+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
69050+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
69051+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
69052+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
69053+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
69054+#endif
69055+
69056+ kn = kernfs_create_dir_ns(parent, name,
69057+ mode, kobj, ns);
69058 if (IS_ERR(kn)) {
69059 if (PTR_ERR(kn) == -EEXIST)
69060- sysfs_warn_dup(parent, kobject_name(kobj));
69061+ sysfs_warn_dup(parent, name);
69062 return PTR_ERR(kn);
69063 }
69064
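
Under GRKERNSEC_SYSFS_RESTRICT, the hunk above defaults new sysfs directories to 0700 and re-opens a short whitelist of paths (devices, fs, devices/system, fs/selinux and friends, system/cpu) to world read so common userspace keeps working. The whitelist logic, extracted as a standalone predicate mirroring the strcmp chain above (a sketch, not kernel code):

	#include <stdio.h>
	#include <string.h>

	/* Mirror of the patch's whitelist: 0755 for the listed parent/name
	 * pairs, 0700 for every other sysfs directory. */
	static unsigned int sysfs_dir_mode(const char *parent, const char *name)
	{
		if ((!strcmp(parent, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
		    (!strcmp(parent, "devices") && !strcmp(name, "system")) ||
		    (!strcmp(parent, "fs") && (!strcmp(name, "selinux") ||
					       !strcmp(name, "fuse") ||
					       !strcmp(name, "ecryptfs"))) ||
		    (!strcmp(parent, "system") && !strcmp(name, "cpu")))
			return 0755;
		return 0700;
	}

	int main(void)
	{
		printf("/sys/devices        -> %o\n", sysfs_dir_mode("", "devices"));
		printf("/sys/devices/system -> %o\n", sysfs_dir_mode("devices", "system"));
		printf("/sys/kernel         -> %o\n", sysfs_dir_mode("", "kernel"));
		return 0;
	}
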
69065diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
69066index 69d4889..a810bd4 100644
69067--- a/fs/sysv/sysv.h
69068+++ b/fs/sysv/sysv.h
69069@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
69070 #endif
69071 }
69072
69073-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
69074+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
69075 {
69076 if (sbi->s_bytesex == BYTESEX_PDP)
69077 return PDP_swab((__force __u32)n);
69078diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
69079index 2290d58..7791371 100644
69080--- a/fs/ubifs/io.c
69081+++ b/fs/ubifs/io.c
69082@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
69083 return err;
69084 }
69085
69086-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
69087+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
69088 {
69089 int err;
69090
69091diff --git a/fs/udf/misc.c b/fs/udf/misc.c
69092index c175b4d..8f36a16 100644
69093--- a/fs/udf/misc.c
69094+++ b/fs/udf/misc.c
69095@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
69096
69097 u8 udf_tag_checksum(const struct tag *t)
69098 {
69099- u8 *data = (u8 *)t;
69100+ const u8 *data = (const u8 *)t;
69101 u8 checksum = 0;
69102 int i;
69103 for (i = 0; i < sizeof(struct tag); ++i)
69104diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
69105index 8d974c4..b82f6ec 100644
69106--- a/fs/ufs/swab.h
69107+++ b/fs/ufs/swab.h
69108@@ -22,7 +22,7 @@ enum {
69109 BYTESEX_BE
69110 };
69111
69112-static inline u64
69113+static inline u64 __intentional_overflow(-1)
69114 fs64_to_cpu(struct super_block *sbp, __fs64 n)
69115 {
69116 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
69117@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
69118 return (__force __fs64)cpu_to_be64(n);
69119 }
69120
69121-static inline u32
69122+static inline u32 __intentional_overflow(-1)
69123 fs32_to_cpu(struct super_block *sbp, __fs32 n)
69124 {
69125 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
69126diff --git a/fs/utimes.c b/fs/utimes.c
69127index aa138d6..5f3a811 100644
69128--- a/fs/utimes.c
69129+++ b/fs/utimes.c
69130@@ -1,6 +1,7 @@
69131 #include <linux/compiler.h>
69132 #include <linux/file.h>
69133 #include <linux/fs.h>
69134+#include <linux/security.h>
69135 #include <linux/linkage.h>
69136 #include <linux/mount.h>
69137 #include <linux/namei.h>
69138@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
69139 }
69140 }
69141 retry_deleg:
69142+
69143+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
69144+ error = -EACCES;
69145+ goto mnt_drop_write_and_out;
69146+ }
69147+
69148 mutex_lock(&inode->i_mutex);
69149 error = notify_change(path->dentry, &newattrs, &delegated_inode);
69150 mutex_unlock(&inode->i_mutex);
69151diff --git a/fs/xattr.c b/fs/xattr.c
69152index c69e6d4..cc56af5 100644
69153--- a/fs/xattr.c
69154+++ b/fs/xattr.c
69155@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
69156 return rc;
69157 }
69158
69159+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
69160+ssize_t
69161+pax_getxattr(struct dentry *dentry, void *value, size_t size)
69162+{
69163+ struct inode *inode = dentry->d_inode;
69164+ ssize_t error;
69165+
69166+ error = inode_permission(inode, MAY_EXEC);
69167+ if (error)
69168+ return error;
69169+
69170+ if (inode->i_op->getxattr)
69171+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
69172+ else
69173+ error = -EOPNOTSUPP;
69174+
69175+ return error;
69176+}
69177+EXPORT_SYMBOL(pax_getxattr);
69178+#endif
69179+
69180 ssize_t
69181 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
69182 {
69183@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
69184 * Extended attribute SET operations
69185 */
69186 static long
69187-setxattr(struct dentry *d, const char __user *name, const void __user *value,
69188+setxattr(struct path *path, const char __user *name, const void __user *value,
69189 size_t size, int flags)
69190 {
69191 int error;
69192@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
69193 posix_acl_fix_xattr_from_user(kvalue, size);
69194 }
69195
69196- error = vfs_setxattr(d, kname, kvalue, size, flags);
69197+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
69198+ error = -EACCES;
69199+ goto out;
69200+ }
69201+
69202+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
69203 out:
69204 if (vvalue)
69205 vfree(vvalue);
69206@@ -377,7 +403,7 @@ retry:
69207 return error;
69208 error = mnt_want_write(path.mnt);
69209 if (!error) {
69210- error = setxattr(path.dentry, name, value, size, flags);
69211+ error = setxattr(&path, name, value, size, flags);
69212 mnt_drop_write(path.mnt);
69213 }
69214 path_put(&path);
69215@@ -401,7 +427,7 @@ retry:
69216 return error;
69217 error = mnt_want_write(path.mnt);
69218 if (!error) {
69219- error = setxattr(path.dentry, name, value, size, flags);
69220+ error = setxattr(&path, name, value, size, flags);
69221 mnt_drop_write(path.mnt);
69222 }
69223 path_put(&path);
69224@@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
69225 const void __user *,value, size_t, size, int, flags)
69226 {
69227 struct fd f = fdget(fd);
69228- struct dentry *dentry;
69229 int error = -EBADF;
69230
69231 if (!f.file)
69232 return error;
69233- dentry = f.file->f_path.dentry;
69234- audit_inode(NULL, dentry, 0);
69235+ audit_inode(NULL, f.file->f_path.dentry, 0);
69236 error = mnt_want_write_file(f.file);
69237 if (!error) {
69238- error = setxattr(dentry, name, value, size, flags);
69239+ error = setxattr(&f.file->f_path, name, value, size, flags);
69240 mnt_drop_write_file(f.file);
69241 }
69242 fdput(f);
69243@@ -626,7 +650,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
69244 * Extended attribute REMOVE operations
69245 */
69246 static long
69247-removexattr(struct dentry *d, const char __user *name)
69248+removexattr(struct path *path, const char __user *name)
69249 {
69250 int error;
69251 char kname[XATTR_NAME_MAX + 1];
69252@@ -637,7 +661,10 @@ removexattr(struct dentry *d, const char __user *name)
69253 if (error < 0)
69254 return error;
69255
69256- return vfs_removexattr(d, kname);
69257+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
69258+ return -EACCES;
69259+
69260+ return vfs_removexattr(path->dentry, kname);
69261 }
69262
69263 SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
69264@@ -652,7 +679,7 @@ retry:
69265 return error;
69266 error = mnt_want_write(path.mnt);
69267 if (!error) {
69268- error = removexattr(path.dentry, name);
69269+ error = removexattr(&path, name);
69270 mnt_drop_write(path.mnt);
69271 }
69272 path_put(&path);
69273@@ -675,7 +702,7 @@ retry:
69274 return error;
69275 error = mnt_want_write(path.mnt);
69276 if (!error) {
69277- error = removexattr(path.dentry, name);
69278+ error = removexattr(&path, name);
69279 mnt_drop_write(path.mnt);
69280 }
69281 path_put(&path);
69282@@ -689,16 +716,16 @@ retry:
69283 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
69284 {
69285 struct fd f = fdget(fd);
69286- struct dentry *dentry;
69287+ struct path *path;
69288 int error = -EBADF;
69289
69290 if (!f.file)
69291 return error;
69292- dentry = f.file->f_path.dentry;
69293- audit_inode(NULL, dentry, 0);
69294+ path = &f.file->f_path;
69295+ audit_inode(NULL, path->dentry, 0);
69296 error = mnt_want_write_file(f.file);
69297 if (!error) {
69298- error = removexattr(dentry, name);
69299+ error = removexattr(path, name);
69300 mnt_drop_write_file(f.file);
69301 }
69302 fdput(f);
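
The xattr hunks thread a struct path through setxattr()/removexattr() so the grsecurity ACL hooks can see the vfsmount rather than just the dentry; the userspace interface is unchanged. Driving the patched paths from userspace (user.* attributes need no privilege on most filesystems; the demo creates its own scratch file):

	#include <stdio.h>
	#include <sys/xattr.h>

	int main(void)
	{
		const char *path = "xattr-demo.txt";
		char value[64];
		ssize_t len;
		FILE *f = fopen(path, "w");

		if (!f)
			return 1;
		fclose(f);

		/* Both calls funnel into the patched setxattr()/removexattr(). */
		if (setxattr(path, "user.demo", "hello", 5, 0)) {
			perror("setxattr");	/* EACCES if an ACL hook refused */
			return 1;
		}
		len = getxattr(path, "user.demo", value, sizeof(value));
		if (len > 0)
			printf("user.demo = %.*s\n", (int)len, value);
		removexattr(path, "user.demo");
		return 0;
	}
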
69303diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
69304index 75c3fe5..b0f6bbe 100644
69305--- a/fs/xfs/xfs_bmap.c
69306+++ b/fs/xfs/xfs_bmap.c
69307@@ -583,7 +583,7 @@ xfs_bmap_validate_ret(
69308
69309 #else
69310 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
69311-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
69312+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
69313 #endif /* DEBUG */
69314
69315 /*
69316diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
69317index 48e99af..54ebae3 100644
69318--- a/fs/xfs/xfs_dir2_readdir.c
69319+++ b/fs/xfs/xfs_dir2_readdir.c
69320@@ -159,7 +159,12 @@ xfs_dir2_sf_getdents(
69321 ino = dp->d_ops->sf_get_ino(sfp, sfep);
69322 filetype = dp->d_ops->sf_get_ftype(sfep);
69323 ctx->pos = off & 0x7fffffff;
69324- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
69325+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
69326+ char name[sfep->namelen];
69327+ memcpy(name, sfep->name, sfep->namelen);
69328+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(dp->i_mount, filetype)))
69329+ return 0;
69330+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
69331 xfs_dir3_get_dtype(dp->i_mount, filetype)))
69332 return 0;
69333 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
69334diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
69335index 8bc1bbc..0d6911b 100644
69336--- a/fs/xfs/xfs_ioctl.c
69337+++ b/fs/xfs/xfs_ioctl.c
69338@@ -122,7 +122,7 @@ xfs_find_handle(
69339 }
69340
69341 error = -EFAULT;
69342- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
69343+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
69344 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
69345 goto out_put;
69346
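
The xfs_find_handle() fix above refuses to copy more than sizeof(handle) bytes to userspace, turning an attacker-influenced hsize into a hard bound instead of a kernel stack over-read. The shape of the fix in isolation (a generic sketch of the bounds check, not the XFS code itself):

	#include <stdio.h>
	#include <string.h>

	/* Copy at most handle_size bytes out; reject anything larger, the
	 * same way the patched xfs_find_handle() caps hsize. */
	static int copy_handle(void *dst, const void *handle,
			       size_t handle_size, size_t hsize)
	{
		if (hsize > handle_size)
			return -1;	/* would read past the source buffer */
		memcpy(dst, handle, hsize);
		return 0;
	}

	int main(void)
	{
		char handle[24] = "opaque-fs-handle";
		char out[64];

		printf("hsize=24  -> %d\n", copy_handle(out, handle, sizeof(handle), 24));
		printf("hsize=400 -> %d\n", copy_handle(out, handle, sizeof(handle), 400));
		return 0;
	}
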
69347diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
69348new file mode 100644
69349index 0000000..27cec32
69350--- /dev/null
69351+++ b/grsecurity/Kconfig
69352@@ -0,0 +1,1166 @@
69353+#
69354+# grsecurity configuration
69355+#
69356+menu "Memory Protections"
69357+depends on GRKERNSEC
69358+
69359+config GRKERNSEC_KMEM
69360+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
69361+ default y if GRKERNSEC_CONFIG_AUTO
69362+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
69363+ help
69364+	  If you say Y here, /dev/kmem and /dev/mem won't be allowed to be
69365+	  written to or read from, closing off their use to modify or leak the
69366+	  contents of the running kernel. /dev/port will also not be allowed to be opened, writing to
69367+ /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
69368+ If you have module support disabled, enabling this will close up several
69369+ ways that are currently used to insert malicious code into the running
69370+ kernel.
69371+
69372+ Even with this feature enabled, we still highly recommend that
69373+ you use the RBAC system, as it is still possible for an attacker to
69374+ modify the running kernel through other more obscure methods.
69375+
69376+ It is highly recommended that you say Y here if you meet all the
69377+ conditions above.
69378+
69379+config GRKERNSEC_VM86
69380+ bool "Restrict VM86 mode"
69381+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
69382+ depends on X86_32
69383+
69384+ help
69385+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
69386+ make use of a special execution mode on 32bit x86 processors called
69387+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
69388+ video cards and will still work with this option enabled. The purpose
69389+ of the option is to prevent exploitation of emulation errors in
69390+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
69391+ Nearly all users should be able to enable this option.
69392+
69393+config GRKERNSEC_IO
69394+ bool "Disable privileged I/O"
69395+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
69396+ depends on X86
69397+ select RTC_CLASS
69398+ select RTC_INTF_DEV
69399+ select RTC_DRV_CMOS
69400+
69401+ help
69402+ If you say Y here, all ioperm and iopl calls will return an error.
69403+ Ioperm and iopl can be used to modify the running kernel.
69404+ Unfortunately, some programs need this access to operate properly,
69405+ the most notable of which are XFree86 and hwclock. hwclock can be
69406+ remedied by having RTC support in the kernel, so real-time
69407+ clock support is enabled if this option is enabled, to ensure
69408+ that hwclock operates correctly. If hwclock still does not work,
69409+ either update udev or symlink /dev/rtc to /dev/rtc0.
69410+
69411+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
69412+ you may not be able to boot into a graphical environment with this
69413+ option enabled. In this case, you should use the RBAC system instead.
69414+
69415+config GRKERNSEC_JIT_HARDEN
69416+ bool "Harden BPF JIT against spray attacks"
69417+ default y if GRKERNSEC_CONFIG_AUTO
69418+ depends on BPF_JIT && X86
69419+ help
69420+ If you say Y here, the native code generated by the kernel's Berkeley
69421+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
69422+ attacks that attempt to fit attacker-beneficial instructions in
69423+ 32bit immediate fields of JIT-generated native instructions. The
69424+ attacker will generally aim to cause an unintended instruction sequence
69425+ of JIT-generated native code to execute by jumping into the middle of
69426+ a generated instruction. This feature effectively randomizes the 32bit
69427+ immediate constants present in the generated code to thwart such attacks.
69428+
69429+ If you're using KERNEXEC, it's recommended that you enable this option
69430+ to supplement the hardening of the kernel.
69431+
69432+config GRKERNSEC_PERF_HARDEN
69433+ bool "Disable unprivileged PERF_EVENTS usage by default"
69434+ default y if GRKERNSEC_CONFIG_AUTO
69435+ depends on PERF_EVENTS
69436+ help
68437+	  If you say Y here, the range of acceptable values for the
68438+	  /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow a
68439+	  new value, 3, which also becomes the default. When the sysctl is set to
68440+	  this value, no unprivileged use of the perf_event_open() syscall will be permitted.
69441+
69442+ Though PERF_EVENTS can be used legitimately for performance monitoring
69443+ and low-level application profiling, it is forced on regardless of
69444+ configuration, has been at fault for several vulnerabilities, and
69445+ creates new opportunities for side channels and other information leaks.
69446+
69447+ This feature puts PERF_EVENTS into a secure default state and permits
69448+ the administrator to change out of it temporarily if unprivileged
69449+ application profiling is needed.
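
The paranoia level this option introduces can be inspected (and, as root, written back) through the sysctl file named in the help text. A small reader; echoing 3 into the file as root is what actually engages the lockdown described above:

	#include <stdio.h>

	int main(void)
	{
		int level;
		FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%d", &level) == 1)
			printf("perf_event_paranoid = %d%s\n", level,
			       level >= 3 ? " (unprivileged perf disabled)" : "");
		fclose(f);
		return 0;
	}
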
69450+
69451+config GRKERNSEC_RAND_THREADSTACK
69452+ bool "Insert random gaps between thread stacks"
69453+ default y if GRKERNSEC_CONFIG_AUTO
69454+ depends on PAX_RANDMMAP && !PPC
69455+ help
69456+ If you say Y here, a random-sized gap will be enforced between allocated
69457+ thread stacks. Glibc's NPTL and other threading libraries that
69458+ pass MAP_STACK to the kernel for thread stack allocation are supported.
69459+ The implementation currently provides 8 bits of entropy for the gap.
69460+
69461+ Many distributions do not compile threaded remote services with the
69462+ -fstack-check argument to GCC, causing the variable-sized stack-based
69463+ allocator, alloca(), to not probe the stack on allocation. This
69464+ permits an unbounded alloca() to skip over any guard page and potentially
69465+ modify another thread's stack reliably. An enforced random gap
69466+ reduces the reliability of such an attack and increases the chance
69467+ that such a read/write to another thread's stack instead lands in
69468+ an unmapped area, causing a crash and triggering grsecurity's
69469+ anti-bruteforcing logic.
69470+
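  For context, this is roughly how a threading library obtains the stacks
  this option separates; a minimal sketch using the MAP_STACK flag
  mentioned above (NPTL does essentially this, with more bookkeeping):

    #include <stdio.h>
    #include <sys/mman.h>

    #define STACK_SIZE (8 * 1024 * 1024)

    int main(void)
    {
        /* MAP_STACK marks the mapping as a thread stack; per the help
         * text, GRKERNSEC_RAND_THREADSTACK enforces a random gap
         * (8 bits of entropy) between such mappings. */
        void *stack = mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
        if (stack == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        printf("thread stack at %p\n", stack);
        munmap(stack, STACK_SIZE);
        return 0;
    }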
69471+config GRKERNSEC_PROC_MEMMAP
69472+ bool "Harden ASLR against information leaks and entropy reduction"
69473+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
69474+ depends on PAX_NOEXEC || PAX_ASLR
69475+ help
69476+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
69477+ give no information about the addresses of a task's mappings if
69478+ PaX features that rely on random addresses are enabled on the task.
69479+ In addition to sanitizing this information and disabling other
69480+ dangerous sources of information, this option denies reads of sensitive
69481+ /proc/<pid> entries where the file descriptor was opened in a different
69482+ task than the one performing the read. Such attempts are logged.
69483+ This option also limits argv/env strings for suid/sgid binaries
69484+ to 512KB to prevent a complete exhaustion of the stack entropy provided
69485+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
69486+ binaries to prevent alternative mmap layouts from being abused.
69487+
69488+ If you use PaX it is essential that you say Y here as it closes up
69489+ several holes that make full ASLR useless locally.
69490+
69491+
69492+config GRKERNSEC_KSTACKOVERFLOW
69493+ bool "Prevent kernel stack overflows"
69494+ default y if GRKERNSEC_CONFIG_AUTO
69495+ depends on !IA64 && 64BIT
69496+ help
69497+ If you say Y here, the kernel's process stacks will be allocated
69498+ with vmalloc instead of the kernel's default allocator. This
69499+ introduces guard pages that in combination with the alloca checking
69500+ of the STACKLEAK feature prevents all forms of kernel process stack
69501+ overflow abuse. Note that this is different from kernel stack
69502+ buffer overflows.
69503+
69504+config GRKERNSEC_BRUTE
69505+ bool "Deter exploit bruteforcing"
69506+ default y if GRKERNSEC_CONFIG_AUTO
69507+ help
69508+ If you say Y here, attempts to bruteforce exploits against forking
69509+ daemons such as apache or sshd, as well as against suid/sgid binaries
69510+ will be deterred. When a child of a forking daemon is killed by PaX
69511+ or crashes due to an illegal instruction or other suspicious signal,
69512+ the parent process will be delayed 30 seconds upon every subsequent
69513+ fork until the administrator is able to assess the situation and
69514+ restart the daemon.
69515+ In the suid/sgid case, the attempt is logged, the user has all their
69516+ existing instances of the suid/sgid binary terminated and will
69517+ be unable to execute any suid/sgid binaries for 15 minutes.
69518+
69519+ It is recommended that you also enable signal logging in the auditing
69520+ section so that logs are generated when a process triggers a suspicious
69521+ signal.
69522+ If the sysctl option is enabled, a sysctl option with name
69523+ "deter_bruteforce" is created.
69524+
69525+config GRKERNSEC_MODHARDEN
69526+ bool "Harden module auto-loading"
69527+ default y if GRKERNSEC_CONFIG_AUTO
69528+ depends on MODULES
69529+ help
69530+ If you say Y here, module auto-loading in response to use of some
69531+ feature implemented by an unloaded module will be restricted to
69532+ root users. Enabling this option helps defend against attacks
69533+ by unprivileged users who abuse the auto-loading behavior to
69534+ cause a vulnerable module to load that is then exploited.
69535+
69536+ If this option prevents a legitimate use of auto-loading for a
69537+ non-root user, the administrator can execute modprobe manually
69538+ with the exact name of the module mentioned in the alert log.
69539+ Alternatively, the administrator can add the module to the list
69540+ of modules loaded at boot by modifying init scripts.
69541+
69542+ Modification of init scripts will most likely be needed on
69543+ Ubuntu servers with encrypted home directory support enabled,
69544+ as the first non-root user logging in will cause the ecb(aes),
69545+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
69546+
69547+config GRKERNSEC_HIDESYM
69548+ bool "Hide kernel symbols"
69549+ default y if GRKERNSEC_CONFIG_AUTO
69550+ select PAX_USERCOPY_SLABS
69551+ help
69552+ If you say Y here, getting information on loaded modules and
69553+ displaying all kernel symbols through a syscall will be restricted
69554+ to users with CAP_SYS_MODULE. For software compatibility reasons,
69555+ /proc/kallsyms will be restricted to the root user. The RBAC
69556+ system can hide that entry even from root.
69557+
69558+ This option also prevents leaking of kernel addresses through
69559+ several /proc entries.
69560+
69561+ Note that this option is only effective provided the following
69562+ conditions are met:
69563+ 1) The kernel using grsecurity is not precompiled by some distribution
69564+ 2) You have also enabled GRKERNSEC_DMESG
69565+ 3) You are using the RBAC system and hiding other files such as your
69566+ kernel image and System.map. Alternatively, enabling this option
69567+ causes the permissions on /boot, /lib/modules, and the kernel
69568+ source directory to change at compile time to prevent
69569+ reading by non-root users.
69570+ If the above conditions are met, this option will aid in providing a
69571+ useful protection against local kernel exploitation of overflows
69572+ and arbitrary read/write vulnerabilities.
69573+
69574+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
69575+ in addition to this feature.
69576+
69577+config GRKERNSEC_RANDSTRUCT
69578+ bool "Randomize layout of sensitive kernel structures"
69579+ default y if GRKERNSEC_CONFIG_AUTO
69580+ select GRKERNSEC_HIDESYM
69581+ select MODVERSIONS if MODULES
69582+ help
69583+ If you say Y here, the layouts of a number of sensitive kernel
69584+ structures (task, fs, cred, etc) and all structures composed entirely
69585+ of function pointers (aka "ops" structs) will be randomized at compile-time.
69586+ This can force exploits targeting these structure types to
69587+ additionally require an infoleak vulnerability.
69588+
69589+ Enabling this feature will introduce some performance impact, slightly
69590+ increase memory usage, and prevent the use of forensic tools like
69591+ Volatility against the system (unless the kernel source tree is left
69592+ uncleaned after installation, leaving the layout seed available).
69593+
69594+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
69595+ It remains after a make clean to allow for external modules to be compiled
69596+ with the existing seed and will be removed by a make mrproper or
69597+ make distclean.
69598+
69599+ Note that the implementation requires gcc 4.6.4 or newer. You may need
69600+ to install the supporting headers explicitly in addition to the normal
69601+ gcc package.
69602+
69603+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
69604+ bool "Use cacheline-aware structure randomization"
69605+ depends on GRKERNSEC_RANDSTRUCT
69606+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
69607+ help
69608+ If you say Y here, the RANDSTRUCT randomization will make a best effort
69609+ at restricting randomization to cacheline-sized groups of elements. It
69610+ will further not randomize bitfields in structures. This reduces the
69611+ performance hit of RANDSTRUCT at the cost of weakened randomization.
69612+
69613+config GRKERNSEC_KERN_LOCKOUT
69614+ bool "Active kernel exploit response"
69615+ default y if GRKERNSEC_CONFIG_AUTO
69616+ depends on X86 || ARM || PPC || SPARC
69617+ help
69618+ If you say Y here, when a PaX alert is triggered due to suspicious
69619+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
69620+ or an OOPS occurs due to bad memory accesses, instead of just
69621+ terminating the offending process (and potentially allowing
69622+ a subsequent exploit from the same user), we will take one of two
69623+ actions:
69624+ If the user was root, we will panic the system.
69625+ If the user was non-root, we will log the attempt, terminate
69626+ all processes owned by the user, then prevent them from creating
69627+ any new processes until the system is restarted.
69628+ This deters repeated kernel exploitation/bruteforcing attempts
69629+ and is useful for later forensics.
69630+
69631+config GRKERNSEC_OLD_ARM_USERLAND
69632+ bool "Old ARM userland compatibility"
69633+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
69634+ help
69635+ If you say Y here, stubs of executable code to perform such operations
69636+ as "compare-exchange" will be placed at fixed locations in the ARM vector
69637+ table. This is unfortunately needed for old ARM userland meant to run
69638+ across a wide range of processors. Without this option enabled,
69639+ the get_tls and data memory barrier stubs will be emulated by the kernel,
69640+ which is enough for Linaro userlands or other userlands designed for v6
69641+ and newer ARM CPUs. It's recommended that you try without this option enabled
69642+ first, and only enable it if your userland does not boot (it will likely fail
69643+ at init time).
69644+
69645+endmenu
69646+menu "Role Based Access Control Options"
69647+depends on GRKERNSEC
69648+
69649+config GRKERNSEC_RBAC_DEBUG
69650+ bool
69651+
69652+config GRKERNSEC_NO_RBAC
69653+ bool "Disable RBAC system"
69654+ help
69655+ If you say Y here, the /dev/grsec device will be removed from the kernel,
69656+ preventing the RBAC system from being enabled. You should only say Y
69657+ here if you have no intention of using the RBAC system, so as to prevent
69658+ an attacker with root access from misusing the RBAC system to hide files
69659+ and processes when loadable module support and /dev/[k]mem have been
69660+ locked down.
69661+
69662+config GRKERNSEC_ACL_HIDEKERN
69663+ bool "Hide kernel processes"
69664+ help
69665+ If you say Y here, all kernel threads will be hidden from all
69666+ processes but those whose subject has the "view hidden processes"
69667+ flag.
69668+
69669+config GRKERNSEC_ACL_MAXTRIES
69670+ int "Maximum tries before password lockout"
69671+ default 3
69672+ help
69673+ This option enforces the maximum number of times a user can attempt
69674+ to authorize themselves with the grsecurity RBAC system before being
69675+ denied the ability to attempt authorization again for a specified time.
69676+ The lower the number, the harder it will be to brute-force a password.
69677+
69678+config GRKERNSEC_ACL_TIMEOUT
69679+ int "Time to wait after max password tries, in seconds"
69680+ default 30
69681+ help
69682+ This option specifies the time the user must wait after submitting
69683+ the maximum number of invalid passwords to the RBAC system. The
69684+ higher the number, the harder it will be to brute-force
69685+ a password.
69686+
69687+endmenu
69688+menu "Filesystem Protections"
69689+depends on GRKERNSEC
69690+
69691+config GRKERNSEC_PROC
69692+ bool "Proc restrictions"
69693+ default y if GRKERNSEC_CONFIG_AUTO
69694+ help
69695+ If you say Y here, the permissions of the /proc filesystem
69696+ will be altered to enhance system security and privacy. You MUST
69697+ choose either a user only restriction or a user and group restriction.
69698+ With the user-only restriction, users see only the processes they
69699+ themselves run; with the user-and-group restriction, you additionally
69700+ choose a group whose members can view all processes and files normally
69701+ restricted to root. NOTE: If you're running identd or
69702+ ntpd as a non-root user, you will have to run it as the group you
69703+ specify here.
69704+
69705+config GRKERNSEC_PROC_USER
69706+ bool "Restrict /proc to user only"
69707+ depends on GRKERNSEC_PROC
69708+ help
69709+ If you say Y here, non-root users will only be able to view their own
69710+ processes, and will be restricted from viewing network-related
69711+ information and kernel symbol and module information.
69712+
69713+config GRKERNSEC_PROC_USERGROUP
69714+ bool "Allow special group"
69715+ default y if GRKERNSEC_CONFIG_AUTO
69716+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
69717+ help
69718+ If you say Y here, you will be able to select a group that will be
69719+ able to view all processes and network-related information. If you've
69720+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
69721+ remain hidden. This option is useful if you want to run identd as
69722+ a non-root user. The group you select may also be chosen at boot time
69723+ via "grsec_proc_gid=" on the kernel command line.
69724+
69725+config GRKERNSEC_PROC_GID
69726+ int "GID for special group"
69727+ depends on GRKERNSEC_PROC_USERGROUP
69728+ default 1001
69729+
69730+config GRKERNSEC_PROC_ADD
69731+ bool "Additional restrictions"
69732+ default y if GRKERNSEC_CONFIG_AUTO
69733+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
69734+ help
69735+ If you say Y here, additional restrictions will be placed on
69736+ /proc that keep normal users from viewing device information and
69737+ slabinfo information that could be useful for exploits.
69738+
69739+config GRKERNSEC_LINK
69740+ bool "Linking restrictions"
69741+ default y if GRKERNSEC_CONFIG_AUTO
69742+ help
69743+ If you say Y here, /tmp race exploits will be prevented, since users
69744+ will no longer be able to follow symlinks owned by other users in
69745+ world-writable +t directories (e.g. /tmp), unless the owner of the
69746+ symlink is the owner of the directory. Users will also not be
69747+ able to hardlink to files they do not own. If the sysctl option is
69748+ enabled, a sysctl option with name "linking_restrictions" is created.
69749+
69750+config GRKERNSEC_SYMLINKOWN
69751+ bool "Kernel-enforced SymlinksIfOwnerMatch"
69752+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
69753+ help
69754+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
69755+ that prevents it from being used as a security feature. As Apache
69756+ verifies the symlink by performing a stat() against the target of
69757+ the symlink before it is followed, an attacker can set up a symlink
69758+ to point to a same-owned file, then replace the symlink with one
69759+ that targets another user's file just after Apache "validates" the
69760+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
69761+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
69762+ will be in place for the group you specify. If the sysctl option
69763+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
69764+ created.
69765+
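  The race described above, reduced to its essence in C (an illustrative
  sketch; the function name is made up, and the check mirrors what the
  help text attributes to Apache):

    #include <sys/types.h>
    #include <sys/stat.h>
    #include <fcntl.h>
    #include <unistd.h>

    /* Unsafe owner check followed by open(): classic TOCTOU. */
    static int open_if_same_owner(const char *path, uid_t dir_owner)
    {
        struct stat st;

        if (stat(path, &st) == -1)      /* follows the symlink */
            return -1;
        if (st.st_uid != dir_owner)     /* the "validation" */
            return -1;
        /* RACE WINDOW: the symlink can be re-pointed right here,
         * before the open() below follows it again. */
        return open(path, O_RDONLY);
    }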
69766+config GRKERNSEC_SYMLINKOWN_GID
69767+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
69768+ depends on GRKERNSEC_SYMLINKOWN
69769+ default 1006
69770+ help
69771+ Setting this GID determines what group kernel-enforced
69772+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
69773+ is enabled, a sysctl option with name "symlinkown_gid" is created.
69774+
69775+config GRKERNSEC_FIFO
69776+ bool "FIFO restrictions"
69777+ default y if GRKERNSEC_CONFIG_AUTO
69778+ help
69779+ If you say Y here, users will not be able to write to FIFOs they don't
69780+ own in world-writable +t directories (e.g. /tmp), unless the owner of
69781+ the FIFO is the same as the owner of the directory it's held in. If the
69782+ sysctl
69782+ option is enabled, a sysctl option with name "fifo_restrictions" is
69783+ created.
69784+
69785+config GRKERNSEC_SYSFS_RESTRICT
69786+ bool "Sysfs/debugfs restriction"
69787+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
69788+ depends on SYSFS
69789+ help
69790+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
69791+ any filesystem normally mounted under it (e.g. debugfs) will be
69792+ mostly accessible only by root. These filesystems generally provide access
69793+ to hardware and debug information that isn't appropriate for unprivileged
69794+ users of the system. Sysfs and debugfs have also become a large source
69795+ of new vulnerabilities, ranging from infoleaks to local compromise.
69796+ There has been very little oversight with an eye toward security involved
69797+ in adding new exporters of information to these filesystems, so their
69798+ use is discouraged.
69799+ For reasons of compatibility, a few directories have been whitelisted
69800+ for access by non-root users:
69801+ /sys/fs/selinux
69802+ /sys/fs/fuse
69803+ /sys/devices/system/cpu
69804+
69805+config GRKERNSEC_ROFS
69806+ bool "Runtime read-only mount protection"
69807+ depends on SYSCTL
69808+ help
69809+ If you say Y here, a sysctl option with name "romount_protect" will
69810+ be created. By setting this option to 1 at runtime, filesystems
69811+ will be protected in the following ways:
69812+ * No new writable mounts will be allowed
69813+ * Existing read-only mounts won't be able to be remounted read/write
69814+ * Write operations will be denied on all block devices
69815+ This option acts independently of grsec_lock: once it is set to 1,
69816+ it cannot be turned off. Therefore, please be mindful of the resulting
69817+ behavior if this option is enabled in an init script on a read-only
69818+ filesystem.
69819+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
69820+ and GRKERNSEC_IO should be enabled and module loading disabled via
69821+ config or at runtime.
69822+ This feature is mainly intended for secure embedded systems.
69823+
69824+
69825+config GRKERNSEC_DEVICE_SIDECHANNEL
69826+ bool "Eliminate stat/notify-based device sidechannels"
69827+ default y if GRKERNSEC_CONFIG_AUTO
69828+ help
69829+ If you say Y here, timing analyses on block or character
69830+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
69831+ will be thwarted for unprivileged users. If a process without
69832+ CAP_MKNOD stats such a device, the last access and last modify times
69833+ will match the device's create time. No access or modify events
69834+ will be triggered through inotify/dnotify/fanotify for such devices.
69835+ This feature will prevent attacks that may at a minimum
69836+ allow an attacker to determine the administrator's password length.
69837+
69838+config GRKERNSEC_CHROOT
69839+ bool "Chroot jail restrictions"
69840+ default y if GRKERNSEC_CONFIG_AUTO
69841+ help
69842+ If you say Y here, you will be able to choose several options that will
69843+ make breaking out of a chrooted jail much more difficult. If you
69844+ encounter no software incompatibilities with the following options, it
69845+ is recommended that you enable each one.
69846+
69847+ Note that the chroot restrictions are not intended to apply to "chroots"
69848+ to directories that are simple bind mounts of the global root filesystem.
69849+ For several other reasons, a user shouldn't expect any significant
69850+ security by performing such a chroot.
69851+
69852+config GRKERNSEC_CHROOT_MOUNT
69853+ bool "Deny mounts"
69854+ default y if GRKERNSEC_CONFIG_AUTO
69855+ depends on GRKERNSEC_CHROOT
69856+ help
69857+ If you say Y here, processes inside a chroot will not be able to
69858+ mount or remount filesystems. If the sysctl option is enabled, a
69859+ sysctl option with name "chroot_deny_mount" is created.
69860+
69861+config GRKERNSEC_CHROOT_DOUBLE
69862+ bool "Deny double-chroots"
69863+ default y if GRKERNSEC_CONFIG_AUTO
69864+ depends on GRKERNSEC_CHROOT
69865+ help
69866+ If you say Y here, processes inside a chroot will not be able to chroot
69867+ again outside the chroot. This is a widely used method of breaking
69868+ out of a chroot jail and should not be allowed. If the sysctl
69869+ option is enabled, a sysctl option with name
69870+ "chroot_deny_chroot" is created.
69871+
69872+config GRKERNSEC_CHROOT_PIVOT
69873+ bool "Deny pivot_root in chroot"
69874+ default y if GRKERNSEC_CONFIG_AUTO
69875+ depends on GRKERNSEC_CHROOT
69876+ help
69877+ If you say Y here, processes inside a chroot will not be able to use
69878+ a function called pivot_root() that was introduced in Linux 2.3.41. It
69879+ works similarly to chroot in that it changes the root filesystem. This
69880+ function could be misused in a chrooted process to attempt to break out
69881+ of the chroot, and therefore should not be allowed. If the sysctl
69882+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
69883+ created.
69884+
69885+config GRKERNSEC_CHROOT_CHDIR
69886+ bool "Enforce chdir(\"/\") on all chroots"
69887+ default y if GRKERNSEC_CONFIG_AUTO
69888+ depends on GRKERNSEC_CHROOT
69889+ help
69890+ If you say Y here, the current working directory of all newly-chrooted
69891+ applications will be set to the root directory of the chroot.
69892+ The man page on chroot(2) states:
69893+ Note that this call does not change the current working
69894+ directory, so that `.' can be outside the tree rooted at
69895+ `/'. In particular, the super-user can escape from a
69896+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
69897+
69898+ It is recommended that you say Y here, since it's not known to break
69899+ any software. If the sysctl option is enabled, a sysctl option with
69900+ name "chroot_enforce_chdir" is created.
69901+
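  For reference, the user-space idiom this option enforces kernel-side,
  as a minimal sketch (the helper name is illustrative):

    #include <stdio.h>
    #include <unistd.h>

    static int enter_jail(const char *root)
    {
        if (chroot(root) == -1) {
            perror("chroot");
            return -1;
        }
        /* Without this chdir, "." may remain outside the jail;
         * GRKERNSEC_CHROOT_CHDIR performs the equivalent for you. */
        if (chdir("/") == -1) {
            perror("chdir");
            return -1;
        }
        return 0;
    }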
69902+config GRKERNSEC_CHROOT_CHMOD
69903+ bool "Deny (f)chmod +s"
69904+ default y if GRKERNSEC_CONFIG_AUTO
69905+ depends on GRKERNSEC_CHROOT
69906+ help
69907+ If you say Y here, processes inside a chroot will not be able to chmod
69908+ or fchmod files to make them have suid or sgid bits. This protects
69909+ against another published method of breaking a chroot. If the sysctl
69910+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
69911+ created.
69912+
69913+config GRKERNSEC_CHROOT_FCHDIR
69914+ bool "Deny fchdir and fhandle out of chroot"
69915+ default y if GRKERNSEC_CONFIG_AUTO
69916+ depends on GRKERNSEC_CHROOT
69917+ help
69918+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
69919+ to a file descriptor of the chrooting process that points to a directory
69920+ outside the filesystem will be stopped. Additionally, this option prevents
69921+ use of the recently-created syscall for opening files by a guessable "file
69922+ handle" inside a chroot. If the sysctl option is enabled, a sysctl option
69923+ with name "chroot_deny_fchdir" is created.
69924+
69925+config GRKERNSEC_CHROOT_MKNOD
69926+ bool "Deny mknod"
69927+ default y if GRKERNSEC_CONFIG_AUTO
69928+ depends on GRKERNSEC_CHROOT
69929+ help
69930+ If you say Y here, processes inside a chroot will not be allowed to
69931+ mknod. The problem with using mknod inside a chroot is that it
69932+ would allow an attacker to create a device entry that is the same
69933+ as one on the physical root of your system, which could be
69934+ anything from the console device to a device for your hard drive (which
69935+ they could then use to wipe the drive or steal data). It is recommended
69936+ that you say Y here, unless you run into software incompatibilities.
69937+ If the sysctl option is enabled, a sysctl option with name
69938+ "chroot_deny_mknod" is created.
69939+
69940+config GRKERNSEC_CHROOT_SHMAT
69941+ bool "Deny shmat() out of chroot"
69942+ default y if GRKERNSEC_CONFIG_AUTO
69943+ depends on GRKERNSEC_CHROOT
69944+ help
69945+ If you say Y here, processes inside a chroot will not be able to attach
69946+ to shared memory segments that were created outside of the chroot jail.
69947+ It is recommended that you say Y here. If the sysctl option is enabled,
69948+ a sysctl option with name "chroot_deny_shmat" is created.
69949+
69950+config GRKERNSEC_CHROOT_UNIX
69951+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
69952+ default y if GRKERNSEC_CONFIG_AUTO
69953+ depends on GRKERNSEC_CHROOT
69954+ help
69955+ If you say Y here, processes inside a chroot will not be able to
69956+ connect to abstract (meaning not belonging to a filesystem) Unix
69957+ domain sockets that were bound outside of a chroot. It is recommended
69958+ that you say Y here. If the sysctl option is enabled, a sysctl option
69959+ with name "chroot_deny_unix" is created.
69960+
69961+config GRKERNSEC_CHROOT_FINDTASK
69962+ bool "Protect outside processes"
69963+ default y if GRKERNSEC_CONFIG_AUTO
69964+ depends on GRKERNSEC_CHROOT
69965+ help
69966+ If you say Y here, processes inside a chroot will not be able to
69967+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
69968+ getsid, or view any process outside of the chroot. If the sysctl
69969+ option is enabled, a sysctl option with name "chroot_findtask" is
69970+ created.
69971+
69972+config GRKERNSEC_CHROOT_NICE
69973+ bool "Restrict priority changes"
69974+ default y if GRKERNSEC_CONFIG_AUTO
69975+ depends on GRKERNSEC_CHROOT
69976+ help
69977+ If you say Y here, processes inside a chroot will not be able to raise
69978+ the priority of processes in the chroot, or alter the priority of
69979+ processes outside the chroot. This provides more security than simply
69980+ removing CAP_SYS_NICE from the process' capability set. If the
69981+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
69982+ is created.
69983+
69984+config GRKERNSEC_CHROOT_SYSCTL
69985+ bool "Deny sysctl writes"
69986+ default y if GRKERNSEC_CONFIG_AUTO
69987+ depends on GRKERNSEC_CHROOT
69988+ help
69989+ If you say Y here, an attacker in a chroot will not be able to
69990+ write to sysctl entries, either by sysctl(2) or through a /proc
69991+ interface. It is strongly recommended that you say Y here. If the
69992+ sysctl option is enabled, a sysctl option with name
69993+ "chroot_deny_sysctl" is created.
69994+
69995+config GRKERNSEC_CHROOT_CAPS
69996+ bool "Capability restrictions"
69997+ default y if GRKERNSEC_CONFIG_AUTO
69998+ depends on GRKERNSEC_CHROOT
69999+ help
70000+ If you say Y here, the capabilities on all processes within a
70001+ chroot jail will be lowered to stop module insertion, raw i/o,
70002+ system and net admin tasks, rebooting the system, modifying immutable
70003+ files, modifying IPC owned by another, and changing the system time.
70004+ This is left as an option because it can break some apps. Disable this
70005+ if your chrooted apps are having problems performing those kinds of
70006+ tasks. If the sysctl option is enabled, a sysctl option with
70007+ name "chroot_caps" is created.
70008+
70009+config GRKERNSEC_CHROOT_INITRD
70010+ bool "Exempt initrd tasks from restrictions"
70011+ default y if GRKERNSEC_CONFIG_AUTO
70012+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
70013+ help
70014+ If you say Y here, tasks started prior to init will be exempted from
70015+ grsecurity's chroot restrictions. This option is mainly meant to
70016+ resolve issues with Plymouth performing privileged operations unnecessarily
70017+ in a chroot.
70018+
70019+endmenu
70020+menu "Kernel Auditing"
70021+depends on GRKERNSEC
70022+
70023+config GRKERNSEC_AUDIT_GROUP
70024+ bool "Single group for auditing"
70025+ help
70026+ If you say Y here, the exec and chdir logging features will only operate
70027+ on a group you specify. This option is recommended if you only want to
70028+ watch certain users instead of having a large amount of logs from the
70029+ entire system. If the sysctl option is enabled, a sysctl option with
70030+ name "audit_group" is created.
70031+
70032+config GRKERNSEC_AUDIT_GID
70033+ int "GID for auditing"
70034+ depends on GRKERNSEC_AUDIT_GROUP
70035+ default 1007
70036+
70037+config GRKERNSEC_EXECLOG
70038+ bool "Exec logging"
70039+ help
70040+ If you say Y here, all execve() calls will be logged (since the
70041+ other exec*() calls are frontends to execve(), all execution
70042+ will be logged). Useful for shell-servers that like to keep track
70043+ of their users. If the sysctl option is enabled, a sysctl option with
70044+ name "exec_logging" is created.
70045+ WARNING: This option when enabled will produce a LOT of logs, especially
70046+ on an active system.
70047+
70048+config GRKERNSEC_RESLOG
70049+ bool "Resource logging"
70050+ default y if GRKERNSEC_CONFIG_AUTO
70051+ help
70052+ If you say Y here, all attempts to overstep resource limits will
70053+ be logged with the resource name, the requested size, and the current
70054+ limit. It is highly recommended that you say Y here. If the sysctl
70055+ option is enabled, a sysctl option with name "resource_logging" is
70056+ created. If the RBAC system is enabled, the sysctl value is ignored.
70057+
70058+config GRKERNSEC_CHROOT_EXECLOG
70059+ bool "Log execs within chroot"
70060+ help
70061+ If you say Y here, all executions inside a chroot jail will be logged
70062+ to syslog. This can cause a large amount of logs if certain
70063+ applications (e.g. djb's daemontools) are installed on the system, and
70064+ is therefore left as an option. If the sysctl option is enabled, a
70065+ sysctl option with name "chroot_execlog" is created.
70066+
70067+config GRKERNSEC_AUDIT_PTRACE
70068+ bool "Ptrace logging"
70069+ help
70070+ If you say Y here, all attempts to attach to a process via ptrace
70071+ will be logged. If the sysctl option is enabled, a sysctl option
70072+ with name "audit_ptrace" is created.
70073+
70074+config GRKERNSEC_AUDIT_CHDIR
70075+ bool "Chdir logging"
70076+ help
70077+ If you say Y here, all chdir() calls will be logged. If the sysctl
70078+ option is enabled, a sysctl option with name "audit_chdir" is created.
70079+
70080+config GRKERNSEC_AUDIT_MOUNT
70081+ bool "(Un)Mount logging"
70082+ help
70083+ If you say Y here, all mounts and unmounts will be logged. If the
70084+ sysctl option is enabled, a sysctl option with name "audit_mount" is
70085+ created.
70086+
70087+config GRKERNSEC_SIGNAL
70088+ bool "Signal logging"
70089+ default y if GRKERNSEC_CONFIG_AUTO
70090+ help
70091+ If you say Y here, certain important signals will be logged, such as
70092+ SIGSEGV; this informs you when an error occurred in a program,
70093+ which in some cases could indicate an exploit attempt.
70094+ If the sysctl option is enabled, a sysctl option with name
70095+ "signal_logging" is created.
70096+
70097+config GRKERNSEC_FORKFAIL
70098+ bool "Fork failure logging"
70099+ help
70100+ If you say Y here, all failed fork() attempts will be logged.
70101+ This could suggest a fork bomb, or someone attempting to overstep
70102+ their process limit. If the sysctl option is enabled, a sysctl option
70103+ with name "forkfail_logging" is created.
70104+
70105+config GRKERNSEC_TIME
70106+ bool "Time change logging"
70107+ default y if GRKERNSEC_CONFIG_AUTO
70108+ help
70109+ If you say Y here, any changes of the system clock will be logged.
70110+ If the sysctl option is enabled, a sysctl option with name
70111+ "timechange_logging" is created.
70112+
70113+config GRKERNSEC_PROC_IPADDR
70114+ bool "/proc/<pid>/ipaddr support"
70115+ default y if GRKERNSEC_CONFIG_AUTO
70116+ help
70117+ If you say Y here, a new entry will be added to each /proc/<pid>
70118+ directory that contains the IP address of the person using the task.
70119+ The IP is carried across local TCP and AF_UNIX stream sockets.
70120+ This information can be useful for IDS/IPSes to perform remote response
70121+ to a local attack. The entry is readable by only the owner of the
70122+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
70123+ the RBAC system), and thus does not create privacy concerns.
70124+
70125+config GRKERNSEC_RWXMAP_LOG
70126+ bool 'Denied RWX mmap/mprotect logging'
70127+ default y if GRKERNSEC_CONFIG_AUTO
70128+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
70129+ help
70130+ If you say Y here, calls to mmap() and mprotect() with explicit
70131+ usage of PROT_WRITE and PROT_EXEC together will be logged when
70132+ denied by the PAX_MPROTECT feature. This feature will also
70133+ log other problematic scenarios that can occur when PAX_MPROTECT
70134+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
70135+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
70136+ is created.
70137+
70138+endmenu
70139+
70140+menu "Executable Protections"
70141+depends on GRKERNSEC
70142+
70143+config GRKERNSEC_DMESG
70144+ bool "Dmesg(8) restriction"
70145+ default y if GRKERNSEC_CONFIG_AUTO
70146+ help
70147+ If you say Y here, non-root users will not be able to use dmesg(8)
70148+ to view the contents of the kernel's circular log buffer.
70149+ The kernel's log buffer often contains kernel addresses and other
70150+ identifying information useful to an attacker in fingerprinting a
70151+ system for a targeted exploit.
70152+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
70153+ created.
70154+
70155+config GRKERNSEC_HARDEN_PTRACE
70156+ bool "Deter ptrace-based process snooping"
70157+ default y if GRKERNSEC_CONFIG_AUTO
70158+ help
70159+ If you say Y here, TTY sniffers and other malicious monitoring
70160+ programs implemented through ptrace will be defeated. If you
70161+ have been using the RBAC system, this option has already been
70162+ enabled for several years for all users, with the ability to make
70163+ fine-grained exceptions.
70164+
70165+ This option only affects the ability of non-root users to ptrace
70166+ processes that are not a descendant of the ptracing process.
70167+ This means that strace ./binary and gdb ./binary will still work,
70168+ but attaching to arbitrary processes will not. If the sysctl
70169+ option is enabled, a sysctl option with name "harden_ptrace" is
70170+ created.
70171+
70172+config GRKERNSEC_PTRACE_READEXEC
70173+ bool "Require read access to ptrace sensitive binaries"
70174+ default y if GRKERNSEC_CONFIG_AUTO
70175+ help
70176+ If you say Y here, unprivileged users will not be able to ptrace unreadable
70177+ binaries. This option is useful in environments that
70178+ remove the read bits (e.g. file mode 4711) from suid binaries to
70179+ prevent infoleaking of their contents. This option adds
70180+ consistency to the use of that file mode, as such a binary could
70181+ otherwise be read out by ptracing it while it runs without privileges.
70182+
70183+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
70184+ is created.
70185+
70186+config GRKERNSEC_SETXID
70187+ bool "Enforce consistent multithreaded privileges"
70188+ default y if GRKERNSEC_CONFIG_AUTO
70189+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
70190+ help
70191+ If you say Y here, a change from a root uid to a non-root uid
70192+ in a multithreaded application will cause the resulting uids,
70193+ gids, supplementary groups, and capabilities in that thread
70194+ to be propagated to the other threads of the process. In most
70195+ cases this is unnecessary, as glibc will emulate this behavior
70196+ on behalf of the application. Other libcs do not act in the
70197+ same way, allowing the other threads of the process to continue
70198+ running with root privileges. If the sysctl option is enabled,
70199+ a sysctl option with name "consistent_setxid" is created.
70200+
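  The hazard is easiest to see with a raw syscall that bypasses glibc's
  cross-thread broadcast; a minimal sketch (illustrative only, not part
  of this patch; run as root, 65534 is an arbitrary unprivileged uid):

    /* build: cc -pthread setxid_demo.c; error handling trimmed */
    #include <stdio.h>
    #include <unistd.h>
    #include <pthread.h>
    #include <sys/syscall.h>

    static void *worker(void *arg)
    {
        (void)arg;
        sleep(2);   /* observe uid after main() drops privileges */
        printf("worker euid: %d\n", (int)geteuid());
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        sleep(1);
        /* The raw syscall affects only the calling thread; glibc's
         * setuid() would broadcast it to all threads instead. */
        syscall(SYS_setuid, 65534);
        printf("main euid:   %d\n", (int)geteuid());
        pthread_join(t, NULL);
        return 0;
    }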
70201+config GRKERNSEC_HARDEN_IPC
70202+ bool "Disallow access to overly-permissive IPC objects"
70203+ default y if GRKERNSEC_CONFIG_AUTO
70204+ depends on SYSVIPC
70205+ help
70206+ If you say Y here, access to overly-permissive IPC objects (shared
70207+ memory, message queues, and semaphores) will be denied for processes
70208+ meeting the following criteria, beyond normal permission checks:
70209+ 1) If the IPC object is world-accessible and the euid doesn't match
70210+ that of the creator or current uid for the IPC object
70211+ 2) If the IPC object is group-accessible and the egid doesn't
70212+ match that of the creator or current gid for the IPC object
70213+ It's a common error to grant too much permission to these objects,
70214+ with impact ranging from denial of service and information leaking to
70215+ privilege escalation. This feature was developed in response to
70216+ research by Tim Brown:
70217+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
70218+ who found hundreds of such insecure usages. Processes with
70219+ CAP_IPC_OWNER are still permitted to access these IPC objects.
70220+ If the sysctl option is enabled, a sysctl option with name
70221+ "harden_ipc" is created.
70222+
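  A sketch of the class of mistake this option mitigates (illustrative
  only; under this option, an unrelated euid/egid would be denied access
  to the first segment despite its 0666 mode):

    #include <stdio.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    int main(void)
    {
        /* Overly permissive: any local user who learns the id
         * could normally attach to this segment. */
        int lax = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0666);
        /* The portable fix: owner-only access. */
        int strict = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);

        printf("world-accessible id=%d, owner-only id=%d\n", lax, strict);
        if (lax != -1)
            shmctl(lax, IPC_RMID, NULL);
        if (strict != -1)
            shmctl(strict, IPC_RMID, NULL);
        return 0;
    }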
70223+config GRKERNSEC_TPE
70224+ bool "Trusted Path Execution (TPE)"
70225+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
70226+ help
70227+ If you say Y here, you will be able to choose a GID to add to the
70228+ supplementary groups of users you want to mark as "untrusted."
70229+ These users will not be able to execute any files that are not in
70230+ root-owned directories writable only by root. If the sysctl option
70231+ is enabled, a sysctl option with name "tpe" is created.
70232+
70233+config GRKERNSEC_TPE_ALL
70234+ bool "Partially restrict all non-root users"
70235+ depends on GRKERNSEC_TPE
70236+ help
70237+ If you say Y here, all non-root users will be covered under
70238+ a weaker TPE restriction. This is separate from, and in addition to,
70239+ the main TPE options that you have selected elsewhere. Thus, if a
70240+ "trusted" GID is chosen, this restriction applies to even that GID.
70241+ Under this restriction, all non-root users will only be allowed to
70242+ execute files in directories they own that are not group or
70243+ world-writable, or in directories owned by root and writable only by
70244+ root. If the sysctl option is enabled, a sysctl option with name
70245+ "tpe_restrict_all" is created.
70246+
70247+config GRKERNSEC_TPE_INVERT
70248+ bool "Invert GID option"
70249+ depends on GRKERNSEC_TPE
70250+ help
70251+ If you say Y here, the group you specify in the TPE configuration will
70252+ decide what group TPE restrictions will be *disabled* for. This
70253+ option is useful if you want TPE restrictions to be applied to most
70254+ users on the system. If the sysctl option is enabled, a sysctl option
70255+ with name "tpe_invert" is created. Unlike other sysctl options, this
70256+ entry will default to on for backward-compatibility.
70257+
70258+config GRKERNSEC_TPE_GID
70259+ int
70260+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
70261+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
70262+
70263+config GRKERNSEC_TPE_UNTRUSTED_GID
70264+ int "GID for TPE-untrusted users"
70265+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
70266+ default 1005
70267+ help
70268+ Setting this GID determines what group TPE restrictions will be
70269+ *enabled* for. If the sysctl option is enabled, a sysctl option
70270+ with name "tpe_gid" is created.
70271+
70272+config GRKERNSEC_TPE_TRUSTED_GID
70273+ int "GID for TPE-trusted users"
70274+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
70275+ default 1005
70276+ help
70277+ Setting this GID determines what group TPE restrictions will be
70278+ *disabled* for. If the sysctl option is enabled, a sysctl option
70279+ with name "tpe_gid" is created.
70280+
70281+endmenu
70282+menu "Network Protections"
70283+depends on GRKERNSEC
70284+
70285+config GRKERNSEC_BLACKHOLE
70286+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
70287+ default y if GRKERNSEC_CONFIG_AUTO
70288+ depends on NET
70289+ help
70290+ If you say Y here, neither TCP resets nor ICMP
70291+ destination-unreachable packets will be sent in response to packets
70292+ sent to ports for which no associated listening process exists.
70293+ This feature supports both IPv4 and IPv6 and exempts the
70294+ loopback interface from blackholing. Enabling this feature
70295+ makes a host more resilient to DoS attacks and reduces network
70296+ visibility against scanners.
70297+
70298+ The blackhole feature as-implemented is equivalent to the FreeBSD
70299+ blackhole feature, as it prevents RST responses to all packets, not
70300+ just SYNs. Under most application behavior this causes no
70301+ problems, but applications (like haproxy) may not close certain
70302+ connections in a way that cleanly terminates them on the remote
70303+ end, leaving the remote host in LAST_ACK state. Because of this
70304+ side-effect and to prevent intentional LAST_ACK DoSes, this
70305+ feature also adds automatic mitigation against such attacks.
70306+ The mitigation drastically reduces the amount of time a socket
70307+ can spend in LAST_ACK state. If you're using haproxy and not
70308+ all servers it connects to have this option enabled, consider
70309+ disabling this feature on the haproxy host.
70310+
70311+ If the sysctl option is enabled, two sysctl options with names
70312+ "ip_blackhole" and "lastack_retries" will be created.
70313+ While "ip_blackhole" takes the standard zero/non-zero on/off
70314+ toggle, "lastack_retries" uses the same kinds of values as
70315+ "tcp_retries1" and "tcp_retries2". The default value of 4
70316+ prevents a socket from lasting more than 45 seconds in LAST_ACK
70317+ state.
70318+
70319+config GRKERNSEC_NO_SIMULT_CONNECT
70320+ bool "Disable TCP Simultaneous Connect"
70321+ default y if GRKERNSEC_CONFIG_AUTO
70322+ depends on NET
70323+ help
70324+ If you say Y here, a feature by Willy Tarreau will be enabled that
70325+ removes a weakness in Linux's strict implementation of TCP that
70326+ allows two clients to connect to each other without either entering
70327+ a listening state. The weakness allows an attacker to easily prevent
70328+ a client from connecting to a known server provided the source port
70329+ for the connection is guessed correctly.
70330+
70331+ As the weakness could be used to prevent an antivirus or IPS from
70332+ fetching updates, or prevent an SSL gateway from fetching a CRL,
70333+ it should be eliminated by enabling this option. Though Linux is
70334+ one of few operating systems supporting simultaneous connect, it
70335+ has no legitimate use in practice and is rarely supported by firewalls.
70336+
70337+config GRKERNSEC_SOCKET
70338+ bool "Socket restrictions"
70339+ depends on NET
70340+ help
70341+ If you say Y here, you will be able to choose from several options.
70342+ If you assign a GID on your system and add it to the supplementary
70343+ groups of users you want to restrict socket access to, this patch
70344+ will perform up to three things, based on the option(s) you choose.
70345+
70346+config GRKERNSEC_SOCKET_ALL
70347+ bool "Deny any sockets to group"
70348+ depends on GRKERNSEC_SOCKET
70349+ help
70350+ If you say Y here, you will be able to choose a GID whose users will
70351+ be unable to connect to other hosts from your machine or run server
70352+ applications from your machine. If the sysctl option is enabled, a
70353+ sysctl option with name "socket_all" is created.
70354+
70355+config GRKERNSEC_SOCKET_ALL_GID
70356+ int "GID to deny all sockets for"
70357+ depends on GRKERNSEC_SOCKET_ALL
70358+ default 1004
70359+ help
70360+ Here you can choose the GID to disable socket access for. Remember to
70361+ add the users you want socket access disabled for to the GID
70362+ specified here. If the sysctl option is enabled, a sysctl option
70363+ with name "socket_all_gid" is created.
70364+
70365+config GRKERNSEC_SOCKET_CLIENT
70366+ bool "Deny client sockets to group"
70367+ depends on GRKERNSEC_SOCKET
70368+ help
70369+ If you say Y here, you will be able to choose a GID whose users will
70370+ be unable to connect to other hosts from your machine, but will be
70371+ able to run servers. If this option is enabled, all users in the group
70372+ you specify will have to use passive mode when initiating ftp transfers
70373+ from the shell on your machine. If the sysctl option is enabled, a
70374+ sysctl option with name "socket_client" is created.
70375+
70376+config GRKERNSEC_SOCKET_CLIENT_GID
70377+ int "GID to deny client sockets for"
70378+ depends on GRKERNSEC_SOCKET_CLIENT
70379+ default 1003
70380+ help
70381+ Here you can choose the GID to disable client socket access for.
70382+ Remember to add the users you want client socket access disabled for to
70383+ the GID specified here. If the sysctl option is enabled, a sysctl
70384+ option with name "socket_client_gid" is created.
70385+
70386+config GRKERNSEC_SOCKET_SERVER
70387+ bool "Deny server sockets to group"
70388+ depends on GRKERNSEC_SOCKET
70389+ help
70390+ If you say Y here, you will be able to choose a GID whose users will
70391+ be unable to run server applications from your machine. If the sysctl
70392+ option is enabled, a sysctl option with name "socket_server" is created.
70393+
70394+config GRKERNSEC_SOCKET_SERVER_GID
70395+ int "GID to deny server sockets for"
70396+ depends on GRKERNSEC_SOCKET_SERVER
70397+ default 1002
70398+ help
70399+ Here you can choose the GID to disable server socket access for.
70400+ Remember to add the users you want server socket access disabled for to
70401+ the GID specified here. If the sysctl option is enabled, a sysctl
70402+ option with name "socket_server_gid" is created.
70403+
70404+endmenu
70405+
70406+menu "Physical Protections"
70407+depends on GRKERNSEC
70408+
70409+config GRKERNSEC_DENYUSB
70410+ bool "Deny new USB connections after toggle"
70411+ default y if GRKERNSEC_CONFIG_AUTO
70412+ depends on SYSCTL && USB_SUPPORT
70413+ help
70414+ If you say Y here, a new sysctl option with name "deny_new_usb"
70415+ will be created. Setting its value to 1 will prevent any new
70416+ USB devices from being recognized by the OS. Any attempted USB
70417+ device insertion will be logged. This option is intended to be
70418+ used against custom USB devices designed to exploit vulnerabilities
70419+ in various USB device drivers.
70420+
70421+ For greatest effectiveness, this sysctl should be set after any
70422+ relevant init scripts have run. This option is safe to enable in distros
70423+ as each user can choose whether or not to toggle the sysctl.
70424+
70425+config GRKERNSEC_DENYUSB_FORCE
70426+ bool "Reject all USB devices not connected at boot"
70427+ select USB
70428+ depends on GRKERNSEC_DENYUSB
70429+ help
70430+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
70431+ that doesn't involve a sysctl entry. This option should only be
70432+ enabled if you're sure you want to deny all new USB connections
70433+ at runtime and don't want to modify init scripts. This should not
70434+ be enabled by distros. It forces the core USB code to be built
70435+ into the kernel image so that all devices connected at boot time
70436+ can be recognized and new USB device connections can be prevented
70437+ prior to init running.
70438+
70439+endmenu
70440+
70441+menu "Sysctl Support"
70442+depends on GRKERNSEC && SYSCTL
70443+
70444+config GRKERNSEC_SYSCTL
70445+ bool "Sysctl support"
70446+ default y if GRKERNSEC_CONFIG_AUTO
70447+ help
70448+ If you say Y here, you will be able to change the options that
70449+ grsecurity runs with at bootup, without having to recompile your
70450+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
70451+ to enable (1) or disable (0) various features. All the sysctl entries
70452+ are mutable until the "grsec_lock" entry is set to a non-zero value.
70453+ All features enabled in the kernel configuration are disabled at boot
70454+ if you do not say Y to the "Turn on features by default" option.
70455+ All options should be set at startup, and the grsec_lock entry should
70456+ be set to a non-zero value after all the options are set.
70457+ *THIS IS EXTREMELY IMPORTANT*
70458+
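  The recommended boot sequence above, sketched in C instead of an init
  script's echo (the entry names are examples taken from this file;
  nothing below is part of the patch itself):

    #include <stdio.h>

    static int grsec_set(const char *entry, const char *val)
    {
        char path[128];
        FILE *f;

        snprintf(path, sizeof(path),
                 "/proc/sys/kernel/grsecurity/%s", entry);
        f = fopen(path, "w");
        if (!f)
            return -1;
        fputs(val, f);
        return fclose(f);
    }

    int main(void)
    {
        grsec_set("deter_bruteforce", "1");  /* set desired features... */
        grsec_set("grsec_lock", "1");        /* ...then lock them down */
        return 0;
    }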
70459+config GRKERNSEC_SYSCTL_DISTRO
70460+ bool "Extra sysctl support for distro makers (READ HELP)"
70461+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
70462+ help
70463+ If you say Y here, additional sysctl options will be created
70464+ for features that affect processes running as root. Therefore,
70465+ it is critical when using this option that the grsec_lock entry be
70466+ enabled after boot. Only distros with prebuilt kernel packages
70467+ with this option enabled that can ensure grsec_lock is enabled
70468+ after boot should use this option.
70469+ *Failure to set grsec_lock after boot makes all grsec features
70470+ this option covers useless*
70471+
70472+ Currently this option creates the following sysctl entries:
70473+ "Disable Privileged I/O": "disable_priv_io"
70474+
70475+config GRKERNSEC_SYSCTL_ON
70476+ bool "Turn on features by default"
70477+ default y if GRKERNSEC_CONFIG_AUTO
70478+ depends on GRKERNSEC_SYSCTL
70479+ help
70480+ If you say Y here, instead of having all features enabled in the
70481+ kernel configuration disabled at boot time, the features will be
70482+ enabled at boot time. It is recommended you say Y here unless
70483+ there is some reason you would want all sysctl-tunable features to
70484+ be disabled by default. As mentioned elsewhere, it is important
70485+ to enable the grsec_lock entry once you have finished modifying
70486+ the sysctl entries.
70487+
70488+endmenu
70489+menu "Logging Options"
70490+depends on GRKERNSEC
70491+
70492+config GRKERNSEC_FLOODTIME
70493+ int "Seconds in between log messages (minimum)"
70494+ default 10
70495+ help
70496+ This option allows you to enforce the minimum number of seconds between
70497+ grsecurity log messages. The default should be suitable for most
70498+ people; however, if you choose to change it, choose a value small enough
70499+ to allow informative logs to be produced, but large enough to
70500+ prevent flooding.
70501+
70502+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
70503+ any rate limiting on grsecurity log messages.
70504+
70505+config GRKERNSEC_FLOODBURST
70506+ int "Number of messages in a burst (maximum)"
70507+ default 6
70508+ help
70509+ This option allows you to choose the maximum number of messages allowed
70510+ within the flood time interval you chose in a separate option. The
70511+ default should be suitable for most people; however, if you find that
70512+ many of your logs are being interpreted as flooding, you may want to
70513+ raise this value.
70514+
70515+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
70516+ any rate limiting on grsecurity log messages.
70517+
70518+endmenu
70519diff --git a/grsecurity/Makefile b/grsecurity/Makefile
70520new file mode 100644
70521index 0000000..30ababb
70522--- /dev/null
70523+++ b/grsecurity/Makefile
70524@@ -0,0 +1,54 @@
70525+# grsecurity – access control and security hardening for Linux
70526+# All code in this directory and various hooks located throughout the Linux kernel are
70527+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
70528+# http://www.grsecurity.net spender@grsecurity.net
70529+#
70530+# This program is free software; you can redistribute it and/or
70531+# modify it under the terms of the GNU General Public License version 2
70532+# as published by the Free Software Foundation.
70533+#
70534+# This program is distributed in the hope that it will be useful,
70535+# but WITHOUT ANY WARRANTY; without even the implied warranty of
70536+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
70537+# GNU General Public License for more details.
70538+#
70539+# You should have received a copy of the GNU General Public License
70540+# along with this program; if not, write to the Free Software
70541+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
70542+
70543+KBUILD_CFLAGS += -Werror
70544+
70545+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
70546+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
70547+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
70548+ grsec_usb.o grsec_ipc.o grsec_proc.o
70549+
70550+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
70551+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
70552+ gracl_learn.o grsec_log.o gracl_policy.o
70553+ifdef CONFIG_COMPAT
70554+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
70555+endif
70556+
70557+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
70558+
70559+ifdef CONFIG_NET
70560+obj-y += grsec_sock.o
70561+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
70562+endif
70563+
70564+ifndef CONFIG_GRKERNSEC
70565+obj-y += grsec_disabled.o
70566+endif
70567+
70568+ifdef CONFIG_GRKERNSEC_HIDESYM
70569+extra-y := grsec_hidesym.o
70570+$(obj)/grsec_hidesym.o:
70571+ @-chmod -f 500 /boot
70572+ @-chmod -f 500 /lib/modules
70573+ @-chmod -f 500 /lib64/modules
70574+ @-chmod -f 500 /lib32/modules
70575+ @-chmod -f 700 .
70576+ @-chmod -f 700 $(objtree)
70577+ @echo ' grsec: protected kernel image paths'
70578+endif
70579diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
70580new file mode 100644
70581index 0000000..58223f6
70582--- /dev/null
70583+++ b/grsecurity/gracl.c
70584@@ -0,0 +1,2702 @@
70585+#include <linux/kernel.h>
70586+#include <linux/module.h>
70587+#include <linux/sched.h>
70588+#include <linux/mm.h>
70589+#include <linux/file.h>
70590+#include <linux/fs.h>
70591+#include <linux/namei.h>
70592+#include <linux/mount.h>
70593+#include <linux/tty.h>
70594+#include <linux/proc_fs.h>
70595+#include <linux/lglock.h>
70596+#include <linux/slab.h>
70597+#include <linux/vmalloc.h>
70598+#include <linux/types.h>
70599+#include <linux/sysctl.h>
70600+#include <linux/netdevice.h>
70601+#include <linux/ptrace.h>
70602+#include <linux/gracl.h>
70603+#include <linux/gralloc.h>
70604+#include <linux/security.h>
70605+#include <linux/grinternal.h>
70606+#include <linux/pid_namespace.h>
70607+#include <linux/stop_machine.h>
70608+#include <linux/fdtable.h>
70609+#include <linux/percpu.h>
70611+#include <linux/hugetlb.h>
70612+#include <linux/posix-timers.h>
70613+#include <linux/prefetch.h>
70614+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
70615+#include <linux/magic.h>
70616+#include <linux/pagemap.h>
70617+#include "../fs/btrfs/async-thread.h"
70618+#include "../fs/btrfs/ctree.h"
70619+#include "../fs/btrfs/btrfs_inode.h"
70620+#endif
70621+#include "../fs/mount.h"
70622+
70623+#include <asm/uaccess.h>
70624+#include <asm/errno.h>
70625+#include <asm/mman.h>
70626+
70627+#define FOR_EACH_ROLE_START(role) \
70628+ role = running_polstate.role_list; \
70629+ while (role) {
70630+
70631+#define FOR_EACH_ROLE_END(role) \
70632+ role = role->prev; \
70633+ }
70634+
70635+extern struct path gr_real_root;
70636+
70637+static struct gr_policy_state running_polstate;
70638+struct gr_policy_state *polstate = &running_polstate;
70639+extern struct gr_alloc_state *current_alloc_state;
70640+
70641+extern char *gr_shared_page[4];
70642+DEFINE_RWLOCK(gr_inode_lock);
70643+
70644+static unsigned int gr_status __read_only = GR_STATUS_INIT;
70645+
70646+#ifdef CONFIG_NET
70647+extern struct vfsmount *sock_mnt;
70648+#endif
70649+
70650+extern struct vfsmount *pipe_mnt;
70651+extern struct vfsmount *shm_mnt;
70652+
70653+#ifdef CONFIG_HUGETLBFS
70654+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
70655+#endif
70656+
70657+extern u16 acl_sp_role_value;
70658+extern struct acl_object_label *fakefs_obj_rw;
70659+extern struct acl_object_label *fakefs_obj_rwx;
70660+
70661+int gr_acl_is_enabled(void)
70662+{
70663+ return (gr_status & GR_READY);
70664+}
70665+
70666+void gr_enable_rbac_system(void)
70667+{
70668+ pax_open_kernel();
70669+ gr_status |= GR_READY;
70670+ pax_close_kernel();
70671+}
70672+
70673+int gr_rbac_disable(void *unused)
70674+{
70675+ pax_open_kernel();
70676+ gr_status &= ~GR_READY;
70677+ pax_close_kernel();
70678+
70679+ return 0;
70680+}
70681+
70682+static inline dev_t __get_dev(const struct dentry *dentry)
70683+{
70684+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
70685+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
70686+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
70687+ else
70688+#endif
70689+ return dentry->d_sb->s_dev;
70690+}
70691+
70692+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
70693+{
70694+ return __get_dev(dentry);
70695+}
70696+
70697+static char gr_task_roletype_to_char(struct task_struct *task)
70698+{
70699+ switch (task->role->roletype &
70700+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
70701+ GR_ROLE_SPECIAL)) {
70702+ case GR_ROLE_DEFAULT:
70703+ return 'D';
70704+ case GR_ROLE_USER:
70705+ return 'U';
70706+ case GR_ROLE_GROUP:
70707+ return 'G';
70708+ case GR_ROLE_SPECIAL:
70709+ return 'S';
70710+ }
70711+
70712+ return 'X';
70713+}
70714+
70715+char gr_roletype_to_char(void)
70716+{
70717+ return gr_task_roletype_to_char(current);
70718+}
70719+
70720+__inline__ int
70721+gr_acl_tpe_check(void)
70722+{
70723+ if (unlikely(!(gr_status & GR_READY)))
70724+ return 0;
70725+ if (current->role->roletype & GR_ROLE_TPE)
70726+ return 1;
70727+ else
70728+ return 0;
70729+}
70730+
70731+int
70732+gr_handle_rawio(const struct inode *inode)
70733+{
70734+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
70735+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
70736+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
70737+ !capable(CAP_SYS_RAWIO))
70738+ return 1;
70739+#endif
70740+ return 0;
70741+}
70742+
70743+int
70744+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
70745+{
70746+ if (likely(lena != lenb))
70747+ return 0;
70748+
70749+ return !memcmp(a, b, lena);
70750+}
70751+
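+/* The path helpers below assemble pathnames right-to-left, d_path()-style:
+   the buffer pointer starts at the end of the buffer and each component is
+   prepended in front of what has already been written. */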
70752+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
70753+{
70754+ *buflen -= namelen;
70755+ if (*buflen < 0)
70756+ return -ENAMETOOLONG;
70757+ *buffer -= namelen;
70758+ memcpy(*buffer, str, namelen);
70759+ return 0;
70760+}
70761+
70762+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
70763+{
70764+ return prepend(buffer, buflen, name->name, name->len);
70765+}
70766+
70767+static int prepend_path(const struct path *path, struct path *root,
70768+ char **buffer, int *buflen)
70769+{
70770+ struct dentry *dentry = path->dentry;
70771+ struct vfsmount *vfsmnt = path->mnt;
70772+ struct mount *mnt = real_mount(vfsmnt);
70773+ bool slash = false;
70774+ int error = 0;
70775+
70776+ while (dentry != root->dentry || vfsmnt != root->mnt) {
70777+ struct dentry * parent;
70778+
70779+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
70780+ /* Global root? */
70781+ if (!mnt_has_parent(mnt)) {
70782+ goto out;
70783+ }
70784+ dentry = mnt->mnt_mountpoint;
70785+ mnt = mnt->mnt_parent;
70786+ vfsmnt = &mnt->mnt;
70787+ continue;
70788+ }
70789+ parent = dentry->d_parent;
70790+ prefetch(parent);
70791+ spin_lock(&dentry->d_lock);
70792+ error = prepend_name(buffer, buflen, &dentry->d_name);
70793+ spin_unlock(&dentry->d_lock);
70794+ if (!error)
70795+ error = prepend(buffer, buflen, "/", 1);
70796+ if (error)
70797+ break;
70798+
70799+ slash = true;
70800+ dentry = parent;
70801+ }
70802+
70803+out:
70804+ if (!error && !slash)
70805+ error = prepend(buffer, buflen, "/", 1);
70806+
70807+ return error;
70808+}
70809+
70810+/* this must be called with mount_lock and rename_lock held */
70811+
70812+static char *__our_d_path(const struct path *path, struct path *root,
70813+ char *buf, int buflen)
70814+{
70815+ char *res = buf + buflen;
70816+ int error;
70817+
70818+ prepend(&res, &buflen, "\0", 1);
70819+ error = prepend_path(path, root, &res, &buflen);
70820+ if (error)
70821+ return ERR_PTR(error);
70822+
70823+ return res;
70824+}
70825+
70826+static char *
70827+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
70828+{
70829+ char *retval;
70830+
70831+ retval = __our_d_path(path, root, buf, buflen);
70832+ if (unlikely(IS_ERR(retval)))
70833+ retval = strcpy(buf, "<path too long>");
70834+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
70835+ retval[1] = '\0';
70836+
70837+ return retval;
70838+}
70839+
70840+static char *
70841+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
70842+ char *buf, int buflen)
70843+{
70844+ struct path path;
70845+ char *res;
70846+
70847+ path.dentry = (struct dentry *)dentry;
70848+ path.mnt = (struct vfsmount *)vfsmnt;
70849+
70850+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
70851+ by the RBAC system */
70852+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
70853+
70854+ return res;
70855+}
70856+
70857+static char *
70858+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
70859+ char *buf, int buflen)
70860+{
70861+ char *res;
70862+ struct path path;
70863+ struct path root;
70864+ struct task_struct *reaper = init_pid_ns.child_reaper;
70865+
70866+ path.dentry = (struct dentry *)dentry;
70867+ path.mnt = (struct vfsmount *)vfsmnt;
70868+
70869+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
70870+ get_fs_root(reaper->fs, &root);
70871+
70872+ read_seqlock_excl(&mount_lock);
70873+ write_seqlock(&rename_lock);
70874+ res = gen_full_path(&path, &root, buf, buflen);
70875+ write_sequnlock(&rename_lock);
70876+ read_sequnlock_excl(&mount_lock);
70877+
70878+ path_put(&root);
70879+ return res;
70880+}
70881+
70882+char *
70883+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
70884+{
70885+ char *ret;
70886+ read_seqlock_excl(&mount_lock);
70887+ write_seqlock(&rename_lock);
70888+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
70889+ PAGE_SIZE);
70890+ write_sequnlock(&rename_lock);
70891+ read_sequnlock_excl(&mount_lock);
70892+ return ret;
70893+}
70894+
70895+static char *
70896+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
70897+{
70898+ char *ret;
70899+ char *buf;
70900+ int buflen;
70901+
70902+ read_seqlock_excl(&mount_lock);
70903+ write_seqlock(&rename_lock);
70904+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
70905+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
70906+ buflen = (int)(ret - buf);
70907+ if (buflen >= 5)
70908+ prepend(&ret, &buflen, "/proc", 5);
70909+ else
70910+ ret = strcpy(buf, "<path too long>");
70911+ write_sequnlock(&rename_lock);
70912+ read_sequnlock_excl(&mount_lock);
70913+ return ret;
70914+}
70915+
70916+char *
70917+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
70918+{
70919+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
70920+ PAGE_SIZE);
70921+}
70922+
70923+char *
70924+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
70925+{
70926+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
70927+ PAGE_SIZE);
70928+}
70929+
70930+char *
70931+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
70932+{
70933+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
70934+ PAGE_SIZE);
70935+}
70936+
70937+char *
70938+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
70939+{
70940+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
70941+ PAGE_SIZE);
70942+}
70943+
70944+char *
70945+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
70946+{
70947+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
70948+ PAGE_SIZE);
70949+}
70950+
70951+__inline__ __u32
70952+to_gr_audit(const __u32 reqmode)
70953+{
70954+ /* masks off auditable permission flags, then shifts them to create
70955+ auditing flags, and adds the special case of append auditing if
70956+ we're requesting write */
70957+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
70958+}
70959+
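+/* Resolve a task's role: first try a user (or user-domain) role matching
+   the uid, then a group (or group-domain) role matching the gid, then the
+   default role.  A role carrying an allowed_ips list only matches when the
+   task's saved IP falls inside one of the listed networks. */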
70960+struct acl_role_label *
70961+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
70962+ const gid_t gid)
70963+{
70964+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
70965+ struct acl_role_label *match;
70966+ struct role_allowed_ip *ipp;
70967+ unsigned int x;
70968+ u32 curr_ip = task->signal->saved_ip;
70969+
70970+ match = state->acl_role_set.r_hash[index];
70971+
70972+ while (match) {
70973+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
70974+ for (x = 0; x < match->domain_child_num; x++) {
70975+ if (match->domain_children[x] == uid)
70976+ goto found;
70977+ }
70978+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
70979+ break;
70980+ match = match->next;
70981+ }
70982+found:
70983+ if (match == NULL) {
70984+ try_group:
70985+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
70986+ match = state->acl_role_set.r_hash[index];
70987+
70988+ while (match) {
70989+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
70990+ for (x = 0; x < match->domain_child_num; x++) {
70991+ if (match->domain_children[x] == gid)
70992+ goto found2;
70993+ }
70994+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
70995+ break;
70996+ match = match->next;
70997+ }
70998+found2:
70999+ if (match == NULL)
71000+ match = state->default_role;
71001+ if (match->allowed_ips == NULL)
71002+ return match;
71003+ else {
71004+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
71005+ if (likely
71006+ ((ntohl(curr_ip) & ipp->netmask) ==
71007+ (ntohl(ipp->addr) & ipp->netmask)))
71008+ return match;
71009+ }
71010+ match = state->default_role;
71011+ }
71012+ } else if (match->allowed_ips == NULL) {
71013+ return match;
71014+ } else {
71015+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
71016+ if (likely
71017+ ((ntohl(curr_ip) & ipp->netmask) ==
71018+ (ntohl(ipp->addr) & ipp->netmask)))
71019+ return match;
71020+ }
71021+ goto try_group;
71022+ }
71023+
71024+ return match;
71025+}
71026+
71027+static struct acl_role_label *
71028+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
71029+ const gid_t gid)
71030+{
71031+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
71032+}
71033+
71034+struct acl_subject_label *
71035+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
71036+ const struct acl_role_label *role)
71037+{
71038+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
71039+ struct acl_subject_label *match;
71040+
71041+ match = role->subj_hash[index];
71042+
71043+ while (match && (match->inode != ino || match->device != dev ||
71044+ (match->mode & GR_DELETED))) {
71045+ match = match->next;
71046+ }
71047+
71048+ if (match && !(match->mode & GR_DELETED))
71049+ return match;
71050+ else
71051+ return NULL;
71052+}
71053+
71054+struct acl_subject_label *
71055+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
71056+ const struct acl_role_label *role)
71057+{
71058+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
71059+ struct acl_subject_label *match;
71060+
71061+ match = role->subj_hash[index];
71062+
71063+ while (match && (match->inode != ino || match->device != dev ||
71064+ !(match->mode & GR_DELETED))) {
71065+ match = match->next;
71066+ }
71067+
71068+ if (match && (match->mode & GR_DELETED))
71069+ return match;
71070+ else
71071+ return NULL;
71072+}
71073+
71074+static struct acl_object_label *
71075+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
71076+ const struct acl_subject_label *subj)
71077+{
71078+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
71079+ struct acl_object_label *match;
71080+
71081+ match = subj->obj_hash[index];
71082+
71083+ while (match && (match->inode != ino || match->device != dev ||
71084+ (match->mode & GR_DELETED))) {
71085+ match = match->next;
71086+ }
71087+
71088+ if (match && !(match->mode & GR_DELETED))
71089+ return match;
71090+ else
71091+ return NULL;
71092+}
71093+
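+/* Create-time variant: prefer an object marked GR_DELETED (the pathname is
+   being reused for a new file), otherwise fall back to a live object with
+   the same inode/device pair. */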
71094+static struct acl_object_label *
71095+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
71096+ const struct acl_subject_label *subj)
71097+{
71098+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
71099+ struct acl_object_label *match;
71100+
71101+ match = subj->obj_hash[index];
71102+
71103+ while (match && (match->inode != ino || match->device != dev ||
71104+ !(match->mode & GR_DELETED))) {
71105+ match = match->next;
71106+ }
71107+
71108+ if (match && (match->mode & GR_DELETED))
71109+ return match;
71110+
71111+ match = subj->obj_hash[index];
71112+
71113+ while (match && (match->inode != ino || match->device != dev ||
71114+ (match->mode & GR_DELETED))) {
71115+ match = match->next;
71116+ }
71117+
71118+ if (match && !(match->mode & GR_DELETED))
71119+ return match;
71120+ else
71121+ return NULL;
71122+}
71123+
71124+struct name_entry *
71125+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
71126+{
71127+ unsigned int len = strlen(name);
71128+ unsigned int key = full_name_hash(name, len);
71129+ unsigned int index = key % state->name_set.n_size;
71130+ struct name_entry *match;
71131+
71132+ match = state->name_set.n_hash[index];
71133+
71134+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
71135+ match = match->next;
71136+
71137+ return match;
71138+}
71139+
71140+static struct name_entry *
71141+lookup_name_entry(const char *name)
71142+{
71143+ return __lookup_name_entry(&running_polstate, name);
71144+}
71145+
71146+static struct name_entry *
71147+lookup_name_entry_create(const char *name)
71148+{
71149+ unsigned int len = strlen(name);
71150+ unsigned int key = full_name_hash(name, len);
71151+ unsigned int index = key % running_polstate.name_set.n_size;
71152+ struct name_entry *match;
71153+
71154+ match = running_polstate.name_set.n_hash[index];
71155+
71156+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
71157+ !match->deleted))
71158+ match = match->next;
71159+
71160+ if (match && match->deleted)
71161+ return match;
71162+
71163+ match = running_polstate.name_set.n_hash[index];
71164+
71165+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
71166+ match->deleted))
71167+ match = match->next;
71168+
71169+ if (match && !match->deleted)
71170+ return match;
71171+ else
71172+ return NULL;
71173+}
71174+
71175+static struct inodev_entry *
71176+lookup_inodev_entry(const ino_t ino, const dev_t dev)
71177+{
71178+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
71179+ struct inodev_entry *match;
71180+
71181+ match = running_polstate.inodev_set.i_hash[index];
71182+
71183+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
71184+ match = match->next;
71185+
71186+ return match;
71187+}
71188+
71189+void
71190+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
71191+{
71192+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
71193+ state->inodev_set.i_size);
71194+ struct inodev_entry **curr;
71195+
71196+ entry->prev = NULL;
71197+
71198+ curr = &state->inodev_set.i_hash[index];
71199+ if (*curr != NULL)
71200+ (*curr)->prev = entry;
71201+
71202+ entry->next = *curr;
71203+ *curr = entry;
71204+
71205+ return;
71206+}
71207+
71208+static void
71209+insert_inodev_entry(struct inodev_entry *entry)
71210+{
71211+ __insert_inodev_entry(&running_polstate, entry);
71212+}
71213+
71214+void
71215+insert_acl_obj_label(struct acl_object_label *obj,
71216+ struct acl_subject_label *subj)
71217+{
71218+ unsigned int index =
71219+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
71220+ struct acl_object_label **curr;
71221+
71222+ obj->prev = NULL;
71223+
71224+ curr = &subj->obj_hash[index];
71225+ if (*curr != NULL)
71226+ (*curr)->prev = obj;
71227+
71228+ obj->next = *curr;
71229+ *curr = obj;
71230+
71231+ return;
71232+}
71233+
71234+void
71235+insert_acl_subj_label(struct acl_subject_label *obj,
71236+ struct acl_role_label *role)
71237+{
71238+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
71239+ struct acl_subject_label **curr;
71240+
71241+ obj->prev = NULL;
71242+
71243+ curr = &role->subj_hash[index];
71244+ if (*curr != NULL)
71245+ (*curr)->prev = obj;
71246+
71247+ obj->next = *curr;
71248+ *curr = obj;
71249+
71250+ return;
71251+}
71252+
71253+/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
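+/* e.g. glob_match("a*.log", "ab.log") == 0 (match), while
+   glob_match("a*.log", "a/b.log") == 1: '?' never matches '/', and a
+   non-trailing '*' will not search past the next '/' component */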
71254+
71255+static int
71256+glob_match(const char *p, const char *n)
71257+{
71258+ char c;
71259+
71260+ while ((c = *p++) != '\0') {
71261+ switch (c) {
71262+ case '?':
71263+ if (*n == '\0')
71264+ return 1;
71265+ else if (*n == '/')
71266+ return 1;
71267+ break;
71268+ case '\\':
71269+ if (*n != c)
71270+ return 1;
71271+ break;
71272+ case '*':
71273+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
71274+ if (*n == '/')
71275+ return 1;
71276+ else if (c == '?') {
71277+ if (*n == '\0')
71278+ return 1;
71279+ else
71280+ ++n;
71281+ }
71282+ }
71283+ if (c == '\0') {
71284+ return 0;
71285+ } else {
71286+ const char *endp;
71287+
71288+ if ((endp = strchr(n, '/')) == NULL)
71289+ endp = n + strlen(n);
71290+
71291+ if (c == '[') {
71292+ for (--p; n < endp; ++n)
71293+ if (!glob_match(p, n))
71294+ return 0;
71295+ } else if (c == '/') {
71296+ while (*n != '\0' && *n != '/')
71297+ ++n;
71298+ if (*n == '/' && !glob_match(p, n + 1))
71299+ return 0;
71300+ } else {
71301+ for (--p; n < endp; ++n)
71302+ if (*n == c && !glob_match(p, n))
71303+ return 0;
71304+ }
71305+
71306+ return 1;
71307+ }
71308+ case '[':
71309+ {
71310+ int not;
71311+ char cold;
71312+
71313+ if (*n == '\0' || *n == '/')
71314+ return 1;
71315+
71316+ not = (*p == '!' || *p == '^');
71317+ if (not)
71318+ ++p;
71319+
71320+ c = *p++;
71321+ for (;;) {
71322+ unsigned char fn = (unsigned char)*n;
71323+
71324+ if (c == '\0')
71325+ return 1;
71326+ else {
71327+ if (c == fn)
71328+ goto matched;
71329+ cold = c;
71330+ c = *p++;
71331+
71332+ if (c == '-' && *p != ']') {
71333+ unsigned char cend = *p++;
71334+
71335+ if (cend == '\0')
71336+ return 1;
71337+
71338+ if (cold <= fn && fn <= cend)
71339+ goto matched;
71340+
71341+ c = *p++;
71342+ }
71343+ }
71344+
71345+ if (c == ']')
71346+ break;
71347+ }
71348+ if (!not)
71349+ return 1;
71350+ break;
71351+ matched:
71352+ while (c != ']') {
71353+ if (c == '\0')
71354+ return 1;
71355+
71356+ c = *p++;
71357+ }
71358+ if (not)
71359+ return 1;
71360+ }
71361+ break;
71362+ default:
71363+ if (c != *n)
71364+ return 1;
71365+ }
71366+
71367+ ++n;
71368+ }
71369+
71370+ if (*n == '\0')
71371+ return 0;
71372+
71373+ if (*n == '/')
71374+ return 0;
71375+
71376+ return 1;
71377+}
71378+
71379+static struct acl_object_label *
71380+chk_glob_label(struct acl_object_label *globbed,
71381+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
71382+{
71383+ struct acl_object_label *tmp;
71384+
71385+ if (*path == NULL)
71386+ *path = gr_to_filename_nolock(dentry, mnt);
71387+
71388+ tmp = globbed;
71389+
71390+ while (tmp) {
71391+ if (!glob_match(tmp->filename, *path))
71392+ return tmp;
71393+ tmp = tmp->next;
71394+ }
71395+
71396+ return NULL;
71397+}
71398+
71399+static struct acl_object_label *
71400+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
71401+ const ino_t curr_ino, const dev_t curr_dev,
71402+ const struct acl_subject_label *subj, char **path, const int checkglob)
71403+{
71404+ struct acl_subject_label *tmpsubj;
71405+ struct acl_object_label *retval;
71406+ struct acl_object_label *retval2;
71407+
71408+ tmpsubj = (struct acl_subject_label *) subj;
71409+ read_lock(&gr_inode_lock);
71410+ do {
71411+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
71412+ if (retval) {
71413+ if (checkglob && retval->globbed) {
71414+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
71415+ if (retval2)
71416+ retval = retval2;
71417+ }
71418+ break;
71419+ }
71420+ } while ((tmpsubj = tmpsubj->parent_subject));
71421+ read_unlock(&gr_inode_lock);
71422+
71423+ return retval;
71424+}
71425+
71426+static __inline__ struct acl_object_label *
71427+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
71428+ struct dentry *curr_dentry,
71429+ const struct acl_subject_label *subj, char **path, const int checkglob)
71430+{
71431+ int newglob = checkglob;
71432+ ino_t inode;
71433+ dev_t device;
71434+
71435+	/* If we aren't yet checking a subdirectory of the original path, skip glob
71436+	   checking, as we don't want a "/ *" rule to match instead of the "/" object.
71437+	   Don't skip it for create lookups that call this function, though, since
71438+	   they look up on the parent and thus need globbing checks on all paths.
71439+	*/
71440+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
71441+ newglob = GR_NO_GLOB;
71442+
71443+ spin_lock(&curr_dentry->d_lock);
71444+ inode = curr_dentry->d_inode->i_ino;
71445+ device = __get_dev(curr_dentry);
71446+ spin_unlock(&curr_dentry->d_lock);
71447+
71448+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
71449+}
71450+
71451+#ifdef CONFIG_HUGETLBFS
71452+static inline bool
71453+is_hugetlbfs_mnt(const struct vfsmount *mnt)
71454+{
71455+ int i;
71456+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
71457+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
71458+ return true;
71459+ }
71460+
71461+ return false;
71462+}
71463+#endif
71464+
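+/* Find the object label governing (dentry, mnt) for a subject: walk from
+   the dentry up through mountpoints toward the RBAC root, returning the
+   first match (or the root's object as a last resort).  Anonymous shared
+   memory, pipes, sockets, and private inodes get a fake rw/rwx object. */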
71465+static struct acl_object_label *
71466+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71467+ const struct acl_subject_label *subj, char *path, const int checkglob)
71468+{
71469+ struct dentry *dentry = (struct dentry *) l_dentry;
71470+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
71471+ struct mount *real_mnt = real_mount(mnt);
71472+ struct acl_object_label *retval;
71473+ struct dentry *parent;
71474+
71475+ read_seqlock_excl(&mount_lock);
71476+ write_seqlock(&rename_lock);
71477+
71478+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
71479+#ifdef CONFIG_NET
71480+ mnt == sock_mnt ||
71481+#endif
71482+#ifdef CONFIG_HUGETLBFS
71483+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
71484+#endif
71485+ /* ignore Eric Biederman */
71486+ IS_PRIVATE(l_dentry->d_inode))) {
71487+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
71488+ goto out;
71489+ }
71490+
71491+ for (;;) {
71492+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
71493+ break;
71494+
71495+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
71496+ if (!mnt_has_parent(real_mnt))
71497+ break;
71498+
71499+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
71500+ if (retval != NULL)
71501+ goto out;
71502+
71503+ dentry = real_mnt->mnt_mountpoint;
71504+ real_mnt = real_mnt->mnt_parent;
71505+ mnt = &real_mnt->mnt;
71506+ continue;
71507+ }
71508+
71509+ parent = dentry->d_parent;
71510+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
71511+ if (retval != NULL)
71512+ goto out;
71513+
71514+ dentry = parent;
71515+ }
71516+
71517+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
71518+
71519+ /* gr_real_root is pinned so we don't have to hold a reference */
71520+ if (retval == NULL)
71521+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
71522+out:
71523+ write_sequnlock(&rename_lock);
71524+ read_sequnlock_excl(&mount_lock);
71525+
71526+ BUG_ON(retval == NULL);
71527+
71528+ return retval;
71529+}
71530+
71531+static __inline__ struct acl_object_label *
71532+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71533+ const struct acl_subject_label *subj)
71534+{
71535+ char *path = NULL;
71536+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
71537+}
71538+
71539+static __inline__ struct acl_object_label *
71540+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71541+ const struct acl_subject_label *subj)
71542+{
71543+ char *path = NULL;
71544+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
71545+}
71546+
71547+static __inline__ struct acl_object_label *
71548+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71549+ const struct acl_subject_label *subj, char *path)
71550+{
71551+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
71552+}
71553+
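+/* Same upward walk as __chk_obj_label(), but resolving the subject label
+   for a role, falling back to the subject attached to the RBAC root. */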
71554+struct acl_subject_label *
71555+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71556+ const struct acl_role_label *role)
71557+{
71558+ struct dentry *dentry = (struct dentry *) l_dentry;
71559+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
71560+ struct mount *real_mnt = real_mount(mnt);
71561+ struct acl_subject_label *retval;
71562+ struct dentry *parent;
71563+
71564+ read_seqlock_excl(&mount_lock);
71565+ write_seqlock(&rename_lock);
71566+
71567+ for (;;) {
71568+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
71569+ break;
71570+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
71571+ if (!mnt_has_parent(real_mnt))
71572+ break;
71573+
71574+ spin_lock(&dentry->d_lock);
71575+ read_lock(&gr_inode_lock);
71576+ retval =
71577+ lookup_acl_subj_label(dentry->d_inode->i_ino,
71578+ __get_dev(dentry), role);
71579+ read_unlock(&gr_inode_lock);
71580+ spin_unlock(&dentry->d_lock);
71581+ if (retval != NULL)
71582+ goto out;
71583+
71584+ dentry = real_mnt->mnt_mountpoint;
71585+ real_mnt = real_mnt->mnt_parent;
71586+ mnt = &real_mnt->mnt;
71587+ continue;
71588+ }
71589+
71590+ spin_lock(&dentry->d_lock);
71591+ read_lock(&gr_inode_lock);
71592+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
71593+ __get_dev(dentry), role);
71594+ read_unlock(&gr_inode_lock);
71595+ parent = dentry->d_parent;
71596+ spin_unlock(&dentry->d_lock);
71597+
71598+ if (retval != NULL)
71599+ goto out;
71600+
71601+ dentry = parent;
71602+ }
71603+
71604+ spin_lock(&dentry->d_lock);
71605+ read_lock(&gr_inode_lock);
71606+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
71607+ __get_dev(dentry), role);
71608+ read_unlock(&gr_inode_lock);
71609+ spin_unlock(&dentry->d_lock);
71610+
71611+ if (unlikely(retval == NULL)) {
71612+ /* gr_real_root is pinned, we don't need to hold a reference */
71613+ read_lock(&gr_inode_lock);
71614+ retval = lookup_acl_subj_label(gr_real_root.dentry->d_inode->i_ino,
71615+ __get_dev(gr_real_root.dentry), role);
71616+ read_unlock(&gr_inode_lock);
71617+ }
71618+out:
71619+ write_sequnlock(&rename_lock);
71620+ read_sequnlock_excl(&mount_lock);
71621+
71622+ BUG_ON(retval == NULL);
71623+
71624+ return retval;
71625+}
71626+
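+/* Transition the caller's parent (typically the shell gradm was invoked
+   from) to the named special role, re-resolving its subject and mmap
+   writability under that role. */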
71627+void
71628+assign_special_role(const char *rolename)
71629+{
71630+ struct acl_object_label *obj;
71631+ struct acl_role_label *r;
71632+ struct acl_role_label *assigned = NULL;
71633+ struct task_struct *tsk;
71634+ struct file *filp;
71635+
71636+ FOR_EACH_ROLE_START(r)
71637+ if (!strcmp(rolename, r->rolename) &&
71638+ (r->roletype & GR_ROLE_SPECIAL)) {
71639+ assigned = r;
71640+ break;
71641+ }
71642+ FOR_EACH_ROLE_END(r)
71643+
71644+ if (!assigned)
71645+ return;
71646+
71647+ read_lock(&tasklist_lock);
71648+ read_lock(&grsec_exec_file_lock);
71649+
71650+ tsk = current->real_parent;
71651+ if (tsk == NULL)
71652+ goto out_unlock;
71653+
71654+ filp = tsk->exec_file;
71655+ if (filp == NULL)
71656+ goto out_unlock;
71657+
71658+ tsk->is_writable = 0;
71659+ tsk->inherited = 0;
71660+
71661+ tsk->acl_sp_role = 1;
71662+ tsk->acl_role_id = ++acl_sp_role_value;
71663+ tsk->role = assigned;
71664+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
71665+
71666+ /* ignore additional mmap checks for processes that are writable
71667+ by the default ACL */
71668+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71669+ if (unlikely(obj->mode & GR_WRITE))
71670+ tsk->is_writable = 1;
71671+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
71672+ if (unlikely(obj->mode & GR_WRITE))
71673+ tsk->is_writable = 1;
71674+
71675+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71676+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
71677+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
71678+#endif
71679+
71680+out_unlock:
71681+ read_unlock(&grsec_exec_file_lock);
71682+ read_unlock(&tasklist_lock);
71683+ return;
71684+}
71685+
71686+
71687+static void
71688+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
71689+{
71690+ struct task_struct *task = current;
71691+ const struct cred *cred = current_cred();
71692+
71693+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
71694+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
71695+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
71696+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
71697+
71698+ return;
71699+}
71700+
71701+static void
71702+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
71703+{
71704+ struct task_struct *task = current;
71705+ const struct cred *cred = current_cred();
71706+
71707+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
71708+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
71709+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
71710+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
71711+
71712+ return;
71713+}
71714+
71715+static void
71716+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
71717+{
71718+ struct task_struct *task = current;
71719+ const struct cred *cred = current_cred();
71720+
71721+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
71722+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
71723+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
71724+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
71725+
71726+ return;
71727+}
71728+
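+/* Apply the subject's resource limits to the task; learning subjects are
+   skipped, and an RLIMIT_CPU change must also update the posix CPU timer. */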
71729+static void
71730+gr_set_proc_res(struct task_struct *task)
71731+{
71732+ struct acl_subject_label *proc;
71733+ unsigned short i;
71734+
71735+ proc = task->acl;
71736+
71737+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
71738+ return;
71739+
71740+ for (i = 0; i < RLIM_NLIMITS; i++) {
71741+ if (!(proc->resmask & (1U << i)))
71742+ continue;
71743+
71744+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
71745+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
71746+
71747+ if (i == RLIMIT_CPU)
71748+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
71749+ }
71750+
71751+ return;
71752+}
71753+
71754+/* both of the below must be called with
71755+ rcu_read_lock();
71756+ read_lock(&tasklist_lock);
71757+ read_lock(&grsec_exec_file_lock);
71758+*/
71759+
71760+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename)
71761+{
71762+ char *tmpname;
71763+ struct acl_subject_label *tmpsubj;
71764+ struct file *filp;
71765+ struct name_entry *nmatch;
71766+
71767+ filp = task->exec_file;
71768+ if (filp == NULL)
71769+ return NULL;
71770+
71771+ /* the following is to apply the correct subject
71772+ on binaries running when the RBAC system
71773+ is enabled, when the binaries have been
71774+ replaced or deleted since their execution
71775+ -----
71776+ when the RBAC system starts, the inode/dev
71777+ from exec_file will be one the RBAC system
71778+ is unaware of. It only knows the inode/dev
71779+ of the present file on disk, or the absence
71780+ of it.
71781+ */
71782+
71783+ if (filename)
71784+ nmatch = __lookup_name_entry(state, filename);
71785+ else {
71786+ preempt_disable();
71787+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
71788+
71789+ nmatch = __lookup_name_entry(state, tmpname);
71790+ preempt_enable();
71791+ }
71792+ tmpsubj = NULL;
71793+ if (nmatch) {
71794+ if (nmatch->deleted)
71795+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
71796+ else
71797+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
71798+ }
71799+ /* this also works for the reload case -- if we don't match a potentially inherited subject
71800+ then we fall back to a normal lookup based on the binary's ino/dev
71801+ */
71802+ if (tmpsubj == NULL)
71803+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
71804+
71805+ return tmpsubj;
71806+}
71807+
71808+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename)
71809+{
71810+ return __gr_get_subject_for_task(&running_polstate, task, filename);
71811+}
71812+
71813+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
71814+{
71815+ struct acl_object_label *obj;
71816+ struct file *filp;
71817+
71818+ filp = task->exec_file;
71819+
71820+ task->acl = subj;
71821+ task->is_writable = 0;
71822+ /* ignore additional mmap checks for processes that are writable
71823+ by the default ACL */
71824+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
71825+ if (unlikely(obj->mode & GR_WRITE))
71826+ task->is_writable = 1;
71827+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
71828+ if (unlikely(obj->mode & GR_WRITE))
71829+ task->is_writable = 1;
71830+
71831+ gr_set_proc_res(task);
71832+
71833+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71834+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71835+#endif
71836+}
71837+
71838+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
71839+{
71840+ __gr_apply_subject_to_task(&running_polstate, task, subj);
71841+}
71842+
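+/* Central file access check: mask the requested mode with the matching
+   object's mode.  Also handles the persistent-role transfer to init for
+   GR_INIT_TRANSFER objects, and emits learning entries when the subject
+   is in (inherit-)learning mode. */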
71843+__u32
71844+gr_search_file(const struct dentry * dentry, const __u32 mode,
71845+ const struct vfsmount * mnt)
71846+{
71847+ __u32 retval = mode;
71848+ struct acl_subject_label *curracl;
71849+ struct acl_object_label *currobj;
71850+
71851+ if (unlikely(!(gr_status & GR_READY)))
71852+ return (mode & ~GR_AUDITS);
71853+
71854+ curracl = current->acl;
71855+
71856+ currobj = chk_obj_label(dentry, mnt, curracl);
71857+ retval = currobj->mode & mode;
71858+
71859+ /* if we're opening a specified transfer file for writing
71860+ (e.g. /dev/initctl), then transfer our role to init
71861+ */
71862+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
71863+ current->role->roletype & GR_ROLE_PERSIST)) {
71864+ struct task_struct *task = init_pid_ns.child_reaper;
71865+
71866+ if (task->role != current->role) {
71867+ struct acl_subject_label *subj;
71868+
71869+ task->acl_sp_role = 0;
71870+ task->acl_role_id = current->acl_role_id;
71871+ task->role = current->role;
71872+ rcu_read_lock();
71873+ read_lock(&grsec_exec_file_lock);
71874+ subj = gr_get_subject_for_task(task, NULL);
71875+ gr_apply_subject_to_task(task, subj);
71876+ read_unlock(&grsec_exec_file_lock);
71877+ rcu_read_unlock();
71878+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
71879+ }
71880+ }
71881+
71882+ if (unlikely
71883+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
71884+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
71885+ __u32 new_mode = mode;
71886+
71887+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
71888+
71889+ retval = new_mode;
71890+
71891+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
71892+ new_mode |= GR_INHERIT;
71893+
71894+ if (!(mode & GR_NOLEARN))
71895+ gr_log_learn(dentry, mnt, new_mode);
71896+ }
71897+
71898+ return retval;
71899+}
71900+
71901+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
71902+ const struct dentry *parent,
71903+ const struct vfsmount *mnt)
71904+{
71905+ struct name_entry *match;
71906+ struct acl_object_label *matchpo;
71907+ struct acl_subject_label *curracl;
71908+ char *path;
71909+
71910+ if (unlikely(!(gr_status & GR_READY)))
71911+ return NULL;
71912+
71913+ preempt_disable();
71914+ path = gr_to_filename_rbac(new_dentry, mnt);
71915+ match = lookup_name_entry_create(path);
71916+
71917+ curracl = current->acl;
71918+
71919+ if (match) {
71920+ read_lock(&gr_inode_lock);
71921+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
71922+ read_unlock(&gr_inode_lock);
71923+
71924+ if (matchpo) {
71925+ preempt_enable();
71926+ return matchpo;
71927+ }
71928+ }
71929+
71930+ // lookup parent
71931+
71932+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
71933+
71934+ preempt_enable();
71935+ return matchpo;
71936+}
71937+
71938+__u32
71939+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
71940+ const struct vfsmount * mnt, const __u32 mode)
71941+{
71942+ struct acl_object_label *matchpo;
71943+ __u32 retval;
71944+
71945+ if (unlikely(!(gr_status & GR_READY)))
71946+ return (mode & ~GR_AUDITS);
71947+
71948+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
71949+
71950+ retval = matchpo->mode & mode;
71951+
71952+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
71953+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
71954+ __u32 new_mode = mode;
71955+
71956+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
71957+
71958+ gr_log_learn(new_dentry, mnt, new_mode);
71959+ return new_mode;
71960+ }
71961+
71962+ return retval;
71963+}
71964+
71965+__u32
71966+gr_check_link(const struct dentry * new_dentry,
71967+ const struct dentry * parent_dentry,
71968+ const struct vfsmount * parent_mnt,
71969+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
71970+{
71971+ struct acl_object_label *obj;
71972+ __u32 oldmode, newmode;
71973+ __u32 needmode;
71974+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
71975+ GR_DELETE | GR_INHERIT;
71976+
71977+ if (unlikely(!(gr_status & GR_READY)))
71978+ return (GR_CREATE | GR_LINK);
71979+
71980+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
71981+ oldmode = obj->mode;
71982+
71983+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
71984+ newmode = obj->mode;
71985+
71986+ needmode = newmode & checkmodes;
71987+
71988+ // old name for hardlink must have at least the permissions of the new name
71989+ if ((oldmode & needmode) != needmode)
71990+ goto bad;
71991+
71992+ // if old name had restrictions/auditing, make sure the new name does as well
71993+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
71994+
71995+ // don't allow hardlinking of suid/sgid/fcapped files without permission
71996+ if (is_privileged_binary(old_dentry))
71997+ needmode |= GR_SETID;
71998+
71999+ if ((newmode & needmode) != needmode)
72000+ goto bad;
72001+
72002+ // enforce minimum permissions
72003+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
72004+ return newmode;
72005+bad:
72006+ needmode = oldmode;
72007+ if (is_privileged_binary(old_dentry))
72008+ needmode |= GR_SETID;
72009+
72010+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
72011+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
72012+ return (GR_CREATE | GR_LINK);
72013+ } else if (newmode & GR_SUPPRESS)
72014+ return GR_SUPPRESS;
72015+ else
72016+ return 0;
72017+}
72018+
72019+int
72020+gr_check_hidden_task(const struct task_struct *task)
72021+{
72022+ if (unlikely(!(gr_status & GR_READY)))
72023+ return 0;
72024+
72025+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
72026+ return 1;
72027+
72028+ return 0;
72029+}
72030+
72031+int
72032+gr_check_protected_task(const struct task_struct *task)
72033+{
72034+ if (unlikely(!(gr_status & GR_READY) || !task))
72035+ return 0;
72036+
72037+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
72038+ task->acl != current->acl)
72039+ return 1;
72040+
72041+ return 0;
72042+}
72043+
72044+int
72045+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
72046+{
72047+ struct task_struct *p;
72048+ int ret = 0;
72049+
72050+ if (unlikely(!(gr_status & GR_READY) || !pid))
72051+ return ret;
72052+
72053+ read_lock(&tasklist_lock);
72054+ do_each_pid_task(pid, type, p) {
72055+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
72056+ p->acl != current->acl) {
72057+ ret = 1;
72058+ goto out;
72059+ }
72060+ } while_each_pid_task(pid, type, p);
72061+out:
72062+ read_unlock(&tasklist_lock);
72063+
72064+ return ret;
72065+}
72066+
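+/* fork() helper: the child inherits the parent's role, subject, and a
+   reference to its exec_file; a parent that used accept() has its cached
+   source IPs cleared afterward. */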
72067+void
72068+gr_copy_label(struct task_struct *tsk)
72069+{
72070+ struct task_struct *p = current;
72071+
72072+ tsk->inherited = p->inherited;
72073+ tsk->acl_sp_role = 0;
72074+ tsk->acl_role_id = p->acl_role_id;
72075+ tsk->acl = p->acl;
72076+ tsk->role = p->role;
72077+ tsk->signal->used_accept = 0;
72078+ tsk->signal->curr_ip = p->signal->curr_ip;
72079+ tsk->signal->saved_ip = p->signal->saved_ip;
72080+ if (p->exec_file)
72081+ get_file(p->exec_file);
72082+ tsk->exec_file = p->exec_file;
72083+ tsk->is_writable = p->is_writable;
72084+ if (unlikely(p->signal->used_accept)) {
72085+ p->signal->curr_ip = 0;
72086+ p->signal->saved_ip = 0;
72087+ }
72088+
72089+ return;
72090+}
72091+
72092+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
72093+
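+/* Enforce the subject's uid transition list for a set*uid() attempt:
+   GR_ID_ALLOW lists the only permitted target uids, GR_ID_DENY lists the
+   forbidden ones.  Returns 0 if the change is allowed, 1 (after logging)
+   if it must be rejected. */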
72094+int
72095+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
72096+{
72097+ unsigned int i;
72098+ __u16 num;
72099+ uid_t *uidlist;
72100+ uid_t curuid;
72101+ int realok = 0;
72102+ int effectiveok = 0;
72103+ int fsok = 0;
72104+ uid_t globalreal, globaleffective, globalfs;
72105+
72106+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
72107+ struct user_struct *user;
72108+
72109+ if (!uid_valid(real))
72110+ goto skipit;
72111+
72112+ /* find user based on global namespace */
72113+
72114+ globalreal = GR_GLOBAL_UID(real);
72115+
72116+ user = find_user(make_kuid(&init_user_ns, globalreal));
72117+ if (user == NULL)
72118+ goto skipit;
72119+
72120+ if (gr_process_kernel_setuid_ban(user)) {
72121+ /* for find_user */
72122+ free_uid(user);
72123+ return 1;
72124+ }
72125+
72126+ /* for find_user */
72127+ free_uid(user);
72128+
72129+skipit:
72130+#endif
72131+
72132+ if (unlikely(!(gr_status & GR_READY)))
72133+ return 0;
72134+
72135+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
72136+ gr_log_learn_uid_change(real, effective, fs);
72137+
72138+ num = current->acl->user_trans_num;
72139+ uidlist = current->acl->user_transitions;
72140+
72141+ if (uidlist == NULL)
72142+ return 0;
72143+
72144+ if (!uid_valid(real)) {
72145+ realok = 1;
72146+ globalreal = (uid_t)-1;
72147+ } else {
72148+ globalreal = GR_GLOBAL_UID(real);
72149+ }
72150+ if (!uid_valid(effective)) {
72151+ effectiveok = 1;
72152+ globaleffective = (uid_t)-1;
72153+ } else {
72154+ globaleffective = GR_GLOBAL_UID(effective);
72155+ }
72156+ if (!uid_valid(fs)) {
72157+ fsok = 1;
72158+ globalfs = (uid_t)-1;
72159+ } else {
72160+ globalfs = GR_GLOBAL_UID(fs);
72161+ }
72162+
72163+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
72164+ for (i = 0; i < num; i++) {
72165+ curuid = uidlist[i];
72166+ if (globalreal == curuid)
72167+ realok = 1;
72168+ if (globaleffective == curuid)
72169+ effectiveok = 1;
72170+ if (globalfs == curuid)
72171+ fsok = 1;
72172+ }
72173+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
72174+ for (i = 0; i < num; i++) {
72175+ curuid = uidlist[i];
72176+ if (globalreal == curuid)
72177+ break;
72178+ if (globaleffective == curuid)
72179+ break;
72180+ if (globalfs == curuid)
72181+ break;
72182+ }
72183+ /* not in deny list */
72184+ if (i == num) {
72185+ realok = 1;
72186+ effectiveok = 1;
72187+ fsok = 1;
72188+ }
72189+ }
72190+
72191+ if (realok && effectiveok && fsok)
72192+ return 0;
72193+ else {
72194+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
72195+ return 1;
72196+ }
72197+}
72198+
72199+int
72200+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
72201+{
72202+ unsigned int i;
72203+ __u16 num;
72204+ gid_t *gidlist;
72205+ gid_t curgid;
72206+ int realok = 0;
72207+ int effectiveok = 0;
72208+ int fsok = 0;
72209+ gid_t globalreal, globaleffective, globalfs;
72210+
72211+ if (unlikely(!(gr_status & GR_READY)))
72212+ return 0;
72213+
72214+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
72215+ gr_log_learn_gid_change(real, effective, fs);
72216+
72217+ num = current->acl->group_trans_num;
72218+ gidlist = current->acl->group_transitions;
72219+
72220+ if (gidlist == NULL)
72221+ return 0;
72222+
72223+ if (!gid_valid(real)) {
72224+ realok = 1;
72225+ globalreal = (gid_t)-1;
72226+ } else {
72227+ globalreal = GR_GLOBAL_GID(real);
72228+ }
72229+ if (!gid_valid(effective)) {
72230+ effectiveok = 1;
72231+ globaleffective = (gid_t)-1;
72232+ } else {
72233+ globaleffective = GR_GLOBAL_GID(effective);
72234+ }
72235+ if (!gid_valid(fs)) {
72236+ fsok = 1;
72237+ globalfs = (gid_t)-1;
72238+ } else {
72239+ globalfs = GR_GLOBAL_GID(fs);
72240+ }
72241+
72242+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
72243+ for (i = 0; i < num; i++) {
72244+ curgid = gidlist[i];
72245+ if (globalreal == curgid)
72246+ realok = 1;
72247+ if (globaleffective == curgid)
72248+ effectiveok = 1;
72249+ if (globalfs == curgid)
72250+ fsok = 1;
72251+ }
72252+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
72253+ for (i = 0; i < num; i++) {
72254+ curgid = gidlist[i];
72255+ if (globalreal == curgid)
72256+ break;
72257+ if (globaleffective == curgid)
72258+ break;
72259+ if (globalfs == curgid)
72260+ break;
72261+ }
72262+ /* not in deny list */
72263+ if (i == num) {
72264+ realok = 1;
72265+ effectiveok = 1;
72266+ fsok = 1;
72267+ }
72268+ }
72269+
72270+ if (realok && effectiveok && fsok)
72271+ return 0;
72272+ else {
72273+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
72274+ return 1;
72275+ }
72276+}
72277+
72278+extern int gr_acl_is_capable(const int cap);
72279+
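+/* Re-evaluate a task's role and subject after a uid/gid change.  Kernel
+   threads receive the kernel role; a task lacking CAP_SETUID/CAP_SETGID
+   is never moved into a different user/group role. */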
72280+void
72281+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
72282+{
72283+ struct acl_role_label *role = task->role;
72284+ struct acl_subject_label *subj = NULL;
72285+ struct acl_object_label *obj;
72286+ struct file *filp;
72287+ uid_t uid;
72288+ gid_t gid;
72289+
72290+ if (unlikely(!(gr_status & GR_READY)))
72291+ return;
72292+
72293+ uid = GR_GLOBAL_UID(kuid);
72294+ gid = GR_GLOBAL_GID(kgid);
72295+
72296+ filp = task->exec_file;
72297+
72298+ /* kernel process, we'll give them the kernel role */
72299+ if (unlikely(!filp)) {
72300+ task->role = running_polstate.kernel_role;
72301+ task->acl = running_polstate.kernel_role->root_label;
72302+ return;
72303+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
72304+ /* save the current ip at time of role lookup so that the proper
72305+ IP will be learned for role_allowed_ip */
72306+ task->signal->saved_ip = task->signal->curr_ip;
72307+ role = lookup_acl_role_label(task, uid, gid);
72308+ }
72309+
72310+ /* don't change the role if we're not a privileged process */
72311+ if (role && task->role != role &&
72312+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
72313+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
72314+ return;
72315+
72316+	/* Perform the subject lookup in the possibly new role; we can
72317+	   reuse this result below in the case where role == task->role.
72318+	*/
72319+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
72320+
72321+	/* If we changed uid/gid but ended up in the same role and are
72322+	   using inheritance, don't lose the inherited subject: when the
72323+	   current subject differs from what a normal lookup would
72324+	   produce, we arrived at it via inheritance, so keep it rather
72325+	   than replacing it.
72326+	*/
72327+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
72328+ (subj == task->acl)))
72329+ task->acl = subj;
72330+
72331+ /* leave task->inherited unaffected */
72332+
72333+ task->role = role;
72334+
72335+ task->is_writable = 0;
72336+
72337+ /* ignore additional mmap checks for processes that are writable
72338+ by the default ACL */
72339+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
72340+ if (unlikely(obj->mode & GR_WRITE))
72341+ task->is_writable = 1;
72342+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
72343+ if (unlikely(obj->mode & GR_WRITE))
72344+ task->is_writable = 1;
72345+
72346+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
72347+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
72348+#endif
72349+
72350+ gr_set_proc_res(task);
72351+
72352+ return;
72353+}
72354+
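+/* Exec-time subject transition: resolve the new binary's subject, reject
+   ptraced or share-unsafe execs that would change subjects without
+   GR_PTRACERD permission, and honor GR_INHERIT objects by keeping the
+   caller's subject. */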
72355+int
72356+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
72357+ const int unsafe_flags)
72358+{
72359+ struct task_struct *task = current;
72360+ struct acl_subject_label *newacl;
72361+ struct acl_object_label *obj;
72362+ __u32 retmode;
72363+
72364+ if (unlikely(!(gr_status & GR_READY)))
72365+ return 0;
72366+
72367+ newacl = chk_subj_label(dentry, mnt, task->role);
72368+
72369+	/* special handling for the case where we did an strace -f -p <pid>
72370+	   from an admin role, and that pid then did an exec
72371+	*/
72372+ rcu_read_lock();
72373+ read_lock(&tasklist_lock);
72374+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
72375+ (task->parent->acl->mode & GR_POVERRIDE))) {
72376+ read_unlock(&tasklist_lock);
72377+ rcu_read_unlock();
72378+ goto skip_check;
72379+ }
72380+ read_unlock(&tasklist_lock);
72381+ rcu_read_unlock();
72382+
72383+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
72384+ !(task->role->roletype & GR_ROLE_GOD) &&
72385+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
72386+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
72387+ if (unsafe_flags & LSM_UNSAFE_SHARE)
72388+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
72389+ else
72390+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
72391+ return -EACCES;
72392+ }
72393+
72394+skip_check:
72395+
72396+ obj = chk_obj_label(dentry, mnt, task->acl);
72397+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
72398+
72399+ if (!(task->acl->mode & GR_INHERITLEARN) &&
72400+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
72401+ if (obj->nested)
72402+ task->acl = obj->nested;
72403+ else
72404+ task->acl = newacl;
72405+ task->inherited = 0;
72406+ } else {
72407+ task->inherited = 1;
72408+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
72409+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
72410+ }
72411+
72412+ task->is_writable = 0;
72413+
72414+ /* ignore additional mmap checks for processes that are writable
72415+ by the default ACL */
72416+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
72417+ if (unlikely(obj->mode & GR_WRITE))
72418+ task->is_writable = 1;
72419+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
72420+ if (unlikely(obj->mode & GR_WRITE))
72421+ task->is_writable = 1;
72422+
72423+ gr_set_proc_res(task);
72424+
72425+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
72426+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
72427+#endif
72428+ return 0;
72429+}
72430+
72431+/* always called with valid inodev ptr */
72432+static void
72433+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
72434+{
72435+ struct acl_object_label *matchpo;
72436+ struct acl_subject_label *matchps;
72437+ struct acl_subject_label *subj;
72438+ struct acl_role_label *role;
72439+ unsigned int x;
72440+
72441+ FOR_EACH_ROLE_START(role)
72442+ FOR_EACH_SUBJECT_START(role, subj, x)
72443+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
72444+ matchpo->mode |= GR_DELETED;
72445+ FOR_EACH_SUBJECT_END(subj,x)
72446+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
72447+ /* nested subjects aren't in the role's subj_hash table */
72448+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
72449+ matchpo->mode |= GR_DELETED;
72450+ FOR_EACH_NESTED_SUBJECT_END(subj)
72451+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
72452+ matchps->mode |= GR_DELETED;
72453+ FOR_EACH_ROLE_END(role)
72454+
72455+ inodev->nentry->deleted = 1;
72456+
72457+ return;
72458+}
72459+
72460+void
72461+gr_handle_delete(const ino_t ino, const dev_t dev)
72462+{
72463+ struct inodev_entry *inodev;
72464+
72465+ if (unlikely(!(gr_status & GR_READY)))
72466+ return;
72467+
72468+ write_lock(&gr_inode_lock);
72469+ inodev = lookup_inodev_entry(ino, dev);
72470+ if (inodev != NULL)
72471+ do_handle_delete(inodev, ino, dev);
72472+ write_unlock(&gr_inode_lock);
72473+
72474+ return;
72475+}
72476+
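+/* A previously deleted object's pathname now refers to a new inode/device:
+   unhash the GR_DELETED entry, rewrite its inode/device, clear the deleted
+   flag, and rehash it. */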
72477+static void
72478+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
72479+ const ino_t newinode, const dev_t newdevice,
72480+ struct acl_subject_label *subj)
72481+{
72482+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
72483+ struct acl_object_label *match;
72484+
72485+ match = subj->obj_hash[index];
72486+
72487+ while (match && (match->inode != oldinode ||
72488+ match->device != olddevice ||
72489+ !(match->mode & GR_DELETED)))
72490+ match = match->next;
72491+
72492+ if (match && (match->inode == oldinode)
72493+ && (match->device == olddevice)
72494+ && (match->mode & GR_DELETED)) {
72495+ if (match->prev == NULL) {
72496+ subj->obj_hash[index] = match->next;
72497+ if (match->next != NULL)
72498+ match->next->prev = NULL;
72499+ } else {
72500+ match->prev->next = match->next;
72501+ if (match->next != NULL)
72502+ match->next->prev = match->prev;
72503+ }
72504+ match->prev = NULL;
72505+ match->next = NULL;
72506+ match->inode = newinode;
72507+ match->device = newdevice;
72508+ match->mode &= ~GR_DELETED;
72509+
72510+ insert_acl_obj_label(match, subj);
72511+ }
72512+
72513+ return;
72514+}
72515+
72516+static void
72517+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
72518+ const ino_t newinode, const dev_t newdevice,
72519+ struct acl_role_label *role)
72520+{
72521+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
72522+ struct acl_subject_label *match;
72523+
72524+ match = role->subj_hash[index];
72525+
72526+ while (match && (match->inode != oldinode ||
72527+ match->device != olddevice ||
72528+ !(match->mode & GR_DELETED)))
72529+ match = match->next;
72530+
72531+ if (match && (match->inode == oldinode)
72532+ && (match->device == olddevice)
72533+ && (match->mode & GR_DELETED)) {
72534+ if (match->prev == NULL) {
72535+ role->subj_hash[index] = match->next;
72536+ if (match->next != NULL)
72537+ match->next->prev = NULL;
72538+ } else {
72539+ match->prev->next = match->next;
72540+ if (match->next != NULL)
72541+ match->next->prev = match->prev;
72542+ }
72543+ match->prev = NULL;
72544+ match->next = NULL;
72545+ match->inode = newinode;
72546+ match->device = newdevice;
72547+ match->mode &= ~GR_DELETED;
72548+
72549+ insert_acl_subj_label(match, role);
72550+ }
72551+
72552+ return;
72553+}
72554+
72555+static void
72556+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
72557+ const ino_t newinode, const dev_t newdevice)
72558+{
72559+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
72560+ struct inodev_entry *match;
72561+
72562+ match = running_polstate.inodev_set.i_hash[index];
72563+
72564+ while (match && (match->nentry->inode != oldinode ||
72565+ match->nentry->device != olddevice || !match->nentry->deleted))
72566+ match = match->next;
72567+
72568+ if (match && (match->nentry->inode == oldinode)
72569+ && (match->nentry->device == olddevice) &&
72570+ match->nentry->deleted) {
72571+ if (match->prev == NULL) {
72572+ running_polstate.inodev_set.i_hash[index] = match->next;
72573+ if (match->next != NULL)
72574+ match->next->prev = NULL;
72575+ } else {
72576+ match->prev->next = match->next;
72577+ if (match->next != NULL)
72578+ match->next->prev = match->prev;
72579+ }
72580+ match->prev = NULL;
72581+ match->next = NULL;
72582+ match->nentry->inode = newinode;
72583+ match->nentry->device = newdevice;
72584+ match->nentry->deleted = 0;
72585+
72586+ insert_inodev_entry(match);
72587+ }
72588+
72589+ return;
72590+}
72591+
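+/* for every role, retarget any deleted subject/object labels that still
+   refer to matchn's old inode/device at the newly created file's
+   identity, then update the global inodev table */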
72592+static void
72593+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
72594+{
72595+ struct acl_subject_label *subj;
72596+ struct acl_role_label *role;
72597+ unsigned int x;
72598+
72599+ FOR_EACH_ROLE_START(role)
72600+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
72601+
72602+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
72603+			if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
72604+				subj->inode = ino;
72605+				subj->device = dev;
72606+			}
72607+ /* nested subjects aren't in the role's subj_hash table */
72608+ update_acl_obj_label(matchn->inode, matchn->device,
72609+ ino, dev, subj);
72610+ FOR_EACH_NESTED_SUBJECT_END(subj)
72611+ FOR_EACH_SUBJECT_START(role, subj, x)
72612+ update_acl_obj_label(matchn->inode, matchn->device,
72613+ ino, dev, subj);
72614+ FOR_EACH_SUBJECT_END(subj,x)
72615+ FOR_EACH_ROLE_END(role)
72616+
72617+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
72618+
72619+ return;
72620+}
72621+
72622+static void
72623+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
72624+ const struct vfsmount *mnt)
72625+{
72626+ ino_t ino = dentry->d_inode->i_ino;
72627+ dev_t dev = __get_dev(dentry);
72628+
72629+ __do_handle_create(matchn, ino, dev);
72630+
72631+ return;
72632+}
72633+
72634+void
72635+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
72636+{
72637+ struct name_entry *matchn;
72638+
72639+ if (unlikely(!(gr_status & GR_READY)))
72640+ return;
72641+
72642+ preempt_disable();
72643+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
72644+
72645+ if (unlikely((unsigned long)matchn)) {
72646+ write_lock(&gr_inode_lock);
72647+ do_handle_create(matchn, dentry, mnt);
72648+ write_unlock(&gr_inode_lock);
72649+ }
72650+ preempt_enable();
72651+
72652+ return;
72653+}
72654+
72655+void
72656+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
72657+{
72658+ struct name_entry *matchn;
72659+
72660+ if (unlikely(!(gr_status & GR_READY)))
72661+ return;
72662+
72663+ preempt_disable();
72664+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
72665+
72666+ if (unlikely((unsigned long)matchn)) {
72667+ write_lock(&gr_inode_lock);
72668+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
72669+ write_unlock(&gr_inode_lock);
72670+ }
72671+ preempt_enable();
72672+
72673+ return;
72674+}
72675+
72676+void
72677+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
72678+ struct dentry *old_dentry,
72679+ struct dentry *new_dentry,
72680+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
72681+{
72682+ struct name_entry *matchn;
72683+ struct name_entry *matchn2 = NULL;
72684+ struct inodev_entry *inodev;
72685+ struct inode *inode = new_dentry->d_inode;
72686+ ino_t old_ino = old_dentry->d_inode->i_ino;
72687+ dev_t old_dev = __get_dev(old_dentry);
72688+ unsigned int exchange = flags & RENAME_EXCHANGE;
72689+
72690+	/* vfs_rename swaps the name and parent link for old_dentry and
72691+	   new_dentry.
72692+	   At this point, old_dentry has the new name, parent link, and inode
72693+	   for the renamed file.
72694+	   If a file is being replaced by the rename, new_dentry has the inode
72695+	   and name for the replaced file.
72696+	*/
72697+
72698+ if (unlikely(!(gr_status & GR_READY)))
72699+ return;
72700+
72701+ preempt_disable();
72702+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
72703+
72704+ /* exchange cases:
72705+ a filename exists for the source, but not dest
72706+ do a recreate on source
72707+ a filename exists for the dest, but not source
72708+ do a recreate on dest
72709+ a filename exists for both source and dest
72710+ delete source and dest, then create source and dest
72711+ a filename exists for neither source nor dest
72712+ no updates needed
72713+
72714+ the name entry lookups get us the old inode/dev associated with
72715+ each name, so do the deletes first (if possible) so that when
72716+ we do the create, we pick up on the right entries
72717+ */
72718+
72719+ if (exchange)
72720+ matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt));
72721+
72722+ /* we wouldn't have to check d_inode if it weren't for
72723+ NFS silly-renaming
72724+ */
72725+
72726+ write_lock(&gr_inode_lock);
72727+ if (unlikely((replace || exchange) && inode)) {
72728+ ino_t new_ino = inode->i_ino;
72729+ dev_t new_dev = __get_dev(new_dentry);
72730+
72731+ inodev = lookup_inodev_entry(new_ino, new_dev);
72732+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
72733+ do_handle_delete(inodev, new_ino, new_dev);
72734+ }
72735+
72736+ inodev = lookup_inodev_entry(old_ino, old_dev);
72737+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
72738+ do_handle_delete(inodev, old_ino, old_dev);
72739+
72740+ if (unlikely(matchn != NULL))
72741+ do_handle_create(matchn, old_dentry, mnt);
72742+
72743+ if (unlikely(matchn2 != NULL))
72744+ do_handle_create(matchn2, new_dentry, mnt);
72745+
72746+ write_unlock(&gr_inode_lock);
72747+ preempt_enable();
72748+
72749+ return;
72750+}
72751+
72752+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
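+/* per-resource slack added on top of an observed limit overrun while
+   learning, so the generated policy isn't an exact (and brittle) fit */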
72753+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
72754+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
72755+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
72756+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
72757+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
72758+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
72759+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
72760+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
72761+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
72762+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
72763+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
72764+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
72765+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
72766+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
72767+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
72768+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
72769+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
72770+};
72771+
72772+void
72773+gr_learn_resource(const struct task_struct *task,
72774+ const int res, const unsigned long wanted, const int gt)
72775+{
72776+ struct acl_subject_label *acl;
72777+ const struct cred *cred;
72778+
72779+ if (unlikely((gr_status & GR_READY) &&
72780+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
72781+ goto skip_reslog;
72782+
72783+ gr_log_resource(task, res, wanted, gt);
72784+skip_reslog:
72785+
72786+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
72787+ return;
72788+
72789+ acl = task->acl;
72790+
72791+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
72792+ !(acl->resmask & (1U << (unsigned short) res))))
72793+ return;
72794+
72795+ if (wanted >= acl->res[res].rlim_cur) {
72796+ unsigned long res_add;
72797+
72798+ res_add = wanted + res_learn_bumps[res];
72799+
72800+ acl->res[res].rlim_cur = res_add;
72801+
72802+ if (wanted > acl->res[res].rlim_max)
72803+ acl->res[res].rlim_max = res_add;
72804+
72805+ /* only log the subject filename, since resource logging is supported for
72806+ single-subject learning only */
72807+ rcu_read_lock();
72808+ cred = __task_cred(task);
72809+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
72810+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
72811+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
72812+ "", (unsigned long) res, &task->signal->saved_ip);
72813+ rcu_read_unlock();
72814+ }
72815+
72816+ return;
72817+}
72818+EXPORT_SYMBOL_GPL(gr_learn_resource);
72819+#endif
72820+
72821+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
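+/* apply the subject's PaX overrides on top of the flags the binary
+   requested: explicit disables are cleared first, explicit enables
+   are set last */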
72822+void
72823+pax_set_initial_flags(struct linux_binprm *bprm)
72824+{
72825+ struct task_struct *task = current;
72826+ struct acl_subject_label *proc;
72827+ unsigned long flags;
72828+
72829+ if (unlikely(!(gr_status & GR_READY)))
72830+ return;
72831+
72832+ flags = pax_get_flags(task);
72833+
72834+ proc = task->acl;
72835+
72836+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
72837+ flags &= ~MF_PAX_PAGEEXEC;
72838+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
72839+ flags &= ~MF_PAX_SEGMEXEC;
72840+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
72841+ flags &= ~MF_PAX_RANDMMAP;
72842+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
72843+ flags &= ~MF_PAX_EMUTRAMP;
72844+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
72845+ flags &= ~MF_PAX_MPROTECT;
72846+
72847+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
72848+ flags |= MF_PAX_PAGEEXEC;
72849+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
72850+ flags |= MF_PAX_SEGMEXEC;
72851+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
72852+ flags |= MF_PAX_RANDMMAP;
72853+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
72854+ flags |= MF_PAX_EMUTRAMP;
72855+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
72856+ flags |= MF_PAX_MPROTECT;
72857+
72858+ pax_set_flags(task, flags);
72859+
72860+ return;
72861+}
72862+#endif
72863+
72864+int
72865+gr_handle_proc_ptrace(struct task_struct *task)
72866+{
72867+ struct file *filp;
72868+ struct task_struct *tmp = task;
72869+ struct task_struct *curtemp = current;
72870+ __u32 retmode;
72871+
72872+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
72873+ if (unlikely(!(gr_status & GR_READY)))
72874+ return 0;
72875+#endif
72876+
72877+ read_lock(&tasklist_lock);
72878+ read_lock(&grsec_exec_file_lock);
72879+ filp = task->exec_file;
72880+
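+	/* walk up the ancestry: the loop stops at current if current is
+	   an ancestor of the traced task, or at pid 0 otherwise */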
72881+ while (task_pid_nr(tmp) > 0) {
72882+ if (tmp == curtemp)
72883+ break;
72884+ tmp = tmp->real_parent;
72885+ }
72886+
72887+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
72888+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
72889+ read_unlock(&grsec_exec_file_lock);
72890+ read_unlock(&tasklist_lock);
72891+ return 1;
72892+ }
72893+
72894+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
72895+ if (!(gr_status & GR_READY)) {
72896+ read_unlock(&grsec_exec_file_lock);
72897+ read_unlock(&tasklist_lock);
72898+ return 0;
72899+ }
72900+#endif
72901+
72902+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
72903+ read_unlock(&grsec_exec_file_lock);
72904+ read_unlock(&tasklist_lock);
72905+
72906+ if (retmode & GR_NOPTRACE)
72907+ return 1;
72908+
72909+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
72910+ && (current->acl != task->acl || (current->acl != current->role->root_label
72911+ && task_pid_nr(current) != task_pid_nr(task))))
72912+ return 1;
72913+
72914+ return 0;
72915+}
72916+
72917+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
72918+{
72919+ if (unlikely(!(gr_status & GR_READY)))
72920+ return;
72921+
72922+ if (!(current->role->roletype & GR_ROLE_GOD))
72923+ return;
72924+
72925+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
72926+ p->role->rolename, gr_task_roletype_to_char(p),
72927+ p->acl->filename);
72928+}
72929+
72930+int
72931+gr_handle_ptrace(struct task_struct *task, const long request)
72932+{
72933+ struct task_struct *tmp = task;
72934+ struct task_struct *curtemp = current;
72935+ __u32 retmode;
72936+
72937+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
72938+ if (unlikely(!(gr_status & GR_READY)))
72939+ return 0;
72940+#endif
72941+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
72942+ read_lock(&tasklist_lock);
72943+ while (task_pid_nr(tmp) > 0) {
72944+ if (tmp == curtemp)
72945+ break;
72946+ tmp = tmp->real_parent;
72947+ }
72948+
72949+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
72950+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
72951+ read_unlock(&tasklist_lock);
72952+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
72953+ return 1;
72954+ }
72955+ read_unlock(&tasklist_lock);
72956+ }
72957+
72958+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
72959+ if (!(gr_status & GR_READY))
72960+ return 0;
72961+#endif
72962+
72963+ read_lock(&grsec_exec_file_lock);
72964+ if (unlikely(!task->exec_file)) {
72965+ read_unlock(&grsec_exec_file_lock);
72966+ return 0;
72967+ }
72968+
72969+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
72970+ read_unlock(&grsec_exec_file_lock);
72971+
72972+ if (retmode & GR_NOPTRACE) {
72973+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
72974+ return 1;
72975+ }
72976+
72977+ if (retmode & GR_PTRACERD) {
72978+ switch (request) {
72979+ case PTRACE_SEIZE:
72980+ case PTRACE_POKETEXT:
72981+ case PTRACE_POKEDATA:
72982+ case PTRACE_POKEUSR:
72983+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
72984+ case PTRACE_SETREGS:
72985+ case PTRACE_SETFPREGS:
72986+#endif
72987+#ifdef CONFIG_X86
72988+ case PTRACE_SETFPXREGS:
72989+#endif
72990+#ifdef CONFIG_ALTIVEC
72991+ case PTRACE_SETVRREGS:
72992+#endif
72993+ return 1;
72994+ default:
72995+ return 0;
72996+ }
72997+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
72998+ !(current->role->roletype & GR_ROLE_GOD) &&
72999+ (current->acl != task->acl)) {
73000+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
73001+ return 1;
73002+ }
73003+
73004+ return 0;
73005+}
73006+
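+/* a library mapping is refused if either the default policy or the
+   task's role grants write access to the file: a writer could modify
+   what is about to be executed */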
73007+static int is_writable_mmap(const struct file *filp)
73008+{
73009+ struct task_struct *task = current;
73010+ struct acl_object_label *obj, *obj2;
73011+
73012+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
73013+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
73014+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
73015+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
73016+ task->role->root_label);
73017+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
73018+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
73019+ return 1;
73020+ }
73021+ }
73022+ return 0;
73023+}
73024+
73025+int
73026+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
73027+{
73028+ __u32 mode;
73029+
73030+ if (unlikely(!file || !(prot & PROT_EXEC)))
73031+ return 1;
73032+
73033+ if (is_writable_mmap(file))
73034+ return 0;
73035+
73036+ mode =
73037+ gr_search_file(file->f_path.dentry,
73038+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
73039+ file->f_path.mnt);
73040+
73041+ if (!gr_tpe_allow(file))
73042+ return 0;
73043+
73044+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
73045+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
73046+ return 0;
73047+ } else if (unlikely(!(mode & GR_EXEC))) {
73048+ return 0;
73049+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
73050+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
73051+ return 1;
73052+ }
73053+
73054+ return 1;
73055+}
73056+
73057+int
73058+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
73059+{
73060+ __u32 mode;
73061+
73062+ if (unlikely(!file || !(prot & PROT_EXEC)))
73063+ return 1;
73064+
73065+ if (is_writable_mmap(file))
73066+ return 0;
73067+
73068+ mode =
73069+ gr_search_file(file->f_path.dentry,
73070+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
73071+ file->f_path.mnt);
73072+
73073+ if (!gr_tpe_allow(file))
73074+ return 0;
73075+
73076+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
73077+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
73078+ return 0;
73079+ } else if (unlikely(!(mode & GR_EXEC))) {
73080+ return 0;
73081+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
73082+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
73083+ return 1;
73084+ }
73085+
73086+ return 1;
73087+}
73088+
73089+void
73090+gr_acl_handle_psacct(struct task_struct *task, const long code)
73091+{
73092+ unsigned long runtime, cputime;
73093+ cputime_t utime, stime;
73094+ unsigned int wday, cday;
73095+ __u8 whr, chr;
73096+ __u8 wmin, cmin;
73097+ __u8 wsec, csec;
73098+ struct timespec timeval;
73099+
73100+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
73101+ !(task->acl->mode & GR_PROCACCT)))
73102+ return;
73103+
73104+ do_posix_clock_monotonic_gettime(&timeval);
73105+ runtime = timeval.tv_sec - task->start_time.tv_sec;
73106+ wday = runtime / (60 * 60 * 24);
73107+ runtime -= wday * (60 * 60 * 24);
73108+ whr = runtime / (60 * 60);
73109+ runtime -= whr * (60 * 60);
73110+ wmin = runtime / 60;
73111+ runtime -= wmin * 60;
73112+ wsec = runtime;
73113+
73114+ task_cputime(task, &utime, &stime);
73115+ cputime = cputime_to_secs(utime + stime);
73116+ cday = cputime / (60 * 60 * 24);
73117+ cputime -= cday * (60 * 60 * 24);
73118+ chr = cputime / (60 * 60);
73119+ cputime -= chr * (60 * 60);
73120+ cmin = cputime / 60;
73121+ cputime -= cmin * 60;
73122+ csec = cputime;
73123+
73124+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
73125+
73126+ return;
73127+}
73128+
73129+#ifdef CONFIG_TASKSTATS
73130+int gr_is_taskstats_denied(int pid)
73131+{
73132+ struct task_struct *task;
73133+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73134+ const struct cred *cred;
73135+#endif
73136+ int ret = 0;
73137+
73138+ /* restrict taskstats viewing to un-chrooted root users
73139+ who have the 'view' subject flag if the RBAC system is enabled
73140+ */
73141+
73142+ rcu_read_lock();
73143+ read_lock(&tasklist_lock);
73144+ task = find_task_by_vpid(pid);
73145+ if (task) {
73146+#ifdef CONFIG_GRKERNSEC_CHROOT
73147+ if (proc_is_chrooted(task))
73148+ ret = -EACCES;
73149+#endif
73150+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73151+ cred = __task_cred(task);
73152+#ifdef CONFIG_GRKERNSEC_PROC_USER
73153+ if (gr_is_global_nonroot(cred->uid))
73154+ ret = -EACCES;
73155+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73156+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
73157+ ret = -EACCES;
73158+#endif
73159+#endif
73160+ if (gr_status & GR_READY) {
73161+ if (!(task->acl->mode & GR_VIEW))
73162+ ret = -EACCES;
73163+ }
73164+ } else
73165+ ret = -ENOENT;
73166+
73167+ read_unlock(&tasklist_lock);
73168+ rcu_read_unlock();
73169+
73170+ return ret;
73171+}
73172+#endif
73173+
73174+/* AUXV entries are filled via a descendant of search_binary_handler
73175+ after we've already applied the subject for the target
73176+*/
73177+int gr_acl_enable_at_secure(void)
73178+{
73179+ if (unlikely(!(gr_status & GR_READY)))
73180+ return 0;
73181+
73182+ if (current->acl->mode & GR_ATSECURE)
73183+ return 1;
73184+
73185+ return 0;
73186+}
73187+
73188+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
73189+{
73190+ struct task_struct *task = current;
73191+ struct dentry *dentry = file->f_path.dentry;
73192+ struct vfsmount *mnt = file->f_path.mnt;
73193+ struct acl_object_label *obj, *tmp;
73194+ struct acl_subject_label *subj;
73195+ unsigned int bufsize;
73196+ int is_not_root;
73197+ char *path;
73198+ dev_t dev = __get_dev(dentry);
73199+
73200+ if (unlikely(!(gr_status & GR_READY)))
73201+ return 1;
73202+
73203+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
73204+ return 1;
73205+
73206+ /* ignore Eric Biederman */
73207+ if (IS_PRIVATE(dentry->d_inode))
73208+ return 1;
73209+
73210+ subj = task->acl;
73211+ read_lock(&gr_inode_lock);
73212+ do {
73213+ obj = lookup_acl_obj_label(ino, dev, subj);
73214+ if (obj != NULL) {
73215+ read_unlock(&gr_inode_lock);
73216+ return (obj->mode & GR_FIND) ? 1 : 0;
73217+ }
73218+ } while ((subj = subj->parent_subject));
73219+ read_unlock(&gr_inode_lock);
73220+
73221+	/* this is purely an optimization, since we're looking up an object
73222+	   for the directory we're doing a readdir on;
73223+	   if it's possible for any globbed object to match the entry we're
73224+	   filling into the directory, then the object we find here will be
73225+	   an anchor point with the globbed objects attached
73226+	*/
73227+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
73228+ if (obj->globbed == NULL)
73229+ return (obj->mode & GR_FIND) ? 1 : 0;
73230+
73231+ is_not_root = ((obj->filename[0] == '/') &&
73232+ (obj->filename[1] == '\0')) ? 0 : 1;
73233+ bufsize = PAGE_SIZE - namelen - is_not_root;
73234+
73235+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
73236+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
73237+ return 1;
73238+
73239+ preempt_disable();
73240+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
73241+ bufsize);
73242+
73243+ bufsize = strlen(path);
73244+
73245+ /* if base is "/", don't append an additional slash */
73246+ if (is_not_root)
73247+ *(path + bufsize) = '/';
73248+ memcpy(path + bufsize + is_not_root, name, namelen);
73249+ *(path + bufsize + namelen + is_not_root) = '\0';
73250+
73251+ tmp = obj->globbed;
73252+ while (tmp) {
73253+ if (!glob_match(tmp->filename, path)) {
73254+ preempt_enable();
73255+ return (tmp->mode & GR_FIND) ? 1 : 0;
73256+ }
73257+ tmp = tmp->next;
73258+ }
73259+ preempt_enable();
73260+ return (obj->mode & GR_FIND) ? 1 : 0;
73261+}
73262+
73263+void gr_put_exec_file(struct task_struct *task)
73264+{
73265+ struct file *filp;
73266+
73267+ write_lock(&grsec_exec_file_lock);
73268+ filp = task->exec_file;
73269+ task->exec_file = NULL;
73270+ write_unlock(&grsec_exec_file_lock);
73271+
73272+ if (filp)
73273+ fput(filp);
73274+
73275+ return;
73276+}
73277+
73278+
73279+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
73280+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
73281+#endif
73282+#ifdef CONFIG_SECURITY
73283+EXPORT_SYMBOL_GPL(gr_check_user_change);
73284+EXPORT_SYMBOL_GPL(gr_check_group_change);
73285+#endif
73286+
73287diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
73288new file mode 100644
73289index 0000000..18ffbbd
73290--- /dev/null
73291+++ b/grsecurity/gracl_alloc.c
73292@@ -0,0 +1,105 @@
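+/* stack-based allocator for policy loading: every acl_alloc() result is
+   recorded so that acl_free_all() can release an entire parsed policy
+   in one pass, without tracking individual lifetimes */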
73293+#include <linux/kernel.h>
73294+#include <linux/mm.h>
73295+#include <linux/slab.h>
73296+#include <linux/vmalloc.h>
73297+#include <linux/gracl.h>
73298+#include <linux/grsecurity.h>
73299+
73300+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
73301+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
73302+
73303+static __inline__ int
73304+alloc_pop(void)
73305+{
73306+ if (current_alloc_state->alloc_stack_next == 1)
73307+ return 0;
73308+
73309+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
73310+
73311+ current_alloc_state->alloc_stack_next--;
73312+
73313+ return 1;
73314+}
73315+
73316+static __inline__ int
73317+alloc_push(void *buf)
73318+{
73319+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
73320+ return 1;
73321+
73322+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
73323+
73324+ current_alloc_state->alloc_stack_next++;
73325+
73326+ return 0;
73327+}
73328+
73329+void *
73330+acl_alloc(unsigned long len)
73331+{
73332+ void *ret = NULL;
73333+
73334+ if (!len || len > PAGE_SIZE)
73335+ goto out;
73336+
73337+ ret = kmalloc(len, GFP_KERNEL);
73338+
73339+ if (ret) {
73340+ if (alloc_push(ret)) {
73341+ kfree(ret);
73342+ ret = NULL;
73343+ }
73344+ }
73345+
73346+out:
73347+ return ret;
73348+}
73349+
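+/* overflow-safe wrapper: reject the allocation if num * len would
+   exceed PAGE_SIZE (acl_alloc() enforces the same cap) */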
73350+void *
73351+acl_alloc_num(unsigned long num, unsigned long len)
73352+{
73353+ if (!len || (num > (PAGE_SIZE / len)))
73354+ return NULL;
73355+
73356+ return acl_alloc(num * len);
73357+}
73358+
73359+void
73360+acl_free_all(void)
73361+{
73362+ if (!current_alloc_state->alloc_stack)
73363+ return;
73364+
73365+ while (alloc_pop()) ;
73366+
73367+ if (current_alloc_state->alloc_stack) {
73368+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
73369+ kfree(current_alloc_state->alloc_stack);
73370+ else
73371+ vfree(current_alloc_state->alloc_stack);
73372+ }
73373+
73374+ current_alloc_state->alloc_stack = NULL;
73375+ current_alloc_state->alloc_stack_size = 1;
73376+ current_alloc_state->alloc_stack_next = 1;
73377+
73378+ return;
73379+}
73380+
73381+int
73382+acl_alloc_stack_init(unsigned long size)
73383+{
73384+ if ((size * sizeof (void *)) <= PAGE_SIZE)
73385+ current_alloc_state->alloc_stack =
73386+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
73387+ else
73388+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
73389+
73390+ current_alloc_state->alloc_stack_size = size;
73391+ current_alloc_state->alloc_stack_next = 1;
73392+
73393+ if (!current_alloc_state->alloc_stack)
73394+ return 0;
73395+ else
73396+ return 1;
73397+}
73398diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
73399new file mode 100644
73400index 0000000..1a94c11
73401--- /dev/null
73402+++ b/grsecurity/gracl_cap.c
73403@@ -0,0 +1,127 @@
73404+#include <linux/kernel.h>
73405+#include <linux/module.h>
73406+#include <linux/sched.h>
73407+#include <linux/gracl.h>
73408+#include <linux/grsecurity.h>
73409+#include <linux/grinternal.h>
73410+
73411+extern const char *captab_log[];
73412+extern int captab_log_entries;
73413+
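+/* in (inherit-)learning mode, record the capability use in the learn
+   log rather than enforcing it; returns 1 if the use was logged */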
73414+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
73415+{
73416+ struct acl_subject_label *curracl;
73417+
73418+ if (!gr_acl_is_enabled())
73419+ return 1;
73420+
73421+ curracl = task->acl;
73422+
73423+ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
73424+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
73425+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
73426+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
73427+ gr_to_filename(task->exec_file->f_path.dentry,
73428+ task->exec_file->f_path.mnt) : curracl->filename,
73429+ curracl->filename, 0UL,
73430+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
73431+ return 1;
73432+ }
73433+
73434+ return 0;
73435+}
73436+
73437+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
73438+{
73439+ struct acl_subject_label *curracl;
73440+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
73441+ kernel_cap_t cap_audit = __cap_empty_set;
73442+
73443+ if (!gr_acl_is_enabled())
73444+ return 1;
73445+
73446+ curracl = task->acl;
73447+
73448+ cap_drop = curracl->cap_lower;
73449+ cap_mask = curracl->cap_mask;
73450+ cap_audit = curracl->cap_invert_audit;
73451+
73452+ while ((curracl = curracl->parent_subject)) {
73453+		/* the nearest subject in the parent chain that specifies a cap
73454+		   wins: if the cap isn't in the computed mask yet but this
73455+		   ancestor specifies it, fold it into the mask and inherit the
73456+		   ancestor's lower (and audit-invert) setting for it
73457+		*/
73458+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
73459+ cap_raise(cap_mask, cap);
73460+ if (cap_raised(curracl->cap_lower, cap))
73461+ cap_raise(cap_drop, cap);
73462+ if (cap_raised(curracl->cap_invert_audit, cap))
73463+ cap_raise(cap_audit, cap);
73464+ }
73465+ }
73466+
73467+ if (!cap_raised(cap_drop, cap)) {
73468+ if (cap_raised(cap_audit, cap))
73469+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
73470+ return 1;
73471+ }
73472+
73473+	/* in the general case, only learn the capability use if the process
73474+	   actually has the capability; the two gr_learn_cap() calls in sys.c
73475+	   are the exception to this rule, ensuring that any role transition
73476+	   involves what the full-learned policy believes is a privileged
73477+	   process
73478+	*/
73478+ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap))
73479+ return 1;
73480+
73481+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
73482+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
73483+
73484+ return 0;
73485+}
73486+
73487+int
73488+gr_acl_is_capable(const int cap)
73489+{
73490+ return gr_task_acl_is_capable(current, current_cred(), cap);
73491+}
73492+
73493+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
73494+{
73495+ struct acl_subject_label *curracl;
73496+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
73497+
73498+ if (!gr_acl_is_enabled())
73499+ return 1;
73500+
73501+ curracl = task->acl;
73502+
73503+ cap_drop = curracl->cap_lower;
73504+ cap_mask = curracl->cap_mask;
73505+
73506+ while ((curracl = curracl->parent_subject)) {
73507+		/* the nearest subject in the parent chain that specifies a cap
73508+		   wins: if the cap isn't in the computed mask yet but this
73509+		   ancestor specifies it, fold it into the mask and inherit the
73510+		   ancestor's lower setting for it
73511+		*/
73512+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
73513+ cap_raise(cap_mask, cap);
73514+ if (cap_raised(curracl->cap_lower, cap))
73515+ cap_raise(cap_drop, cap);
73516+ }
73517+ }
73518+
73519+ if (!cap_raised(cap_drop, cap))
73520+ return 1;
73521+
73522+ return 0;
73523+}
73524+
73525+int
73526+gr_acl_is_capable_nolog(const int cap)
73527+{
73528+ return gr_task_acl_is_capable_nolog(current, cap);
73529+}
73530+
73531diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
73532new file mode 100644
73533index 0000000..ca25605
73534--- /dev/null
73535+++ b/grsecurity/gracl_compat.c
73536@@ -0,0 +1,270 @@
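+/* translate the 32-bit userland policy structures passed in by a compat
+   gradm into their native kernel counterparts, converting pointers with
+   compat_ptr() and widening rlimit values (COMPAT_RLIM_INFINITY ->
+   RLIM_INFINITY) */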
73537+#include <linux/kernel.h>
73538+#include <linux/gracl.h>
73539+#include <linux/compat.h>
73540+#include <linux/gracl_compat.h>
73541+
73542+#include <asm/uaccess.h>
73543+
73544+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
73545+{
73546+ struct gr_arg_wrapper_compat uwrapcompat;
73547+
73548+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
73549+ return -EFAULT;
73550+
73551+ if (((uwrapcompat.version != GRSECURITY_VERSION) &&
73552+ (uwrapcompat.version != 0x2901)) ||
73553+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
73554+ return -EINVAL;
73555+
73556+ uwrap->arg = compat_ptr(uwrapcompat.arg);
73557+ uwrap->version = uwrapcompat.version;
73558+ uwrap->size = sizeof(struct gr_arg);
73559+
73560+ return 0;
73561+}
73562+
73563+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
73564+{
73565+ struct gr_arg_compat argcompat;
73566+
73567+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
73568+ return -EFAULT;
73569+
73570+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
73571+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
73572+ arg->role_db.num_roles = argcompat.role_db.num_roles;
73573+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
73574+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
73575+ arg->role_db.num_objects = argcompat.role_db.num_objects;
73576+
73577+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
73578+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
73579+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
73580+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
73581+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
73582+ arg->segv_device = argcompat.segv_device;
73583+ arg->segv_inode = argcompat.segv_inode;
73584+ arg->segv_uid = argcompat.segv_uid;
73585+ arg->num_sprole_pws = argcompat.num_sprole_pws;
73586+ arg->mode = argcompat.mode;
73587+
73588+ return 0;
73589+}
73590+
73591+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
73592+{
73593+ struct acl_object_label_compat objcompat;
73594+
73595+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
73596+ return -EFAULT;
73597+
73598+ obj->filename = compat_ptr(objcompat.filename);
73599+ obj->inode = objcompat.inode;
73600+ obj->device = objcompat.device;
73601+ obj->mode = objcompat.mode;
73602+
73603+ obj->nested = compat_ptr(objcompat.nested);
73604+ obj->globbed = compat_ptr(objcompat.globbed);
73605+
73606+ obj->prev = compat_ptr(objcompat.prev);
73607+ obj->next = compat_ptr(objcompat.next);
73608+
73609+ return 0;
73610+}
73611+
73612+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
73613+{
73614+ unsigned int i;
73615+ struct acl_subject_label_compat subjcompat;
73616+
73617+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
73618+ return -EFAULT;
73619+
73620+ subj->filename = compat_ptr(subjcompat.filename);
73621+ subj->inode = subjcompat.inode;
73622+ subj->device = subjcompat.device;
73623+ subj->mode = subjcompat.mode;
73624+ subj->cap_mask = subjcompat.cap_mask;
73625+ subj->cap_lower = subjcompat.cap_lower;
73626+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
73627+
73628+ for (i = 0; i < GR_NLIMITS; i++) {
73629+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
73630+ subj->res[i].rlim_cur = RLIM_INFINITY;
73631+ else
73632+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
73633+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
73634+ subj->res[i].rlim_max = RLIM_INFINITY;
73635+ else
73636+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
73637+ }
73638+ subj->resmask = subjcompat.resmask;
73639+
73640+ subj->user_trans_type = subjcompat.user_trans_type;
73641+ subj->group_trans_type = subjcompat.group_trans_type;
73642+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
73643+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
73644+ subj->user_trans_num = subjcompat.user_trans_num;
73645+ subj->group_trans_num = subjcompat.group_trans_num;
73646+
73647+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
73648+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
73649+ subj->ip_type = subjcompat.ip_type;
73650+ subj->ips = compat_ptr(subjcompat.ips);
73651+ subj->ip_num = subjcompat.ip_num;
73652+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
73653+
73654+ subj->crashes = subjcompat.crashes;
73655+ subj->expires = subjcompat.expires;
73656+
73657+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
73658+ subj->hash = compat_ptr(subjcompat.hash);
73659+ subj->prev = compat_ptr(subjcompat.prev);
73660+ subj->next = compat_ptr(subjcompat.next);
73661+
73662+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
73663+ subj->obj_hash_size = subjcompat.obj_hash_size;
73664+ subj->pax_flags = subjcompat.pax_flags;
73665+
73666+ return 0;
73667+}
73668+
73669+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
73670+{
73671+ struct acl_role_label_compat rolecompat;
73672+
73673+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
73674+ return -EFAULT;
73675+
73676+ role->rolename = compat_ptr(rolecompat.rolename);
73677+ role->uidgid = rolecompat.uidgid;
73678+ role->roletype = rolecompat.roletype;
73679+
73680+ role->auth_attempts = rolecompat.auth_attempts;
73681+ role->expires = rolecompat.expires;
73682+
73683+ role->root_label = compat_ptr(rolecompat.root_label);
73684+ role->hash = compat_ptr(rolecompat.hash);
73685+
73686+ role->prev = compat_ptr(rolecompat.prev);
73687+ role->next = compat_ptr(rolecompat.next);
73688+
73689+ role->transitions = compat_ptr(rolecompat.transitions);
73690+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
73691+ role->domain_children = compat_ptr(rolecompat.domain_children);
73692+ role->domain_child_num = rolecompat.domain_child_num;
73693+
73694+ role->umask = rolecompat.umask;
73695+
73696+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
73697+ role->subj_hash_size = rolecompat.subj_hash_size;
73698+
73699+ return 0;
73700+}
73701+
73702+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
73703+{
73704+ struct role_allowed_ip_compat roleip_compat;
73705+
73706+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
73707+ return -EFAULT;
73708+
73709+ roleip->addr = roleip_compat.addr;
73710+ roleip->netmask = roleip_compat.netmask;
73711+
73712+ roleip->prev = compat_ptr(roleip_compat.prev);
73713+ roleip->next = compat_ptr(roleip_compat.next);
73714+
73715+ return 0;
73716+}
73717+
73718+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
73719+{
73720+ struct role_transition_compat trans_compat;
73721+
73722+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
73723+ return -EFAULT;
73724+
73725+ trans->rolename = compat_ptr(trans_compat.rolename);
73726+
73727+ trans->prev = compat_ptr(trans_compat.prev);
73728+ trans->next = compat_ptr(trans_compat.next);
73729+
73730+ return 0;
73731+
73732+}
73733+
73734+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
73735+{
73736+ struct gr_hash_struct_compat hash_compat;
73737+
73738+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
73739+ return -EFAULT;
73740+
73741+ hash->table = compat_ptr(hash_compat.table);
73742+ hash->nametable = compat_ptr(hash_compat.nametable);
73743+ hash->first = compat_ptr(hash_compat.first);
73744+
73745+ hash->table_size = hash_compat.table_size;
73746+ hash->used_size = hash_compat.used_size;
73747+
73748+ hash->type = hash_compat.type;
73749+
73750+ return 0;
73751+}
73752+
73753+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
73754+{
73755+ compat_uptr_t ptrcompat;
73756+
73757+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
73758+ return -EFAULT;
73759+
73760+ *(void **)ptr = compat_ptr(ptrcompat);
73761+
73762+ return 0;
73763+}
73764+
73765+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
73766+{
73767+ struct acl_ip_label_compat ip_compat;
73768+
73769+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
73770+ return -EFAULT;
73771+
73772+ ip->iface = compat_ptr(ip_compat.iface);
73773+ ip->addr = ip_compat.addr;
73774+ ip->netmask = ip_compat.netmask;
73775+ ip->low = ip_compat.low;
73776+ ip->high = ip_compat.high;
73777+ ip->mode = ip_compat.mode;
73778+ ip->type = ip_compat.type;
73779+
73780+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
73781+
73782+ ip->prev = compat_ptr(ip_compat.prev);
73783+ ip->next = compat_ptr(ip_compat.next);
73784+
73785+ return 0;
73786+}
73787+
73788+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
73789+{
73790+ struct sprole_pw_compat pw_compat;
73791+
73792+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
73793+ return -EFAULT;
73794+
73795+ pw->rolename = compat_ptr(pw_compat.rolename);
73796+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
73797+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
73798+
73799+ return 0;
73800+}
73801+
73802+size_t get_gr_arg_wrapper_size_compat(void)
73803+{
73804+ return sizeof(struct gr_arg_wrapper_compat);
73805+}
73806+
73807diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
73808new file mode 100644
73809index 0000000..4008fdc
73810--- /dev/null
73811+++ b/grsecurity/gracl_fs.c
73812@@ -0,0 +1,445 @@
73813+#include <linux/kernel.h>
73814+#include <linux/sched.h>
73815+#include <linux/types.h>
73816+#include <linux/fs.h>
73817+#include <linux/file.h>
73818+#include <linux/stat.h>
73819+#include <linux/grsecurity.h>
73820+#include <linux/grinternal.h>
73821+#include <linux/gracl.h>
73822+
73823+umode_t
73824+gr_acl_umask(void)
73825+{
73826+ if (unlikely(!gr_acl_is_enabled()))
73827+ return 0;
73828+
73829+ return current->role->umask;
73830+}
73831+
73832+__u32
73833+gr_acl_handle_hidden_file(const struct dentry * dentry,
73834+ const struct vfsmount * mnt)
73835+{
73836+ __u32 mode;
73837+
73838+ if (unlikely(d_is_negative(dentry)))
73839+ return GR_FIND;
73840+
73841+ mode =
73842+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
73843+
73844+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
73845+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
73846+ return mode;
73847+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
73848+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
73849+ return 0;
73850+ } else if (unlikely(!(mode & GR_FIND)))
73851+ return 0;
73852+
73853+ return GR_FIND;
73854+}
73855+
73856+__u32
73857+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
73858+ int acc_mode)
73859+{
73860+ __u32 reqmode = GR_FIND;
73861+ __u32 mode;
73862+
73863+ if (unlikely(d_is_negative(dentry)))
73864+ return reqmode;
73865+
73866+ if (acc_mode & MAY_APPEND)
73867+ reqmode |= GR_APPEND;
73868+ else if (acc_mode & MAY_WRITE)
73869+ reqmode |= GR_WRITE;
73870+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
73871+ reqmode |= GR_READ;
73872+
73873+ mode =
73874+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
73875+ mnt);
73876+
73877+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
73878+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
73879+ reqmode & GR_READ ? " reading" : "",
73880+ reqmode & GR_WRITE ? " writing" : reqmode &
73881+ GR_APPEND ? " appending" : "");
73882+ return reqmode;
73883+ } else
73884+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
73885+ {
73886+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
73887+ reqmode & GR_READ ? " reading" : "",
73888+ reqmode & GR_WRITE ? " writing" : reqmode &
73889+ GR_APPEND ? " appending" : "");
73890+ return 0;
73891+ } else if (unlikely((mode & reqmode) != reqmode))
73892+ return 0;
73893+
73894+ return reqmode;
73895+}
73896+
73897+__u32
73898+gr_acl_handle_creat(const struct dentry * dentry,
73899+ const struct dentry * p_dentry,
73900+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
73901+ const int imode)
73902+{
73903+ __u32 reqmode = GR_WRITE | GR_CREATE;
73904+ __u32 mode;
73905+
73906+ if (acc_mode & MAY_APPEND)
73907+ reqmode |= GR_APPEND;
73908+ // if a directory was required or the directory already exists, then
73909+ // don't count this open as a read
73910+ if ((acc_mode & MAY_READ) &&
73911+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
73912+ reqmode |= GR_READ;
73913+ if ((open_flags & O_CREAT) &&
73914+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
73915+ reqmode |= GR_SETID;
73916+
73917+ mode =
73918+ gr_check_create(dentry, p_dentry, p_mnt,
73919+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
73920+
73921+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
73922+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
73923+ reqmode & GR_READ ? " reading" : "",
73924+ reqmode & GR_WRITE ? " writing" : reqmode &
73925+ GR_APPEND ? " appending" : "");
73926+ return reqmode;
73927+ } else
73928+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
73929+ {
73930+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
73931+ reqmode & GR_READ ? " reading" : "",
73932+ reqmode & GR_WRITE ? " writing" : reqmode &
73933+ GR_APPEND ? " appending" : "");
73934+ return 0;
73935+ } else if (unlikely((mode & reqmode) != reqmode))
73936+ return 0;
73937+
73938+ return reqmode;
73939+}
73940+
73941+__u32
73942+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
73943+ const int fmode)
73944+{
73945+ __u32 mode, reqmode = GR_FIND;
73946+
73947+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
73948+ reqmode |= GR_EXEC;
73949+ if (fmode & S_IWOTH)
73950+ reqmode |= GR_WRITE;
73951+ if (fmode & S_IROTH)
73952+ reqmode |= GR_READ;
73953+
73954+ mode =
73955+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
73956+ mnt);
73957+
73958+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
73959+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
73960+ reqmode & GR_READ ? " reading" : "",
73961+ reqmode & GR_WRITE ? " writing" : "",
73962+ reqmode & GR_EXEC ? " executing" : "");
73963+ return reqmode;
73964+ } else
73965+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
73966+ {
73967+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
73968+ reqmode & GR_READ ? " reading" : "",
73969+ reqmode & GR_WRITE ? " writing" : "",
73970+ reqmode & GR_EXEC ? " executing" : "");
73971+ return 0;
73972+ } else if (unlikely((mode & reqmode) != reqmode))
73973+ return 0;
73974+
73975+ return reqmode;
73976+}
73977+
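+/* common check-and-log helper: grant reqmode only if every requested
+   bit was returned by gr_search_file(), auditing on success or logging
+   the denial unless suppressed */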
73978+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
73979+{
73980+ __u32 mode;
73981+
73982+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
73983+
73984+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
73985+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
73986+ return mode;
73987+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
73988+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
73989+ return 0;
73990+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
73991+ return 0;
73992+
73993+ return (reqmode);
73994+}
73995+
73996+__u32
73997+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
73998+{
73999+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
74000+}
74001+
74002+__u32
74003+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
74004+{
74005+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
74006+}
74007+
74008+__u32
74009+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
74010+{
74011+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
74012+}
74013+
74014+__u32
74015+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
74016+{
74017+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
74018+}
74019+
74020+__u32
74021+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
74022+ umode_t *modeptr)
74023+{
74024+ umode_t mode;
74025+
74026+ *modeptr &= ~gr_acl_umask();
74027+ mode = *modeptr;
74028+
74029+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
74030+ return 1;
74031+
74032+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
74033+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
74034+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
74035+ GR_CHMOD_ACL_MSG);
74036+ } else {
74037+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
74038+ }
74039+}
74040+
74041+__u32
74042+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
74043+{
74044+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
74045+}
74046+
74047+__u32
74048+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
74049+{
74050+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
74051+}
74052+
74053+__u32
74054+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
74055+{
74056+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
74057+}
74058+
74059+__u32
74060+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
74061+{
74062+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
74063+}
74064+
74065+__u32
74066+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
74067+{
74068+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
74069+ GR_UNIXCONNECT_ACL_MSG);
74070+}
74071+
74072+/* hardlinks require at minimum create and link permission;
74073+   any additional privilege required depends on the
74074+   privilege of the file being linked to
74075+*/
74076+__u32
74077+gr_acl_handle_link(const struct dentry * new_dentry,
74078+ const struct dentry * parent_dentry,
74079+ const struct vfsmount * parent_mnt,
74080+ const struct dentry * old_dentry,
74081+ const struct vfsmount * old_mnt, const struct filename *to)
74082+{
74083+ __u32 mode;
74084+ __u32 needmode = GR_CREATE | GR_LINK;
74085+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
74086+
74087+ mode =
74088+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
74089+ old_mnt);
74090+
74091+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
74092+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
74093+ return mode;
74094+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
74095+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
74096+ return 0;
74097+ } else if (unlikely((mode & needmode) != needmode))
74098+ return 0;
74099+
74100+ return 1;
74101+}
74102+
74103+__u32
74104+gr_acl_handle_symlink(const struct dentry * new_dentry,
74105+ const struct dentry * parent_dentry,
74106+ const struct vfsmount * parent_mnt, const struct filename *from)
74107+{
74108+ __u32 needmode = GR_WRITE | GR_CREATE;
74109+ __u32 mode;
74110+
74111+ mode =
74112+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
74113+ GR_CREATE | GR_AUDIT_CREATE |
74114+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
74115+
74116+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
74117+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
74118+ return mode;
74119+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
74120+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
74121+ return 0;
74122+ } else if (unlikely((mode & needmode) != needmode))
74123+ return 0;
74124+
74125+ return (GR_WRITE | GR_CREATE);
74126+}
74127+
74128+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
74129+{
74130+ __u32 mode;
74131+
74132+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
74133+
74134+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
74135+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
74136+ return mode;
74137+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
74138+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
74139+ return 0;
74140+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
74141+ return 0;
74142+
74143+ return (reqmode);
74144+}
74145+
74146+__u32
74147+gr_acl_handle_mknod(const struct dentry * new_dentry,
74148+ const struct dentry * parent_dentry,
74149+ const struct vfsmount * parent_mnt,
74150+ const int mode)
74151+{
74152+ __u32 reqmode = GR_WRITE | GR_CREATE;
74153+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
74154+ reqmode |= GR_SETID;
74155+
74156+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
74157+ reqmode, GR_MKNOD_ACL_MSG);
74158+}
74159+
74160+__u32
74161+gr_acl_handle_mkdir(const struct dentry *new_dentry,
74162+ const struct dentry *parent_dentry,
74163+ const struct vfsmount *parent_mnt)
74164+{
74165+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
74166+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
74167+}
74168+
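+/* a rename is permitted only if the checks on both the source and the
+   target returned at least read and write */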
74169+#define RENAME_CHECK_SUCCESS(old, new) \
74170+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
74171+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
74172+
74173+int
74174+gr_acl_handle_rename(struct dentry *new_dentry,
74175+ struct dentry *parent_dentry,
74176+ const struct vfsmount *parent_mnt,
74177+ struct dentry *old_dentry,
74178+ struct inode *old_parent_inode,
74179+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags)
74180+{
74181+ __u32 comp1, comp2;
74182+ int error = 0;
74183+
74184+ if (unlikely(!gr_acl_is_enabled()))
74185+ return 0;
74186+
74187+ if (flags & RENAME_EXCHANGE) {
74188+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
74189+ GR_AUDIT_READ | GR_AUDIT_WRITE |
74190+ GR_SUPPRESS, parent_mnt);
74191+ comp2 =
74192+ gr_search_file(old_dentry,
74193+ GR_READ | GR_WRITE | GR_AUDIT_READ |
74194+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
74195+ } else if (d_is_negative(new_dentry)) {
74196+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
74197+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
74198+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
74199+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
74200+ GR_DELETE | GR_AUDIT_DELETE |
74201+ GR_AUDIT_READ | GR_AUDIT_WRITE |
74202+ GR_SUPPRESS, old_mnt);
74203+ } else {
74204+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
74205+ GR_CREATE | GR_DELETE |
74206+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
74207+ GR_AUDIT_READ | GR_AUDIT_WRITE |
74208+ GR_SUPPRESS, parent_mnt);
74209+ comp2 =
74210+ gr_search_file(old_dentry,
74211+ GR_READ | GR_WRITE | GR_AUDIT_READ |
74212+ GR_DELETE | GR_AUDIT_DELETE |
74213+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
74214+ }
74215+
74216+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
74217+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
74218+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
74219+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
74220+ && !(comp2 & GR_SUPPRESS)) {
74221+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
74222+ error = -EACCES;
74223+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
74224+ error = -EACCES;
74225+
74226+ return error;
74227+}
74228+
74229+void
74230+gr_acl_handle_exit(void)
74231+{
74232+ u16 id;
74233+ char *rolename;
74234+
74235+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
74236+ !(current->role->roletype & GR_ROLE_PERSIST))) {
74237+ id = current->acl_role_id;
74238+ rolename = current->role->rolename;
74239+ gr_set_acls(1);
74240+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
74241+ }
74242+
74243+ gr_put_exec_file(current);
74244+ return;
74245+}
74246+
74247+int
74248+gr_acl_handle_procpidmem(const struct task_struct *task)
74249+{
74250+ if (unlikely(!gr_acl_is_enabled()))
74251+ return 0;
74252+
74253+ if (task != current && task->acl->mode & GR_PROTPROCFD)
74254+ return -EACCES;
74255+
74256+ return 0;
74257+}
74258diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
74259new file mode 100644
74260index 0000000..f056b81
74261--- /dev/null
74262+++ b/grsecurity/gracl_ip.c
74263@@ -0,0 +1,386 @@
74264+#include <linux/kernel.h>
74265+#include <asm/uaccess.h>
74266+#include <asm/errno.h>
74267+#include <net/sock.h>
74268+#include <linux/file.h>
74269+#include <linux/fs.h>
74270+#include <linux/net.h>
74271+#include <linux/in.h>
74272+#include <linux/skbuff.h>
74273+#include <linux/ip.h>
74274+#include <linux/udp.h>
74275+#include <linux/types.h>
74276+#include <linux/sched.h>
74277+#include <linux/netdevice.h>
74278+#include <linux/inetdevice.h>
74279+#include <linux/gracl.h>
74280+#include <linux/grsecurity.h>
74281+#include <linux/grinternal.h>
74282+
74283+#define GR_BIND 0x01
74284+#define GR_CONNECT 0x02
74285+#define GR_INVERT 0x04
74286+#define GR_BINDOVERRIDE 0x08
74287+#define GR_CONNECTOVERRIDE 0x10
74288+#define GR_SOCK_FAMILY 0x20
74289+
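+/* human-readable names used in socket policy log messages, indexed by
+   IP protocol number, socket type, and address family respectively */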
74290+static const char * gr_protocols[IPPROTO_MAX] = {
74291+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
74292+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
74293+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
74294+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
74295+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
74296+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
74297+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
74298+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
74299+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
74300+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
74301+	"iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nsfnet-igp", "dgp", "tcf",
74302+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
74303+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
74304+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
74305+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
74306+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
74307+	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
74308+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
74309+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
74310+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
74311+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
74312+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
74313+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
74314+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
74315+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
74316+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
74317+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
74318+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
74319+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
74320+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
74321+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
74322+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
74323+ };
74324+
74325+static const char * gr_socktypes[SOCK_MAX] = {
74326+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
74327+ "unknown:7", "unknown:8", "unknown:9", "packet"
74328+ };
74329+
74330+static const char * gr_sockfamilies[AF_MAX+1] = {
74331+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
74332+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
74333+	"econet", "atmsvc", "rds", "sna", "irda", "pppox", "wanpipe", "llc", "fam_27", "fam_28",
74334+	"tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "caif"
74335+ };
74336+
74337+const char *
74338+gr_proto_to_name(unsigned char proto)
74339+{
74340+ return gr_protocols[proto];
74341+}
74342+
74343+const char *
74344+gr_socktype_to_name(unsigned char type)
74345+{
74346+ return gr_socktypes[type];
74347+}
74348+
74349+const char *
74350+gr_sockfamily_to_name(unsigned char family)
74351+{
74352+ return gr_sockfamilies[family];
74353+}
74354+
74355+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
74356+
74357+int
74358+gr_search_socket(const int domain, const int type, const int protocol)
74359+{
74360+ struct acl_subject_label *curr;
74361+ const struct cred *cred = current_cred();
74362+
74363+ if (unlikely(!gr_acl_is_enabled()))
74364+ goto exit;
74365+
74366+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
74367+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
74368+ goto exit; // let the kernel handle it
74369+
74370+ curr = current->acl;
74371+
74372+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
74373+		/* the family is allowed; if this is PF_INET, allow it only
74374+		   if the extra sock type/protocol checks pass */
74375+ if (domain == PF_INET)
74376+ goto inet_check;
74377+ goto exit;
74378+ } else {
74379+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
74380+ __u32 fakeip = 0;
74381+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74382+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74383+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74384+ gr_to_filename(current->exec_file->f_path.dentry,
74385+ current->exec_file->f_path.mnt) :
74386+ curr->filename, curr->filename,
74387+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
74388+ &current->signal->saved_ip);
74389+ goto exit;
74390+ }
74391+ goto exit_fail;
74392+ }
74393+
74394+inet_check:
74395+ /* the rest of this checking is for IPv4 only */
74396+ if (!curr->ips)
74397+ goto exit;
74398+
74399+ if ((curr->ip_type & (1U << type)) &&
74400+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
74401+ goto exit;
74402+
74403+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
74404+		/* we don't place acls on raw sockets, and sometimes
74405+		   dgram/ip sockets are opened for ioctl only and not
74406+		   bind/connect, so we fake a learn log entry for them */
74407+ if (type == SOCK_RAW || type == SOCK_PACKET) {
74408+ __u32 fakeip = 0;
74409+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74410+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74411+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74412+ gr_to_filename(current->exec_file->f_path.dentry,
74413+ current->exec_file->f_path.mnt) :
74414+ curr->filename, curr->filename,
74415+ &fakeip, 0, type,
74416+ protocol, GR_CONNECT, &current->signal->saved_ip);
74417+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
74418+ __u32 fakeip = 0;
74419+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74420+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74421+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74422+ gr_to_filename(current->exec_file->f_path.dentry,
74423+ current->exec_file->f_path.mnt) :
74424+ curr->filename, curr->filename,
74425+ &fakeip, 0, type,
74426+ protocol, GR_BIND, &current->signal->saved_ip);
74427+ }
74428+ /* we'll log when they use connect or bind */
74429+ goto exit;
74430+ }
74431+
74432+exit_fail:
74433+ if (domain == PF_INET)
74434+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
74435+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
74436+ else if (rcu_access_pointer(net_families[domain]) != NULL)
74437+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
74438+ gr_socktype_to_name(type), protocol);
74439+
74440+ return 0;
74441+exit:
74442+ return 1;
74443+}
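
To illustrate the bitmap test used above for sock_families[] (and, in the same way, for ip_proto[]), here is a minimal standalone sketch, not part of the patch: one bit per family, packed into 32-bit words. The AF_MAX value and the allowed families are hypothetical.

#include <stdio.h>

#define AF_MAX 41

static unsigned int sock_families[(AF_MAX + 31) / 32];

static void allow_family(int domain)
{
	sock_families[domain / 32] |= (1U << (domain % 32));
}

static int family_allowed(int domain)
{
	return !!(sock_families[domain / 32] & (1U << (domain % 32)));
}

int main(void)
{
	allow_family(2);	/* PF_INET */
	printf("PF_INET allowed: %d\n", family_allowed(2));	/* 1 */
	printf("PF_INET6 allowed: %d\n", family_allowed(10));	/* 0 */
	return 0;
}
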
74444+
74445+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
74446+{
74447+ if ((ip->mode & mode) &&
74448+ (ip_port >= ip->low) &&
74449+ (ip_port <= ip->high) &&
74450+ ((ntohl(ip_addr) & our_netmask) ==
74451+ (ntohl(our_addr) & our_netmask))
74452+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
74453+ && (ip->type & (1U << type))) {
74454+ if (ip->mode & GR_INVERT)
74455+ return 2; // specifically denied
74456+ else
74457+ return 1; // allowed
74458+ }
74459+
74460+ return 0; // not specifically allowed, may continue parsing
74461+}
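
A worked example (editorial, not part of the patch) of the subnet comparison in check_ip_policy(): an address matches when the masked candidate equals the masked policy address. The addresses below are hypothetical; as in the patch, the netmask is in host byte order and both addresses are converted with ntohl() before masking.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t our_addr = inet_addr("192.168.1.0");	/* policy subnet, network order */
	uint32_t our_netmask = 0xffffff00;		/* /24, host order */
	uint32_t ip_addr = inet_addr("192.168.1.42");	/* candidate, network order */

	int match = (ntohl(ip_addr) & our_netmask) ==
		    (ntohl(our_addr) & our_netmask);

	printf("match: %d\n", match);	/* 1 */
	return 0;
}
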
74462+
74463+static int
74464+gr_search_connectbind(const int full_mode, struct sock *sk,
74465+ struct sockaddr_in *addr, const int type)
74466+{
74467+ char iface[IFNAMSIZ] = {0};
74468+ struct acl_subject_label *curr;
74469+ struct acl_ip_label *ip;
74470+ struct inet_sock *isk;
74471+ struct net_device *dev;
74472+ struct in_device *idev;
74473+ unsigned long i;
74474+ int ret;
74475+ int mode = full_mode & (GR_BIND | GR_CONNECT);
74476+ __u32 ip_addr = 0;
74477+ __u32 our_addr;
74478+ __u32 our_netmask;
74479+ char *p;
74480+ __u16 ip_port = 0;
74481+ const struct cred *cred = current_cred();
74482+
74483+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
74484+ return 0;
74485+
74486+ curr = current->acl;
74487+ isk = inet_sk(sk);
74488+
74489+	/* INADDR_ANY override for binds; inaddr_any_override is already in network order */
74490+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
74491+ addr->sin_addr.s_addr = curr->inaddr_any_override;
74492+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
74493+ struct sockaddr_in saddr;
74494+ int err;
74495+
74496+ saddr.sin_family = AF_INET;
74497+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
74498+ saddr.sin_port = isk->inet_sport;
74499+
74500+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
74501+ if (err)
74502+ return err;
74503+
74504+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
74505+ if (err)
74506+ return err;
74507+ }
74508+
74509+ if (!curr->ips)
74510+ return 0;
74511+
74512+ ip_addr = addr->sin_addr.s_addr;
74513+ ip_port = ntohs(addr->sin_port);
74514+
74515+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
74516+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74517+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74518+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74519+ gr_to_filename(current->exec_file->f_path.dentry,
74520+ current->exec_file->f_path.mnt) :
74521+ curr->filename, curr->filename,
74522+ &ip_addr, ip_port, type,
74523+ sk->sk_protocol, mode, &current->signal->saved_ip);
74524+ return 0;
74525+ }
74526+
74527+ for (i = 0; i < curr->ip_num; i++) {
74528+ ip = *(curr->ips + i);
74529+ if (ip->iface != NULL) {
74530+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
74531+ p = strchr(iface, ':');
74532+ if (p != NULL)
74533+ *p = '\0';
74534+ dev = dev_get_by_name(sock_net(sk), iface);
74535+ if (dev == NULL)
74536+ continue;
74537+ idev = in_dev_get(dev);
74538+ if (idev == NULL) {
74539+ dev_put(dev);
74540+ continue;
74541+ }
74542+ rcu_read_lock();
74543+ for_ifa(idev) {
74544+ if (!strcmp(ip->iface, ifa->ifa_label)) {
74545+ our_addr = ifa->ifa_address;
74546+ our_netmask = 0xffffffff;
74547+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
74548+ if (ret == 1) {
74549+ rcu_read_unlock();
74550+ in_dev_put(idev);
74551+ dev_put(dev);
74552+ return 0;
74553+ } else if (ret == 2) {
74554+ rcu_read_unlock();
74555+ in_dev_put(idev);
74556+ dev_put(dev);
74557+ goto denied;
74558+ }
74559+ }
74560+ } endfor_ifa(idev);
74561+ rcu_read_unlock();
74562+ in_dev_put(idev);
74563+ dev_put(dev);
74564+ } else {
74565+ our_addr = ip->addr;
74566+ our_netmask = ip->netmask;
74567+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
74568+ if (ret == 1)
74569+ return 0;
74570+ else if (ret == 2)
74571+ goto denied;
74572+ }
74573+ }
74574+
74575+denied:
74576+ if (mode == GR_BIND)
74577+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
74578+ else if (mode == GR_CONNECT)
74579+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
74580+
74581+ return -EACCES;
74582+}
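
Editorial sketch (not part of the patch) of the rule walk in gr_search_connectbind(): check_ip_policy() returns 0 (no match, keep walking), 1 (allowed), or 2 (matched a GR_INVERT rule, an explicit deny); falling off the end of the rule list is also a deny. First match wins.

#include <stdio.h>
#include <errno.h>

static int walk_rules(const int *results, int nrules)
{
	int i;

	for (i = 0; i < nrules; i++) {
		if (results[i] == 1)
			return 0;	/* first allow rule wins */
		if (results[i] == 2)
			return -EACCES;	/* explicit (inverted) deny */
	}
	return -EACCES;			/* default deny: nothing matched */
}

int main(void)
{
	int allow[] = { 0, 0, 1 };
	int deny[]  = { 0, 2, 1 };	/* the deny rule shadows the later allow */

	printf("%d %d\n", walk_rules(allow, 3), walk_rules(deny, 3)); /* 0 -13 */
	return 0;
}
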
74583+
74584+int
74585+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
74586+{
74587+ /* always allow disconnection of dgram sockets with connect */
74588+ if (addr->sin_family == AF_UNSPEC)
74589+ return 0;
74590+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
74591+}
74592+
74593+int
74594+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
74595+{
74596+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
74597+}
74598+
74599+int gr_search_listen(struct socket *sock)
74600+{
74601+ struct sock *sk = sock->sk;
74602+ struct sockaddr_in addr;
74603+
74604+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
74605+ addr.sin_port = inet_sk(sk)->inet_sport;
74606+
74607+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
74608+}
74609+
74610+int gr_search_accept(struct socket *sock)
74611+{
74612+ struct sock *sk = sock->sk;
74613+ struct sockaddr_in addr;
74614+
74615+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
74616+ addr.sin_port = inet_sk(sk)->inet_sport;
74617+
74618+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
74619+}
74620+
74621+int
74622+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
74623+{
74624+ if (addr)
74625+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
74626+ else {
74627+ struct sockaddr_in sin;
74628+ const struct inet_sock *inet = inet_sk(sk);
74629+
74630+ sin.sin_addr.s_addr = inet->inet_daddr;
74631+ sin.sin_port = inet->inet_dport;
74632+
74633+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
74634+ }
74635+}
74636+
74637+int
74638+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
74639+{
74640+ struct sockaddr_in sin;
74641+
74642+ if (unlikely(skb->len < sizeof (struct udphdr)))
74643+ return 0; // skip this packet
74644+
74645+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
74646+ sin.sin_port = udp_hdr(skb)->source;
74647+
74648+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
74649+}
74650diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
74651new file mode 100644
74652index 0000000..25f54ef
74653--- /dev/null
74654+++ b/grsecurity/gracl_learn.c
74655@@ -0,0 +1,207 @@
74656+#include <linux/kernel.h>
74657+#include <linux/mm.h>
74658+#include <linux/sched.h>
74659+#include <linux/poll.h>
74660+#include <linux/string.h>
74661+#include <linux/file.h>
74662+#include <linux/types.h>
74663+#include <linux/vmalloc.h>
74664+#include <linux/grinternal.h>
74665+
74666+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
74667+ size_t count, loff_t *ppos);
74668+extern int gr_acl_is_enabled(void);
74669+
74670+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
74671+static int gr_learn_attached;
74672+
74673+/* use a 512k buffer */
74674+#define LEARN_BUFFER_SIZE (512 * 1024)
74675+
74676+static DEFINE_SPINLOCK(gr_learn_lock);
74677+static DEFINE_MUTEX(gr_learn_user_mutex);
74678+
74679+/* we need to maintain two buffers: the reader context servicing grlearn
74680+   takes a mutex around the copy to userspace (which may sleep), while the
74681+   other kernel contexts take a spinlock when copying into the buffer,
74682+   since they cannot sleep */
74683+static char *learn_buffer;
74684+static char *learn_buffer_user;
74685+static int learn_buffer_len;
74686+static int learn_buffer_user_len;
74687+
74688+static ssize_t
74689+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
74690+{
74691+ DECLARE_WAITQUEUE(wait, current);
74692+ ssize_t retval = 0;
74693+
74694+ add_wait_queue(&learn_wait, &wait);
74695+ set_current_state(TASK_INTERRUPTIBLE);
74696+ do {
74697+ mutex_lock(&gr_learn_user_mutex);
74698+ spin_lock(&gr_learn_lock);
74699+ if (learn_buffer_len)
74700+ break;
74701+ spin_unlock(&gr_learn_lock);
74702+ mutex_unlock(&gr_learn_user_mutex);
74703+ if (file->f_flags & O_NONBLOCK) {
74704+ retval = -EAGAIN;
74705+ goto out;
74706+ }
74707+ if (signal_pending(current)) {
74708+ retval = -ERESTARTSYS;
74709+ goto out;
74710+ }
74711+
74712+ schedule();
74713+ } while (1);
74714+
74715+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
74716+ learn_buffer_user_len = learn_buffer_len;
74717+ retval = learn_buffer_len;
74718+ learn_buffer_len = 0;
74719+
74720+ spin_unlock(&gr_learn_lock);
74721+
74722+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
74723+ retval = -EFAULT;
74724+
74725+ mutex_unlock(&gr_learn_user_mutex);
74726+out:
74727+ set_current_state(TASK_RUNNING);
74728+ remove_wait_queue(&learn_wait, &wait);
74729+ return retval;
74730+}
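
A minimal userspace consumer sketch (not part of the patch) for the stream served by read_learn()/poll_learn(). The device node path is an assumption here: these fops are bound to the grsec device elsewhere in the patch, conventionally /dev/grsec, which the grlearn daemon reads. Note that opening for read attaches the single permitted learner; a second reader gets -EBUSY from open_learn().

#include <stdio.h>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

int main(void)
{
	static char buf[512 * 1024];	/* matches LEARN_BUFFER_SIZE above */
	struct pollfd pfd;
	ssize_t n;

	pfd.fd = open("/dev/grsec", O_RDONLY);	/* path is an assumption */
	if (pfd.fd < 0) {
		perror("open");
		return 1;
	}
	pfd.events = POLLIN;

	while (poll(&pfd, 1, -1) > 0) {
		n = read(pfd.fd, buf, sizeof(buf));
		if (n <= 0)
			break;
		fwrite(buf, 1, n, stdout);	/* NUL-separated records */
	}
	close(pfd.fd);
	return 0;
}
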
74731+
74732+static unsigned int
74733+poll_learn(struct file * file, poll_table * wait)
74734+{
74735+ poll_wait(file, &learn_wait, wait);
74736+
74737+ if (learn_buffer_len)
74738+ return (POLLIN | POLLRDNORM);
74739+
74740+ return 0;
74741+}
74742+
74743+void
74744+gr_clear_learn_entries(void)
74745+{
74746+ char *tmp;
74747+
74748+ mutex_lock(&gr_learn_user_mutex);
74749+ spin_lock(&gr_learn_lock);
74750+ tmp = learn_buffer;
74751+ learn_buffer = NULL;
74752+ spin_unlock(&gr_learn_lock);
74753+ if (tmp)
74754+ vfree(tmp);
74755+ if (learn_buffer_user != NULL) {
74756+ vfree(learn_buffer_user);
74757+ learn_buffer_user = NULL;
74758+ }
74759+ learn_buffer_len = 0;
74760+ mutex_unlock(&gr_learn_user_mutex);
74761+
74762+ return;
74763+}
74764+
74765+void
74766+gr_add_learn_entry(const char *fmt, ...)
74767+{
74768+ va_list args;
74769+ unsigned int len;
74770+
74771+ if (!gr_learn_attached)
74772+ return;
74773+
74774+ spin_lock(&gr_learn_lock);
74775+
74776+ /* leave a gap at the end so we know when it's "full" but don't have to
74777+ compute the exact length of the string we're trying to append
74778+ */
74779+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
74780+ spin_unlock(&gr_learn_lock);
74781+ wake_up_interruptible(&learn_wait);
74782+ return;
74783+ }
74784+ if (learn_buffer == NULL) {
74785+ spin_unlock(&gr_learn_lock);
74786+ return;
74787+ }
74788+
74789+ va_start(args, fmt);
74790+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
74791+ va_end(args);
74792+
74793+ learn_buffer_len += len + 1;
74794+
74795+ spin_unlock(&gr_learn_lock);
74796+ wake_up_interruptible(&learn_wait);
74797+
74798+ return;
74799+}
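
Because gr_add_learn_entry() advances learn_buffer_len by len + 1, consecutive records stay separated by the terminating NUL that vsnprintf() writes. An editorial sketch (not part of the patch) of how a consumer can split the byte stream returned by read_learn():

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char buf[] = "first record\0second record\0";	/* sample stream */
	size_t len = sizeof(buf) - 1;	/* as returned by read() */
	size_t off = 0;

	while (off < len) {
		printf("record: %s\n", buf + off);
		off += strlen(buf + off) + 1;	/* skip record and its NUL */
	}
	return 0;
}
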
74800+
74801+static int
74802+open_learn(struct inode *inode, struct file *file)
74803+{
74804+ if (file->f_mode & FMODE_READ && gr_learn_attached)
74805+ return -EBUSY;
74806+ if (file->f_mode & FMODE_READ) {
74807+ int retval = 0;
74808+ mutex_lock(&gr_learn_user_mutex);
74809+ if (learn_buffer == NULL)
74810+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
74811+ if (learn_buffer_user == NULL)
74812+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
74813+ if (learn_buffer == NULL) {
74814+ retval = -ENOMEM;
74815+ goto out_error;
74816+ }
74817+ if (learn_buffer_user == NULL) {
74818+ retval = -ENOMEM;
74819+ goto out_error;
74820+ }
74821+ learn_buffer_len = 0;
74822+ learn_buffer_user_len = 0;
74823+ gr_learn_attached = 1;
74824+out_error:
74825+ mutex_unlock(&gr_learn_user_mutex);
74826+ return retval;
74827+ }
74828+ return 0;
74829+}
74830+
74831+static int
74832+close_learn(struct inode *inode, struct file *file)
74833+{
74834+ if (file->f_mode & FMODE_READ) {
74835+ char *tmp = NULL;
74836+ mutex_lock(&gr_learn_user_mutex);
74837+ spin_lock(&gr_learn_lock);
74838+ tmp = learn_buffer;
74839+ learn_buffer = NULL;
74840+ spin_unlock(&gr_learn_lock);
74841+ if (tmp)
74842+ vfree(tmp);
74843+ if (learn_buffer_user != NULL) {
74844+ vfree(learn_buffer_user);
74845+ learn_buffer_user = NULL;
74846+ }
74847+ learn_buffer_len = 0;
74848+ learn_buffer_user_len = 0;
74849+ gr_learn_attached = 0;
74850+ mutex_unlock(&gr_learn_user_mutex);
74851+ }
74852+
74853+ return 0;
74854+}
74855+
74856+const struct file_operations grsec_fops = {
74857+ .read = read_learn,
74858+ .write = write_grsec_handler,
74859+ .open = open_learn,
74860+ .release = close_learn,
74861+ .poll = poll_learn,
74862+};
74863diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
74864new file mode 100644
74865index 0000000..3f8ade0
74866--- /dev/null
74867+++ b/grsecurity/gracl_policy.c
74868@@ -0,0 +1,1782 @@
74869+#include <linux/kernel.h>
74870+#include <linux/module.h>
74871+#include <linux/sched.h>
74872+#include <linux/mm.h>
74873+#include <linux/file.h>
74874+#include <linux/fs.h>
74875+#include <linux/namei.h>
74876+#include <linux/mount.h>
74877+#include <linux/tty.h>
74878+#include <linux/proc_fs.h>
74879+#include <linux/lglock.h>
74880+#include <linux/slab.h>
74881+#include <linux/vmalloc.h>
74882+#include <linux/types.h>
74883+#include <linux/sysctl.h>
74884+#include <linux/netdevice.h>
74885+#include <linux/ptrace.h>
74886+#include <linux/gracl.h>
74887+#include <linux/gralloc.h>
74888+#include <linux/security.h>
74889+#include <linux/grinternal.h>
74890+#include <linux/pid_namespace.h>
74891+#include <linux/stop_machine.h>
74892+#include <linux/fdtable.h>
74893+#include <linux/percpu.h>
74895+#include <linux/hugetlb.h>
74896+#include <linux/posix-timers.h>
74897+#include "../fs/mount.h"
74898+
74899+#include <asm/uaccess.h>
74900+#include <asm/errno.h>
74901+#include <asm/mman.h>
74902+
74903+extern struct gr_policy_state *polstate;
74904+
74905+#define FOR_EACH_ROLE_START(role) \
74906+ role = polstate->role_list; \
74907+ while (role) {
74908+
74909+#define FOR_EACH_ROLE_END(role) \
74910+ role = role->prev; \
74911+ }
74912+
74913+struct path gr_real_root;
74914+
74915+extern struct gr_alloc_state *current_alloc_state;
74916+
74917+u16 acl_sp_role_value;
74918+
74919+static DEFINE_MUTEX(gr_dev_mutex);
74920+
74921+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
74922+extern void gr_clear_learn_entries(void);
74923+
74924+struct gr_arg *gr_usermode __read_only;
74925+unsigned char *gr_system_salt __read_only;
74926+unsigned char *gr_system_sum __read_only;
74927+
74928+static unsigned int gr_auth_attempts = 0;
74929+static unsigned long gr_auth_expires = 0UL;
74930+
74931+struct acl_object_label *fakefs_obj_rw;
74932+struct acl_object_label *fakefs_obj_rwx;
74933+
74934+extern int gr_init_uidset(void);
74935+extern void gr_free_uidset(void);
74936+extern void gr_remove_uid(uid_t uid);
74937+extern int gr_find_uid(uid_t uid);
74938+
74939+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename);
74940+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
74941+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
74942+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
74943+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
74944+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
74945+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
74946+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
74947+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
74948+extern struct acl_subject_label *lookup_acl_subj_label(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
74949+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
74950+extern void assign_special_role(const char *rolename);
74951+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
74952+extern int gr_rbac_disable(void *unused);
74953+extern void gr_enable_rbac_system(void);
74954+
74955+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
74956+{
74957+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
74958+ return -EFAULT;
74959+
74960+ return 0;
74961+}
74962+
74963+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
74964+{
74965+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
74966+ return -EFAULT;
74967+
74968+ return 0;
74969+}
74970+
74971+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
74972+{
74973+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
74974+ return -EFAULT;
74975+
74976+ return 0;
74977+}
74978+
74979+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
74980+{
74981+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
74982+ return -EFAULT;
74983+
74984+ return 0;
74985+}
74986+
74987+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
74988+{
74989+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
74990+ return -EFAULT;
74991+
74992+ return 0;
74993+}
74994+
74995+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
74996+{
74997+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
74998+ return -EFAULT;
74999+
75000+ return 0;
75001+}
75002+
75003+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
75004+{
75005+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
75006+ return -EFAULT;
75007+
75008+ return 0;
75009+}
75010+
75011+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
75012+{
75013+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
75014+ return -EFAULT;
75015+
75016+ return 0;
75017+}
75018+
75019+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
75020+{
75021+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
75022+ return -EFAULT;
75023+
75024+ return 0;
75025+}
75026+
75027+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
75028+{
75029+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
75030+ return -EFAULT;
75031+
75032+ if (((uwrap->version != GRSECURITY_VERSION) &&
75033+ (uwrap->version != 0x2901)) ||
75034+ (uwrap->size != sizeof(struct gr_arg)))
75035+ return -EINVAL;
75036+
75037+ return 0;
75038+}
75039+
75040+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
75041+{
75042+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
75043+ return -EFAULT;
75044+
75045+ return 0;
75046+}
75047+
75048+static size_t get_gr_arg_wrapper_size_normal(void)
75049+{
75050+ return sizeof(struct gr_arg_wrapper);
75051+}
75052+
75053+#ifdef CONFIG_COMPAT
75054+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
75055+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
75056+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
75057+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
75058+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
75059+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
75060+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
75061+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
75062+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
75063+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
75064+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
75065+extern size_t get_gr_arg_wrapper_size_compat(void);
75066+
75067+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
75068+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
75069+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
75070+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
75071+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
75072+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
75073+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
75074+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
75075+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
75076+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
75077+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
75078+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
75079+
75080+#else
75081+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
75082+#define copy_gr_arg copy_gr_arg_normal
75083+#define copy_gr_hash_struct copy_gr_hash_struct_normal
75084+#define copy_acl_object_label copy_acl_object_label_normal
75085+#define copy_acl_subject_label copy_acl_subject_label_normal
75086+#define copy_acl_role_label copy_acl_role_label_normal
75087+#define copy_acl_ip_label copy_acl_ip_label_normal
75088+#define copy_pointer_from_array copy_pointer_from_array_normal
75089+#define copy_sprole_pw copy_sprole_pw_normal
75090+#define copy_role_transition copy_role_transition_normal
75091+#define copy_role_allowed_ip copy_role_allowed_ip_normal
75092+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
75093+#endif
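
Editorial sketch (not part of the patch) of the pattern above: with CONFIG_COMPAT the copy_* operations are function pointers bound once, before first use, according to whether the policy loader is a compat (32-bit) task; that binding happens elsewhere in the patch. Without CONFIG_COMPAT the indirection collapses at compile time to the *_normal implementations via #define. A generic analogue of the runtime dispatch:

#include <stdio.h>

static int copy_normal(void) { return 0; }
static int copy_compat(void) { return 1; }

static int (*copy_op)(void);	/* bound once, before first use */

static void bind_ops(int is_compat_task)
{
	copy_op = is_compat_task ? copy_compat : copy_normal;
}

int main(void)
{
	bind_ops(0);
	printf("dispatched: %d\n", copy_op());	/* 0 */
	return 0;
}
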
75094+
75095+static struct acl_subject_label *
75096+lookup_subject_map(const struct acl_subject_label *userp)
75097+{
75098+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
75099+ struct subject_map *match;
75100+
75101+ match = polstate->subj_map_set.s_hash[index];
75102+
75103+ while (match && match->user != userp)
75104+ match = match->next;
75105+
75106+ if (match != NULL)
75107+ return match->kernel;
75108+ else
75109+ return NULL;
75110+}
75111+
75112+static void
75113+insert_subj_map_entry(struct subject_map *subjmap)
75114+{
75115+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
75116+ struct subject_map **curr;
75117+
75118+ subjmap->prev = NULL;
75119+
75120+ curr = &polstate->subj_map_set.s_hash[index];
75121+ if (*curr != NULL)
75122+ (*curr)->prev = subjmap;
75123+
75124+ subjmap->next = *curr;
75125+ *curr = subjmap;
75126+
75127+ return;
75128+}
75129+
75130+static void
75131+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
75132+{
75133+ unsigned int index =
75134+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
75135+ struct acl_role_label **curr;
75136+ struct acl_role_label *tmp, *tmp2;
75137+
75138+ curr = &polstate->acl_role_set.r_hash[index];
75139+
75140+ /* simple case, slot is empty, just set it to our role */
75141+ if (*curr == NULL) {
75142+ *curr = role;
75143+ } else {
75144+ /* example:
75145+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
75146+ 2 -> 3
75147+ */
75148+ /* first check to see if we can already be reached via this slot */
75149+ tmp = *curr;
75150+ while (tmp && tmp != role)
75151+ tmp = tmp->next;
75152+ if (tmp == role) {
75153+ /* we don't need to add ourselves to this slot's chain */
75154+ return;
75155+ }
75156+ /* we need to add ourselves to this chain, two cases */
75157+ if (role->next == NULL) {
75158+ /* simple case, append the current chain to our role */
75159+ role->next = *curr;
75160+ *curr = role;
75161+ } else {
75162+ /* 1 -> 2 -> 3 -> 4
75163+ 2 -> 3 -> 4
75164+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
75165+ */
75166+ /* trickier case: walk our role's chain until we find
75167+ the role for the start of the current slot's chain */
75168+ tmp = role;
75169+ tmp2 = *curr;
75170+ while (tmp->next && tmp->next != tmp2)
75171+ tmp = tmp->next;
75172+ if (tmp->next == tmp2) {
75173+ /* from example above, we found 3, so just
75174+ replace this slot's chain with ours */
75175+ *curr = role;
75176+ } else {
75177+ /* we didn't find a subset of our role's chain
75178+ in the current slot's chain, so append their
75179+ chain to ours, and set us as the first role in
75180+ the slot's chain
75181+
75182+ we could fold this case with the case above,
75183+ but making it explicit for clarity
75184+ */
75185+ tmp->next = tmp2;
75186+ *curr = role;
75187+ }
75188+ }
75189+ }
75190+
75191+ return;
75192+}
75193+
75194+static void
75195+insert_acl_role_label(struct acl_role_label *role)
75196+{
75197+ int i;
75198+
75199+ if (polstate->role_list == NULL) {
75200+ polstate->role_list = role;
75201+ role->prev = NULL;
75202+ } else {
75203+ role->prev = polstate->role_list;
75204+ polstate->role_list = role;
75205+ }
75206+
75207+ /* used for hash chains */
75208+ role->next = NULL;
75209+
75210+ if (role->roletype & GR_ROLE_DOMAIN) {
75211+ for (i = 0; i < role->domain_child_num; i++)
75212+ __insert_acl_role_label(role, role->domain_children[i]);
75213+ } else
75214+ __insert_acl_role_label(role, role->uidgid);
75215+}
75216+
75217+static int
75218+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
75219+{
75220+ struct name_entry **curr, *nentry;
75221+ struct inodev_entry *ientry;
75222+ unsigned int len = strlen(name);
75223+ unsigned int key = full_name_hash(name, len);
75224+ unsigned int index = key % polstate->name_set.n_size;
75225+
75226+ curr = &polstate->name_set.n_hash[index];
75227+
75228+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
75229+ curr = &((*curr)->next);
75230+
75231+ if (*curr != NULL)
75232+ return 1;
75233+
75234+ nentry = acl_alloc(sizeof (struct name_entry));
75235+ if (nentry == NULL)
75236+ return 0;
75237+ ientry = acl_alloc(sizeof (struct inodev_entry));
75238+ if (ientry == NULL)
75239+ return 0;
75240+ ientry->nentry = nentry;
75241+
75242+ nentry->key = key;
75243+ nentry->name = name;
75244+ nentry->inode = inode;
75245+ nentry->device = device;
75246+ nentry->len = len;
75247+ nentry->deleted = deleted;
75248+
75249+ nentry->prev = NULL;
75250+ curr = &polstate->name_set.n_hash[index];
75251+ if (*curr != NULL)
75252+ (*curr)->prev = nentry;
75253+ nentry->next = *curr;
75254+ *curr = nentry;
75255+
75256+ /* insert us into the table searchable by inode/dev */
75257+ __insert_inodev_entry(polstate, ientry);
75258+
75259+ return 1;
75260+}
75261+
75262+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
75263+
75264+static void *
75265+create_table(__u32 * len, int elementsize)
75266+{
75267+ unsigned int table_sizes[] = {
75268+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
75269+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
75270+ 4194301, 8388593, 16777213, 33554393, 67108859
75271+ };
75272+ void *newtable = NULL;
75273+ unsigned int pwr = 0;
75274+
75275+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
75276+ table_sizes[pwr] <= *len)
75277+ pwr++;
75278+
75279+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
75280+ return newtable;
75281+
75282+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
75283+ newtable =
75284+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
75285+ else
75286+ newtable = vmalloc(table_sizes[pwr] * elementsize);
75287+
75288+ *len = table_sizes[pwr];
75289+
75290+ return newtable;
75291+}
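
A standalone sketch (not part of the patch) of the size selection in create_table(): pick the first prime in the table strictly greater than the requested length, so the chained hash keeps a load factor near 1, per the comment above; create_table() fails if even the largest prime is not big enough or the allocation would overflow.

#include <stdio.h>

int main(void)
{
	unsigned int table_sizes[] = {
		7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
		32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
		4194301, 8388593, 16777213, 33554393, 67108859
	};
	unsigned int len = 1000, pwr = 0;

	while (pwr < (sizeof(table_sizes) / sizeof(table_sizes[0])) - 1 &&
	       table_sizes[pwr] <= len)
		pwr++;
	printf("requested %u -> table size %u\n", len, table_sizes[pwr]); /* 1021 */
	return 0;
}
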
75292+
75293+static int
75294+init_variables(const struct gr_arg *arg, bool reload)
75295+{
75296+ struct task_struct *reaper = init_pid_ns.child_reaper;
75297+ unsigned int stacksize;
75298+
75299+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
75300+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
75301+ polstate->name_set.n_size = arg->role_db.num_objects;
75302+ polstate->inodev_set.i_size = arg->role_db.num_objects;
75303+
75304+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
75305+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
75306+ return 1;
75307+
75308+ if (!reload) {
75309+ if (!gr_init_uidset())
75310+ return 1;
75311+ }
75312+
75313+ /* set up the stack that holds allocation info */
75314+
75315+ stacksize = arg->role_db.num_pointers + 5;
75316+
75317+ if (!acl_alloc_stack_init(stacksize))
75318+ return 1;
75319+
75320+ if (!reload) {
75321+ /* grab reference for the real root dentry and vfsmount */
75322+ get_fs_root(reaper->fs, &gr_real_root);
75323+
75324+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
75325+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
75326+#endif
75327+
75328+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
75329+ if (fakefs_obj_rw == NULL)
75330+ return 1;
75331+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
75332+
75333+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
75334+ if (fakefs_obj_rwx == NULL)
75335+ return 1;
75336+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
75337+ }
75338+
75339+ polstate->subj_map_set.s_hash =
75340+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
75341+ polstate->acl_role_set.r_hash =
75342+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
75343+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
75344+ polstate->inodev_set.i_hash =
75345+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
75346+
75347+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
75348+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
75349+ return 1;
75350+
75351+ memset(polstate->subj_map_set.s_hash, 0,
75352+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
75353+ memset(polstate->acl_role_set.r_hash, 0,
75354+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
75355+ memset(polstate->name_set.n_hash, 0,
75356+ sizeof (struct name_entry *) * polstate->name_set.n_size);
75357+ memset(polstate->inodev_set.i_hash, 0,
75358+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
75359+
75360+ return 0;
75361+}
75362+
75363+/* free information not needed after startup;
75364+   currently this is the user->kernel pointer mapping for subjects
75365+*/
75366+
75367+static void
75368+free_init_variables(void)
75369+{
75370+ __u32 i;
75371+
75372+ if (polstate->subj_map_set.s_hash) {
75373+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
75374+ if (polstate->subj_map_set.s_hash[i]) {
75375+ kfree(polstate->subj_map_set.s_hash[i]);
75376+ polstate->subj_map_set.s_hash[i] = NULL;
75377+ }
75378+ }
75379+
75380+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
75381+ PAGE_SIZE)
75382+ kfree(polstate->subj_map_set.s_hash);
75383+ else
75384+ vfree(polstate->subj_map_set.s_hash);
75385+ }
75386+
75387+ return;
75388+}
75389+
75390+static void
75391+free_variables(bool reload)
75392+{
75393+ struct acl_subject_label *s;
75394+ struct acl_role_label *r;
75395+ struct task_struct *task, *task2;
75396+ unsigned int x;
75397+
75398+ if (!reload) {
75399+ gr_clear_learn_entries();
75400+
75401+ read_lock(&tasklist_lock);
75402+ do_each_thread(task2, task) {
75403+ task->acl_sp_role = 0;
75404+ task->acl_role_id = 0;
75405+ task->inherited = 0;
75406+ task->acl = NULL;
75407+ task->role = NULL;
75408+ } while_each_thread(task2, task);
75409+ read_unlock(&tasklist_lock);
75410+
75411+ kfree(fakefs_obj_rw);
75412+ fakefs_obj_rw = NULL;
75413+ kfree(fakefs_obj_rwx);
75414+ fakefs_obj_rwx = NULL;
75415+
75416+ /* release the reference to the real root dentry and vfsmount */
75417+ path_put(&gr_real_root);
75418+ memset(&gr_real_root, 0, sizeof(gr_real_root));
75419+ }
75420+
75421+ /* free all object hash tables */
75422+
75423+ FOR_EACH_ROLE_START(r)
75424+ if (r->subj_hash == NULL)
75425+ goto next_role;
75426+ FOR_EACH_SUBJECT_START(r, s, x)
75427+ if (s->obj_hash == NULL)
75428+ break;
75429+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75430+ kfree(s->obj_hash);
75431+ else
75432+ vfree(s->obj_hash);
75433+ FOR_EACH_SUBJECT_END(s, x)
75434+ FOR_EACH_NESTED_SUBJECT_START(r, s)
75435+ if (s->obj_hash == NULL)
75436+ break;
75437+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75438+ kfree(s->obj_hash);
75439+ else
75440+ vfree(s->obj_hash);
75441+ FOR_EACH_NESTED_SUBJECT_END(s)
75442+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
75443+ kfree(r->subj_hash);
75444+ else
75445+ vfree(r->subj_hash);
75446+ r->subj_hash = NULL;
75447+next_role:
75448+ FOR_EACH_ROLE_END(r)
75449+
75450+ acl_free_all();
75451+
75452+ if (polstate->acl_role_set.r_hash) {
75453+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
75454+ PAGE_SIZE)
75455+ kfree(polstate->acl_role_set.r_hash);
75456+ else
75457+ vfree(polstate->acl_role_set.r_hash);
75458+ }
75459+ if (polstate->name_set.n_hash) {
75460+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
75461+ PAGE_SIZE)
75462+ kfree(polstate->name_set.n_hash);
75463+ else
75464+ vfree(polstate->name_set.n_hash);
75465+ }
75466+
75467+ if (polstate->inodev_set.i_hash) {
75468+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
75469+ PAGE_SIZE)
75470+ kfree(polstate->inodev_set.i_hash);
75471+ else
75472+ vfree(polstate->inodev_set.i_hash);
75473+ }
75474+
75475+ if (!reload)
75476+ gr_free_uidset();
75477+
75478+ memset(&polstate->name_set, 0, sizeof (struct name_db));
75479+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
75480+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
75481+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
75482+
75483+ polstate->default_role = NULL;
75484+ polstate->kernel_role = NULL;
75485+ polstate->role_list = NULL;
75486+
75487+ return;
75488+}
75489+
75490+static struct acl_subject_label *
75491+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
75492+
75493+static int alloc_and_copy_string(char **name, unsigned int maxlen)
75494+{
75495+ unsigned int len = strnlen_user(*name, maxlen);
75496+ char *tmp;
75497+
75498+ if (!len || len >= maxlen)
75499+ return -EINVAL;
75500+
75501+ if ((tmp = (char *) acl_alloc(len)) == NULL)
75502+ return -ENOMEM;
75503+
75504+ if (copy_from_user(tmp, *name, len))
75505+ return -EFAULT;
75506+
75507+ tmp[len-1] = '\0';
75508+ *name = tmp;
75509+
75510+ return 0;
75511+}
75512+
75513+static int
75514+copy_user_glob(struct acl_object_label *obj)
75515+{
75516+ struct acl_object_label *g_tmp, **guser;
75517+ int error;
75518+
75519+ if (obj->globbed == NULL)
75520+ return 0;
75521+
75522+ guser = &obj->globbed;
75523+ while (*guser) {
75524+ g_tmp = (struct acl_object_label *)
75525+ acl_alloc(sizeof (struct acl_object_label));
75526+ if (g_tmp == NULL)
75527+ return -ENOMEM;
75528+
75529+ if (copy_acl_object_label(g_tmp, *guser))
75530+ return -EFAULT;
75531+
75532+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
75533+ if (error)
75534+ return error;
75535+
75536+ *guser = g_tmp;
75537+ guser = &(g_tmp->next);
75538+ }
75539+
75540+ return 0;
75541+}
75542+
75543+static int
75544+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
75545+ struct acl_role_label *role)
75546+{
75547+ struct acl_object_label *o_tmp;
75548+ int ret;
75549+
75550+ while (userp) {
75551+ if ((o_tmp = (struct acl_object_label *)
75552+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
75553+ return -ENOMEM;
75554+
75555+ if (copy_acl_object_label(o_tmp, userp))
75556+ return -EFAULT;
75557+
75558+ userp = o_tmp->prev;
75559+
75560+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
75561+ if (ret)
75562+ return ret;
75563+
75564+ insert_acl_obj_label(o_tmp, subj);
75565+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
75566+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
75567+ return -ENOMEM;
75568+
75569+ ret = copy_user_glob(o_tmp);
75570+ if (ret)
75571+ return ret;
75572+
75573+ if (o_tmp->nested) {
75574+ int already_copied;
75575+
75576+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
75577+ if (IS_ERR(o_tmp->nested))
75578+ return PTR_ERR(o_tmp->nested);
75579+
75580+ /* insert into nested subject list if we haven't copied this one yet
75581+ to prevent duplicate entries */
75582+ if (!already_copied) {
75583+ o_tmp->nested->next = role->hash->first;
75584+ role->hash->first = o_tmp->nested;
75585+ }
75586+ }
75587+ }
75588+
75589+ return 0;
75590+}
75591+
75592+static __u32
75593+count_user_subjs(struct acl_subject_label *userp)
75594+{
75595+ struct acl_subject_label s_tmp;
75596+ __u32 num = 0;
75597+
75598+ while (userp) {
75599+ if (copy_acl_subject_label(&s_tmp, userp))
75600+ break;
75601+		userp = s_tmp.prev;
75602+		num++;
75603+ }
75604+
75605+ return num;
75606+}
75607+
75608+static int
75609+copy_user_allowedips(struct acl_role_label *rolep)
75610+{
75611+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
75612+
75613+ ruserip = rolep->allowed_ips;
75614+
75615+ while (ruserip) {
75616+ rlast = rtmp;
75617+
75618+ if ((rtmp = (struct role_allowed_ip *)
75619+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
75620+ return -ENOMEM;
75621+
75622+ if (copy_role_allowed_ip(rtmp, ruserip))
75623+ return -EFAULT;
75624+
75625+ ruserip = rtmp->prev;
75626+
75627+ if (!rlast) {
75628+ rtmp->prev = NULL;
75629+ rolep->allowed_ips = rtmp;
75630+ } else {
75631+ rlast->next = rtmp;
75632+ rtmp->prev = rlast;
75633+ }
75634+
75635+ if (!ruserip)
75636+ rtmp->next = NULL;
75637+ }
75638+
75639+ return 0;
75640+}
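
Editorial sketch (not part of the patch) of the list rebuild done here and in copy_user_transitions() below: the userspace image is walked through its prev pointers, and the kernel copy is rebuilt in walk order with consistent next/prev links. malloc() stands in for acl_alloc() and the field assignment for the copy_from_user()-based copy helpers.

#include <stdio.h>
#include <stdlib.h>

struct node { int val; struct node *prev, *next; };

static struct node *rebuild(const struct node *u)
{
	struct node *head = NULL, *last = NULL, *n;

	while (u) {
		n = malloc(sizeof(*n));
		if (!n)
			exit(1);	/* the kernel code returns -ENOMEM */
		n->val = u->val;	/* stands in for the user copy */
		u = u->prev;
		if (!last) {
			n->prev = NULL;
			head = n;
		} else {
			last->next = n;
			n->prev = last;
		}
		if (!u)
			n->next = NULL;	/* terminate on the last element */
		last = n;
	}
	return head;
}

int main(void)
{
	struct node c = { 3, NULL, NULL }, b = { 2, &c, NULL }, a = { 1, &b, NULL };
	struct node *n;

	for (n = rebuild(&a); n; n = n->next)
		printf("%d ", n->val);	/* 1 2 3 */
	printf("\n");
	return 0;
}
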
75641+
75642+static int
75643+copy_user_transitions(struct acl_role_label *rolep)
75644+{
75645+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
75646+ int error;
75647+
75648+ rusertp = rolep->transitions;
75649+
75650+ while (rusertp) {
75651+ rlast = rtmp;
75652+
75653+ if ((rtmp = (struct role_transition *)
75654+ acl_alloc(sizeof (struct role_transition))) == NULL)
75655+ return -ENOMEM;
75656+
75657+ if (copy_role_transition(rtmp, rusertp))
75658+ return -EFAULT;
75659+
75660+ rusertp = rtmp->prev;
75661+
75662+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
75663+ if (error)
75664+ return error;
75665+
75666+ if (!rlast) {
75667+ rtmp->prev = NULL;
75668+ rolep->transitions = rtmp;
75669+ } else {
75670+ rlast->next = rtmp;
75671+ rtmp->prev = rlast;
75672+ }
75673+
75674+ if (!rusertp)
75675+ rtmp->next = NULL;
75676+ }
75677+
75678+ return 0;
75679+}
75680+
75681+static __u32 count_user_objs(const struct acl_object_label __user *userp)
75682+{
75683+ struct acl_object_label o_tmp;
75684+ __u32 num = 0;
75685+
75686+ while (userp) {
75687+ if (copy_acl_object_label(&o_tmp, userp))
75688+ break;
75689+
75690+ userp = o_tmp.prev;
75691+ num++;
75692+ }
75693+
75694+ return num;
75695+}
75696+
75697+static struct acl_subject_label *
75698+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
75699+{
75700+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
75701+ __u32 num_objs;
75702+ struct acl_ip_label **i_tmp, *i_utmp2;
75703+ struct gr_hash_struct ghash;
75704+ struct subject_map *subjmap;
75705+ unsigned int i_num;
75706+ int err;
75707+
75708+ if (already_copied != NULL)
75709+ *already_copied = 0;
75710+
75711+ s_tmp = lookup_subject_map(userp);
75712+
75713+ /* we've already copied this subject into the kernel, just return
75714+ the reference to it, and don't copy it over again
75715+ */
75716+ if (s_tmp) {
75717+ if (already_copied != NULL)
75718+ *already_copied = 1;
75719+ return(s_tmp);
75720+ }
75721+
75722+ if ((s_tmp = (struct acl_subject_label *)
75723+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
75724+ return ERR_PTR(-ENOMEM);
75725+
75726+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
75727+ if (subjmap == NULL)
75728+ return ERR_PTR(-ENOMEM);
75729+
75730+ subjmap->user = userp;
75731+ subjmap->kernel = s_tmp;
75732+ insert_subj_map_entry(subjmap);
75733+
75734+ if (copy_acl_subject_label(s_tmp, userp))
75735+ return ERR_PTR(-EFAULT);
75736+
75737+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
75738+ if (err)
75739+ return ERR_PTR(err);
75740+
75741+ if (!strcmp(s_tmp->filename, "/"))
75742+ role->root_label = s_tmp;
75743+
75744+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
75745+ return ERR_PTR(-EFAULT);
75746+
75747+ /* copy user and group transition tables */
75748+
75749+ if (s_tmp->user_trans_num) {
75750+ uid_t *uidlist;
75751+
75752+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
75753+ if (uidlist == NULL)
75754+ return ERR_PTR(-ENOMEM);
75755+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
75756+ return ERR_PTR(-EFAULT);
75757+
75758+ s_tmp->user_transitions = uidlist;
75759+ }
75760+
75761+ if (s_tmp->group_trans_num) {
75762+ gid_t *gidlist;
75763+
75764+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
75765+ if (gidlist == NULL)
75766+ return ERR_PTR(-ENOMEM);
75767+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
75768+ return ERR_PTR(-EFAULT);
75769+
75770+ s_tmp->group_transitions = gidlist;
75771+ }
75772+
75773+ /* set up object hash table */
75774+ num_objs = count_user_objs(ghash.first);
75775+
75776+ s_tmp->obj_hash_size = num_objs;
75777+ s_tmp->obj_hash =
75778+ (struct acl_object_label **)
75779+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
75780+
75781+ if (!s_tmp->obj_hash)
75782+ return ERR_PTR(-ENOMEM);
75783+
75784+ memset(s_tmp->obj_hash, 0,
75785+ s_tmp->obj_hash_size *
75786+ sizeof (struct acl_object_label *));
75787+
75788+ /* add in objects */
75789+ err = copy_user_objs(ghash.first, s_tmp, role);
75790+
75791+ if (err)
75792+ return ERR_PTR(err);
75793+
75794+ /* set pointer for parent subject */
75795+ if (s_tmp->parent_subject) {
75796+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
75797+
75798+ if (IS_ERR(s_tmp2))
75799+ return s_tmp2;
75800+
75801+ s_tmp->parent_subject = s_tmp2;
75802+ }
75803+
75804+ /* add in ip acls */
75805+
75806+ if (!s_tmp->ip_num) {
75807+ s_tmp->ips = NULL;
75808+ goto insert;
75809+ }
75810+
75811+ i_tmp =
75812+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
75813+ sizeof (struct acl_ip_label *));
75814+
75815+ if (!i_tmp)
75816+ return ERR_PTR(-ENOMEM);
75817+
75818+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
75819+ *(i_tmp + i_num) =
75820+ (struct acl_ip_label *)
75821+ acl_alloc(sizeof (struct acl_ip_label));
75822+ if (!*(i_tmp + i_num))
75823+ return ERR_PTR(-ENOMEM);
75824+
75825+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
75826+ return ERR_PTR(-EFAULT);
75827+
75828+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
75829+ return ERR_PTR(-EFAULT);
75830+
75831+ if ((*(i_tmp + i_num))->iface == NULL)
75832+ continue;
75833+
75834+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
75835+ if (err)
75836+ return ERR_PTR(err);
75837+ }
75838+
75839+ s_tmp->ips = i_tmp;
75840+
75841+insert:
75842+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
75843+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
75844+ return ERR_PTR(-ENOMEM);
75845+
75846+ return s_tmp;
75847+}
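
do_copy_user_subj() records each userspace subject pointer in the subject map before filling in its copy, so shared references (parent_subject chains, nested subjects reached from several objects) resolve to one kernel object and the recursion terminates even on cycles. A minimal editorial analogue of that memoized deep copy (not part of the patch; the fixed-size map stands in for the subj_map_set hash):

#include <stdio.h>
#include <stdlib.h>

struct subj { int id; const struct subj *parent; };
struct ksubj { int id; struct ksubj *parent; };

#define MAP_MAX 64
static const struct subj *map_user[MAP_MAX];
static struct ksubj *map_kern[MAP_MAX];
static int map_len;

static struct ksubj *copy_subj(const struct subj *u)
{
	struct ksubj *k;
	int i;

	for (i = 0; i < map_len; i++)	/* lookup_subject_map() analogue */
		if (map_user[i] == u)
			return map_kern[i];

	k = calloc(1, sizeof(*k));
	if (!k || map_len == MAP_MAX)
		exit(1);
	map_user[map_len] = u;		/* insert *before* recursing, so */
	map_kern[map_len++] = k;	/* cyclic references hit the memo */

	k->id = u->id;
	if (u->parent)
		k->parent = copy_subj(u->parent);
	return k;
}

int main(void)
{
	struct subj root = { 1, NULL }, c1 = { 2, &root }, c2 = { 3, &root };
	struct ksubj *a = copy_subj(&c1), *b = copy_subj(&c2);

	printf("shared parent: %d\n", a->parent == b->parent);	/* 1 */
	return 0;
}
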
75848+
75849+static int
75850+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
75851+{
75852+ struct acl_subject_label s_pre;
75853+ struct acl_subject_label * ret;
75854+ int err;
75855+
75856+ while (userp) {
75857+ if (copy_acl_subject_label(&s_pre, userp))
75858+ return -EFAULT;
75859+
75860+ ret = do_copy_user_subj(userp, role, NULL);
75861+
75862+ err = PTR_ERR(ret);
75863+ if (IS_ERR(ret))
75864+ return err;
75865+
75866+ insert_acl_subj_label(ret, role);
75867+
75868+ userp = s_pre.prev;
75869+ }
75870+
75871+ return 0;
75872+}
75873+
75874+static int
75875+copy_user_acl(struct gr_arg *arg)
75876+{
75877+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
75878+ struct acl_subject_label *subj_list;
75879+ struct sprole_pw *sptmp;
75880+ struct gr_hash_struct *ghash;
75881+ uid_t *domainlist;
75882+ unsigned int r_num;
75883+ int err = 0;
75884+ __u16 i;
75885+ __u32 num_subjs;
75886+
75887+ /* we need a default and kernel role */
75888+ if (arg->role_db.num_roles < 2)
75889+ return -EINVAL;
75890+
75891+ /* copy special role authentication info from userspace */
75892+
75893+ polstate->num_sprole_pws = arg->num_sprole_pws;
75894+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
75895+
75896+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
75897+ return -ENOMEM;
75898+
75899+ for (i = 0; i < polstate->num_sprole_pws; i++) {
75900+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
75901+ if (!sptmp)
75902+ return -ENOMEM;
75903+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
75904+ return -EFAULT;
75905+
75906+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
75907+ if (err)
75908+ return err;
75909+
75910+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
75911+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
75912+#endif
75913+
75914+ polstate->acl_special_roles[i] = sptmp;
75915+ }
75916+
75917+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
75918+
75919+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
75920+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
75921+
75922+ if (!r_tmp)
75923+ return -ENOMEM;
75924+
75925+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
75926+ return -EFAULT;
75927+
75928+ if (copy_acl_role_label(r_tmp, r_utmp2))
75929+ return -EFAULT;
75930+
75931+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
75932+ if (err)
75933+ return err;
75934+
75935+ if (!strcmp(r_tmp->rolename, "default")
75936+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
75937+ polstate->default_role = r_tmp;
75938+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
75939+ polstate->kernel_role = r_tmp;
75940+ }
75941+
75942+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
75943+ return -ENOMEM;
75944+
75945+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
75946+ return -EFAULT;
75947+
75948+ r_tmp->hash = ghash;
75949+
75950+ num_subjs = count_user_subjs(r_tmp->hash->first);
75951+
75952+ r_tmp->subj_hash_size = num_subjs;
75953+ r_tmp->subj_hash =
75954+ (struct acl_subject_label **)
75955+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
75956+
75957+ if (!r_tmp->subj_hash)
75958+ return -ENOMEM;
75959+
75960+ err = copy_user_allowedips(r_tmp);
75961+ if (err)
75962+ return err;
75963+
75964+ /* copy domain info */
75965+ if (r_tmp->domain_children != NULL) {
75966+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
75967+ if (domainlist == NULL)
75968+ return -ENOMEM;
75969+
75970+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
75971+ return -EFAULT;
75972+
75973+ r_tmp->domain_children = domainlist;
75974+ }
75975+
75976+ err = copy_user_transitions(r_tmp);
75977+ if (err)
75978+ return err;
75979+
75980+ memset(r_tmp->subj_hash, 0,
75981+ r_tmp->subj_hash_size *
75982+ sizeof (struct acl_subject_label *));
75983+
75984+ /* acquire the list of subjects, then NULL out
75985+ the list prior to parsing the subjects for this role,
75986+ as during this parsing the list is replaced with a list
75987+ of *nested* subjects for the role
75988+ */
75989+ subj_list = r_tmp->hash->first;
75990+
75991+ /* set nested subject list to null */
75992+ r_tmp->hash->first = NULL;
75993+
75994+ err = copy_user_subjs(subj_list, r_tmp);
75995+
75996+ if (err)
75997+ return err;
75998+
75999+ insert_acl_role_label(r_tmp);
76000+ }
76001+
76002+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
76003+ return -EINVAL;
76004+
76005+ return err;
76006+}
76007+
76008+static int gracl_reload_apply_policies(void *reload)
76009+{
76010+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
76011+ struct task_struct *task, *task2;
76012+ struct acl_role_label *role, *rtmp;
76013+ struct acl_subject_label *subj;
76014+ const struct cred *cred;
76015+ int role_applied;
76016+ int ret = 0;
76017+
76018+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
76019+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
76020+
76021+ /* first make sure we'll be able to apply the new policy cleanly */
76022+ do_each_thread(task2, task) {
76023+ if (task->exec_file == NULL)
76024+ continue;
76025+ role_applied = 0;
76026+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
76027+ /* preserve special roles */
76028+ FOR_EACH_ROLE_START(role)
76029+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
76030+ rtmp = task->role;
76031+ task->role = role;
76032+ role_applied = 1;
76033+ break;
76034+ }
76035+ FOR_EACH_ROLE_END(role)
76036+ }
76037+ if (!role_applied) {
76038+ cred = __task_cred(task);
76039+ rtmp = task->role;
76040+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
76041+ }
76042+		/* this handles non-nested inherited subjects; nested subjects will still
76043+		   be dropped currently */
76044+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
76045+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL);
76046+ /* change the role back so that we've made no modifications to the policy */
76047+ task->role = rtmp;
76048+
76049+ if (subj == NULL || task->tmpacl == NULL) {
76050+ ret = -EINVAL;
76051+ goto out;
76052+ }
76053+ } while_each_thread(task2, task);
76054+
76055+ /* now actually apply the policy */
76056+
76057+ do_each_thread(task2, task) {
76058+ if (task->exec_file) {
76059+ role_applied = 0;
76060+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
76061+ /* preserve special roles */
76062+ FOR_EACH_ROLE_START(role)
76063+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
76064+ task->role = role;
76065+ role_applied = 1;
76066+ break;
76067+ }
76068+ FOR_EACH_ROLE_END(role)
76069+ }
76070+ if (!role_applied) {
76071+ cred = __task_cred(task);
76072+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
76073+ }
76074+			/* this handles non-nested inherited subjects; nested subjects will still
76075+			   be dropped currently */
76076+ if (!reload_state->oldmode && task->inherited)
76077+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
76078+ else {
76079+ /* looked up and tagged to the task previously */
76080+ subj = task->tmpacl;
76081+ }
76082+ /* subj will be non-null */
76083+ __gr_apply_subject_to_task(polstate, task, subj);
76084+ if (reload_state->oldmode) {
76085+ task->acl_role_id = 0;
76086+ task->acl_sp_role = 0;
76087+ task->inherited = 0;
76088+ }
76089+ } else {
76090+ // it's a kernel process
76091+ task->role = polstate->kernel_role;
76092+ task->acl = polstate->kernel_role->root_label;
76093+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
76094+ task->acl->mode &= ~GR_PROCFIND;
76095+#endif
76096+ }
76097+ } while_each_thread(task2, task);
76098+
76099+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
76100+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
76101+
76102+out:
76103+
76104+ return ret;
76105+}
76106+
76107+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
76108+{
76109+ struct gr_reload_state new_reload_state = { };
76110+ int err;
76111+
76112+ new_reload_state.oldpolicy_ptr = polstate;
76113+ new_reload_state.oldalloc_ptr = current_alloc_state;
76114+ new_reload_state.oldmode = oldmode;
76115+
76116+ current_alloc_state = &new_reload_state.newalloc;
76117+ polstate = &new_reload_state.newpolicy;
76118+
76119+ /* everything relevant is now saved off, copy in the new policy */
76120+ if (init_variables(args, true)) {
76121+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
76122+ err = -ENOMEM;
76123+ goto error;
76124+ }
76125+
76126+ err = copy_user_acl(args);
76127+ free_init_variables();
76128+ if (err)
76129+ goto error;
76130+	/* the new policy is copied in, with the old policy available via saved_state.
76131+	   First go through applying roles, making sure to preserve special roles,
76132+	   then apply new subjects, making sure to preserve inherited and nested subjects,
76133+	   though currently only inherited subjects will be preserved
76134+	*/
76135+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
76136+ if (err)
76137+ goto error;
76138+
76139+ /* we've now applied the new policy, so restore the old policy state to free it */
76140+ polstate = &new_reload_state.oldpolicy;
76141+ current_alloc_state = &new_reload_state.oldalloc;
76142+ free_variables(true);
76143+
76144+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
76145+ to running_polstate/current_alloc_state inside stop_machine
76146+ */
76147+ err = 0;
76148+ goto out;
76149+error:
76150+	/* if loading the new policy fails, we'll just keep the previous
76151+	   policy set around
76152+	*/
76153+ free_variables(true);
76154+
76155+ /* doesn't affect runtime, but maintains consistent state */
76156+out:
76157+ polstate = new_reload_state.oldpolicy_ptr;
76158+ current_alloc_state = new_reload_state.oldalloc_ptr;
76159+
76160+ return err;
76161+}
76162+
76163+static int
76164+gracl_init(struct gr_arg *args)
76165+{
76166+ int error = 0;
76167+
76168+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
76169+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
76170+
76171+ if (init_variables(args, false)) {
76172+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
76173+ error = -ENOMEM;
76174+ goto out;
76175+ }
76176+
76177+ error = copy_user_acl(args);
76178+ free_init_variables();
76179+ if (error)
76180+ goto out;
76181+
76182+ error = gr_set_acls(0);
76183+ if (error)
76184+ goto out;
76185+
76186+ gr_enable_rbac_system();
76187+
76188+ return 0;
76189+
76190+out:
76191+ free_variables(false);
76192+ return error;
76193+}
76194+
76195+static int
76196+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
76197+ unsigned char **sum)
76198+{
76199+ struct acl_role_label *r;
76200+ struct role_allowed_ip *ipp;
76201+ struct role_transition *trans;
76202+ unsigned int i;
76203+ int found = 0;
76204+ u32 curr_ip = current->signal->curr_ip;
76205+
76206+ current->signal->saved_ip = curr_ip;
76207+
76208+ /* check transition table */
76209+
76210+ for (trans = current->role->transitions; trans; trans = trans->next) {
76211+ if (!strcmp(rolename, trans->rolename)) {
76212+ found = 1;
76213+ break;
76214+ }
76215+ }
76216+
76217+ if (!found)
76218+ return 0;
76219+
76220+ /* handle special roles that do not require authentication
76221+ and check ip */
76222+
76223+ FOR_EACH_ROLE_START(r)
76224+ if (!strcmp(rolename, r->rolename) &&
76225+ (r->roletype & GR_ROLE_SPECIAL)) {
76226+ found = 0;
76227+ if (r->allowed_ips != NULL) {
76228+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
76229+ if ((ntohl(curr_ip) & ipp->netmask) ==
76230+ (ntohl(ipp->addr) & ipp->netmask))
76231+ found = 1;
76232+ }
76233+ } else
76234+ found = 2;
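+			/* found == 2: no allowed-IP list is configured for this role,
+			   so any source address is acceptable */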
76235+ if (!found)
76236+ return 0;
76237+
76238+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
76239+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
76240+ *salt = NULL;
76241+ *sum = NULL;
76242+ return 1;
76243+ }
76244+ }
76245+ FOR_EACH_ROLE_END(r)
76246+
76247+ for (i = 0; i < polstate->num_sprole_pws; i++) {
76248+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
76249+ *salt = polstate->acl_special_roles[i]->salt;
76250+ *sum = polstate->acl_special_roles[i]->sum;
76251+ return 1;
76252+ }
76253+ }
76254+
76255+ return 0;
76256+}
76257+
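+/* returns 0 (and alerts) if a process outside our ancestry holds our
+   controlling tty open, i.e. a potential tty sniffer; returns 1 otherwise */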
76258+int gr_check_secure_terminal(struct task_struct *task)
76259+{
76260+ struct task_struct *p, *p2, *p3;
76261+ struct files_struct *files;
76262+ struct fdtable *fdt;
76263+ struct file *our_file = NULL, *file;
76264+ int i;
76265+
76266+ if (task->signal->tty == NULL)
76267+ return 1;
76268+
76269+ files = get_files_struct(task);
76270+ if (files != NULL) {
76271+ rcu_read_lock();
76272+ fdt = files_fdtable(files);
76273+		for (i = 0; i < fdt->max_fds; i++) {
76274+ file = fcheck_files(files, i);
76275+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
76276+ get_file(file);
76277+ our_file = file;
76278+ }
76279+ }
76280+ rcu_read_unlock();
76281+ put_files_struct(files);
76282+ }
76283+
76284+ if (our_file == NULL)
76285+ return 1;
76286+
76287+ read_lock(&tasklist_lock);
76288+ do_each_thread(p2, p) {
76289+ files = get_files_struct(p);
76290+ if (files == NULL ||
76291+ (p->signal && p->signal->tty == task->signal->tty)) {
76292+ if (files != NULL)
76293+ put_files_struct(files);
76294+ continue;
76295+ }
76296+ rcu_read_lock();
76297+ fdt = files_fdtable(files);
76298+		for (i = 0; i < fdt->max_fds; i++) {
76299+ file = fcheck_files(files, i);
76300+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
76301+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
76302+ p3 = task;
76303+ while (task_pid_nr(p3) > 0) {
76304+ if (p3 == p)
76305+ break;
76306+ p3 = p3->real_parent;
76307+ }
76308+ if (p3 == p)
76309+ break;
76310+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
76311+ gr_handle_alertkill(p);
76312+ rcu_read_unlock();
76313+ put_files_struct(files);
76314+ read_unlock(&tasklist_lock);
76315+ fput(our_file);
76316+ return 0;
76317+ }
76318+ }
76319+ rcu_read_unlock();
76320+ put_files_struct(files);
76321+ } while_each_thread(p2, p);
76322+ read_unlock(&tasklist_lock);
76323+
76324+ fput(our_file);
76325+ return 1;
76326+}
76327+
76328+ssize_t
76329+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
76330+{
76331+ struct gr_arg_wrapper uwrap;
76332+ unsigned char *sprole_salt = NULL;
76333+ unsigned char *sprole_sum = NULL;
76334+ int error = 0;
76335+ int error2 = 0;
76336+ size_t req_count = 0;
76337+ unsigned char oldmode = 0;
76338+
76339+ mutex_lock(&gr_dev_mutex);
76340+
76341+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
76342+ error = -EPERM;
76343+ goto out;
76344+ }
76345+
76346+#ifdef CONFIG_COMPAT
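+	/* select the compat or native copy routines based on the ABI of the calling task */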
76347+ pax_open_kernel();
76348+ if (is_compat_task()) {
76349+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
76350+ copy_gr_arg = &copy_gr_arg_compat;
76351+ copy_acl_object_label = &copy_acl_object_label_compat;
76352+ copy_acl_subject_label = &copy_acl_subject_label_compat;
76353+ copy_acl_role_label = &copy_acl_role_label_compat;
76354+ copy_acl_ip_label = &copy_acl_ip_label_compat;
76355+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
76356+ copy_role_transition = &copy_role_transition_compat;
76357+ copy_sprole_pw = &copy_sprole_pw_compat;
76358+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
76359+ copy_pointer_from_array = &copy_pointer_from_array_compat;
76360+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
76361+ } else {
76362+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
76363+ copy_gr_arg = &copy_gr_arg_normal;
76364+ copy_acl_object_label = &copy_acl_object_label_normal;
76365+ copy_acl_subject_label = &copy_acl_subject_label_normal;
76366+ copy_acl_role_label = &copy_acl_role_label_normal;
76367+ copy_acl_ip_label = &copy_acl_ip_label_normal;
76368+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
76369+ copy_role_transition = &copy_role_transition_normal;
76370+ copy_sprole_pw = &copy_sprole_pw_normal;
76371+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
76372+ copy_pointer_from_array = &copy_pointer_from_array_normal;
76373+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
76374+ }
76375+ pax_close_kernel();
76376+#endif
76377+
76378+ req_count = get_gr_arg_wrapper_size();
76379+
76380+ if (count != req_count) {
76381+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
76382+ error = -EINVAL;
76383+ goto out;
76384+ }
76385+
76387+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
76388+ gr_auth_expires = 0;
76389+ gr_auth_attempts = 0;
76390+ }
76391+
76392+ error = copy_gr_arg_wrapper(buf, &uwrap);
76393+ if (error)
76394+ goto out;
76395+
76396+ error = copy_gr_arg(uwrap.arg, gr_usermode);
76397+ if (error)
76398+ goto out;
76399+
76400+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
76401+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
76402+ time_after(gr_auth_expires, get_seconds())) {
76403+ error = -EBUSY;
76404+ goto out;
76405+ }
76406+
76407+	/* if a non-root user is trying to do anything other than use a special role,
76408+	   do not attempt authentication and do not count it towards the
76409+	   authentication lockout
76410+	*/
76411+
76412+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
76413+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
76414+ gr_is_global_nonroot(current_uid())) {
76415+ error = -EPERM;
76416+ goto out;
76417+ }
76418+
76419+ /* ensure pw and special role name are null terminated */
76420+
76421+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
76422+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
76423+
76424+	/* Okay.
76425+	 * We now have enough of the argument structure (we have yet
76426+	 * to copy_from_user the tables themselves).  Copy the tables
76427+	 * only if we need them, i.e. for loading operations. */
76428+
76429+ switch (gr_usermode->mode) {
76430+ case GR_STATUS:
76431+ if (gr_acl_is_enabled()) {
76432+ error = 1;
76433+ if (!gr_check_secure_terminal(current))
76434+ error = 3;
76435+ } else
76436+ error = 2;
76437+ goto out;
76438+ case GR_SHUTDOWN:
76439+ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
76440+ stop_machine(gr_rbac_disable, NULL, NULL);
76441+ free_variables(false);
76442+ memset(gr_usermode, 0, sizeof(struct gr_arg));
76443+ memset(gr_system_salt, 0, GR_SALT_LEN);
76444+ memset(gr_system_sum, 0, GR_SHA_LEN);
76445+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
76446+ } else if (gr_acl_is_enabled()) {
76447+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
76448+ error = -EPERM;
76449+ } else {
76450+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
76451+ error = -EAGAIN;
76452+ }
76453+ break;
76454+ case GR_ENABLE:
76455+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
76456+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
76457+ else {
76458+ if (gr_acl_is_enabled())
76459+ error = -EAGAIN;
76460+ else
76461+ error = error2;
76462+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
76463+ }
76464+ break;
76465+ case GR_OLDRELOAD:
76466+ oldmode = 1;
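+		/* fall through: GR_OLDRELOAD is handled as a reload with oldmode set */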
76467+ case GR_RELOAD:
76468+ if (!gr_acl_is_enabled()) {
76469+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
76470+ error = -EAGAIN;
76471+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
76472+ error2 = gracl_reload(gr_usermode, oldmode);
76473+ if (!error2)
76474+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
76475+ else {
76476+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
76477+ error = error2;
76478+ }
76479+ } else {
76480+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
76481+ error = -EPERM;
76482+ }
76483+ break;
76484+ case GR_SEGVMOD:
76485+ if (unlikely(!gr_acl_is_enabled())) {
76486+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
76487+ error = -EAGAIN;
76488+ break;
76489+ }
76490+
76491+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
76492+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
76493+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
76494+ struct acl_subject_label *segvacl;
76495+ segvacl =
76496+ lookup_acl_subj_label(gr_usermode->segv_inode,
76497+ gr_usermode->segv_device,
76498+ current->role);
76499+ if (segvacl) {
76500+ segvacl->crashes = 0;
76501+ segvacl->expires = 0;
76502+ }
76503+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
76504+ gr_remove_uid(gr_usermode->segv_uid);
76505+ }
76506+ } else {
76507+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
76508+ error = -EPERM;
76509+ }
76510+ break;
76511+ case GR_SPROLE:
76512+ case GR_SPROLEPAM:
76513+ if (unlikely(!gr_acl_is_enabled())) {
76514+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
76515+ error = -EAGAIN;
76516+ break;
76517+ }
76518+
76519+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
76520+ current->role->expires = 0;
76521+ current->role->auth_attempts = 0;
76522+ }
76523+
76524+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
76525+ time_after(current->role->expires, get_seconds())) {
76526+ error = -EBUSY;
76527+ goto out;
76528+ }
76529+
76530+ if (lookup_special_role_auth
76531+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
76532+ && ((!sprole_salt && !sprole_sum)
76533+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
76534+ char *p = "";
76535+ assign_special_role(gr_usermode->sp_role);
76536+ read_lock(&tasklist_lock);
76537+ if (current->real_parent)
76538+ p = current->real_parent->role->rolename;
76539+ read_unlock(&tasklist_lock);
76540+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
76541+ p, acl_sp_role_value);
76542+ } else {
76543+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
76544+ error = -EPERM;
76545+			if (!(current->role->auth_attempts++))
76546+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
76547+
76548+ goto out;
76549+ }
76550+ break;
76551+ case GR_UNSPROLE:
76552+ if (unlikely(!gr_acl_is_enabled())) {
76553+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
76554+ error = -EAGAIN;
76555+ break;
76556+ }
76557+
76558+ if (current->role->roletype & GR_ROLE_SPECIAL) {
76559+ char *p = "";
76560+ int i = 0;
76561+
76562+ read_lock(&tasklist_lock);
76563+ if (current->real_parent) {
76564+ p = current->real_parent->role->rolename;
76565+ i = current->real_parent->acl_role_id;
76566+ }
76567+ read_unlock(&tasklist_lock);
76568+
76569+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
76570+ gr_set_acls(1);
76571+ } else {
76572+ error = -EPERM;
76573+ goto out;
76574+ }
76575+ break;
76576+ default:
76577+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
76578+ error = -EINVAL;
76579+ break;
76580+ }
76581+
76582+ if (error != -EPERM)
76583+ goto out;
76584+
76585+	if (!(gr_auth_attempts++))
76586+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
76587+
76588+ out:
76589+ mutex_unlock(&gr_dev_mutex);
76590+
76591+ if (!error)
76592+ error = req_count;
76593+
76594+ return error;
76595+}
76596+
76597+int
76598+gr_set_acls(const int type)
76599+{
76600+ struct task_struct *task, *task2;
76601+ struct acl_role_label *role = current->role;
76602+ struct acl_subject_label *subj;
76603+ __u16 acl_role_id = current->acl_role_id;
76604+ const struct cred *cred;
76605+ int ret;
76606+
76607+ rcu_read_lock();
76608+ read_lock(&tasklist_lock);
76609+ read_lock(&grsec_exec_file_lock);
76610+ do_each_thread(task2, task) {
76611+		/* check to see if we're called from the exit handler;
76612+		   if so, only replace ACLs that have inherited the admin
76613+		   ACL */
76614+
76615+ if (type && (task->role != role ||
76616+ task->acl_role_id != acl_role_id))
76617+ continue;
76618+
76619+ task->acl_role_id = 0;
76620+ task->acl_sp_role = 0;
76621+ task->inherited = 0;
76622+
76623+ if (task->exec_file) {
76624+ cred = __task_cred(task);
76625+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
76626+ subj = __gr_get_subject_for_task(polstate, task, NULL);
76627+ if (subj == NULL) {
76628+ ret = -EINVAL;
76629+ read_unlock(&grsec_exec_file_lock);
76630+ read_unlock(&tasklist_lock);
76631+ rcu_read_unlock();
76632+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
76633+ return ret;
76634+ }
76635+ __gr_apply_subject_to_task(polstate, task, subj);
76636+ } else {
76637+ // it's a kernel process
76638+ task->role = polstate->kernel_role;
76639+ task->acl = polstate->kernel_role->root_label;
76640+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
76641+ task->acl->mode &= ~GR_PROCFIND;
76642+#endif
76643+ }
76644+ } while_each_thread(task2, task);
76645+ read_unlock(&grsec_exec_file_lock);
76646+ read_unlock(&tasklist_lock);
76647+ rcu_read_unlock();
76648+
76649+ return 0;
76650+}
76651diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
76652new file mode 100644
76653index 0000000..39645c9
76654--- /dev/null
76655+++ b/grsecurity/gracl_res.c
76656@@ -0,0 +1,68 @@
76657+#include <linux/kernel.h>
76658+#include <linux/sched.h>
76659+#include <linux/gracl.h>
76660+#include <linux/grinternal.h>
76661+
76662+static const char *restab_log[] = {
76663+ [RLIMIT_CPU] = "RLIMIT_CPU",
76664+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
76665+ [RLIMIT_DATA] = "RLIMIT_DATA",
76666+ [RLIMIT_STACK] = "RLIMIT_STACK",
76667+ [RLIMIT_CORE] = "RLIMIT_CORE",
76668+ [RLIMIT_RSS] = "RLIMIT_RSS",
76669+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
76670+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
76671+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
76672+ [RLIMIT_AS] = "RLIMIT_AS",
76673+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
76674+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
76675+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
76676+ [RLIMIT_NICE] = "RLIMIT_NICE",
76677+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
76678+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
76679+ [GR_CRASH_RES] = "RLIMIT_CRASH"
76680+};
76681+
76682+void
76683+gr_log_resource(const struct task_struct *task,
76684+ const int res, const unsigned long wanted, const int gt)
76685+{
76686+ const struct cred *cred;
76687+ unsigned long rlim;
76688+
76689+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
76690+ return;
76691+
76692+	// resource not yet supported in restab_log
76693+ if (unlikely(!restab_log[res]))
76694+ return;
76695+
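+	/* RLIMIT_CPU and RLIMIT_RTTIME are checked against the hard limit;
+	   all other resources are checked against the soft limit */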
76696+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
76697+ rlim = task_rlimit_max(task, res);
76698+ else
76699+ rlim = task_rlimit(task, res);
76700+
76701+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
76702+ return;
76703+
76704+ rcu_read_lock();
76705+ cred = __task_cred(task);
76706+
76707+ if (res == RLIMIT_NPROC &&
76708+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
76709+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
76710+ goto out_rcu_unlock;
76711+ else if (res == RLIMIT_MEMLOCK &&
76712+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
76713+ goto out_rcu_unlock;
76714+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
76715+ goto out_rcu_unlock;
76716+ rcu_read_unlock();
76717+
76718+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
76719+
76720+ return;
76721+out_rcu_unlock:
76722+ rcu_read_unlock();
76723+ return;
76724+}
76725diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
76726new file mode 100644
76727index 0000000..2040e61
76728--- /dev/null
76729+++ b/grsecurity/gracl_segv.c
76730@@ -0,0 +1,313 @@
76731+#include <linux/kernel.h>
76732+#include <linux/mm.h>
76733+#include <asm/uaccess.h>
76734+#include <asm/errno.h>
76735+#include <asm/mman.h>
76736+#include <net/sock.h>
76737+#include <linux/file.h>
76738+#include <linux/fs.h>
76739+#include <linux/net.h>
76740+#include <linux/in.h>
76741+#include <linux/slab.h>
76742+#include <linux/types.h>
76743+#include <linux/sched.h>
76744+#include <linux/timer.h>
76745+#include <linux/gracl.h>
76746+#include <linux/grsecurity.h>
76747+#include <linux/grinternal.h>
76748+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
76749+#include <linux/magic.h>
76750+#include <linux/pagemap.h>
76751+#include "../fs/btrfs/async-thread.h"
76752+#include "../fs/btrfs/ctree.h"
76753+#include "../fs/btrfs/btrfs_inode.h"
76754+#endif
76755+
76756+static struct crash_uid *uid_set;
76757+static unsigned short uid_used;
76758+static DEFINE_SPINLOCK(gr_uid_lock);
76759+extern rwlock_t gr_inode_lock;
76760+extern struct acl_subject_label *
76761+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
76762+ struct acl_role_label *role);
76763+
76764+static inline dev_t __get_dev(const struct dentry *dentry)
76765+{
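+	/* btrfs reports a per-subvolume anonymous device, so use it for consistent subject lookups */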
76766+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
76767+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
76768+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
76769+ else
76770+#endif
76771+ return dentry->d_sb->s_dev;
76772+}
76773+
76774+int
76775+gr_init_uidset(void)
76776+{
76777+ uid_set =
76778+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
76779+ uid_used = 0;
76780+
76781+ return uid_set ? 1 : 0;
76782+}
76783+
76784+void
76785+gr_free_uidset(void)
76786+{
76787+ if (uid_set) {
76788+ struct crash_uid *tmpset;
76789+ spin_lock(&gr_uid_lock);
76790+ tmpset = uid_set;
76791+ uid_set = NULL;
76792+ uid_used = 0;
76793+ spin_unlock(&gr_uid_lock);
76794+		kfree(tmpset);
76796+ }
76797+
76798+ return;
76799+}
76800+
76801+int
76802+gr_find_uid(const uid_t uid)
76803+{
76804+ struct crash_uid *tmp = uid_set;
76805+ uid_t buid;
76806+ int low = 0, high = uid_used - 1, mid;
76807+
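+	/* binary search; uid_set is kept sorted by ascending uid via gr_insertsort() */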
76808+ while (high >= low) {
76809+ mid = (low + high) >> 1;
76810+ buid = tmp[mid].uid;
76811+ if (buid == uid)
76812+ return mid;
76813+ if (buid > uid)
76814+ high = mid - 1;
76815+ if (buid < uid)
76816+ low = mid + 1;
76817+ }
76818+
76819+ return -1;
76820+}
76821+
76822+static __inline__ void
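+/* insertion sort keeping uid_set ordered by ascending uid, as required by gr_find_uid() */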
76823+gr_insertsort(void)
76824+{
76825+ unsigned short i, j;
76826+ struct crash_uid index;
76827+
76828+ for (i = 1; i < uid_used; i++) {
76829+ index = uid_set[i];
76830+ j = i;
76831+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
76832+ uid_set[j] = uid_set[j - 1];
76833+ j--;
76834+ }
76835+ uid_set[j] = index;
76836+ }
76837+
76838+ return;
76839+}
76840+
76841+static __inline__ void
76842+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
76843+{
76844+ int loc;
76845+ uid_t uid = GR_GLOBAL_UID(kuid);
76846+
76847+ if (uid_used == GR_UIDTABLE_MAX)
76848+ return;
76849+
76850+ loc = gr_find_uid(uid);
76851+
76852+ if (loc >= 0) {
76853+ uid_set[loc].expires = expires;
76854+ return;
76855+ }
76856+
76857+ uid_set[uid_used].uid = uid;
76858+ uid_set[uid_used].expires = expires;
76859+ uid_used++;
76860+
76861+ gr_insertsort();
76862+
76863+ return;
76864+}
76865+
76866+void
76867+gr_remove_uid(const unsigned short loc)
76868+{
76869+ unsigned short i;
76870+
76871+ for (i = loc + 1; i < uid_used; i++)
76872+ uid_set[i - 1] = uid_set[i];
76873+
76874+ uid_used--;
76875+
76876+ return;
76877+}
76878+
76879+int
76880+gr_check_crash_uid(const kuid_t kuid)
76881+{
76882+ int loc;
76883+ int ret = 0;
76884+ uid_t uid;
76885+
76886+ if (unlikely(!gr_acl_is_enabled()))
76887+ return 0;
76888+
76889+ uid = GR_GLOBAL_UID(kuid);
76890+
76891+ spin_lock(&gr_uid_lock);
76892+ loc = gr_find_uid(uid);
76893+
76894+ if (loc < 0)
76895+ goto out_unlock;
76896+
76897+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
76898+ gr_remove_uid(loc);
76899+ else
76900+ ret = 1;
76901+
76902+out_unlock:
76903+ spin_unlock(&gr_uid_lock);
76904+ return ret;
76905+}
76906+
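+/* a task is treated as setxid if any of its real, effective, saved, or fs ids differ */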
76907+static __inline__ int
76908+proc_is_setxid(const struct cred *cred)
76909+{
76910+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
76911+ !uid_eq(cred->uid, cred->fsuid))
76912+ return 1;
76913+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
76914+ !gid_eq(cred->gid, cred->fsgid))
76915+ return 1;
76916+
76917+ return 0;
76918+}
76919+
76920+extern int gr_fake_force_sig(int sig, struct task_struct *t);
76921+
76922+void
76923+gr_handle_crash(struct task_struct *task, const int sig)
76924+{
76925+ struct acl_subject_label *curr;
76926+ struct task_struct *tsk, *tsk2;
76927+ const struct cred *cred;
76928+ const struct cred *cred2;
76929+
76930+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
76931+ return;
76932+
76933+ if (unlikely(!gr_acl_is_enabled()))
76934+ return;
76935+
76936+ curr = task->acl;
76937+
76938+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
76939+ return;
76940+
76941+ if (time_before_eq(curr->expires, get_seconds())) {
76942+ curr->expires = 0;
76943+ curr->crashes = 0;
76944+ }
76945+
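+	/* count this crash; reaching the configured crash count within the
+	   time window triggers the handling below */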
76946+ curr->crashes++;
76947+
76948+ if (!curr->expires)
76949+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
76950+
76951+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
76952+ time_after(curr->expires, get_seconds())) {
76953+ rcu_read_lock();
76954+ cred = __task_cred(task);
76955+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
76956+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
76957+ spin_lock(&gr_uid_lock);
76958+ gr_insert_uid(cred->uid, curr->expires);
76959+ spin_unlock(&gr_uid_lock);
76960+ curr->expires = 0;
76961+ curr->crashes = 0;
76962+ read_lock(&tasklist_lock);
76963+ do_each_thread(tsk2, tsk) {
76964+ cred2 = __task_cred(tsk);
76965+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
76966+ gr_fake_force_sig(SIGKILL, tsk);
76967+ } while_each_thread(tsk2, tsk);
76968+ read_unlock(&tasklist_lock);
76969+ } else {
76970+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
76971+ read_lock(&tasklist_lock);
76972+ read_lock(&grsec_exec_file_lock);
76973+ do_each_thread(tsk2, tsk) {
76974+ if (likely(tsk != task)) {
76975+ // if this thread has the same subject as the one that triggered
76976+ // RES_CRASH and it's the same binary, kill it
76977+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
76978+ gr_fake_force_sig(SIGKILL, tsk);
76979+ }
76980+ } while_each_thread(tsk2, tsk);
76981+ read_unlock(&grsec_exec_file_lock);
76982+ read_unlock(&tasklist_lock);
76983+ }
76984+ rcu_read_unlock();
76985+ }
76986+
76987+ return;
76988+}
76989+
76990+int
76991+gr_check_crash_exec(const struct file *filp)
76992+{
76993+ struct acl_subject_label *curr;
76994+
76995+ if (unlikely(!gr_acl_is_enabled()))
76996+ return 0;
76997+
76998+ read_lock(&gr_inode_lock);
76999+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
77000+ __get_dev(filp->f_path.dentry),
77001+ current->role);
77002+ read_unlock(&gr_inode_lock);
77003+
77004+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
77005+ (!curr->crashes && !curr->expires))
77006+ return 0;
77007+
77008+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
77009+ time_after(curr->expires, get_seconds()))
77010+ return 1;
77011+ else if (time_before_eq(curr->expires, get_seconds())) {
77012+ curr->crashes = 0;
77013+ curr->expires = 0;
77014+ }
77015+
77016+ return 0;
77017+}
77018+
77019+void
77020+gr_handle_alertkill(struct task_struct *task)
77021+{
77022+ struct acl_subject_label *curracl;
77023+ __u32 curr_ip;
77024+ struct task_struct *p, *p2;
77025+
77026+ if (unlikely(!gr_acl_is_enabled()))
77027+ return;
77028+
77029+ curracl = task->acl;
77030+ curr_ip = task->signal->curr_ip;
77031+
77032+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
77033+ read_lock(&tasklist_lock);
77034+ do_each_thread(p2, p) {
77035+ if (p->signal->curr_ip == curr_ip)
77036+ gr_fake_force_sig(SIGKILL, p);
77037+ } while_each_thread(p2, p);
77038+ read_unlock(&tasklist_lock);
77039+ } else if (curracl->mode & GR_KILLPROC)
77040+ gr_fake_force_sig(SIGKILL, task);
77041+
77042+ return;
77043+}
77044diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
77045new file mode 100644
77046index 0000000..98011b0
77047--- /dev/null
77048+++ b/grsecurity/gracl_shm.c
77049@@ -0,0 +1,40 @@
77050+#include <linux/kernel.h>
77051+#include <linux/mm.h>
77052+#include <linux/sched.h>
77053+#include <linux/file.h>
77054+#include <linux/ipc.h>
77055+#include <linux/gracl.h>
77056+#include <linux/grsecurity.h>
77057+#include <linux/grinternal.h>
77058+
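+/* deny shmat when the segment's creator (or last attacher) is still running
+   under a different subject that has GR_PROTSHM set */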
77059+int
77060+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
77061+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
77062+{
77063+ struct task_struct *task;
77064+
77065+ if (!gr_acl_is_enabled())
77066+ return 1;
77067+
77068+ rcu_read_lock();
77069+ read_lock(&tasklist_lock);
77070+
77071+ task = find_task_by_vpid(shm_cprid);
77072+
77073+ if (unlikely(!task))
77074+ task = find_task_by_vpid(shm_lapid);
77075+
77076+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
77077+ (task_pid_nr(task) == shm_lapid)) &&
77078+ (task->acl->mode & GR_PROTSHM) &&
77079+ (task->acl != current->acl))) {
77080+ read_unlock(&tasklist_lock);
77081+ rcu_read_unlock();
77082+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
77083+ return 0;
77084+ }
77085+ read_unlock(&tasklist_lock);
77086+ rcu_read_unlock();
77087+
77088+ return 1;
77089+}
77090diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
77091new file mode 100644
77092index 0000000..bc0be01
77093--- /dev/null
77094+++ b/grsecurity/grsec_chdir.c
77095@@ -0,0 +1,19 @@
77096+#include <linux/kernel.h>
77097+#include <linux/sched.h>
77098+#include <linux/fs.h>
77099+#include <linux/file.h>
77100+#include <linux/grsecurity.h>
77101+#include <linux/grinternal.h>
77102+
77103+void
77104+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
77105+{
77106+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
77107+ if ((grsec_enable_chdir && grsec_enable_group &&
77108+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
77109+ !grsec_enable_group)) {
77110+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
77111+ }
77112+#endif
77113+ return;
77114+}
77115diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
77116new file mode 100644
77117index 0000000..baa635c
77118--- /dev/null
77119+++ b/grsecurity/grsec_chroot.c
77120@@ -0,0 +1,387 @@
77121+#include <linux/kernel.h>
77122+#include <linux/module.h>
77123+#include <linux/sched.h>
77124+#include <linux/file.h>
77125+#include <linux/fs.h>
77126+#include <linux/mount.h>
77127+#include <linux/types.h>
77128+#include "../fs/mount.h"
77129+#include <linux/grsecurity.h>
77130+#include <linux/grinternal.h>
77131+
77132+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
77133+int gr_init_ran;
77134+#endif
77135+
77136+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
77137+{
77138+#ifdef CONFIG_GRKERNSEC
77139+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
77140+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
77141+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
77142+ && gr_init_ran
77143+#endif
77144+ )
77145+ task->gr_is_chrooted = 1;
77146+ else {
77147+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
77148+ if (task_pid_nr(task) == 1 && !gr_init_ran)
77149+ gr_init_ran = 1;
77150+#endif
77151+ task->gr_is_chrooted = 0;
77152+ }
77153+
77154+ task->gr_chroot_dentry = path->dentry;
77155+#endif
77156+ return;
77157+}
77158+
77159+void gr_clear_chroot_entries(struct task_struct *task)
77160+{
77161+#ifdef CONFIG_GRKERNSEC
77162+ task->gr_is_chrooted = 0;
77163+ task->gr_chroot_dentry = NULL;
77164+#endif
77165+ return;
77166+}
77167+
77168+int
77169+gr_handle_chroot_unix(const pid_t pid)
77170+{
77171+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
77172+ struct task_struct *p;
77173+
77174+ if (unlikely(!grsec_enable_chroot_unix))
77175+ return 1;
77176+
77177+ if (likely(!proc_is_chrooted(current)))
77178+ return 1;
77179+
77180+ rcu_read_lock();
77181+ read_lock(&tasklist_lock);
77182+ p = find_task_by_vpid_unrestricted(pid);
77183+ if (unlikely(p && !have_same_root(current, p))) {
77184+ read_unlock(&tasklist_lock);
77185+ rcu_read_unlock();
77186+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
77187+ return 0;
77188+ }
77189+ read_unlock(&tasklist_lock);
77190+ rcu_read_unlock();
77191+#endif
77192+ return 1;
77193+}
77194+
77195+int
77196+gr_handle_chroot_nice(void)
77197+{
77198+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
77199+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
77200+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
77201+ return -EPERM;
77202+ }
77203+#endif
77204+ return 0;
77205+}
77206+
77207+int
77208+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
77209+{
77210+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
77211+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
77212+ && proc_is_chrooted(current)) {
77213+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
77214+ return -EACCES;
77215+ }
77216+#endif
77217+ return 0;
77218+}
77219+
77220+int
77221+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
77222+{
77223+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
77224+ struct task_struct *p;
77225+ int ret = 0;
77226+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
77227+ return ret;
77228+
77229+ read_lock(&tasklist_lock);
77230+ do_each_pid_task(pid, type, p) {
77231+ if (!have_same_root(current, p)) {
77232+ ret = 1;
77233+ goto out;
77234+ }
77235+ } while_each_pid_task(pid, type, p);
77236+out:
77237+ read_unlock(&tasklist_lock);
77238+ return ret;
77239+#endif
77240+ return 0;
77241+}
77242+
77243+int
77244+gr_pid_is_chrooted(struct task_struct *p)
77245+{
77246+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
77247+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
77248+ return 0;
77249+
77250+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
77251+ !have_same_root(current, p)) {
77252+ return 1;
77253+ }
77254+#endif
77255+ return 0;
77256+}
77257+
77258+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
77259+
77260+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
77261+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
77262+{
77263+ struct path path, currentroot;
77264+ int ret = 0;
77265+
77266+ path.dentry = (struct dentry *)u_dentry;
77267+ path.mnt = (struct vfsmount *)u_mnt;
77268+ get_fs_root(current->fs, &currentroot);
77269+ if (path_is_under(&path, &currentroot))
77270+ ret = 1;
77271+ path_put(&currentroot);
77272+
77273+ return ret;
77274+}
77275+#endif
77276+
77277+int
77278+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
77279+{
77280+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
77281+ if (!grsec_enable_chroot_fchdir)
77282+ return 1;
77283+
77284+ if (!proc_is_chrooted(current))
77285+ return 1;
77286+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
77287+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
77288+ return 0;
77289+ }
77290+#endif
77291+ return 1;
77292+}
77293+
77294+int
77295+gr_chroot_fhandle(void)
77296+{
77297+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
77298+ if (!grsec_enable_chroot_fchdir)
77299+ return 1;
77300+
77301+ if (!proc_is_chrooted(current))
77302+ return 1;
77303+ else {
77304+ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
77305+ return 0;
77306+ }
77307+#endif
77308+ return 1;
77309+}
77310+
77311+int
77312+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
77313+ const time_t shm_createtime)
77314+{
77315+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
77316+ struct task_struct *p;
77317+ time_t starttime;
77318+
77319+ if (unlikely(!grsec_enable_chroot_shmat))
77320+ return 1;
77321+
77322+ if (likely(!proc_is_chrooted(current)))
77323+ return 1;
77324+
77325+ rcu_read_lock();
77326+ read_lock(&tasklist_lock);
77327+
77328+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
77329+ starttime = p->start_time.tv_sec;
77330+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
77331+ if (have_same_root(current, p)) {
77332+ goto allow;
77333+ } else {
77334+ read_unlock(&tasklist_lock);
77335+ rcu_read_unlock();
77336+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
77337+ return 0;
77338+ }
77339+ }
77340+ /* creator exited, pid reuse, fall through to next check */
77341+ }
77342+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
77343+ if (unlikely(!have_same_root(current, p))) {
77344+ read_unlock(&tasklist_lock);
77345+ rcu_read_unlock();
77346+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
77347+ return 0;
77348+ }
77349+ }
77350+
77351+allow:
77352+ read_unlock(&tasklist_lock);
77353+ rcu_read_unlock();
77354+#endif
77355+ return 1;
77356+}
77357+
77358+void
77359+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
77360+{
77361+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
77362+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
77363+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
77364+#endif
77365+ return;
77366+}
77367+
77368+int
77369+gr_handle_chroot_mknod(const struct dentry *dentry,
77370+ const struct vfsmount *mnt, const int mode)
77371+{
77372+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
77373+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
77374+ proc_is_chrooted(current)) {
77375+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
77376+ return -EPERM;
77377+ }
77378+#endif
77379+ return 0;
77380+}
77381+
77382+int
77383+gr_handle_chroot_mount(const struct dentry *dentry,
77384+ const struct vfsmount *mnt, const char *dev_name)
77385+{
77386+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
77387+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
77388+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
77389+ return -EPERM;
77390+ }
77391+#endif
77392+ return 0;
77393+}
77394+
77395+int
77396+gr_handle_chroot_pivot(void)
77397+{
77398+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
77399+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
77400+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
77401+ return -EPERM;
77402+ }
77403+#endif
77404+ return 0;
77405+}
77406+
77407+int
77408+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
77409+{
77410+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
77411+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
77412+ !gr_is_outside_chroot(dentry, mnt)) {
77413+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
77414+ return -EPERM;
77415+ }
77416+#endif
77417+ return 0;
77418+}
77419+
77420+extern const char *captab_log[];
77421+extern int captab_log_entries;
77422+
77423+int
77424+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
77425+{
77426+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77427+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
77428+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
77429+ if (cap_raised(chroot_caps, cap)) {
77430+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
77431+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
77432+ }
77433+ return 0;
77434+ }
77435+ }
77436+#endif
77437+ return 1;
77438+}
77439+
77440+int
77441+gr_chroot_is_capable(const int cap)
77442+{
77443+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77444+ return gr_task_chroot_is_capable(current, current_cred(), cap);
77445+#endif
77446+ return 1;
77447+}
77448+
77449+int
77450+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
77451+{
77452+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77453+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
77454+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
77455+ if (cap_raised(chroot_caps, cap)) {
77456+ return 0;
77457+ }
77458+ }
77459+#endif
77460+ return 1;
77461+}
77462+
77463+int
77464+gr_chroot_is_capable_nolog(const int cap)
77465+{
77466+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77467+ return gr_task_chroot_is_capable_nolog(current, cap);
77468+#endif
77469+ return 1;
77470+}
77471+
77472+int
77473+gr_handle_chroot_sysctl(const int op)
77474+{
77475+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
77476+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
77477+ proc_is_chrooted(current))
77478+ return -EACCES;
77479+#endif
77480+ return 0;
77481+}
77482+
77483+void
77484+gr_handle_chroot_chdir(const struct path *path)
77485+{
77486+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
77487+ if (grsec_enable_chroot_chdir)
77488+ set_fs_pwd(current->fs, path);
77489+#endif
77490+ return;
77491+}
77492+
77493+int
77494+gr_handle_chroot_chmod(const struct dentry *dentry,
77495+ const struct vfsmount *mnt, const int mode)
77496+{
77497+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
77498+ /* allow chmod +s on directories, but not files */
77499+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
77500+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
77501+ proc_is_chrooted(current)) {
77502+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
77503+ return -EPERM;
77504+ }
77505+#endif
77506+ return 0;
77507+}
77508diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
77509new file mode 100644
77510index 0000000..2d3bcb7
77511--- /dev/null
77512+++ b/grsecurity/grsec_disabled.c
77513@@ -0,0 +1,440 @@
77514+#include <linux/kernel.h>
77515+#include <linux/module.h>
77516+#include <linux/sched.h>
77517+#include <linux/file.h>
77518+#include <linux/fs.h>
77519+#include <linux/kdev_t.h>
77520+#include <linux/net.h>
77521+#include <linux/in.h>
77522+#include <linux/ip.h>
77523+#include <linux/skbuff.h>
77524+#include <linux/sysctl.h>
77525+
77526+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
77527+void
77528+pax_set_initial_flags(struct linux_binprm *bprm)
77529+{
77530+ return;
77531+}
77532+#endif
77533+
77534+#ifdef CONFIG_SYSCTL
77535+__u32
77536+gr_handle_sysctl(const struct ctl_table * table, const int op)
77537+{
77538+ return 0;
77539+}
77540+#endif
77541+
77542+#ifdef CONFIG_TASKSTATS
77543+int gr_is_taskstats_denied(int pid)
77544+{
77545+ return 0;
77546+}
77547+#endif
77548+
77549+int
77550+gr_acl_is_enabled(void)
77551+{
77552+ return 0;
77553+}
77554+
77555+int
77556+gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
77557+{
77558+ return 0;
77559+}
77560+
77561+void
77562+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
77563+{
77564+ return;
77565+}
77566+
77567+int
77568+gr_handle_rawio(const struct inode *inode)
77569+{
77570+ return 0;
77571+}
77572+
77573+void
77574+gr_acl_handle_psacct(struct task_struct *task, const long code)
77575+{
77576+ return;
77577+}
77578+
77579+int
77580+gr_handle_ptrace(struct task_struct *task, const long request)
77581+{
77582+ return 0;
77583+}
77584+
77585+int
77586+gr_handle_proc_ptrace(struct task_struct *task)
77587+{
77588+ return 0;
77589+}
77590+
77591+int
77592+gr_set_acls(const int type)
77593+{
77594+ return 0;
77595+}
77596+
77597+int
77598+gr_check_hidden_task(const struct task_struct *tsk)
77599+{
77600+ return 0;
77601+}
77602+
77603+int
77604+gr_check_protected_task(const struct task_struct *task)
77605+{
77606+ return 0;
77607+}
77608+
77609+int
77610+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
77611+{
77612+ return 0;
77613+}
77614+
77615+void
77616+gr_copy_label(struct task_struct *tsk)
77617+{
77618+ return;
77619+}
77620+
77621+void
77622+gr_set_pax_flags(struct task_struct *task)
77623+{
77624+ return;
77625+}
77626+
77627+int
77628+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
77629+ const int unsafe_share)
77630+{
77631+ return 0;
77632+}
77633+
77634+void
77635+gr_handle_delete(const ino_t ino, const dev_t dev)
77636+{
77637+ return;
77638+}
77639+
77640+void
77641+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
77642+{
77643+ return;
77644+}
77645+
77646+void
77647+gr_handle_crash(struct task_struct *task, const int sig)
77648+{
77649+ return;
77650+}
77651+
77652+int
77653+gr_check_crash_exec(const struct file *filp)
77654+{
77655+ return 0;
77656+}
77657+
77658+int
77659+gr_check_crash_uid(const kuid_t uid)
77660+{
77661+ return 0;
77662+}
77663+
77664+void
77665+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
77666+ struct dentry *old_dentry,
77667+ struct dentry *new_dentry,
77668+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
77669+{
77670+ return;
77671+}
77672+
77673+int
77674+gr_search_socket(const int family, const int type, const int protocol)
77675+{
77676+ return 1;
77677+}
77678+
77679+int
77680+gr_search_connectbind(const int mode, const struct socket *sock,
77681+ const struct sockaddr_in *addr)
77682+{
77683+ return 0;
77684+}
77685+
77686+void
77687+gr_handle_alertkill(struct task_struct *task)
77688+{
77689+ return;
77690+}
77691+
77692+__u32
77693+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
77694+{
77695+ return 1;
77696+}
77697+
77698+__u32
77699+gr_acl_handle_hidden_file(const struct dentry * dentry,
77700+ const struct vfsmount * mnt)
77701+{
77702+ return 1;
77703+}
77704+
77705+__u32
77706+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
77707+ int acc_mode)
77708+{
77709+ return 1;
77710+}
77711+
77712+__u32
77713+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
77714+{
77715+ return 1;
77716+}
77717+
77718+__u32
77719+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
77720+{
77721+ return 1;
77722+}
77723+
77724+int
77725+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
77726+ unsigned int *vm_flags)
77727+{
77728+ return 1;
77729+}
77730+
77731+__u32
77732+gr_acl_handle_truncate(const struct dentry * dentry,
77733+ const struct vfsmount * mnt)
77734+{
77735+ return 1;
77736+}
77737+
77738+__u32
77739+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
77740+{
77741+ return 1;
77742+}
77743+
77744+__u32
77745+gr_acl_handle_access(const struct dentry * dentry,
77746+ const struct vfsmount * mnt, const int fmode)
77747+{
77748+ return 1;
77749+}
77750+
77751+__u32
77752+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
77753+ umode_t *mode)
77754+{
77755+ return 1;
77756+}
77757+
77758+__u32
77759+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
77760+{
77761+ return 1;
77762+}
77763+
77764+__u32
77765+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
77766+{
77767+ return 1;
77768+}
77769+
77770+__u32
77771+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
77772+{
77773+ return 1;
77774+}
77775+
77776+void
77777+grsecurity_init(void)
77778+{
77779+ return;
77780+}
77781+
77782+umode_t gr_acl_umask(void)
77783+{
77784+ return 0;
77785+}
77786+
77787+__u32
77788+gr_acl_handle_mknod(const struct dentry * new_dentry,
77789+ const struct dentry * parent_dentry,
77790+ const struct vfsmount * parent_mnt,
77791+ const int mode)
77792+{
77793+ return 1;
77794+}
77795+
77796+__u32
77797+gr_acl_handle_mkdir(const struct dentry * new_dentry,
77798+ const struct dentry * parent_dentry,
77799+ const struct vfsmount * parent_mnt)
77800+{
77801+ return 1;
77802+}
77803+
77804+__u32
77805+gr_acl_handle_symlink(const struct dentry * new_dentry,
77806+ const struct dentry * parent_dentry,
77807+ const struct vfsmount * parent_mnt, const struct filename *from)
77808+{
77809+ return 1;
77810+}
77811+
77812+__u32
77813+gr_acl_handle_link(const struct dentry * new_dentry,
77814+ const struct dentry * parent_dentry,
77815+ const struct vfsmount * parent_mnt,
77816+ const struct dentry * old_dentry,
77817+ const struct vfsmount * old_mnt, const struct filename *to)
77818+{
77819+ return 1;
77820+}
77821+
77822+int
77823+gr_acl_handle_rename(const struct dentry *new_dentry,
77824+ const struct dentry *parent_dentry,
77825+ const struct vfsmount *parent_mnt,
77826+ const struct dentry *old_dentry,
77827+ const struct inode *old_parent_inode,
77828+ const struct vfsmount *old_mnt, const struct filename *newname,
77829+ unsigned int flags)
77830+{
77831+ return 0;
77832+}
77833+
77834+int
77835+gr_acl_handle_filldir(const struct file *file, const char *name,
77836+ const int namelen, const ino_t ino)
77837+{
77838+ return 1;
77839+}
77840+
77841+int
77842+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
77843+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
77844+{
77845+ return 1;
77846+}
77847+
77848+int
77849+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
77850+{
77851+ return 0;
77852+}
77853+
77854+int
77855+gr_search_accept(const struct socket *sock)
77856+{
77857+ return 0;
77858+}
77859+
77860+int
77861+gr_search_listen(const struct socket *sock)
77862+{
77863+ return 0;
77864+}
77865+
77866+int
77867+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
77868+{
77869+ return 0;
77870+}
77871+
77872+__u32
77873+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
77874+{
77875+ return 1;
77876+}
77877+
77878+__u32
77879+gr_acl_handle_creat(const struct dentry * dentry,
77880+ const struct dentry * p_dentry,
77881+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
77882+ const int imode)
77883+{
77884+ return 1;
77885+}
77886+
77887+void
77888+gr_acl_handle_exit(void)
77889+{
77890+ return;
77891+}
77892+
77893+int
77894+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
77895+{
77896+ return 1;
77897+}
77898+
77899+void
77900+gr_set_role_label(const kuid_t uid, const kgid_t gid)
77901+{
77902+ return;
77903+}
77904+
77905+int
77906+gr_acl_handle_procpidmem(const struct task_struct *task)
77907+{
77908+ return 0;
77909+}
77910+
77911+int
77912+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
77913+{
77914+ return 0;
77915+}
77916+
77917+int
77918+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
77919+{
77920+ return 0;
77921+}
77922+
77923+int
77924+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
77925+{
77926+ return 0;
77927+}
77928+
77929+int
77930+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
77931+{
77932+ return 0;
77933+}
77934+
77935+int gr_acl_enable_at_secure(void)
77936+{
77937+ return 0;
77938+}
77939+
77940+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
77941+{
77942+ return dentry->d_sb->s_dev;
77943+}
77944+
77945+void gr_put_exec_file(struct task_struct *task)
77946+{
77947+ return;
77948+}
77949+
77950+#ifdef CONFIG_SECURITY
77951+EXPORT_SYMBOL_GPL(gr_check_user_change);
77952+EXPORT_SYMBOL_GPL(gr_check_group_change);
77953+#endif
77954diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
77955new file mode 100644
77956index 0000000..14638ff
77957--- /dev/null
77958+++ b/grsecurity/grsec_exec.c
77959@@ -0,0 +1,188 @@
77960+#include <linux/kernel.h>
77961+#include <linux/sched.h>
77962+#include <linux/file.h>
77963+#include <linux/binfmts.h>
77964+#include <linux/fs.h>
77965+#include <linux/types.h>
77966+#include <linux/grdefs.h>
77967+#include <linux/grsecurity.h>
77968+#include <linux/grinternal.h>
77969+#include <linux/capability.h>
77970+#include <linux/module.h>
77971+#include <linux/compat.h>
77972+
77973+#include <asm/uaccess.h>
77974+
77975+#ifdef CONFIG_GRKERNSEC_EXECLOG
77976+static char gr_exec_arg_buf[132];
77977+static DEFINE_MUTEX(gr_exec_arg_mutex);
77978+#endif
77979+
77980+struct user_arg_ptr {
77981+#ifdef CONFIG_COMPAT
77982+ bool is_compat;
77983+#endif
77984+ union {
77985+ const char __user *const __user *native;
77986+#ifdef CONFIG_COMPAT
77987+ const compat_uptr_t __user *compat;
77988+#endif
77989+ } ptr;
77990+};
77991+
77992+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
77993+
77994+void
77995+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
77996+{
77997+#ifdef CONFIG_GRKERNSEC_EXECLOG
77998+ char *grarg = gr_exec_arg_buf;
77999+ unsigned int i, x, execlen = 0;
78000+ char c;
78001+
78002+ if (!((grsec_enable_execlog && grsec_enable_group &&
78003+ in_group_p(grsec_audit_gid))
78004+ || (grsec_enable_execlog && !grsec_enable_group)))
78005+ return;
78006+
78007+ mutex_lock(&gr_exec_arg_mutex);
78008+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
78009+
78010+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
78011+ const char __user *p;
78012+ unsigned int len;
78013+
78014+ p = get_user_arg_ptr(argv, i);
78015+ if (IS_ERR(p))
78016+ goto log;
78017+
78018+ len = strnlen_user(p, 128 - execlen);
78019+ if (len > 128 - execlen)
78020+ len = 128 - execlen;
78021+ else if (len > 0)
78022+ len--;
78023+ if (copy_from_user(grarg + execlen, p, len))
78024+ goto log;
78025+
78026+ /* rewrite unprintable characters */
78027+ for (x = 0; x < len; x++) {
78028+ c = *(grarg + execlen + x);
78029+ if (c < 32 || c > 126)
78030+ *(grarg + execlen + x) = ' ';
78031+ }
78032+
78033+ execlen += len;
78034+ *(grarg + execlen) = ' ';
78035+ *(grarg + execlen + 1) = '\0';
78036+ execlen++;
78037+ }
78038+
78039+ log:
78040+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
78041+ bprm->file->f_path.mnt, grarg);
78042+ mutex_unlock(&gr_exec_arg_mutex);
78043+#endif
78044+ return;
78045+}
78046+
78047+#ifdef CONFIG_GRKERNSEC
78048+extern int gr_acl_is_capable(const int cap);
78049+extern int gr_acl_is_capable_nolog(const int cap);
78050+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
78051+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
78052+extern int gr_chroot_is_capable(const int cap);
78053+extern int gr_chroot_is_capable_nolog(const int cap);
78054+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
78055+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
78056+#endif
78057+
78058+const char *captab_log[] = {
78059+ "CAP_CHOWN",
78060+ "CAP_DAC_OVERRIDE",
78061+ "CAP_DAC_READ_SEARCH",
78062+ "CAP_FOWNER",
78063+ "CAP_FSETID",
78064+ "CAP_KILL",
78065+ "CAP_SETGID",
78066+ "CAP_SETUID",
78067+ "CAP_SETPCAP",
78068+ "CAP_LINUX_IMMUTABLE",
78069+ "CAP_NET_BIND_SERVICE",
78070+ "CAP_NET_BROADCAST",
78071+ "CAP_NET_ADMIN",
78072+ "CAP_NET_RAW",
78073+ "CAP_IPC_LOCK",
78074+ "CAP_IPC_OWNER",
78075+ "CAP_SYS_MODULE",
78076+ "CAP_SYS_RAWIO",
78077+ "CAP_SYS_CHROOT",
78078+ "CAP_SYS_PTRACE",
78079+ "CAP_SYS_PACCT",
78080+ "CAP_SYS_ADMIN",
78081+ "CAP_SYS_BOOT",
78082+ "CAP_SYS_NICE",
78083+ "CAP_SYS_RESOURCE",
78084+ "CAP_SYS_TIME",
78085+ "CAP_SYS_TTY_CONFIG",
78086+ "CAP_MKNOD",
78087+ "CAP_LEASE",
78088+ "CAP_AUDIT_WRITE",
78089+ "CAP_AUDIT_CONTROL",
78090+ "CAP_SETFCAP",
78091+ "CAP_MAC_OVERRIDE",
78092+ "CAP_MAC_ADMIN",
78093+ "CAP_SYSLOG",
78094+ "CAP_WAKE_ALARM",
78095+ "CAP_BLOCK_SUSPEND"
78096+};
78097+
78098+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
78099+
78100+int gr_is_capable(const int cap)
78101+{
78102+#ifdef CONFIG_GRKERNSEC
78103+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
78104+ return 1;
78105+ return 0;
78106+#else
78107+ return 1;
78108+#endif
78109+}
78110+
78111+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
78112+{
78113+#ifdef CONFIG_GRKERNSEC
78114+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
78115+ return 1;
78116+ return 0;
78117+#else
78118+ return 1;
78119+#endif
78120+}
78121+
78122+int gr_is_capable_nolog(const int cap)
78123+{
78124+#ifdef CONFIG_GRKERNSEC
78125+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
78126+ return 1;
78127+ return 0;
78128+#else
78129+ return 1;
78130+#endif
78131+}
78132+
78133+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
78134+{
78135+#ifdef CONFIG_GRKERNSEC
78136+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
78137+ return 1;
78138+ return 0;
78139+#else
78140+ return 1;
78141+#endif
78142+}
78143+
78144+EXPORT_SYMBOL_GPL(gr_is_capable);
78145+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
78146+EXPORT_SYMBOL_GPL(gr_task_is_capable);
78147+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
78148diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
78149new file mode 100644
78150index 0000000..06cc6ea
78151--- /dev/null
78152+++ b/grsecurity/grsec_fifo.c
78153@@ -0,0 +1,24 @@
78154+#include <linux/kernel.h>
78155+#include <linux/sched.h>
78156+#include <linux/fs.h>
78157+#include <linux/file.h>
78158+#include <linux/grinternal.h>
78159+
78160+int
78161+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
78162+ const struct dentry *dir, const int flag, const int acc_mode)
78163+{
78164+#ifdef CONFIG_GRKERNSEC_FIFO
78165+ const struct cred *cred = current_cred();
78166+
78167+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
78168+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
78169+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
78170+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
78171+ if (!inode_permission(dentry->d_inode, acc_mode))
78172+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
78173+ return -EACCES;
78174+ }
78175+#endif
78176+ return 0;
78177+}
78178diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
78179new file mode 100644
78180index 0000000..8ca18bf
78181--- /dev/null
78182+++ b/grsecurity/grsec_fork.c
78183@@ -0,0 +1,23 @@
78184+#include <linux/kernel.h>
78185+#include <linux/sched.h>
78186+#include <linux/grsecurity.h>
78187+#include <linux/grinternal.h>
78188+#include <linux/errno.h>
78189+
78190+void
78191+gr_log_forkfail(const int retval)
78192+{
78193+#ifdef CONFIG_GRKERNSEC_FORKFAIL
78194+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
78195+ switch (retval) {
78196+ case -EAGAIN:
78197+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
78198+ break;
78199+ case -ENOMEM:
78200+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
78201+ break;
78202+ }
78203+ }
78204+#endif
78205+ return;
78206+}
78207diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
78208new file mode 100644
78209index 0000000..b7cb191
78210--- /dev/null
78211+++ b/grsecurity/grsec_init.c
78212@@ -0,0 +1,286 @@
78213+#include <linux/kernel.h>
78214+#include <linux/sched.h>
78215+#include <linux/mm.h>
78216+#include <linux/gracl.h>
78217+#include <linux/slab.h>
78218+#include <linux/vmalloc.h>
78219+#include <linux/percpu.h>
78220+#include <linux/module.h>
78221+
78222+int grsec_enable_ptrace_readexec;
78223+int grsec_enable_setxid;
78224+int grsec_enable_symlinkown;
78225+kgid_t grsec_symlinkown_gid;
78226+int grsec_enable_brute;
78227+int grsec_enable_link;
78228+int grsec_enable_dmesg;
78229+int grsec_enable_harden_ptrace;
78230+int grsec_enable_harden_ipc;
78231+int grsec_enable_fifo;
78232+int grsec_enable_execlog;
78233+int grsec_enable_signal;
78234+int grsec_enable_forkfail;
78235+int grsec_enable_audit_ptrace;
78236+int grsec_enable_time;
78237+int grsec_enable_group;
78238+kgid_t grsec_audit_gid;
78239+int grsec_enable_chdir;
78240+int grsec_enable_mount;
78241+int grsec_enable_rofs;
78242+int grsec_deny_new_usb;
78243+int grsec_enable_chroot_findtask;
78244+int grsec_enable_chroot_mount;
78245+int grsec_enable_chroot_shmat;
78246+int grsec_enable_chroot_fchdir;
78247+int grsec_enable_chroot_double;
78248+int grsec_enable_chroot_pivot;
78249+int grsec_enable_chroot_chdir;
78250+int grsec_enable_chroot_chmod;
78251+int grsec_enable_chroot_mknod;
78252+int grsec_enable_chroot_nice;
78253+int grsec_enable_chroot_execlog;
78254+int grsec_enable_chroot_caps;
78255+int grsec_enable_chroot_sysctl;
78256+int grsec_enable_chroot_unix;
78257+int grsec_enable_tpe;
78258+kgid_t grsec_tpe_gid;
78259+int grsec_enable_blackhole;
78260+#ifdef CONFIG_IPV6_MODULE
78261+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
78262+#endif
78263+int grsec_lastack_retries;
78264+int grsec_enable_tpe_all;
78265+int grsec_enable_tpe_invert;
78266+int grsec_enable_socket_all;
78267+kgid_t grsec_socket_all_gid;
78268+int grsec_enable_socket_client;
78269+kgid_t grsec_socket_client_gid;
78270+int grsec_enable_socket_server;
78271+kgid_t grsec_socket_server_gid;
78272+int grsec_resource_logging;
78273+int grsec_disable_privio;
78274+int grsec_enable_log_rwxmaps;
78275+int grsec_lock;
78276+
78277+DEFINE_SPINLOCK(grsec_alert_lock);
78278+unsigned long grsec_alert_wtime = 0;
78279+unsigned long grsec_alert_fyet = 0;
78280+
78281+DEFINE_SPINLOCK(grsec_audit_lock);
78282+
78283+DEFINE_RWLOCK(grsec_exec_file_lock);
78284+
78285+char *gr_shared_page[4];
78286+
78287+char *gr_alert_log_fmt;
78288+char *gr_audit_log_fmt;
78289+char *gr_alert_log_buf;
78290+char *gr_audit_log_buf;
78291+
78292+extern struct gr_arg *gr_usermode;
78293+extern unsigned char *gr_system_salt;
78294+extern unsigned char *gr_system_sum;
78295+
78296+void __init
78297+grsecurity_init(void)
78298+{
78299+ int j;
78300+ /* create the per-cpu shared pages */
78301+
78302+#ifdef CONFIG_X86
78303+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
78304+#endif
78305+
78306+ for (j = 0; j < 4; j++) {
78307+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
78308+ if (gr_shared_page[j] == NULL) {
78309+ panic("Unable to allocate grsecurity shared page");
78310+ return;
78311+ }
78312+ }
78313+
78314+ /* allocate log buffers */
78315+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
78316+ if (!gr_alert_log_fmt) {
78317+ panic("Unable to allocate grsecurity alert log format buffer");
78318+ return;
78319+ }
78320+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
78321+ if (!gr_audit_log_fmt) {
78322+ panic("Unable to allocate grsecurity audit log format buffer");
78323+ return;
78324+ }
78325+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
78326+ if (!gr_alert_log_buf) {
78327+ panic("Unable to allocate grsecurity alert log buffer");
78328+ return;
78329+ }
78330+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
78331+ if (!gr_audit_log_buf) {
78332+ panic("Unable to allocate grsecurity audit log buffer");
78333+ return;
78334+ }
78335+
78336+ /* allocate memory for authentication structure */
78337+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
78338+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
78339+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
78340+
78341+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
78342+ panic("Unable to allocate grsecurity authentication structure");
78343+ return;
78344+ }
78345+
78346+#ifdef CONFIG_GRKERNSEC_IO
78347+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
78348+ grsec_disable_privio = 1;
78349+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
78350+ grsec_disable_privio = 1;
78351+#else
78352+ grsec_disable_privio = 0;
78353+#endif
78354+#endif
78355+
78356+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
78357+ /* for backward compatibility, tpe_invert always defaults to on
78358+ when the option is enabled in the kernel config
78359+ */
78360+ grsec_enable_tpe_invert = 1;
78361+#endif
78362+
78363+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
78364+#ifndef CONFIG_GRKERNSEC_SYSCTL
78365+ grsec_lock = 1;
78366+#endif
78367+
78368+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78369+ grsec_enable_log_rwxmaps = 1;
78370+#endif
78371+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
78372+ grsec_enable_group = 1;
78373+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
78374+#endif
78375+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78376+ grsec_enable_ptrace_readexec = 1;
78377+#endif
78378+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
78379+ grsec_enable_chdir = 1;
78380+#endif
78381+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
78382+ grsec_enable_harden_ptrace = 1;
78383+#endif
78384+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
78385+ grsec_enable_harden_ipc = 1;
78386+#endif
78387+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78388+ grsec_enable_mount = 1;
78389+#endif
78390+#ifdef CONFIG_GRKERNSEC_LINK
78391+ grsec_enable_link = 1;
78392+#endif
78393+#ifdef CONFIG_GRKERNSEC_BRUTE
78394+ grsec_enable_brute = 1;
78395+#endif
78396+#ifdef CONFIG_GRKERNSEC_DMESG
78397+ grsec_enable_dmesg = 1;
78398+#endif
78399+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
78400+ grsec_enable_blackhole = 1;
78401+ grsec_lastack_retries = 4;
78402+#endif
78403+#ifdef CONFIG_GRKERNSEC_FIFO
78404+ grsec_enable_fifo = 1;
78405+#endif
78406+#ifdef CONFIG_GRKERNSEC_EXECLOG
78407+ grsec_enable_execlog = 1;
78408+#endif
78409+#ifdef CONFIG_GRKERNSEC_SETXID
78410+ grsec_enable_setxid = 1;
78411+#endif
78412+#ifdef CONFIG_GRKERNSEC_SIGNAL
78413+ grsec_enable_signal = 1;
78414+#endif
78415+#ifdef CONFIG_GRKERNSEC_FORKFAIL
78416+ grsec_enable_forkfail = 1;
78417+#endif
78418+#ifdef CONFIG_GRKERNSEC_TIME
78419+ grsec_enable_time = 1;
78420+#endif
78421+#ifdef CONFIG_GRKERNSEC_RESLOG
78422+ grsec_resource_logging = 1;
78423+#endif
78424+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
78425+ grsec_enable_chroot_findtask = 1;
78426+#endif
78427+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
78428+ grsec_enable_chroot_unix = 1;
78429+#endif
78430+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
78431+ grsec_enable_chroot_mount = 1;
78432+#endif
78433+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
78434+ grsec_enable_chroot_fchdir = 1;
78435+#endif
78436+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
78437+ grsec_enable_chroot_shmat = 1;
78438+#endif
78439+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
78440+ grsec_enable_audit_ptrace = 1;
78441+#endif
78442+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
78443+ grsec_enable_chroot_double = 1;
78444+#endif
78445+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
78446+ grsec_enable_chroot_pivot = 1;
78447+#endif
78448+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
78449+ grsec_enable_chroot_chdir = 1;
78450+#endif
78451+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
78452+ grsec_enable_chroot_chmod = 1;
78453+#endif
78454+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
78455+ grsec_enable_chroot_mknod = 1;
78456+#endif
78457+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
78458+ grsec_enable_chroot_nice = 1;
78459+#endif
78460+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
78461+ grsec_enable_chroot_execlog = 1;
78462+#endif
78463+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
78464+ grsec_enable_chroot_caps = 1;
78465+#endif
78466+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
78467+ grsec_enable_chroot_sysctl = 1;
78468+#endif
78469+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78470+ grsec_enable_symlinkown = 1;
78471+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
78472+#endif
78473+#ifdef CONFIG_GRKERNSEC_TPE
78474+ grsec_enable_tpe = 1;
78475+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
78476+#ifdef CONFIG_GRKERNSEC_TPE_ALL
78477+ grsec_enable_tpe_all = 1;
78478+#endif
78479+#endif
78480+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78481+ grsec_enable_socket_all = 1;
78482+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
78483+#endif
78484+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78485+ grsec_enable_socket_client = 1;
78486+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
78487+#endif
78488+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78489+ grsec_enable_socket_server = 1;
78490+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
78491+#endif
78492+#endif
78493+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
78494+ grsec_deny_new_usb = 1;
78495+#endif
78496+
78497+ return;
78498+}
78499diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
78500new file mode 100644
78501index 0000000..1773300
78502--- /dev/null
78503+++ b/grsecurity/grsec_ipc.c
78504@@ -0,0 +1,48 @@
78505+#include <linux/kernel.h>
78506+#include <linux/mm.h>
78507+#include <linux/sched.h>
78508+#include <linux/file.h>
78509+#include <linux/ipc.h>
78510+#include <linux/ipc_namespace.h>
78511+#include <linux/grsecurity.h>
78512+#include <linux/grinternal.h>
78513+
78514+int
78515+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
78516+{
78517+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
78518+ int write;
78519+ int orig_granted_mode;
78520+ kuid_t euid;
78521+ kgid_t egid;
78522+
78523+ if (!grsec_enable_harden_ipc)
78524+ return 1;
78525+
78526+ euid = current_euid();
78527+ egid = current_egid();
78528+
78529+ write = requested_mode & 00002;
78530+ orig_granted_mode = ipcp->mode;
78531+
78532+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
78533+ orig_granted_mode >>= 6;
78534+ else {
78535+ /* if perms grant world access, they are likely wrong; lock to owner */
78536+ if (orig_granted_mode & 0007)
78537+ orig_granted_mode = 0;
78538+ /* otherwise do an egid-only check */
78539+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
78540+ orig_granted_mode >>= 3;
78541+ /* otherwise, no access */
78542+ else
78543+ orig_granted_mode = 0;
78544+ }
78545+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
78546+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
78547+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
78548+ return 0;
78549+ }
78550+#endif
78551+ return 1;
78552+}
78553diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
78554new file mode 100644
78555index 0000000..5e05e20
78556--- /dev/null
78557+++ b/grsecurity/grsec_link.c
78558@@ -0,0 +1,58 @@
78559+#include <linux/kernel.h>
78560+#include <linux/sched.h>
78561+#include <linux/fs.h>
78562+#include <linux/file.h>
78563+#include <linux/grinternal.h>
78564+
78565+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
78566+{
78567+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78568+ const struct inode *link_inode = link->dentry->d_inode;
78569+
78570+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
78571+ /* ignore root-owned links, e.g. /proc/self */
78572+ gr_is_global_nonroot(link_inode->i_uid) && target &&
78573+ !uid_eq(link_inode->i_uid, target->i_uid)) {
78574+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
78575+ return 1;
78576+ }
78577+#endif
78578+ return 0;
78579+}
78580+
78581+int
78582+gr_handle_follow_link(const struct inode *parent,
78583+ const struct inode *inode,
78584+ const struct dentry *dentry, const struct vfsmount *mnt)
78585+{
78586+#ifdef CONFIG_GRKERNSEC_LINK
78587+ const struct cred *cred = current_cred();
78588+
78589+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
78590+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
78591+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
78592+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
78593+ return -EACCES;
78594+ }
78595+#endif
78596+ return 0;
78597+}
78598+
78599+int
78600+gr_handle_hardlink(const struct dentry *dentry,
78601+ const struct vfsmount *mnt,
78602+ struct inode *inode, const int mode, const struct filename *to)
78603+{
78604+#ifdef CONFIG_GRKERNSEC_LINK
78605+ const struct cred *cred = current_cred();
78606+
78607+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
78608+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
78609+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
78610+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
78611+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
78612+ return -EPERM;
78613+ }
78614+#endif
78615+ return 0;
78616+}
78617diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
78618new file mode 100644
78619index 0000000..dbe0a6b
78620--- /dev/null
78621+++ b/grsecurity/grsec_log.c
78622@@ -0,0 +1,341 @@
78623+#include <linux/kernel.h>
78624+#include <linux/sched.h>
78625+#include <linux/file.h>
78626+#include <linux/tty.h>
78627+#include <linux/fs.h>
78628+#include <linux/mm.h>
78629+#include <linux/grinternal.h>
78630+
78631+#ifdef CONFIG_TREE_PREEMPT_RCU
78632+#define DISABLE_PREEMPT() preempt_disable()
78633+#define ENABLE_PREEMPT() preempt_enable()
78634+#else
78635+#define DISABLE_PREEMPT()
78636+#define ENABLE_PREEMPT()
78637+#endif
78638+
78639+#define BEGIN_LOCKS(x) \
78640+ DISABLE_PREEMPT(); \
78641+ rcu_read_lock(); \
78642+ read_lock(&tasklist_lock); \
78643+ read_lock(&grsec_exec_file_lock); \
78644+ if (x != GR_DO_AUDIT) \
78645+ spin_lock(&grsec_alert_lock); \
78646+ else \
78647+ spin_lock(&grsec_audit_lock)
78648+
78649+#define END_LOCKS(x) \
78650+ if (x != GR_DO_AUDIT) \
78651+ spin_unlock(&grsec_alert_lock); \
78652+ else \
78653+ spin_unlock(&grsec_audit_lock); \
78654+ read_unlock(&grsec_exec_file_lock); \
78655+ read_unlock(&tasklist_lock); \
78656+ rcu_read_unlock(); \
78657+ ENABLE_PREEMPT(); \
78658+ if (x == GR_DONT_AUDIT) \
78659+ gr_handle_alertkill(current)
78660+
78661+enum {
78662+ FLOODING,
78663+ NO_FLOODING
78664+};
78665+
78666+extern char *gr_alert_log_fmt;
78667+extern char *gr_audit_log_fmt;
78668+extern char *gr_alert_log_buf;
78669+extern char *gr_audit_log_buf;
78670+
78671+static int gr_log_start(int audit)
78672+{
78673+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
78674+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
78675+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78676+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
78677+ unsigned long curr_secs = get_seconds();
78678+
78679+ if (audit == GR_DO_AUDIT)
78680+ goto set_fmt;
78681+
78682+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
78683+ grsec_alert_wtime = curr_secs;
78684+ grsec_alert_fyet = 0;
78685+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
78686+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
78687+ grsec_alert_fyet++;
78688+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
78689+ grsec_alert_wtime = curr_secs;
78690+ grsec_alert_fyet++;
78691+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
78692+ return FLOODING;
78693+ }
78694+ else return FLOODING;
78695+
78696+set_fmt:
78697+#endif
78698+ memset(buf, 0, PAGE_SIZE);
78699+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
78700+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
78701+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
78702+ } else if (current->signal->curr_ip) {
78703+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
78704+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
78705+ } else if (gr_acl_is_enabled()) {
78706+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
78707+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
78708+ } else {
78709+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
78710+ strcpy(buf, fmt);
78711+ }
78712+
78713+ return NO_FLOODING;
78714+}
78715+
78716+static void gr_log_middle(int audit, const char *msg, va_list ap)
78717+ __attribute__ ((format (printf, 2, 0)));
78718+
78719+static void gr_log_middle(int audit, const char *msg, va_list ap)
78720+{
78721+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78722+ unsigned int len = strlen(buf);
78723+
78724+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
78725+
78726+ return;
78727+}
78728+
78729+static void gr_log_middle_varargs(int audit, const char *msg, ...)
78730+ __attribute__ ((format (printf, 2, 3)));
78731+
78732+static void gr_log_middle_varargs(int audit, const char *msg, ...)
78733+{
78734+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78735+ unsigned int len = strlen(buf);
78736+ va_list ap;
78737+
78738+ va_start(ap, msg);
78739+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
78740+ va_end(ap);
78741+
78742+ return;
78743+}
78744+
78745+static void gr_log_end(int audit, int append_default)
78746+{
78747+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78748+ if (append_default) {
78749+ struct task_struct *task = current;
78750+ struct task_struct *parent = task->real_parent;
78751+ const struct cred *cred = __task_cred(task);
78752+ const struct cred *pcred = __task_cred(parent);
78753+ unsigned int len = strlen(buf);
78754+
78755+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78756+ }
78757+
78758+ printk("%s\n", buf);
78759+
78760+ return;
78761+}
78762+
78763+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
78764+{
78765+ int logtype;
78766+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
78767+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
78768+ void *voidptr = NULL;
78769+ int num1 = 0, num2 = 0;
78770+ unsigned long ulong1 = 0, ulong2 = 0;
78771+ struct dentry *dentry = NULL;
78772+ struct vfsmount *mnt = NULL;
78773+ struct file *file = NULL;
78774+ struct task_struct *task = NULL;
78775+ struct vm_area_struct *vma = NULL;
78776+ const struct cred *cred, *pcred;
78777+ va_list ap;
78778+
78779+ BEGIN_LOCKS(audit);
78780+ logtype = gr_log_start(audit);
78781+ if (logtype == FLOODING) {
78782+ END_LOCKS(audit);
78783+ return;
78784+ }
78785+ va_start(ap, argtypes);
78786+ switch (argtypes) {
78787+ case GR_TTYSNIFF:
78788+ task = va_arg(ap, struct task_struct *);
78789+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
78790+ break;
78791+ case GR_SYSCTL_HIDDEN:
78792+ str1 = va_arg(ap, char *);
78793+ gr_log_middle_varargs(audit, msg, result, str1);
78794+ break;
78795+ case GR_RBAC:
78796+ dentry = va_arg(ap, struct dentry *);
78797+ mnt = va_arg(ap, struct vfsmount *);
78798+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
78799+ break;
78800+ case GR_RBAC_STR:
78801+ dentry = va_arg(ap, struct dentry *);
78802+ mnt = va_arg(ap, struct vfsmount *);
78803+ str1 = va_arg(ap, char *);
78804+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
78805+ break;
78806+ case GR_STR_RBAC:
78807+ str1 = va_arg(ap, char *);
78808+ dentry = va_arg(ap, struct dentry *);
78809+ mnt = va_arg(ap, struct vfsmount *);
78810+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
78811+ break;
78812+ case GR_RBAC_MODE2:
78813+ dentry = va_arg(ap, struct dentry *);
78814+ mnt = va_arg(ap, struct vfsmount *);
78815+ str1 = va_arg(ap, char *);
78816+ str2 = va_arg(ap, char *);
78817+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
78818+ break;
78819+ case GR_RBAC_MODE3:
78820+ dentry = va_arg(ap, struct dentry *);
78821+ mnt = va_arg(ap, struct vfsmount *);
78822+ str1 = va_arg(ap, char *);
78823+ str2 = va_arg(ap, char *);
78824+ str3 = va_arg(ap, char *);
78825+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
78826+ break;
78827+ case GR_FILENAME:
78828+ dentry = va_arg(ap, struct dentry *);
78829+ mnt = va_arg(ap, struct vfsmount *);
78830+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
78831+ break;
78832+ case GR_STR_FILENAME:
78833+ str1 = va_arg(ap, char *);
78834+ dentry = va_arg(ap, struct dentry *);
78835+ mnt = va_arg(ap, struct vfsmount *);
78836+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
78837+ break;
78838+ case GR_FILENAME_STR:
78839+ dentry = va_arg(ap, struct dentry *);
78840+ mnt = va_arg(ap, struct vfsmount *);
78841+ str1 = va_arg(ap, char *);
78842+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
78843+ break;
78844+ case GR_FILENAME_TWO_INT:
78845+ dentry = va_arg(ap, struct dentry *);
78846+ mnt = va_arg(ap, struct vfsmount *);
78847+ num1 = va_arg(ap, int);
78848+ num2 = va_arg(ap, int);
78849+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
78850+ break;
78851+ case GR_FILENAME_TWO_INT_STR:
78852+ dentry = va_arg(ap, struct dentry *);
78853+ mnt = va_arg(ap, struct vfsmount *);
78854+ num1 = va_arg(ap, int);
78855+ num2 = va_arg(ap, int);
78856+ str1 = va_arg(ap, char *);
78857+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
78858+ break;
78859+ case GR_TEXTREL:
78860+ file = va_arg(ap, struct file *);
78861+ ulong1 = va_arg(ap, unsigned long);
78862+ ulong2 = va_arg(ap, unsigned long);
78863+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
78864+ break;
78865+ case GR_PTRACE:
78866+ task = va_arg(ap, struct task_struct *);
78867+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
78868+ break;
78869+ case GR_RESOURCE:
78870+ task = va_arg(ap, struct task_struct *);
78871+ cred = __task_cred(task);
78872+ pcred = __task_cred(task->real_parent);
78873+ ulong1 = va_arg(ap, unsigned long);
78874+ str1 = va_arg(ap, char *);
78875+ ulong2 = va_arg(ap, unsigned long);
78876+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78877+ break;
78878+ case GR_CAP:
78879+ task = va_arg(ap, struct task_struct *);
78880+ cred = __task_cred(task);
78881+ pcred = __task_cred(task->real_parent);
78882+ str1 = va_arg(ap, char *);
78883+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78884+ break;
78885+ case GR_SIG:
78886+ str1 = va_arg(ap, char *);
78887+ voidptr = va_arg(ap, void *);
78888+ gr_log_middle_varargs(audit, msg, str1, voidptr);
78889+ break;
78890+ case GR_SIG2:
78891+ task = va_arg(ap, struct task_struct *);
78892+ cred = __task_cred(task);
78893+ pcred = __task_cred(task->real_parent);
78894+ num1 = va_arg(ap, int);
78895+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78896+ break;
78897+ case GR_CRASH1:
78898+ task = va_arg(ap, struct task_struct *);
78899+ cred = __task_cred(task);
78900+ pcred = __task_cred(task->real_parent);
78901+ ulong1 = va_arg(ap, unsigned long);
78902+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
78903+ break;
78904+ case GR_CRASH2:
78905+ task = va_arg(ap, struct task_struct *);
78906+ cred = __task_cred(task);
78907+ pcred = __task_cred(task->real_parent);
78908+ ulong1 = va_arg(ap, unsigned long);
78909+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
78910+ break;
78911+ case GR_RWXMAP:
78912+ file = va_arg(ap, struct file *);
78913+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
78914+ break;
78915+ case GR_RWXMAPVMA:
78916+ vma = va_arg(ap, struct vm_area_struct *);
78917+ if (vma->vm_file)
78918+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
78919+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
78920+ str1 = "<stack>";
78921+ else if (vma->vm_start <= current->mm->brk &&
78922+ vma->vm_end >= current->mm->start_brk)
78923+ str1 = "<heap>";
78924+ else
78925+ str1 = "<anonymous mapping>";
78926+ gr_log_middle_varargs(audit, msg, str1);
78927+ break;
78928+ case GR_PSACCT:
78929+ {
78930+ unsigned int wday, cday;
78931+ __u8 whr, chr;
78932+ __u8 wmin, cmin;
78933+ __u8 wsec, csec;
78934+ char cur_tty[64] = { 0 };
78935+ char parent_tty[64] = { 0 };
78936+
78937+ task = va_arg(ap, struct task_struct *);
78938+ wday = va_arg(ap, unsigned int);
78939+ cday = va_arg(ap, unsigned int);
78940+ whr = va_arg(ap, int);
78941+ chr = va_arg(ap, int);
78942+ wmin = va_arg(ap, int);
78943+ cmin = va_arg(ap, int);
78944+ wsec = va_arg(ap, int);
78945+ csec = va_arg(ap, int);
78946+ ulong1 = va_arg(ap, unsigned long);
78947+ cred = __task_cred(task);
78948+ pcred = __task_cred(task->real_parent);
78949+
78950+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78951+ }
78952+ break;
78953+ default:
78954+ gr_log_middle(audit, msg, ap);
78955+ }
78956+ va_end(ap);
78957+ // these don't need DEFAULTSECARGS printed on the end
78958+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
78959+ gr_log_end(audit, 0);
78960+ else
78961+ gr_log_end(audit, 1);
78962+ END_LOCKS(audit);
78963+}
78964diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
78965new file mode 100644
78966index 0000000..0e39d8c
78967--- /dev/null
78968+++ b/grsecurity/grsec_mem.c
78969@@ -0,0 +1,48 @@
78970+#include <linux/kernel.h>
78971+#include <linux/sched.h>
78972+#include <linux/mm.h>
78973+#include <linux/mman.h>
78974+#include <linux/module.h>
78975+#include <linux/grinternal.h>
78976+
78977+void gr_handle_msr_write(void)
78978+{
78979+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
78980+ return;
78981+}
78982+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
78983+
78984+void
78985+gr_handle_ioperm(void)
78986+{
78987+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
78988+ return;
78989+}
78990+
78991+void
78992+gr_handle_iopl(void)
78993+{
78994+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
78995+ return;
78996+}
78997+
78998+void
78999+gr_handle_mem_readwrite(u64 from, u64 to)
79000+{
79001+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
79002+ return;
79003+}
79004+
79005+void
79006+gr_handle_vm86(void)
79007+{
79008+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
79009+ return;
79010+}
79011+
79012+void
79013+gr_log_badprocpid(const char *entry)
79014+{
79015+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
79016+ return;
79017+}
79018diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
79019new file mode 100644
79020index 0000000..cd9e124
79021--- /dev/null
79022+++ b/grsecurity/grsec_mount.c
79023@@ -0,0 +1,65 @@
79024+#include <linux/kernel.h>
79025+#include <linux/sched.h>
79026+#include <linux/mount.h>
79027+#include <linux/major.h>
79028+#include <linux/grsecurity.h>
79029+#include <linux/grinternal.h>
79030+
79031+void
79032+gr_log_remount(const char *devname, const int retval)
79033+{
79034+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
79035+ if (grsec_enable_mount && (retval >= 0))
79036+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
79037+#endif
79038+ return;
79039+}
79040+
79041+void
79042+gr_log_unmount(const char *devname, const int retval)
79043+{
79044+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
79045+ if (grsec_enable_mount && (retval >= 0))
79046+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
79047+#endif
79048+ return;
79049+}
79050+
79051+void
79052+gr_log_mount(const char *from, const char *to, const int retval)
79053+{
79054+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
79055+ if (grsec_enable_mount && (retval >= 0))
79056+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
79057+#endif
79058+ return;
79059+}
79060+
79061+int
79062+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
79063+{
79064+#ifdef CONFIG_GRKERNSEC_ROFS
79065+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
79066+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
79067+ return -EPERM;
79068+ } else
79069+ return 0;
79070+#endif
79071+ return 0;
79072+}
79073+
79074+int
79075+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
79076+{
79077+#ifdef CONFIG_GRKERNSEC_ROFS
79078+ struct inode *inode = dentry->d_inode;
79079+
79080+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
79081+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
79082+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
79083+ return -EPERM;
79084+ } else
79085+ return 0;
79086+#endif
79087+ return 0;
79088+}
79089diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
79090new file mode 100644
79091index 0000000..6ee9d50
79092--- /dev/null
79093+++ b/grsecurity/grsec_pax.c
79094@@ -0,0 +1,45 @@
79095+#include <linux/kernel.h>
79096+#include <linux/sched.h>
79097+#include <linux/mm.h>
79098+#include <linux/file.h>
79099+#include <linux/grinternal.h>
79100+#include <linux/grsecurity.h>
79101+
79102+void
79103+gr_log_textrel(struct vm_area_struct * vma)
79104+{
79105+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79106+ if (grsec_enable_log_rwxmaps)
79107+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
79108+#endif
79109+ return;
79110+}
79111+
79112+void gr_log_ptgnustack(struct file *file)
79113+{
79114+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79115+ if (grsec_enable_log_rwxmaps)
79116+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
79117+#endif
79118+ return;
79119+}
79120+
79121+void
79122+gr_log_rwxmmap(struct file *file)
79123+{
79124+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79125+ if (grsec_enable_log_rwxmaps)
79126+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
79127+#endif
79128+ return;
79129+}
79130+
79131+void
79132+gr_log_rwxmprotect(struct vm_area_struct *vma)
79133+{
79134+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79135+ if (grsec_enable_log_rwxmaps)
79136+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
79137+#endif
79138+ return;
79139+}
79140diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
79141new file mode 100644
79142index 0000000..2005a3a
79143--- /dev/null
79144+++ b/grsecurity/grsec_proc.c
79145@@ -0,0 +1,20 @@
79146+#include <linux/kernel.h>
79147+#include <linux/sched.h>
79148+#include <linux/grsecurity.h>
79149+#include <linux/grinternal.h>
79150+
79151+int gr_proc_is_restricted(void)
79152+{
79153+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
79154+ const struct cred *cred = current_cred();
79155+#endif
79156+
79157+#ifdef CONFIG_GRKERNSEC_PROC_USER
79158+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
79159+ return -EACCES;
79160+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
79161+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
79162+ return -EACCES;
79163+#endif
79164+ return 0;
79165+}
79166diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
79167new file mode 100644
79168index 0000000..f7f29aa
79169--- /dev/null
79170+++ b/grsecurity/grsec_ptrace.c
79171@@ -0,0 +1,30 @@
79172+#include <linux/kernel.h>
79173+#include <linux/sched.h>
79174+#include <linux/grinternal.h>
79175+#include <linux/security.h>
79176+
79177+void
79178+gr_audit_ptrace(struct task_struct *task)
79179+{
79180+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
79181+ if (grsec_enable_audit_ptrace)
79182+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
79183+#endif
79184+ return;
79185+}
79186+
79187+int
79188+gr_ptrace_readexec(struct file *file, int unsafe_flags)
79189+{
79190+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
79191+ const struct dentry *dentry = file->f_path.dentry;
79192+ const struct vfsmount *mnt = file->f_path.mnt;
79193+
79194+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
79195+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
79196+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
79197+ return -EACCES;
79198+ }
79199+#endif
79200+ return 0;
79201+}
79202diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
79203new file mode 100644
79204index 0000000..3860c7e
79205--- /dev/null
79206+++ b/grsecurity/grsec_sig.c
79207@@ -0,0 +1,236 @@
79208+#include <linux/kernel.h>
79209+#include <linux/sched.h>
79210+#include <linux/fs.h>
79211+#include <linux/delay.h>
79212+#include <linux/grsecurity.h>
79213+#include <linux/grinternal.h>
79214+#include <linux/hardirq.h>
79215+
79216+char *signames[] = {
79217+ [SIGSEGV] = "Segmentation fault",
79218+ [SIGILL] = "Illegal instruction",
79219+ [SIGABRT] = "Abort",
79220+ [SIGBUS] = "Invalid alignment/Bus error"
79221+};
79222+
79223+void
79224+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
79225+{
79226+#ifdef CONFIG_GRKERNSEC_SIGNAL
79227+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
79228+ (sig == SIGABRT) || (sig == SIGBUS))) {
79229+ if (task_pid_nr(t) == task_pid_nr(current)) {
79230+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
79231+ } else {
79232+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
79233+ }
79234+ }
79235+#endif
79236+ return;
79237+}
79238+
79239+int
79240+gr_handle_signal(const struct task_struct *p, const int sig)
79241+{
79242+#ifdef CONFIG_GRKERNSEC
79243+ /* ignore the 0 signal for protected task checks */
79244+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
79245+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
79246+ return -EPERM;
79247+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
79248+ return -EPERM;
79249+ }
79250+#endif
79251+ return 0;
79252+}
79253+
79254+#ifdef CONFIG_GRKERNSEC
79255+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
79256+
79257+int gr_fake_force_sig(int sig, struct task_struct *t)
79258+{
79259+ unsigned long int flags;
79260+ int ret, blocked, ignored;
79261+ struct k_sigaction *action;
79262+
79263+ spin_lock_irqsave(&t->sighand->siglock, flags);
79264+ action = &t->sighand->action[sig-1];
79265+ ignored = action->sa.sa_handler == SIG_IGN;
79266+ blocked = sigismember(&t->blocked, sig);
79267+ if (blocked || ignored) {
79268+ action->sa.sa_handler = SIG_DFL;
79269+ if (blocked) {
79270+ sigdelset(&t->blocked, sig);
79271+ recalc_sigpending_and_wake(t);
79272+ }
79273+ }
79274+ if (action->sa.sa_handler == SIG_DFL)
79275+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
79276+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
79277+
79278+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
79279+
79280+ return ret;
79281+}
79282+#endif
79283+
79284+#define GR_USER_BAN_TIME (15 * 60)
79285+#define GR_DAEMON_BRUTE_TIME (30 * 60)
79286+
79287+void gr_handle_brute_attach(int dumpable)
79288+{
79289+#ifdef CONFIG_GRKERNSEC_BRUTE
79290+ struct task_struct *p = current;
79291+ kuid_t uid = GLOBAL_ROOT_UID;
79292+ int daemon = 0;
79293+
79294+ if (!grsec_enable_brute)
79295+ return;
79296+
79297+ rcu_read_lock();
79298+ read_lock(&tasklist_lock);
79299+ read_lock(&grsec_exec_file_lock);
79300+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
79301+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
79302+ p->real_parent->brute = 1;
79303+ daemon = 1;
79304+ } else {
79305+ const struct cred *cred = __task_cred(p), *cred2;
79306+ struct task_struct *tsk, *tsk2;
79307+
79308+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
79309+ struct user_struct *user;
79310+
79311+ uid = cred->uid;
79312+
79313+ /* this ref is put upon exec once the ban has expired */
79314+ user = find_user(uid);
79315+ if (user == NULL)
79316+ goto unlock;
79317+ user->suid_banned = 1;
79318+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
79319+ if (user->suid_ban_expires == ~0UL)
79320+ user->suid_ban_expires--;
79321+
79322+ /* only kill other threads of the same binary, from the same user */
79323+ do_each_thread(tsk2, tsk) {
79324+ cred2 = __task_cred(tsk);
79325+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
79326+ gr_fake_force_sig(SIGKILL, tsk);
79327+ } while_each_thread(tsk2, tsk);
79328+ }
79329+ }
79330+unlock:
79331+ read_unlock(&grsec_exec_file_lock);
79332+ read_unlock(&tasklist_lock);
79333+ rcu_read_unlock();
79334+
79335+ if (gr_is_global_nonroot(uid))
79336+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
79337+ else if (daemon)
79338+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
79339+
79340+#endif
79341+ return;
79342+}
79343+
79344+void gr_handle_brute_check(void)
79345+{
79346+#ifdef CONFIG_GRKERNSEC_BRUTE
79347+ struct task_struct *p = current;
79348+
79349+ if (unlikely(p->brute)) {
79350+ if (!grsec_enable_brute)
79351+ p->brute = 0;
79352+ else if (time_before(get_seconds(), p->brute_expires))
79353+ msleep(30 * 1000);
79354+ }
79355+#endif
79356+ return;
79357+}
79358+
79359+void gr_handle_kernel_exploit(void)
79360+{
79361+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
79362+ const struct cred *cred;
79363+ struct task_struct *tsk, *tsk2;
79364+ struct user_struct *user;
79365+ kuid_t uid;
79366+
79367+ if (in_irq() || in_serving_softirq() || in_nmi())
79368+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
79369+
79370+ uid = current_uid();
79371+
79372+ if (gr_is_global_root(uid))
79373+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
79374+ else {
79375+ /* kill all the processes of this user, hold a reference
79376+ to their user_struct, and prevent them from creating
79377+ another process until system reset
79378+ */
79379+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
79380+ GR_GLOBAL_UID(uid));
79381+ /* we intentionally leak this ref */
79382+ user = get_uid(current->cred->user);
79383+ if (user)
79384+ user->kernel_banned = 1;
79385+
79386+ /* kill all processes of this user */
79387+ read_lock(&tasklist_lock);
79388+ do_each_thread(tsk2, tsk) {
79389+ cred = __task_cred(tsk);
79390+ if (uid_eq(cred->uid, uid))
79391+ gr_fake_force_sig(SIGKILL, tsk);
79392+ } while_each_thread(tsk2, tsk);
79393+ read_unlock(&tasklist_lock);
79394+ }
79395+#endif
79396+}
79397+
79398+#ifdef CONFIG_GRKERNSEC_BRUTE
79399+static bool suid_ban_expired(struct user_struct *user)
79400+{
79401+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
79402+ user->suid_banned = 0;
79403+ user->suid_ban_expires = 0;
79404+ free_uid(user);
79405+ return true;
79406+ }
79407+
79408+ return false;
79409+}
79410+#endif
79411+
79412+int gr_process_kernel_exec_ban(void)
79413+{
79414+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
79415+ if (unlikely(current->cred->user->kernel_banned))
79416+ return -EPERM;
79417+#endif
79418+ return 0;
79419+}
79420+
79421+int gr_process_kernel_setuid_ban(struct user_struct *user)
79422+{
79423+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
79424+ if (unlikely(user->kernel_banned))
79425+ gr_fake_force_sig(SIGKILL, current);
79426+#endif
79427+ return 0;
79428+}
79429+
79430+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
79431+{
79432+#ifdef CONFIG_GRKERNSEC_BRUTE
79433+ struct user_struct *user = current->cred->user;
79434+ if (unlikely(user->suid_banned)) {
79435+ if (suid_ban_expired(user))
79436+ return 0;
79437+ /* disallow execution of suid binaries only */
79438+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
79439+ return -EPERM;
79440+ }
79441+#endif
79442+ return 0;
79443+}
79444diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
79445new file mode 100644
79446index 0000000..c0aef3a
79447--- /dev/null
79448+++ b/grsecurity/grsec_sock.c
79449@@ -0,0 +1,244 @@
79450+#include <linux/kernel.h>
79451+#include <linux/module.h>
79452+#include <linux/sched.h>
79453+#include <linux/file.h>
79454+#include <linux/net.h>
79455+#include <linux/in.h>
79456+#include <linux/ip.h>
79457+#include <net/sock.h>
79458+#include <net/inet_sock.h>
79459+#include <linux/grsecurity.h>
79460+#include <linux/grinternal.h>
79461+#include <linux/gracl.h>
79462+
79463+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
79464+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
79465+
79466+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
79467+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
79468+
79469+#ifdef CONFIG_UNIX_MODULE
79470+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
79471+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
79472+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
79473+EXPORT_SYMBOL_GPL(gr_handle_create);
79474+#endif
79475+
79476+#ifdef CONFIG_GRKERNSEC
79477+#define gr_conn_table_size 32749
79478+struct conn_table_entry {
79479+ struct conn_table_entry *next;
79480+ struct signal_struct *sig;
79481+};
79482+
79483+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
79484+DEFINE_SPINLOCK(gr_conn_table_lock);
79485+
79486+extern const char * gr_socktype_to_name(unsigned char type);
79487+extern const char * gr_proto_to_name(unsigned char proto);
79488+extern const char * gr_sockfamily_to_name(unsigned char family);
79489+
79490+static __inline__ int
79491+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
79492+{
79493+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
79494+}
79495+
79496+static __inline__ int
79497+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
79498+ __u16 sport, __u16 dport)
79499+{
79500+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
79501+ sig->gr_sport == sport && sig->gr_dport == dport))
79502+ return 1;
79503+ else
79504+ return 0;
79505+}
79506+
79507+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
79508+{
79509+ struct conn_table_entry **match;
79510+ unsigned int index;
79511+
79512+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
79513+ sig->gr_sport, sig->gr_dport,
79514+ gr_conn_table_size);
79515+
79516+ newent->sig = sig;
79517+
79518+ match = &gr_conn_table[index];
79519+ newent->next = *match;
79520+ *match = newent;
79521+
79522+ return;
79523+}
79524+
79525+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
79526+{
79527+ struct conn_table_entry *match, *last = NULL;
79528+ unsigned int index;
79529+
79530+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
79531+ sig->gr_sport, sig->gr_dport,
79532+ gr_conn_table_size);
79533+
79534+ match = gr_conn_table[index];
79535+ while (match && !conn_match(match->sig,
79536+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
79537+ sig->gr_dport)) {
79538+ last = match;
79539+ match = match->next;
79540+ }
79541+
79542+ if (match) {
79543+ if (last)
79544+ last->next = match->next;
79545+ else
79546+ gr_conn_table[index] = NULL;
79547+ kfree(match);
79548+ }
79549+
79550+ return;
79551+}
79552+
79553+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
79554+ __u16 sport, __u16 dport)
79555+{
79556+ struct conn_table_entry *match;
79557+ unsigned int index;
79558+
79559+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
79560+
79561+ match = gr_conn_table[index];
79562+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
79563+ match = match->next;
79564+
79565+ if (match)
79566+ return match->sig;
79567+ else
79568+ return NULL;
79569+}
79570+
79571+#endif
79572+
79573+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
79574+{
79575+#ifdef CONFIG_GRKERNSEC
79576+ struct signal_struct *sig = task->signal;
79577+ struct conn_table_entry *newent;
79578+
79579+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
79580+ if (newent == NULL)
79581+ return;
79582+ /* no bh lock needed since we are called with bh disabled */
79583+ spin_lock(&gr_conn_table_lock);
79584+ gr_del_task_from_ip_table_nolock(sig);
79585+ sig->gr_saddr = inet->inet_rcv_saddr;
79586+ sig->gr_daddr = inet->inet_daddr;
79587+ sig->gr_sport = inet->inet_sport;
79588+ sig->gr_dport = inet->inet_dport;
79589+ gr_add_to_task_ip_table_nolock(sig, newent);
79590+ spin_unlock(&gr_conn_table_lock);
79591+#endif
79592+ return;
79593+}
79594+
79595+void gr_del_task_from_ip_table(struct task_struct *task)
79596+{
79597+#ifdef CONFIG_GRKERNSEC
79598+ spin_lock_bh(&gr_conn_table_lock);
79599+ gr_del_task_from_ip_table_nolock(task->signal);
79600+ spin_unlock_bh(&gr_conn_table_lock);
79601+#endif
79602+ return;
79603+}
79604+
79605+void
79606+gr_attach_curr_ip(const struct sock *sk)
79607+{
79608+#ifdef CONFIG_GRKERNSEC
79609+ struct signal_struct *p, *set;
79610+ const struct inet_sock *inet = inet_sk(sk);
79611+
79612+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
79613+ return;
79614+
79615+ set = current->signal;
79616+
79617+ spin_lock_bh(&gr_conn_table_lock);
79618+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
79619+ inet->inet_dport, inet->inet_sport);
79620+ if (unlikely(p != NULL)) {
79621+ set->curr_ip = p->curr_ip;
79622+ set->used_accept = 1;
79623+ gr_del_task_from_ip_table_nolock(p);
79624+ spin_unlock_bh(&gr_conn_table_lock);
79625+ return;
79626+ }
79627+ spin_unlock_bh(&gr_conn_table_lock);
79628+
79629+ set->curr_ip = inet->inet_daddr;
79630+ set->used_accept = 1;
79631+#endif
79632+ return;
79633+}
79634+
79635+int
79636+gr_handle_sock_all(const int family, const int type, const int protocol)
79637+{
79638+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
79639+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
79640+ (family != AF_UNIX)) {
79641+ if (family == AF_INET)
79642+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
79643+ else
79644+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
79645+ return -EACCES;
79646+ }
79647+#endif
79648+ return 0;
79649+}
79650+
79651+int
79652+gr_handle_sock_server(const struct sockaddr *sck)
79653+{
79654+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
79655+ if (grsec_enable_socket_server &&
79656+ in_group_p(grsec_socket_server_gid) &&
79657+ sck && (sck->sa_family != AF_UNIX) &&
79658+ (sck->sa_family != AF_LOCAL)) {
79659+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
79660+ return -EACCES;
79661+ }
79662+#endif
79663+ return 0;
79664+}
79665+
79666+int
79667+gr_handle_sock_server_other(const struct sock *sck)
79668+{
79669+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
79670+ if (grsec_enable_socket_server &&
79671+ in_group_p(grsec_socket_server_gid) &&
79672+ sck && (sck->sk_family != AF_UNIX) &&
79673+ (sck->sk_family != AF_LOCAL)) {
79674+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
79675+ return -EACCES;
79676+ }
79677+#endif
79678+ return 0;
79679+}
79680+
79681+int
79682+gr_handle_sock_client(const struct sockaddr *sck)
79683+{
79684+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
79685+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
79686+ sck && (sck->sa_family != AF_UNIX) &&
79687+ (sck->sa_family != AF_LOCAL)) {
79688+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
79689+ return -EACCES;
79690+ }
79691+#endif
79692+ return 0;
79693+}
79694diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
79695new file mode 100644
79696index 0000000..8159888
79697--- /dev/null
79698+++ b/grsecurity/grsec_sysctl.c
79699@@ -0,0 +1,479 @@
79700+#include <linux/kernel.h>
79701+#include <linux/sched.h>
79702+#include <linux/sysctl.h>
79703+#include <linux/grsecurity.h>
79704+#include <linux/grinternal.h>
79705+
79706+int
79707+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
79708+{
79709+#ifdef CONFIG_GRKERNSEC_SYSCTL
79710+ if (dirname == NULL || name == NULL)
79711+ return 0;
79712+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
79713+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
79714+ return -EACCES;
79715+ }
79716+#endif
79717+ return 0;
79718+}
79719+
79720+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
79721+static int __maybe_unused __read_only one = 1;
79722+#endif
79723+
79724+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
79725+ defined(CONFIG_GRKERNSEC_DENYUSB)
79726+struct ctl_table grsecurity_table[] = {
79727+#ifdef CONFIG_GRKERNSEC_SYSCTL
79728+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
79729+#ifdef CONFIG_GRKERNSEC_IO
79730+ {
79731+ .procname = "disable_priv_io",
79732+ .data = &grsec_disable_privio,
79733+ .maxlen = sizeof(int),
79734+ .mode = 0600,
79735+ .proc_handler = &proc_dointvec,
79736+ },
79737+#endif
79738+#endif
79739+#ifdef CONFIG_GRKERNSEC_LINK
79740+ {
79741+ .procname = "linking_restrictions",
79742+ .data = &grsec_enable_link,
79743+ .maxlen = sizeof(int),
79744+ .mode = 0600,
79745+ .proc_handler = &proc_dointvec,
79746+ },
79747+#endif
79748+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
79749+ {
79750+ .procname = "enforce_symlinksifowner",
79751+ .data = &grsec_enable_symlinkown,
79752+ .maxlen = sizeof(int),
79753+ .mode = 0600,
79754+ .proc_handler = &proc_dointvec,
79755+ },
79756+ {
79757+ .procname = "symlinkown_gid",
79758+ .data = &grsec_symlinkown_gid,
79759+ .maxlen = sizeof(int),
79760+ .mode = 0600,
79761+ .proc_handler = &proc_dointvec,
79762+ },
79763+#endif
79764+#ifdef CONFIG_GRKERNSEC_BRUTE
79765+ {
79766+ .procname = "deter_bruteforce",
79767+ .data = &grsec_enable_brute,
79768+ .maxlen = sizeof(int),
79769+ .mode = 0600,
79770+ .proc_handler = &proc_dointvec,
79771+ },
79772+#endif
79773+#ifdef CONFIG_GRKERNSEC_FIFO
79774+ {
79775+ .procname = "fifo_restrictions",
79776+ .data = &grsec_enable_fifo,
79777+ .maxlen = sizeof(int),
79778+ .mode = 0600,
79779+ .proc_handler = &proc_dointvec,
79780+ },
79781+#endif
79782+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
79783+ {
79784+ .procname = "ptrace_readexec",
79785+ .data = &grsec_enable_ptrace_readexec,
79786+ .maxlen = sizeof(int),
79787+ .mode = 0600,
79788+ .proc_handler = &proc_dointvec,
79789+ },
79790+#endif
79791+#ifdef CONFIG_GRKERNSEC_SETXID
79792+ {
79793+ .procname = "consistent_setxid",
79794+ .data = &grsec_enable_setxid,
79795+ .maxlen = sizeof(int),
79796+ .mode = 0600,
79797+ .proc_handler = &proc_dointvec,
79798+ },
79799+#endif
79800+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79801+ {
79802+ .procname = "ip_blackhole",
79803+ .data = &grsec_enable_blackhole,
79804+ .maxlen = sizeof(int),
79805+ .mode = 0600,
79806+ .proc_handler = &proc_dointvec,
79807+ },
79808+ {
79809+ .procname = "lastack_retries",
79810+ .data = &grsec_lastack_retries,
79811+ .maxlen = sizeof(int),
79812+ .mode = 0600,
79813+ .proc_handler = &proc_dointvec,
79814+ },
79815+#endif
79816+#ifdef CONFIG_GRKERNSEC_EXECLOG
79817+ {
79818+ .procname = "exec_logging",
79819+ .data = &grsec_enable_execlog,
79820+ .maxlen = sizeof(int),
79821+ .mode = 0600,
79822+ .proc_handler = &proc_dointvec,
79823+ },
79824+#endif
79825+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79826+ {
79827+ .procname = "rwxmap_logging",
79828+ .data = &grsec_enable_log_rwxmaps,
79829+ .maxlen = sizeof(int),
79830+ .mode = 0600,
79831+ .proc_handler = &proc_dointvec,
79832+ },
79833+#endif
79834+#ifdef CONFIG_GRKERNSEC_SIGNAL
79835+ {
79836+ .procname = "signal_logging",
79837+ .data = &grsec_enable_signal,
79838+ .maxlen = sizeof(int),
79839+ .mode = 0600,
79840+ .proc_handler = &proc_dointvec,
79841+ },
79842+#endif
79843+#ifdef CONFIG_GRKERNSEC_FORKFAIL
79844+ {
79845+ .procname = "forkfail_logging",
79846+ .data = &grsec_enable_forkfail,
79847+ .maxlen = sizeof(int),
79848+ .mode = 0600,
79849+ .proc_handler = &proc_dointvec,
79850+ },
79851+#endif
79852+#ifdef CONFIG_GRKERNSEC_TIME
79853+ {
79854+ .procname = "timechange_logging",
79855+ .data = &grsec_enable_time,
79856+ .maxlen = sizeof(int),
79857+ .mode = 0600,
79858+ .proc_handler = &proc_dointvec,
79859+ },
79860+#endif
79861+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
79862+ {
79863+ .procname = "chroot_deny_shmat",
79864+ .data = &grsec_enable_chroot_shmat,
79865+ .maxlen = sizeof(int),
79866+ .mode = 0600,
79867+ .proc_handler = &proc_dointvec,
79868+ },
79869+#endif
79870+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
79871+ {
79872+ .procname = "chroot_deny_unix",
79873+ .data = &grsec_enable_chroot_unix,
79874+ .maxlen = sizeof(int),
79875+ .mode = 0600,
79876+ .proc_handler = &proc_dointvec,
79877+ },
79878+#endif
79879+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
79880+ {
79881+ .procname = "chroot_deny_mount",
79882+ .data = &grsec_enable_chroot_mount,
79883+ .maxlen = sizeof(int),
79884+ .mode = 0600,
79885+ .proc_handler = &proc_dointvec,
79886+ },
79887+#endif
79888+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
79889+ {
79890+ .procname = "chroot_deny_fchdir",
79891+ .data = &grsec_enable_chroot_fchdir,
79892+ .maxlen = sizeof(int),
79893+ .mode = 0600,
79894+ .proc_handler = &proc_dointvec,
79895+ },
79896+#endif
79897+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
79898+ {
79899+ .procname = "chroot_deny_chroot",
79900+ .data = &grsec_enable_chroot_double,
79901+ .maxlen = sizeof(int),
79902+ .mode = 0600,
79903+ .proc_handler = &proc_dointvec,
79904+ },
79905+#endif
79906+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
79907+ {
79908+ .procname = "chroot_deny_pivot",
79909+ .data = &grsec_enable_chroot_pivot,
79910+ .maxlen = sizeof(int),
79911+ .mode = 0600,
79912+ .proc_handler = &proc_dointvec,
79913+ },
79914+#endif
79915+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
79916+ {
79917+ .procname = "chroot_enforce_chdir",
79918+ .data = &grsec_enable_chroot_chdir,
79919+ .maxlen = sizeof(int),
79920+ .mode = 0600,
79921+ .proc_handler = &proc_dointvec,
79922+ },
79923+#endif
79924+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
79925+ {
79926+ .procname = "chroot_deny_chmod",
79927+ .data = &grsec_enable_chroot_chmod,
79928+ .maxlen = sizeof(int),
79929+ .mode = 0600,
79930+ .proc_handler = &proc_dointvec,
79931+ },
79932+#endif
79933+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
79934+ {
79935+ .procname = "chroot_deny_mknod",
79936+ .data = &grsec_enable_chroot_mknod,
79937+ .maxlen = sizeof(int),
79938+ .mode = 0600,
79939+ .proc_handler = &proc_dointvec,
79940+ },
79941+#endif
79942+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
79943+ {
79944+ .procname = "chroot_restrict_nice",
79945+ .data = &grsec_enable_chroot_nice,
79946+ .maxlen = sizeof(int),
79947+ .mode = 0600,
79948+ .proc_handler = &proc_dointvec,
79949+ },
79950+#endif
79951+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
79952+ {
79953+ .procname = "chroot_execlog",
79954+ .data = &grsec_enable_chroot_execlog,
79955+ .maxlen = sizeof(int),
79956+ .mode = 0600,
79957+ .proc_handler = &proc_dointvec,
79958+ },
79959+#endif
79960+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
79961+ {
79962+ .procname = "chroot_caps",
79963+ .data = &grsec_enable_chroot_caps,
79964+ .maxlen = sizeof(int),
79965+ .mode = 0600,
79966+ .proc_handler = &proc_dointvec,
79967+ },
79968+#endif
79969+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
79970+ {
79971+ .procname = "chroot_deny_sysctl",
79972+ .data = &grsec_enable_chroot_sysctl,
79973+ .maxlen = sizeof(int),
79974+ .mode = 0600,
79975+ .proc_handler = &proc_dointvec,
79976+ },
79977+#endif
79978+#ifdef CONFIG_GRKERNSEC_TPE
79979+ {
79980+ .procname = "tpe",
79981+ .data = &grsec_enable_tpe,
79982+ .maxlen = sizeof(int),
79983+ .mode = 0600,
79984+ .proc_handler = &proc_dointvec,
79985+ },
79986+ {
79987+ .procname = "tpe_gid",
79988+ .data = &grsec_tpe_gid,
79989+ .maxlen = sizeof(int),
79990+ .mode = 0600,
79991+ .proc_handler = &proc_dointvec,
79992+ },
79993+#endif
79994+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
79995+ {
79996+ .procname = "tpe_invert",
79997+ .data = &grsec_enable_tpe_invert,
79998+ .maxlen = sizeof(int),
79999+ .mode = 0600,
80000+ .proc_handler = &proc_dointvec,
80001+ },
80002+#endif
80003+#ifdef CONFIG_GRKERNSEC_TPE_ALL
80004+ {
80005+ .procname = "tpe_restrict_all",
80006+ .data = &grsec_enable_tpe_all,
80007+ .maxlen = sizeof(int),
80008+ .mode = 0600,
80009+ .proc_handler = &proc_dointvec,
80010+ },
80011+#endif
80012+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
80013+ {
80014+ .procname = "socket_all",
80015+ .data = &grsec_enable_socket_all,
80016+ .maxlen = sizeof(int),
80017+ .mode = 0600,
80018+ .proc_handler = &proc_dointvec,
80019+ },
80020+ {
80021+ .procname = "socket_all_gid",
80022+ .data = &grsec_socket_all_gid,
80023+ .maxlen = sizeof(int),
80024+ .mode = 0600,
80025+ .proc_handler = &proc_dointvec,
80026+ },
80027+#endif
80028+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
80029+ {
80030+ .procname = "socket_client",
80031+ .data = &grsec_enable_socket_client,
80032+ .maxlen = sizeof(int),
80033+ .mode = 0600,
80034+ .proc_handler = &proc_dointvec,
80035+ },
80036+ {
80037+ .procname = "socket_client_gid",
80038+ .data = &grsec_socket_client_gid,
80039+ .maxlen = sizeof(int),
80040+ .mode = 0600,
80041+ .proc_handler = &proc_dointvec,
80042+ },
80043+#endif
80044+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
80045+ {
80046+ .procname = "socket_server",
80047+ .data = &grsec_enable_socket_server,
80048+ .maxlen = sizeof(int),
80049+ .mode = 0600,
80050+ .proc_handler = &proc_dointvec,
80051+ },
80052+ {
80053+ .procname = "socket_server_gid",
80054+ .data = &grsec_socket_server_gid,
80055+ .maxlen = sizeof(int),
80056+ .mode = 0600,
80057+ .proc_handler = &proc_dointvec,
80058+ },
80059+#endif
80060+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
80061+ {
80062+ .procname = "audit_group",
80063+ .data = &grsec_enable_group,
80064+ .maxlen = sizeof(int),
80065+ .mode = 0600,
80066+ .proc_handler = &proc_dointvec,
80067+ },
80068+ {
80069+ .procname = "audit_gid",
80070+ .data = &grsec_audit_gid,
80071+ .maxlen = sizeof(int),
80072+ .mode = 0600,
80073+ .proc_handler = &proc_dointvec,
80074+ },
80075+#endif
80076+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
80077+ {
80078+ .procname = "audit_chdir",
80079+ .data = &grsec_enable_chdir,
80080+ .maxlen = sizeof(int),
80081+ .mode = 0600,
80082+ .proc_handler = &proc_dointvec,
80083+ },
80084+#endif
80085+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
80086+ {
80087+ .procname = "audit_mount",
80088+ .data = &grsec_enable_mount,
80089+ .maxlen = sizeof(int),
80090+ .mode = 0600,
80091+ .proc_handler = &proc_dointvec,
80092+ },
80093+#endif
80094+#ifdef CONFIG_GRKERNSEC_DMESG
80095+ {
80096+ .procname = "dmesg",
80097+ .data = &grsec_enable_dmesg,
80098+ .maxlen = sizeof(int),
80099+ .mode = 0600,
80100+ .proc_handler = &proc_dointvec,
80101+ },
80102+#endif
80103+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
80104+ {
80105+ .procname = "chroot_findtask",
80106+ .data = &grsec_enable_chroot_findtask,
80107+ .maxlen = sizeof(int),
80108+ .mode = 0600,
80109+ .proc_handler = &proc_dointvec,
80110+ },
80111+#endif
80112+#ifdef CONFIG_GRKERNSEC_RESLOG
80113+ {
80114+ .procname = "resource_logging",
80115+ .data = &grsec_resource_logging,
80116+ .maxlen = sizeof(int),
80117+ .mode = 0600,
80118+ .proc_handler = &proc_dointvec,
80119+ },
80120+#endif
80121+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
80122+ {
80123+ .procname = "audit_ptrace",
80124+ .data = &grsec_enable_audit_ptrace,
80125+ .maxlen = sizeof(int),
80126+ .mode = 0600,
80127+ .proc_handler = &proc_dointvec,
80128+ },
80129+#endif
80130+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
80131+ {
80132+ .procname = "harden_ptrace",
80133+ .data = &grsec_enable_harden_ptrace,
80134+ .maxlen = sizeof(int),
80135+ .mode = 0600,
80136+ .proc_handler = &proc_dointvec,
80137+ },
80138+#endif
80139+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
80140+ {
80141+ .procname = "harden_ipc",
80142+ .data = &grsec_enable_harden_ipc,
80143+ .maxlen = sizeof(int),
80144+ .mode = 0600,
80145+ .proc_handler = &proc_dointvec,
80146+ },
80147+#endif
80148+ {
80149+ .procname = "grsec_lock",
80150+ .data = &grsec_lock,
80151+ .maxlen = sizeof(int),
80152+ .mode = 0600,
80153+ .proc_handler = &proc_dointvec,
80154+ },
80155+#endif
80156+#ifdef CONFIG_GRKERNSEC_ROFS
80157+ {
80158+ .procname = "romount_protect",
80159+ .data = &grsec_enable_rofs,
80160+ .maxlen = sizeof(int),
80161+ .mode = 0600,
80162+ .proc_handler = &proc_dointvec_minmax,
80163+ .extra1 = &one,
80164+ .extra2 = &one,
80165+ },
80166+#endif
80167+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
80168+ {
80169+ .procname = "deny_new_usb",
80170+ .data = &grsec_deny_new_usb,
80171+ .maxlen = sizeof(int),
80172+ .mode = 0600,
80173+ .proc_handler = &proc_dointvec,
80174+ },
80175+#endif
80176+ { }
80177+};
80178+#endif
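
Every row in the table is a plain integer toggle behind proc_dointvec, readable and writable only by root (mode 0600), and gr_handle_sysctl_mod() above additionally rejects any write under the "grsecurity" directory once grsec_lock is set to 1. A hedged sketch of flipping one knob from userspace; the /proc path is an assumption inferred from the .procname fields, since the table registration itself happens elsewhere in the patch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* assumed mount point for the table's .procname entries */
	const char *knob = "/proc/sys/kernel/grsecurity/tpe";
	int fd = open(knob, O_WRONLY);

	if (fd < 0) { perror(knob); return 1; }
	if (write(fd, "1\n", 2) != 2)
		perror("write");	/* fails with EACCES once grsec_lock is set */
	close(fd);
	return 0;
}
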
80179diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
80180new file mode 100644
80181index 0000000..61b514e
80182--- /dev/null
80183+++ b/grsecurity/grsec_time.c
80184@@ -0,0 +1,16 @@
80185+#include <linux/kernel.h>
80186+#include <linux/sched.h>
80187+#include <linux/grinternal.h>
80188+#include <linux/module.h>
80189+
80190+void
80191+gr_log_timechange(void)
80192+{
80193+#ifdef CONFIG_GRKERNSEC_TIME
80194+ if (grsec_enable_time)
80195+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
80196+#endif
80197+ return;
80198+}
80199+
80200+EXPORT_SYMBOL_GPL(gr_log_timechange);
80201diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
80202new file mode 100644
80203index 0000000..d1953de
80204--- /dev/null
80205+++ b/grsecurity/grsec_tpe.c
80206@@ -0,0 +1,78 @@
80207+#include <linux/kernel.h>
80208+#include <linux/sched.h>
80209+#include <linux/file.h>
80210+#include <linux/fs.h>
80211+#include <linux/grinternal.h>
80212+
80213+extern int gr_acl_tpe_check(void);
80214+
80215+int
80216+gr_tpe_allow(const struct file *file)
80217+{
80218+#ifdef CONFIG_GRKERNSEC
80219+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
80220+ struct inode *file_inode = file->f_path.dentry->d_inode;
80221+ const struct cred *cred = current_cred();
80222+ char *msg = NULL;
80223+ char *msg2 = NULL;
80224+
80225+ // never restrict root
80226+ if (gr_is_global_root(cred->uid))
80227+ return 1;
80228+
80229+ if (grsec_enable_tpe) {
80230+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
80231+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
80232+ msg = "not being in trusted group";
80233+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
80234+ msg = "being in untrusted group";
80235+#else
80236+ if (in_group_p(grsec_tpe_gid))
80237+ msg = "being in untrusted group";
80238+#endif
80239+ }
80240+ if (!msg && gr_acl_tpe_check())
80241+ msg = "being in untrusted role";
80242+
80243+ // not in any affected group/role
80244+ if (!msg)
80245+ goto next_check;
80246+
80247+ if (gr_is_global_nonroot(inode->i_uid))
80248+ msg2 = "file in non-root-owned directory";
80249+ else if (inode->i_mode & S_IWOTH)
80250+ msg2 = "file in world-writable directory";
80251+ else if (inode->i_mode & S_IWGRP)
80252+ msg2 = "file in group-writable directory";
80253+ else if (file_inode->i_mode & S_IWOTH)
80254+ msg2 = "file is world-writable";
80255+
80256+ if (msg && msg2) {
80257+ char fullmsg[70] = {0};
80258+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
80259+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
80260+ return 0;
80261+ }
80262+ msg = NULL;
80263+next_check:
80264+#ifdef CONFIG_GRKERNSEC_TPE_ALL
80265+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
80266+ return 1;
80267+
80268+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
80269+ msg = "directory not owned by user";
80270+ else if (inode->i_mode & S_IWOTH)
80271+ msg = "file in world-writable directory";
80272+ else if (inode->i_mode & S_IWGRP)
80273+ msg = "file in group-writable directory";
80274+ else if (file_inode->i_mode & S_IWOTH)
80275+ msg = "file is world-writable";
80276+
80277+ if (msg) {
80278+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
80279+ return 0;
80280+ }
80281+#endif
80282+#endif
80283+ return 1;
80284+}
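
The deny messages track a two-part test: the caller must first be in an untrusted group or role (msg), and then either the containing directory is non-root-owned or group/world-writable, or the file itself is world-writable (msg2); the first matching condition wins. A runnable userspace model of the directory/file half of the test, using stat() in place of the inode checks:

#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

/* userspace model of the checks gr_tpe_allow() applies once the caller
 * is already known to be in an untrusted group or role */
static const char *tpe_reason(const char *path)
{
	char dir[4096];
	struct stat d, f;
	char *slash;

	snprintf(dir, sizeof(dir), "%s", path);
	slash = strrchr(dir, '/');
	if (slash) *slash = '\0'; else snprintf(dir, sizeof(dir), ".");

	if (stat(dir, &d) || stat(path, &f))
		return "stat failed";
	if (d.st_uid != 0)
		return "file in non-root-owned directory";
	if (d.st_mode & S_IWOTH)
		return "file in world-writable directory";
	if (d.st_mode & S_IWGRP)
		return "file in group-writable directory";
	if (f.st_mode & S_IWOTH)
		return "file is world-writable";
	return NULL;	/* execution would be allowed */
}

int main(int argc, char **argv)
{
	const char *r = tpe_reason(argc > 1 ? argv[1] : "/bin/sh");
	printf("%s\n", r ? r : "allowed");
	return 0;
}
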
80285diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
80286new file mode 100644
80287index 0000000..ae02d8e
80288--- /dev/null
80289+++ b/grsecurity/grsec_usb.c
80290@@ -0,0 +1,15 @@
80291+#include <linux/kernel.h>
80292+#include <linux/grinternal.h>
80293+#include <linux/module.h>
80294+
80295+int gr_handle_new_usb(void)
80296+{
80297+#ifdef CONFIG_GRKERNSEC_DENYUSB
80298+ if (grsec_deny_new_usb) {
80299+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
80300+ return 1;
80301+ }
80302+#endif
80303+ return 0;
80304+}
80305+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
80306diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
80307new file mode 100644
80308index 0000000..158b330
80309--- /dev/null
80310+++ b/grsecurity/grsum.c
80311@@ -0,0 +1,64 @@
80312+#include <linux/err.h>
80313+#include <linux/kernel.h>
80314+#include <linux/sched.h>
80315+#include <linux/mm.h>
80316+#include <linux/scatterlist.h>
80317+#include <linux/crypto.h>
80318+#include <linux/gracl.h>
80319+
80320+
80321+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
80322+#error "crypto and sha256 must be built into the kernel"
80323+#endif
80324+
80325+int
80326+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
80327+{
80328+ struct crypto_hash *tfm;
80329+ struct hash_desc desc;
80330+ struct scatterlist sg[2];
80331+ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
80332+ unsigned long *tmpsumptr = (unsigned long *)temp_sum;
80333+ unsigned long *sumptr = (unsigned long *)sum;
80334+ int cryptres;
80335+ int retval = 1;
80336+ volatile int mismatched = 0;
80337+ volatile int dummy = 0;
80338+ unsigned int i;
80339+
80340+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
80341+ if (IS_ERR(tfm)) {
80342+ /* should never happen, since sha256 should be built in */
80343+ memset(entry->pw, 0, GR_PW_LEN);
80344+ return 1;
80345+ }
80346+
80347+ sg_init_table(sg, 2);
80348+ sg_set_buf(&sg[0], salt, GR_SALT_LEN);
80349+ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));
80350+
80351+ desc.tfm = tfm;
80352+ desc.flags = 0;
80353+
80354+ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
80355+ temp_sum);
80356+
80357+ memset(entry->pw, 0, GR_PW_LEN);
80358+
80359+ if (cryptres)
80360+ goto out;
80361+
80362+ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
80363+ if (sumptr[i] != tmpsumptr[i])
80364+ mismatched = 1;
80365+ else
80366+ dummy = 1; // waste a cycle
80367+
80368+ if (!mismatched)
80369+ retval = dummy - 1;
80370+
80371+out:
80372+ crypto_free_hash(tfm);
80373+
80374+ return retval;
80375+}
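
The comparison loop in chkpw() is deliberately not an early-exit memcmp(): every word of the digest is inspected and both branches perform an assignment, so the time taken reveals nothing about how long a matching prefix an attacker has guessed; only after the loop does retval = dummy - 1 turn an all-words-equal run into 0. A standalone model of that fixed-time compare:

#include <stddef.h>
#include <stdio.h>

/* model of the compare in chkpw(): touch every word, do comparable
 * work on both branches, decide only at the end */
static int fixed_time_eq(const unsigned long *a, const unsigned long *b, size_t n)
{
	volatile int mismatched = 0;
	volatile int dummy = 0;
	size_t i;

	for (i = 0; i < n; i++)
		if (a[i] != b[i])
			mismatched = 1;
		else
			dummy = 1;	/* "waste a cycle", as the original puts it */

	return !mismatched ? dummy - 1 : 1;	/* 0 == digests equal */
}

int main(void)
{
	unsigned long x[4] = {1, 2, 3, 4}, y[4] = {1, 2, 3, 4}, z[4] = {1, 2, 3, 5};
	printf("%d %d\n", fixed_time_eq(x, y, 4), fixed_time_eq(x, z, 4)); /* 0 1 */
	return 0;
}
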
80376diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
80377index 77ff547..181834f 100644
80378--- a/include/asm-generic/4level-fixup.h
80379+++ b/include/asm-generic/4level-fixup.h
80380@@ -13,8 +13,10 @@
80381 #define pmd_alloc(mm, pud, address) \
80382 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
80383 NULL: pmd_offset(pud, address))
80384+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
80385
80386 #define pud_alloc(mm, pgd, address) (pgd)
80387+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
80388 #define pud_offset(pgd, start) (pgd)
80389 #define pud_none(pud) 0
80390 #define pud_bad(pud) 0
80391diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
80392index b7babf0..97f4c4f 100644
80393--- a/include/asm-generic/atomic-long.h
80394+++ b/include/asm-generic/atomic-long.h
80395@@ -22,6 +22,12 @@
80396
80397 typedef atomic64_t atomic_long_t;
80398
80399+#ifdef CONFIG_PAX_REFCOUNT
80400+typedef atomic64_unchecked_t atomic_long_unchecked_t;
80401+#else
80402+typedef atomic64_t atomic_long_unchecked_t;
80403+#endif
80404+
80405 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
80406
80407 static inline long atomic_long_read(atomic_long_t *l)
80408@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
80409 return (long)atomic64_read(v);
80410 }
80411
80412+#ifdef CONFIG_PAX_REFCOUNT
80413+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
80414+{
80415+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80416+
80417+ return (long)atomic64_read_unchecked(v);
80418+}
80419+#endif
80420+
80421 static inline void atomic_long_set(atomic_long_t *l, long i)
80422 {
80423 atomic64_t *v = (atomic64_t *)l;
80424@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
80425 atomic64_set(v, i);
80426 }
80427
80428+#ifdef CONFIG_PAX_REFCOUNT
80429+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
80430+{
80431+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80432+
80433+ atomic64_set_unchecked(v, i);
80434+}
80435+#endif
80436+
80437 static inline void atomic_long_inc(atomic_long_t *l)
80438 {
80439 atomic64_t *v = (atomic64_t *)l;
80440@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
80441 atomic64_inc(v);
80442 }
80443
80444+#ifdef CONFIG_PAX_REFCOUNT
80445+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
80446+{
80447+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80448+
80449+ atomic64_inc_unchecked(v);
80450+}
80451+#endif
80452+
80453 static inline void atomic_long_dec(atomic_long_t *l)
80454 {
80455 atomic64_t *v = (atomic64_t *)l;
80456@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
80457 atomic64_dec(v);
80458 }
80459
80460+#ifdef CONFIG_PAX_REFCOUNT
80461+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
80462+{
80463+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80464+
80465+ atomic64_dec_unchecked(v);
80466+}
80467+#endif
80468+
80469 static inline void atomic_long_add(long i, atomic_long_t *l)
80470 {
80471 atomic64_t *v = (atomic64_t *)l;
80472@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
80473 atomic64_add(i, v);
80474 }
80475
80476+#ifdef CONFIG_PAX_REFCOUNT
80477+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
80478+{
80479+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80480+
80481+ atomic64_add_unchecked(i, v);
80482+}
80483+#endif
80484+
80485 static inline void atomic_long_sub(long i, atomic_long_t *l)
80486 {
80487 atomic64_t *v = (atomic64_t *)l;
80488@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
80489 atomic64_sub(i, v);
80490 }
80491
80492+#ifdef CONFIG_PAX_REFCOUNT
80493+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
80494+{
80495+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80496+
80497+ atomic64_sub_unchecked(i, v);
80498+}
80499+#endif
80500+
80501 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
80502 {
80503 atomic64_t *v = (atomic64_t *)l;
80504@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
80505 return atomic64_add_negative(i, v);
80506 }
80507
80508-static inline long atomic_long_add_return(long i, atomic_long_t *l)
80509+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
80510 {
80511 atomic64_t *v = (atomic64_t *)l;
80512
80513 return (long)atomic64_add_return(i, v);
80514 }
80515
80516+#ifdef CONFIG_PAX_REFCOUNT
80517+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
80518+{
80519+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80520+
80521+ return (long)atomic64_add_return_unchecked(i, v);
80522+}
80523+#endif
80524+
80525 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
80526 {
80527 atomic64_t *v = (atomic64_t *)l;
80528@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
80529 return (long)atomic64_inc_return(v);
80530 }
80531
80532+#ifdef CONFIG_PAX_REFCOUNT
80533+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
80534+{
80535+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80536+
80537+ return (long)atomic64_inc_return_unchecked(v);
80538+}
80539+#endif
80540+
80541 static inline long atomic_long_dec_return(atomic_long_t *l)
80542 {
80543 atomic64_t *v = (atomic64_t *)l;
80544@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
80545
80546 typedef atomic_t atomic_long_t;
80547
80548+#ifdef CONFIG_PAX_REFCOUNT
80549+typedef atomic_unchecked_t atomic_long_unchecked_t;
80550+#else
80551+typedef atomic_t atomic_long_unchecked_t;
80552+#endif
80553+
80554 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
80555 static inline long atomic_long_read(atomic_long_t *l)
80556 {
80557@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
80558 return (long)atomic_read(v);
80559 }
80560
80561+#ifdef CONFIG_PAX_REFCOUNT
80562+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
80563+{
80564+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80565+
80566+ return (long)atomic_read_unchecked(v);
80567+}
80568+#endif
80569+
80570 static inline void atomic_long_set(atomic_long_t *l, long i)
80571 {
80572 atomic_t *v = (atomic_t *)l;
80573@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
80574 atomic_set(v, i);
80575 }
80576
80577+#ifdef CONFIG_PAX_REFCOUNT
80578+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
80579+{
80580+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80581+
80582+ atomic_set_unchecked(v, i);
80583+}
80584+#endif
80585+
80586 static inline void atomic_long_inc(atomic_long_t *l)
80587 {
80588 atomic_t *v = (atomic_t *)l;
80589@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
80590 atomic_inc(v);
80591 }
80592
80593+#ifdef CONFIG_PAX_REFCOUNT
80594+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
80595+{
80596+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80597+
80598+ atomic_inc_unchecked(v);
80599+}
80600+#endif
80601+
80602 static inline void atomic_long_dec(atomic_long_t *l)
80603 {
80604 atomic_t *v = (atomic_t *)l;
80605@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
80606 atomic_dec(v);
80607 }
80608
80609+#ifdef CONFIG_PAX_REFCOUNT
80610+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
80611+{
80612+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80613+
80614+ atomic_dec_unchecked(v);
80615+}
80616+#endif
80617+
80618 static inline void atomic_long_add(long i, atomic_long_t *l)
80619 {
80620 atomic_t *v = (atomic_t *)l;
80621@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
80622 atomic_add(i, v);
80623 }
80624
80625+#ifdef CONFIG_PAX_REFCOUNT
80626+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
80627+{
80628+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80629+
80630+ atomic_add_unchecked(i, v);
80631+}
80632+#endif
80633+
80634 static inline void atomic_long_sub(long i, atomic_long_t *l)
80635 {
80636 atomic_t *v = (atomic_t *)l;
80637@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
80638 atomic_sub(i, v);
80639 }
80640
80641+#ifdef CONFIG_PAX_REFCOUNT
80642+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
80643+{
80644+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80645+
80646+ atomic_sub_unchecked(i, v);
80647+}
80648+#endif
80649+
80650 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
80651 {
80652 atomic_t *v = (atomic_t *)l;
80653@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
80654 return (long)atomic_add_return(i, v);
80655 }
80656
80657+#ifdef CONFIG_PAX_REFCOUNT
80658+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
80659+{
80660+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80661+
80662+ return (long)atomic_add_return_unchecked(i, v);
80663+}
80664+
80665+#endif
80666+
80667 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
80668 {
80669 atomic_t *v = (atomic_t *)l;
80670@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
80671 return (long)atomic_inc_return(v);
80672 }
80673
80674+#ifdef CONFIG_PAX_REFCOUNT
80675+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
80676+{
80677+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80678+
80679+ return (long)atomic_inc_return_unchecked(v);
80680+}
80681+#endif
80682+
80683 static inline long atomic_long_dec_return(atomic_long_t *l)
80684 {
80685 atomic_t *v = (atomic_t *)l;
80686@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
80687
80688 #endif /* BITS_PER_LONG == 64 */
80689
80690+#ifdef CONFIG_PAX_REFCOUNT
80691+static inline void pax_refcount_needs_these_functions(void)
80692+{
80693+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
80694+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
80695+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
80696+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
80697+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
80698+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
80699+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
80700+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
80701+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
80702+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
80703+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
80704+#ifdef CONFIG_X86
80705+ atomic_clear_mask_unchecked(0, NULL);
80706+ atomic_set_mask_unchecked(0, NULL);
80707+#endif
80708+
80709+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
80710+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
80711+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
80712+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
80713+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
80714+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
80715+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
80716+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
80717+}
80718+#else
80719+#define atomic_read_unchecked(v) atomic_read(v)
80720+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
80721+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
80722+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
80723+#define atomic_inc_unchecked(v) atomic_inc(v)
80724+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
80725+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
80726+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
80727+#define atomic_dec_unchecked(v) atomic_dec(v)
80728+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
80729+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
80730+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
80731+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
80732+
80733+#define atomic_long_read_unchecked(v) atomic_long_read(v)
80734+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
80735+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
80736+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
80737+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
80738+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
80739+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
80740+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
80741+#endif
80742+
80743 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
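
These _unchecked types and operations exist because CONFIG_PAX_REFCOUNT makes the ordinary atomic ops trap on overflow to defeat reference-count-overflow exploits; counters that may legitimately wrap, such as statistics, must be converted to the unchecked variants. A hedged kernel-style sketch (the counter and helpers are hypothetical, not from the patch):

/* a statistics counter that may legitimately wrap, so it opts out of
 * PAX_REFCOUNT's overflow detection via the unchecked type */
static atomic_long_unchecked_t rx_bytes = ATOMIC_LONG_INIT(0);

static void account_rx(long n)
{
	atomic_long_add_unchecked(n, &rx_bytes);	/* never traps on wrap */
}

static long read_rx(void)
{
	return atomic_long_read_unchecked(&rx_bytes);
}
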
80744diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
80745index 9c79e76..9f7827d 100644
80746--- a/include/asm-generic/atomic.h
80747+++ b/include/asm-generic/atomic.h
80748@@ -154,7 +154,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
80749 * Atomically clears the bits set in @mask from @v
80750 */
80751 #ifndef atomic_clear_mask
80752-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
80753+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
80754 {
80755 unsigned long flags;
80756
80757diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
80758index b18ce4f..2ee2843 100644
80759--- a/include/asm-generic/atomic64.h
80760+++ b/include/asm-generic/atomic64.h
80761@@ -16,6 +16,8 @@ typedef struct {
80762 long long counter;
80763 } atomic64_t;
80764
80765+typedef atomic64_t atomic64_unchecked_t;
80766+
80767 #define ATOMIC64_INIT(i) { (i) }
80768
80769 extern long long atomic64_read(const atomic64_t *v);
80770@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
80771 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
80772 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
80773
80774+#define atomic64_read_unchecked(v) atomic64_read(v)
80775+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
80776+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
80777+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
80778+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
80779+#define atomic64_inc_unchecked(v) atomic64_inc(v)
80780+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
80781+#define atomic64_dec_unchecked(v) atomic64_dec(v)
80782+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
80783+
80784 #endif /* _ASM_GENERIC_ATOMIC64_H */
80785diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
80786index 1402fa8..025a736 100644
80787--- a/include/asm-generic/barrier.h
80788+++ b/include/asm-generic/barrier.h
80789@@ -74,7 +74,7 @@
80790 do { \
80791 compiletime_assert_atomic_type(*p); \
80792 smp_mb(); \
80793- ACCESS_ONCE(*p) = (v); \
80794+ ACCESS_ONCE_RW(*p) = (v); \
80795 } while (0)
80796
80797 #define smp_load_acquire(p) \
80798diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
80799index a60a7cc..0fe12f2 100644
80800--- a/include/asm-generic/bitops/__fls.h
80801+++ b/include/asm-generic/bitops/__fls.h
80802@@ -9,7 +9,7 @@
80803 *
80804 * Undefined if no set bit exists, so code should check against 0 first.
80805 */
80806-static __always_inline unsigned long __fls(unsigned long word)
80807+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
80808 {
80809 int num = BITS_PER_LONG - 1;
80810
80811diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
80812index 0576d1f..dad6c71 100644
80813--- a/include/asm-generic/bitops/fls.h
80814+++ b/include/asm-generic/bitops/fls.h
80815@@ -9,7 +9,7 @@
80816 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
80817 */
80818
80819-static __always_inline int fls(int x)
80820+static __always_inline int __intentional_overflow(-1) fls(int x)
80821 {
80822 int r = 32;
80823
80824diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
80825index b097cf8..3d40e14 100644
80826--- a/include/asm-generic/bitops/fls64.h
80827+++ b/include/asm-generic/bitops/fls64.h
80828@@ -15,7 +15,7 @@
80829 * at position 64.
80830 */
80831 #if BITS_PER_LONG == 32
80832-static __always_inline int fls64(__u64 x)
80833+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
80834 {
80835 __u32 h = x >> 32;
80836 if (h)
80837@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
80838 return fls(x);
80839 }
80840 #elif BITS_PER_LONG == 64
80841-static __always_inline int fls64(__u64 x)
80842+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
80843 {
80844 if (x == 0)
80845 return 0;
80846diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
80847index 1bfcfe5..e04c5c9 100644
80848--- a/include/asm-generic/cache.h
80849+++ b/include/asm-generic/cache.h
80850@@ -6,7 +6,7 @@
80851 * cache lines need to provide their own cache.h.
80852 */
80853
80854-#define L1_CACHE_SHIFT 5
80855-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
80856+#define L1_CACHE_SHIFT 5UL
80857+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
80858
80859 #endif /* __ASM_GENERIC_CACHE_H */
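
Making L1_CACHE_SHIFT an unsigned long looks cosmetic at a value of 5, but it pins L1_CACHE_BYTES and everything derived from it to unsigned long arithmetic rather than int. The general hazard the suffix guards against is illustrated below (runnable on an LP64 target):

#include <stdio.h>

#define SHIFT_INT(n)	(1 << (n))	/* int arithmetic: undefined once n reaches 31 */
#define SHIFT_UL(n)	(1UL << (n))	/* unsigned long: defined up to 63 on LP64 */

int main(void)
{
	printf("%lu\n", SHIFT_UL(5));	/* 32: same value as the int form... */
	printf("%lu\n", SHIFT_UL(40));	/* ...but this one stays well-defined */
	/* SHIFT_INT(40) would overflow int: undefined behavior */
	return 0;
}
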
80860diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
80861index 0d68a1e..b74a761 100644
80862--- a/include/asm-generic/emergency-restart.h
80863+++ b/include/asm-generic/emergency-restart.h
80864@@ -1,7 +1,7 @@
80865 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
80866 #define _ASM_GENERIC_EMERGENCY_RESTART_H
80867
80868-static inline void machine_emergency_restart(void)
80869+static inline __noreturn void machine_emergency_restart(void)
80870 {
80871 machine_restart(NULL);
80872 }
80873diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
80874index 975e1cc..0b8a083 100644
80875--- a/include/asm-generic/io.h
80876+++ b/include/asm-generic/io.h
80877@@ -289,7 +289,7 @@ static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
80878 * These are pretty trivial
80879 */
80880 #ifndef virt_to_phys
80881-static inline unsigned long virt_to_phys(volatile void *address)
80882+static inline unsigned long __intentional_overflow(-1) virt_to_phys(volatile void *address)
80883 {
80884 return __pa((unsigned long)address);
80885 }
80886diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
80887index 90f99c7..00ce236 100644
80888--- a/include/asm-generic/kmap_types.h
80889+++ b/include/asm-generic/kmap_types.h
80890@@ -2,9 +2,9 @@
80891 #define _ASM_GENERIC_KMAP_TYPES_H
80892
80893 #ifdef __WITH_KM_FENCE
80894-# define KM_TYPE_NR 41
80895+# define KM_TYPE_NR 42
80896 #else
80897-# define KM_TYPE_NR 20
80898+# define KM_TYPE_NR 21
80899 #endif
80900
80901 #endif
80902diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
80903index 9ceb03b..62b0b8f 100644
80904--- a/include/asm-generic/local.h
80905+++ b/include/asm-generic/local.h
80906@@ -23,24 +23,37 @@ typedef struct
80907 atomic_long_t a;
80908 } local_t;
80909
80910+typedef struct {
80911+ atomic_long_unchecked_t a;
80912+} local_unchecked_t;
80913+
80914 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
80915
80916 #define local_read(l) atomic_long_read(&(l)->a)
80917+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
80918 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
80919+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
80920 #define local_inc(l) atomic_long_inc(&(l)->a)
80921+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
80922 #define local_dec(l) atomic_long_dec(&(l)->a)
80923+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
80924 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
80925+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
80926 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
80927+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
80928
80929 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
80930 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
80931 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
80932 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
80933 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
80934+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
80935 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
80936 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
80937+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
80938
80939 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
80940+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
80941 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
80942 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
80943 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
80944diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
80945index 725612b..9cc513a 100644
80946--- a/include/asm-generic/pgtable-nopmd.h
80947+++ b/include/asm-generic/pgtable-nopmd.h
80948@@ -1,14 +1,19 @@
80949 #ifndef _PGTABLE_NOPMD_H
80950 #define _PGTABLE_NOPMD_H
80951
80952-#ifndef __ASSEMBLY__
80953-
80954 #include <asm-generic/pgtable-nopud.h>
80955
80956-struct mm_struct;
80957-
80958 #define __PAGETABLE_PMD_FOLDED
80959
80960+#define PMD_SHIFT PUD_SHIFT
80961+#define PTRS_PER_PMD 1
80962+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
80963+#define PMD_MASK (~(PMD_SIZE-1))
80964+
80965+#ifndef __ASSEMBLY__
80966+
80967+struct mm_struct;
80968+
80969 /*
80970 * Having the pmd type consist of a pud gets the size right, and allows
80971 * us to conceptually access the pud entry that this pmd is folded into
80972@@ -16,11 +21,6 @@ struct mm_struct;
80973 */
80974 typedef struct { pud_t pud; } pmd_t;
80975
80976-#define PMD_SHIFT PUD_SHIFT
80977-#define PTRS_PER_PMD 1
80978-#define PMD_SIZE (1UL << PMD_SHIFT)
80979-#define PMD_MASK (~(PMD_SIZE-1))
80980-
80981 /*
80982 * The "pud_xxx()" functions here are trivial for a folded two-level
80983 * setup: the pmd is never bad, and a pmd always exists (as it's folded
80984diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
80985index 810431d..0ec4804f 100644
80986--- a/include/asm-generic/pgtable-nopud.h
80987+++ b/include/asm-generic/pgtable-nopud.h
80988@@ -1,10 +1,15 @@
80989 #ifndef _PGTABLE_NOPUD_H
80990 #define _PGTABLE_NOPUD_H
80991
80992-#ifndef __ASSEMBLY__
80993-
80994 #define __PAGETABLE_PUD_FOLDED
80995
80996+#define PUD_SHIFT PGDIR_SHIFT
80997+#define PTRS_PER_PUD 1
80998+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
80999+#define PUD_MASK (~(PUD_SIZE-1))
81000+
81001+#ifndef __ASSEMBLY__
81002+
81003 /*
81004 * Having the pud type consist of a pgd gets the size right, and allows
81005 * us to conceptually access the pgd entry that this pud is folded into
81006@@ -12,11 +17,6 @@
81007 */
81008 typedef struct { pgd_t pgd; } pud_t;
81009
81010-#define PUD_SHIFT PGDIR_SHIFT
81011-#define PTRS_PER_PUD 1
81012-#define PUD_SIZE (1UL << PUD_SHIFT)
81013-#define PUD_MASK (~(PUD_SIZE-1))
81014-
81015 /*
81016 * The "pgd_xxx()" functions here are trivial for a folded two-level
81017 * setup: the pud is never bad, and a pud always exists (as it's folded
81018@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
81019 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
81020
81021 #define pgd_populate(mm, pgd, pud) do { } while (0)
81022+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
81023 /*
81024 * (puds are folded into pgds so this doesn't get actually called,
81025 * but the define is needed for a generic inline function.)
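
Both the no-pmd and no-pud fixups hoist the folded-level constants out of #ifndef __ASSEMBLY__ and switch 1UL to _AC(1,UL) so that assembly sources can consume them as well; _AC pastes the UL suffix only when compiling C, since the assembler cannot parse it. For reference, its definition in the kernel's const.h:

#ifdef __ASSEMBLY__
#define _AC(X,Y)	X		/* the assembler sees a bare 1 */
#else
#define __AC(X,Y)	(X##Y)
#define _AC(X,Y)	__AC(X,Y)	/* C sees 1UL */
#endif
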
81026diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
81027index 53b2acc..f4568e7 100644
81028--- a/include/asm-generic/pgtable.h
81029+++ b/include/asm-generic/pgtable.h
81030@@ -819,6 +819,22 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
81031 }
81032 #endif /* CONFIG_NUMA_BALANCING */
81033
81034+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
81035+#ifdef CONFIG_PAX_KERNEXEC
81036+#error KERNEXEC requires pax_open_kernel
81037+#else
81038+static inline unsigned long pax_open_kernel(void) { return 0; }
81039+#endif
81040+#endif
81041+
81042+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
81043+#ifdef CONFIG_PAX_KERNEXEC
81044+#error KERNEXEC requires pax_close_kernel
81045+#else
81046+static inline unsigned long pax_close_kernel(void) { return 0; }
81047+#endif
81048+#endif
81049+
81050 #endif /* CONFIG_MMU */
81051
81052 #endif /* !__ASSEMBLY__ */
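
The stubs keep non-KERNEXEC configurations building; an architecture that enables CONFIG_PAX_KERNEXEC must supply real pax_open_kernel()/pax_close_kernel(), and code that writes to otherwise read-only kernel data brackets the store with the pair. A minimal usage sketch (the slot and caller are illustrative, not from the patch):

static void set_ro_pointer(void **slot, void *val)
{
	pax_open_kernel();	/* arch hook: make the mapping writable */
	*slot = val;
	pax_close_kernel();	/* arch hook: restore read-only */
}
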
81053diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
81054index 72d8803..cb9749c 100644
81055--- a/include/asm-generic/uaccess.h
81056+++ b/include/asm-generic/uaccess.h
81057@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
81058 return __clear_user(to, n);
81059 }
81060
81061+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
81062+#ifdef CONFIG_PAX_MEMORY_UDEREF
81063+#error UDEREF requires pax_open_userland
81064+#else
81065+static inline unsigned long pax_open_userland(void) { return 0; }
81066+#endif
81067+#endif
81068+
81069+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
81070+#ifdef CONFIG_PAX_MEMORY_UDEREF
81071+#error UDEREF requires pax_close_userland
81072+#else
81073+static inline unsigned long pax_close_userland(void) { return 0; }
81074+#endif
81075+#endif
81076+
81077 #endif /* __ASM_GENERIC_UACCESS_H */
81078diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
81079index c1c0b0c..05c9588 100644
81080--- a/include/asm-generic/vmlinux.lds.h
81081+++ b/include/asm-generic/vmlinux.lds.h
81082@@ -231,6 +231,7 @@
81083 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
81084 VMLINUX_SYMBOL(__start_rodata) = .; \
81085 *(.rodata) *(.rodata.*) \
81086+ *(.data..read_only) \
81087 *(__vermagic) /* Kernel version magic */ \
81088 . = ALIGN(8); \
81089 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
81090@@ -719,17 +720,18 @@
81091 * section in the linker script will go there too. @phdr should have
81092 * a leading colon.
81093 *
81094- * Note that this macros defines __per_cpu_load as an absolute symbol.
81095+ * Note that this macro defines per_cpu_load as an absolute symbol.
81096 * If there is no need to put the percpu section at a predetermined
81097 * address, use PERCPU_SECTION.
81098 */
81099 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
81100- VMLINUX_SYMBOL(__per_cpu_load) = .; \
81101- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
81102+ per_cpu_load = .; \
81103+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
81104 - LOAD_OFFSET) { \
81105+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
81106 PERCPU_INPUT(cacheline) \
81107 } phdr \
81108- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
81109+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
81110
81111 /**
81112 * PERCPU_SECTION - define output section for percpu area, simple version
81113diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
81114index 016c2f1..c4baa98 100644
81115--- a/include/crypto/algapi.h
81116+++ b/include/crypto/algapi.h
81117@@ -34,7 +34,7 @@ struct crypto_type {
81118 unsigned int maskclear;
81119 unsigned int maskset;
81120 unsigned int tfmsize;
81121-};
81122+} __do_const;
81123
81124 struct crypto_instance {
81125 struct crypto_alg alg;
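
This is the first of many __do_const annotations in the include/ headers: the constify GCC plugin treats such ops structures as const, moving every instance into read-only memory so their function pointers cannot be overwritten at runtime, while __no_const exempts structures (or derives a mutable typedef, as with clk_ops_no_const further down) whose pointers really are rewritten. A hedged illustration of the pattern with a made-up ops structure:

struct example_ops {
	int (*probe)(void);
	void (*remove)(void);
} __do_const;				/* constified: instances end up read-only */

typedef struct example_ops __no_const example_ops_no_const;
					/* escape hatch for the rare mutable copy */
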
81126diff --git a/include/drm/drmP.h b/include/drm/drmP.h
81127index 8af71a8..7fe6c19 100644
81128--- a/include/drm/drmP.h
81129+++ b/include/drm/drmP.h
81130@@ -68,6 +68,7 @@
81131 #include <linux/workqueue.h>
81132 #include <linux/poll.h>
81133 #include <asm/pgalloc.h>
81134+#include <asm/local.h>
81135 #include <drm/drm.h>
81136 #include <drm/drm_sarea.h>
81137 #include <drm/drm_vma_manager.h>
81138@@ -261,10 +262,12 @@ do { \
81139 * \param cmd command.
81140 * \param arg argument.
81141 */
81142-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
81143+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
81144+ struct drm_file *file_priv);
81145+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
81146 struct drm_file *file_priv);
81147
81148-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
81149+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
81150 unsigned long arg);
81151
81152 #define DRM_IOCTL_NR(n) _IOC_NR(n)
81153@@ -280,10 +283,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
81154 struct drm_ioctl_desc {
81155 unsigned int cmd;
81156 int flags;
81157- drm_ioctl_t *func;
81158+ drm_ioctl_t func;
81159 unsigned int cmd_drv;
81160 const char *name;
81161-};
81162+} __do_const;
81163
81164 /**
81165 * Creates a driver or general drm_ioctl_desc array entry for the given
81166@@ -983,7 +986,8 @@ struct drm_info_list {
81167 int (*show)(struct seq_file*, void*); /** show callback */
81168 u32 driver_features; /**< Required driver features for this entry */
81169 void *data;
81170-};
81171+} __do_const;
81172+typedef struct drm_info_list __no_const drm_info_list_no_const;
81173
81174 /**
81175 * debugfs node structure. This structure represents a debugfs file.
81176@@ -1067,7 +1071,7 @@ struct drm_device {
81177
81178 /** \name Usage Counters */
81179 /*@{ */
81180- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
81181+ local_t open_count; /**< Outstanding files open, protected by drm_global_mutex. */
81182 spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
81183 int buf_use; /**< Buffers in use -- cannot alloc */
81184 atomic_t buf_alloc; /**< Buffer allocation in progress */
81185diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
81186index a3d75fe..6802f9c 100644
81187--- a/include/drm/drm_crtc_helper.h
81188+++ b/include/drm/drm_crtc_helper.h
81189@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
81190 struct drm_connector *connector);
81191 /* disable encoder when not in use - more explicit than dpms off */
81192 void (*disable)(struct drm_encoder *encoder);
81193-};
81194+} __no_const;
81195
81196 /**
81197 * drm_connector_helper_funcs - helper operations for connectors
81198diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
81199index a70d456..6ea07cd 100644
81200--- a/include/drm/i915_pciids.h
81201+++ b/include/drm/i915_pciids.h
81202@@ -37,7 +37,7 @@
81203 */
81204 #define INTEL_VGA_DEVICE(id, info) { \
81205 0x8086, id, \
81206- ~0, ~0, \
81207+ PCI_ANY_ID, PCI_ANY_ID, \
81208 0x030000, 0xff0000, \
81209 (unsigned long) info }
81210
81211diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
81212index 72dcbe8..8db58d7 100644
81213--- a/include/drm/ttm/ttm_memory.h
81214+++ b/include/drm/ttm/ttm_memory.h
81215@@ -48,7 +48,7 @@
81216
81217 struct ttm_mem_shrink {
81218 int (*do_shrink) (struct ttm_mem_shrink *);
81219-};
81220+} __no_const;
81221
81222 /**
81223 * struct ttm_mem_global - Global memory accounting structure.
81224diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
81225index 49a8284..9643967 100644
81226--- a/include/drm/ttm/ttm_page_alloc.h
81227+++ b/include/drm/ttm/ttm_page_alloc.h
81228@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
81229 */
81230 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
81231
81232+struct device;
81233 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
81234 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
81235
81236diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
81237index 4b840e8..155d235 100644
81238--- a/include/keys/asymmetric-subtype.h
81239+++ b/include/keys/asymmetric-subtype.h
81240@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
81241 /* Verify the signature on a key of this subtype (optional) */
81242 int (*verify_signature)(const struct key *key,
81243 const struct public_key_signature *sig);
81244-};
81245+} __do_const;
81246
81247 /**
81248 * asymmetric_key_subtype - Get the subtype from an asymmetric key
81249diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
81250index c1da539..1dcec55 100644
81251--- a/include/linux/atmdev.h
81252+++ b/include/linux/atmdev.h
81253@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
81254 #endif
81255
81256 struct k_atm_aal_stats {
81257-#define __HANDLE_ITEM(i) atomic_t i
81258+#define __HANDLE_ITEM(i) atomic_unchecked_t i
81259 __AAL_STAT_ITEMS
81260 #undef __HANDLE_ITEM
81261 };
81262@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
81263 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
81264 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
81265 struct module *owner;
81266-};
81267+} __do_const;
81268
81269 struct atmphy_ops {
81270 int (*start)(struct atm_dev *dev);
81271diff --git a/include/linux/audit.h b/include/linux/audit.h
81272index 22cfddb..ab759e8 100644
81273--- a/include/linux/audit.h
81274+++ b/include/linux/audit.h
81275@@ -210,7 +210,7 @@ static inline void audit_ptrace(struct task_struct *t)
81276 extern unsigned int audit_serial(void);
81277 extern int auditsc_get_stamp(struct audit_context *ctx,
81278 struct timespec *t, unsigned int *serial);
81279-extern int audit_set_loginuid(kuid_t loginuid);
81280+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
81281
81282 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
81283 {
81284diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
81285index 61f29e5..e67c658 100644
81286--- a/include/linux/binfmts.h
81287+++ b/include/linux/binfmts.h
81288@@ -44,7 +44,7 @@ struct linux_binprm {
81289 unsigned interp_flags;
81290 unsigned interp_data;
81291 unsigned long loader, exec;
81292-};
81293+} __randomize_layout;
81294
81295 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
81296 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
81297@@ -73,8 +73,10 @@ struct linux_binfmt {
81298 int (*load_binary)(struct linux_binprm *);
81299 int (*load_shlib)(struct file *);
81300 int (*core_dump)(struct coredump_params *cprm);
81301+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
81302+ void (*handle_mmap)(struct file *);
81303 unsigned long min_coredump; /* minimal dump size */
81304-};
81305+} __do_const __randomize_layout;
81306
81307 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
81308
81309diff --git a/include/linux/bitops.h b/include/linux/bitops.h
81310index cbc5833..8123ebc 100644
81311--- a/include/linux/bitops.h
81312+++ b/include/linux/bitops.h
81313@@ -122,7 +122,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
81314 * @word: value to rotate
81315 * @shift: bits to roll
81316 */
81317-static inline __u32 rol32(__u32 word, unsigned int shift)
81318+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
81319 {
81320 return (word << shift) | (word >> (32 - shift));
81321 }
81322@@ -132,7 +132,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
81323 * @word: value to rotate
81324 * @shift: bits to roll
81325 */
81326-static inline __u32 ror32(__u32 word, unsigned int shift)
81327+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
81328 {
81329 return (word >> shift) | (word << (32 - shift));
81330 }
81331@@ -188,7 +188,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
81332 return (__s32)(value << shift) >> shift;
81333 }
81334
81335-static inline unsigned fls_long(unsigned long l)
81336+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
81337 {
81338 if (sizeof(l) == 4)
81339 return fls(l);
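
__intentional_overflow(-1) marks these helpers for the size_overflow GCC plugin: their truncating arithmetic is deliberate, so instrumented builds must not report it. For rol32 specifically, the bits shifted out of the top are meant to reappear at the bottom, as this small check demonstrates:

#include <assert.h>
#include <stdio.h>

static unsigned int rol32_demo(unsigned int word, unsigned int shift)
{
	return (word << shift) | (word >> (32 - shift));
}

int main(void)
{
	/* the high bit "overflows" and wraps to bit 0: truncation is the point */
	assert(rol32_demo(0x80000001u, 1) == 0x00000003u);
	printf("ok\n");
	return 0;
}
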
81340diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
81341index 8699bcf..279485d 100644
81342--- a/include/linux/blkdev.h
81343+++ b/include/linux/blkdev.h
81344@@ -1625,7 +1625,7 @@ struct block_device_operations {
81345 /* this callback is with swap_lock and sometimes page table lock held */
81346 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
81347 struct module *owner;
81348-};
81349+} __do_const;
81350
81351 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
81352 unsigned long);
81353diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
81354index afc1343..9735539 100644
81355--- a/include/linux/blktrace_api.h
81356+++ b/include/linux/blktrace_api.h
81357@@ -25,7 +25,7 @@ struct blk_trace {
81358 struct dentry *dropped_file;
81359 struct dentry *msg_file;
81360 struct list_head running_list;
81361- atomic_t dropped;
81362+ atomic_unchecked_t dropped;
81363 };
81364
81365 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
81366diff --git a/include/linux/cache.h b/include/linux/cache.h
81367index 17e7e82..1d7da26 100644
81368--- a/include/linux/cache.h
81369+++ b/include/linux/cache.h
81370@@ -16,6 +16,14 @@
81371 #define __read_mostly
81372 #endif
81373
81374+#ifndef __read_only
81375+#ifdef CONFIG_PAX_KERNEXEC
81376+#error KERNEXEC requires __read_only
81377+#else
81378+#define __read_only __read_mostly
81379+#endif
81380+#endif
81381+
81382 #ifndef ____cacheline_aligned
81383 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
81384 #endif
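
The fallback maps __read_only onto __read_mostly when KERNEXEC is off, and deliberately fails the build if KERNEXEC is enabled but the architecture never supplied a real definition. The intended usage pattern, sketched with the kernel's __setup() hook (illustrative only; names are hypothetical):

/* Written once during boot, then enforced read-only under KERNEXEC;
 * without KERNEXEC it degrades gracefully to __read_mostly. */
static int feature_enabled __read_only;

static int __init feature_setup(char *str)
{
        feature_enabled = 1;    /* last legitimate write, at early boot */
        return 1;
}
__setup("feature", feature_setup);
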
81385diff --git a/include/linux/capability.h b/include/linux/capability.h
81386index 84b13ad..172cdee 100644
81387--- a/include/linux/capability.h
81388+++ b/include/linux/capability.h
81389@@ -211,9 +211,14 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
81390 extern bool capable(int cap);
81391 extern bool ns_capable(struct user_namespace *ns, int cap);
81392 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
81393+extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
81394 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
81395+extern bool capable_nolog(int cap);
81396+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
81397
81398 /* audit system wants to get cap info from files as well */
81399 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
81400
81401+extern int is_privileged_binary(const struct dentry *dentry);
81402+
81403 #endif /* !_LINUX_CAPABILITY_H */
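
The new _nolog variants perform the same capability check as capable()/ns_capable() but skip grsecurity's audit logging, for call sites where a denial is expected and not suspicious. A hypothetical call-site sketch (the function name and CAP_SYS_RAWIO are arbitrary choices):

static bool quiet_probe(void)
{
        /* An expected-to-fail probe: a denial here is normal, so do
         * not flood the grsecurity log with it. */
        return capable_nolog(CAP_SYS_RAWIO);
}
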
81404diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
81405index 8609d57..86e4d79 100644
81406--- a/include/linux/cdrom.h
81407+++ b/include/linux/cdrom.h
81408@@ -87,7 +87,6 @@ struct cdrom_device_ops {
81409
81410 /* driver specifications */
81411 const int capability; /* capability flags */
81412- int n_minors; /* number of active minor devices */
81413 /* handle uniform packets for scsi type devices (scsi,atapi) */
81414 int (*generic_packet) (struct cdrom_device_info *,
81415 struct packet_command *);
81416diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
81417index 4ce9056..86caac6 100644
81418--- a/include/linux/cleancache.h
81419+++ b/include/linux/cleancache.h
81420@@ -31,7 +31,7 @@ struct cleancache_ops {
81421 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
81422 void (*invalidate_inode)(int, struct cleancache_filekey);
81423 void (*invalidate_fs)(int);
81424-};
81425+} __no_const;
81426
81427 extern struct cleancache_ops *
81428 cleancache_register_ops(struct cleancache_ops *ops);
81429diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
81430index 0c287db..5efa775 100644
81431--- a/include/linux/clk-provider.h
81432+++ b/include/linux/clk-provider.h
81433@@ -180,6 +180,7 @@ struct clk_ops {
81434 void (*init)(struct clk_hw *hw);
81435 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
81436 };
81437+typedef struct clk_ops __no_const clk_ops_no_const;
81438
81439 /**
81440 * struct clk_init_data - holds init data that's common to all clocks and is
81441diff --git a/include/linux/compat.h b/include/linux/compat.h
81442index e649426..a74047b 100644
81443--- a/include/linux/compat.h
81444+++ b/include/linux/compat.h
81445@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
81446 compat_size_t __user *len_ptr);
81447
81448 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
81449-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
81450+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
81451 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
81452 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
81453 compat_ssize_t msgsz, int msgflg);
81454@@ -436,7 +436,7 @@ extern int compat_ptrace_request(struct task_struct *child,
81455 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
81456 compat_ulong_t addr, compat_ulong_t data);
81457 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
81458- compat_long_t addr, compat_long_t data);
81459+ compat_ulong_t addr, compat_ulong_t data);
81460
81461 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
81462 /*
81463diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
81464index 2507fd2..55203f8 100644
81465--- a/include/linux/compiler-gcc4.h
81466+++ b/include/linux/compiler-gcc4.h
81467@@ -39,9 +39,34 @@
81468 # define __compiletime_warning(message) __attribute__((warning(message)))
81469 # define __compiletime_error(message) __attribute__((error(message)))
81470 #endif /* __CHECKER__ */
81471+
81472+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
81473+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
81474+#define __bos0(ptr) __bos((ptr), 0)
81475+#define __bos1(ptr) __bos((ptr), 1)
81476 #endif /* GCC_VERSION >= 40300 */
81477
81478 #if GCC_VERSION >= 40500
81479+
81480+#ifdef RANDSTRUCT_PLUGIN
81481+#define __randomize_layout __attribute__((randomize_layout))
81482+#define __no_randomize_layout __attribute__((no_randomize_layout))
81483+#endif
81484+
81485+#ifdef CONSTIFY_PLUGIN
81486+#define __no_const __attribute__((no_const))
81487+#define __do_const __attribute__((do_const))
81488+#endif
81489+
81490+#ifdef SIZE_OVERFLOW_PLUGIN
81491+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
81492+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
81493+#endif
81494+
81495+#ifdef LATENT_ENTROPY_PLUGIN
81496+#define __latent_entropy __attribute__((latent_entropy))
81497+#endif
81498+
81499 /*
81500 * Mark a position in code as unreachable. This can be used to
81501 * suppress control flow warnings after asm blocks that transfer
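
__bos() wraps gcc's __builtin_object_size(), and __alloc_size() lets the compiler track sizes through allocator wrappers; together they feed fortified copy checks elsewhere in this patch. A standalone user-space demonstration (illustrative; the heap result requires optimization to propagate):

#include <stdio.h>
#include <stdlib.h>

static void *my_alloc(size_t n) __attribute__((alloc_size(1)));
static void *my_alloc(size_t n) { return malloc(n); }

int main(void)
{
        char stack_buf[16];
        char *heap_buf = my_alloc(32);

        printf("%zu\n", __builtin_object_size(stack_buf, 0)); /* 16 */
        /* 32 at -O2; (size_t)-1 (unknown) at -O0: */
        printf("%zu\n", __builtin_object_size(heap_buf, 0));
        free(heap_buf);
        return 0;
}
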
81502diff --git a/include/linux/compiler.h b/include/linux/compiler.h
81503index d5ad7b1..3b74638 100644
81504--- a/include/linux/compiler.h
81505+++ b/include/linux/compiler.h
81506@@ -5,11 +5,14 @@
81507
81508 #ifdef __CHECKER__
81509 # define __user __attribute__((noderef, address_space(1)))
81510+# define __force_user __force __user
81511 # define __kernel __attribute__((address_space(0)))
81512+# define __force_kernel __force __kernel
81513 # define __safe __attribute__((safe))
81514 # define __force __attribute__((force))
81515 # define __nocast __attribute__((nocast))
81516 # define __iomem __attribute__((noderef, address_space(2)))
81517+# define __force_iomem __force __iomem
81518 # define __must_hold(x) __attribute__((context(x,1,1)))
81519 # define __acquires(x) __attribute__((context(x,0,1)))
81520 # define __releases(x) __attribute__((context(x,1,0)))
81521@@ -17,20 +20,37 @@
81522 # define __release(x) __context__(x,-1)
81523 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
81524 # define __percpu __attribute__((noderef, address_space(3)))
81525+# define __force_percpu __force __percpu
81526 #ifdef CONFIG_SPARSE_RCU_POINTER
81527 # define __rcu __attribute__((noderef, address_space(4)))
81528+# define __force_rcu __force __rcu
81529 #else
81530 # define __rcu
81531+# define __force_rcu
81532 #endif
81533 extern void __chk_user_ptr(const volatile void __user *);
81534 extern void __chk_io_ptr(const volatile void __iomem *);
81535 #else
81536-# define __user
81537-# define __kernel
81538+# ifdef CHECKER_PLUGIN
81539+//# define __user
81540+//# define __force_user
81541+//# define __kernel
81542+//# define __force_kernel
81543+# else
81544+# ifdef STRUCTLEAK_PLUGIN
81545+# define __user __attribute__((user))
81546+# else
81547+# define __user
81548+# endif
81549+# define __force_user
81550+# define __kernel
81551+# define __force_kernel
81552+# endif
81553 # define __safe
81554 # define __force
81555 # define __nocast
81556 # define __iomem
81557+# define __force_iomem
81558 # define __chk_user_ptr(x) (void)0
81559 # define __chk_io_ptr(x) (void)0
81560 # define __builtin_warning(x, y...) (1)
81561@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
81562 # define __release(x) (void)0
81563 # define __cond_lock(x,c) (c)
81564 # define __percpu
81565+# define __force_percpu
81566 # define __rcu
81567+# define __force_rcu
81568 #endif
81569
81570 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
81571@@ -286,6 +308,34 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
81572 # define __attribute_const__ /* unimplemented */
81573 #endif
81574
81575+#ifndef __randomize_layout
81576+# define __randomize_layout
81577+#endif
81578+
81579+#ifndef __no_randomize_layout
81580+# define __no_randomize_layout
81581+#endif
81582+
81583+#ifndef __no_const
81584+# define __no_const
81585+#endif
81586+
81587+#ifndef __do_const
81588+# define __do_const
81589+#endif
81590+
81591+#ifndef __size_overflow
81592+# define __size_overflow(...)
81593+#endif
81594+
81595+#ifndef __intentional_overflow
81596+# define __intentional_overflow(...)
81597+#endif
81598+
81599+#ifndef __latent_entropy
81600+# define __latent_entropy
81601+#endif
81602+
81603 /*
81604 * Tell gcc if a function is cold. The compiler will assume any path
81605 * directly leading to the call is unlikely.
81606@@ -295,6 +345,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
81607 #define __cold
81608 #endif
81609
81610+#ifndef __alloc_size
81611+#define __alloc_size(...)
81612+#endif
81613+
81614+#ifndef __bos
81615+#define __bos(ptr, arg)
81616+#endif
81617+
81618+#ifndef __bos0
81619+#define __bos0(ptr)
81620+#endif
81621+
81622+#ifndef __bos1
81623+#define __bos1(ptr)
81624+#endif
81625+
81626 /* Simple shorthand for a section definition */
81627 #ifndef __section
81628 # define __section(S) __attribute__ ((__section__(#S)))
81629@@ -378,7 +444,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
81630 * use is to mediate communication between process-level code and irq/NMI
81631 * handlers, all running on the same CPU.
81632 */
81633-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
81634+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
81635+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
81636
81637 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
81638 #ifdef CONFIG_KPROBES
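
Const-qualifying ACCESS_ONCE() means a stray store through it no longer compiles, which matters once data is constified or made read-only; writes must go through the explicitly writable ACCESS_ONCE_RW(). A sketch assuming the patched definitions above:

int flag;

int  read_it(void)  { return ACCESS_ONCE(flag); }   /* still fine */
/* void bad(void)   { ACCESS_ONCE(flag) = 1; }         now a compile error */
void write_it(void) { ACCESS_ONCE_RW(flag) = 1; }   /* write made explicit */
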
81639diff --git a/include/linux/completion.h b/include/linux/completion.h
81640index 5d5aaae..0ea9b84 100644
81641--- a/include/linux/completion.h
81642+++ b/include/linux/completion.h
81643@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
81644
81645 extern void wait_for_completion(struct completion *);
81646 extern void wait_for_completion_io(struct completion *);
81647-extern int wait_for_completion_interruptible(struct completion *x);
81648-extern int wait_for_completion_killable(struct completion *x);
81649+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
81650+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
81651 extern unsigned long wait_for_completion_timeout(struct completion *x,
81652- unsigned long timeout);
81653+ unsigned long timeout) __intentional_overflow(-1);
81654 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
81655- unsigned long timeout);
81656+ unsigned long timeout) __intentional_overflow(-1);
81657 extern long wait_for_completion_interruptible_timeout(
81658- struct completion *x, unsigned long timeout);
81659+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
81660 extern long wait_for_completion_killable_timeout(
81661- struct completion *x, unsigned long timeout);
81662+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
81663 extern bool try_wait_for_completion(struct completion *x);
81664 extern bool completion_done(struct completion *x);
81665
81666diff --git a/include/linux/configfs.h b/include/linux/configfs.h
81667index 34025df..d94bbbc 100644
81668--- a/include/linux/configfs.h
81669+++ b/include/linux/configfs.h
81670@@ -125,7 +125,7 @@ struct configfs_attribute {
81671 const char *ca_name;
81672 struct module *ca_owner;
81673 umode_t ca_mode;
81674-};
81675+} __do_const;
81676
81677 /*
81678 * Users often need to create attribute structures for their configurable
81679diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
81680index 8f8ae95..b9b0e6d 100644
81681--- a/include/linux/cpufreq.h
81682+++ b/include/linux/cpufreq.h
81683@@ -202,6 +202,7 @@ struct global_attr {
81684 ssize_t (*store)(struct kobject *a, struct attribute *b,
81685 const char *c, size_t count);
81686 };
81687+typedef struct global_attr __no_const global_attr_no_const;
81688
81689 #define define_one_global_ro(_name) \
81690 static struct global_attr _name = \
81691@@ -268,7 +269,7 @@ struct cpufreq_driver {
81692 bool boost_supported;
81693 bool boost_enabled;
81694 int (*set_boost) (int state);
81695-};
81696+} __do_const;
81697
81698 /* flags */
81699 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
81700diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
81701index 25e0df6..952dffd 100644
81702--- a/include/linux/cpuidle.h
81703+++ b/include/linux/cpuidle.h
81704@@ -50,7 +50,8 @@ struct cpuidle_state {
81705 int index);
81706
81707 int (*enter_dead) (struct cpuidle_device *dev, int index);
81708-};
81709+} __do_const;
81710+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
81711
81712 /* Idle State Flags */
81713 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
81714@@ -209,7 +210,7 @@ struct cpuidle_governor {
81715 void (*reflect) (struct cpuidle_device *dev, int index);
81716
81717 struct module *owner;
81718-};
81719+} __do_const;
81720
81721 #ifdef CONFIG_CPU_IDLE
81722 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
81723diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
81724index 2997af6..424ddc1 100644
81725--- a/include/linux/cpumask.h
81726+++ b/include/linux/cpumask.h
81727@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
81728 }
81729
81730 /* Valid inputs for n are -1 and 0. */
81731-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
81732+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
81733 {
81734 return n+1;
81735 }
81736
81737-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
81738+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
81739 {
81740 return n+1;
81741 }
81742
81743-static inline unsigned int cpumask_next_and(int n,
81744+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
81745 const struct cpumask *srcp,
81746 const struct cpumask *andp)
81747 {
81748@@ -174,7 +174,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
81749 *
81750 * Returns >= nr_cpu_ids if no further cpus set.
81751 */
81752-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
81753+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
81754 {
81755 /* -1 is a legal arg here. */
81756 if (n != -1)
81757@@ -189,7 +189,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
81758 *
81759 * Returns >= nr_cpu_ids if no further cpus unset.
81760 */
81761-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
81762+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
81763 {
81764 /* -1 is a legal arg here. */
81765 if (n != -1)
81766@@ -197,7 +197,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
81767 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
81768 }
81769
81770-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
81771+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
81772 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
81773 int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
81774
81775diff --git a/include/linux/cred.h b/include/linux/cred.h
81776index f61d6c8..d372d95 100644
81777--- a/include/linux/cred.h
81778+++ b/include/linux/cred.h
81779@@ -35,7 +35,7 @@ struct group_info {
81780 int nblocks;
81781 kgid_t small_block[NGROUPS_SMALL];
81782 kgid_t *blocks[0];
81783-};
81784+} __randomize_layout;
81785
81786 /**
81787 * get_group_info - Get a reference to a group info structure
81788@@ -136,7 +136,7 @@ struct cred {
81789 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
81790 struct group_info *group_info; /* supplementary groups for euid/fsgid */
81791 struct rcu_head rcu; /* RCU deletion hook */
81792-};
81793+} __randomize_layout;
81794
81795 extern void __put_cred(struct cred *);
81796 extern void exit_creds(struct task_struct *);
81797@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
81798 static inline void validate_process_creds(void)
81799 {
81800 }
81801+static inline void validate_task_creds(struct task_struct *task)
81802+{
81803+}
81804 #endif
81805
81806 /**
81807@@ -322,6 +325,7 @@ static inline void put_cred(const struct cred *_cred)
81808
81809 #define task_uid(task) (task_cred_xxx((task), uid))
81810 #define task_euid(task) (task_cred_xxx((task), euid))
81811+#define task_securebits(task) (task_cred_xxx((task), securebits))
81812
81813 #define current_cred_xxx(xxx) \
81814 ({ \
81815diff --git a/include/linux/crypto.h b/include/linux/crypto.h
81816index b92eadf..b4ecdc1 100644
81817--- a/include/linux/crypto.h
81818+++ b/include/linux/crypto.h
81819@@ -373,7 +373,7 @@ struct cipher_tfm {
81820 const u8 *key, unsigned int keylen);
81821 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
81822 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
81823-};
81824+} __no_const;
81825
81826 struct hash_tfm {
81827 int (*init)(struct hash_desc *desc);
81828@@ -394,13 +394,13 @@ struct compress_tfm {
81829 int (*cot_decompress)(struct crypto_tfm *tfm,
81830 const u8 *src, unsigned int slen,
81831 u8 *dst, unsigned int *dlen);
81832-};
81833+} __no_const;
81834
81835 struct rng_tfm {
81836 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
81837 unsigned int dlen);
81838 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
81839-};
81840+} __no_const;
81841
81842 #define crt_ablkcipher crt_u.ablkcipher
81843 #define crt_aead crt_u.aead
81844diff --git a/include/linux/ctype.h b/include/linux/ctype.h
81845index 653589e..4ef254a 100644
81846--- a/include/linux/ctype.h
81847+++ b/include/linux/ctype.h
81848@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
81849 * Fast implementation of tolower() for internal usage. Do not use in your
81850 * code.
81851 */
81852-static inline char _tolower(const char c)
81853+static inline unsigned char _tolower(const unsigned char c)
81854 {
81855 return c | 0x20;
81856 }
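
Switching _tolower() to unsigned char avoids sign extension when a high-bit character is promoted to int on signed-char platforms. A worked standalone example (illustrative only):

#include <stdio.h>

static inline unsigned char _tolower(const unsigned char c)
{
        return c | 0x20;
}

int main(void)
{
        char c = '\xC4';        /* negative where char is signed */
        /* With a plain char parameter, c would sign-extend to
         * 0xFFFFFFC4 before the OR; as unsigned char it is 0xC4. */
        printf("0x%02x\n", _tolower((unsigned char)c)); /* 0xe4 */
        return 0;
}
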
81857diff --git a/include/linux/dcache.h b/include/linux/dcache.h
81858index 3c7ec32..4ca97cc 100644
81859--- a/include/linux/dcache.h
81860+++ b/include/linux/dcache.h
81861@@ -133,7 +133,7 @@ struct dentry {
81862 } d_u;
81863 struct list_head d_subdirs; /* our children */
81864 struct hlist_node d_alias; /* inode alias list */
81865-};
81866+} __randomize_layout;
81867
81868 /*
81869 * dentry->d_lock spinlock nesting subclasses:
81870diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
81871index 7925bf0..d5143d2 100644
81872--- a/include/linux/decompress/mm.h
81873+++ b/include/linux/decompress/mm.h
81874@@ -77,7 +77,7 @@ static void free(void *where)
81875 * warnings when not needed (indeed large_malloc / large_free are not
81876 * needed by inflate */
81877
81878-#define malloc(a) kmalloc(a, GFP_KERNEL)
81879+#define malloc(a) kmalloc((a), GFP_KERNEL)
81880 #define free(a) kfree(a)
81881
81882 #define large_malloc(a) vmalloc(a)
81883diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
81884index f1863dc..5c26074 100644
81885--- a/include/linux/devfreq.h
81886+++ b/include/linux/devfreq.h
81887@@ -114,7 +114,7 @@ struct devfreq_governor {
81888 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
81889 int (*event_handler)(struct devfreq *devfreq,
81890 unsigned int event, void *data);
81891-};
81892+} __do_const;
81893
81894 /**
81895 * struct devfreq - Device devfreq structure
81896diff --git a/include/linux/device.h b/include/linux/device.h
81897index af424ac..fd46ddf 100644
81898--- a/include/linux/device.h
81899+++ b/include/linux/device.h
81900@@ -310,7 +310,7 @@ struct subsys_interface {
81901 struct list_head node;
81902 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
81903 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
81904-};
81905+} __do_const;
81906
81907 int subsys_interface_register(struct subsys_interface *sif);
81908 void subsys_interface_unregister(struct subsys_interface *sif);
81909@@ -506,7 +506,7 @@ struct device_type {
81910 void (*release)(struct device *dev);
81911
81912 const struct dev_pm_ops *pm;
81913-};
81914+} __do_const;
81915
81916 /* interface for exporting device attributes */
81917 struct device_attribute {
81918@@ -516,11 +516,12 @@ struct device_attribute {
81919 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
81920 const char *buf, size_t count);
81921 };
81922+typedef struct device_attribute __no_const device_attribute_no_const;
81923
81924 struct dev_ext_attribute {
81925 struct device_attribute attr;
81926 void *var;
81927-};
81928+} __do_const;
81929
81930 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
81931 char *buf);
81932diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
81933index 931b709..89b2d89 100644
81934--- a/include/linux/dma-mapping.h
81935+++ b/include/linux/dma-mapping.h
81936@@ -60,7 +60,7 @@ struct dma_map_ops {
81937 u64 (*get_required_mask)(struct device *dev);
81938 #endif
81939 int is_phys;
81940-};
81941+} __do_const;
81942
81943 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
81944
81945diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
81946index d2c5cc7..d193394 100644
81947--- a/include/linux/dmaengine.h
81948+++ b/include/linux/dmaengine.h
81949@@ -1147,9 +1147,9 @@ struct dma_pinned_list {
81950 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
81951 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
81952
81953-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
81954+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
81955 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
81956-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
81957+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
81958 struct dma_pinned_list *pinned_list, struct page *page,
81959 unsigned int offset, size_t len);
81960
81961diff --git a/include/linux/efi.h b/include/linux/efi.h
81962index 41bbf8b..bd3a718 100644
81963--- a/include/linux/efi.h
81964+++ b/include/linux/efi.h
81965@@ -1027,6 +1027,7 @@ struct efivar_operations {
81966 efi_set_variable_t *set_variable;
81967 efi_query_variable_store_t *query_variable_store;
81968 };
81969+typedef struct efivar_operations __no_const efivar_operations_no_const;
81970
81971 struct efivars {
81972 /*
81973diff --git a/include/linux/elf.h b/include/linux/elf.h
81974index 67a5fa7..b817372 100644
81975--- a/include/linux/elf.h
81976+++ b/include/linux/elf.h
81977@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
81978 #define elf_note elf32_note
81979 #define elf_addr_t Elf32_Off
81980 #define Elf_Half Elf32_Half
81981+#define elf_dyn Elf32_Dyn
81982
81983 #else
81984
81985@@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
81986 #define elf_note elf64_note
81987 #define elf_addr_t Elf64_Off
81988 #define Elf_Half Elf64_Half
81989+#define elf_dyn Elf64_Dyn
81990
81991 #endif
81992
81993diff --git a/include/linux/err.h b/include/linux/err.h
81994index a729120..6ede2c9 100644
81995--- a/include/linux/err.h
81996+++ b/include/linux/err.h
81997@@ -20,12 +20,12 @@
81998
81999 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
82000
82001-static inline void * __must_check ERR_PTR(long error)
82002+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
82003 {
82004 return (void *) error;
82005 }
82006
82007-static inline long __must_check PTR_ERR(__force const void *ptr)
82008+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
82009 {
82010 return (long) ptr;
82011 }
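
ERR_PTR()/PTR_ERR() deliberately pun pointers against negative errnos living in the top MAX_ERRNO bytes of the address space, which is exactly the conversion __intentional_overflow(-1) whitelists here. A standalone rendering of the encoding (illustrative; constants copied from the header):

#include <stdio.h>

#define MAX_ERRNO       4095
#define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }

int main(void)
{
        void *p = ERR_PTR(-12);                 /* -ENOMEM */

        if (IS_ERR_VALUE((unsigned long)p))
                printf("error %ld\n", PTR_ERR(p));      /* error -12 */
        return 0;
}
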
82012diff --git a/include/linux/extcon.h b/include/linux/extcon.h
82013index 36f49c4..a2a1f4c 100644
82014--- a/include/linux/extcon.h
82015+++ b/include/linux/extcon.h
82016@@ -135,7 +135,7 @@ struct extcon_dev {
82017 /* /sys/class/extcon/.../mutually_exclusive/... */
82018 struct attribute_group attr_g_muex;
82019 struct attribute **attrs_muex;
82020- struct device_attribute *d_attrs_muex;
82021+ device_attribute_no_const *d_attrs_muex;
82022 };
82023
82024 /**
82025diff --git a/include/linux/fb.h b/include/linux/fb.h
82026index b6bfda9..1f13487 100644
82027--- a/include/linux/fb.h
82028+++ b/include/linux/fb.h
82029@@ -305,7 +305,7 @@ struct fb_ops {
82030 /* called at KDB enter and leave time to prepare the console */
82031 int (*fb_debug_enter)(struct fb_info *info);
82032 int (*fb_debug_leave)(struct fb_info *info);
82033-};
82034+} __do_const;
82035
82036 #ifdef CONFIG_FB_TILEBLITTING
82037 #define FB_TILE_CURSOR_NONE 0
82038diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
82039index 230f87b..1fd0485 100644
82040--- a/include/linux/fdtable.h
82041+++ b/include/linux/fdtable.h
82042@@ -100,7 +100,7 @@ struct files_struct *get_files_struct(struct task_struct *);
82043 void put_files_struct(struct files_struct *fs);
82044 void reset_files_struct(struct files_struct *);
82045 int unshare_files(struct files_struct **);
82046-struct files_struct *dup_fd(struct files_struct *, int *);
82047+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
82048 void do_close_on_exec(struct files_struct *);
82049 int iterate_fd(struct files_struct *, unsigned,
82050 int (*)(const void *, struct file *, unsigned),
82051diff --git a/include/linux/filter.h b/include/linux/filter.h
82052index a7e3c48..e568c8e 100644
82053--- a/include/linux/filter.h
82054+++ b/include/linux/filter.h
82055@@ -9,330 +9,28 @@
82056 #include <linux/workqueue.h>
82057 #include <uapi/linux/filter.h>
82058
82059-/* Internally used and optimized filter representation with extended
82060- * instruction set based on top of classic BPF.
82061- */
82062-
82063-/* instruction classes */
82064-#define BPF_ALU64 0x07 /* alu mode in double word width */
82065-
82066-/* ld/ldx fields */
82067-#define BPF_DW 0x18 /* double word */
82068-#define BPF_XADD 0xc0 /* exclusive add */
82069-
82070-/* alu/jmp fields */
82071-#define BPF_MOV 0xb0 /* mov reg to reg */
82072-#define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */
82073-
82074-/* change endianness of a register */
82075-#define BPF_END 0xd0 /* flags for endianness conversion: */
82076-#define BPF_TO_LE 0x00 /* convert to little-endian */
82077-#define BPF_TO_BE 0x08 /* convert to big-endian */
82078-#define BPF_FROM_LE BPF_TO_LE
82079-#define BPF_FROM_BE BPF_TO_BE
82080-
82081-#define BPF_JNE 0x50 /* jump != */
82082-#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
82083-#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
82084-#define BPF_CALL 0x80 /* function call */
82085-#define BPF_EXIT 0x90 /* function return */
82086-
82087-/* Register numbers */
82088-enum {
82089- BPF_REG_0 = 0,
82090- BPF_REG_1,
82091- BPF_REG_2,
82092- BPF_REG_3,
82093- BPF_REG_4,
82094- BPF_REG_5,
82095- BPF_REG_6,
82096- BPF_REG_7,
82097- BPF_REG_8,
82098- BPF_REG_9,
82099- BPF_REG_10,
82100- __MAX_BPF_REG,
82101-};
82102-
82103-/* BPF has 10 general purpose 64-bit registers and stack frame. */
82104-#define MAX_BPF_REG __MAX_BPF_REG
82105-
82106-/* ArgX, context and stack frame pointer register positions. Note,
82107- * Arg1, Arg2, Arg3, etc are used as argument mappings of function
82108- * calls in BPF_CALL instruction.
82109- */
82110-#define BPF_REG_ARG1 BPF_REG_1
82111-#define BPF_REG_ARG2 BPF_REG_2
82112-#define BPF_REG_ARG3 BPF_REG_3
82113-#define BPF_REG_ARG4 BPF_REG_4
82114-#define BPF_REG_ARG5 BPF_REG_5
82115-#define BPF_REG_CTX BPF_REG_6
82116-#define BPF_REG_FP BPF_REG_10
82117-
82118-/* Additional register mappings for converted user programs. */
82119-#define BPF_REG_A BPF_REG_0
82120-#define BPF_REG_X BPF_REG_7
82121-#define BPF_REG_TMP BPF_REG_8
82122-
82123-/* BPF program can access up to 512 bytes of stack space. */
82124-#define MAX_BPF_STACK 512
82125-
82126-/* Helper macros for filter block array initializers. */
82127-
82128-/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
82129-
82130-#define BPF_ALU64_REG(OP, DST, SRC) \
82131- ((struct sock_filter_int) { \
82132- .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
82133- .dst_reg = DST, \
82134- .src_reg = SRC, \
82135- .off = 0, \
82136- .imm = 0 })
82137-
82138-#define BPF_ALU32_REG(OP, DST, SRC) \
82139- ((struct sock_filter_int) { \
82140- .code = BPF_ALU | BPF_OP(OP) | BPF_X, \
82141- .dst_reg = DST, \
82142- .src_reg = SRC, \
82143- .off = 0, \
82144- .imm = 0 })
82145-
82146-/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
82147-
82148-#define BPF_ALU64_IMM(OP, DST, IMM) \
82149- ((struct sock_filter_int) { \
82150- .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
82151- .dst_reg = DST, \
82152- .src_reg = 0, \
82153- .off = 0, \
82154- .imm = IMM })
82155-
82156-#define BPF_ALU32_IMM(OP, DST, IMM) \
82157- ((struct sock_filter_int) { \
82158- .code = BPF_ALU | BPF_OP(OP) | BPF_K, \
82159- .dst_reg = DST, \
82160- .src_reg = 0, \
82161- .off = 0, \
82162- .imm = IMM })
82163-
82164-/* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
82165-
82166-#define BPF_ENDIAN(TYPE, DST, LEN) \
82167- ((struct sock_filter_int) { \
82168- .code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \
82169- .dst_reg = DST, \
82170- .src_reg = 0, \
82171- .off = 0, \
82172- .imm = LEN })
82173-
82174-/* Short form of mov, dst_reg = src_reg */
82175-
82176-#define BPF_MOV64_REG(DST, SRC) \
82177- ((struct sock_filter_int) { \
82178- .code = BPF_ALU64 | BPF_MOV | BPF_X, \
82179- .dst_reg = DST, \
82180- .src_reg = SRC, \
82181- .off = 0, \
82182- .imm = 0 })
82183-
82184-#define BPF_MOV32_REG(DST, SRC) \
82185- ((struct sock_filter_int) { \
82186- .code = BPF_ALU | BPF_MOV | BPF_X, \
82187- .dst_reg = DST, \
82188- .src_reg = SRC, \
82189- .off = 0, \
82190- .imm = 0 })
82191-
82192-/* Short form of mov, dst_reg = imm32 */
82193-
82194-#define BPF_MOV64_IMM(DST, IMM) \
82195- ((struct sock_filter_int) { \
82196- .code = BPF_ALU64 | BPF_MOV | BPF_K, \
82197- .dst_reg = DST, \
82198- .src_reg = 0, \
82199- .off = 0, \
82200- .imm = IMM })
82201-
82202-#define BPF_MOV32_IMM(DST, IMM) \
82203- ((struct sock_filter_int) { \
82204- .code = BPF_ALU | BPF_MOV | BPF_K, \
82205- .dst_reg = DST, \
82206- .src_reg = 0, \
82207- .off = 0, \
82208- .imm = IMM })
82209-
82210-/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
82211-
82212-#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \
82213- ((struct sock_filter_int) { \
82214- .code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \
82215- .dst_reg = DST, \
82216- .src_reg = SRC, \
82217- .off = 0, \
82218- .imm = IMM })
82219-
82220-#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \
82221- ((struct sock_filter_int) { \
82222- .code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \
82223- .dst_reg = DST, \
82224- .src_reg = SRC, \
82225- .off = 0, \
82226- .imm = IMM })
82227-
82228-/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */
82229-
82230-#define BPF_LD_ABS(SIZE, IMM) \
82231- ((struct sock_filter_int) { \
82232- .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
82233- .dst_reg = 0, \
82234- .src_reg = 0, \
82235- .off = 0, \
82236- .imm = IMM })
82237-
82238-/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */
82239-
82240-#define BPF_LD_IND(SIZE, SRC, IMM) \
82241- ((struct sock_filter_int) { \
82242- .code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \
82243- .dst_reg = 0, \
82244- .src_reg = SRC, \
82245- .off = 0, \
82246- .imm = IMM })
82247-
82248-/* Memory load, dst_reg = *(uint *) (src_reg + off16) */
82249-
82250-#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
82251- ((struct sock_filter_int) { \
82252- .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
82253- .dst_reg = DST, \
82254- .src_reg = SRC, \
82255- .off = OFF, \
82256- .imm = 0 })
82257-
82258-/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
82259-
82260-#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
82261- ((struct sock_filter_int) { \
82262- .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
82263- .dst_reg = DST, \
82264- .src_reg = SRC, \
82265- .off = OFF, \
82266- .imm = 0 })
82267-
82268-/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
82269-
82270-#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
82271- ((struct sock_filter_int) { \
82272- .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
82273- .dst_reg = DST, \
82274- .src_reg = 0, \
82275- .off = OFF, \
82276- .imm = IMM })
82277-
82278-/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */
82279-
82280-#define BPF_JMP_REG(OP, DST, SRC, OFF) \
82281- ((struct sock_filter_int) { \
82282- .code = BPF_JMP | BPF_OP(OP) | BPF_X, \
82283- .dst_reg = DST, \
82284- .src_reg = SRC, \
82285- .off = OFF, \
82286- .imm = 0 })
82287-
82288-/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
82289-
82290-#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
82291- ((struct sock_filter_int) { \
82292- .code = BPF_JMP | BPF_OP(OP) | BPF_K, \
82293- .dst_reg = DST, \
82294- .src_reg = 0, \
82295- .off = OFF, \
82296- .imm = IMM })
82297-
82298-/* Function call */
82299-
82300-#define BPF_EMIT_CALL(FUNC) \
82301- ((struct sock_filter_int) { \
82302- .code = BPF_JMP | BPF_CALL, \
82303- .dst_reg = 0, \
82304- .src_reg = 0, \
82305- .off = 0, \
82306- .imm = ((FUNC) - __bpf_call_base) })
82307-
82308-/* Raw code statement block */
82309-
82310-#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
82311- ((struct sock_filter_int) { \
82312- .code = CODE, \
82313- .dst_reg = DST, \
82314- .src_reg = SRC, \
82315- .off = OFF, \
82316- .imm = IMM })
82317-
82318-/* Program exit */
82319-
82320-#define BPF_EXIT_INSN() \
82321- ((struct sock_filter_int) { \
82322- .code = BPF_JMP | BPF_EXIT, \
82323- .dst_reg = 0, \
82324- .src_reg = 0, \
82325- .off = 0, \
82326- .imm = 0 })
82327-
82328-#define bytes_to_bpf_size(bytes) \
82329-({ \
82330- int bpf_size = -EINVAL; \
82331- \
82332- if (bytes == sizeof(u8)) \
82333- bpf_size = BPF_B; \
82334- else if (bytes == sizeof(u16)) \
82335- bpf_size = BPF_H; \
82336- else if (bytes == sizeof(u32)) \
82337- bpf_size = BPF_W; \
82338- else if (bytes == sizeof(u64)) \
82339- bpf_size = BPF_DW; \
82340- \
82341- bpf_size; \
82342-})
82343-
82344-/* Macro to invoke filter function. */
82345-#define SK_RUN_FILTER(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)
82346-
82347-struct sock_filter_int {
82348- __u8 code; /* opcode */
82349- __u8 dst_reg:4; /* dest register */
82350- __u8 src_reg:4; /* source register */
82351- __s16 off; /* signed offset */
82352- __s32 imm; /* signed immediate constant */
82353-};
82354-
82355 #ifdef CONFIG_COMPAT
82356-/* A struct sock_filter is architecture independent. */
82357+/*
82358+ * A struct sock_filter is architecture independent.
82359+ */
82360 struct compat_sock_fprog {
82361 u16 len;
82362- compat_uptr_t filter; /* struct sock_filter * */
82363+ compat_uptr_t filter; /* struct sock_filter * */
82364 };
82365 #endif
82366
82367-struct sock_fprog_kern {
82368- u16 len;
82369- struct sock_filter *filter;
82370-};
82371-
82372 struct sk_buff;
82373 struct sock;
82374-struct seccomp_data;
82375
82376-struct sk_filter {
82377+struct sk_filter
82378+{
82379 atomic_t refcnt;
82380- u32 jited:1, /* Is our filter JIT'ed? */
82381- len:31; /* Number of filter blocks */
82382- struct sock_fprog_kern *orig_prog; /* Original BPF program */
82383+ unsigned int len; /* Number of filter blocks */
82384 struct rcu_head rcu;
82385 unsigned int (*bpf_func)(const struct sk_buff *skb,
82386- const struct sock_filter_int *filter);
82387+ const struct sock_filter *filter);
82388 union {
82389- struct sock_filter insns[0];
82390- struct sock_filter_int insnsi[0];
82391+ struct sock_filter insns[0];
82392 struct work_struct work;
82393 };
82394 };
82395@@ -343,76 +41,25 @@ static inline unsigned int sk_filter_size(unsigned int proglen)
82396 offsetof(struct sk_filter, insns[proglen]));
82397 }
82398
82399-#define sk_filter_proglen(fprog) \
82400- (fprog->len * sizeof(fprog->filter[0]))
82401-
82402-int sk_filter(struct sock *sk, struct sk_buff *skb);
82403-
82404-void sk_filter_select_runtime(struct sk_filter *fp);
82405-void sk_filter_free(struct sk_filter *fp);
82406-
82407-int sk_convert_filter(struct sock_filter *prog, int len,
82408- struct sock_filter_int *new_prog, int *new_len);
82409-
82410-int sk_unattached_filter_create(struct sk_filter **pfp,
82411- struct sock_fprog_kern *fprog);
82412-void sk_unattached_filter_destroy(struct sk_filter *fp);
82413-
82414-int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
82415-int sk_detach_filter(struct sock *sk);
82416-
82417-int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
82418-int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
82419- unsigned int len);
82420-
82421-void sk_filter_charge(struct sock *sk, struct sk_filter *fp);
82422-void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
82423-
82424-u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
82425-void bpf_int_jit_compile(struct sk_filter *fp);
82426-
82427-#define BPF_ANC BIT(15)
82428-
82429-static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
82430-{
82431- BUG_ON(ftest->code & BPF_ANC);
82432-
82433- switch (ftest->code) {
82434- case BPF_LD | BPF_W | BPF_ABS:
82435- case BPF_LD | BPF_H | BPF_ABS:
82436- case BPF_LD | BPF_B | BPF_ABS:
82437-#define BPF_ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \
82438- return BPF_ANC | SKF_AD_##CODE
82439- switch (ftest->k) {
82440- BPF_ANCILLARY(PROTOCOL);
82441- BPF_ANCILLARY(PKTTYPE);
82442- BPF_ANCILLARY(IFINDEX);
82443- BPF_ANCILLARY(NLATTR);
82444- BPF_ANCILLARY(NLATTR_NEST);
82445- BPF_ANCILLARY(MARK);
82446- BPF_ANCILLARY(QUEUE);
82447- BPF_ANCILLARY(HATYPE);
82448- BPF_ANCILLARY(RXHASH);
82449- BPF_ANCILLARY(CPU);
82450- BPF_ANCILLARY(ALU_XOR_X);
82451- BPF_ANCILLARY(VLAN_TAG);
82452- BPF_ANCILLARY(VLAN_TAG_PRESENT);
82453- BPF_ANCILLARY(PAY_OFFSET);
82454- BPF_ANCILLARY(RANDOM);
82455- }
82456- /* Fallthrough. */
82457- default:
82458- return ftest->code;
82459- }
82460-}
82461+extern int sk_filter(struct sock *sk, struct sk_buff *skb);
82462+extern unsigned int sk_run_filter(const struct sk_buff *skb,
82463+ const struct sock_filter *filter);
82464+extern int sk_unattached_filter_create(struct sk_filter **pfp,
82465+ struct sock_fprog *fprog);
82466+extern void sk_unattached_filter_destroy(struct sk_filter *fp);
82467+extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
82468+extern int sk_detach_filter(struct sock *sk);
82469+extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
82470+extern int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned len);
82471+extern void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to);
82472
82473 #ifdef CONFIG_BPF_JIT
82474 #include <stdarg.h>
82475 #include <linux/linkage.h>
82476 #include <linux/printk.h>
82477
82478-void bpf_jit_compile(struct sk_filter *fp);
82479-void bpf_jit_free(struct sk_filter *fp);
82480+extern void bpf_jit_compile(struct sk_filter *fp);
82481+extern void bpf_jit_free(struct sk_filter *fp);
82482
82483 static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
82484 u32 pass, void *image)
82485@@ -423,22 +70,90 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
82486 print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
82487 16, 1, image, proglen, false);
82488 }
82489+#define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns)
82490 #else
82491 #include <linux/slab.h>
82492-
82493 static inline void bpf_jit_compile(struct sk_filter *fp)
82494 {
82495 }
82496-
82497 static inline void bpf_jit_free(struct sk_filter *fp)
82498 {
82499 kfree(fp);
82500 }
82501-#endif /* CONFIG_BPF_JIT */
82502+#define SK_RUN_FILTER(FILTER, SKB) sk_run_filter(SKB, FILTER->insns)
82503+#endif
82504
82505 static inline int bpf_tell_extensions(void)
82506 {
82507 return SKF_AD_MAX;
82508 }
82509
82510+enum {
82511+ BPF_S_RET_K = 1,
82512+ BPF_S_RET_A,
82513+ BPF_S_ALU_ADD_K,
82514+ BPF_S_ALU_ADD_X,
82515+ BPF_S_ALU_SUB_K,
82516+ BPF_S_ALU_SUB_X,
82517+ BPF_S_ALU_MUL_K,
82518+ BPF_S_ALU_MUL_X,
82519+ BPF_S_ALU_DIV_X,
82520+ BPF_S_ALU_MOD_K,
82521+ BPF_S_ALU_MOD_X,
82522+ BPF_S_ALU_AND_K,
82523+ BPF_S_ALU_AND_X,
82524+ BPF_S_ALU_OR_K,
82525+ BPF_S_ALU_OR_X,
82526+ BPF_S_ALU_XOR_K,
82527+ BPF_S_ALU_XOR_X,
82528+ BPF_S_ALU_LSH_K,
82529+ BPF_S_ALU_LSH_X,
82530+ BPF_S_ALU_RSH_K,
82531+ BPF_S_ALU_RSH_X,
82532+ BPF_S_ALU_NEG,
82533+ BPF_S_LD_W_ABS,
82534+ BPF_S_LD_H_ABS,
82535+ BPF_S_LD_B_ABS,
82536+ BPF_S_LD_W_LEN,
82537+ BPF_S_LD_W_IND,
82538+ BPF_S_LD_H_IND,
82539+ BPF_S_LD_B_IND,
82540+ BPF_S_LD_IMM,
82541+ BPF_S_LDX_W_LEN,
82542+ BPF_S_LDX_B_MSH,
82543+ BPF_S_LDX_IMM,
82544+ BPF_S_MISC_TAX,
82545+ BPF_S_MISC_TXA,
82546+ BPF_S_ALU_DIV_K,
82547+ BPF_S_LD_MEM,
82548+ BPF_S_LDX_MEM,
82549+ BPF_S_ST,
82550+ BPF_S_STX,
82551+ BPF_S_JMP_JA,
82552+ BPF_S_JMP_JEQ_K,
82553+ BPF_S_JMP_JEQ_X,
82554+ BPF_S_JMP_JGE_K,
82555+ BPF_S_JMP_JGE_X,
82556+ BPF_S_JMP_JGT_K,
82557+ BPF_S_JMP_JGT_X,
82558+ BPF_S_JMP_JSET_K,
82559+ BPF_S_JMP_JSET_X,
82560+ /* Ancillary data */
82561+ BPF_S_ANC_PROTOCOL,
82562+ BPF_S_ANC_PKTTYPE,
82563+ BPF_S_ANC_IFINDEX,
82564+ BPF_S_ANC_NLATTR,
82565+ BPF_S_ANC_NLATTR_NEST,
82566+ BPF_S_ANC_MARK,
82567+ BPF_S_ANC_QUEUE,
82568+ BPF_S_ANC_HATYPE,
82569+ BPF_S_ANC_RXHASH,
82570+ BPF_S_ANC_CPU,
82571+ BPF_S_ANC_ALU_XOR_X,
82572+ BPF_S_ANC_SECCOMP_LD_W,
82573+ BPF_S_ANC_VLAN_TAG,
82574+ BPF_S_ANC_VLAN_TAG_PRESENT,
82575+ BPF_S_ANC_PAY_OFFSET,
82576+};
82577+
82578 #endif /* __LINUX_FILTER_H__ */
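
This hunk reverts filter.h from 3.16's internal eBPF representation back to the classic-BPF-only API: filters run through sk_run_filter()/SK_RUN_FILTER again and the decoded BPF_S_* opcode enum returns. The user-space contract is the same either way; a minimal sketch of attaching a classic accept-all filter (illustrative only):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/filter.h>

int main(void)
{
        /* Single instruction: return 0xffffffff, i.e. accept the
         * whole packet. */
        struct sock_filter insns[] = {
                { BPF_RET | BPF_K, 0, 0, 0xffffffff },
        };
        struct sock_fprog prog = { .len = 1, .filter = insns };
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
                                 &prog, sizeof(prog)) < 0)
                perror("attach");
        return 0;
}
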
82579diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
82580index 8293262..2b3b8bd 100644
82581--- a/include/linux/frontswap.h
82582+++ b/include/linux/frontswap.h
82583@@ -11,7 +11,7 @@ struct frontswap_ops {
82584 int (*load)(unsigned, pgoff_t, struct page *);
82585 void (*invalidate_page)(unsigned, pgoff_t);
82586 void (*invalidate_area)(unsigned);
82587-};
82588+} __no_const;
82589
82590 extern bool frontswap_enabled;
82591 extern struct frontswap_ops *
82592diff --git a/include/linux/fs.h b/include/linux/fs.h
82593index e11d60c..901317a 100644
82594--- a/include/linux/fs.h
82595+++ b/include/linux/fs.h
82596@@ -401,7 +401,7 @@ struct address_space {
82597 spinlock_t private_lock; /* for use by the address_space */
82598 struct list_head private_list; /* ditto */
82599 void *private_data; /* ditto */
82600-} __attribute__((aligned(sizeof(long))));
82601+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
82602 /*
82603 * On most architectures that alignment is already the case; but
82604 * must be enforced here for CRIS, to let the least significant bit
82605@@ -444,7 +444,7 @@ struct block_device {
82606 int bd_fsfreeze_count;
82607 /* Mutex for freeze */
82608 struct mutex bd_fsfreeze_mutex;
82609-};
82610+} __randomize_layout;
82611
82612 /*
82613 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
82614@@ -588,7 +588,7 @@ struct inode {
82615 #endif
82616
82617 void *i_private; /* fs or device private pointer */
82618-};
82619+} __randomize_layout;
82620
82621 static inline int inode_unhashed(struct inode *inode)
82622 {
82623@@ -781,7 +781,7 @@ struct file {
82624 struct list_head f_tfile_llink;
82625 #endif /* #ifdef CONFIG_EPOLL */
82626 struct address_space *f_mapping;
82627-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
82628+} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */
82629
82630 struct file_handle {
82631 __u32 handle_bytes;
82632@@ -909,7 +909,7 @@ struct file_lock {
82633 int state; /* state of grant or error if -ve */
82634 } afs;
82635 } fl_u;
82636-};
82637+} __randomize_layout;
82638
82639 /* The following constant reflects the upper bound of the file/locking space */
82640 #ifndef OFFSET_MAX
82641@@ -1258,7 +1258,7 @@ struct super_block {
82642 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
82643 struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
82644 struct rcu_head rcu;
82645-};
82646+} __randomize_layout;
82647
82648 extern struct timespec current_fs_time(struct super_block *sb);
82649
82650@@ -1484,7 +1484,8 @@ struct file_operations {
82651 long (*fallocate)(struct file *file, int mode, loff_t offset,
82652 loff_t len);
82653 int (*show_fdinfo)(struct seq_file *m, struct file *f);
82654-};
82655+} __do_const __randomize_layout;
82656+typedef struct file_operations __no_const file_operations_no_const;
82657
82658 struct inode_operations {
82659 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
82660@@ -2769,4 +2770,14 @@ static inline bool dir_relax(struct inode *inode)
82661 return !IS_DEADDIR(inode);
82662 }
82663
82664+static inline bool is_sidechannel_device(const struct inode *inode)
82665+{
82666+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
82667+ umode_t mode = inode->i_mode;
82668+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
82669+#else
82670+ return false;
82671+#endif
82672+}
82673+
82674 #endif /* _LINUX_FS_H */
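
is_sidechannel_device() flags world-readable or world-writable character and block devices; the fsnotify hooks in the next hunks use it to suppress access/modify events on them, closing a timing side channel (e.g. watching tty activity). The mode test restated as standalone user-space C (illustrative only):

#include <stdio.h>
#include <sys/stat.h>

static int is_sidechannel_mode(mode_t mode)
{
        return (S_ISCHR(mode) || S_ISBLK(mode)) &&
               (mode & (S_IROTH | S_IWOTH));
}

int main(void)
{
        struct stat st;

        if (stat("/dev/null", &st) == 0)        /* crw-rw-rw- => flagged */
                printf("%d\n", is_sidechannel_mode(st.st_mode));
        return 0;
}
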
82675diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
82676index 0efc3e6..fd23610 100644
82677--- a/include/linux/fs_struct.h
82678+++ b/include/linux/fs_struct.h
82679@@ -6,13 +6,13 @@
82680 #include <linux/seqlock.h>
82681
82682 struct fs_struct {
82683- int users;
82684+ atomic_t users;
82685 spinlock_t lock;
82686 seqcount_t seq;
82687 int umask;
82688 int in_exec;
82689 struct path root, pwd;
82690-};
82691+} __randomize_layout;
82692
82693 extern struct kmem_cache *fs_cachep;
82694
82695diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
82696index 7714849..a4a5c7a 100644
82697--- a/include/linux/fscache-cache.h
82698+++ b/include/linux/fscache-cache.h
82699@@ -113,7 +113,7 @@ struct fscache_operation {
82700 fscache_operation_release_t release;
82701 };
82702
82703-extern atomic_t fscache_op_debug_id;
82704+extern atomic_unchecked_t fscache_op_debug_id;
82705 extern void fscache_op_work_func(struct work_struct *work);
82706
82707 extern void fscache_enqueue_operation(struct fscache_operation *);
82708@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
82709 INIT_WORK(&op->work, fscache_op_work_func);
82710 atomic_set(&op->usage, 1);
82711 op->state = FSCACHE_OP_ST_INITIALISED;
82712- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
82713+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
82714 op->processor = processor;
82715 op->release = release;
82716 INIT_LIST_HEAD(&op->pend_link);
82717diff --git a/include/linux/fscache.h b/include/linux/fscache.h
82718index 115bb81..e7b812b 100644
82719--- a/include/linux/fscache.h
82720+++ b/include/linux/fscache.h
82721@@ -152,7 +152,7 @@ struct fscache_cookie_def {
82722 * - this is mandatory for any object that may have data
82723 */
82724 void (*now_uncached)(void *cookie_netfs_data);
82725-};
82726+} __do_const;
82727
82728 /*
82729 * fscache cached network filesystem type
82730diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
82731index 1c804b0..1432c2b 100644
82732--- a/include/linux/fsnotify.h
82733+++ b/include/linux/fsnotify.h
82734@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
82735 struct inode *inode = file_inode(file);
82736 __u32 mask = FS_ACCESS;
82737
82738+ if (is_sidechannel_device(inode))
82739+ return;
82740+
82741 if (S_ISDIR(inode->i_mode))
82742 mask |= FS_ISDIR;
82743
82744@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
82745 struct inode *inode = file_inode(file);
82746 __u32 mask = FS_MODIFY;
82747
82748+ if (is_sidechannel_device(inode))
82749+ return;
82750+
82751 if (S_ISDIR(inode->i_mode))
82752 mask |= FS_ISDIR;
82753
82754@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
82755 */
82756 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
82757 {
82758- return kstrdup(name, GFP_KERNEL);
82759+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
82760 }
82761
82762 /*
82763diff --git a/include/linux/genhd.h b/include/linux/genhd.h
82764index ec274e0..e678159 100644
82765--- a/include/linux/genhd.h
82766+++ b/include/linux/genhd.h
82767@@ -194,7 +194,7 @@ struct gendisk {
82768 struct kobject *slave_dir;
82769
82770 struct timer_rand_state *random;
82771- atomic_t sync_io; /* RAID */
82772+ atomic_unchecked_t sync_io; /* RAID */
82773 struct disk_events *ev;
82774 #ifdef CONFIG_BLK_DEV_INTEGRITY
82775 struct blk_integrity *integrity;
82776@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
82777 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
82778
82779 /* drivers/char/random.c */
82780-extern void add_disk_randomness(struct gendisk *disk);
82781+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
82782 extern void rand_initialize_disk(struct gendisk *disk);
82783
82784 static inline sector_t get_start_sect(struct block_device *bdev)
82785diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
82786index c0894dd..2fbf10c 100644
82787--- a/include/linux/genl_magic_func.h
82788+++ b/include/linux/genl_magic_func.h
82789@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
82790 },
82791
82792 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
82793-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
82794+static struct genl_ops ZZZ_genl_ops[] = {
82795 #include GENL_MAGIC_INCLUDE_FILE
82796 };
82797
82798diff --git a/include/linux/gfp.h b/include/linux/gfp.h
82799index 6eb1fb3..30fe7e4 100644
82800--- a/include/linux/gfp.h
82801+++ b/include/linux/gfp.h
82802@@ -34,6 +34,13 @@ struct vm_area_struct;
82803 #define ___GFP_NO_KSWAPD 0x400000u
82804 #define ___GFP_OTHER_NODE 0x800000u
82805 #define ___GFP_WRITE 0x1000000u
82806+
82807+#ifdef CONFIG_PAX_USERCOPY_SLABS
82808+#define ___GFP_USERCOPY 0x2000000u
82809+#else
82810+#define ___GFP_USERCOPY 0
82811+#endif
82812+
82813 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
82814
82815 /*
82816@@ -90,6 +97,7 @@ struct vm_area_struct;
82817 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
82818 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
82819 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
82820+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
82821
82822 /*
82823 * This may seem redundant, but it's a way of annotating false positives vs.
82824@@ -97,7 +105,7 @@ struct vm_area_struct;
82825 */
82826 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
82827
82828-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
82829+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
82830 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
82831
82832 /* This equals 0, but use constants in case they ever change */
82833@@ -155,6 +163,8 @@ struct vm_area_struct;
82834 /* 4GB DMA on some platforms */
82835 #define GFP_DMA32 __GFP_DMA32
82836
82837+#define GFP_USERCOPY __GFP_USERCOPY
82838+
82839 /* Convert GFP flags to their corresponding migrate type */
82840 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
82841 {
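
Worked bit arithmetic for the new flag: ___GFP_USERCOPY claims the next free bit, 0x2000000u == 1 << 25, which is why __GFP_BITS_SHIFT grows from 25 to 26 so that __GFP_BITS_MASK still covers every flag. A standalone check (illustrative only):

#include <stdio.h>

#define ___GFP_USERCOPY         0x2000000u      /* == 1u << 25 */
#define __GFP_BITS_SHIFT        26
#define __GFP_BITS_MASK         ((1u << __GFP_BITS_SHIFT) - 1)

int main(void)
{
        printf("%d\n", ___GFP_USERCOPY == 1u << 25);            /* 1 */
        printf("%d\n", !!(___GFP_USERCOPY & __GFP_BITS_MASK));  /* 1 */
        return 0;
}
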
82842diff --git a/include/linux/gracl.h b/include/linux/gracl.h
82843new file mode 100644
82844index 0000000..edb2cb6
82845--- /dev/null
82846+++ b/include/linux/gracl.h
82847@@ -0,0 +1,340 @@
82848+#ifndef GR_ACL_H
82849+#define GR_ACL_H
82850+
82851+#include <linux/grdefs.h>
82852+#include <linux/resource.h>
82853+#include <linux/capability.h>
82854+#include <linux/dcache.h>
82855+#include <asm/resource.h>
82856+
82857+/* Major status information */
82858+
82859+#define GR_VERSION "grsecurity 3.0"
82860+#define GRSECURITY_VERSION 0x3000
82861+
82862+enum {
82863+ GR_SHUTDOWN = 0,
82864+ GR_ENABLE = 1,
82865+ GR_SPROLE = 2,
82866+ GR_OLDRELOAD = 3,
82867+ GR_SEGVMOD = 4,
82868+ GR_STATUS = 5,
82869+ GR_UNSPROLE = 6,
82870+ GR_PASSSET = 7,
82871+ GR_SPROLEPAM = 8,
82872+ GR_RELOAD = 9,
82873+};
82874+
82875+/* Password setup definitions
82876+ * kernel/grhash.c */
82877+enum {
82878+ GR_PW_LEN = 128,
82879+ GR_SALT_LEN = 16,
82880+ GR_SHA_LEN = 32,
82881+};
82882+
82883+enum {
82884+ GR_SPROLE_LEN = 64,
82885+};
82886+
82887+enum {
82888+ GR_NO_GLOB = 0,
82889+ GR_REG_GLOB,
82890+ GR_CREATE_GLOB
82891+};
82892+
82893+#define GR_NLIMITS 32
82894+
82895+/* Begin Data Structures */
82896+
82897+struct sprole_pw {
82898+ unsigned char *rolename;
82899+ unsigned char salt[GR_SALT_LEN];
82900+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
82901+};
82902+
82903+struct name_entry {
82904+ __u32 key;
82905+ ino_t inode;
82906+ dev_t device;
82907+ char *name;
82908+ __u16 len;
82909+ __u8 deleted;
82910+ struct name_entry *prev;
82911+ struct name_entry *next;
82912+};
82913+
82914+struct inodev_entry {
82915+ struct name_entry *nentry;
82916+ struct inodev_entry *prev;
82917+ struct inodev_entry *next;
82918+};
82919+
82920+struct acl_role_db {
82921+ struct acl_role_label **r_hash;
82922+ __u32 r_size;
82923+};
82924+
82925+struct inodev_db {
82926+ struct inodev_entry **i_hash;
82927+ __u32 i_size;
82928+};
82929+
82930+struct name_db {
82931+ struct name_entry **n_hash;
82932+ __u32 n_size;
82933+};
82934+
82935+struct crash_uid {
82936+ uid_t uid;
82937+ unsigned long expires;
82938+};
82939+
82940+struct gr_hash_struct {
82941+ void **table;
82942+ void **nametable;
82943+ void *first;
82944+ __u32 table_size;
82945+ __u32 used_size;
82946+ int type;
82947+};
82948+
82949+/* Userspace Grsecurity ACL data structures */
82950+
82951+struct acl_subject_label {
82952+ char *filename;
82953+ ino_t inode;
82954+ dev_t device;
82955+ __u32 mode;
82956+ kernel_cap_t cap_mask;
82957+ kernel_cap_t cap_lower;
82958+ kernel_cap_t cap_invert_audit;
82959+
82960+ struct rlimit res[GR_NLIMITS];
82961+ __u32 resmask;
82962+
82963+ __u8 user_trans_type;
82964+ __u8 group_trans_type;
82965+ uid_t *user_transitions;
82966+ gid_t *group_transitions;
82967+ __u16 user_trans_num;
82968+ __u16 group_trans_num;
82969+
82970+ __u32 sock_families[2];
82971+ __u32 ip_proto[8];
82972+ __u32 ip_type;
82973+ struct acl_ip_label **ips;
82974+ __u32 ip_num;
82975+ __u32 inaddr_any_override;
82976+
82977+ __u32 crashes;
82978+ unsigned long expires;
82979+
82980+ struct acl_subject_label *parent_subject;
82981+ struct gr_hash_struct *hash;
82982+ struct acl_subject_label *prev;
82983+ struct acl_subject_label *next;
82984+
82985+ struct acl_object_label **obj_hash;
82986+ __u32 obj_hash_size;
82987+ __u16 pax_flags;
82988+};
82989+
82990+struct role_allowed_ip {
82991+ __u32 addr;
82992+ __u32 netmask;
82993+
82994+ struct role_allowed_ip *prev;
82995+ struct role_allowed_ip *next;
82996+};
82997+
82998+struct role_transition {
82999+ char *rolename;
83000+
83001+ struct role_transition *prev;
83002+ struct role_transition *next;
83003+};
83004+
83005+struct acl_role_label {
83006+ char *rolename;
83007+ uid_t uidgid;
83008+ __u16 roletype;
83009+
83010+ __u16 auth_attempts;
83011+ unsigned long expires;
83012+
83013+ struct acl_subject_label *root_label;
83014+ struct gr_hash_struct *hash;
83015+
83016+ struct acl_role_label *prev;
83017+ struct acl_role_label *next;
83018+
83019+ struct role_transition *transitions;
83020+ struct role_allowed_ip *allowed_ips;
83021+ uid_t *domain_children;
83022+ __u16 domain_child_num;
83023+
83024+ umode_t umask;
83025+
83026+ struct acl_subject_label **subj_hash;
83027+ __u32 subj_hash_size;
83028+};
83029+
83030+struct user_acl_role_db {
83031+ struct acl_role_label **r_table;
83032+ __u32 num_pointers; /* Number of allocations to track */
83033+ __u32 num_roles; /* Number of roles */
83034+ __u32 num_domain_children; /* Number of domain children */
83035+ __u32 num_subjects; /* Number of subjects */
83036+ __u32 num_objects; /* Number of objects */
83037+};
83038+
83039+struct acl_object_label {
83040+ char *filename;
83041+ ino_t inode;
83042+ dev_t device;
83043+ __u32 mode;
83044+
83045+ struct acl_subject_label *nested;
83046+ struct acl_object_label *globbed;
83047+
83048+ /* next two pointers not used */
83049+
83050+ struct acl_object_label *prev;
83051+ struct acl_object_label *next;
83052+};
83053+
83054+struct acl_ip_label {
83055+ char *iface;
83056+ __u32 addr;
83057+ __u32 netmask;
83058+ __u16 low, high;
83059+ __u8 mode;
83060+ __u32 type;
83061+ __u32 proto[8];
83062+
83063+ /* next two pointers not used */
83064+
83065+ struct acl_ip_label *prev;
83066+ struct acl_ip_label *next;
83067+};
83068+
83069+struct gr_arg {
83070+ struct user_acl_role_db role_db;
83071+ unsigned char pw[GR_PW_LEN];
83072+ unsigned char salt[GR_SALT_LEN];
83073+ unsigned char sum[GR_SHA_LEN];
83074+ unsigned char sp_role[GR_SPROLE_LEN];
83075+ struct sprole_pw *sprole_pws;
83076+ dev_t segv_device;
83077+ ino_t segv_inode;
83078+ uid_t segv_uid;
83079+ __u16 num_sprole_pws;
83080+ __u16 mode;
83081+};
83082+
83083+struct gr_arg_wrapper {
83084+ struct gr_arg *arg;
83085+ __u32 version;
83086+ __u32 size;
83087+};
83088+
83089+struct subject_map {
83090+ struct acl_subject_label *user;
83091+ struct acl_subject_label *kernel;
83092+ struct subject_map *prev;
83093+ struct subject_map *next;
83094+};
83095+
83096+struct acl_subj_map_db {
83097+ struct subject_map **s_hash;
83098+ __u32 s_size;
83099+};
83100+
83101+struct gr_policy_state {
83102+ struct sprole_pw **acl_special_roles;
83103+ __u16 num_sprole_pws;
83104+ struct acl_role_label *kernel_role;
83105+ struct acl_role_label *role_list;
83106+ struct acl_role_label *default_role;
83107+ struct acl_role_db acl_role_set;
83108+ struct acl_subj_map_db subj_map_set;
83109+ struct name_db name_set;
83110+ struct inodev_db inodev_set;
83111+};
83112+
83113+struct gr_alloc_state {
83114+ unsigned long alloc_stack_next;
83115+ unsigned long alloc_stack_size;
83116+ void **alloc_stack;
83117+};
83118+
83119+struct gr_reload_state {
83120+ struct gr_policy_state oldpolicy;
83121+ struct gr_alloc_state oldalloc;
83122+ struct gr_policy_state newpolicy;
83123+ struct gr_alloc_state newalloc;
83124+ struct gr_policy_state *oldpolicy_ptr;
83125+ struct gr_alloc_state *oldalloc_ptr;
83126+ unsigned char oldmode;
83127+};
83128+
83129+/* End Data Structures Section */
83130+
83131+/* Hash functions generated by empirical testing by Brad Spengler.
83132+ Makes good use of the low bits of the inode. Generally 0-1 iterations
83133+ of the loop for a successful match, 0-3 for an unsuccessful match.
83134+ Shift/add algorithm with modulus of table size and an XOR. */
83135+
83136+static __inline__ unsigned int
83137+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
83138+{
83139+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
83140+}
83141+
83142+static __inline__ unsigned int
83143+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
83144+{
83145+ return ((const unsigned long)userp % sz);
83146+}
83147+
83148+static __inline__ unsigned int
83149+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
83150+{
83151+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
83152+}
83153+
83154+static __inline__ unsigned int
83155+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
83156+{
83157+ return full_name_hash((const unsigned char *)name, len) % sz;
83158+}
83159+
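+/* Illustrative sketch: these hashes index the fixed-size tables declared
+ * above. Given some struct name_db name_set, a lookup would look
+ * roughly like:
+ *
+ *	struct name_entry *match =
+ *		name_set.n_hash[gr_nhash(name, len, name_set.n_size)];
+ *	while (match && (match->len != len || strcmp(match->name, name)))
+ *		match = match->next;
+ *
+ * Collisions chain through the per-bucket next pointers in
+ * struct name_entry.
+ */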
83160+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
83161+ subj = NULL; \
83162+ iter = 0; \
83163+ while (iter < role->subj_hash_size) { \
83164+ if (subj == NULL) \
83165+ subj = role->subj_hash[iter]; \
83166+ if (subj == NULL) { \
83167+ iter++; \
83168+ continue; \
83169+ }
83170+
83171+#define FOR_EACH_SUBJECT_END(subj,iter) \
83172+ subj = subj->next; \
83173+ if (subj == NULL) \
83174+ iter++; \
83175+ }
83176+
83177+
83178+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
83179+ subj = role->hash->first; \
83180+ while (subj != NULL) {
83181+
83182+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
83183+ subj = subj->next; \
83184+ }
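+/* Usage sketch: the iterator macros expand to an open while loop, so a
+ * START must always be paired with its matching END, e.g.
+ *
+ *	struct acl_subject_label *subj;
+ *	unsigned int i;
+ *
+ *	FOR_EACH_SUBJECT_START(role, subj, i)
+ *		do_subject(subj);	(do_subject is a hypothetical helper)
+ *	FOR_EACH_SUBJECT_END(subj, i)
+ *
+ * The body runs once per subject in every bucket of role->subj_hash.
+ */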
83185+
83186+#endif
83187+
83188diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
83189new file mode 100644
83190index 0000000..33ebd1f
83191--- /dev/null
83192+++ b/include/linux/gracl_compat.h
83193@@ -0,0 +1,156 @@
83194+#ifndef GR_ACL_COMPAT_H
83195+#define GR_ACL_COMPAT_H
83196+
83197+#include <linux/resource.h>
83198+#include <asm/resource.h>
83199+
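+/* Compat counterparts of the structures in linux/gracl.h, for 32-bit
+ * userspace on a 64-bit kernel: pointer members become compat_uptr_t and
+ * unsigned longs become compat_ulong_t, so the layouts match what a
+ * 32-bit policy loader passes in. */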
83200+struct sprole_pw_compat {
83201+ compat_uptr_t rolename;
83202+ unsigned char salt[GR_SALT_LEN];
83203+ unsigned char sum[GR_SHA_LEN];
83204+};
83205+
83206+struct gr_hash_struct_compat {
83207+ compat_uptr_t table;
83208+ compat_uptr_t nametable;
83209+ compat_uptr_t first;
83210+ __u32 table_size;
83211+ __u32 used_size;
83212+ int type;
83213+};
83214+
83215+struct acl_subject_label_compat {
83216+ compat_uptr_t filename;
83217+ compat_ino_t inode;
83218+ __u32 device;
83219+ __u32 mode;
83220+ kernel_cap_t cap_mask;
83221+ kernel_cap_t cap_lower;
83222+ kernel_cap_t cap_invert_audit;
83223+
83224+ struct compat_rlimit res[GR_NLIMITS];
83225+ __u32 resmask;
83226+
83227+ __u8 user_trans_type;
83228+ __u8 group_trans_type;
83229+ compat_uptr_t user_transitions;
83230+ compat_uptr_t group_transitions;
83231+ __u16 user_trans_num;
83232+ __u16 group_trans_num;
83233+
83234+ __u32 sock_families[2];
83235+ __u32 ip_proto[8];
83236+ __u32 ip_type;
83237+ compat_uptr_t ips;
83238+ __u32 ip_num;
83239+ __u32 inaddr_any_override;
83240+
83241+ __u32 crashes;
83242+ compat_ulong_t expires;
83243+
83244+ compat_uptr_t parent_subject;
83245+ compat_uptr_t hash;
83246+ compat_uptr_t prev;
83247+ compat_uptr_t next;
83248+
83249+ compat_uptr_t obj_hash;
83250+ __u32 obj_hash_size;
83251+ __u16 pax_flags;
83252+};
83253+
83254+struct role_allowed_ip_compat {
83255+ __u32 addr;
83256+ __u32 netmask;
83257+
83258+ compat_uptr_t prev;
83259+ compat_uptr_t next;
83260+};
83261+
83262+struct role_transition_compat {
83263+ compat_uptr_t rolename;
83264+
83265+ compat_uptr_t prev;
83266+ compat_uptr_t next;
83267+};
83268+
83269+struct acl_role_label_compat {
83270+ compat_uptr_t rolename;
83271+ uid_t uidgid;
83272+ __u16 roletype;
83273+
83274+ __u16 auth_attempts;
83275+ compat_ulong_t expires;
83276+
83277+ compat_uptr_t root_label;
83278+ compat_uptr_t hash;
83279+
83280+ compat_uptr_t prev;
83281+ compat_uptr_t next;
83282+
83283+ compat_uptr_t transitions;
83284+ compat_uptr_t allowed_ips;
83285+ compat_uptr_t domain_children;
83286+ __u16 domain_child_num;
83287+
83288+ umode_t umask;
83289+
83290+ compat_uptr_t subj_hash;
83291+ __u32 subj_hash_size;
83292+};
83293+
83294+struct user_acl_role_db_compat {
83295+ compat_uptr_t r_table;
83296+ __u32 num_pointers;
83297+ __u32 num_roles;
83298+ __u32 num_domain_children;
83299+ __u32 num_subjects;
83300+ __u32 num_objects;
83301+};
83302+
83303+struct acl_object_label_compat {
83304+ compat_uptr_t filename;
83305+ compat_ino_t inode;
83306+ __u32 device;
83307+ __u32 mode;
83308+
83309+ compat_uptr_t nested;
83310+ compat_uptr_t globbed;
83311+
83312+ compat_uptr_t prev;
83313+ compat_uptr_t next;
83314+};
83315+
83316+struct acl_ip_label_compat {
83317+ compat_uptr_t iface;
83318+ __u32 addr;
83319+ __u32 netmask;
83320+ __u16 low, high;
83321+ __u8 mode;
83322+ __u32 type;
83323+ __u32 proto[8];
83324+
83325+ compat_uptr_t prev;
83326+ compat_uptr_t next;
83327+};
83328+
83329+struct gr_arg_compat {
83330+ struct user_acl_role_db_compat role_db;
83331+ unsigned char pw[GR_PW_LEN];
83332+ unsigned char salt[GR_SALT_LEN];
83333+ unsigned char sum[GR_SHA_LEN];
83334+ unsigned char sp_role[GR_SPROLE_LEN];
83335+ compat_uptr_t sprole_pws;
83336+ __u32 segv_device;
83337+ compat_ino_t segv_inode;
83338+ uid_t segv_uid;
83339+ __u16 num_sprole_pws;
83340+ __u16 mode;
83341+};
83342+
83343+struct gr_arg_wrapper_compat {
83344+ compat_uptr_t arg;
83345+ __u32 version;
83346+ __u32 size;
83347+};
83348+
83349+#endif
83350diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
83351new file mode 100644
83352index 0000000..323ecf2
83353--- /dev/null
83354+++ b/include/linux/gralloc.h
83355@@ -0,0 +1,9 @@
83356+#ifndef __GRALLOC_H
83357+#define __GRALLOC_H
83358+
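+/* Allocation helpers for RBAC policy loading: acl_alloc_stack_init()
+ * sizes a stack of tracked allocations, acl_alloc()/acl_alloc_num()
+ * push onto it, and acl_free_all() releases the whole policy in one
+ * pass (compare struct gr_alloc_state in linux/gracl.h). */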
83359+void acl_free_all(void);
83360+int acl_alloc_stack_init(unsigned long size);
83361+void *acl_alloc(unsigned long len);
83362+void *acl_alloc_num(unsigned long num, unsigned long len);
83363+
83364+#endif
83365diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
83366new file mode 100644
83367index 0000000..be66033
83368--- /dev/null
83369+++ b/include/linux/grdefs.h
83370@@ -0,0 +1,140 @@
83371+#ifndef GRDEFS_H
83372+#define GRDEFS_H
83373+
83374+/* Begin grsecurity status declarations */
83375+
83376+enum {
83377+ GR_READY = 0x01,
83378+ GR_STATUS_INIT = 0x00 /* disabled state */
83379+};
83380+
83381+/* Begin ACL declarations */
83382+
83383+/* Role flags */
83384+
83385+enum {
83386+ GR_ROLE_USER = 0x0001,
83387+ GR_ROLE_GROUP = 0x0002,
83388+ GR_ROLE_DEFAULT = 0x0004,
83389+ GR_ROLE_SPECIAL = 0x0008,
83390+ GR_ROLE_AUTH = 0x0010,
83391+ GR_ROLE_NOPW = 0x0020,
83392+ GR_ROLE_GOD = 0x0040,
83393+ GR_ROLE_LEARN = 0x0080,
83394+ GR_ROLE_TPE = 0x0100,
83395+ GR_ROLE_DOMAIN = 0x0200,
83396+ GR_ROLE_PAM = 0x0400,
83397+ GR_ROLE_PERSIST = 0x0800
83398+};
83399+
83400+/* ACL Subject and Object mode flags */
83401+enum {
83402+ GR_DELETED = 0x80000000
83403+};
83404+
83405+/* ACL Object-only mode flags */
83406+enum {
83407+ GR_READ = 0x00000001,
83408+ GR_APPEND = 0x00000002,
83409+ GR_WRITE = 0x00000004,
83410+ GR_EXEC = 0x00000008,
83411+ GR_FIND = 0x00000010,
83412+ GR_INHERIT = 0x00000020,
83413+ GR_SETID = 0x00000040,
83414+ GR_CREATE = 0x00000080,
83415+ GR_DELETE = 0x00000100,
83416+ GR_LINK = 0x00000200,
83417+ GR_AUDIT_READ = 0x00000400,
83418+ GR_AUDIT_APPEND = 0x00000800,
83419+ GR_AUDIT_WRITE = 0x00001000,
83420+ GR_AUDIT_EXEC = 0x00002000,
83421+ GR_AUDIT_FIND = 0x00004000,
83422+ GR_AUDIT_INHERIT= 0x00008000,
83423+ GR_AUDIT_SETID = 0x00010000,
83424+ GR_AUDIT_CREATE = 0x00020000,
83425+ GR_AUDIT_DELETE = 0x00040000,
83426+ GR_AUDIT_LINK = 0x00080000,
83427+ GR_PTRACERD = 0x00100000,
83428+ GR_NOPTRACE = 0x00200000,
83429+ GR_SUPPRESS = 0x00400000,
83430+ GR_NOLEARN = 0x00800000,
83431+ GR_INIT_TRANSFER= 0x01000000
83432+};
83433+
83434+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
83435+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
83436+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
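+/* Note that each GR_AUDIT_* bit above is its access bit shifted left by
+ * 10 (GR_READ is 0x1, GR_AUDIT_READ is 0x400, and so on), which is
+ * presumably what to_gr_audit() relies on to map a requested mode to
+ * its audit mask. */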
83437+
83438+/* ACL subject-only mode flags */
83439+enum {
83440+ GR_KILL = 0x00000001,
83441+ GR_VIEW = 0x00000002,
83442+ GR_PROTECTED = 0x00000004,
83443+ GR_LEARN = 0x00000008,
83444+ GR_OVERRIDE = 0x00000010,
83445+ /* just a placeholder, this mode is only used in userspace */
83446+ GR_DUMMY = 0x00000020,
83447+ GR_PROTSHM = 0x00000040,
83448+ GR_KILLPROC = 0x00000080,
83449+ GR_KILLIPPROC = 0x00000100,
83450+ /* just a placeholder, this mode is only used in userspace */
83451+ GR_NOTROJAN = 0x00000200,
83452+ GR_PROTPROCFD = 0x00000400,
83453+ GR_PROCACCT = 0x00000800,
83454+ GR_RELAXPTRACE = 0x00001000,
83455+ //GR_NESTED = 0x00002000,
83456+ GR_INHERITLEARN = 0x00004000,
83457+ GR_PROCFIND = 0x00008000,
83458+ GR_POVERRIDE = 0x00010000,
83459+ GR_KERNELAUTH = 0x00020000,
83460+ GR_ATSECURE = 0x00040000,
83461+ GR_SHMEXEC = 0x00080000
83462+};
83463+
83464+enum {
83465+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
83466+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
83467+ GR_PAX_ENABLE_MPROTECT = 0x0004,
83468+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
83469+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
83470+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
83471+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
83472+ GR_PAX_DISABLE_MPROTECT = 0x0400,
83473+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
83474+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
83475+};
83476+
83477+enum {
83478+ GR_ID_USER = 0x01,
83479+ GR_ID_GROUP = 0x02,
83480+};
83481+
83482+enum {
83483+ GR_ID_ALLOW = 0x01,
83484+ GR_ID_DENY = 0x02,
83485+};
83486+
83487+#define GR_CRASH_RES 31
83488+#define GR_UIDTABLE_MAX 500
83489+
83490+/* begin resource learning section */
83491+enum {
83492+ GR_RLIM_CPU_BUMP = 60,
83493+ GR_RLIM_FSIZE_BUMP = 50000,
83494+ GR_RLIM_DATA_BUMP = 10000,
83495+ GR_RLIM_STACK_BUMP = 1000,
83496+ GR_RLIM_CORE_BUMP = 10000,
83497+ GR_RLIM_RSS_BUMP = 500000,
83498+ GR_RLIM_NPROC_BUMP = 1,
83499+ GR_RLIM_NOFILE_BUMP = 5,
83500+ GR_RLIM_MEMLOCK_BUMP = 50000,
83501+ GR_RLIM_AS_BUMP = 500000,
83502+ GR_RLIM_LOCKS_BUMP = 2,
83503+ GR_RLIM_SIGPENDING_BUMP = 5,
83504+ GR_RLIM_MSGQUEUE_BUMP = 10000,
83505+ GR_RLIM_NICE_BUMP = 1,
83506+ GR_RLIM_RTPRIO_BUMP = 1,
83507+ GR_RLIM_RTTIME_BUMP = 1000000
83508+};
83509+
83510+#endif
83511diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
83512new file mode 100644
83513index 0000000..d25522e
83514--- /dev/null
83515+++ b/include/linux/grinternal.h
83516@@ -0,0 +1,229 @@
83517+#ifndef __GRINTERNAL_H
83518+#define __GRINTERNAL_H
83519+
83520+#ifdef CONFIG_GRKERNSEC
83521+
83522+#include <linux/fs.h>
83523+#include <linux/mnt_namespace.h>
83524+#include <linux/nsproxy.h>
83525+#include <linux/gracl.h>
83526+#include <linux/grdefs.h>
83527+#include <linux/grmsg.h>
83528+
83529+void gr_add_learn_entry(const char *fmt, ...)
83530+ __attribute__ ((format (printf, 1, 2)));
83531+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
83532+ const struct vfsmount *mnt);
83533+__u32 gr_check_create(const struct dentry *new_dentry,
83534+ const struct dentry *parent,
83535+ const struct vfsmount *mnt, const __u32 mode);
83536+int gr_check_protected_task(const struct task_struct *task);
83537+__u32 to_gr_audit(const __u32 reqmode);
83538+int gr_set_acls(const int type);
83539+int gr_acl_is_enabled(void);
83540+char gr_roletype_to_char(void);
83541+
83542+void gr_handle_alertkill(struct task_struct *task);
83543+char *gr_to_filename(const struct dentry *dentry,
83544+ const struct vfsmount *mnt);
83545+char *gr_to_filename1(const struct dentry *dentry,
83546+ const struct vfsmount *mnt);
83547+char *gr_to_filename2(const struct dentry *dentry,
83548+ const struct vfsmount *mnt);
83549+char *gr_to_filename3(const struct dentry *dentry,
83550+ const struct vfsmount *mnt);
83551+
83552+extern int grsec_enable_ptrace_readexec;
83553+extern int grsec_enable_harden_ptrace;
83554+extern int grsec_enable_link;
83555+extern int grsec_enable_fifo;
83556+extern int grsec_enable_execve;
83557+extern int grsec_enable_shm;
83558+extern int grsec_enable_execlog;
83559+extern int grsec_enable_signal;
83560+extern int grsec_enable_audit_ptrace;
83561+extern int grsec_enable_forkfail;
83562+extern int grsec_enable_time;
83563+extern int grsec_enable_rofs;
83564+extern int grsec_deny_new_usb;
83565+extern int grsec_enable_chroot_shmat;
83566+extern int grsec_enable_chroot_mount;
83567+extern int grsec_enable_chroot_double;
83568+extern int grsec_enable_chroot_pivot;
83569+extern int grsec_enable_chroot_chdir;
83570+extern int grsec_enable_chroot_chmod;
83571+extern int grsec_enable_chroot_mknod;
83572+extern int grsec_enable_chroot_fchdir;
83573+extern int grsec_enable_chroot_nice;
83574+extern int grsec_enable_chroot_execlog;
83575+extern int grsec_enable_chroot_caps;
83576+extern int grsec_enable_chroot_sysctl;
83577+extern int grsec_enable_chroot_unix;
83578+extern int grsec_enable_symlinkown;
83579+extern kgid_t grsec_symlinkown_gid;
83580+extern int grsec_enable_tpe;
83581+extern kgid_t grsec_tpe_gid;
83582+extern int grsec_enable_tpe_all;
83583+extern int grsec_enable_tpe_invert;
83584+extern int grsec_enable_socket_all;
83585+extern kgid_t grsec_socket_all_gid;
83586+extern int grsec_enable_socket_client;
83587+extern kgid_t grsec_socket_client_gid;
83588+extern int grsec_enable_socket_server;
83589+extern kgid_t grsec_socket_server_gid;
83590+extern kgid_t grsec_audit_gid;
83591+extern int grsec_enable_group;
83592+extern int grsec_enable_log_rwxmaps;
83593+extern int grsec_enable_mount;
83594+extern int grsec_enable_chdir;
83595+extern int grsec_resource_logging;
83596+extern int grsec_enable_blackhole;
83597+extern int grsec_lastack_retries;
83598+extern int grsec_enable_brute;
83599+extern int grsec_enable_harden_ipc;
83600+extern int grsec_lock;
83601+
83602+extern spinlock_t grsec_alert_lock;
83603+extern unsigned long grsec_alert_wtime;
83604+extern unsigned long grsec_alert_fyet;
83605+
83606+extern spinlock_t grsec_audit_lock;
83607+
83608+extern rwlock_t grsec_exec_file_lock;
83609+
83610+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
83611+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
83612+ (tsk)->exec_file->f_path.mnt) : "/")
83613+
83614+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
83615+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
83616+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
83617+
83618+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
83619+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
83620+ (tsk)->exec_file->f_path.mnt) : "/")
83621+
83622+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
83623+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
83624+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
83625+
83626+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
83627+
83628+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
83629+
83630+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
83631+{
83632+ if (file1 && file2) {
83633+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
83634+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
83635+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
83636+ return true;
83637+ }
83638+
83639+ return false;
83640+}
83641+
83642+#define GR_CHROOT_CAPS {{ \
83643+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
83644+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
83645+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
83646+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
83647+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
83648+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
83649+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
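+/* GR_CHROOT_CAPS above is the capability set stripped from chrooted
+ * tasks when chroot capability restriction is enabled; the two
+ * comma-separated halves initialize the two 32-bit words of a
+ * kernel_cap_t. */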
83650+
83651+#define security_learn(normal_msg,args...) \
83652+({ \
83653+ read_lock(&grsec_exec_file_lock); \
83654+ gr_add_learn_entry(normal_msg "\n", ## args); \
83655+ read_unlock(&grsec_exec_file_lock); \
83656+})
83657+
83658+enum {
83659+ GR_DO_AUDIT,
83660+ GR_DONT_AUDIT,
83661+ /* used for non-audit messages that we shouldn't kill the task on */
83662+ GR_DONT_AUDIT_GOOD
83663+};
83664+
83665+enum {
83666+ GR_TTYSNIFF,
83667+ GR_RBAC,
83668+ GR_RBAC_STR,
83669+ GR_STR_RBAC,
83670+ GR_RBAC_MODE2,
83671+ GR_RBAC_MODE3,
83672+ GR_FILENAME,
83673+ GR_SYSCTL_HIDDEN,
83674+ GR_NOARGS,
83675+ GR_ONE_INT,
83676+ GR_ONE_INT_TWO_STR,
83677+ GR_ONE_STR,
83678+ GR_STR_INT,
83679+ GR_TWO_STR_INT,
83680+ GR_TWO_INT,
83681+ GR_TWO_U64,
83682+ GR_THREE_INT,
83683+ GR_FIVE_INT_TWO_STR,
83684+ GR_TWO_STR,
83685+ GR_THREE_STR,
83686+ GR_FOUR_STR,
83687+ GR_STR_FILENAME,
83688+ GR_FILENAME_STR,
83689+ GR_FILENAME_TWO_INT,
83690+ GR_FILENAME_TWO_INT_STR,
83691+ GR_TEXTREL,
83692+ GR_PTRACE,
83693+ GR_RESOURCE,
83694+ GR_CAP,
83695+ GR_SIG,
83696+ GR_SIG2,
83697+ GR_CRASH1,
83698+ GR_CRASH2,
83699+ GR_PSACCT,
83700+ GR_RWXMAP,
83701+ GR_RWXMAPVMA
83702+};
83703+
83704+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
83705+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
83706+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
83707+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
83708+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
83709+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
83710+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
83711+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
83712+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
83713+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
83714+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
83715+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
83716+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
83717+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
83718+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
83719+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
83720+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
83721+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
83722+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
83723+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
83724+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
83725+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
83726+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
83727+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
83728+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
83729+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
83730+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
83731+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
83732+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
83733+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
83734+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
83735+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
83736+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
83737+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
83738+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
83739+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
83740+
83741+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
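+/* Usage sketch: each wrapper above pairs a format string from
+ * linux/grmsg.h with an argument-type tag, e.g.
+ *
+ *	gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
+ *	gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
+ *
+ * (name here is a hypothetical sysctl entry name); gr_log_varargs()
+ * then decodes the va_list according to argtypes. */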
83742+
83743+#endif
83744+
83745+#endif
83746diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
83747new file mode 100644
83748index 0000000..b02ba9d
83749--- /dev/null
83750+++ b/include/linux/grmsg.h
83751@@ -0,0 +1,117 @@
83752+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
83753+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
83754+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
83755+#define GR_STOPMOD_MSG "denied modification of module state by "
83756+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
83757+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
83758+#define GR_IOPERM_MSG "denied use of ioperm() by "
83759+#define GR_IOPL_MSG "denied use of iopl() by "
83760+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
83761+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
83762+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
83763+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
83764+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
83765+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
83766+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
83767+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
83768+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
83769+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
83770+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
83771+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
83772+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
83773+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
83774+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
83775+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
83776+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
83777+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
83778+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
83779+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
83780+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
83781+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
83782+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
83783+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
83784+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
83785+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
83786+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
83787+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
83788+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
83789+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
83790+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
83791+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
83792+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
83793+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
83794+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
83795+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
83796+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
83797+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
83798+#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
83799+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
83800+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
83801+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
83802+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
83803+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
83804+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
83805+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
83806+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
83807+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
83808+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
83809+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
83810+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
83811+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
83812+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
83813+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
83814+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
83815+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
83816+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
83817+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
83818+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
83819+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
83820+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
83821+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
83822+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
83823+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
83824+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
83825+#define GR_FAILFORK_MSG "failed fork with errno %s by "
83826+#define GR_NICE_CHROOT_MSG "denied priority change by "
83827+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
83828+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
83829+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
83830+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
83831+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
83832+#define GR_TIME_MSG "time set by "
83833+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
83834+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
83835+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
83836+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
83837+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
83838+#define GR_BIND_MSG "denied bind() by "
83839+#define GR_CONNECT_MSG "denied connect() by "
83840+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
83841+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
83842+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
83843+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
83844+#define GR_CAP_ACL_MSG "use of %s denied for "
83845+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
83846+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
83847+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
83848+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
83849+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
83850+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
83851+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
83852+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
83853+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
83854+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
83855+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
83856+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
83857+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
83858+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
83859+#define GR_VM86_MSG "denied use of vm86 by "
83860+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
83861+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
83862+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
83863+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
83864+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
83865+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
83866+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
83867+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
83868+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
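+/* Most messages above deliberately end in "by " so the logger can
+ * append the acting task's path and credentials (compare the
+ * gr_task_fullpath helpers in linux/grinternal.h). */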
83869diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
83870new file mode 100644
83871index 0000000..10b9635
83872--- /dev/null
83873+++ b/include/linux/grsecurity.h
83874@@ -0,0 +1,254 @@
83875+#ifndef GR_SECURITY_H
83876+#define GR_SECURITY_H
83877+#include <linux/fs.h>
83878+#include <linux/fs_struct.h>
83879+#include <linux/binfmts.h>
83880+#include <linux/gracl.h>
83881+
83882+/* notify of brain-dead configs */
83883+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
83884+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
83885+#endif
83886+#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
83887+#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
83888+#endif
83889+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
83890+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
83891+#endif
83892+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
83893+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
83894+#endif
83895+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
83896+#error "CONFIG_PAX enabled, but no PaX options are enabled."
83897+#endif
83898+
83899+int gr_handle_new_usb(void);
83900+
83901+void gr_handle_brute_attach(int dumpable);
83902+void gr_handle_brute_check(void);
83903+void gr_handle_kernel_exploit(void);
83904+
83905+char gr_roletype_to_char(void);
83906+
83907+int gr_proc_is_restricted(void);
83908+
83909+int gr_acl_enable_at_secure(void);
83910+
83911+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
83912+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
83913+
83914+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap);
83915+
83916+void gr_del_task_from_ip_table(struct task_struct *p);
83917+
83918+int gr_pid_is_chrooted(struct task_struct *p);
83919+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
83920+int gr_handle_chroot_nice(void);
83921+int gr_handle_chroot_sysctl(const int op);
83922+int gr_handle_chroot_setpriority(struct task_struct *p,
83923+ const int niceval);
83924+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
83925+int gr_chroot_fhandle(void);
83926+int gr_handle_chroot_chroot(const struct dentry *dentry,
83927+ const struct vfsmount *mnt);
83928+void gr_handle_chroot_chdir(const struct path *path);
83929+int gr_handle_chroot_chmod(const struct dentry *dentry,
83930+ const struct vfsmount *mnt, const int mode);
83931+int gr_handle_chroot_mknod(const struct dentry *dentry,
83932+ const struct vfsmount *mnt, const int mode);
83933+int gr_handle_chroot_mount(const struct dentry *dentry,
83934+ const struct vfsmount *mnt,
83935+ const char *dev_name);
83936+int gr_handle_chroot_pivot(void);
83937+int gr_handle_chroot_unix(const pid_t pid);
83938+
83939+int gr_handle_rawio(const struct inode *inode);
83940+
83941+void gr_handle_ioperm(void);
83942+void gr_handle_iopl(void);
83943+void gr_handle_msr_write(void);
83944+
83945+umode_t gr_acl_umask(void);
83946+
83947+int gr_tpe_allow(const struct file *file);
83948+
83949+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
83950+void gr_clear_chroot_entries(struct task_struct *task);
83951+
83952+void gr_log_forkfail(const int retval);
83953+void gr_log_timechange(void);
83954+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
83955+void gr_log_chdir(const struct dentry *dentry,
83956+ const struct vfsmount *mnt);
83957+void gr_log_chroot_exec(const struct dentry *dentry,
83958+ const struct vfsmount *mnt);
83959+void gr_log_remount(const char *devname, const int retval);
83960+void gr_log_unmount(const char *devname, const int retval);
83961+void gr_log_mount(const char *from, const char *to, const int retval);
83962+void gr_log_textrel(struct vm_area_struct *vma);
83963+void gr_log_ptgnustack(struct file *file);
83964+void gr_log_rwxmmap(struct file *file);
83965+void gr_log_rwxmprotect(struct vm_area_struct *vma);
83966+
83967+int gr_handle_follow_link(const struct inode *parent,
83968+ const struct inode *inode,
83969+ const struct dentry *dentry,
83970+ const struct vfsmount *mnt);
83971+int gr_handle_fifo(const struct dentry *dentry,
83972+ const struct vfsmount *mnt,
83973+ const struct dentry *dir, const int flag,
83974+ const int acc_mode);
83975+int gr_handle_hardlink(const struct dentry *dentry,
83976+ const struct vfsmount *mnt,
83977+ struct inode *inode,
83978+ const int mode, const struct filename *to);
83979+
83980+int gr_is_capable(const int cap);
83981+int gr_is_capable_nolog(const int cap);
83982+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
83983+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
83984+
83985+void gr_copy_label(struct task_struct *tsk);
83986+void gr_handle_crash(struct task_struct *task, const int sig);
83987+int gr_handle_signal(const struct task_struct *p, const int sig);
83988+int gr_check_crash_uid(const kuid_t uid);
83989+int gr_check_protected_task(const struct task_struct *task);
83990+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
83991+int gr_acl_handle_mmap(const struct file *file,
83992+ const unsigned long prot);
83993+int gr_acl_handle_mprotect(const struct file *file,
83994+ const unsigned long prot);
83995+int gr_check_hidden_task(const struct task_struct *tsk);
83996+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
83997+ const struct vfsmount *mnt);
83998+__u32 gr_acl_handle_utime(const struct dentry *dentry,
83999+ const struct vfsmount *mnt);
84000+__u32 gr_acl_handle_access(const struct dentry *dentry,
84001+ const struct vfsmount *mnt, const int fmode);
84002+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
84003+ const struct vfsmount *mnt, umode_t *mode);
84004+__u32 gr_acl_handle_chown(const struct dentry *dentry,
84005+ const struct vfsmount *mnt);
84006+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
84007+ const struct vfsmount *mnt);
84008+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
84009+ const struct vfsmount *mnt);
84010+int gr_handle_ptrace(struct task_struct *task, const long request);
84011+int gr_handle_proc_ptrace(struct task_struct *task);
84012+__u32 gr_acl_handle_execve(const struct dentry *dentry,
84013+ const struct vfsmount *mnt);
84014+int gr_check_crash_exec(const struct file *filp);
84015+int gr_acl_is_enabled(void);
84016+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
84017+ const kgid_t gid);
84018+int gr_set_proc_label(const struct dentry *dentry,
84019+ const struct vfsmount *mnt,
84020+ const int unsafe_flags);
84021+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
84022+ const struct vfsmount *mnt);
84023+__u32 gr_acl_handle_open(const struct dentry *dentry,
84024+ const struct vfsmount *mnt, int acc_mode);
84025+__u32 gr_acl_handle_creat(const struct dentry *dentry,
84026+ const struct dentry *p_dentry,
84027+ const struct vfsmount *p_mnt,
84028+ int open_flags, int acc_mode, const int imode);
84029+void gr_handle_create(const struct dentry *dentry,
84030+ const struct vfsmount *mnt);
84031+void gr_handle_proc_create(const struct dentry *dentry,
84032+ const struct inode *inode);
84033+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
84034+ const struct dentry *parent_dentry,
84035+ const struct vfsmount *parent_mnt,
84036+ const int mode);
84037+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
84038+ const struct dentry *parent_dentry,
84039+ const struct vfsmount *parent_mnt);
84040+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
84041+ const struct vfsmount *mnt);
84042+void gr_handle_delete(const ino_t ino, const dev_t dev);
84043+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
84044+ const struct vfsmount *mnt);
84045+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
84046+ const struct dentry *parent_dentry,
84047+ const struct vfsmount *parent_mnt,
84048+ const struct filename *from);
84049+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
84050+ const struct dentry *parent_dentry,
84051+ const struct vfsmount *parent_mnt,
84052+ const struct dentry *old_dentry,
84053+ const struct vfsmount *old_mnt, const struct filename *to);
84054+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
84055+int gr_acl_handle_rename(struct dentry *new_dentry,
84056+ struct dentry *parent_dentry,
84057+ const struct vfsmount *parent_mnt,
84058+ struct dentry *old_dentry,
84059+ struct inode *old_parent_inode,
84060+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags);
84061+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
84062+ struct dentry *old_dentry,
84063+ struct dentry *new_dentry,
84064+ struct vfsmount *mnt, const __u8 replace, unsigned int flags);
84065+__u32 gr_check_link(const struct dentry *new_dentry,
84066+ const struct dentry *parent_dentry,
84067+ const struct vfsmount *parent_mnt,
84068+ const struct dentry *old_dentry,
84069+ const struct vfsmount *old_mnt);
84070+int gr_acl_handle_filldir(const struct file *file, const char *name,
84071+ const unsigned int namelen, const ino_t ino);
84072+
84073+__u32 gr_acl_handle_unix(const struct dentry *dentry,
84074+ const struct vfsmount *mnt);
84075+void gr_acl_handle_exit(void);
84076+void gr_acl_handle_psacct(struct task_struct *task, const long code);
84077+int gr_acl_handle_procpidmem(const struct task_struct *task);
84078+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
84079+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
84080+void gr_audit_ptrace(struct task_struct *task);
84081+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
84082+void gr_put_exec_file(struct task_struct *task);
84083+
84084+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
84085+
84086+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
84087+extern void gr_learn_resource(const struct task_struct *task, const int res,
84088+ const unsigned long wanted, const int gt);
84089+#else
84090+static inline void gr_learn_resource(const struct task_struct *task, const int res,
84091+ const unsigned long wanted, const int gt)
84092+{
84093+}
84094+#endif
84095+
84096+#ifdef CONFIG_GRKERNSEC_RESLOG
84097+extern void gr_log_resource(const struct task_struct *task, const int res,
84098+ const unsigned long wanted, const int gt);
84099+#else
84100+static inline void gr_log_resource(const struct task_struct *task, const int res,
84101+ const unsigned long wanted, const int gt)
84102+{
84103+}
84104+#endif
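+/* With the corresponding option disabled, the two helpers above compile
+ * to empty static inlines, so call sites need no #ifdefs of their own. */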
84105+
84106+#ifdef CONFIG_GRKERNSEC
84107+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
84108+void gr_handle_vm86(void);
84109+void gr_handle_mem_readwrite(u64 from, u64 to);
84110+
84111+void gr_log_badprocpid(const char *entry);
84112+
84113+extern int grsec_enable_dmesg;
84114+extern int grsec_disable_privio;
84115+
84116+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
84117+extern kgid_t grsec_proc_gid;
84118+#endif
84119+
84120+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
84121+extern int grsec_enable_chroot_findtask;
84122+#endif
84123+#ifdef CONFIG_GRKERNSEC_SETXID
84124+extern int grsec_enable_setxid;
84125+#endif
84126+#endif
84127+
84128+#endif
84129diff --git a/include/linux/grsock.h b/include/linux/grsock.h
84130new file mode 100644
84131index 0000000..e7ffaaf
84132--- /dev/null
84133+++ b/include/linux/grsock.h
84134@@ -0,0 +1,19 @@
84135+#ifndef __GRSOCK_H
84136+#define __GRSOCK_H
84137+
84138+extern void gr_attach_curr_ip(const struct sock *sk);
84139+extern int gr_handle_sock_all(const int family, const int type,
84140+ const int protocol);
84141+extern int gr_handle_sock_server(const struct sockaddr *sck);
84142+extern int gr_handle_sock_server_other(const struct sock *sck);
84143+extern int gr_handle_sock_client(const struct sockaddr *sck);
84144+extern int gr_search_connect(struct socket * sock,
84145+ struct sockaddr_in * addr);
84146+extern int gr_search_bind(struct socket * sock,
84147+ struct sockaddr_in * addr);
84148+extern int gr_search_listen(struct socket * sock);
84149+extern int gr_search_accept(struct socket * sock);
84150+extern int gr_search_socket(const int domain, const int type,
84151+ const int protocol);
84152+
84153+#endif
84154diff --git a/include/linux/hash.h b/include/linux/hash.h
84155index bd1754c..8240892 100644
84156--- a/include/linux/hash.h
84157+++ b/include/linux/hash.h
84158@@ -83,7 +83,7 @@ static inline u32 hash32_ptr(const void *ptr)
84159 struct fast_hash_ops {
84160 u32 (*hash)(const void *data, u32 len, u32 seed);
84161 u32 (*hash2)(const u32 *data, u32 len, u32 seed);
84162-};
84163+} __no_const;
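+/* __no_const opts this ops structure out of grsecurity's constify
+ * plugin because its members are assigned at runtime; structures
+ * annotated __do_const elsewhere in this patch are instead made
+ * read-only. */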
84164
84165 /**
84166 * arch_fast_hash - Calculates a hash over a given buffer that can have
84167diff --git a/include/linux/highmem.h b/include/linux/highmem.h
84168index 7fb31da..08b5114 100644
84169--- a/include/linux/highmem.h
84170+++ b/include/linux/highmem.h
84171@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
84172 kunmap_atomic(kaddr);
84173 }
84174
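+/* Used by PaX's memory sanitizing to clear freed (possibly highmem)
+ * pages; interrupts are disabled around the atomic kmap, presumably so
+ * the mapping cannot be disturbed from interrupt context. */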
84175+static inline void sanitize_highpage(struct page *page)
84176+{
84177+ void *kaddr;
84178+ unsigned long flags;
84179+
84180+ local_irq_save(flags);
84181+ kaddr = kmap_atomic(page);
84182+ clear_page(kaddr);
84183+ kunmap_atomic(kaddr);
84184+ local_irq_restore(flags);
84185+}
84186+
84187 static inline void zero_user_segments(struct page *page,
84188 unsigned start1, unsigned end1,
84189 unsigned start2, unsigned end2)
84190diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
84191index 1c7b89a..7dda400 100644
84192--- a/include/linux/hwmon-sysfs.h
84193+++ b/include/linux/hwmon-sysfs.h
84194@@ -25,7 +25,8 @@
84195 struct sensor_device_attribute{
84196 struct device_attribute dev_attr;
84197 int index;
84198-};
84199+} __do_const;
84200+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
84201 #define to_sensor_dev_attr(_dev_attr) \
84202 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
84203
84204@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
84205 struct device_attribute dev_attr;
84206 u8 index;
84207 u8 nr;
84208-};
84209+} __do_const;
84210+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
84211 #define to_sensor_dev_attr_2(_dev_attr) \
84212 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
84213
84214diff --git a/include/linux/i2c.h b/include/linux/i2c.h
84215index b556e0a..c10a515 100644
84216--- a/include/linux/i2c.h
84217+++ b/include/linux/i2c.h
84218@@ -378,6 +378,7 @@ struct i2c_algorithm {
84219 /* To determine what the adapter supports */
84220 u32 (*functionality) (struct i2c_adapter *);
84221 };
84222+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
84223
84224 /**
84225 * struct i2c_bus_recovery_info - I2C bus recovery information
84226diff --git a/include/linux/i2o.h b/include/linux/i2o.h
84227index d23c3c2..eb63c81 100644
84228--- a/include/linux/i2o.h
84229+++ b/include/linux/i2o.h
84230@@ -565,7 +565,7 @@ struct i2o_controller {
84231 struct i2o_device *exec; /* Executive */
84232 #if BITS_PER_LONG == 64
84233 spinlock_t context_list_lock; /* lock for context_list */
84234- atomic_t context_list_counter; /* needed for unique contexts */
84235+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
84236 struct list_head context_list; /* list of context id's
84237 and pointers */
84238 #endif
84239diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
84240index aff7ad8..3942bbd 100644
84241--- a/include/linux/if_pppox.h
84242+++ b/include/linux/if_pppox.h
84243@@ -76,7 +76,7 @@ struct pppox_proto {
84244 int (*ioctl)(struct socket *sock, unsigned int cmd,
84245 unsigned long arg);
84246 struct module *owner;
84247-};
84248+} __do_const;
84249
84250 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
84251 extern void unregister_pppox_proto(int proto_num);
84252diff --git a/include/linux/init.h b/include/linux/init.h
84253index 2df8e8d..3e1280d 100644
84254--- a/include/linux/init.h
84255+++ b/include/linux/init.h
84256@@ -37,9 +37,17 @@
84257 * section.
84258 */
84259
84260+#define add_init_latent_entropy __latent_entropy
84261+
84262+#ifdef CONFIG_MEMORY_HOTPLUG
84263+#define add_meminit_latent_entropy
84264+#else
84265+#define add_meminit_latent_entropy __latent_entropy
84266+#endif
84267+
84268 /* These are for everybody (although not all archs will actually
84269 discard it in modules) */
84270-#define __init __section(.init.text) __cold notrace
84271+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
84272 #define __initdata __section(.init.data)
84273 #define __initconst __constsection(.init.rodata)
84274 #define __exitdata __section(.exit.data)
84275@@ -100,7 +108,7 @@
84276 #define __cpuexitconst
84277
84278 /* Used for MEMORY_HOTPLUG */
84279-#define __meminit __section(.meminit.text) __cold notrace
84280+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
84281 #define __meminitdata __section(.meminit.data)
84282 #define __meminitconst __constsection(.meminit.rodata)
84283 #define __memexit __section(.memexit.text) __exitused __cold notrace
84284diff --git a/include/linux/init_task.h b/include/linux/init_task.h
84285index 6df7f9f..d0bf699 100644
84286--- a/include/linux/init_task.h
84287+++ b/include/linux/init_task.h
84288@@ -156,6 +156,12 @@ extern struct task_group root_task_group;
84289
84290 #define INIT_TASK_COMM "swapper"
84291
84292+#ifdef CONFIG_X86
84293+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
84294+#else
84295+#define INIT_TASK_THREAD_INFO
84296+#endif
84297+
84298 #ifdef CONFIG_RT_MUTEXES
84299 # define INIT_RT_MUTEXES(tsk) \
84300 .pi_waiters = RB_ROOT, \
84301@@ -203,6 +209,7 @@ extern struct task_group root_task_group;
84302 RCU_POINTER_INITIALIZER(cred, &init_cred), \
84303 .comm = INIT_TASK_COMM, \
84304 .thread = INIT_THREAD, \
84305+ INIT_TASK_THREAD_INFO \
84306 .fs = &init_fs, \
84307 .files = &init_files, \
84308 .signal = &init_signals, \
84309diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
84310index 698ad05..8601bb7 100644
84311--- a/include/linux/interrupt.h
84312+++ b/include/linux/interrupt.h
84313@@ -418,8 +418,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
84314
84315 struct softirq_action
84316 {
84317- void (*action)(struct softirq_action *);
84318-};
84319+ void (*action)(void);
84320+} __no_const;
84321
84322 asmlinkage void do_softirq(void);
84323 asmlinkage void __do_softirq(void);
84324@@ -433,7 +433,7 @@ static inline void do_softirq_own_stack(void)
84325 }
84326 #endif
84327
84328-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
84329+extern void open_softirq(int nr, void (*action)(void));
84330 extern void softirq_init(void);
84331 extern void __raise_softirq_irqoff(unsigned int nr);
84332
84333diff --git a/include/linux/iommu.h b/include/linux/iommu.h
84334index b96a5b2..2732d1c 100644
84335--- a/include/linux/iommu.h
84336+++ b/include/linux/iommu.h
84337@@ -131,7 +131,7 @@ struct iommu_ops {
84338 u32 (*domain_get_windows)(struct iommu_domain *domain);
84339
84340 unsigned long pgsize_bitmap;
84341-};
84342+} __do_const;
84343
84344 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
84345 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
84346diff --git a/include/linux/ioport.h b/include/linux/ioport.h
84347index 5e3a906..3131d0f 100644
84348--- a/include/linux/ioport.h
84349+++ b/include/linux/ioport.h
84350@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
84351 int adjust_resource(struct resource *res, resource_size_t start,
84352 resource_size_t size);
84353 resource_size_t resource_alignment(struct resource *res);
84354-static inline resource_size_t resource_size(const struct resource *res)
84355+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
84356 {
84357 return res->end - res->start + 1;
84358 }
84359diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
84360index 35e7eca..6afb7ad 100644
84361--- a/include/linux/ipc_namespace.h
84362+++ b/include/linux/ipc_namespace.h
84363@@ -69,7 +69,7 @@ struct ipc_namespace {
84364 struct user_namespace *user_ns;
84365
84366 unsigned int proc_inum;
84367-};
84368+} __randomize_layout;
84369
84370 extern struct ipc_namespace init_ipc_ns;
84371 extern atomic_t nr_ipc_ns;
84372diff --git a/include/linux/irq.h b/include/linux/irq.h
84373index 0d998d8..3a1c782 100644
84374--- a/include/linux/irq.h
84375+++ b/include/linux/irq.h
84376@@ -344,7 +344,8 @@ struct irq_chip {
84377 void (*irq_release_resources)(struct irq_data *data);
84378
84379 unsigned long flags;
84380-};
84381+} __do_const;
84382+typedef struct irq_chip __no_const irq_chip_no_const;
84383
84384 /*
84385 * irq_chip specific flags
84386diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
84387index 45e2d8c..26d85da 100644
84388--- a/include/linux/irqchip/arm-gic.h
84389+++ b/include/linux/irqchip/arm-gic.h
84390@@ -75,9 +75,11 @@
84391
84392 #ifndef __ASSEMBLY__
84393
84394+#include <linux/irq.h>
84395+
84396 struct device_node;
84397
84398-extern struct irq_chip gic_arch_extn;
84399+extern irq_chip_no_const gic_arch_extn;
84400
84401 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
84402 u32 offset, struct device_node *);
84403diff --git a/include/linux/isdn_ppp.h b/include/linux/isdn_ppp.h
84404index 8e10f57..d5f62bc 100644
84405--- a/include/linux/isdn_ppp.h
84406+++ b/include/linux/isdn_ppp.h
84407@@ -180,8 +180,9 @@ struct ippp_struct {
84408 struct slcompress *slcomp;
84409 #endif
84410 #ifdef CONFIG_IPPP_FILTER
84411- struct sk_filter *pass_filter; /* filter for packets to pass */
84412- struct sk_filter *active_filter; /* filter for pkts to reset idle */
84413+ struct sock_filter *pass_filter; /* filter for packets to pass */
84414+ struct sock_filter *active_filter; /* filter for pkts to reset idle */
84415+ unsigned pass_len, active_len;
84416 #endif
84417 unsigned long debug;
84418 struct isdn_ppp_compressor *compressor,*decompressor;
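
The sk_filter to sock_filter switch appears to track the 3.16 BPF rework: struct sk_filter now wraps the kernel's internal (possibly JITed) program, so ippp instead keeps the user-supplied classic-BPF program plus its instruction count in the new pass_len/active_len fields. For reference, a classic-BPF program is just an array of struct sock_filter; a minimal sketch using the userspace uapi header:

#include <stdio.h>
#include <linux/filter.h>	/* struct sock_filter, BPF_STMT, BPF_RET, BPF_K */

/* single-instruction filter: "return 0xffff", i.e. accept the whole packet */
static struct sock_filter pass_prog[] = {
	BPF_STMT(BPF_RET | BPF_K, 0xffff),
};
static unsigned pass_len = sizeof(pass_prog) / sizeof(pass_prog[0]);

int main(void)
{
	printf("filter: %u insn(s), first opcode 0x%x\n",
	       pass_len, pass_prog[0].code);
	return 0;
}
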
84419diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
84420index 1f44466..b481806 100644
84421--- a/include/linux/jiffies.h
84422+++ b/include/linux/jiffies.h
84423@@ -292,20 +292,20 @@ extern unsigned long preset_lpj;
84424 /*
84425 * Convert various time units to each other:
84426 */
84427-extern unsigned int jiffies_to_msecs(const unsigned long j);
84428-extern unsigned int jiffies_to_usecs(const unsigned long j);
84429+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
84430+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
84431
84432-static inline u64 jiffies_to_nsecs(const unsigned long j)
84433+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
84434 {
84435 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
84436 }
84437
84438-extern unsigned long msecs_to_jiffies(const unsigned int m);
84439-extern unsigned long usecs_to_jiffies(const unsigned int u);
84440+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
84441+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
84442 extern unsigned long timespec_to_jiffies(const struct timespec *value);
84443 extern void jiffies_to_timespec(const unsigned long jiffies,
84444- struct timespec *value);
84445-extern unsigned long timeval_to_jiffies(const struct timeval *value);
84446+ struct timespec *value) __intentional_overflow(-1);
84447+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
84448 extern void jiffies_to_timeval(const unsigned long jiffies,
84449 struct timeval *value);
84450
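
All of these jiffies conversion helpers can wrap by design (jiffies arithmetic is modular), so they are tagged __intentional_overflow(-1), where -1 denotes the return value, to tell the size_overflow gcc plugin not to instrument them. Without the plugin the annotation expands to nothing. A minimal sketch, assuming a no-op fallback definition and an illustrative helper name:

#include <stdio.h>

#ifndef __intentional_overflow
#define __intentional_overflow(...)	/* no-op without the size_overflow plugin */
#endif

/* deliberate modular arithmetic, in the spirit of msecs_to_jiffies() */
static unsigned long __intentional_overflow(-1)
msecs_to_ticks(unsigned int m, unsigned long hz)
{
	return (unsigned long)m * hz / 1000;	/* may wrap on 32-bit; fine here */
}

int main(void)
{
	printf("%lu\n", msecs_to_ticks(5000, 100));	/* 500 ticks at HZ=100 */
	return 0;
}
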
84451diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
84452index 6883e19..e854fcb 100644
84453--- a/include/linux/kallsyms.h
84454+++ b/include/linux/kallsyms.h
84455@@ -15,7 +15,8 @@
84456
84457 struct module;
84458
84459-#ifdef CONFIG_KALLSYMS
84460+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
84461+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
84462 /* Lookup the address for a symbol. Returns 0 if not found. */
84463 unsigned long kallsyms_lookup_name(const char *name);
84464
84465@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
84466 /* Stupid that this does nothing, but I didn't create this mess. */
84467 #define __print_symbol(fmt, addr)
84468 #endif /*CONFIG_KALLSYMS*/
84469+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
84470+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
84471+extern unsigned long kallsyms_lookup_name(const char *name);
84472+extern void __print_symbol(const char *fmt, unsigned long address);
84473+extern int sprint_backtrace(char *buffer, unsigned long address);
84474+extern int sprint_symbol(char *buffer, unsigned long address);
84475+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
84476+const char *kallsyms_lookup(unsigned long addr,
84477+ unsigned long *symbolsize,
84478+ unsigned long *offset,
84479+ char **modname, char *namebuf);
84480+extern int kallsyms_lookup_size_offset(unsigned long addr,
84481+ unsigned long *symbolsize,
84482+ unsigned long *offset);
84483+#endif
84484
84485 /* This macro allows us to keep printk typechecking */
84486 static __printf(1, 2)
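
The nested gating above reads better once expanded: ordinary includers get the real lookup prototypes only when KALLSYMS is on and HIDESYM is off, otherwise they see the no-op stubs; the handful of files named in the added comment define __INCLUDED_BY_HIDESYM before including this header and fall through to the real externs even under HIDESYM. A compressed, runnable preprocessor sketch of the three outcomes:

#include <stdio.h>

#define CONFIG_KALLSYMS 1
#define CONFIG_GRKERNSEC_HIDESYM 1
/* #define __INCLUDED_BY_HIDESYM 1 */	/* only kallsyms.c, kprobes.c, ... */

#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
# if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
#  define WHICH "real prototypes (normal kernel)"
# else
#  define WHICH "stubs (HIDESYM hides symbols from this includer)"
# endif
#else
# define WHICH "real prototypes (whitelisted includer)"
#endif

int main(void) { puts(WHICH); return 0; }
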
84487diff --git a/include/linux/key-type.h b/include/linux/key-type.h
84488index a74c3a8..28d3f21 100644
84489--- a/include/linux/key-type.h
84490+++ b/include/linux/key-type.h
84491@@ -131,7 +131,7 @@ struct key_type {
84492 /* internal fields */
84493 struct list_head link; /* link in types list */
84494 struct lock_class_key lock_class; /* key->sem lock class */
84495-};
84496+} __do_const;
84497
84498 extern struct key_type key_type_keyring;
84499
84500diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
84501index 6b06d37..c134867 100644
84502--- a/include/linux/kgdb.h
84503+++ b/include/linux/kgdb.h
84504@@ -52,7 +52,7 @@ extern int kgdb_connected;
84505 extern int kgdb_io_module_registered;
84506
84507 extern atomic_t kgdb_setting_breakpoint;
84508-extern atomic_t kgdb_cpu_doing_single_step;
84509+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
84510
84511 extern struct task_struct *kgdb_usethread;
84512 extern struct task_struct *kgdb_contthread;
84513@@ -254,7 +254,7 @@ struct kgdb_arch {
84514 void (*correct_hw_break)(void);
84515
84516 void (*enable_nmi)(bool on);
84517-};
84518+} __do_const;
84519
84520 /**
84521 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
84522@@ -279,7 +279,7 @@ struct kgdb_io {
84523 void (*pre_exception) (void);
84524 void (*post_exception) (void);
84525 int is_console;
84526-};
84527+} __do_const;
84528
84529 extern struct kgdb_arch arch_kgdb_ops;
84530
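
kgdb_cpu_doing_single_step is the first of many counters in this patch moved to atomic_unchecked_t: under PAX_REFCOUNT the ordinary atomic_t operations trap on signed overflow, so values that may legitimately wrap or hold sentinels (this one uses -1 for "no CPU stepping") must use the unchecked variant. A sketch of the split, with the real per-arch asm replaced by plain C:

#include <stdio.h>

/* atomic_t gains overflow traps under PAX_REFCOUNT;
 * atomic_unchecked_t keeps plain wrapping semantics */
typedef struct { int counter; } atomic_unchecked_t;

static int atomic_read_unchecked(const atomic_unchecked_t *v) { return v->counter; }
static void atomic_set_unchecked(atomic_unchecked_t *v, int i) { v->counter = i; }

int main(void)
{
	atomic_unchecked_t step_cpu;

	atomic_set_unchecked(&step_cpu, -1);	/* sentinel value, not a refcount */
	printf("%d\n", atomic_read_unchecked(&step_cpu));
	return 0;
}
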
84531diff --git a/include/linux/kmod.h b/include/linux/kmod.h
84532index 0555cc6..40116ce 100644
84533--- a/include/linux/kmod.h
84534+++ b/include/linux/kmod.h
84535@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
84536 * usually useless though. */
84537 extern __printf(2, 3)
84538 int __request_module(bool wait, const char *name, ...);
84539+extern __printf(3, 4)
84540+int ___request_module(bool wait, char *param_name, const char *name, ...);
84541 #define request_module(mod...) __request_module(true, mod)
84542 #define request_module_nowait(mod...) __request_module(false, mod)
84543 #define try_then_request_module(x, mod...) \
84544@@ -57,6 +59,9 @@ struct subprocess_info {
84545 struct work_struct work;
84546 struct completion *complete;
84547 char *path;
84548+#ifdef CONFIG_GRKERNSEC
84549+ char *origpath;
84550+#endif
84551 char **argv;
84552 char **envp;
84553 int wait;
84554diff --git a/include/linux/kobject.h b/include/linux/kobject.h
84555index 2d61b90..a1d0a13 100644
84556--- a/include/linux/kobject.h
84557+++ b/include/linux/kobject.h
84558@@ -118,7 +118,7 @@ struct kobj_type {
84559 struct attribute **default_attrs;
84560 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
84561 const void *(*namespace)(struct kobject *kobj);
84562-};
84563+} __do_const;
84564
84565 struct kobj_uevent_env {
84566 char *argv[3];
84567@@ -142,6 +142,7 @@ struct kobj_attribute {
84568 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
84569 const char *buf, size_t count);
84570 };
84571+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
84572
84573 extern const struct sysfs_ops kobj_sysfs_ops;
84574
84575@@ -169,7 +170,7 @@ struct kset {
84576 spinlock_t list_lock;
84577 struct kobject kobj;
84578 const struct kset_uevent_ops *uevent_ops;
84579-};
84580+} __randomize_layout;
84581
84582 extern void kset_init(struct kset *kset);
84583 extern int __must_check kset_register(struct kset *kset);
84584diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
84585index df32d25..fb52e27 100644
84586--- a/include/linux/kobject_ns.h
84587+++ b/include/linux/kobject_ns.h
84588@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
84589 const void *(*netlink_ns)(struct sock *sk);
84590 const void *(*initial_ns)(void);
84591 void (*drop_ns)(void *);
84592-};
84593+} __do_const;
84594
84595 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
84596 int kobj_ns_type_registered(enum kobj_ns_type type);
84597diff --git a/include/linux/kref.h b/include/linux/kref.h
84598index 484604d..0f6c5b6 100644
84599--- a/include/linux/kref.h
84600+++ b/include/linux/kref.h
84601@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
84602 static inline int kref_sub(struct kref *kref, unsigned int count,
84603 void (*release)(struct kref *kref))
84604 {
84605- WARN_ON(release == NULL);
84606+ BUG_ON(release == NULL);
84607
84608 if (atomic_sub_and_test((int) count, &kref->refcount)) {
84609 release(kref);
84610diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
84611index ec4e3bd..14db03a 100644
84612--- a/include/linux/kvm_host.h
84613+++ b/include/linux/kvm_host.h
84614@@ -468,7 +468,7 @@ static inline void kvm_irqfd_exit(void)
84615 {
84616 }
84617 #endif
84618-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
84619+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
84620 struct module *module);
84621 void kvm_exit(void);
84622
84623@@ -634,7 +634,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
84624 struct kvm_guest_debug *dbg);
84625 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
84626
84627-int kvm_arch_init(void *opaque);
84628+int kvm_arch_init(const void *opaque);
84629 void kvm_arch_exit(void);
84630
84631 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
84632diff --git a/include/linux/libata.h b/include/linux/libata.h
84633index 92abb49..e7fff2a 100644
84634--- a/include/linux/libata.h
84635+++ b/include/linux/libata.h
84636@@ -976,7 +976,7 @@ struct ata_port_operations {
84637 * fields must be pointers.
84638 */
84639 const struct ata_port_operations *inherits;
84640-};
84641+} __do_const;
84642
84643 struct ata_port_info {
84644 unsigned long flags;
84645diff --git a/include/linux/linkage.h b/include/linux/linkage.h
84646index a6a42dd..6c5ebce 100644
84647--- a/include/linux/linkage.h
84648+++ b/include/linux/linkage.h
84649@@ -36,6 +36,7 @@
84650 #endif
84651
84652 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
84653+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
84654 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
84655
84656 /*
84657diff --git a/include/linux/list.h b/include/linux/list.h
84658index ef95941..82db65a 100644
84659--- a/include/linux/list.h
84660+++ b/include/linux/list.h
84661@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
84662 extern void list_del(struct list_head *entry);
84663 #endif
84664
84665+extern void __pax_list_add(struct list_head *new,
84666+ struct list_head *prev,
84667+ struct list_head *next);
84668+static inline void pax_list_add(struct list_head *new, struct list_head *head)
84669+{
84670+ __pax_list_add(new, head, head->next);
84671+}
84672+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
84673+{
84674+ __pax_list_add(new, head->prev, head);
84675+}
84676+extern void pax_list_del(struct list_head *entry);
84677+
84678 /**
84679 * list_replace - replace old entry by new one
84680 * @old : the element to be replaced
84681@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
84682 INIT_LIST_HEAD(entry);
84683 }
84684
84685+extern void pax_list_del_init(struct list_head *entry);
84686+
84687 /**
84688 * list_move - delete from one list and add as another's head
84689 * @list: the entry to move
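
pax_list_add() and friends mirror list_add()/list_del() but are implemented out of line so the pointer writes can be bracketed by the PaX write-window helpers, letting KERNEXEC-protected (read-only) objects still be linked into lists. A userspace sketch of the assumed semantics, with pax_open_kernel()/pax_close_kernel() stubbed out:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void pax_open_kernel(void)  { /* would lift kernel write protection */ }
static void pax_close_kernel(void) { /* would restore it */ }

static void __pax_list_add(struct list_head *new,
			   struct list_head *prev, struct list_head *next)
{
	pax_open_kernel();
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
	pax_close_kernel();
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct list_head node;

	__pax_list_add(&node, &head, head.next);	/* pax_list_add(&node, &head) */
	printf("head.next == &node: %d\n", head.next == &node);
	return 0;
}
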
84690diff --git a/include/linux/math64.h b/include/linux/math64.h
84691index c45c089..298841c 100644
84692--- a/include/linux/math64.h
84693+++ b/include/linux/math64.h
84694@@ -15,7 +15,7 @@
84695 * This is commonly provided by 32bit archs to provide an optimized 64bit
84696 * divide.
84697 */
84698-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
84699+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
84700 {
84701 *remainder = dividend % divisor;
84702 return dividend / divisor;
84703@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
84704 /**
84705 * div64_u64 - unsigned 64bit divide with 64bit divisor
84706 */
84707-static inline u64 div64_u64(u64 dividend, u64 divisor)
84708+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
84709 {
84710 return dividend / divisor;
84711 }
84712@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
84713 #define div64_ul(x, y) div_u64((x), (y))
84714
84715 #ifndef div_u64_rem
84716-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
84717+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
84718 {
84719 *remainder = do_div(dividend, divisor);
84720 return dividend;
84721@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
84722 #endif
84723
84724 #ifndef div64_u64
84725-extern u64 div64_u64(u64 dividend, u64 divisor);
84726+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
84727 #endif
84728
84729 #ifndef div64_s64
84730@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
84731 * divide.
84732 */
84733 #ifndef div_u64
84734-static inline u64 div_u64(u64 dividend, u32 divisor)
84735+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
84736 {
84737 u32 remainder;
84738 return div_u64_rem(dividend, divisor, &remainder);
84739diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
84740index f230a97..714c006 100644
84741--- a/include/linux/mempolicy.h
84742+++ b/include/linux/mempolicy.h
84743@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
84744 }
84745
84746 #define vma_policy(vma) ((vma)->vm_policy)
84747+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
84748+{
84749+ vma->vm_policy = pol;
84750+}
84751
84752 static inline void mpol_get(struct mempolicy *pol)
84753 {
84754@@ -228,6 +232,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
84755 }
84756
84757 #define vma_policy(vma) NULL
84758+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
84759+{
84760+}
84761
84762 static inline int
84763 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
84764diff --git a/include/linux/mm.h b/include/linux/mm.h
84765index e03dd29..eaf923c 100644
84766--- a/include/linux/mm.h
84767+++ b/include/linux/mm.h
84768@@ -127,6 +127,11 @@ extern unsigned int kobjsize(const void *objp);
84769 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
84770 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
84771 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
84772+
84773+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
84774+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
84775+#endif
84776+
84777 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
84778
84779 #ifdef CONFIG_MEM_SOFT_DIRTY
84780@@ -237,8 +242,8 @@ struct vm_operations_struct {
84781 /* called by access_process_vm when get_user_pages() fails, typically
84782 * for use by special VMAs that can switch between memory and hardware
84783 */
84784- int (*access)(struct vm_area_struct *vma, unsigned long addr,
84785- void *buf, int len, int write);
84786+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
84787+ void *buf, size_t len, int write);
84788
84789 /* Called by the /proc/PID/maps code to ask the vma whether it
84790 * has a special name. Returning non-NULL will also cause this
84791@@ -274,6 +279,7 @@ struct vm_operations_struct {
84792 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
84793 unsigned long size, pgoff_t pgoff);
84794 };
84795+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
84796
84797 struct mmu_gather;
84798 struct inode;
84799@@ -1144,8 +1150,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
84800 unsigned long *pfn);
84801 int follow_phys(struct vm_area_struct *vma, unsigned long address,
84802 unsigned int flags, unsigned long *prot, resource_size_t *phys);
84803-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
84804- void *buf, int len, int write);
84805+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
84806+ void *buf, size_t len, int write);
84807
84808 static inline void unmap_shared_mapping_range(struct address_space *mapping,
84809 loff_t const holebegin, loff_t const holelen)
84810@@ -1184,9 +1190,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
84811 }
84812 #endif
84813
84814-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
84815-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
84816- void *buf, int len, int write);
84817+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
84818+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
84819+ void *buf, size_t len, int write);
84820
84821 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
84822 unsigned long start, unsigned long nr_pages,
84823@@ -1219,34 +1225,6 @@ int set_page_dirty_lock(struct page *page);
84824 int clear_page_dirty_for_io(struct page *page);
84825 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
84826
84827-/* Is the vma a continuation of the stack vma above it? */
84828-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
84829-{
84830- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
84831-}
84832-
84833-static inline int stack_guard_page_start(struct vm_area_struct *vma,
84834- unsigned long addr)
84835-{
84836- return (vma->vm_flags & VM_GROWSDOWN) &&
84837- (vma->vm_start == addr) &&
84838- !vma_growsdown(vma->vm_prev, addr);
84839-}
84840-
84841-/* Is the vma a continuation of the stack vma below it? */
84842-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
84843-{
84844- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
84845-}
84846-
84847-static inline int stack_guard_page_end(struct vm_area_struct *vma,
84848- unsigned long addr)
84849-{
84850- return (vma->vm_flags & VM_GROWSUP) &&
84851- (vma->vm_end == addr) &&
84852- !vma_growsup(vma->vm_next, addr);
84853-}
84854-
84855 extern pid_t
84856 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
84857
84858@@ -1346,6 +1324,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
84859 }
84860 #endif
84861
84862+#ifdef CONFIG_MMU
84863+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
84864+#else
84865+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
84866+{
84867+ return __pgprot(0);
84868+}
84869+#endif
84870+
84871 int vma_wants_writenotify(struct vm_area_struct *vma);
84872
84873 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
84874@@ -1364,8 +1351,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
84875 {
84876 return 0;
84877 }
84878+
84879+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
84880+ unsigned long address)
84881+{
84882+ return 0;
84883+}
84884 #else
84885 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
84886+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
84887 #endif
84888
84889 #ifdef __PAGETABLE_PMD_FOLDED
84890@@ -1374,8 +1368,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
84891 {
84892 return 0;
84893 }
84894+
84895+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
84896+ unsigned long address)
84897+{
84898+ return 0;
84899+}
84900 #else
84901 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
84902+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
84903 #endif
84904
84905 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
84906@@ -1393,11 +1394,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
84907 NULL: pud_offset(pgd, address);
84908 }
84909
84910+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
84911+{
84912+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
84913+ NULL: pud_offset(pgd, address);
84914+}
84915+
84916 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
84917 {
84918 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
84919 NULL: pmd_offset(pud, address);
84920 }
84921+
84922+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
84923+{
84924+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
84925+ NULL: pmd_offset(pud, address);
84926+}
84927 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
84928
84929 #if USE_SPLIT_PTE_PTLOCKS
84930@@ -1796,7 +1809,7 @@ extern int install_special_mapping(struct mm_struct *mm,
84931 unsigned long addr, unsigned long len,
84932 unsigned long flags, struct page **pages);
84933
84934-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
84935+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
84936
84937 extern unsigned long mmap_region(struct file *file, unsigned long addr,
84938 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
84939@@ -1804,6 +1817,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
84940 unsigned long len, unsigned long prot, unsigned long flags,
84941 unsigned long pgoff, unsigned long *populate);
84942 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
84943+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
84944
84945 #ifdef CONFIG_MMU
84946 extern int __mm_populate(unsigned long addr, unsigned long len,
84947@@ -1832,10 +1846,11 @@ struct vm_unmapped_area_info {
84948 unsigned long high_limit;
84949 unsigned long align_mask;
84950 unsigned long align_offset;
84951+ unsigned long threadstack_offset;
84952 };
84953
84954-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
84955-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
84956+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
84957+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
84958
84959 /*
84960 * Search for an unmapped address range.
84961@@ -1847,7 +1862,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
84962 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
84963 */
84964 static inline unsigned long
84965-vm_unmapped_area(struct vm_unmapped_area_info *info)
84966+vm_unmapped_area(const struct vm_unmapped_area_info *info)
84967 {
84968 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
84969 return unmapped_area(info);
84970@@ -1909,6 +1924,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
84971 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
84972 struct vm_area_struct **pprev);
84973
84974+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
84975+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
84976+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
84977+
84978 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
84979 NULL if none. Assume start_addr < end_addr. */
84980 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
84981@@ -1937,15 +1956,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
84982 return vma;
84983 }
84984
84985-#ifdef CONFIG_MMU
84986-pgprot_t vm_get_page_prot(unsigned long vm_flags);
84987-#else
84988-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
84989-{
84990- return __pgprot(0);
84991-}
84992-#endif
84993-
84994 #ifdef CONFIG_NUMA_BALANCING
84995 unsigned long change_prot_numa(struct vm_area_struct *vma,
84996 unsigned long start, unsigned long end);
84997@@ -1997,6 +2007,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
84998 static inline void vm_stat_account(struct mm_struct *mm,
84999 unsigned long flags, struct file *file, long pages)
85000 {
85001+
85002+#ifdef CONFIG_PAX_RANDMMAP
85003+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
85004+#endif
85005+
85006 mm->total_vm += pages;
85007 }
85008 #endif /* CONFIG_PROC_FS */
85009@@ -2078,7 +2093,7 @@ extern int unpoison_memory(unsigned long pfn);
85010 extern int sysctl_memory_failure_early_kill;
85011 extern int sysctl_memory_failure_recovery;
85012 extern void shake_page(struct page *p, int access);
85013-extern atomic_long_t num_poisoned_pages;
85014+extern atomic_long_unchecked_t num_poisoned_pages;
85015 extern int soft_offline_page(struct page *page, int flags);
85016
85017 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
85018@@ -2113,5 +2128,11 @@ void __init setup_nr_node_ids(void);
85019 static inline void setup_nr_node_ids(void) {}
85020 #endif
85021
85022+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
85023+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
85024+#else
85025+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
85026+#endif
85027+
85028 #endif /* __KERNEL__ */
85029 #endif /* _LINUX_MM_H */
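
The widening of access_process_vm()/access_remote_vm() and the ->access hook from int to ssize_t/size_t is typical of the size-type cleanups throughout this patch: an int length silently truncates or goes negative past 2 GiB, which the size_overflow plugin treats as an error. On LP64 the difference is easy to demonstrate:

#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	size_t len = 3UL * 1024 * 1024 * 1024;	/* a 3 GiB request */
	int as_int = (int)len;			/* truncates; negative with gcc on LP64 */
	ssize_t as_ssize = (ssize_t)len;	/* value preserved */

	printf("int: %d  ssize_t: %zd\n", as_int, as_ssize);
	return 0;
}
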
85030diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
85031index 96c5750..15668ba 100644
85032--- a/include/linux/mm_types.h
85033+++ b/include/linux/mm_types.h
85034@@ -308,7 +308,9 @@ struct vm_area_struct {
85035 #ifdef CONFIG_NUMA
85036 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
85037 #endif
85038-};
85039+
85040+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
85041+} __randomize_layout;
85042
85043 struct core_thread {
85044 struct task_struct *task;
85045@@ -454,7 +456,25 @@ struct mm_struct {
85046 bool tlb_flush_pending;
85047 #endif
85048 struct uprobes_state uprobes_state;
85049-};
85050+
85051+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
85052+ unsigned long pax_flags;
85053+#endif
85054+
85055+#ifdef CONFIG_PAX_DLRESOLVE
85056+ unsigned long call_dl_resolve;
85057+#endif
85058+
85059+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
85060+ unsigned long call_syscall;
85061+#endif
85062+
85063+#ifdef CONFIG_PAX_ASLR
85064+ unsigned long delta_mmap; /* randomized offset */
85065+ unsigned long delta_stack; /* randomized offset */
85066+#endif
85067+
85068+} __randomize_layout;
85069
85070 static inline void mm_init_cpumask(struct mm_struct *mm)
85071 {
85072diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
85073index c5d5278..f0b68c8 100644
85074--- a/include/linux/mmiotrace.h
85075+++ b/include/linux/mmiotrace.h
85076@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
85077 /* Called from ioremap.c */
85078 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
85079 void __iomem *addr);
85080-extern void mmiotrace_iounmap(volatile void __iomem *addr);
85081+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
85082
85083 /* For anyone to insert markers. Remember trailing newline. */
85084 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
85085@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
85086 {
85087 }
85088
85089-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
85090+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
85091 {
85092 }
85093
85094diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
85095index 6cbd1b6..b1d2f99 100644
85096--- a/include/linux/mmzone.h
85097+++ b/include/linux/mmzone.h
85098@@ -412,7 +412,7 @@ struct zone {
85099 unsigned long flags; /* zone flags, see below */
85100
85101 /* Zone statistics */
85102- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
85103+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
85104
85105 /*
85106 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
85107diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
85108index 44eeef0..a92d3f9 100644
85109--- a/include/linux/mod_devicetable.h
85110+++ b/include/linux/mod_devicetable.h
85111@@ -139,7 +139,7 @@ struct usb_device_id {
85112 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
85113 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
85114
85115-#define HID_ANY_ID (~0)
85116+#define HID_ANY_ID (~0U)
85117 #define HID_BUS_ANY 0xffff
85118 #define HID_GROUP_ANY 0x0000
85119
85120@@ -475,7 +475,7 @@ struct dmi_system_id {
85121 const char *ident;
85122 struct dmi_strmatch matches[4];
85123 void *driver_data;
85124-};
85125+} __do_const;
85126 /*
85127 * struct dmi_device_id appears during expansion of
85128 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
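
The HID_ANY_ID change from (~0) to (~0U) matters whenever the wildcard is widened: signed ~0 is -1 and sign-extends, while the unsigned form stays 0xffffffff. A two-line demonstration:

#include <stdio.h>

#define ANY_SIGNED   (~0)	/* int -1 */
#define ANY_UNSIGNED (~0U)	/* 0xffffffff */

int main(void)
{
	printf("%llx\n", (unsigned long long)ANY_SIGNED);	/* ffffffffffffffff */
	printf("%llx\n", (unsigned long long)ANY_UNSIGNED);	/* ffffffff */
	return 0;
}
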
85129diff --git a/include/linux/module.h b/include/linux/module.h
85130index f520a76..5f898ef 100644
85131--- a/include/linux/module.h
85132+++ b/include/linux/module.h
85133@@ -17,9 +17,11 @@
85134 #include <linux/moduleparam.h>
85135 #include <linux/jump_label.h>
85136 #include <linux/export.h>
85137+#include <linux/fs.h>
85138
85139 #include <linux/percpu.h>
85140 #include <asm/module.h>
85141+#include <asm/pgtable.h>
85142
85143 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
85144 #define MODULE_SIG_STRING "~Module signature appended~\n"
85145@@ -42,7 +44,7 @@ struct module_kobject {
85146 struct kobject *drivers_dir;
85147 struct module_param_attrs *mp;
85148 struct completion *kobj_completion;
85149-};
85150+} __randomize_layout;
85151
85152 struct module_attribute {
85153 struct attribute attr;
85154@@ -54,12 +56,13 @@ struct module_attribute {
85155 int (*test)(struct module *);
85156 void (*free)(struct module *);
85157 };
85158+typedef struct module_attribute __no_const module_attribute_no_const;
85159
85160 struct module_version_attribute {
85161 struct module_attribute mattr;
85162 const char *module_name;
85163 const char *version;
85164-} __attribute__ ((__aligned__(sizeof(void *))));
85165+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
85166
85167 extern ssize_t __modver_version_show(struct module_attribute *,
85168 struct module_kobject *, char *);
85169@@ -235,7 +238,7 @@ struct module {
85170
85171 /* Sysfs stuff. */
85172 struct module_kobject mkobj;
85173- struct module_attribute *modinfo_attrs;
85174+ module_attribute_no_const *modinfo_attrs;
85175 const char *version;
85176 const char *srcversion;
85177 struct kobject *holders_dir;
85178@@ -284,19 +287,16 @@ struct module {
85179 int (*init)(void);
85180
85181 /* If this is non-NULL, vfree after init() returns */
85182- void *module_init;
85183+ void *module_init_rx, *module_init_rw;
85184
85185 /* Here is the actual code + data, vfree'd on unload. */
85186- void *module_core;
85187+ void *module_core_rx, *module_core_rw;
85188
85189 /* Here are the sizes of the init and core sections */
85190- unsigned int init_size, core_size;
85191+ unsigned int init_size_rw, core_size_rw;
85192
85193 /* The size of the executable code in each section. */
85194- unsigned int init_text_size, core_text_size;
85195-
85196- /* Size of RO sections of the module (text+rodata) */
85197- unsigned int init_ro_size, core_ro_size;
85198+ unsigned int init_size_rx, core_size_rx;
85199
85200 /* Arch-specific module values */
85201 struct mod_arch_specific arch;
85202@@ -352,6 +352,10 @@ struct module {
85203 #ifdef CONFIG_EVENT_TRACING
85204 struct ftrace_event_call **trace_events;
85205 unsigned int num_trace_events;
85206+ struct file_operations trace_id;
85207+ struct file_operations trace_enable;
85208+ struct file_operations trace_format;
85209+ struct file_operations trace_filter;
85210 #endif
85211 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
85212 unsigned int num_ftrace_callsites;
85213@@ -375,7 +379,7 @@ struct module {
85214 ctor_fn_t *ctors;
85215 unsigned int num_ctors;
85216 #endif
85217-};
85218+} __randomize_layout;
85219 #ifndef MODULE_ARCH_INIT
85220 #define MODULE_ARCH_INIT {}
85221 #endif
85222@@ -396,16 +400,46 @@ bool is_module_address(unsigned long addr);
85223 bool is_module_percpu_address(unsigned long addr);
85224 bool is_module_text_address(unsigned long addr);
85225
85226+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
85227+{
85228+
85229+#ifdef CONFIG_PAX_KERNEXEC
85230+ if (ktla_ktva(addr) >= (unsigned long)start &&
85231+ ktla_ktva(addr) < (unsigned long)start + size)
85232+ return 1;
85233+#endif
85234+
85235+ return ((void *)addr >= start && (void *)addr < start + size);
85236+}
85237+
85238+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
85239+{
85240+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
85241+}
85242+
85243+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
85244+{
85245+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
85246+}
85247+
85248+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
85249+{
85250+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
85251+}
85252+
85253+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
85254+{
85255+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
85256+}
85257+
85258 static inline int within_module_core(unsigned long addr, const struct module *mod)
85259 {
85260- return (unsigned long)mod->module_core <= addr &&
85261- addr < (unsigned long)mod->module_core + mod->core_size;
85262+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
85263 }
85264
85265 static inline int within_module_init(unsigned long addr, const struct module *mod)
85266 {
85267- return (unsigned long)mod->module_init <= addr &&
85268- addr < (unsigned long)mod->module_init + mod->init_size;
85269+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
85270 }
85271
85272 /* Search for module by name: must hold module_mutex. */
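
Under KERNEXEC a module no longer lives in one mixed mapping: code plus rodata go into an RX region and writable data into a separate RW region, so module_core/module_init each split in two and every bounds check becomes a pair, as the rewritten within_module_core() shows. The shape of that check, stripped down to standalone C with illustrative names:

#include <stdio.h>
#include <stdbool.h>

struct mod_layout {
	char *core_rx; unsigned long size_rx;	/* text + rodata */
	char *core_rw; unsigned long size_rw;	/* writable data */
};

static bool within(unsigned long addr, char *start, unsigned long size)
{
	return (char *)addr >= start && (char *)addr < start + size;
}

static bool within_module_core(unsigned long addr, const struct mod_layout *m)
{
	return within(addr, m->core_rx, m->size_rx) ||
	       within(addr, m->core_rw, m->size_rw);
}

int main(void)
{
	static char rx[64], rw[64];
	struct mod_layout m = { rx, sizeof(rx), rw, sizeof(rw) };

	printf("%d\n", within_module_core((unsigned long)(rw + 8), &m));	/* 1 */
	return 0;
}
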
85273diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
85274index 560ca53..ef621ef 100644
85275--- a/include/linux/moduleloader.h
85276+++ b/include/linux/moduleloader.h
85277@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
85278 sections. Returns NULL on failure. */
85279 void *module_alloc(unsigned long size);
85280
85281+#ifdef CONFIG_PAX_KERNEXEC
85282+void *module_alloc_exec(unsigned long size);
85283+#else
85284+#define module_alloc_exec(x) module_alloc(x)
85285+#endif
85286+
85287 /* Free memory returned from module_alloc. */
85288 void module_free(struct module *mod, void *module_region);
85289
85290+#ifdef CONFIG_PAX_KERNEXEC
85291+void module_free_exec(struct module *mod, void *module_region);
85292+#else
85293+#define module_free_exec(x, y) module_free((x), (y))
85294+#endif
85295+
85296 /*
85297 * Apply the given relocation to the (simplified) ELF. Return -error
85298 * or 0.
85299@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
85300 unsigned int relsec,
85301 struct module *me)
85302 {
85303+#ifdef CONFIG_MODULES
85304 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
85305+#endif
85306 return -ENOEXEC;
85307 }
85308 #endif
85309@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
85310 unsigned int relsec,
85311 struct module *me)
85312 {
85313+#ifdef CONFIG_MODULES
85314 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
85315+#endif
85316 return -ENOEXEC;
85317 }
85318 #endif
85319diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
85320index b1990c5..2a6e611 100644
85321--- a/include/linux/moduleparam.h
85322+++ b/include/linux/moduleparam.h
85323@@ -293,7 +293,7 @@ static inline void __kernel_param_unlock(void)
85324 * @len is usually just sizeof(string).
85325 */
85326 #define module_param_string(name, string, len, perm) \
85327- static const struct kparam_string __param_string_##name \
85328+ static const struct kparam_string __param_string_##name __used \
85329 = { len, string }; \
85330 __module_param_call(MODULE_PARAM_PREFIX, name, \
85331 &param_ops_string, \
85332@@ -432,7 +432,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
85333 */
85334 #define module_param_array_named(name, array, type, nump, perm) \
85335 param_check_##type(name, &(array)[0]); \
85336- static const struct kparam_array __param_arr_##name \
85337+ static const struct kparam_array __param_arr_##name __used \
85338 = { .max = ARRAY_SIZE(array), .num = nump, \
85339 .ops = &param_ops_##type, \
85340 .elemsize = sizeof(array[0]), .elem = array }; \
85341diff --git a/include/linux/mount.h b/include/linux/mount.h
85342index 839bac2..a96b37c 100644
85343--- a/include/linux/mount.h
85344+++ b/include/linux/mount.h
85345@@ -59,7 +59,7 @@ struct vfsmount {
85346 struct dentry *mnt_root; /* root of the mounted tree */
85347 struct super_block *mnt_sb; /* pointer to superblock */
85348 int mnt_flags;
85349-};
85350+} __randomize_layout;
85351
85352 struct file; /* forward dec */
85353
85354diff --git a/include/linux/namei.h b/include/linux/namei.h
85355index 492de72..1bddcd4 100644
85356--- a/include/linux/namei.h
85357+++ b/include/linux/namei.h
85358@@ -19,7 +19,7 @@ struct nameidata {
85359 unsigned seq, m_seq;
85360 int last_type;
85361 unsigned depth;
85362- char *saved_names[MAX_NESTED_LINKS + 1];
85363+ const char *saved_names[MAX_NESTED_LINKS + 1];
85364 };
85365
85366 /*
85367@@ -83,12 +83,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
85368
85369 extern void nd_jump_link(struct nameidata *nd, struct path *path);
85370
85371-static inline void nd_set_link(struct nameidata *nd, char *path)
85372+static inline void nd_set_link(struct nameidata *nd, const char *path)
85373 {
85374 nd->saved_names[nd->depth] = path;
85375 }
85376
85377-static inline char *nd_get_link(struct nameidata *nd)
85378+static inline const char *nd_get_link(const struct nameidata *nd)
85379 {
85380 return nd->saved_names[nd->depth];
85381 }
85382diff --git a/include/linux/net.h b/include/linux/net.h
85383index 17d8339..81656c0 100644
85384--- a/include/linux/net.h
85385+++ b/include/linux/net.h
85386@@ -192,7 +192,7 @@ struct net_proto_family {
85387 int (*create)(struct net *net, struct socket *sock,
85388 int protocol, int kern);
85389 struct module *owner;
85390-};
85391+} __do_const;
85392
85393 struct iovec;
85394 struct kvec;
85395diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
85396index 66f9a04..056078d 100644
85397--- a/include/linux/netdevice.h
85398+++ b/include/linux/netdevice.h
85399@@ -1145,6 +1145,7 @@ struct net_device_ops {
85400 void *priv);
85401 int (*ndo_get_lock_subclass)(struct net_device *dev);
85402 };
85403+typedef struct net_device_ops __no_const net_device_ops_no_const;
85404
85405 /**
85406 * enum net_device_priv_flags - &struct net_device priv_flags
85407@@ -1312,11 +1313,11 @@ struct net_device {
85408 struct net_device_stats stats;
85409
85410 /* dropped packets by core network, Do not use this in drivers */
85411- atomic_long_t rx_dropped;
85412- atomic_long_t tx_dropped;
85413+ atomic_long_unchecked_t rx_dropped;
85414+ atomic_long_unchecked_t tx_dropped;
85415
85416 /* Stats to monitor carrier on<->off transitions */
85417- atomic_t carrier_changes;
85418+ atomic_unchecked_t carrier_changes;
85419
85420 #ifdef CONFIG_WIRELESS_EXT
85421 /* List of functions to handle Wireless Extensions (instead of ioctl).
85422diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
85423index 2077489..a15e561 100644
85424--- a/include/linux/netfilter.h
85425+++ b/include/linux/netfilter.h
85426@@ -84,7 +84,7 @@ struct nf_sockopt_ops {
85427 #endif
85428 /* Use the module struct to lock set/get code in place */
85429 struct module *owner;
85430-};
85431+} __do_const;
85432
85433 /* Function to register/unregister hook points. */
85434 int nf_register_hook(struct nf_hook_ops *reg);
85435diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
85436index e955d47..04a5338 100644
85437--- a/include/linux/netfilter/nfnetlink.h
85438+++ b/include/linux/netfilter/nfnetlink.h
85439@@ -19,7 +19,7 @@ struct nfnl_callback {
85440 const struct nlattr * const cda[]);
85441 const struct nla_policy *policy; /* netlink attribute policy */
85442 const u_int16_t attr_count; /* number of nlattr's */
85443-};
85444+} __do_const;
85445
85446 struct nfnetlink_subsystem {
85447 const char *name;
85448diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
85449new file mode 100644
85450index 0000000..33f4af8
85451--- /dev/null
85452+++ b/include/linux/netfilter/xt_gradm.h
85453@@ -0,0 +1,9 @@
85454+#ifndef _LINUX_NETFILTER_XT_GRADM_H
85455+#define _LINUX_NETFILTER_XT_GRADM_H 1
85456+
85457+struct xt_gradm_mtinfo {
85458+ __u16 flags;
85459+ __u16 invflags;
85460+};
85461+
85462+#endif
85463diff --git a/include/linux/nls.h b/include/linux/nls.h
85464index 520681b..2b7fabb 100644
85465--- a/include/linux/nls.h
85466+++ b/include/linux/nls.h
85467@@ -31,7 +31,7 @@ struct nls_table {
85468 const unsigned char *charset2upper;
85469 struct module *owner;
85470 struct nls_table *next;
85471-};
85472+} __do_const;
85473
85474 /* this value hold the maximum octet of charset */
85475 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
85476@@ -46,7 +46,7 @@ enum utf16_endian {
85477 /* nls_base.c */
85478 extern int __register_nls(struct nls_table *, struct module *);
85479 extern int unregister_nls(struct nls_table *);
85480-extern struct nls_table *load_nls(char *);
85481+extern struct nls_table *load_nls(const char *);
85482 extern void unload_nls(struct nls_table *);
85483 extern struct nls_table *load_nls_default(void);
85484 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
85485diff --git a/include/linux/notifier.h b/include/linux/notifier.h
85486index d14a4c3..a078786 100644
85487--- a/include/linux/notifier.h
85488+++ b/include/linux/notifier.h
85489@@ -54,7 +54,8 @@ struct notifier_block {
85490 notifier_fn_t notifier_call;
85491 struct notifier_block __rcu *next;
85492 int priority;
85493-};
85494+} __do_const;
85495+typedef struct notifier_block __no_const notifier_block_no_const;
85496
85497 struct atomic_notifier_head {
85498 spinlock_t lock;
85499diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
85500index b2a0f15..4d7da32 100644
85501--- a/include/linux/oprofile.h
85502+++ b/include/linux/oprofile.h
85503@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
85504 int oprofilefs_create_ro_ulong(struct dentry * root,
85505 char const * name, ulong * val);
85506
85507-/** Create a file for read-only access to an atomic_t. */
85508+/** Create a file for read-only access to an atomic_unchecked_t. */
85509 int oprofilefs_create_ro_atomic(struct dentry * root,
85510- char const * name, atomic_t * val);
85511+ char const * name, atomic_unchecked_t * val);
85512
85513 /** create a directory */
85514 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
85515diff --git a/include/linux/padata.h b/include/linux/padata.h
85516index 4386946..f50c615 100644
85517--- a/include/linux/padata.h
85518+++ b/include/linux/padata.h
85519@@ -129,7 +129,7 @@ struct parallel_data {
85520 struct padata_serial_queue __percpu *squeue;
85521 atomic_t reorder_objects;
85522 atomic_t refcnt;
85523- atomic_t seq_nr;
85524+ atomic_unchecked_t seq_nr;
85525 struct padata_cpumask cpumask;
85526 spinlock_t lock ____cacheline_aligned;
85527 unsigned int processed;
85528diff --git a/include/linux/path.h b/include/linux/path.h
85529index d137218..be0c176 100644
85530--- a/include/linux/path.h
85531+++ b/include/linux/path.h
85532@@ -1,13 +1,15 @@
85533 #ifndef _LINUX_PATH_H
85534 #define _LINUX_PATH_H
85535
85536+#include <linux/compiler.h>
85537+
85538 struct dentry;
85539 struct vfsmount;
85540
85541 struct path {
85542 struct vfsmount *mnt;
85543 struct dentry *dentry;
85544-};
85545+} __randomize_layout;
85546
85547 extern void path_get(const struct path *);
85548 extern void path_put(const struct path *);
85549diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
85550index 5f2e559..7d59314 100644
85551--- a/include/linux/pci_hotplug.h
85552+++ b/include/linux/pci_hotplug.h
85553@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
85554 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
85555 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
85556 int (*reset_slot) (struct hotplug_slot *slot, int probe);
85557-};
85558+} __do_const;
85559+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
85560
85561 /**
85562 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
85563diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
85564index 707617a..28a2e7e 100644
85565--- a/include/linux/perf_event.h
85566+++ b/include/linux/perf_event.h
85567@@ -339,8 +339,8 @@ struct perf_event {
85568
85569 enum perf_event_active_state state;
85570 unsigned int attach_state;
85571- local64_t count;
85572- atomic64_t child_count;
85573+ local64_t count; /* PaX: fix it one day */
85574+ atomic64_unchecked_t child_count;
85575
85576 /*
85577 * These are the total time in nanoseconds that the event
85578@@ -391,8 +391,8 @@ struct perf_event {
85579 * These accumulate total time (in nanoseconds) that children
85580 * events have been enabled and running, respectively.
85581 */
85582- atomic64_t child_total_time_enabled;
85583- atomic64_t child_total_time_running;
85584+ atomic64_unchecked_t child_total_time_enabled;
85585+ atomic64_unchecked_t child_total_time_running;
85586
85587 /*
85588 * Protect attach/detach and child_list:
85589@@ -722,7 +722,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
85590 entry->ip[entry->nr++] = ip;
85591 }
85592
85593-extern int sysctl_perf_event_paranoid;
85594+extern int sysctl_perf_event_legitimately_concerned;
85595 extern int sysctl_perf_event_mlock;
85596 extern int sysctl_perf_event_sample_rate;
85597 extern int sysctl_perf_cpu_time_max_percent;
85598@@ -737,19 +737,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
85599 loff_t *ppos);
85600
85601
85602+static inline bool perf_paranoid_any(void)
85603+{
85604+ return sysctl_perf_event_legitimately_concerned > 2;
85605+}
85606+
85607 static inline bool perf_paranoid_tracepoint_raw(void)
85608 {
85609- return sysctl_perf_event_paranoid > -1;
85610+ return sysctl_perf_event_legitimately_concerned > -1;
85611 }
85612
85613 static inline bool perf_paranoid_cpu(void)
85614 {
85615- return sysctl_perf_event_paranoid > 0;
85616+ return sysctl_perf_event_legitimately_concerned > 0;
85617 }
85618
85619 static inline bool perf_paranoid_kernel(void)
85620 {
85621- return sysctl_perf_event_paranoid > 1;
85622+ return sysctl_perf_event_legitimately_concerned > 1;
85623 }
85624
85625 extern void perf_event_init(void);
85626@@ -880,7 +885,7 @@ struct perf_pmu_events_attr {
85627 struct device_attribute attr;
85628 u64 id;
85629 const char *event_str;
85630-};
85631+} __do_const;
85632
85633 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
85634 static struct perf_pmu_events_attr _var = { \
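
Besides the pointed sysctl rename, the patch adds a level above the stock maximum: perf_paranoid_any() returns true for values greater than 2 and is used elsewhere in the patch to deny perf to unprivileged users entirely. The resulting ladder, spelled out as a sketch:

#include <stdio.h>

/* sysctl kernel.perf_event_paranoid, as extended by this patch */
static const char *paranoia(int level)
{
	if (level > 2)  return "deny perf to all unprivileged users";	/* new */
	if (level > 1)  return "also deny kernel profiling";
	if (level > 0)  return "also deny CPU-wide events";
	if (level > -1) return "deny raw tracepoint access";
	return "allow everything";
}

int main(void)
{
	for (int l = -1; l <= 3; l++)
		printf("%2d: %s\n", l, paranoia(l));
	return 0;
}
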
85635diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
85636index 7246ef3..1539ea4 100644
85637--- a/include/linux/pid_namespace.h
85638+++ b/include/linux/pid_namespace.h
85639@@ -43,7 +43,7 @@ struct pid_namespace {
85640 int hide_pid;
85641 int reboot; /* group exit code if this pidns was rebooted */
85642 unsigned int proc_inum;
85643-};
85644+} __randomize_layout;
85645
85646 extern struct pid_namespace init_pid_ns;
85647
85648diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
85649index eb8b8ac..62649e1 100644
85650--- a/include/linux/pipe_fs_i.h
85651+++ b/include/linux/pipe_fs_i.h
85652@@ -47,10 +47,10 @@ struct pipe_inode_info {
85653 struct mutex mutex;
85654 wait_queue_head_t wait;
85655 unsigned int nrbufs, curbuf, buffers;
85656- unsigned int readers;
85657- unsigned int writers;
85658- unsigned int files;
85659- unsigned int waiting_writers;
85660+ atomic_t readers;
85661+ atomic_t writers;
85662+ atomic_t files;
85663+ atomic_t waiting_writers;
85664 unsigned int r_counter;
85665 unsigned int w_counter;
85666 struct page *tmp_page;
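
Converting the pipe readers/writers/files/waiting_writers counts to atomic_t is, presumably, the usual PAX_REFCOUNT motivation seen throughout the patch: once a counter is an atomic type, its increments and decrements go through the checked operations and over/underflow becomes detectable instead of silently corrupting the count. The C11 analogue of the converted operations:

#include <stdio.h>
#include <stdatomic.h>

int main(void)
{
	atomic_int readers = ATOMIC_VAR_INIT(0);

	atomic_fetch_add(&readers, 1);		/* open:    was readers++  */
	atomic_fetch_sub(&readers, 1);		/* release: was readers--  */
	printf("%d\n", atomic_load(&readers));	/* 0 */
	return 0;
}
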
85667diff --git a/include/linux/pm.h b/include/linux/pm.h
85668index 72c0fe0..26918ed 100644
85669--- a/include/linux/pm.h
85670+++ b/include/linux/pm.h
85671@@ -620,6 +620,7 @@ extern int dev_pm_put_subsys_data(struct device *dev);
85672 struct dev_pm_domain {
85673 struct dev_pm_ops ops;
85674 };
85675+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
85676
85677 /*
85678 * The PM_EVENT_ messages are also used by drivers implementing the legacy
85679diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
85680index 7c1d252..0e7061d 100644
85681--- a/include/linux/pm_domain.h
85682+++ b/include/linux/pm_domain.h
85683@@ -44,11 +44,11 @@ struct gpd_dev_ops {
85684 int (*thaw_early)(struct device *dev);
85685 int (*thaw)(struct device *dev);
85686 bool (*active_wakeup)(struct device *dev);
85687-};
85688+} __no_const;
85689
85690 struct gpd_cpu_data {
85691 unsigned int saved_exit_latency;
85692- struct cpuidle_state *idle_state;
85693+ cpuidle_state_no_const *idle_state;
85694 };
85695
85696 struct generic_pm_domain {
85697diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
85698index 43fd671..08c96ee 100644
85699--- a/include/linux/pm_runtime.h
85700+++ b/include/linux/pm_runtime.h
85701@@ -118,7 +118,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
85702
85703 static inline void pm_runtime_mark_last_busy(struct device *dev)
85704 {
85705- ACCESS_ONCE(dev->power.last_busy) = jiffies;
85706+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
85707 }
85708
85709 #else /* !CONFIG_PM_RUNTIME */
85710diff --git a/include/linux/pnp.h b/include/linux/pnp.h
85711index 195aafc..49a7bc2 100644
85712--- a/include/linux/pnp.h
85713+++ b/include/linux/pnp.h
85714@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
85715 struct pnp_fixup {
85716 char id[7];
85717 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
85718-};
85719+} __do_const;
85720
85721 /* config parameters */
85722 #define PNP_CONFIG_NORMAL 0x0001
85723diff --git a/include/linux/poison.h b/include/linux/poison.h
85724index 2110a81..13a11bb 100644
85725--- a/include/linux/poison.h
85726+++ b/include/linux/poison.h
85727@@ -19,8 +19,8 @@
85728 * under normal circumstances, used to verify that nobody uses
85729 * non-initialized list entries.
85730 */
85731-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
85732-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
85733+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
85734+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
85735
85736 /********** include/linux/timer.h **********/
85737 /*
85738diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
85739index d8b187c3..9a9257a 100644
85740--- a/include/linux/power/smartreflex.h
85741+++ b/include/linux/power/smartreflex.h
85742@@ -238,7 +238,7 @@ struct omap_sr_class_data {
85743 int (*notify)(struct omap_sr *sr, u32 status);
85744 u8 notify_flags;
85745 u8 class_type;
85746-};
85747+} __do_const;
85748
85749 /**
85750 * struct omap_sr_nvalue_table - Smartreflex n-target value info
85751diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
85752index 4ea1d37..80f4b33 100644
85753--- a/include/linux/ppp-comp.h
85754+++ b/include/linux/ppp-comp.h
85755@@ -84,7 +84,7 @@ struct compressor {
85756 struct module *owner;
85757 /* Extra skb space needed by the compressor algorithm */
85758 unsigned int comp_extra;
85759-};
85760+} __do_const;
85761
85762 /*
85763 * The return value from decompress routine is the length of the
85764diff --git a/include/linux/preempt.h b/include/linux/preempt.h
85765index de83b4e..c4b997d 100644
85766--- a/include/linux/preempt.h
85767+++ b/include/linux/preempt.h
85768@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
85769 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
85770 #endif
85771
85772+#define raw_preempt_count_add(val) __preempt_count_add(val)
85773+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
85774+
85775 #define __preempt_count_inc() __preempt_count_add(1)
85776 #define __preempt_count_dec() __preempt_count_sub(1)
85777
85778 #define preempt_count_inc() preempt_count_add(1)
85779+#define raw_preempt_count_inc() raw_preempt_count_add(1)
85780 #define preempt_count_dec() preempt_count_sub(1)
85781+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
85782
85783 #ifdef CONFIG_PREEMPT_COUNT
85784
85785@@ -41,6 +46,12 @@ do { \
85786 barrier(); \
85787 } while (0)
85788
85789+#define raw_preempt_disable() \
85790+do { \
85791+ raw_preempt_count_inc(); \
85792+ barrier(); \
85793+} while (0)
85794+
85795 #define sched_preempt_enable_no_resched() \
85796 do { \
85797 barrier(); \
85798@@ -49,6 +60,12 @@ do { \
85799
85800 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
85801
85802+#define raw_preempt_enable_no_resched() \
85803+do { \
85804+ barrier(); \
85805+ raw_preempt_count_dec(); \
85806+} while (0)
85807+
85808 #ifdef CONFIG_PREEMPT
85809 #define preempt_enable() \
85810 do { \
85811@@ -113,8 +130,10 @@ do { \
85812 * region.
85813 */
85814 #define preempt_disable() barrier()
85815+#define raw_preempt_disable() barrier()
85816 #define sched_preempt_enable_no_resched() barrier()
85817 #define preempt_enable_no_resched() barrier()
85818+#define raw_preempt_enable_no_resched() barrier()
85819 #define preempt_enable() barrier()
85820 #define preempt_check_resched() do { } while (0)
85821
85822@@ -128,11 +147,13 @@ do { \
85823 /*
85824 * Modules have no business playing preemption tricks.
85825 */
85826+#ifndef CONFIG_PAX_KERNEXEC
85827 #undef sched_preempt_enable_no_resched
85828 #undef preempt_enable_no_resched
85829 #undef preempt_enable_no_resched_notrace
85830 #undef preempt_check_resched
85831 #endif
85832+#endif
85833
85834 #define preempt_set_need_resched() \
85835 do { \
85836diff --git a/include/linux/printk.h b/include/linux/printk.h
85837index 319ff7e..608849a 100644
85838--- a/include/linux/printk.h
85839+++ b/include/linux/printk.h
85840@@ -121,6 +121,8 @@ static inline __printf(1, 2) __cold
85841 void early_printk(const char *s, ...) { }
85842 #endif
85843
85844+extern int kptr_restrict;
85845+
85846 #ifdef CONFIG_PRINTK
85847 asmlinkage __printf(5, 0)
85848 int vprintk_emit(int facility, int level,
85849@@ -155,7 +157,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
85850
85851 extern int printk_delay_msec;
85852 extern int dmesg_restrict;
85853-extern int kptr_restrict;
85854
85855 extern void wake_up_klogd(void);
85856
85857diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
85858index 9d117f6..d832b31 100644
85859--- a/include/linux/proc_fs.h
85860+++ b/include/linux/proc_fs.h
85861@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
85862 extern struct proc_dir_entry *proc_symlink(const char *,
85863 struct proc_dir_entry *, const char *);
85864 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
85865+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
85866 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
85867 struct proc_dir_entry *, void *);
85868+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
85869+ struct proc_dir_entry *, void *);
85870 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
85871 struct proc_dir_entry *);
85872
85873@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
85874 return proc_create_data(name, mode, parent, proc_fops, NULL);
85875 }
85876
85877+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
85878+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
85879+{
85880+#ifdef CONFIG_GRKERNSEC_PROC_USER
85881+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
85882+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
85883+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
85884+#else
85885+ return proc_create_data(name, mode, parent, proc_fops, NULL);
85886+#endif
85887+}
85888+
85889+
85890 extern void proc_set_size(struct proc_dir_entry *, loff_t);
85891 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
85892 extern void *PDE_DATA(const struct inode *);
85893@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
85894 struct proc_dir_entry *parent,const char *dest) { return NULL;}
85895 static inline struct proc_dir_entry *proc_mkdir(const char *name,
85896 struct proc_dir_entry *parent) {return NULL;}
85897+static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
85898+ struct proc_dir_entry *parent) { return NULL; }
85899 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
85900 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
85901+static inline struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
85902+ umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
85903 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
85904 umode_t mode, struct proc_dir_entry *parent) { return NULL; }
85905 #define proc_create(name, mode, parent, proc_fops) ({NULL;})
85906@@ -77,7 +97,7 @@ static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *p
85907 static inline struct proc_dir_entry *proc_net_mkdir(
85908 struct net *net, const char *name, struct proc_dir_entry *parent)
85909 {
85910- return proc_mkdir_data(name, 0, parent, net);
85911+ return proc_mkdir_data_restrict(name, 0, parent, net);
85912 }
85913
85914 #endif /* _LINUX_PROC_FS_H */
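proc_create_grsec() silently tightens whatever mode the caller requests: 0400 under GRKERNSEC_PROC_USER, 0440 under GRKERNSEC_PROC_USERGROUP, and the caller's own mode otherwise. A sketch of a caller (the demo_* names are hypothetical):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	seq_puts(m, "demo\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init demo_init(void)
{
	/* the 0444 request becomes 0400/0440 when the restrict options are on */
	return proc_create_grsec("demo", 0444, NULL, &demo_fops) ? 0 : -ENOMEM;
}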
85915diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
85916index 34a1e10..70f6bde 100644
85917--- a/include/linux/proc_ns.h
85918+++ b/include/linux/proc_ns.h
85919@@ -14,7 +14,7 @@ struct proc_ns_operations {
85920 void (*put)(void *ns);
85921 int (*install)(struct nsproxy *nsproxy, void *ns);
85922 unsigned int (*inum)(void *ns);
85923-};
85924+} __do_const __randomize_layout;
85925
85926 struct proc_ns {
85927 void *ns;
85928diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
85929index 7dfed71..1dc420b 100644
85930--- a/include/linux/ptp_classify.h
85931+++ b/include/linux/ptp_classify.h
85932@@ -23,8 +23,15 @@
85933 #ifndef _PTP_CLASSIFY_H_
85934 #define _PTP_CLASSIFY_H_
85935
85936+#include <linux/if_ether.h>
85937+#include <linux/if_vlan.h>
85938 #include <linux/ip.h>
85939-#include <linux/skbuff.h>
85940+#include <linux/filter.h>
85941+#ifdef __KERNEL__
85942+#include <linux/in.h>
85943+#else
85944+#include <netinet/in.h>
85945+#endif
85946
85947 #define PTP_CLASS_NONE 0x00 /* not a PTP event message */
85948 #define PTP_CLASS_V1 0x01 /* protocol version 1 */
85949@@ -37,7 +44,7 @@
85950 #define PTP_CLASS_PMASK 0xf0 /* mask for the packet type field */
85951
85952 #define PTP_CLASS_V1_IPV4 (PTP_CLASS_V1 | PTP_CLASS_IPV4)
85953-#define PTP_CLASS_V1_IPV6 (PTP_CLASS_V1 | PTP_CLASS_IPV6) /* probably DNE */
85954+#define PTP_CLASS_V1_IPV6 (PTP_CLASS_V1 | PTP_CLASS_IPV6) /*probably DNE*/
85955 #define PTP_CLASS_V2_IPV4 (PTP_CLASS_V2 | PTP_CLASS_IPV4)
85956 #define PTP_CLASS_V2_IPV6 (PTP_CLASS_V2 | PTP_CLASS_IPV6)
85957 #define PTP_CLASS_V2_L2 (PTP_CLASS_V2 | PTP_CLASS_L2)
85958@@ -46,34 +53,88 @@
85959 #define PTP_EV_PORT 319
85960 #define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */
85961
85962+#define OFF_ETYPE 12
85963+#define OFF_IHL 14
85964+#define OFF_FRAG 20
85965+#define OFF_PROTO4 23
85966+#define OFF_NEXT 6
85967+#define OFF_UDP_DST 2
85968+
85969 #define OFF_PTP_SOURCE_UUID 22 /* PTPv1 only */
85970 #define OFF_PTP_SEQUENCE_ID 30
85971 #define OFF_PTP_CONTROL 32 /* PTPv1 only */
85972
85973-/* Below defines should actually be removed at some point in time. */
85974+#define IPV4_HLEN(data) (((struct iphdr *)(data + OFF_IHL))->ihl << 2)
85975+
85976 #define IP6_HLEN 40
85977 #define UDP_HLEN 8
85978-#define OFF_IHL 14
85979+
85980+#define RELOFF_DST4 (ETH_HLEN + OFF_UDP_DST)
85981+#define OFF_DST6 (ETH_HLEN + IP6_HLEN + OFF_UDP_DST)
85982 #define OFF_PTP6 (ETH_HLEN + IP6_HLEN + UDP_HLEN)
85983-#define IPV4_HLEN(data) (((struct iphdr *)(data + OFF_IHL))->ihl << 2)
85984
85985-#if defined(CONFIG_NET_PTP_CLASSIFY)
85986-/**
85987- * ptp_classify_raw - classify a PTP packet
85988- * @skb: buffer
85989- *
85990- * Runs a minimal BPF dissector to classify a network packet to
85991- * determine the PTP class. In case the skb does not contain any
85992- * PTP protocol data, PTP_CLASS_NONE will be returned, otherwise
85993- * PTP_CLASS_V1_IPV{4,6}, PTP_CLASS_V2_IPV{4,6} or
85994- * PTP_CLASS_V2_{L2,VLAN}, depending on the packet content.
85995- */
85996-unsigned int ptp_classify_raw(const struct sk_buff *skb);
85997+#define OP_AND (BPF_ALU | BPF_AND | BPF_K)
85998+#define OP_JEQ (BPF_JMP | BPF_JEQ | BPF_K)
85999+#define OP_JSET (BPF_JMP | BPF_JSET | BPF_K)
86000+#define OP_LDB (BPF_LD | BPF_B | BPF_ABS)
86001+#define OP_LDH (BPF_LD | BPF_H | BPF_ABS)
86002+#define OP_LDHI (BPF_LD | BPF_H | BPF_IND)
86003+#define OP_LDX (BPF_LDX | BPF_B | BPF_MSH)
86004+#define OP_OR (BPF_ALU | BPF_OR | BPF_K)
86005+#define OP_RETA (BPF_RET | BPF_A)
86006+#define OP_RETK (BPF_RET | BPF_K)
86007
86008-void __init ptp_classifier_init(void);
86009-#else
86010-static inline void ptp_classifier_init(void)
86011+static inline int ptp_filter_init(struct sock_filter *f, int len)
86012 {
86013+ if (OP_LDH == f[0].code)
86014+ return sk_chk_filter(f, len);
86015+ else
86016+ return 0;
86017 }
86018+
86019+#define PTP_FILTER \
86020+ {OP_LDH, 0, 0, OFF_ETYPE }, /* */ \
86021+ {OP_JEQ, 0, 12, ETH_P_IP }, /* f goto L20 */ \
86022+ {OP_LDB, 0, 0, OFF_PROTO4 }, /* */ \
86023+ {OP_JEQ, 0, 9, IPPROTO_UDP }, /* f goto L10 */ \
86024+ {OP_LDH, 0, 0, OFF_FRAG }, /* */ \
86025+ {OP_JSET, 7, 0, 0x1fff }, /* t goto L11 */ \
86026+ {OP_LDX, 0, 0, OFF_IHL }, /* */ \
86027+ {OP_LDHI, 0, 0, RELOFF_DST4 }, /* */ \
86028+ {OP_JEQ, 0, 4, PTP_EV_PORT }, /* f goto L12 */ \
86029+ {OP_LDHI, 0, 0, ETH_HLEN + UDP_HLEN }, /* */ \
86030+ {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \
86031+ {OP_OR, 0, 0, PTP_CLASS_IPV4 }, /* */ \
86032+ {OP_RETA, 0, 0, 0 }, /* */ \
86033+/*L1x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, /* */ \
86034+/*L20*/ {OP_JEQ, 0, 9, ETH_P_IPV6 }, /* f goto L40 */ \
86035+ {OP_LDB, 0, 0, ETH_HLEN + OFF_NEXT }, /* */ \
86036+ {OP_JEQ, 0, 6, IPPROTO_UDP }, /* f goto L30 */ \
86037+ {OP_LDH, 0, 0, OFF_DST6 }, /* */ \
86038+ {OP_JEQ, 0, 4, PTP_EV_PORT }, /* f goto L31 */ \
86039+ {OP_LDH, 0, 0, OFF_PTP6 }, /* */ \
86040+ {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \
86041+ {OP_OR, 0, 0, PTP_CLASS_IPV6 }, /* */ \
86042+ {OP_RETA, 0, 0, 0 }, /* */ \
86043+/*L3x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, /* */ \
86044+/*L40*/ {OP_JEQ, 0, 9, ETH_P_8021Q }, /* f goto L50 */ \
86045+ {OP_LDH, 0, 0, OFF_ETYPE + 4 }, /* */ \
86046+ {OP_JEQ, 0, 15, ETH_P_1588 }, /* f goto L60 */ \
86047+ {OP_LDB, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \
86048+ {OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \
86049+ {OP_JEQ, 0, 12, 0 }, /* f goto L6x */ \
86050+ {OP_LDH, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \
86051+ {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \
86052+ {OP_OR, 0, 0, PTP_CLASS_VLAN }, /* */ \
86053+ {OP_RETA, 0, 0, 0 }, /* */ \
86054+/*L50*/ {OP_JEQ, 0, 7, ETH_P_1588 }, /* f goto L61 */ \
86055+ {OP_LDB, 0, 0, ETH_HLEN }, /* */ \
86056+ {OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \
86057+ {OP_JEQ, 0, 4, 0 }, /* f goto L6x */ \
86058+ {OP_LDH, 0, 0, ETH_HLEN }, /* */ \
86059+ {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \
86060+ {OP_OR, 0, 0, PTP_CLASS_L2 }, /* */ \
86061+ {OP_RETA, 0, 0, 0 }, /* */ \
86062+/*L6x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE },
86063+
86064 #endif
86065-#endif /* _PTP_CLASSIFY_H_ */
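This hunk reverts ptp_classify.h from the centralized ptp_classify_raw() classifier back to the header-defined classic BPF program: each driver instantiates PTP_FILTER itself and validates it once with ptp_filter_init(). A sketch of the driver pattern this restores (the OP_LDH guard in ptp_filter_init() appears to skip re-validation once sk_chk_filter() has rewritten the opcodes — its historical behaviour, treated here as an assumption):

#include <linux/kernel.h>
#include <linux/ptp_classify.h>

static struct sock_filter ptp_filter[] = { PTP_FILTER };

static int demo_ptp_setup(void)
{
	/* 0 on success, negative errno if sk_chk_filter() rejects it */
	return ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter));
}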
86066diff --git a/include/linux/quota.h b/include/linux/quota.h
86067index 0f3c5d3..bc559e3 100644
86068--- a/include/linux/quota.h
86069+++ b/include/linux/quota.h
86070@@ -70,7 +70,7 @@ struct kqid { /* Type in which we store the quota identifier */
86071
86072 extern bool qid_eq(struct kqid left, struct kqid right);
86073 extern bool qid_lt(struct kqid left, struct kqid right);
86074-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
86075+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
86076 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
86077 extern bool qid_valid(struct kqid qid);
86078
86079diff --git a/include/linux/random.h b/include/linux/random.h
86080index 57fbbff..2170304 100644
86081--- a/include/linux/random.h
86082+++ b/include/linux/random.h
86083@@ -9,9 +9,19 @@
86084 #include <uapi/linux/random.h>
86085
86086 extern void add_device_randomness(const void *, unsigned int);
86087+
86088+static inline void add_latent_entropy(void)
86089+{
86090+
86091+#ifdef LATENT_ENTROPY_PLUGIN
86092+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
86093+#endif
86094+
86095+}
86096+
86097 extern void add_input_randomness(unsigned int type, unsigned int code,
86098- unsigned int value);
86099-extern void add_interrupt_randomness(int irq, int irq_flags);
86100+ unsigned int value) __latent_entropy;
86101+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
86102
86103 extern void get_random_bytes(void *buf, int nbytes);
86104 extern void get_random_bytes_arch(void *buf, int nbytes);
86105@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
86106 extern const struct file_operations random_fops, urandom_fops;
86107 #endif
86108
86109-unsigned int get_random_int(void);
86110+unsigned int __intentional_overflow(-1) get_random_int(void);
86111 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
86112
86113-u32 prandom_u32(void);
86114+u32 prandom_u32(void) __intentional_overflow(-1);
86115 void prandom_bytes(void *buf, int nbytes);
86116 void prandom_seed(u32 seed);
86117 void prandom_reseed_late(void);
86118@@ -37,6 +47,11 @@ struct rnd_state {
86119 u32 prandom_u32_state(struct rnd_state *state);
86120 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
86121
86122+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
86123+{
86124+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
86125+}
86126+
86127 /**
86128 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
86129 * @ep_ro: right open interval endpoint
86130@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
86131 *
86132 * Returns: pseudo-random number in interval [0, ep_ro)
86133 */
86134-static inline u32 prandom_u32_max(u32 ep_ro)
86135+static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
86136 {
86137 return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
86138 }
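prandom_u32_max() keeps its multiply-shift scaling: ((u64)x * ep_ro) >> 32 maps the 2^32 input space onto [0, ep_ro) with one multiply instead of a division; like x % ep_ro it is very slightly non-uniform whenever ep_ro does not divide 2^32, just cheaper. The same arithmetic stand-alone:

#include <linux/types.h>

static u32 scale_example(u32 x, u32 ep_ro)
{
	/* x in [0, 2^32) scales to [0, ep_ro): the high 32 bits of the
	 * 64-bit product select one of ep_ro nearly-equal buckets */
	return (u32)(((u64)x * ep_ro) >> 32);
}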
86139diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
86140index fea49b5..2ac22bb 100644
86141--- a/include/linux/rbtree_augmented.h
86142+++ b/include/linux/rbtree_augmented.h
86143@@ -80,7 +80,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
86144 old->rbaugmented = rbcompute(old); \
86145 } \
86146 rbstatic const struct rb_augment_callbacks rbname = { \
86147- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
86148+ .propagate = rbname ## _propagate, \
86149+ .copy = rbname ## _copy, \
86150+ .rotate = rbname ## _rotate \
86151 };
86152
86153
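Switching rb_augment_callbacks to designated initializers is the usual companion to the layout-randomizing/constifying plugins: positional initializers depend on field order, named ones do not. A toy instance (demo_* callbacks are hypothetical):

#include <linux/rbtree_augmented.h>

static void demo_propagate(struct rb_node *rb, struct rb_node *stop) { }
static void demo_copy(struct rb_node *rb_old, struct rb_node *rb_new) { }
static void demo_rotate(struct rb_node *rb_old, struct rb_node *rb_new) { }

static const struct rb_augment_callbacks demo_cb = {
	.propagate = demo_propagate,	/* stays correct even if the */
	.copy      = demo_copy,		/* struct's field order changes */
	.rotate    = demo_rotate,
};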
86154diff --git a/include/linux/rculist.h b/include/linux/rculist.h
86155index 8183b46..a388711 100644
86156--- a/include/linux/rculist.h
86157+++ b/include/linux/rculist.h
86158@@ -29,8 +29,8 @@
86159 */
86160 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
86161 {
86162- ACCESS_ONCE(list->next) = list;
86163- ACCESS_ONCE(list->prev) = list;
86164+ ACCESS_ONCE_RW(list->next) = list;
86165+ ACCESS_ONCE_RW(list->prev) = list;
86166 }
86167
86168 /*
86169@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
86170 struct list_head *prev, struct list_head *next);
86171 #endif
86172
86173+void __pax_list_add_rcu(struct list_head *new,
86174+ struct list_head *prev, struct list_head *next);
86175+
86176 /**
86177 * list_add_rcu - add a new entry to rcu-protected list
86178 * @new: new entry to be added
86179@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
86180 __list_add_rcu(new, head, head->next);
86181 }
86182
86183+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
86184+{
86185+ __pax_list_add_rcu(new, head, head->next);
86186+}
86187+
86188 /**
86189 * list_add_tail_rcu - add a new entry to rcu-protected list
86190 * @new: new entry to be added
86191@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
86192 __list_add_rcu(new, head->prev, head);
86193 }
86194
86195+static inline void pax_list_add_tail_rcu(struct list_head *new,
86196+ struct list_head *head)
86197+{
86198+ __pax_list_add_rcu(new, head->prev, head);
86199+}
86200+
86201 /**
86202 * list_del_rcu - deletes entry from list without re-initialization
86203 * @entry: the element to delete from the list.
86204@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
86205 entry->prev = LIST_POISON2;
86206 }
86207
86208+extern void pax_list_del_rcu(struct list_head *entry);
86209+
86210 /**
86211 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
86212 * @n: the element to delete from the hash list.
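The pax_list_*_rcu variants keep the semantics of list_add_rcu()/list_del_rcu() but are out of line, so their definitions (not in this hunk) can wrap the pointer stores in the PaX open/close-kernel write window for nodes embedded in otherwise read-only structures; that rationale is an inference from the wider patch. Usage sketch with hypothetical names:

#include <linux/rculist.h>

struct demo_entry {
	struct list_head node;
	int value;
};

static LIST_HEAD(demo_head);
static struct demo_entry demo = { .value = 42 };

static void demo_register(void)
{
	pax_list_add_rcu(&demo.node, &demo_head);	/* like list_add_rcu() */
}

static void demo_unregister(void)
{
	pax_list_del_rcu(&demo.node);			/* like list_del_rcu() */
}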
86213diff --git a/include/linux/reboot.h b/include/linux/reboot.h
86214index 48bf152..d38b785 100644
86215--- a/include/linux/reboot.h
86216+++ b/include/linux/reboot.h
86217@@ -44,9 +44,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
86218 */
86219
86220 extern void migrate_to_reboot_cpu(void);
86221-extern void machine_restart(char *cmd);
86222-extern void machine_halt(void);
86223-extern void machine_power_off(void);
86224+extern void machine_restart(char *cmd) __noreturn;
86225+extern void machine_halt(void) __noreturn;
86226+extern void machine_power_off(void) __noreturn;
86227
86228 extern void machine_shutdown(void);
86229 struct pt_regs;
86230@@ -57,9 +57,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
86231 */
86232
86233 extern void kernel_restart_prepare(char *cmd);
86234-extern void kernel_restart(char *cmd);
86235-extern void kernel_halt(void);
86236-extern void kernel_power_off(void);
86237+extern void kernel_restart(char *cmd) __noreturn;
86238+extern void kernel_halt(void) __noreturn;
86239+extern void kernel_power_off(void) __noreturn;
86240
86241 extern int C_A_D; /* for sysctl */
86242 void ctrl_alt_del(void);
86243@@ -73,7 +73,7 @@ extern int orderly_poweroff(bool force);
86244 * Emergency restart, callable from an interrupt handler.
86245 */
86246
86247-extern void emergency_restart(void);
86248+extern void emergency_restart(void) __noreturn;
86249 #include <asm/emergency-restart.h>
86250
86251 #endif /* _LINUX_REBOOT_H */
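Annotating the halt/restart entry points __noreturn lets the compiler drop dead code after the call and lets callers satisfy their own __noreturn contracts without a bogus fallthrough. Minimal illustration (hypothetical caller):

#include <linux/reboot.h>

static __noreturn void demo_give_up(void)
{
	kernel_power_off();
	/* no return path needs to exist here: the attribute promises
	 * the compiler that kernel_power_off() never comes back */
}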
86252diff --git a/include/linux/regset.h b/include/linux/regset.h
86253index 8e0c9fe..ac4d221 100644
86254--- a/include/linux/regset.h
86255+++ b/include/linux/regset.h
86256@@ -161,7 +161,8 @@ struct user_regset {
86257 unsigned int align;
86258 unsigned int bias;
86259 unsigned int core_note_type;
86260-};
86261+} __do_const;
86262+typedef struct user_regset __no_const user_regset_no_const;
86263
86264 /**
86265 * struct user_regset_view - available regsets
86266diff --git a/include/linux/relay.h b/include/linux/relay.h
86267index d7c8359..818daf5 100644
86268--- a/include/linux/relay.h
86269+++ b/include/linux/relay.h
86270@@ -157,7 +157,7 @@ struct rchan_callbacks
86271 * The callback should return 0 if successful, negative if not.
86272 */
86273 int (*remove_buf_file)(struct dentry *dentry);
86274-};
86275+} __no_const;
86276
86277 /*
86278 * CONFIG_RELAY kernel API, kernel/relay.c
86279diff --git a/include/linux/rio.h b/include/linux/rio.h
86280index 6bda06f..bf39a9b 100644
86281--- a/include/linux/rio.h
86282+++ b/include/linux/rio.h
86283@@ -358,7 +358,7 @@ struct rio_ops {
86284 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
86285 u64 rstart, u32 size, u32 flags);
86286 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
86287-};
86288+} __no_const;
86289
86290 #define RIO_RESOURCE_MEM 0x00000100
86291 #define RIO_RESOURCE_DOORBELL 0x00000200
86292diff --git a/include/linux/rmap.h b/include/linux/rmap.h
86293index be57450..31cf65e 100644
86294--- a/include/linux/rmap.h
86295+++ b/include/linux/rmap.h
86296@@ -144,8 +144,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
86297 void anon_vma_init(void); /* create anon_vma_cachep */
86298 int anon_vma_prepare(struct vm_area_struct *);
86299 void unlink_anon_vmas(struct vm_area_struct *);
86300-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
86301-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
86302+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
86303+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
86304
86305 static inline void anon_vma_merge(struct vm_area_struct *vma,
86306 struct vm_area_struct *next)
86307diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
86308index a964f72..b475afb 100644
86309--- a/include/linux/scatterlist.h
86310+++ b/include/linux/scatterlist.h
86311@@ -1,6 +1,7 @@
86312 #ifndef _LINUX_SCATTERLIST_H
86313 #define _LINUX_SCATTERLIST_H
86314
86315+#include <linux/sched.h>
86316 #include <linux/string.h>
86317 #include <linux/bug.h>
86318 #include <linux/mm.h>
86319@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
86320 #ifdef CONFIG_DEBUG_SG
86321 BUG_ON(!virt_addr_valid(buf));
86322 #endif
86323+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86324+ if (object_starts_on_stack(buf)) {
86325+ void *adjbuf = buf - current->stack + current->lowmem_stack;
86326+ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
86327+ } else
86328+#endif
86329 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
86330 }
86331
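Under GRKERNSEC_KSTACKOVERFLOW the task stack is a vmalloc mapping shadowed by a lowmem alias (current->lowmem_stack), and virt_to_page() is only meaningful for lowmem addresses, so sg_set_buf() must rebase stack buffers onto the alias before building the scatterlist. The rebase arithmetic from the hunk in isolation (a sketch; assumes both mappings cover the same THREAD_SIZE region):

#include <linux/sched.h>

static void *demo_stack_to_lowmem(const void *buf)
{
	/* keep the byte offset within the stack, swap the base */
	unsigned long offset = (unsigned long)buf - (unsigned long)current->stack;

	return current->lowmem_stack + offset;
}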
86332diff --git a/include/linux/sched.h b/include/linux/sched.h
86333index 0376b05..82054c2 100644
86334--- a/include/linux/sched.h
86335+++ b/include/linux/sched.h
86336@@ -131,6 +131,7 @@ struct fs_struct;
86337 struct perf_event_context;
86338 struct blk_plug;
86339 struct filename;
86340+struct linux_binprm;
86341
86342 #define VMACACHE_BITS 2
86343 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
86344@@ -374,7 +375,7 @@ extern char __sched_text_start[], __sched_text_end[];
86345 extern int in_sched_functions(unsigned long addr);
86346
86347 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
86348-extern signed long schedule_timeout(signed long timeout);
86349+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
86350 extern signed long schedule_timeout_interruptible(signed long timeout);
86351 extern signed long schedule_timeout_killable(signed long timeout);
86352 extern signed long schedule_timeout_uninterruptible(signed long timeout);
86353@@ -385,6 +386,19 @@ struct nsproxy;
86354 struct user_namespace;
86355
86356 #ifdef CONFIG_MMU
86357+
86358+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
86359+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
86360+#else
86361+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
86362+{
86363+ return 0;
86364+}
86365+#endif
86366+
86367+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
86368+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
86369+
86370 extern void arch_pick_mmap_layout(struct mm_struct *mm);
86371 extern unsigned long
86372 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
86373@@ -682,6 +696,17 @@ struct signal_struct {
86374 #ifdef CONFIG_TASKSTATS
86375 struct taskstats *stats;
86376 #endif
86377+
86378+#ifdef CONFIG_GRKERNSEC
86379+ u32 curr_ip;
86380+ u32 saved_ip;
86381+ u32 gr_saddr;
86382+ u32 gr_daddr;
86383+ u16 gr_sport;
86384+ u16 gr_dport;
86385+ u8 used_accept:1;
86386+#endif
86387+
86388 #ifdef CONFIG_AUDIT
86389 unsigned audit_tty;
86390 unsigned audit_tty_log_passwd;
86391@@ -708,7 +733,7 @@ struct signal_struct {
86392 struct mutex cred_guard_mutex; /* guard against foreign influences on
86393 * credential calculations
86394 * (notably. ptrace) */
86395-};
86396+} __randomize_layout;
86397
86398 /*
86399 * Bits in flags field of signal_struct.
86400@@ -761,6 +786,14 @@ struct user_struct {
86401 struct key *session_keyring; /* UID's default session keyring */
86402 #endif
86403
86404+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
86405+ unsigned char kernel_banned;
86406+#endif
86407+#ifdef CONFIG_GRKERNSEC_BRUTE
86408+ unsigned char suid_banned;
86409+ unsigned long suid_ban_expires;
86410+#endif
86411+
86412 /* Hash table maintenance information */
86413 struct hlist_node uidhash_node;
86414 kuid_t uid;
86415@@ -768,7 +801,7 @@ struct user_struct {
86416 #ifdef CONFIG_PERF_EVENTS
86417 atomic_long_t locked_vm;
86418 #endif
86419-};
86420+} __randomize_layout;
86421
86422 extern int uids_sysfs_init(void);
86423
86424@@ -1224,6 +1257,9 @@ enum perf_event_task_context {
86425 struct task_struct {
86426 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
86427 void *stack;
86428+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86429+ void *lowmem_stack;
86430+#endif
86431 atomic_t usage;
86432 unsigned int flags; /* per process flags, defined below */
86433 unsigned int ptrace;
86434@@ -1349,8 +1385,8 @@ struct task_struct {
86435 struct list_head thread_node;
86436
86437 struct completion *vfork_done; /* for vfork() */
86438- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
86439- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
86440+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
86441+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
86442
86443 cputime_t utime, stime, utimescaled, stimescaled;
86444 cputime_t gtime;
86445@@ -1375,11 +1411,6 @@ struct task_struct {
86446 struct task_cputime cputime_expires;
86447 struct list_head cpu_timers[3];
86448
86449-/* process credentials */
86450- const struct cred __rcu *real_cred; /* objective and real subjective task
86451- * credentials (COW) */
86452- const struct cred __rcu *cred; /* effective (overridable) subjective task
86453- * credentials (COW) */
86454 char comm[TASK_COMM_LEN]; /* executable name excluding path
86455 - access with [gs]et_task_comm (which lock
86456 it with task_lock())
86457@@ -1396,6 +1427,10 @@ struct task_struct {
86458 #endif
86459 /* CPU-specific state of this task */
86460 struct thread_struct thread;
86461+/* thread_info moved to task_struct */
86462+#ifdef CONFIG_X86
86463+ struct thread_info tinfo;
86464+#endif
86465 /* filesystem information */
86466 struct fs_struct *fs;
86467 /* open file information */
86468@@ -1472,6 +1507,10 @@ struct task_struct {
86469 gfp_t lockdep_reclaim_gfp;
86470 #endif
86471
86472+/* process credentials */
86473+ const struct cred __rcu *real_cred; /* objective and real subjective task
86474+ * credentials (COW) */
86475+
86476 /* journalling filesystem info */
86477 void *journal_info;
86478
86479@@ -1510,6 +1549,10 @@ struct task_struct {
86480 /* cg_list protected by css_set_lock and tsk->alloc_lock */
86481 struct list_head cg_list;
86482 #endif
86483+
86484+ const struct cred __rcu *cred; /* effective (overridable) subjective task
86485+ * credentials (COW) */
86486+
86487 #ifdef CONFIG_FUTEX
86488 struct robust_list_head __user *robust_list;
86489 #ifdef CONFIG_COMPAT
86490@@ -1655,7 +1698,78 @@ struct task_struct {
86491 unsigned int sequential_io;
86492 unsigned int sequential_io_avg;
86493 #endif
86494-};
86495+
86496+#ifdef CONFIG_GRKERNSEC
86497+ /* grsecurity */
86498+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
86499+ u64 exec_id;
86500+#endif
86501+#ifdef CONFIG_GRKERNSEC_SETXID
86502+ const struct cred *delayed_cred;
86503+#endif
86504+ struct dentry *gr_chroot_dentry;
86505+ struct acl_subject_label *acl;
86506+ struct acl_subject_label *tmpacl;
86507+ struct acl_role_label *role;
86508+ struct file *exec_file;
86509+ unsigned long brute_expires;
86510+ u16 acl_role_id;
86511+ u8 inherited;
86512+ /* is this the task that authenticated to the special role */
86513+ u8 acl_sp_role;
86514+ u8 is_writable;
86515+ u8 brute;
86516+ u8 gr_is_chrooted;
86517+#endif
86518+
86519+} __randomize_layout;
86520+
86521+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
86522+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
86523+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
86524+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
86525+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
86526+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
86527+
86528+#ifdef CONFIG_PAX_SOFTMODE
86529+extern int pax_softmode;
86530+#endif
86531+
86532+extern int pax_check_flags(unsigned long *);
86533+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
86534+
86535+/* if tsk != current then task_lock must be held on it */
86536+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
86537+static inline unsigned long pax_get_flags(struct task_struct *tsk)
86538+{
86539+ if (likely(tsk->mm))
86540+ return tsk->mm->pax_flags;
86541+ else
86542+ return 0UL;
86543+}
86544+
86545+/* if tsk != current then task_lock must be held on it */
86546+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
86547+{
86548+ if (likely(tsk->mm)) {
86549+ tsk->mm->pax_flags = flags;
86550+ return 0;
86551+ }
86552+ return -EINVAL;
86553+}
86554+#endif
86555+
86556+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
86557+extern void pax_set_initial_flags(struct linux_binprm *bprm);
86558+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
86559+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
86560+#endif
86561+
86562+struct path;
86563+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
86564+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
86565+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
86566+extern void pax_report_refcount_overflow(struct pt_regs *regs);
86567
86568 /* Future-safe accessor for struct task_struct's cpus_allowed. */
86569 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
86570@@ -1737,7 +1851,7 @@ struct pid_namespace;
86571 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
86572 struct pid_namespace *ns);
86573
86574-static inline pid_t task_pid_nr(struct task_struct *tsk)
86575+static inline pid_t task_pid_nr(const struct task_struct *tsk)
86576 {
86577 return tsk->pid;
86578 }
86579@@ -2084,6 +2198,25 @@ extern u64 sched_clock_cpu(int cpu);
86580
86581 extern void sched_clock_init(void);
86582
86583+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86584+static inline void populate_stack(void)
86585+{
86586+ struct task_struct *curtask = current;
86587+ int c;
86588+ int *ptr = curtask->stack;
86589+ int *end = curtask->stack + THREAD_SIZE;
86590+
86591+ while (ptr < end) {
86592+ c = *(volatile int *)ptr;
86593+ ptr += PAGE_SIZE/sizeof(int);
86594+ }
86595+}
86596+#else
86597+static inline void populate_stack(void)
86598+{
86599+}
86600+#endif
86601+
86602 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
86603 static inline void sched_clock_tick(void)
86604 {
86605@@ -2217,7 +2350,9 @@ void yield(void);
86606 extern struct exec_domain default_exec_domain;
86607
86608 union thread_union {
86609+#ifndef CONFIG_X86
86610 struct thread_info thread_info;
86611+#endif
86612 unsigned long stack[THREAD_SIZE/sizeof(long)];
86613 };
86614
86615@@ -2250,6 +2385,7 @@ extern struct pid_namespace init_pid_ns;
86616 */
86617
86618 extern struct task_struct *find_task_by_vpid(pid_t nr);
86619+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
86620 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
86621 struct pid_namespace *ns);
86622
86623@@ -2412,7 +2548,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
86624 extern void exit_itimers(struct signal_struct *);
86625 extern void flush_itimer_signals(void);
86626
86627-extern void do_group_exit(int);
86628+extern __noreturn void do_group_exit(int);
86629
86630 extern int do_execve(struct filename *,
86631 const char __user * const __user *,
86632@@ -2614,9 +2750,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
86633
86634 #endif
86635
86636-static inline int object_is_on_stack(void *obj)
86637+static inline int object_starts_on_stack(const void *obj)
86638 {
86639- void *stack = task_stack_page(current);
86640+ const void *stack = task_stack_page(current);
86641
86642 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
86643 }
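pax_get_flags()/pax_set_flags() expose the per-mm MF_PAX_* bits added above; as the hunk's comments note, task_lock must be held when tsk != current. A hypothetical query helper:

#include <linux/sched.h>

/* caller must hold task_lock(tsk) when tsk != current;
 * assumes CONFIG_PAX_NOEXEC or CONFIG_PAX_ASLR, which gate pax_get_flags() */
static bool task_wants_pageexec(struct task_struct *tsk)
{
	return (pax_get_flags(tsk) & MF_PAX_PAGEEXEC) != 0;
}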
86644diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
86645index 596a0e0..bea77ec 100644
86646--- a/include/linux/sched/sysctl.h
86647+++ b/include/linux/sched/sysctl.h
86648@@ -34,6 +34,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
86649 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
86650
86651 extern int sysctl_max_map_count;
86652+extern unsigned long sysctl_heap_stack_gap;
86653
86654 extern unsigned int sysctl_sched_latency;
86655 extern unsigned int sysctl_sched_min_granularity;
86656diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
86657index 4054b09..6f19cfd 100644
86658--- a/include/linux/seccomp.h
86659+++ b/include/linux/seccomp.h
86660@@ -76,6 +76,7 @@ static inline int seccomp_mode(struct seccomp *s)
86661 #ifdef CONFIG_SECCOMP_FILTER
86662 extern void put_seccomp_filter(struct task_struct *tsk);
86663 extern void get_seccomp_filter(struct task_struct *tsk);
86664+extern u32 seccomp_bpf_load(int off);
86665 #else /* CONFIG_SECCOMP_FILTER */
86666 static inline void put_seccomp_filter(struct task_struct *tsk)
86667 {
86668diff --git a/include/linux/security.h b/include/linux/security.h
86669index 9c6b972..7e7c704 100644
86670--- a/include/linux/security.h
86671+++ b/include/linux/security.h
86672@@ -27,6 +27,7 @@
86673 #include <linux/slab.h>
86674 #include <linux/err.h>
86675 #include <linux/string.h>
86676+#include <linux/grsecurity.h>
86677
86678 struct linux_binprm;
86679 struct cred;
86680@@ -116,8 +117,6 @@ struct seq_file;
86681
86682 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
86683
86684-void reset_security_ops(void);
86685-
86686 #ifdef CONFIG_MMU
86687 extern unsigned long mmap_min_addr;
86688 extern unsigned long dac_mmap_min_addr;
86689@@ -1719,7 +1718,7 @@ struct security_operations {
86690 struct audit_context *actx);
86691 void (*audit_rule_free) (void *lsmrule);
86692 #endif /* CONFIG_AUDIT */
86693-};
86694+} __randomize_layout;
86695
86696 /* prototypes */
86697 extern int security_init(void);
86698diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
86699index dc368b8..e895209 100644
86700--- a/include/linux/semaphore.h
86701+++ b/include/linux/semaphore.h
86702@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
86703 }
86704
86705 extern void down(struct semaphore *sem);
86706-extern int __must_check down_interruptible(struct semaphore *sem);
86707+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
86708 extern int __must_check down_killable(struct semaphore *sem);
86709 extern int __must_check down_trylock(struct semaphore *sem);
86710 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
86711diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
86712index 52e0097..383f21d 100644
86713--- a/include/linux/seq_file.h
86714+++ b/include/linux/seq_file.h
86715@@ -27,6 +27,9 @@ struct seq_file {
86716 struct mutex lock;
86717 const struct seq_operations *op;
86718 int poll_event;
86719+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
86720+ u64 exec_id;
86721+#endif
86722 #ifdef CONFIG_USER_NS
86723 struct user_namespace *user_ns;
86724 #endif
86725@@ -39,6 +42,7 @@ struct seq_operations {
86726 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
86727 int (*show) (struct seq_file *m, void *v);
86728 };
86729+typedef struct seq_operations __no_const seq_operations_no_const;
86730
86731 #define SEQ_SKIP 1
86732
86733@@ -96,6 +100,7 @@ void seq_pad(struct seq_file *m, char c);
86734
86735 char *mangle_path(char *s, const char *p, const char *esc);
86736 int seq_open(struct file *, const struct seq_operations *);
86737+int seq_open_restrict(struct file *, const struct seq_operations *);
86738 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
86739 loff_t seq_lseek(struct file *, loff_t, int);
86740 int seq_release(struct inode *, struct file *);
86741@@ -138,6 +143,7 @@ static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
86742 }
86743
86744 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
86745+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
86746 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
86747 int single_release(struct inode *, struct file *);
86748 void *__seq_open_private(struct file *, const struct seq_operations *, int);
86749diff --git a/include/linux/shm.h b/include/linux/shm.h
86750index 57d7770..0936af6 100644
86751--- a/include/linux/shm.h
86752+++ b/include/linux/shm.h
86753@@ -20,6 +20,10 @@ struct shmid_kernel /* private to the kernel */
86754
86755 /* The task created the shm object. NULL if the task is dead. */
86756 struct task_struct *shm_creator;
86757+#ifdef CONFIG_GRKERNSEC
86758+ time_t shm_createtime;
86759+ pid_t shm_lapid;
86760+#endif
86761 };
86762
86763 /* shm_mode upper byte flags */
86764diff --git a/include/linux/signal.h b/include/linux/signal.h
86765index c9e6536..923b302 100644
86766--- a/include/linux/signal.h
86767+++ b/include/linux/signal.h
86768@@ -293,7 +293,7 @@ static inline void allow_signal(int sig)
86769 * know it'll be handled, so that they don't get converted to
86770 * SIGKILL or just silently dropped.
86771 */
86772- kernel_sigaction(sig, (__force __sighandler_t)2);
86773+ kernel_sigaction(sig, (__force_user __sighandler_t)2);
86774 }
86775
86776 static inline void disallow_signal(int sig)
86777diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
86778index ec89301..4fd29a6 100644
86779--- a/include/linux/skbuff.h
86780+++ b/include/linux/skbuff.h
86781@@ -725,7 +725,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
86782 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
86783 int node);
86784 struct sk_buff *build_skb(void *data, unsigned int frag_size);
86785-static inline struct sk_buff *alloc_skb(unsigned int size,
86786+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
86787 gfp_t priority)
86788 {
86789 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
86790@@ -1839,7 +1839,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
86791 return skb->inner_transport_header - skb->inner_network_header;
86792 }
86793
86794-static inline int skb_network_offset(const struct sk_buff *skb)
86795+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
86796 {
86797 return skb_network_header(skb) - skb->data;
86798 }
86799@@ -1911,7 +1911,7 @@ static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb)
86800 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
86801 */
86802 #ifndef NET_SKB_PAD
86803-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
86804+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
86805 #endif
86806
86807 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
86808@@ -2518,7 +2518,7 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
86809 int *err);
86810 unsigned int datagram_poll(struct file *file, struct socket *sock,
86811 struct poll_table_struct *wait);
86812-int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
86813+int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
86814 struct iovec *to, int size);
86815 int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
86816 struct iovec *iov);
86817@@ -2664,6 +2664,8 @@ static inline ktime_t net_invalid_timestamp(void)
86818 return ktime_set(0, 0);
86819 }
86820
86821+void skb_timestamping_init(void);
86822+
86823 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
86824
86825 void skb_clone_tx_timestamp(struct sk_buff *skb);
86826@@ -2907,6 +2909,9 @@ static inline void nf_reset(struct sk_buff *skb)
86827 nf_bridge_put(skb->nf_bridge);
86828 skb->nf_bridge = NULL;
86829 #endif
86830+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
86831+ skb->nf_trace = 0;
86832+#endif
86833 }
86834
86835 static inline void nf_reset_trace(struct sk_buff *skb)
86836diff --git a/include/linux/slab.h b/include/linux/slab.h
86837index 1d9abb7..b1e8b10 100644
86838--- a/include/linux/slab.h
86839+++ b/include/linux/slab.h
86840@@ -14,15 +14,29 @@
86841 #include <linux/gfp.h>
86842 #include <linux/types.h>
86843 #include <linux/workqueue.h>
86844-
86845+#include <linux/err.h>
86846
86847 /*
86848 * Flags to pass to kmem_cache_create().
86849 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
86850 */
86851 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
86852+
86853+#ifdef CONFIG_PAX_USERCOPY_SLABS
86854+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
86855+#else
86856+#define SLAB_USERCOPY 0x00000000UL
86857+#endif
86858+
86859 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
86860 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
86861+
86862+#ifdef CONFIG_PAX_MEMORY_SANITIZE
86863+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
86864+#else
86865+#define SLAB_NO_SANITIZE 0x00000000UL
86866+#endif
86867+
86868 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
86869 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
86870 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
86871@@ -98,10 +112,13 @@
86872 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
86873 * Both make kfree a no-op.
86874 */
86875-#define ZERO_SIZE_PTR ((void *)16)
86876+#define ZERO_SIZE_PTR \
86877+({ \
86878+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
86879+ (void *)(-MAX_ERRNO-1L); \
86880+})
86881
86882-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
86883- (unsigned long)ZERO_SIZE_PTR)
86884+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
86885
86886 #include <linux/kmemleak.h>
86887
86888@@ -144,6 +161,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
86889 void kfree(const void *);
86890 void kzfree(const void *);
86891 size_t ksize(const void *);
86892+const char *check_heap_object(const void *ptr, unsigned long n);
86893+bool is_usercopy_object(const void *ptr);
86894
86895 /*
86896 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
86897@@ -176,7 +195,7 @@ struct kmem_cache {
86898 unsigned int align; /* Alignment as calculated */
86899 unsigned long flags; /* Active flags on the slab */
86900 const char *name; /* Slab name for sysfs */
86901- int refcount; /* Use counter */
86902+ atomic_t refcount; /* Use counter */
86903 void (*ctor)(void *); /* Called on object slot creation */
86904 struct list_head list; /* List of all slab caches on the system */
86905 };
86906@@ -261,6 +280,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
86907 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
86908 #endif
86909
86910+#ifdef CONFIG_PAX_USERCOPY_SLABS
86911+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
86912+#endif
86913+
86914 /*
86915 * Figure out which kmalloc slab an allocation of a certain size
86916 * belongs to.
86917@@ -269,7 +292,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
86918 * 2 = 120 .. 192 bytes
86919 * n = 2^(n-1) .. 2^n -1
86920 */
86921-static __always_inline int kmalloc_index(size_t size)
86922+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
86923 {
86924 if (!size)
86925 return 0;
86926@@ -312,11 +335,11 @@ static __always_inline int kmalloc_index(size_t size)
86927 }
86928 #endif /* !CONFIG_SLOB */
86929
86930-void *__kmalloc(size_t size, gfp_t flags);
86931+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
86932 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
86933
86934 #ifdef CONFIG_NUMA
86935-void *__kmalloc_node(size_t size, gfp_t flags, int node);
86936+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
86937 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
86938 #else
86939 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
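The new ZERO_SIZE_PTR sits just below the ERR_PTR range, and the rewritten ZERO_OR_NULL_PTR() folds three tests into one unsigned compare: with z = (unsigned long)ZERO_SIZE_PTR, x == NULL wraps to ULONG_MAX, every x >= z trivially passes, and everything in between fails — so NULL, ZERO_SIZE_PTR, and all ERR_PTR values (which live in [-MAX_ERRNO, -1]) are caught by a single branch. The same predicate stand-alone:

#include <linux/err.h>
#include <linux/types.h>

static inline bool zero_or_null_example(const void *x)
{
	const unsigned long z = (unsigned long)(-MAX_ERRNO - 1L);

	/* true for x == NULL (0 - 1 wraps), x == z, and every ERR_PTR above z */
	return (unsigned long)x - 1 >= z - 1;
}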
86940diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
86941index 8235dfb..47ce586 100644
86942--- a/include/linux/slab_def.h
86943+++ b/include/linux/slab_def.h
86944@@ -38,7 +38,7 @@ struct kmem_cache {
86945 /* 4) cache creation/removal */
86946 const char *name;
86947 struct list_head list;
86948- int refcount;
86949+ atomic_t refcount;
86950 int object_size;
86951 int align;
86952
86953@@ -54,10 +54,14 @@ struct kmem_cache {
86954 unsigned long node_allocs;
86955 unsigned long node_frees;
86956 unsigned long node_overflow;
86957- atomic_t allochit;
86958- atomic_t allocmiss;
86959- atomic_t freehit;
86960- atomic_t freemiss;
86961+ atomic_unchecked_t allochit;
86962+ atomic_unchecked_t allocmiss;
86963+ atomic_unchecked_t freehit;
86964+ atomic_unchecked_t freemiss;
86965+#ifdef CONFIG_PAX_MEMORY_SANITIZE
86966+ atomic_unchecked_t sanitized;
86967+ atomic_unchecked_t not_sanitized;
86968+#endif
86969
86970 /*
86971 * If debugging is enabled, then the allocator can add additional
86972diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
86973index d82abd4..408c3a0 100644
86974--- a/include/linux/slub_def.h
86975+++ b/include/linux/slub_def.h
86976@@ -74,7 +74,7 @@ struct kmem_cache {
86977 struct kmem_cache_order_objects max;
86978 struct kmem_cache_order_objects min;
86979 gfp_t allocflags; /* gfp flags to use on each alloc */
86980- int refcount; /* Refcount for slab cache destroy */
86981+ atomic_t refcount; /* Refcount for slab cache destroy */
86982 void (*ctor)(void *);
86983 int inuse; /* Offset to metadata */
86984 int align; /* Alignment */
86985diff --git a/include/linux/smp.h b/include/linux/smp.h
86986index 34347f2..8739978 100644
86987--- a/include/linux/smp.h
86988+++ b/include/linux/smp.h
86989@@ -174,7 +174,9 @@ static inline void kick_all_cpus_sync(void) { }
86990 #endif
86991
86992 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
86993+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
86994 #define put_cpu() preempt_enable()
86995+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
86996
86997 /*
86998 * Callback to arch code if there's nosmp or maxcpus=0 on the
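raw_get_cpu()/raw_put_cpu_no_resched() give the familiar get_cpu()/put_cpu() shape on top of the raw preempt macros from the preempt.h hunk earlier. A hypothetical per-cpu counter bump:

#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(unsigned long, demo_counter);	/* hypothetical counter */

static void demo_count_event(void)
{
	int cpu = raw_get_cpu();	/* raw_preempt_disable() + cpu id */

	per_cpu(demo_counter, cpu)++;
	raw_put_cpu_no_resched();	/* drop the count, skip the resched check */
}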
86999diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
87000index 46cca4c..3323536 100644
87001--- a/include/linux/sock_diag.h
87002+++ b/include/linux/sock_diag.h
87003@@ -11,7 +11,7 @@ struct sock;
87004 struct sock_diag_handler {
87005 __u8 family;
87006 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
87007-};
87008+} __do_const;
87009
87010 int sock_diag_register(const struct sock_diag_handler *h);
87011 void sock_diag_unregister(const struct sock_diag_handler *h);
87012diff --git a/include/linux/sonet.h b/include/linux/sonet.h
87013index 680f9a3..f13aeb0 100644
87014--- a/include/linux/sonet.h
87015+++ b/include/linux/sonet.h
87016@@ -7,7 +7,7 @@
87017 #include <uapi/linux/sonet.h>
87018
87019 struct k_sonet_stats {
87020-#define __HANDLE_ITEM(i) atomic_t i
87021+#define __HANDLE_ITEM(i) atomic_unchecked_t i
87022 __SONET_ITEMS
87023 #undef __HANDLE_ITEM
87024 };
87025diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
87026index 07d8e53..dc934c9 100644
87027--- a/include/linux/sunrpc/addr.h
87028+++ b/include/linux/sunrpc/addr.h
87029@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
87030 {
87031 switch (sap->sa_family) {
87032 case AF_INET:
87033- return ntohs(((struct sockaddr_in *)sap)->sin_port);
87034+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
87035 case AF_INET6:
87036- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
87037+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
87038 }
87039 return 0;
87040 }
87041@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
87042 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
87043 const struct sockaddr *src)
87044 {
87045- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
87046+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
87047 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
87048
87049 dsin->sin_family = ssin->sin_family;
87050@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
87051 if (sa->sa_family != AF_INET6)
87052 return 0;
87053
87054- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
87055+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
87056 }
87057
87058 #endif /* _LINUX_SUNRPC_ADDR_H */
87059diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
87060index 70736b9..37f33db 100644
87061--- a/include/linux/sunrpc/clnt.h
87062+++ b/include/linux/sunrpc/clnt.h
87063@@ -97,7 +97,7 @@ struct rpc_procinfo {
87064 unsigned int p_timer; /* Which RTT timer to use */
87065 u32 p_statidx; /* Which procedure to account */
87066 const char * p_name; /* name of procedure */
87067-};
87068+} __do_const;
87069
87070 #ifdef __KERNEL__
87071
87072diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
87073index 1bc7cd0..7912dc2 100644
87074--- a/include/linux/sunrpc/svc.h
87075+++ b/include/linux/sunrpc/svc.h
87076@@ -417,7 +417,7 @@ struct svc_procedure {
87077 unsigned int pc_count; /* call count */
87078 unsigned int pc_cachetype; /* cache info (NFS) */
87079 unsigned int pc_xdrressize; /* maximum size of XDR reply */
87080-};
87081+} __do_const;
87082
87083 /*
87084 * Function prototypes.
87085diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
87086index 5cf99a0..c0a1b98 100644
87087--- a/include/linux/sunrpc/svc_rdma.h
87088+++ b/include/linux/sunrpc/svc_rdma.h
87089@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
87090 extern unsigned int svcrdma_max_requests;
87091 extern unsigned int svcrdma_max_req_size;
87092
87093-extern atomic_t rdma_stat_recv;
87094-extern atomic_t rdma_stat_read;
87095-extern atomic_t rdma_stat_write;
87096-extern atomic_t rdma_stat_sq_starve;
87097-extern atomic_t rdma_stat_rq_starve;
87098-extern atomic_t rdma_stat_rq_poll;
87099-extern atomic_t rdma_stat_rq_prod;
87100-extern atomic_t rdma_stat_sq_poll;
87101-extern atomic_t rdma_stat_sq_prod;
87102+extern atomic_unchecked_t rdma_stat_recv;
87103+extern atomic_unchecked_t rdma_stat_read;
87104+extern atomic_unchecked_t rdma_stat_write;
87105+extern atomic_unchecked_t rdma_stat_sq_starve;
87106+extern atomic_unchecked_t rdma_stat_rq_starve;
87107+extern atomic_unchecked_t rdma_stat_rq_poll;
87108+extern atomic_unchecked_t rdma_stat_rq_prod;
87109+extern atomic_unchecked_t rdma_stat_sq_poll;
87110+extern atomic_unchecked_t rdma_stat_sq_prod;
87111
87112 #define RPCRDMA_VERSION 1
87113
87114diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
87115index 8d71d65..f79586e 100644
87116--- a/include/linux/sunrpc/svcauth.h
87117+++ b/include/linux/sunrpc/svcauth.h
87118@@ -120,7 +120,7 @@ struct auth_ops {
87119 int (*release)(struct svc_rqst *rq);
87120 void (*domain_release)(struct auth_domain *);
87121 int (*set_client)(struct svc_rqst *rq);
87122-};
87123+} __do_const;
87124
87125 #define SVC_GARBAGE 1
87126 #define SVC_SYSERR 2
87127diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
87128index e7a018e..49f8b17 100644
87129--- a/include/linux/swiotlb.h
87130+++ b/include/linux/swiotlb.h
87131@@ -60,7 +60,8 @@ extern void
87132
87133 extern void
87134 swiotlb_free_coherent(struct device *hwdev, size_t size,
87135- void *vaddr, dma_addr_t dma_handle);
87136+ void *vaddr, dma_addr_t dma_handle,
87137+ struct dma_attrs *attrs);
87138
87139 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
87140 unsigned long offset, size_t size,
87141diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
87142index b0881a0..559a440 100644
87143--- a/include/linux/syscalls.h
87144+++ b/include/linux/syscalls.h
87145@@ -98,10 +98,16 @@ struct sigaltstack;
87146 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
87147
87148 #define __SC_DECL(t, a) t a
87149+#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
87150 #define __TYPE_IS_L(t) (__same_type((t)0, 0L))
87151 #define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
87152 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
87153-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
87154+#define __SC_LONG(t, a) __typeof( \
87155+ __builtin_choose_expr( \
87156+ sizeof(t) > sizeof(int), \
87157+ (t) 0, \
87158+ __builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L) \
87159+ )) a
87160 #define __SC_CAST(t, a) (t) a
87161 #define __SC_ARGS(t, a) a
87162 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
87163@@ -379,11 +385,11 @@ asmlinkage long sys_sync(void);
87164 asmlinkage long sys_fsync(unsigned int fd);
87165 asmlinkage long sys_fdatasync(unsigned int fd);
87166 asmlinkage long sys_bdflush(int func, long data);
87167-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
87168- char __user *type, unsigned long flags,
87169+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
87170+ const char __user *type, unsigned long flags,
87171 void __user *data);
87172-asmlinkage long sys_umount(char __user *name, int flags);
87173-asmlinkage long sys_oldumount(char __user *name);
87174+asmlinkage long sys_umount(const char __user *name, int flags);
87175+asmlinkage long sys_oldumount(const char __user *name);
87176 asmlinkage long sys_truncate(const char __user *path, long length);
87177 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
87178 asmlinkage long sys_stat(const char __user *filename,
87179@@ -595,7 +601,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
87180 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
87181 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
87182 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
87183- struct sockaddr __user *, int);
87184+ struct sockaddr __user *, int) __intentional_overflow(0);
87185 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
87186 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
87187 unsigned int vlen, unsigned flags);
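The rewritten __SC_LONG picks the register-width intermediate type by size and signedness instead of always widening through signed long. The point of the signedness split: a 32-bit unsigned argument with its top bit set must zero-extend, not sign-extend, on its way through a 64-bit syscall register, and the size_overflow plugin instrumenting these wrappers presumably relies on the intermediate type matching (an inference; the hunk itself does not say). Stand-alone illustration of the difference:

static void demo_widen(void)
{
	unsigned int arg = 0xffffffffU;

	long s          = (int)arg;	/* sign-extends: s == -1 */
	unsigned long u = arg;		/* zero-extends: u == 0xffffffffUL */

	(void)s;
	(void)u;
}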
87188diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
87189index 27b3b0b..e093dd9 100644
87190--- a/include/linux/syscore_ops.h
87191+++ b/include/linux/syscore_ops.h
87192@@ -16,7 +16,7 @@ struct syscore_ops {
87193 int (*suspend)(void);
87194 void (*resume)(void);
87195 void (*shutdown)(void);
87196-};
87197+} __do_const;
87198
87199 extern void register_syscore_ops(struct syscore_ops *ops);
87200 extern void unregister_syscore_ops(struct syscore_ops *ops);
87201diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
87202index 14a8ff2..fa95f3a 100644
87203--- a/include/linux/sysctl.h
87204+++ b/include/linux/sysctl.h
87205@@ -34,13 +34,13 @@ struct ctl_table_root;
87206 struct ctl_table_header;
87207 struct ctl_dir;
87208
87209-typedef struct ctl_table ctl_table;
87210-
87211 typedef int proc_handler (struct ctl_table *ctl, int write,
87212 void __user *buffer, size_t *lenp, loff_t *ppos);
87213
87214 extern int proc_dostring(struct ctl_table *, int,
87215 void __user *, size_t *, loff_t *);
87216+extern int proc_dostring_modpriv(struct ctl_table *, int,
87217+ void __user *, size_t *, loff_t *);
87218 extern int proc_dointvec(struct ctl_table *, int,
87219 void __user *, size_t *, loff_t *);
87220 extern int proc_dointvec_minmax(struct ctl_table *, int,
87221@@ -115,7 +115,9 @@ struct ctl_table
87222 struct ctl_table_poll *poll;
87223 void *extra1;
87224 void *extra2;
87225-};
87226+} __do_const __randomize_layout;
87227+typedef struct ctl_table __no_const ctl_table_no_const;
87228+typedef struct ctl_table ctl_table;
87229
87230 struct ctl_node {
87231 struct rb_node node;
87232diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
87233index f97d0db..c1187dc 100644
87234--- a/include/linux/sysfs.h
87235+++ b/include/linux/sysfs.h
87236@@ -34,7 +34,8 @@ struct attribute {
87237 struct lock_class_key *key;
87238 struct lock_class_key skey;
87239 #endif
87240-};
87241+} __do_const;
87242+typedef struct attribute __no_const attribute_no_const;
87243
87244 /**
87245 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
87246@@ -63,7 +64,8 @@ struct attribute_group {
87247 struct attribute *, int);
87248 struct attribute **attrs;
87249 struct bin_attribute **bin_attrs;
87250-};
87251+} __do_const;
87252+typedef struct attribute_group __no_const attribute_group_no_const;
87253
87254 /**
87255 * Use these macros to make defining attributes easier. See include/linux/device.h
87256@@ -128,7 +130,8 @@ struct bin_attribute {
87257 char *, loff_t, size_t);
87258 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
87259 struct vm_area_struct *vma);
87260-};
87261+} __do_const;
87262+typedef struct bin_attribute __no_const bin_attribute_no_const;
87263
87264 /**
87265 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
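The recurring __do_const / *_no_const pairs come from the PaX constify plugin: ops and attribute structures become implicitly const (read-only after init), and the typedef opts out for the rare object that must be built at runtime. A hypothetical runtime-built attribute using the opt-out type added above:

#include <linux/sysfs.h>

static attribute_no_const demo_attr;	/* built at runtime, so opted out */

static void demo_attr_setup(const char *name)
{
	sysfs_attr_init(&demo_attr);	/* lockdep key init for runtime attributes */
	demo_attr.name = name;
	demo_attr.mode = 0444;
}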
87266diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
87267index 387fa7d..3fcde6b 100644
87268--- a/include/linux/sysrq.h
87269+++ b/include/linux/sysrq.h
87270@@ -16,6 +16,7 @@
87271
87272 #include <linux/errno.h>
87273 #include <linux/types.h>
87274+#include <linux/compiler.h>
87275
87276 /* Possible values of bitmask for enabling sysrq functions */
87277 /* 0x0001 is reserved for enable everything */
87278@@ -33,7 +34,7 @@ struct sysrq_key_op {
87279 char *help_msg;
87280 char *action_msg;
87281 int enable_mask;
87282-};
87283+} __do_const;
87284
87285 #ifdef CONFIG_MAGIC_SYSRQ
87286
87287diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
87288index ff307b5..f1a4468 100644
87289--- a/include/linux/thread_info.h
87290+++ b/include/linux/thread_info.h
87291@@ -145,6 +145,13 @@ static inline bool test_and_clear_restore_sigmask(void)
87292 #error "no set_restore_sigmask() provided and default one won't work"
87293 #endif
87294
87295+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
87296+
87297+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
87298+{
87299+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
87300+}
87301+
87302 #endif /* __KERNEL__ */
87303
87304 #endif /* _LINUX_THREAD_INFO_H */
87305diff --git a/include/linux/tty.h b/include/linux/tty.h
87306index 1c3316a..ae83b9f 100644
87307--- a/include/linux/tty.h
87308+++ b/include/linux/tty.h
87309@@ -202,7 +202,7 @@ struct tty_port {
87310 const struct tty_port_operations *ops; /* Port operations */
87311 spinlock_t lock; /* Lock protecting tty field */
87312 int blocked_open; /* Waiting to open */
87313- int count; /* Usage count */
87314+ atomic_t count; /* Usage count */
87315 wait_queue_head_t open_wait; /* Open waiters */
87316 wait_queue_head_t close_wait; /* Close waiters */
87317 wait_queue_head_t delta_msr_wait; /* Modem status change */
87318@@ -284,7 +284,7 @@ struct tty_struct {
87319 /* If the tty has a pending do_SAK, queue it here - akpm */
87320 struct work_struct SAK_work;
87321 struct tty_port *port;
87322-};
87323+} __randomize_layout;
87324
87325 /* Each of a tty's open files has private_data pointing to tty_file_private */
87326 struct tty_file_private {
87327@@ -550,7 +550,7 @@ extern int tty_port_open(struct tty_port *port,
87328 struct tty_struct *tty, struct file *filp);
87329 static inline int tty_port_users(struct tty_port *port)
87330 {
87331- return port->count + port->blocked_open;
87332+ return atomic_read(&port->count) + port->blocked_open;
87333 }
87334
87335 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
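
Converting tty_port.count from int to atomic_t closes a read-modify-write race: concurrent open and close paths doing port->count++ on a plain int can lose updates, and under PAX_REFCOUNT the atomic variant also gets overflow detection. Readers switch to atomic_read(), as tty_port_users() above shows. A userspace stand-in using C11 atomics, with atomic_int playing the role of the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

struct tty_port_stub {
        atomic_int count;               /* was: int count */
        int blocked_open;
};

static int tty_port_users_stub(struct tty_port_stub *port)
{
        return atomic_load(&port->count) + port->blocked_open;
}

int main(void)
{
        struct tty_port_stub port = { .count = 0, .blocked_open = 1 };

        atomic_fetch_add(&port.count, 1);       /* open path: was port->count++ */
        printf("users=%d\n", tty_port_users_stub(&port));
        atomic_fetch_sub(&port.count, 1);       /* close path */
        return 0;
}
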
87336diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
87337index 756a609..89db85e 100644
87338--- a/include/linux/tty_driver.h
87339+++ b/include/linux/tty_driver.h
87340@@ -285,7 +285,7 @@ struct tty_operations {
87341 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
87342 #endif
87343 const struct file_operations *proc_fops;
87344-};
87345+} __do_const __randomize_layout;
87346
87347 struct tty_driver {
87348 int magic; /* magic number for this structure */
87349@@ -319,7 +319,7 @@ struct tty_driver {
87350
87351 const struct tty_operations *ops;
87352 struct list_head tty_drivers;
87353-};
87354+} __randomize_layout;
87355
87356 extern struct list_head tty_drivers;
87357
87358diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
87359index 00c9d68..bc0188b 100644
87360--- a/include/linux/tty_ldisc.h
87361+++ b/include/linux/tty_ldisc.h
87362@@ -215,7 +215,7 @@ struct tty_ldisc_ops {
87363
87364 struct module *owner;
87365
87366- int refcount;
87367+ atomic_t refcount;
87368 };
87369
87370 struct tty_ldisc {
87371diff --git a/include/linux/types.h b/include/linux/types.h
87372index a0bb704..f511c77 100644
87373--- a/include/linux/types.h
87374+++ b/include/linux/types.h
87375@@ -177,10 +177,26 @@ typedef struct {
87376 int counter;
87377 } atomic_t;
87378
87379+#ifdef CONFIG_PAX_REFCOUNT
87380+typedef struct {
87381+ int counter;
87382+} atomic_unchecked_t;
87383+#else
87384+typedef atomic_t atomic_unchecked_t;
87385+#endif
87386+
87387 #ifdef CONFIG_64BIT
87388 typedef struct {
87389 long counter;
87390 } atomic64_t;
87391+
87392+#ifdef CONFIG_PAX_REFCOUNT
87393+typedef struct {
87394+ long counter;
87395+} atomic64_unchecked_t;
87396+#else
87397+typedef atomic64_t atomic64_unchecked_t;
87398+#endif
87399 #endif
87400
87401 struct list_head {
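
atomic_unchecked_t is the escape hatch for PAX_REFCOUNT: with the feature enabled, ordinary atomic_t operations detect and stop overflows (the classic refcount-overflow exploit primitive), so counters that are allowed to wrap, such as statistics and generation numbers, are declared atomic_unchecked_t and manipulated with the *_unchecked helpers. Note that it is a distinct struct rather than a typedef of atomic_t, which makes accidental mixing of the two families a compile error. A sketch of that type-separation property:

/* Two structurally identical but incompatible types, mirroring
 * atomic_t vs. atomic_unchecked_t under CONFIG_PAX_REFCOUNT. */
struct atomic_stub           { int counter; };  /* overflow-checked ops */
struct atomic_unchecked_stub { int counter; };  /* wrap-allowed ops */

int main(void)
{
        struct atomic_stub ref = { 1 };                 /* a refcount */
        struct atomic_unchecked_stub stats = { 0 };     /* a statistic */

        /* ref = stats; */      /* rejected: incompatible struct types */
        (void)ref;
        (void)stats;
        return 0;
}
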
87402diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
87403index ecd3319..8a36ded 100644
87404--- a/include/linux/uaccess.h
87405+++ b/include/linux/uaccess.h
87406@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
87407 long ret; \
87408 mm_segment_t old_fs = get_fs(); \
87409 \
87410- set_fs(KERNEL_DS); \
87411 pagefault_disable(); \
87412- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
87413- pagefault_enable(); \
87414+ set_fs(KERNEL_DS); \
87415+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
87416 set_fs(old_fs); \
87417+ pagefault_enable(); \
87418 ret; \
87419 })
87420
87421diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
87422index 2d1f9b6..d7a9fce 100644
87423--- a/include/linux/uidgid.h
87424+++ b/include/linux/uidgid.h
87425@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
87426
87427 #endif /* CONFIG_USER_NS */
87428
87429+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
87430+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
87431+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
87432+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
87433+
87434 #endif /* _LINUX_UIDGID_H */
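
The GR_GLOBAL_UID/GID helpers map namespaced kuids back into init_user_ns so that grsecurity policy checks and log lines always speak in global IDs, not container-local ones; gr_is_global_root() then gives a namespace-proof root test. A hedged sketch of how the predicates are used, with kuid_t and the mapping machinery reduced to stubs (from_kuid_munged itself is not reproduced here):

#include <stdbool.h>
#include <stdio.h>

typedef struct { int val; } kuid_t;             /* stand-in for the kernel type */
#define GLOBAL_ROOT_UID ((kuid_t){ .val = 0 })

static bool uid_eq(kuid_t a, kuid_t b) { return a.val == b.val; }
#define gr_is_global_root(x)    uid_eq((x), GLOBAL_ROOT_UID)
#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))

int main(void)
{
        kuid_t task_uid = { .val = 1000 };      /* imagine an already-mapped kuid */

        if (gr_is_global_nonroot(task_uid))
                puts("deny: caller is not global root");
        return 0;
}
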
87435diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
87436index 99c1b4d..562e6f3 100644
87437--- a/include/linux/unaligned/access_ok.h
87438+++ b/include/linux/unaligned/access_ok.h
87439@@ -4,34 +4,34 @@
87440 #include <linux/kernel.h>
87441 #include <asm/byteorder.h>
87442
87443-static inline u16 get_unaligned_le16(const void *p)
87444+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
87445 {
87446- return le16_to_cpup((__le16 *)p);
87447+ return le16_to_cpup((const __le16 *)p);
87448 }
87449
87450-static inline u32 get_unaligned_le32(const void *p)
87451+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
87452 {
87453- return le32_to_cpup((__le32 *)p);
87454+ return le32_to_cpup((const __le32 *)p);
87455 }
87456
87457-static inline u64 get_unaligned_le64(const void *p)
87458+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
87459 {
87460- return le64_to_cpup((__le64 *)p);
87461+ return le64_to_cpup((const __le64 *)p);
87462 }
87463
87464-static inline u16 get_unaligned_be16(const void *p)
87465+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
87466 {
87467- return be16_to_cpup((__be16 *)p);
87468+ return be16_to_cpup((const __be16 *)p);
87469 }
87470
87471-static inline u32 get_unaligned_be32(const void *p)
87472+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
87473 {
87474- return be32_to_cpup((__be32 *)p);
87475+ return be32_to_cpup((const __be32 *)p);
87476 }
87477
87478-static inline u64 get_unaligned_be64(const void *p)
87479+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
87480 {
87481- return be64_to_cpup((__be64 *)p);
87482+ return be64_to_cpup((const __be64 *)p);
87483 }
87484
87485 static inline void put_unaligned_le16(u16 val, void *p)
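
__intentional_overflow() is an annotation consumed by the size_overflow GCC plugin: it marks values whose arithmetic is expected to wrap, so the plugin skips instrumenting them instead of flagging a false positive. The byte-reassembly helpers above wrap by design, hence the blanket (-1) marking. When the plugin is not active the attribute must compile away to nothing; a sketch of that fallback shape, with get_le16_stub as a made-up stand-in for get_unaligned_le16():

/* No-op fallback, as when the plugin is off. */
#define __intentional_overflow(...)

static inline unsigned short __intentional_overflow(-1)
get_le16_stub(const void *p)
{
        const unsigned char *b = p;

        /* Reassembling bytes can "overflow" harmlessly; that is the point. */
        return (unsigned short)(b[0] | (b[1] << 8));
}

int main(void)
{
        const unsigned char data[2] = { 0x34, 0x12 };

        return get_le16_stub(data) == 0x1234 ? 0 : 1;
}
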
87486diff --git a/include/linux/usb.h b/include/linux/usb.h
87487index d2465bc..5256de4 100644
87488--- a/include/linux/usb.h
87489+++ b/include/linux/usb.h
87490@@ -571,7 +571,7 @@ struct usb_device {
87491 int maxchild;
87492
87493 u32 quirks;
87494- atomic_t urbnum;
87495+ atomic_unchecked_t urbnum;
87496
87497 unsigned long active_duration;
87498
87499@@ -1655,7 +1655,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
87500
87501 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
87502 __u8 request, __u8 requesttype, __u16 value, __u16 index,
87503- void *data, __u16 size, int timeout);
87504+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
87505 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
87506 void *data, int len, int *actual_length, int timeout);
87507 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
87508diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
87509index e452ba6..78f8e80 100644
87510--- a/include/linux/usb/renesas_usbhs.h
87511+++ b/include/linux/usb/renesas_usbhs.h
87512@@ -39,7 +39,7 @@ enum {
87513 */
87514 struct renesas_usbhs_driver_callback {
87515 int (*notify_hotplug)(struct platform_device *pdev);
87516-};
87517+} __no_const;
87518
87519 /*
87520 * callback functions for platform
87521diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
87522index 4836ba3..603f6ee 100644
87523--- a/include/linux/user_namespace.h
87524+++ b/include/linux/user_namespace.h
87525@@ -33,7 +33,7 @@ struct user_namespace {
87526 struct key *persistent_keyring_register;
87527 struct rw_semaphore persistent_keyring_register_sem;
87528 #endif
87529-};
87530+} __randomize_layout;
87531
87532 extern struct user_namespace init_user_ns;
87533
87534diff --git a/include/linux/utsname.h b/include/linux/utsname.h
87535index 239e277..22a5cf5 100644
87536--- a/include/linux/utsname.h
87537+++ b/include/linux/utsname.h
87538@@ -24,7 +24,7 @@ struct uts_namespace {
87539 struct new_utsname name;
87540 struct user_namespace *user_ns;
87541 unsigned int proc_inum;
87542-};
87543+} __randomize_layout;
87544 extern struct uts_namespace init_uts_ns;
87545
87546 #ifdef CONFIG_UTS_NS
87547diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
87548index 6f8fbcf..4efc177 100644
87549--- a/include/linux/vermagic.h
87550+++ b/include/linux/vermagic.h
87551@@ -25,9 +25,42 @@
87552 #define MODULE_ARCH_VERMAGIC ""
87553 #endif
87554
87555+#ifdef CONFIG_PAX_REFCOUNT
87556+#define MODULE_PAX_REFCOUNT "REFCOUNT "
87557+#else
87558+#define MODULE_PAX_REFCOUNT ""
87559+#endif
87560+
87561+#ifdef CONSTIFY_PLUGIN
87562+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
87563+#else
87564+#define MODULE_CONSTIFY_PLUGIN ""
87565+#endif
87566+
87567+#ifdef STACKLEAK_PLUGIN
87568+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
87569+#else
87570+#define MODULE_STACKLEAK_PLUGIN ""
87571+#endif
87572+
87573+#ifdef RANDSTRUCT_PLUGIN
87574+#include <generated/randomize_layout_hash.h>
87575+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
87576+#else
87577+#define MODULE_RANDSTRUCT_PLUGIN
87578+#endif
87579+
87580+#ifdef CONFIG_GRKERNSEC
87581+#define MODULE_GRSEC "GRSEC "
87582+#else
87583+#define MODULE_GRSEC ""
87584+#endif
87585+
87586 #define VERMAGIC_STRING \
87587 UTS_RELEASE " " \
87588 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
87589 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
87590- MODULE_ARCH_VERMAGIC
87591+ MODULE_ARCH_VERMAGIC \
87592+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
87593+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
87594
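
Extending VERMAGIC_STRING turns feature mismatch into a load-time error: a module built without the same PaX/grsecurity options produces a different vermagic string, so module load fails up front rather than letting ABI differences (changed struct layouts, checked vs. unchecked atomics) corrupt state at runtime. An illustration of the resulting string under one assumed configuration; the token values are examples, not output from a real build:

#include <stdio.h>

#define UTS_RELEASE             "3.16.2-grsec"
#define MODULE_PAX_REFCOUNT     "REFCOUNT "
#define MODULE_CONSTIFY_PLUGIN  "CONSTIFY_PLUGIN "
#define MODULE_GRSEC            "GRSEC "

int main(void)
{
        /* Mirrors the VERMAGIC_STRING paste-up above, abbreviated. */
        puts(UTS_RELEASE " SMP mod_unload "
             MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_GRSEC);
        return 0;
}
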
87595diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
87596index 502073a..a7de024 100644
87597--- a/include/linux/vga_switcheroo.h
87598+++ b/include/linux/vga_switcheroo.h
87599@@ -63,8 +63,8 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
87600
87601 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
87602
87603-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
87604-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
87605+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
87606+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
87607 #else
87608
87609 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
87610@@ -81,8 +81,8 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
87611
87612 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
87613
87614-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
87615-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
87616+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
87617+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
87618
87619 #endif
87620 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
87621diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
87622index 4b8a891..e9a2863 100644
87623--- a/include/linux/vmalloc.h
87624+++ b/include/linux/vmalloc.h
87625@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
87626 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
87627 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
87628 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
87629+
87630+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
87631+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
87632+#endif
87633+
87634 /* bits [20..32] reserved for arch specific ioremap internals */
87635
87636 /*
87637@@ -82,6 +87,10 @@ extern void *vmap(struct page **pages, unsigned int count,
87638 unsigned long flags, pgprot_t prot);
87639 extern void vunmap(const void *addr);
87640
87641+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
87642+extern void unmap_process_stacks(struct task_struct *task);
87643+#endif
87644+
87645 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
87646 unsigned long uaddr, void *kaddr,
87647 unsigned long size);
87648@@ -142,7 +151,7 @@ extern void free_vm_area(struct vm_struct *area);
87649
87650 /* for /dev/kmem */
87651 extern long vread(char *buf, char *addr, unsigned long count);
87652-extern long vwrite(char *buf, char *addr, unsigned long count);
87653+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
87654
87655 /*
87656 * Internals. Don't use..
87657diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
87658index 82e7db7..f8ce3d0 100644
87659--- a/include/linux/vmstat.h
87660+++ b/include/linux/vmstat.h
87661@@ -108,18 +108,18 @@ static inline void vm_events_fold_cpu(int cpu)
87662 /*
87663 * Zone based page accounting with per cpu differentials.
87664 */
87665-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
87666+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
87667
87668 static inline void zone_page_state_add(long x, struct zone *zone,
87669 enum zone_stat_item item)
87670 {
87671- atomic_long_add(x, &zone->vm_stat[item]);
87672- atomic_long_add(x, &vm_stat[item]);
87673+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
87674+ atomic_long_add_unchecked(x, &vm_stat[item]);
87675 }
87676
87677-static inline unsigned long global_page_state(enum zone_stat_item item)
87678+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
87679 {
87680- long x = atomic_long_read(&vm_stat[item]);
87681+ long x = atomic_long_read_unchecked(&vm_stat[item]);
87682 #ifdef CONFIG_SMP
87683 if (x < 0)
87684 x = 0;
87685@@ -127,10 +127,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
87686 return x;
87687 }
87688
87689-static inline unsigned long zone_page_state(struct zone *zone,
87690+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
87691 enum zone_stat_item item)
87692 {
87693- long x = atomic_long_read(&zone->vm_stat[item]);
87694+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
87695 #ifdef CONFIG_SMP
87696 if (x < 0)
87697 x = 0;
87698@@ -147,7 +147,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
87699 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
87700 enum zone_stat_item item)
87701 {
87702- long x = atomic_long_read(&zone->vm_stat[item]);
87703+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
87704
87705 #ifdef CONFIG_SMP
87706 int cpu;
87707@@ -234,14 +234,14 @@ static inline void __mod_zone_page_state(struct zone *zone,
87708
87709 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
87710 {
87711- atomic_long_inc(&zone->vm_stat[item]);
87712- atomic_long_inc(&vm_stat[item]);
87713+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
87714+ atomic_long_inc_unchecked(&vm_stat[item]);
87715 }
87716
87717 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
87718 {
87719- atomic_long_dec(&zone->vm_stat[item]);
87720- atomic_long_dec(&vm_stat[item]);
87721+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
87722+ atomic_long_dec_unchecked(&vm_stat[item]);
87723 }
87724
87725 static inline void __inc_zone_page_state(struct page *page,
87726diff --git a/include/linux/xattr.h b/include/linux/xattr.h
87727index 91b0a68..0e9adf6 100644
87728--- a/include/linux/xattr.h
87729+++ b/include/linux/xattr.h
87730@@ -28,7 +28,7 @@ struct xattr_handler {
87731 size_t size, int handler_flags);
87732 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
87733 size_t size, int flags, int handler_flags);
87734-};
87735+} __do_const;
87736
87737 struct xattr {
87738 const char *name;
87739@@ -37,6 +37,9 @@ struct xattr {
87740 };
87741
87742 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
87743+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
87744+ssize_t pax_getxattr(struct dentry *, void *, size_t);
87745+#endif
87746 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
87747 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
87748 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
87749diff --git a/include/linux/zlib.h b/include/linux/zlib.h
87750index 9c5a6b4..09c9438 100644
87751--- a/include/linux/zlib.h
87752+++ b/include/linux/zlib.h
87753@@ -31,6 +31,7 @@
87754 #define _ZLIB_H
87755
87756 #include <linux/zconf.h>
87757+#include <linux/compiler.h>
87758
87759 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
87760 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
87761@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
87762
87763 /* basic functions */
87764
87765-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
87766+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
87767 /*
87768 Returns the number of bytes that needs to be allocated for a per-
87769 stream workspace with the specified parameters. A pointer to this
87770diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
87771index eec6e46..82d5641 100644
87772--- a/include/media/v4l2-dev.h
87773+++ b/include/media/v4l2-dev.h
87774@@ -77,7 +77,7 @@ struct v4l2_file_operations {
87775 int (*mmap) (struct file *, struct vm_area_struct *);
87776 int (*open) (struct file *);
87777 int (*release) (struct file *);
87778-};
87779+} __do_const;
87780
87781 /*
87782 * Newer version of video_device, handled by videodev2.c
87783diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
87784index ffb69da..040393e 100644
87785--- a/include/media/v4l2-device.h
87786+++ b/include/media/v4l2-device.h
87787@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
87788 this function returns 0. If the name ends with a digit (e.g. cx18),
87789 then the name will be set to cx18-0 since cx180 looks really odd. */
87790 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
87791- atomic_t *instance);
87792+ atomic_unchecked_t *instance);
87793
87794 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
87795 Since the parent disappears this ensures that v4l2_dev doesn't have an
87796diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
87797index d9fa68f..45c88d1 100644
87798--- a/include/net/9p/transport.h
87799+++ b/include/net/9p/transport.h
87800@@ -63,7 +63,7 @@ struct p9_trans_module {
87801 int (*cancelled)(struct p9_client *, struct p9_req_t *req);
87802 int (*zc_request)(struct p9_client *, struct p9_req_t *,
87803 char *, char *, int , int, int, int);
87804-};
87805+} __do_const;
87806
87807 void v9fs_register_trans(struct p9_trans_module *m);
87808 void v9fs_unregister_trans(struct p9_trans_module *m);
87809diff --git a/include/net/af_unix.h b/include/net/af_unix.h
87810index a175ba4..196eb82 100644
87811--- a/include/net/af_unix.h
87812+++ b/include/net/af_unix.h
87813@@ -36,7 +36,7 @@ struct unix_skb_parms {
87814 u32 secid; /* Security ID */
87815 #endif
87816 u32 consumed;
87817-};
87818+} __randomize_layout;
87819
87820 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
87821 #define UNIXSID(skb) (&UNIXCB((skb)).secid)
87822diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
87823index 4abdcb2..945c5cc 100644
87824--- a/include/net/bluetooth/l2cap.h
87825+++ b/include/net/bluetooth/l2cap.h
87826@@ -601,7 +601,7 @@ struct l2cap_ops {
87827 long (*get_sndtimeo) (struct l2cap_chan *chan);
87828 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
87829 unsigned long len, int nb);
87830-};
87831+} __do_const;
87832
87833 struct l2cap_conn {
87834 struct hci_conn *hcon;
87835diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
87836index f2ae33d..c457cf0 100644
87837--- a/include/net/caif/cfctrl.h
87838+++ b/include/net/caif/cfctrl.h
87839@@ -52,7 +52,7 @@ struct cfctrl_rsp {
87840 void (*radioset_rsp)(void);
87841 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
87842 struct cflayer *client_layer);
87843-};
87844+} __no_const;
87845
87846 /* Link Setup Parameters for CAIF-Links. */
87847 struct cfctrl_link_param {
87848@@ -101,8 +101,8 @@ struct cfctrl_request_info {
87849 struct cfctrl {
87850 struct cfsrvl serv;
87851 struct cfctrl_rsp res;
87852- atomic_t req_seq_no;
87853- atomic_t rsp_seq_no;
87854+ atomic_unchecked_t req_seq_no;
87855+ atomic_unchecked_t rsp_seq_no;
87856 struct list_head list;
87857 /* Protects from simultaneous access to first_req list */
87858 spinlock_t info_list_lock;
87859diff --git a/include/net/flow.h b/include/net/flow.h
87860index 8109a15..504466d 100644
87861--- a/include/net/flow.h
87862+++ b/include/net/flow.h
87863@@ -231,6 +231,6 @@ void flow_cache_fini(struct net *net);
87864
87865 void flow_cache_flush(struct net *net);
87866 void flow_cache_flush_deferred(struct net *net);
87867-extern atomic_t flow_cache_genid;
87868+extern atomic_unchecked_t flow_cache_genid;
87869
87870 #endif
87871diff --git a/include/net/genetlink.h b/include/net/genetlink.h
87872index 93695f0..766d71c 100644
87873--- a/include/net/genetlink.h
87874+++ b/include/net/genetlink.h
87875@@ -120,7 +120,7 @@ struct genl_ops {
87876 u8 cmd;
87877 u8 internal_flags;
87878 u8 flags;
87879-};
87880+} __do_const;
87881
87882 int __genl_register_family(struct genl_family *family);
87883
87884diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
87885index 734d9b5..48a9a4b 100644
87886--- a/include/net/gro_cells.h
87887+++ b/include/net/gro_cells.h
87888@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
87889 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
87890
87891 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
87892- atomic_long_inc(&dev->rx_dropped);
87893+ atomic_long_inc_unchecked(&dev->rx_dropped);
87894 kfree_skb(skb);
87895 return;
87896 }
87897diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
87898index 7a43138..bc76865 100644
87899--- a/include/net/inet_connection_sock.h
87900+++ b/include/net/inet_connection_sock.h
87901@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
87902 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
87903 int (*bind_conflict)(const struct sock *sk,
87904 const struct inet_bind_bucket *tb, bool relax);
87905-};
87906+} __do_const;
87907
87908 /** inet_connection_sock - INET connection oriented sock
87909 *
87910diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
87911index 01d590e..f69c61d 100644
87912--- a/include/net/inetpeer.h
87913+++ b/include/net/inetpeer.h
87914@@ -47,7 +47,7 @@ struct inet_peer {
87915 */
87916 union {
87917 struct {
87918- atomic_t rid; /* Frag reception counter */
87919+ atomic_unchecked_t rid; /* Frag reception counter */
87920 };
87921 struct rcu_head rcu;
87922 struct inet_peer *gc_next;
87923diff --git a/include/net/ip.h b/include/net/ip.h
87924index 7596eb2..f7f5fad 100644
87925--- a/include/net/ip.h
87926+++ b/include/net/ip.h
87927@@ -309,7 +309,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
87928 }
87929 }
87930
87931-u32 ip_idents_reserve(u32 hash, int segs);
87932+u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
87933 void __ip_select_ident(struct iphdr *iph, int segs);
87934
87935 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
87936diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
87937index 9922093..a1755d6 100644
87938--- a/include/net/ip_fib.h
87939+++ b/include/net/ip_fib.h
87940@@ -169,7 +169,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
87941
87942 #define FIB_RES_SADDR(net, res) \
87943 ((FIB_RES_NH(res).nh_saddr_genid == \
87944- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
87945+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
87946 FIB_RES_NH(res).nh_saddr : \
87947 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
87948 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
87949diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
87950index 624a8a5..b1e2a24 100644
87951--- a/include/net/ip_vs.h
87952+++ b/include/net/ip_vs.h
87953@@ -558,7 +558,7 @@ struct ip_vs_conn {
87954 struct ip_vs_conn *control; /* Master control connection */
87955 atomic_t n_control; /* Number of controlled ones */
87956 struct ip_vs_dest *dest; /* real server */
87957- atomic_t in_pkts; /* incoming packet counter */
87958+ atomic_unchecked_t in_pkts; /* incoming packet counter */
87959
87960 /* packet transmitter for different forwarding methods. If it
87961 mangles the packet, it must return NF_DROP or better NF_STOLEN,
87962@@ -705,7 +705,7 @@ struct ip_vs_dest {
87963 __be16 port; /* port number of the server */
87964 union nf_inet_addr addr; /* IP address of the server */
87965 volatile unsigned int flags; /* dest status flags */
87966- atomic_t conn_flags; /* flags to copy to conn */
87967+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
87968 atomic_t weight; /* server weight */
87969
87970 atomic_t refcnt; /* reference counter */
87971@@ -960,11 +960,11 @@ struct netns_ipvs {
87972 /* ip_vs_lblc */
87973 int sysctl_lblc_expiration;
87974 struct ctl_table_header *lblc_ctl_header;
87975- struct ctl_table *lblc_ctl_table;
87976+ ctl_table_no_const *lblc_ctl_table;
87977 /* ip_vs_lblcr */
87978 int sysctl_lblcr_expiration;
87979 struct ctl_table_header *lblcr_ctl_header;
87980- struct ctl_table *lblcr_ctl_table;
87981+ ctl_table_no_const *lblcr_ctl_table;
87982 /* ip_vs_est */
87983 struct list_head est_list; /* estimator list */
87984 spinlock_t est_lock;
87985diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
87986index 8d4f588..2e37ad2 100644
87987--- a/include/net/irda/ircomm_tty.h
87988+++ b/include/net/irda/ircomm_tty.h
87989@@ -33,6 +33,7 @@
87990 #include <linux/termios.h>
87991 #include <linux/timer.h>
87992 #include <linux/tty.h> /* struct tty_struct */
87993+#include <asm/local.h>
87994
87995 #include <net/irda/irias_object.h>
87996 #include <net/irda/ircomm_core.h>
87997diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
87998index 714cc9a..ea05f3e 100644
87999--- a/include/net/iucv/af_iucv.h
88000+++ b/include/net/iucv/af_iucv.h
88001@@ -149,7 +149,7 @@ struct iucv_skb_cb {
88002 struct iucv_sock_list {
88003 struct hlist_head head;
88004 rwlock_t lock;
88005- atomic_t autobind_name;
88006+ atomic_unchecked_t autobind_name;
88007 };
88008
88009 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
88010diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
88011index f3be818..bf46196 100644
88012--- a/include/net/llc_c_ac.h
88013+++ b/include/net/llc_c_ac.h
88014@@ -87,7 +87,7 @@
88015 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
88016 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
88017
88018-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
88019+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
88020
88021 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
88022 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
88023diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
88024index 3948cf1..83b28c4 100644
88025--- a/include/net/llc_c_ev.h
88026+++ b/include/net/llc_c_ev.h
88027@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
88028 return (struct llc_conn_state_ev *)skb->cb;
88029 }
88030
88031-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
88032-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
88033+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
88034+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
88035
88036 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
88037 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
88038diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
88039index 0e79cfb..f46db31 100644
88040--- a/include/net/llc_c_st.h
88041+++ b/include/net/llc_c_st.h
88042@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
88043 u8 next_state;
88044 llc_conn_ev_qfyr_t *ev_qualifiers;
88045 llc_conn_action_t *ev_actions;
88046-};
88047+} __do_const;
88048
88049 struct llc_conn_state {
88050 u8 current_state;
88051diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
88052index a61b98c..aade1eb 100644
88053--- a/include/net/llc_s_ac.h
88054+++ b/include/net/llc_s_ac.h
88055@@ -23,7 +23,7 @@
88056 #define SAP_ACT_TEST_IND 9
88057
88058 /* All action functions must look like this */
88059-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
88060+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
88061
88062 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
88063 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
88064diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
88065index 567c681..cd73ac02 100644
88066--- a/include/net/llc_s_st.h
88067+++ b/include/net/llc_s_st.h
88068@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
88069 llc_sap_ev_t ev;
88070 u8 next_state;
88071 llc_sap_action_t *ev_actions;
88072-};
88073+} __do_const;
88074
88075 struct llc_sap_state {
88076 u8 curr_state;
88077diff --git a/include/net/mac80211.h b/include/net/mac80211.h
88078index 421b6ec..5a03729 100644
88079--- a/include/net/mac80211.h
88080+++ b/include/net/mac80211.h
88081@@ -4588,7 +4588,7 @@ struct rate_control_ops {
88082 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
88083
88084 u32 (*get_expected_throughput)(void *priv_sta);
88085-};
88086+} __do_const;
88087
88088 static inline int rate_supported(struct ieee80211_sta *sta,
88089 enum ieee80211_band band,
88090diff --git a/include/net/neighbour.h b/include/net/neighbour.h
88091index 47f4254..fd095bc 100644
88092--- a/include/net/neighbour.h
88093+++ b/include/net/neighbour.h
88094@@ -163,7 +163,7 @@ struct neigh_ops {
88095 void (*error_report)(struct neighbour *, struct sk_buff *);
88096 int (*output)(struct neighbour *, struct sk_buff *);
88097 int (*connected_output)(struct neighbour *, struct sk_buff *);
88098-};
88099+} __do_const;
88100
88101 struct pneigh_entry {
88102 struct pneigh_entry *next;
88103@@ -217,7 +217,7 @@ struct neigh_table {
88104 struct neigh_statistics __percpu *stats;
88105 struct neigh_hash_table __rcu *nht;
88106 struct pneigh_entry **phash_buckets;
88107-};
88108+} __randomize_layout;
88109
88110 static inline int neigh_parms_family(struct neigh_parms *p)
88111 {
88112diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
88113index 361d260..903d15f 100644
88114--- a/include/net/net_namespace.h
88115+++ b/include/net/net_namespace.h
88116@@ -129,8 +129,8 @@ struct net {
88117 struct netns_ipvs *ipvs;
88118 #endif
88119 struct sock *diag_nlsk;
88120- atomic_t fnhe_genid;
88121-};
88122+ atomic_unchecked_t fnhe_genid;
88123+} __randomize_layout;
88124
88125 #include <linux/seq_file_net.h>
88126
88127@@ -286,7 +286,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
88128 #define __net_init __init
88129 #define __net_exit __exit_refok
88130 #define __net_initdata __initdata
88131+#ifdef CONSTIFY_PLUGIN
88132 #define __net_initconst __initconst
88133+#else
88134+#define __net_initconst __initdata
88135+#endif
88136 #endif
88137
88138 struct pernet_operations {
88139@@ -296,7 +300,7 @@ struct pernet_operations {
88140 void (*exit_batch)(struct list_head *net_exit_list);
88141 int *id;
88142 size_t size;
88143-};
88144+} __do_const;
88145
88146 /*
88147 * Use these carefully. If you implement a network device and it
88148@@ -344,23 +348,23 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
88149
88150 static inline int rt_genid_ipv4(struct net *net)
88151 {
88152- return atomic_read(&net->ipv4.rt_genid);
88153+ return atomic_read_unchecked(&net->ipv4.rt_genid);
88154 }
88155
88156 static inline void rt_genid_bump_ipv4(struct net *net)
88157 {
88158- atomic_inc(&net->ipv4.rt_genid);
88159+ atomic_inc_unchecked(&net->ipv4.rt_genid);
88160 }
88161
88162 #if IS_ENABLED(CONFIG_IPV6)
88163 static inline int rt_genid_ipv6(struct net *net)
88164 {
88165- return atomic_read(&net->ipv6.rt_genid);
88166+ return atomic_read_unchecked(&net->ipv6.rt_genid);
88167 }
88168
88169 static inline void rt_genid_bump_ipv6(struct net *net)
88170 {
88171- atomic_inc(&net->ipv6.rt_genid);
88172+ atomic_inc_unchecked(&net->ipv6.rt_genid);
88173 }
88174 #else
88175 static inline int rt_genid_ipv6(struct net *net)
88176@@ -390,12 +394,12 @@ static inline void rt_genid_bump_all(struct net *net)
88177
88178 static inline int fnhe_genid(struct net *net)
88179 {
88180- return atomic_read(&net->fnhe_genid);
88181+ return atomic_read_unchecked(&net->fnhe_genid);
88182 }
88183
88184 static inline void fnhe_genid_bump(struct net *net)
88185 {
88186- atomic_inc(&net->fnhe_genid);
88187+ atomic_inc_unchecked(&net->fnhe_genid);
88188 }
88189
88190 #endif /* __NET_NET_NAMESPACE_H */
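
The rt_genid/fnhe_genid accessors above implement a generation-counter scheme: every cached route records the genid current at creation, and bumping the counter invalidates the whole cache in one atomic increment. The counter's absolute value never matters and wrapping is harmless, which is exactly the profile atomic_unchecked_t exists for. The generic shape, in a userspace sketch with a plain int standing in for the atomic:

#include <stdio.h>

static int rt_genid;            /* stand-in for net->ipv4.rt_genid */

struct cached_route {
        int genid;              /* genid sampled at cache-insert time */
        const char *dst;
};

static int route_is_valid(const struct cached_route *rt)
{
        return rt->genid == rt_genid;   /* stale once the genid moves on */
}

int main(void)
{
        struct cached_route rt = { rt_genid, "192.0.2.1" };

        printf("valid=%d\n", route_is_valid(&rt));
        rt_genid++;                     /* rt_genid_bump_ipv4() shape */
        printf("valid=%d\n", route_is_valid(&rt));
        return 0;
}
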
88191diff --git a/include/net/netdma.h b/include/net/netdma.h
88192index 8ba8ce2..99b7fff 100644
88193--- a/include/net/netdma.h
88194+++ b/include/net/netdma.h
88195@@ -24,7 +24,7 @@
88196 #include <linux/dmaengine.h>
88197 #include <linux/skbuff.h>
88198
88199-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
88200+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
88201 struct sk_buff *skb, int offset, struct iovec *to,
88202 size_t len, struct dma_pinned_list *pinned_list);
88203
88204diff --git a/include/net/netlink.h b/include/net/netlink.h
88205index 2b47eaa..6d5bcc2 100644
88206--- a/include/net/netlink.h
88207+++ b/include/net/netlink.h
88208@@ -521,7 +521,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
88209 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
88210 {
88211 if (mark)
88212- skb_trim(skb, (unsigned char *) mark - skb->data);
88213+ skb_trim(skb, (const unsigned char *) mark - skb->data);
88214 }
88215
88216 /**
88217diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
88218index 773cce3..6a11852 100644
88219--- a/include/net/netns/conntrack.h
88220+++ b/include/net/netns/conntrack.h
88221@@ -13,10 +13,10 @@ struct nf_conntrack_ecache;
88222 struct nf_proto_net {
88223 #ifdef CONFIG_SYSCTL
88224 struct ctl_table_header *ctl_table_header;
88225- struct ctl_table *ctl_table;
88226+ ctl_table_no_const *ctl_table;
88227 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
88228 struct ctl_table_header *ctl_compat_header;
88229- struct ctl_table *ctl_compat_table;
88230+ ctl_table_no_const *ctl_compat_table;
88231 #endif
88232 #endif
88233 unsigned int users;
88234@@ -59,7 +59,7 @@ struct nf_ip_net {
88235 struct nf_icmp_net icmpv6;
88236 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
88237 struct ctl_table_header *ctl_table_header;
88238- struct ctl_table *ctl_table;
88239+ ctl_table_no_const *ctl_table;
88240 #endif
88241 };
88242
88243diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
88244index aec5e12..807233f 100644
88245--- a/include/net/netns/ipv4.h
88246+++ b/include/net/netns/ipv4.h
88247@@ -82,7 +82,7 @@ struct netns_ipv4 {
88248
88249 struct ping_group_range ping_group_range;
88250
88251- atomic_t dev_addr_genid;
88252+ atomic_unchecked_t dev_addr_genid;
88253
88254 #ifdef CONFIG_SYSCTL
88255 unsigned long *sysctl_local_reserved_ports;
88256@@ -96,6 +96,6 @@ struct netns_ipv4 {
88257 struct fib_rules_ops *mr_rules_ops;
88258 #endif
88259 #endif
88260- atomic_t rt_genid;
88261+ atomic_unchecked_t rt_genid;
88262 };
88263 #endif
88264diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
88265index 19d3446..3c87195 100644
88266--- a/include/net/netns/ipv6.h
88267+++ b/include/net/netns/ipv6.h
88268@@ -74,8 +74,8 @@ struct netns_ipv6 {
88269 struct fib_rules_ops *mr6_rules_ops;
88270 #endif
88271 #endif
88272- atomic_t dev_addr_genid;
88273- atomic_t rt_genid;
88274+ atomic_unchecked_t dev_addr_genid;
88275+ atomic_unchecked_t rt_genid;
88276 };
88277
88278 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
88279diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
88280index 3492434..209f58c 100644
88281--- a/include/net/netns/xfrm.h
88282+++ b/include/net/netns/xfrm.h
88283@@ -64,7 +64,7 @@ struct netns_xfrm {
88284
88285 /* flow cache part */
88286 struct flow_cache flow_cache_global;
88287- atomic_t flow_cache_genid;
88288+ atomic_unchecked_t flow_cache_genid;
88289 struct list_head flow_cache_gc_list;
88290 spinlock_t flow_cache_gc_lock;
88291 struct work_struct flow_cache_gc_work;
88292diff --git a/include/net/ping.h b/include/net/ping.h
88293index 026479b..d9b2829 100644
88294--- a/include/net/ping.h
88295+++ b/include/net/ping.h
88296@@ -54,7 +54,7 @@ struct ping_iter_state {
88297
88298 extern struct proto ping_prot;
88299 #if IS_ENABLED(CONFIG_IPV6)
88300-extern struct pingv6_ops pingv6_ops;
88301+extern struct pingv6_ops *pingv6_ops;
88302 #endif
88303
88304 struct pingfakehdr {
88305diff --git a/include/net/protocol.h b/include/net/protocol.h
88306index d6fcc1f..ca277058 100644
88307--- a/include/net/protocol.h
88308+++ b/include/net/protocol.h
88309@@ -49,7 +49,7 @@ struct net_protocol {
88310 * socket lookup?
88311 */
88312 icmp_strict_tag_validation:1;
88313-};
88314+} __do_const;
88315
88316 #if IS_ENABLED(CONFIG_IPV6)
88317 struct inet6_protocol {
88318@@ -62,7 +62,7 @@ struct inet6_protocol {
88319 u8 type, u8 code, int offset,
88320 __be32 info);
88321 unsigned int flags; /* INET6_PROTO_xxx */
88322-};
88323+} __do_const;
88324
88325 #define INET6_PROTO_NOPOLICY 0x1
88326 #define INET6_PROTO_FINAL 0x2
88327diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
88328index 72240e5..8c14bef 100644
88329--- a/include/net/rtnetlink.h
88330+++ b/include/net/rtnetlink.h
88331@@ -93,7 +93,7 @@ struct rtnl_link_ops {
88332 int (*fill_slave_info)(struct sk_buff *skb,
88333 const struct net_device *dev,
88334 const struct net_device *slave_dev);
88335-};
88336+} __do_const;
88337
88338 int __rtnl_link_register(struct rtnl_link_ops *ops);
88339 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
88340diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
88341index 4a5b9a3..ca27d73 100644
88342--- a/include/net/sctp/checksum.h
88343+++ b/include/net/sctp/checksum.h
88344@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
88345 unsigned int offset)
88346 {
88347 struct sctphdr *sh = sctp_hdr(skb);
88348- __le32 ret, old = sh->checksum;
88349- const struct skb_checksum_ops ops = {
88350+ __le32 ret, old = sh->checksum;
88351+ static const struct skb_checksum_ops ops = {
88352 .update = sctp_csum_update,
88353 .combine = sctp_csum_combine,
88354 };
88355diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
88356index 7f4eeb3..37e8fe1 100644
88357--- a/include/net/sctp/sm.h
88358+++ b/include/net/sctp/sm.h
88359@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
88360 typedef struct {
88361 sctp_state_fn_t *fn;
88362 const char *name;
88363-} sctp_sm_table_entry_t;
88364+} __do_const sctp_sm_table_entry_t;
88365
88366 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
88367 * currently in use.
88368@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
88369 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
88370
88371 /* Extern declarations for major data structures. */
88372-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
88373+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
88374
88375
88376 /* Get the size of a DATA chunk payload. */
88377diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
88378index f38588bf..94c1795 100644
88379--- a/include/net/sctp/structs.h
88380+++ b/include/net/sctp/structs.h
88381@@ -507,7 +507,7 @@ struct sctp_pf {
88382 struct sctp_association *asoc);
88383 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
88384 struct sctp_af *af;
88385-};
88386+} __do_const;
88387
88388
88389 /* Structure to track chunk fragments that have been acked, but peer
88390diff --git a/include/net/sock.h b/include/net/sock.h
88391index 1563507..20d5d0e 100644
88392--- a/include/net/sock.h
88393+++ b/include/net/sock.h
88394@@ -349,7 +349,7 @@ struct sock {
88395 unsigned int sk_napi_id;
88396 unsigned int sk_ll_usec;
88397 #endif
88398- atomic_t sk_drops;
88399+ atomic_unchecked_t sk_drops;
88400 int sk_rcvbuf;
88401
88402 struct sk_filter __rcu *sk_filter;
88403@@ -1038,7 +1038,7 @@ struct proto {
88404 void (*destroy_cgroup)(struct mem_cgroup *memcg);
88405 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
88406 #endif
88407-};
88408+} __randomize_layout;
88409
88410 /*
88411 * Bits in struct cg_proto.flags
88412@@ -1225,7 +1225,7 @@ static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
88413 return ret >> PAGE_SHIFT;
88414 }
88415
88416-static inline long
88417+static inline long __intentional_overflow(-1)
88418 sk_memory_allocated(const struct sock *sk)
88419 {
88420 struct proto *prot = sk->sk_prot;
88421@@ -1370,7 +1370,7 @@ struct sock_iocb {
88422 struct scm_cookie *scm;
88423 struct msghdr *msg, async_msg;
88424 struct kiocb *kiocb;
88425-};
88426+} __randomize_layout;
88427
88428 static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
88429 {
88430@@ -1623,6 +1623,33 @@ void sk_common_release(struct sock *sk);
88431 /* Initialise core socket variables */
88432 void sock_init_data(struct socket *sock, struct sock *sk);
88433
88434+void sk_filter_release_rcu(struct rcu_head *rcu);
88435+
88436+/**
88437+ * sk_filter_release - release a socket filter
88438+ * @fp: filter to remove
88439+ *
88440+ * Remove a filter from a socket and release its resources.
88441+ */
88442+
88443+static inline void sk_filter_release(struct sk_filter *fp)
88444+{
88445+ if (atomic_dec_and_test(&fp->refcnt))
88446+ call_rcu(&fp->rcu, sk_filter_release_rcu);
88447+}
88448+
88449+static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
88450+{
88451+ atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
88452+ sk_filter_release(fp);
88453+}
88454+
88455+static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
88456+{
88457+ atomic_inc(&fp->refcnt);
88458+ atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
88459+}
88460+
88461 /*
88462 * Socket reference counting postulates.
88463 *
88464@@ -1805,7 +1832,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
88465 }
88466
88467 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
88468- char __user *from, char *to,
88469+ char __user *from, unsigned char *to,
88470 int copy, int offset)
88471 {
88472 if (skb->ip_summed == CHECKSUM_NONE) {
88473@@ -2067,7 +2094,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
88474 }
88475 }
88476
88477-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
88478+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
88479
88480 /**
88481 * sk_page_frag - return an appropriate page_frag
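
The sk_filter helpers added above keep the classic refcount-plus-deferred-free lifetime: sk_filter_charge() takes a reference and accounts the filter's memory against the socket, sk_filter_release() drops a reference, and the final drop hands the object to call_rcu() so concurrent RCU readers finish before the memory goes away. The same shape in miniature, with an immediate free standing in for the RCU-deferred one:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct filter {
        atomic_int refcnt;
};

static void filter_free(struct filter *fp)      /* sk_filter_release_rcu role */
{
        puts("freeing filter");
        free(fp);
}

static void filter_release(struct filter *fp)
{
        /* atomic_dec_and_test() shape: free only on the last reference */
        if (atomic_fetch_sub(&fp->refcnt, 1) == 1)
                filter_free(fp);
}

int main(void)
{
        struct filter *fp = malloc(sizeof(*fp));

        if (!fp)
                return 1;
        atomic_store(&fp->refcnt, 1);
        atomic_fetch_add(&fp->refcnt, 1);       /* sk_filter_charge() shape */
        filter_release(fp);                     /* one reference still held */
        filter_release(fp);                     /* last reference: frees */
        return 0;
}
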
88482diff --git a/include/net/tcp.h b/include/net/tcp.h
88483index 7286db8..f1aa7dc 100644
88484--- a/include/net/tcp.h
88485+++ b/include/net/tcp.h
88486@@ -535,7 +535,7 @@ void tcp_retransmit_timer(struct sock *sk);
88487 void tcp_xmit_retransmit_queue(struct sock *);
88488 void tcp_simple_retransmit(struct sock *);
88489 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
88490-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
88491+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
88492
88493 void tcp_send_probe0(struct sock *);
88494 void tcp_send_partial(struct sock *);
88495@@ -708,8 +708,8 @@ struct tcp_skb_cb {
88496 struct inet6_skb_parm h6;
88497 #endif
88498 } header; /* For incoming frames */
88499- __u32 seq; /* Starting sequence number */
88500- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
88501+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
88502+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
88503 __u32 when; /* used to compute rtt's */
88504 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
88505
88506@@ -723,7 +723,7 @@ struct tcp_skb_cb {
88507
88508 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
88509 /* 1 byte hole */
88510- __u32 ack_seq; /* Sequence number ACK'd */
88511+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
88512 };
88513
88514 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
88515diff --git a/include/net/xfrm.h b/include/net/xfrm.h
88516index 721e9c3b..3c81bbf 100644
88517--- a/include/net/xfrm.h
88518+++ b/include/net/xfrm.h
88519@@ -285,7 +285,6 @@ struct xfrm_dst;
88520 struct xfrm_policy_afinfo {
88521 unsigned short family;
88522 struct dst_ops *dst_ops;
88523- void (*garbage_collect)(struct net *net);
88524 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
88525 const xfrm_address_t *saddr,
88526 const xfrm_address_t *daddr);
88527@@ -303,7 +302,7 @@ struct xfrm_policy_afinfo {
88528 struct net_device *dev,
88529 const struct flowi *fl);
88530 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
88531-};
88532+} __do_const;
88533
88534 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
88535 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
88536@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
88537 int (*transport_finish)(struct sk_buff *skb,
88538 int async);
88539 void (*local_error)(struct sk_buff *skb, u32 mtu);
88540-};
88541+} __do_const;
88542
88543 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
88544 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
88545@@ -437,7 +436,7 @@ struct xfrm_mode {
88546 struct module *owner;
88547 unsigned int encap;
88548 int flags;
88549-};
88550+} __do_const;
88551
88552 /* Flags for xfrm_mode. */
88553 enum {
88554@@ -534,7 +533,7 @@ struct xfrm_policy {
88555 struct timer_list timer;
88556
88557 struct flow_cache_object flo;
88558- atomic_t genid;
88559+ atomic_unchecked_t genid;
88560 u32 priority;
88561 u32 index;
88562 struct xfrm_mark mark;
88563@@ -1167,6 +1166,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
88564 }
88565
88566 void xfrm_garbage_collect(struct net *net);
88567+void xfrm_garbage_collect_deferred(struct net *net);
88568
88569 #else
88570
88571@@ -1205,6 +1205,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
88572 static inline void xfrm_garbage_collect(struct net *net)
88573 {
88574 }
88575+static inline void xfrm_garbage_collect_deferred(struct net *net)
88576+{
88577+}
88578 #endif
88579
88580 static __inline__
88581diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
88582index 1017e0b..227aa4d 100644
88583--- a/include/rdma/iw_cm.h
88584+++ b/include/rdma/iw_cm.h
88585@@ -122,7 +122,7 @@ struct iw_cm_verbs {
88586 int backlog);
88587
88588 int (*destroy_listen)(struct iw_cm_id *cm_id);
88589-};
88590+} __no_const;
88591
88592 /**
88593 * iw_create_cm_id - Create an IW CM identifier.
88594diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
88595index 52beadf..598734c 100644
88596--- a/include/scsi/libfc.h
88597+++ b/include/scsi/libfc.h
88598@@ -771,6 +771,7 @@ struct libfc_function_template {
88599 */
88600 void (*disc_stop_final) (struct fc_lport *);
88601 };
88602+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
88603
88604 /**
88605 * struct fc_disc - Discovery context
88606@@ -875,7 +876,7 @@ struct fc_lport {
88607 struct fc_vport *vport;
88608
88609 /* Operational Information */
88610- struct libfc_function_template tt;
88611+ libfc_function_template_no_const tt;
88612 u8 link_up;
88613 u8 qfull;
88614 enum fc_lport_state state;
88615diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
88616index 27ab310..60dc245 100644
88617--- a/include/scsi/scsi_device.h
88618+++ b/include/scsi/scsi_device.h
88619@@ -187,9 +187,9 @@ struct scsi_device {
88620 unsigned int max_device_blocked; /* what device_blocked counts down from */
88621 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
88622
88623- atomic_t iorequest_cnt;
88624- atomic_t iodone_cnt;
88625- atomic_t ioerr_cnt;
88626+ atomic_unchecked_t iorequest_cnt;
88627+ atomic_unchecked_t iodone_cnt;
88628+ atomic_unchecked_t ioerr_cnt;
88629
88630 struct device sdev_gendev,
88631 sdev_dev;
88632diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
88633index 8c79980..723f6f9 100644
88634--- a/include/scsi/scsi_transport_fc.h
88635+++ b/include/scsi/scsi_transport_fc.h
88636@@ -752,7 +752,8 @@ struct fc_function_template {
88637 unsigned long show_host_system_hostname:1;
88638
88639 unsigned long disable_target_scan:1;
88640-};
88641+} __do_const;
88642+typedef struct fc_function_template __no_const fc_function_template_no_const;
88643
88644
88645 /**
88646diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
88647index ae6c3b8..fd748ac 100644
88648--- a/include/sound/compress_driver.h
88649+++ b/include/sound/compress_driver.h
88650@@ -128,7 +128,7 @@ struct snd_compr_ops {
88651 struct snd_compr_caps *caps);
88652 int (*get_codec_caps) (struct snd_compr_stream *stream,
88653 struct snd_compr_codec_caps *codec);
88654-};
88655+} __no_const;
88656
88657 /**
88658 * struct snd_compr: Compressed device
88659diff --git a/include/sound/soc.h b/include/sound/soc.h
88660index ed9e2d7..aad0887 100644
88661--- a/include/sound/soc.h
88662+++ b/include/sound/soc.h
88663@@ -798,7 +798,7 @@ struct snd_soc_codec_driver {
88664 /* probe ordering - for components with runtime dependencies */
88665 int probe_order;
88666 int remove_order;
88667-};
88668+} __do_const;
88669
88670 /* SoC platform interface */
88671 struct snd_soc_platform_driver {
88672@@ -845,7 +845,7 @@ struct snd_soc_platform_driver {
88673 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
88674 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
88675 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
88676-};
88677+} __do_const;
88678
88679 struct snd_soc_platform {
88680 const char *name;
88681diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
88682index 9ec9864..e2ee1ee 100644
88683--- a/include/target/target_core_base.h
88684+++ b/include/target/target_core_base.h
88685@@ -761,7 +761,7 @@ struct se_device {
88686 atomic_long_t write_bytes;
88687 /* Active commands on this virtual SE device */
88688 atomic_t simple_cmds;
88689- atomic_t dev_ordered_id;
88690+ atomic_unchecked_t dev_ordered_id;
88691 atomic_t dev_ordered_sync;
88692 atomic_t dev_qf_count;
88693 int export_count;
88694diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
88695new file mode 100644
88696index 0000000..fb634b7
88697--- /dev/null
88698+++ b/include/trace/events/fs.h
88699@@ -0,0 +1,53 @@
88700+#undef TRACE_SYSTEM
88701+#define TRACE_SYSTEM fs
88702+
88703+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
88704+#define _TRACE_FS_H
88705+
88706+#include <linux/fs.h>
88707+#include <linux/tracepoint.h>
88708+
88709+TRACE_EVENT(do_sys_open,
88710+
88711+ TP_PROTO(const char *filename, int flags, int mode),
88712+
88713+ TP_ARGS(filename, flags, mode),
88714+
88715+ TP_STRUCT__entry(
88716+ __string( filename, filename )
88717+ __field( int, flags )
88718+ __field( int, mode )
88719+ ),
88720+
88721+ TP_fast_assign(
88722+ __assign_str(filename, filename);
88723+ __entry->flags = flags;
88724+ __entry->mode = mode;
88725+ ),
88726+
88727+ TP_printk("\"%s\" %x %o",
88728+ __get_str(filename), __entry->flags, __entry->mode)
88729+);
88730+
88731+TRACE_EVENT(open_exec,
88732+
88733+ TP_PROTO(const char *filename),
88734+
88735+ TP_ARGS(filename),
88736+
88737+ TP_STRUCT__entry(
88738+ __string( filename, filename )
88739+ ),
88740+
88741+ TP_fast_assign(
88742+ __assign_str(filename, filename);
88743+ ),
88744+
88745+ TP_printk("\"%s\"",
88746+ __get_str(filename))
88747+);
88748+
88749+#endif /* _TRACE_FS_H */
88750+
88751+/* This part must be outside protection */
88752+#include <trace/define_trace.h>
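
The two TRACE_EVENT() blocks in this new header generate trace_do_sys_open() and trace_open_exec() tracepoints, which the rest of the patch can hook for open/exec auditing. A call site would look roughly like the stand-in below, where printf mimics the TP_printk() format string; the real hook is produced by the tracepoint machinery, not written by hand:

#include <stdio.h>

/* Stand-in for the macro-generated trace_do_sys_open() hook. */
static void trace_do_sys_open(const char *filename, int flags, int mode)
{
        printf("\"%s\" %x %o\n", filename, flags, mode);
}

int main(void)
{
        /* e.g. fired from do_sys_open() once the filename is resolved */
        trace_do_sys_open("/etc/passwd", 0 /* O_RDONLY */, 0);
        return 0;
}
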
88753diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
88754index 1c09820..7f5ec79 100644
88755--- a/include/trace/events/irq.h
88756+++ b/include/trace/events/irq.h
88757@@ -36,7 +36,7 @@ struct softirq_action;
88758 */
88759 TRACE_EVENT(irq_handler_entry,
88760
88761- TP_PROTO(int irq, struct irqaction *action),
88762+ TP_PROTO(int irq, const struct irqaction *action),
88763
88764 TP_ARGS(irq, action),
88765
88766@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
88767 */
88768 TRACE_EVENT(irq_handler_exit,
88769
88770- TP_PROTO(int irq, struct irqaction *action, int ret),
88771+ TP_PROTO(int irq, const struct irqaction *action, int ret),
88772
88773 TP_ARGS(irq, action, ret),
88774
88775diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
88776index 7caf44c..23c6f27 100644
88777--- a/include/uapi/linux/a.out.h
88778+++ b/include/uapi/linux/a.out.h
88779@@ -39,6 +39,14 @@ enum machine_type {
88780 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
88781 };
88782
88783+/* Constants for the N_FLAGS field */
88784+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
88785+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
88786+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
88787+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
88788+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
88789+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
88790+
88791 #if !defined (N_MAGIC)
88792 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
88793 #endif
88794diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
88795index 22b6ad3..aeba37e 100644
88796--- a/include/uapi/linux/bcache.h
88797+++ b/include/uapi/linux/bcache.h
88798@@ -5,6 +5,7 @@
88799 * Bcache on disk data structures
88800 */
88801
88802+#include <linux/compiler.h>
88803 #include <asm/types.h>
88804
88805 #define BITMASK(name, type, field, offset, size) \
88806@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \
88807 /* Btree keys - all units are in sectors */
88808
88809 struct bkey {
88810- __u64 high;
88811- __u64 low;
88812+ __u64 high __intentional_overflow(-1);
88813+ __u64 low __intentional_overflow(-1);
88814 __u64 ptr[];
88815 };
88816
88817diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
88818index d876736..ccce5c0 100644
88819--- a/include/uapi/linux/byteorder/little_endian.h
88820+++ b/include/uapi/linux/byteorder/little_endian.h
88821@@ -42,51 +42,51 @@
88822
88823 static inline __le64 __cpu_to_le64p(const __u64 *p)
88824 {
88825- return (__force __le64)*p;
88826+ return (__force const __le64)*p;
88827 }
88828-static inline __u64 __le64_to_cpup(const __le64 *p)
88829+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
88830 {
88831- return (__force __u64)*p;
88832+ return (__force const __u64)*p;
88833 }
88834 static inline __le32 __cpu_to_le32p(const __u32 *p)
88835 {
88836- return (__force __le32)*p;
88837+ return (__force const __le32)*p;
88838 }
88839 static inline __u32 __le32_to_cpup(const __le32 *p)
88840 {
88841- return (__force __u32)*p;
88842+ return (__force const __u32)*p;
88843 }
88844 static inline __le16 __cpu_to_le16p(const __u16 *p)
88845 {
88846- return (__force __le16)*p;
88847+ return (__force const __le16)*p;
88848 }
88849 static inline __u16 __le16_to_cpup(const __le16 *p)
88850 {
88851- return (__force __u16)*p;
88852+ return (__force const __u16)*p;
88853 }
88854 static inline __be64 __cpu_to_be64p(const __u64 *p)
88855 {
88856- return (__force __be64)__swab64p(p);
88857+ return (__force const __be64)__swab64p(p);
88858 }
88859 static inline __u64 __be64_to_cpup(const __be64 *p)
88860 {
88861- return __swab64p((__u64 *)p);
88862+ return __swab64p((const __u64 *)p);
88863 }
88864 static inline __be32 __cpu_to_be32p(const __u32 *p)
88865 {
88866- return (__force __be32)__swab32p(p);
88867+ return (__force const __be32)__swab32p(p);
88868 }
88869-static inline __u32 __be32_to_cpup(const __be32 *p)
88870+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
88871 {
88872- return __swab32p((__u32 *)p);
88873+ return __swab32p((const __u32 *)p);
88874 }
88875 static inline __be16 __cpu_to_be16p(const __u16 *p)
88876 {
88877- return (__force __be16)__swab16p(p);
88878+ return (__force const __be16)__swab16p(p);
88879 }
88880 static inline __u16 __be16_to_cpup(const __be16 *p)
88881 {
88882- return __swab16p((__u16 *)p);
88883+ return __swab16p((const __u16 *)p);
88884 }
88885 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
88886 #define __le64_to_cpus(x) do { (void)(x); } while (0)
88887diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
88888index ef6103b..d4e65dd 100644
88889--- a/include/uapi/linux/elf.h
88890+++ b/include/uapi/linux/elf.h
88891@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
88892 #define PT_GNU_EH_FRAME 0x6474e550
88893
88894 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
88895+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
88896+
88897+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
88898+
88899+/* Constants for the e_flags field */
88900+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
88901+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
88902+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
88903+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
88904+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
88905+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
88906
88907 /*
88908 * Extended Numbering
88909@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
88910 #define DT_DEBUG 21
88911 #define DT_TEXTREL 22
88912 #define DT_JMPREL 23
88913+#define DT_FLAGS 30
88914+ #define DF_TEXTREL 0x00000004
88915 #define DT_ENCODING 32
88916 #define OLD_DT_LOOS 0x60000000
88917 #define DT_LOOS 0x6000000d
88918@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
88919 #define PF_W 0x2
88920 #define PF_X 0x1
88921
88922+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
88923+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
88924+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
88925+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
88926+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
88927+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
88928+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
88929+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
88930+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
88931+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
88932+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
88933+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
88934+
88935 typedef struct elf32_phdr{
88936 Elf32_Word p_type;
88937 Elf32_Off p_offset;
88938@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
88939 #define EI_OSABI 7
88940 #define EI_PAD 8
88941
88942+#define EI_PAX 14
88943+
88944 #define ELFMAG0 0x7f /* EI_MAG */
88945 #define ELFMAG1 'E'
88946 #define ELFMAG2 'L'
88947diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
88948index aa169c4..6a2771d 100644
88949--- a/include/uapi/linux/personality.h
88950+++ b/include/uapi/linux/personality.h
88951@@ -30,6 +30,7 @@ enum {
88952 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
88953 ADDR_NO_RANDOMIZE | \
88954 ADDR_COMPAT_LAYOUT | \
88955+ ADDR_LIMIT_3GB | \
88956 MMAP_PAGE_ZERO)
88957
88958 /*
88959diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
88960index 7530e74..e714828 100644
88961--- a/include/uapi/linux/screen_info.h
88962+++ b/include/uapi/linux/screen_info.h
88963@@ -43,7 +43,8 @@ struct screen_info {
88964 __u16 pages; /* 0x32 */
88965 __u16 vesa_attributes; /* 0x34 */
88966 __u32 capabilities; /* 0x36 */
88967- __u8 _reserved[6]; /* 0x3a */
88968+ __u16 vesapm_size; /* 0x3a */
88969+ __u8 _reserved[4]; /* 0x3c */
88970 } __attribute__((packed));
88971
88972 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
88973diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
88974index 0e011eb..82681b1 100644
88975--- a/include/uapi/linux/swab.h
88976+++ b/include/uapi/linux/swab.h
88977@@ -43,7 +43,7 @@
88978 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
88979 */
88980
88981-static inline __attribute_const__ __u16 __fswab16(__u16 val)
88982+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
88983 {
88984 #ifdef __HAVE_BUILTIN_BSWAP16__
88985 return __builtin_bswap16(val);
88986@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
88987 #endif
88988 }
88989
88990-static inline __attribute_const__ __u32 __fswab32(__u32 val)
88991+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
88992 {
88993 #ifdef __HAVE_BUILTIN_BSWAP32__
88994 return __builtin_bswap32(val);
88995@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
88996 #endif
88997 }
88998
88999-static inline __attribute_const__ __u64 __fswab64(__u64 val)
89000+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
89001 {
89002 #ifdef __HAVE_BUILTIN_BSWAP64__
89003 return __builtin_bswap64(val);
89004diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
89005index 6d67213..552fdd9 100644
89006--- a/include/uapi/linux/sysctl.h
89007+++ b/include/uapi/linux/sysctl.h
89008@@ -155,8 +155,6 @@ enum
89009 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
89010 };
89011
89012-
89013-
89014 /* CTL_VM names: */
89015 enum
89016 {
89017diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
89018index 168ff50..a921df2 100644
89019--- a/include/uapi/linux/videodev2.h
89020+++ b/include/uapi/linux/videodev2.h
89021@@ -1253,7 +1253,7 @@ struct v4l2_ext_control {
89022 union {
89023 __s32 value;
89024 __s64 value64;
89025- char *string;
89026+ char __user *string;
89027 };
89028 } __attribute__ ((packed));
89029
89030diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
89031index c38355c..17a57bc 100644
89032--- a/include/uapi/linux/xattr.h
89033+++ b/include/uapi/linux/xattr.h
89034@@ -73,5 +73,9 @@
89035 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
89036 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
89037
89038+/* User namespace */
89039+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
89040+#define XATTR_PAX_FLAGS_SUFFIX "flags"
89041+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
89042
89043 #endif /* _UAPI_LINUX_XATTR_H */
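[Note on the xattr hunk above: it builds the name PaX-aware tools use to mark binaries. XATTR_PAX_PREFIX is XATTR_USER_PREFIX ("user.") plus "pax.", so XATTR_NAME_PAX_FLAGS expands to "user.pax.flags". A minimal userspace sketch of writing and reading that attribute follows; the target path and the paxctl-ng-style flag string are placeholders, not from the patch.]

/* Illustrative only: set/read the PaX flags xattr on a binary. The full
 * name "user.pax.flags" matches XATTR_NAME_PAX_FLAGS from the hunk above;
 * the path and flag letters below are assumptions for the example. */
#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(void)
{
    const char *path  = "/usr/local/bin/example";  /* hypothetical binary */
    const char *flags = "m";                       /* placeholder flag string */

    if (setxattr(path, "user.pax.flags", flags, strlen(flags), 0) != 0) {
        perror("setxattr");
        return 1;
    }

    char buf[16] = "";
    ssize_t n = getxattr(path, "user.pax.flags", buf, sizeof(buf) - 1);
    if (n >= 0)
        printf("user.pax.flags = %.*s\n", (int)n, buf);
    return 0;
}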
89044diff --git a/include/video/udlfb.h b/include/video/udlfb.h
89045index f9466fa..f4e2b81 100644
89046--- a/include/video/udlfb.h
89047+++ b/include/video/udlfb.h
89048@@ -53,10 +53,10 @@ struct dlfb_data {
89049 u32 pseudo_palette[256];
89050 int blank_mode; /*one of FB_BLANK_ */
89051 /* blit-only rendering path metrics, exposed through sysfs */
89052- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
89053- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
89054- atomic_t bytes_sent; /* to usb, after compression including overhead */
89055- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
89056+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
89057+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
89058+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
89059+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
89060 };
89061
89062 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
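[Note on the udlfb hunk above: these counters are pure statistics, where wraparound is harmless. grsecurity's REFCOUNT hardening traps overflow of plain atomic_t, so statistics counters throughout this patch move to the parallel atomic_unchecked_t API, which opts out of the trap. A runnable userspace analogue of the distinction follows; the kernel types themselves are grsecurity-specific and not shown here.]

/* Userspace analogue: a statistics counter is allowed to wrap, so plain
 * modular arithmetic is fine; a reference count would instead need an
 * overflow trap. atomic_unchecked_t in the patch is the "allowed to
 * wrap" variant of atomic_t. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint bytes_rendered;      /* stats: wraparound is harmless */

static void account_render(unsigned int len)
{
    atomic_fetch_add_explicit(&bytes_rendered, len, memory_order_relaxed);
}

int main(void)
{
    account_render(4096);
    account_render(512);
    printf("rendered: %u bytes\n",
           atomic_load_explicit(&bytes_rendered, memory_order_relaxed));
    return 0;
}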
89063diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
89064index 30f5362..8ed8ac9 100644
89065--- a/include/video/uvesafb.h
89066+++ b/include/video/uvesafb.h
89067@@ -122,6 +122,7 @@ struct uvesafb_par {
89068 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
89069 u8 pmi_setpal; /* PMI for palette changes */
89070 u16 *pmi_base; /* protected mode interface location */
89071+ u8 *pmi_code; /* protected mode code location */
89072 void *pmi_start;
89073 void *pmi_pal;
89074 u8 *vbe_state_orig; /*
89075diff --git a/init/Kconfig b/init/Kconfig
89076index 9d76b99..d378b1e 100644
89077--- a/init/Kconfig
89078+++ b/init/Kconfig
89079@@ -1105,6 +1105,7 @@ endif # CGROUPS
89080
89081 config CHECKPOINT_RESTORE
89082 bool "Checkpoint/restore support" if EXPERT
89083+ depends on !GRKERNSEC
89084 default n
89085 help
89086 Enables additional kernel features in a sake of checkpoint/restore.
89087@@ -1589,7 +1590,7 @@ config SLUB_DEBUG
89088
89089 config COMPAT_BRK
89090 bool "Disable heap randomization"
89091- default y
89092+ default n
89093 help
89094 Randomizing heap placement makes heap exploits harder, but it
89095 also breaks ancient binaries (including anything libc5 based).
89096@@ -1877,7 +1878,7 @@ config INIT_ALL_POSSIBLE
89097 config STOP_MACHINE
89098 bool
89099 default y
89100- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
89101+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
89102 help
89103 Need stop_machine() primitive.
89104
89105diff --git a/init/Makefile b/init/Makefile
89106index 7bc47ee..6da2dc7 100644
89107--- a/init/Makefile
89108+++ b/init/Makefile
89109@@ -2,6 +2,9 @@
89110 # Makefile for the linux kernel.
89111 #
89112
89113+ccflags-y := $(GCC_PLUGINS_CFLAGS)
89114+asflags-y := $(GCC_PLUGINS_AFLAGS)
89115+
89116 obj-y := main.o version.o mounts.o
89117 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
89118 obj-y += noinitramfs.o
89119diff --git a/init/do_mounts.c b/init/do_mounts.c
89120index 82f2288..ea1430a 100644
89121--- a/init/do_mounts.c
89122+++ b/init/do_mounts.c
89123@@ -359,11 +359,11 @@ static void __init get_fs_names(char *page)
89124 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
89125 {
89126 struct super_block *s;
89127- int err = sys_mount(name, "/root", fs, flags, data);
89128+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
89129 if (err)
89130 return err;
89131
89132- sys_chdir("/root");
89133+ sys_chdir((const char __force_user *)"/root");
89134 s = current->fs->pwd.dentry->d_sb;
89135 ROOT_DEV = s->s_dev;
89136 printk(KERN_INFO
89137@@ -484,18 +484,18 @@ void __init change_floppy(char *fmt, ...)
89138 va_start(args, fmt);
89139 vsprintf(buf, fmt, args);
89140 va_end(args);
89141- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
89142+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
89143 if (fd >= 0) {
89144 sys_ioctl(fd, FDEJECT, 0);
89145 sys_close(fd);
89146 }
89147 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
89148- fd = sys_open("/dev/console", O_RDWR, 0);
89149+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
89150 if (fd >= 0) {
89151 sys_ioctl(fd, TCGETS, (long)&termios);
89152 termios.c_lflag &= ~ICANON;
89153 sys_ioctl(fd, TCSETSF, (long)&termios);
89154- sys_read(fd, &c, 1);
89155+ sys_read(fd, (char __user *)&c, 1);
89156 termios.c_lflag |= ICANON;
89157 sys_ioctl(fd, TCSETSF, (long)&termios);
89158 sys_close(fd);
89159@@ -589,8 +589,8 @@ void __init prepare_namespace(void)
89160 mount_root();
89161 out:
89162 devtmpfs_mount("dev");
89163- sys_mount(".", "/", NULL, MS_MOVE, NULL);
89164- sys_chroot(".");
89165+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
89166+ sys_chroot((const char __force_user *)".");
89167 }
89168
89169 static bool is_tmpfs;
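[Note on the do_mounts.c hunks above: the casts exist because these early-boot helpers call syscall bodies whose parameters are declared __user, while the strings actually live in kernel space; __force (spelled __force_user in this patch) tells sparse the address-space crossing is deliberate, and under PaX UDEREF the distinction is enforced at runtime. A self-contained sketch of how the annotations behave follows; the macro definitions mirror sparse's convention of placing __user in address space 1, and expand to nothing for a normal compile.]

/* How sparse address-space annotations behave: __user tags a pointer as
 * belonging to address space 1; mixing spaces without __force is flagged
 * when the file is checked with `sparse`, and is invisible to gcc. */
#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

static long fake_sys_chdir(const char __user *path)
{
    (void)path;              /* a real kernel would copy_from_user() here */
    return 0;
}

int main(void)
{
    /* A string literal lives in normal/kernel space; the __force cast
     * (the patch's __force_user) silences the sparse warning. */
    return (int)fake_sys_chdir((const char __force __user *)"/root");
}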
89170diff --git a/init/do_mounts.h b/init/do_mounts.h
89171index f5b978a..69dbfe8 100644
89172--- a/init/do_mounts.h
89173+++ b/init/do_mounts.h
89174@@ -15,15 +15,15 @@ extern int root_mountflags;
89175
89176 static inline int create_dev(char *name, dev_t dev)
89177 {
89178- sys_unlink(name);
89179- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
89180+ sys_unlink((char __force_user *)name);
89181+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
89182 }
89183
89184 #if BITS_PER_LONG == 32
89185 static inline u32 bstat(char *name)
89186 {
89187 struct stat64 stat;
89188- if (sys_stat64(name, &stat) != 0)
89189+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
89190 return 0;
89191 if (!S_ISBLK(stat.st_mode))
89192 return 0;
89193@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
89194 static inline u32 bstat(char *name)
89195 {
89196 struct stat stat;
89197- if (sys_newstat(name, &stat) != 0)
89198+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
89199 return 0;
89200 if (!S_ISBLK(stat.st_mode))
89201 return 0;
89202diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
89203index 3e0878e..8a9d7a0 100644
89204--- a/init/do_mounts_initrd.c
89205+++ b/init/do_mounts_initrd.c
89206@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
89207 {
89208 sys_unshare(CLONE_FS | CLONE_FILES);
89209 /* stdin/stdout/stderr for /linuxrc */
89210- sys_open("/dev/console", O_RDWR, 0);
89211+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
89212 sys_dup(0);
89213 sys_dup(0);
89214 /* move initrd over / and chdir/chroot in initrd root */
89215- sys_chdir("/root");
89216- sys_mount(".", "/", NULL, MS_MOVE, NULL);
89217- sys_chroot(".");
89218+ sys_chdir((const char __force_user *)"/root");
89219+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
89220+ sys_chroot((const char __force_user *)".");
89221 sys_setsid();
89222 return 0;
89223 }
89224@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
89225 create_dev("/dev/root.old", Root_RAM0);
89226 /* mount initrd on rootfs' /root */
89227 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
89228- sys_mkdir("/old", 0700);
89229- sys_chdir("/old");
89230+ sys_mkdir((const char __force_user *)"/old", 0700);
89231+ sys_chdir((const char __force_user *)"/old");
89232
89233 /* try loading default modules from initrd */
89234 load_default_modules();
89235@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
89236 current->flags &= ~PF_FREEZER_SKIP;
89237
89238 /* move initrd to rootfs' /old */
89239- sys_mount("..", ".", NULL, MS_MOVE, NULL);
89240+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
89241 /* switch root and cwd back to / of rootfs */
89242- sys_chroot("..");
89243+ sys_chroot((const char __force_user *)"..");
89244
89245 if (new_decode_dev(real_root_dev) == Root_RAM0) {
89246- sys_chdir("/old");
89247+ sys_chdir((const char __force_user *)"/old");
89248 return;
89249 }
89250
89251- sys_chdir("/");
89252+ sys_chdir((const char __force_user *)"/");
89253 ROOT_DEV = new_decode_dev(real_root_dev);
89254 mount_root();
89255
89256 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
89257- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
89258+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
89259 if (!error)
89260 printk("okay\n");
89261 else {
89262- int fd = sys_open("/dev/root.old", O_RDWR, 0);
89263+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
89264 if (error == -ENOENT)
89265 printk("/initrd does not exist. Ignored.\n");
89266 else
89267 printk("failed\n");
89268 printk(KERN_NOTICE "Unmounting old root\n");
89269- sys_umount("/old", MNT_DETACH);
89270+ sys_umount((char __force_user *)"/old", MNT_DETACH);
89271 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
89272 if (fd < 0) {
89273 error = fd;
89274@@ -127,11 +127,11 @@ int __init initrd_load(void)
89275 * mounted in the normal path.
89276 */
89277 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
89278- sys_unlink("/initrd.image");
89279+ sys_unlink((const char __force_user *)"/initrd.image");
89280 handle_initrd();
89281 return 1;
89282 }
89283 }
89284- sys_unlink("/initrd.image");
89285+ sys_unlink((const char __force_user *)"/initrd.image");
89286 return 0;
89287 }
89288diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
89289index 8cb6db5..d729f50 100644
89290--- a/init/do_mounts_md.c
89291+++ b/init/do_mounts_md.c
89292@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
89293 partitioned ? "_d" : "", minor,
89294 md_setup_args[ent].device_names);
89295
89296- fd = sys_open(name, 0, 0);
89297+ fd = sys_open((char __force_user *)name, 0, 0);
89298 if (fd < 0) {
89299 printk(KERN_ERR "md: open failed - cannot start "
89300 "array %s\n", name);
89301@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
89302 * array without it
89303 */
89304 sys_close(fd);
89305- fd = sys_open(name, 0, 0);
89306+ fd = sys_open((char __force_user *)name, 0, 0);
89307 sys_ioctl(fd, BLKRRPART, 0);
89308 }
89309 sys_close(fd);
89310@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
89311
89312 wait_for_device_probe();
89313
89314- fd = sys_open("/dev/md0", 0, 0);
89315+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
89316 if (fd >= 0) {
89317 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
89318 sys_close(fd);
89319diff --git a/init/init_task.c b/init/init_task.c
89320index ba0a7f36..2bcf1d5 100644
89321--- a/init/init_task.c
89322+++ b/init/init_task.c
89323@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
89324 * Initial thread structure. Alignment of this is handled by a special
89325 * linker map entry.
89326 */
89327+#ifdef CONFIG_X86
89328+union thread_union init_thread_union __init_task_data;
89329+#else
89330 union thread_union init_thread_union __init_task_data =
89331 { INIT_THREAD_INFO(init_task) };
89332+#endif
89333diff --git a/init/initramfs.c b/init/initramfs.c
89334index a8497fa..35b3c90 100644
89335--- a/init/initramfs.c
89336+++ b/init/initramfs.c
89337@@ -84,7 +84,7 @@ static void __init free_hash(void)
89338 }
89339 }
89340
89341-static long __init do_utime(char *filename, time_t mtime)
89342+static long __init do_utime(char __force_user *filename, time_t mtime)
89343 {
89344 struct timespec t[2];
89345
89346@@ -119,7 +119,7 @@ static void __init dir_utime(void)
89347 struct dir_entry *de, *tmp;
89348 list_for_each_entry_safe(de, tmp, &dir_list, list) {
89349 list_del(&de->list);
89350- do_utime(de->name, de->mtime);
89351+ do_utime((char __force_user *)de->name, de->mtime);
89352 kfree(de->name);
89353 kfree(de);
89354 }
89355@@ -281,7 +281,7 @@ static int __init maybe_link(void)
89356 if (nlink >= 2) {
89357 char *old = find_link(major, minor, ino, mode, collected);
89358 if (old)
89359- return (sys_link(old, collected) < 0) ? -1 : 1;
89360+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
89361 }
89362 return 0;
89363 }
89364@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
89365 {
89366 struct stat st;
89367
89368- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
89369+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
89370 if (S_ISDIR(st.st_mode))
89371- sys_rmdir(path);
89372+ sys_rmdir((char __force_user *)path);
89373 else
89374- sys_unlink(path);
89375+ sys_unlink((char __force_user *)path);
89376 }
89377 }
89378
89379@@ -315,7 +315,7 @@ static int __init do_name(void)
89380 int openflags = O_WRONLY|O_CREAT;
89381 if (ml != 1)
89382 openflags |= O_TRUNC;
89383- wfd = sys_open(collected, openflags, mode);
89384+ wfd = sys_open((char __force_user *)collected, openflags, mode);
89385
89386 if (wfd >= 0) {
89387 sys_fchown(wfd, uid, gid);
89388@@ -327,17 +327,17 @@ static int __init do_name(void)
89389 }
89390 }
89391 } else if (S_ISDIR(mode)) {
89392- sys_mkdir(collected, mode);
89393- sys_chown(collected, uid, gid);
89394- sys_chmod(collected, mode);
89395+ sys_mkdir((char __force_user *)collected, mode);
89396+ sys_chown((char __force_user *)collected, uid, gid);
89397+ sys_chmod((char __force_user *)collected, mode);
89398 dir_add(collected, mtime);
89399 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
89400 S_ISFIFO(mode) || S_ISSOCK(mode)) {
89401 if (maybe_link() == 0) {
89402- sys_mknod(collected, mode, rdev);
89403- sys_chown(collected, uid, gid);
89404- sys_chmod(collected, mode);
89405- do_utime(collected, mtime);
89406+ sys_mknod((char __force_user *)collected, mode, rdev);
89407+ sys_chown((char __force_user *)collected, uid, gid);
89408+ sys_chmod((char __force_user *)collected, mode);
89409+ do_utime((char __force_user *)collected, mtime);
89410 }
89411 }
89412 return 0;
89413@@ -346,15 +346,15 @@ static int __init do_name(void)
89414 static int __init do_copy(void)
89415 {
89416 if (count >= body_len) {
89417- sys_write(wfd, victim, body_len);
89418+ sys_write(wfd, (char __force_user *)victim, body_len);
89419 sys_close(wfd);
89420- do_utime(vcollected, mtime);
89421+ do_utime((char __force_user *)vcollected, mtime);
89422 kfree(vcollected);
89423 eat(body_len);
89424 state = SkipIt;
89425 return 0;
89426 } else {
89427- sys_write(wfd, victim, count);
89428+ sys_write(wfd, (char __force_user *)victim, count);
89429 body_len -= count;
89430 eat(count);
89431 return 1;
89432@@ -365,9 +365,9 @@ static int __init do_symlink(void)
89433 {
89434 collected[N_ALIGN(name_len) + body_len] = '\0';
89435 clean_path(collected, 0);
89436- sys_symlink(collected + N_ALIGN(name_len), collected);
89437- sys_lchown(collected, uid, gid);
89438- do_utime(collected, mtime);
89439+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
89440+ sys_lchown((char __force_user *)collected, uid, gid);
89441+ do_utime((char __force_user *)collected, mtime);
89442 state = SkipIt;
89443 next_state = Reset;
89444 return 0;
89445diff --git a/init/main.c b/init/main.c
89446index e8ae1fe..f60f98c 100644
89447--- a/init/main.c
89448+++ b/init/main.c
89449@@ -98,6 +98,8 @@ extern void radix_tree_init(void);
89450 static inline void mark_rodata_ro(void) { }
89451 #endif
89452
89453+extern void grsecurity_init(void);
89454+
89455 /*
89456 * Debug helper: via this flag we know that we are in 'early bootup code'
89457 * where only the boot processor is running with IRQ disabled. This means
89458@@ -159,6 +161,75 @@ static int __init set_reset_devices(char *str)
89459
89460 __setup("reset_devices", set_reset_devices);
89461
89462+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
89463+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
89464+static int __init setup_grsec_proc_gid(char *str)
89465+{
89466+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
89467+ return 1;
89468+}
89469+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
89470+#endif
89471+
89472+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
89473+unsigned long pax_user_shadow_base __read_only;
89474+EXPORT_SYMBOL(pax_user_shadow_base);
89475+extern char pax_enter_kernel_user[];
89476+extern char pax_exit_kernel_user[];
89477+#endif
89478+
89479+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
89480+static int __init setup_pax_nouderef(char *str)
89481+{
89482+#ifdef CONFIG_X86_32
89483+ unsigned int cpu;
89484+ struct desc_struct *gdt;
89485+
89486+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
89487+ gdt = get_cpu_gdt_table(cpu);
89488+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
89489+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
89490+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
89491+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
89492+ }
89493+ loadsegment(ds, __KERNEL_DS);
89494+ loadsegment(es, __KERNEL_DS);
89495+ loadsegment(ss, __KERNEL_DS);
89496+#else
89497+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
89498+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
89499+ clone_pgd_mask = ~(pgdval_t)0UL;
89500+ pax_user_shadow_base = 0UL;
89501+ setup_clear_cpu_cap(X86_FEATURE_PCID);
89502+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
89503+#endif
89504+
89505+ return 0;
89506+}
89507+early_param("pax_nouderef", setup_pax_nouderef);
89508+
89509+#ifdef CONFIG_X86_64
89510+static int __init setup_pax_weakuderef(char *str)
89511+{
89512+ if (clone_pgd_mask != ~(pgdval_t)0UL)
89513+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
89514+ return 1;
89515+}
89516+__setup("pax_weakuderef", setup_pax_weakuderef);
89517+#endif
89518+#endif
89519+
89520+#ifdef CONFIG_PAX_SOFTMODE
89521+int pax_softmode;
89522+
89523+static int __init setup_pax_softmode(char *str)
89524+{
89525+ get_option(&str, &pax_softmode);
89526+ return 1;
89527+}
89528+__setup("pax_softmode=", setup_pax_softmode);
89529+#endif
89530+
89531 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
89532 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
89533 static const char *panic_later, *panic_param;
89534@@ -727,7 +798,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
89535 struct blacklist_entry *entry;
89536 char *fn_name;
89537
89538- fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
89539+ fn_name = kasprintf(GFP_KERNEL, "%pX", fn);
89540 if (!fn_name)
89541 return false;
89542
89543@@ -779,7 +850,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
89544 {
89545 int count = preempt_count();
89546 int ret;
89547- char msgbuf[64];
89548+ const char *msg1 = "", *msg2 = "";
89549
89550 if (initcall_blacklisted(fn))
89551 return -EPERM;
89552@@ -789,18 +860,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
89553 else
89554 ret = fn();
89555
89556- msgbuf[0] = 0;
89557-
89558 if (preempt_count() != count) {
89559- sprintf(msgbuf, "preemption imbalance ");
89560+ msg1 = " preemption imbalance";
89561 preempt_count_set(count);
89562 }
89563 if (irqs_disabled()) {
89564- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
89565+ msg2 = " disabled interrupts";
89566 local_irq_enable();
89567 }
89568- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
89569+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
89570
89571+ add_latent_entropy();
89572 return ret;
89573 }
89574
89575@@ -907,8 +977,8 @@ static int run_init_process(const char *init_filename)
89576 {
89577 argv_init[0] = init_filename;
89578 return do_execve(getname_kernel(init_filename),
89579- (const char __user *const __user *)argv_init,
89580- (const char __user *const __user *)envp_init);
89581+ (const char __user *const __force_user *)argv_init,
89582+ (const char __user *const __force_user *)envp_init);
89583 }
89584
89585 static int try_to_run_init_process(const char *init_filename)
89586@@ -925,6 +995,10 @@ static int try_to_run_init_process(const char *init_filename)
89587 return ret;
89588 }
89589
89590+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
89591+extern int gr_init_ran;
89592+#endif
89593+
89594 static noinline void __init kernel_init_freeable(void);
89595
89596 static int __ref kernel_init(void *unused)
89597@@ -949,6 +1023,11 @@ static int __ref kernel_init(void *unused)
89598 ramdisk_execute_command, ret);
89599 }
89600
89601+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
89602+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
89603+ gr_init_ran = 1;
89604+#endif
89605+
89606 /*
89607 * We try each of these until one succeeds.
89608 *
89609@@ -1004,7 +1083,7 @@ static noinline void __init kernel_init_freeable(void)
89610 do_basic_setup();
89611
89612 /* Open the /dev/console on the rootfs, this should never fail */
89613- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
89614+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
89615 pr_err("Warning: unable to open an initial console.\n");
89616
89617 (void) sys_dup(0);
89618@@ -1017,11 +1096,13 @@ static noinline void __init kernel_init_freeable(void)
89619 if (!ramdisk_execute_command)
89620 ramdisk_execute_command = "/init";
89621
89622- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
89623+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
89624 ramdisk_execute_command = NULL;
89625 prepare_namespace();
89626 }
89627
89628+ grsecurity_init();
89629+
89630 /*
89631 * Ok, we have completed the initial bootup, and
89632 * we're essentially up and running. Get rid of the
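[Note on the init/main.c hunk above: it registers several boot parameters through the standard __setup()/early_param() machinery, where a parser receives the text following the key and returns 1 once consumed. A runnable userspace analogue of that pattern follows, reusing the pax_softmode name from the hunk purely for illustration.]

/* Userspace analogue of the __setup() pattern: the handler gets the text
 * after "name=" and records the value (the kernel uses get_option()). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int pax_softmode;

static int setup_pax_softmode(char *str)
{
    pax_softmode = atoi(str);
    return 1;                          /* 1: consumed, not passed to init */
}

int main(void)
{
    char cmdline[] = "pax_softmode=1"; /* as it would appear on the boot line */
    const char *key = "pax_softmode=";

    if (strncmp(cmdline, key, strlen(key)) == 0)
        setup_pax_softmode(cmdline + strlen(key));

    printf("pax_softmode = %d\n", pax_softmode);
    return 0;
}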
89633diff --git a/ipc/compat.c b/ipc/compat.c
89634index b5ef4f7..ff31d87 100644
89635--- a/ipc/compat.c
89636+++ b/ipc/compat.c
89637@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
89638 COMPAT_SHMLBA);
89639 if (err < 0)
89640 return err;
89641- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
89642+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
89643 }
89644 case SHMDT:
89645 return sys_shmdt(compat_ptr(ptr));
89646diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
89647index c3f0326..d4e0579 100644
89648--- a/ipc/ipc_sysctl.c
89649+++ b/ipc/ipc_sysctl.c
89650@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
89651 static int proc_ipc_dointvec(struct ctl_table *table, int write,
89652 void __user *buffer, size_t *lenp, loff_t *ppos)
89653 {
89654- struct ctl_table ipc_table;
89655+ ctl_table_no_const ipc_table;
89656
89657 memcpy(&ipc_table, table, sizeof(ipc_table));
89658 ipc_table.data = get_ipc(table);
89659@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
89660 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
89661 void __user *buffer, size_t *lenp, loff_t *ppos)
89662 {
89663- struct ctl_table ipc_table;
89664+ ctl_table_no_const ipc_table;
89665
89666 memcpy(&ipc_table, table, sizeof(ipc_table));
89667 ipc_table.data = get_ipc(table);
89668@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
89669 static int proc_ipc_callback_dointvec_minmax(struct ctl_table *table, int write,
89670 void __user *buffer, size_t *lenp, loff_t *ppos)
89671 {
89672- struct ctl_table ipc_table;
89673+ ctl_table_no_const ipc_table;
89674 size_t lenp_bef = *lenp;
89675 int rc;
89676
89677@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec_minmax(struct ctl_table *table, int write,
89678 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
89679 void __user *buffer, size_t *lenp, loff_t *ppos)
89680 {
89681- struct ctl_table ipc_table;
89682+ ctl_table_no_const ipc_table;
89683 memcpy(&ipc_table, table, sizeof(ipc_table));
89684 ipc_table.data = get_ipc(table);
89685
89686@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
89687 static int proc_ipcauto_dointvec_minmax(struct ctl_table *table, int write,
89688 void __user *buffer, size_t *lenp, loff_t *ppos)
89689 {
89690- struct ctl_table ipc_table;
89691+ ctl_table_no_const ipc_table;
89692 size_t lenp_bef = *lenp;
89693 int oldval;
89694 int rc;
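[Note on the ipc_sysctl.c hunks above: struct ctl_table becomes ctl_table_no_const because grsecurity's constify plugin makes such tables read-only at file scope, while these handlers still need a short-lived writable copy so they can repoint .data at the per-namespace value. A runnable userspace analogue of that copy-then-mutate pattern follows; the struct here is a stand-in, not the kernel definition.]

/* Copy-then-mutate: the file-scope table is const, the stack copy is not,
 * so only the copy's .data pointer can be redirected. */
#include <stdio.h>
#include <string.h>

struct ctl_table { const char *procname; void *data; };
typedef struct ctl_table ctl_table_no_const;   /* writable alias */

static const struct ctl_table template = { "shmmax", NULL };

int main(void)
{
    int value = 42;
    ctl_table_no_const copy;

    memcpy(&copy, &template, sizeof(copy));
    copy.data = &value;                  /* legal: the copy is not const */
    printf("%s -> %d\n", copy.procname, *(int *)copy.data);
    return 0;
}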
89695diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
89696index 68d4e95..1477ded 100644
89697--- a/ipc/mq_sysctl.c
89698+++ b/ipc/mq_sysctl.c
89699@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
89700 static int proc_mq_dointvec(struct ctl_table *table, int write,
89701 void __user *buffer, size_t *lenp, loff_t *ppos)
89702 {
89703- struct ctl_table mq_table;
89704+ ctl_table_no_const mq_table;
89705 memcpy(&mq_table, table, sizeof(mq_table));
89706 mq_table.data = get_mq(table);
89707
89708@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
89709 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
89710 void __user *buffer, size_t *lenp, loff_t *ppos)
89711 {
89712- struct ctl_table mq_table;
89713+ ctl_table_no_const mq_table;
89714 memcpy(&mq_table, table, sizeof(mq_table));
89715 mq_table.data = get_mq(table);
89716
89717diff --git a/ipc/mqueue.c b/ipc/mqueue.c
89718index 4fcf39a..d3cc2ec 100644
89719--- a/ipc/mqueue.c
89720+++ b/ipc/mqueue.c
89721@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
89722 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
89723 info->attr.mq_msgsize);
89724
89725+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
89726 spin_lock(&mq_lock);
89727 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
89728 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
89729diff --git a/ipc/shm.c b/ipc/shm.c
89730index 89fc354..cf56786 100644
89731--- a/ipc/shm.c
89732+++ b/ipc/shm.c
89733@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
89734 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
89735 #endif
89736
89737+#ifdef CONFIG_GRKERNSEC
89738+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
89739+ const time_t shm_createtime, const kuid_t cuid,
89740+ const int shmid);
89741+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
89742+ const time_t shm_createtime);
89743+#endif
89744+
89745 void shm_init_ns(struct ipc_namespace *ns)
89746 {
89747 ns->shm_ctlmax = SHMMAX;
89748@@ -557,6 +565,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
89749 shp->shm_lprid = 0;
89750 shp->shm_atim = shp->shm_dtim = 0;
89751 shp->shm_ctim = get_seconds();
89752+#ifdef CONFIG_GRKERNSEC
89753+ {
89754+ struct timespec timeval;
89755+ do_posix_clock_monotonic_gettime(&timeval);
89756+
89757+ shp->shm_createtime = timeval.tv_sec;
89758+ }
89759+#endif
89760 shp->shm_segsz = size;
89761 shp->shm_nattch = 0;
89762 shp->shm_file = file;
89763@@ -1092,6 +1108,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
89764 f_mode = FMODE_READ | FMODE_WRITE;
89765 }
89766 if (shmflg & SHM_EXEC) {
89767+
89768+#ifdef CONFIG_PAX_MPROTECT
89769+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
89770+ goto out;
89771+#endif
89772+
89773 prot |= PROT_EXEC;
89774 acc_mode |= S_IXUGO;
89775 }
89776@@ -1116,6 +1138,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
89777 if (err)
89778 goto out_unlock;
89779
89780+#ifdef CONFIG_GRKERNSEC
89781+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
89782+ shp->shm_perm.cuid, shmid) ||
89783+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
89784+ err = -EACCES;
89785+ goto out_unlock;
89786+ }
89787+#endif
89788+
89789 ipc_lock_object(&shp->shm_perm);
89790
89791 /* check if shm_destroy() is tearing down shp */
89792@@ -1128,6 +1159,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
89793 path = shp->shm_file->f_path;
89794 path_get(&path);
89795 shp->shm_nattch++;
89796+#ifdef CONFIG_GRKERNSEC
89797+ shp->shm_lapid = current->pid;
89798+#endif
89799 size = i_size_read(path.dentry->d_inode);
89800 ipc_unlock_object(&shp->shm_perm);
89801 rcu_read_unlock();
89802diff --git a/ipc/util.c b/ipc/util.c
89803index 27d74e6..8be0be2 100644
89804--- a/ipc/util.c
89805+++ b/ipc/util.c
89806@@ -71,6 +71,8 @@ struct ipc_proc_iface {
89807 int (*show)(struct seq_file *, void *);
89808 };
89809
89810+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
89811+
89812 static void ipc_memory_notifier(struct work_struct *work)
89813 {
89814 ipcns_notify(IPCNS_MEMCHANGED);
89815@@ -537,6 +539,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
89816 granted_mode >>= 6;
89817 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
89818 granted_mode >>= 3;
89819+
89820+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
89821+ return -1;
89822+
89823 /* is there some bit set in requested_mode but not in granted_mode? */
89824 if ((requested_mode & ~granted_mode & 0007) &&
89825 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
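[Note on the ipcperms() hunk above: the gr_ipc_permitted() call is inserted into the classic SysV permission walk visible in the context lines, where the 9-bit mode is shifted so bits 0-2 always describe the caller's class before unsatisfied bits are tested. A runnable walk-through with illustrative values follows.]

/* The SysV IPC permission walk around the hook: shift the 9-bit mode so
 * bits 0-2 describe the calling class, then look for requested bits that
 * were not granted. Values here are illustrative. */
#include <stdio.h>

int main(void)
{
    unsigned short mode = 0640;        /* rw- r-- --- */
    unsigned requested = 04;           /* read */
    int is_owner = 0, in_group = 1;

    unsigned granted = mode;
    if (is_owner)
        granted >>= 6;                 /* owner bits into 0-2 */
    else if (in_group)
        granted >>= 3;                 /* group bits into 0-2 */

    if (requested & ~granted & 0007)
        printf("denied (would need CAP_IPC_OWNER)\n");
    else
        printf("allowed\n");
    return 0;
}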
89826diff --git a/kernel/acct.c b/kernel/acct.c
89827index 808a86f..da69695 100644
89828--- a/kernel/acct.c
89829+++ b/kernel/acct.c
89830@@ -556,7 +556,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
89831 */
89832 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
89833 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
89834- file->f_op->write(file, (char *)&ac,
89835+ file->f_op->write(file, (char __force_user *)&ac,
89836 sizeof(acct_t), &file->f_pos);
89837 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
89838 set_fs(fs);
89839diff --git a/kernel/audit.c b/kernel/audit.c
89840index 3ef2e0e..8873765 100644
89841--- a/kernel/audit.c
89842+++ b/kernel/audit.c
89843@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
89844 3) suppressed due to audit_rate_limit
89845 4) suppressed due to audit_backlog_limit
89846 */
89847-static atomic_t audit_lost = ATOMIC_INIT(0);
89848+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
89849
89850 /* The netlink socket. */
89851 static struct sock *audit_sock;
89852@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
89853 unsigned long now;
89854 int print;
89855
89856- atomic_inc(&audit_lost);
89857+ atomic_inc_unchecked(&audit_lost);
89858
89859 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
89860
89861@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
89862 if (print) {
89863 if (printk_ratelimit())
89864 pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
89865- atomic_read(&audit_lost),
89866+ atomic_read_unchecked(&audit_lost),
89867 audit_rate_limit,
89868 audit_backlog_limit);
89869 audit_panic(message);
89870@@ -840,7 +840,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
89871 s.pid = audit_pid;
89872 s.rate_limit = audit_rate_limit;
89873 s.backlog_limit = audit_backlog_limit;
89874- s.lost = atomic_read(&audit_lost);
89875+ s.lost = atomic_read_unchecked(&audit_lost);
89876 s.backlog = skb_queue_len(&audit_skb_queue);
89877 s.version = AUDIT_VERSION_LATEST;
89878 s.backlog_wait_time = audit_backlog_wait_time;
89879diff --git a/kernel/auditsc.c b/kernel/auditsc.c
89880index 21eae3c..66db239 100644
89881--- a/kernel/auditsc.c
89882+++ b/kernel/auditsc.c
89883@@ -2023,7 +2023,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
89884 }
89885
89886 /* global counter which is incremented every time something logs in */
89887-static atomic_t session_id = ATOMIC_INIT(0);
89888+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
89889
89890 static int audit_set_loginuid_perm(kuid_t loginuid)
89891 {
89892@@ -2090,7 +2090,7 @@ int audit_set_loginuid(kuid_t loginuid)
89893
89894 /* are we setting or clearing? */
89895 if (uid_valid(loginuid))
89896- sessionid = (unsigned int)atomic_inc_return(&session_id);
89897+ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
89898
89899 task->sessionid = sessionid;
89900 task->loginuid = loginuid;
89901diff --git a/kernel/capability.c b/kernel/capability.c
89902index a5cf13c..07a2647 100644
89903--- a/kernel/capability.c
89904+++ b/kernel/capability.c
89905@@ -192,6 +192,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
89906 * before modification is attempted and the application
89907 * fails.
89908 */
89909+ if (tocopy > ARRAY_SIZE(kdata))
89910+ return -EFAULT;
89911+
89912 if (copy_to_user(dataptr, kdata, tocopy
89913 * sizeof(struct __user_cap_data_struct))) {
89914 return -EFAULT;
89915@@ -293,10 +296,11 @@ bool has_ns_capability(struct task_struct *t,
89916 int ret;
89917
89918 rcu_read_lock();
89919- ret = security_capable(__task_cred(t), ns, cap);
89920+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
89921+ gr_task_is_capable(t, __task_cred(t), cap);
89922 rcu_read_unlock();
89923
89924- return (ret == 0);
89925+ return ret;
89926 }
89927
89928 /**
89929@@ -333,10 +337,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
89930 int ret;
89931
89932 rcu_read_lock();
89933- ret = security_capable_noaudit(__task_cred(t), ns, cap);
89934+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
89935 rcu_read_unlock();
89936
89937- return (ret == 0);
89938+ return ret;
89939 }
89940
89941 /**
89942@@ -374,7 +378,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
89943 BUG();
89944 }
89945
89946- if (security_capable(current_cred(), ns, cap) == 0) {
89947+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
89948 current->flags |= PF_SUPERPRIV;
89949 return true;
89950 }
89951@@ -382,6 +386,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
89952 }
89953 EXPORT_SYMBOL(ns_capable);
89954
89955+bool ns_capable_nolog(struct user_namespace *ns, int cap)
89956+{
89957+ if (unlikely(!cap_valid(cap))) {
89958+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
89959+ BUG();
89960+ }
89961+
89962+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
89963+ current->flags |= PF_SUPERPRIV;
89964+ return true;
89965+ }
89966+ return false;
89967+}
89968+EXPORT_SYMBOL(ns_capable_nolog);
89969+
89970 /**
89971 * file_ns_capable - Determine if the file's opener had a capability in effect
89972 * @file: The file we want to check
89973@@ -423,6 +442,12 @@ bool capable(int cap)
89974 }
89975 EXPORT_SYMBOL(capable);
89976
89977+bool capable_nolog(int cap)
89978+{
89979+ return ns_capable_nolog(&init_user_ns, cap);
89980+}
89981+EXPORT_SYMBOL(capable_nolog);
89982+
89983 /**
89984 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
89985 * @inode: The inode in question
89986@@ -440,3 +465,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
89987 kgid_has_mapping(ns, inode->i_gid);
89988 }
89989 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
89990+
89991+bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
89992+{
89993+ struct user_namespace *ns = current_user_ns();
89994+
89995+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
89996+ kgid_has_mapping(ns, inode->i_gid);
89997+}
89998+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
89999diff --git a/kernel/cgroup.c b/kernel/cgroup.c
90000index 70776ae..09c4988 100644
90001--- a/kernel/cgroup.c
90002+++ b/kernel/cgroup.c
90003@@ -5146,6 +5146,14 @@ static void cgroup_release_agent(struct work_struct *work)
90004 release_list);
90005 list_del_init(&cgrp->release_list);
90006 raw_spin_unlock(&release_list_lock);
90007+
90008+ /*
90009+ * don't bother calling call_usermodehelper if we haven't
90010+ * configured a binary to execute
90011+ */
90012+ if (cgrp->root->release_agent_path[0] == '\0')
90013+ goto continue_free;
90014+
90015 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
90016 if (!pathbuf)
90017 goto continue_free;
90018@@ -5336,7 +5344,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
90019 struct task_struct *task;
90020 int count = 0;
90021
90022- seq_printf(seq, "css_set %p\n", cset);
90023+ seq_printf(seq, "css_set %pK\n", cset);
90024
90025 list_for_each_entry(task, &cset->tasks, cg_list) {
90026 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
90027diff --git a/kernel/compat.c b/kernel/compat.c
90028index 633394f..bdfa969 100644
90029--- a/kernel/compat.c
90030+++ b/kernel/compat.c
90031@@ -13,6 +13,7 @@
90032
90033 #include <linux/linkage.h>
90034 #include <linux/compat.h>
90035+#include <linux/module.h>
90036 #include <linux/errno.h>
90037 #include <linux/time.h>
90038 #include <linux/signal.h>
90039@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
90040 mm_segment_t oldfs;
90041 long ret;
90042
90043- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
90044+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
90045 oldfs = get_fs();
90046 set_fs(KERNEL_DS);
90047 ret = hrtimer_nanosleep_restart(restart);
90048@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
90049 oldfs = get_fs();
90050 set_fs(KERNEL_DS);
90051 ret = hrtimer_nanosleep(&tu,
90052- rmtp ? (struct timespec __user *)&rmt : NULL,
90053+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
90054 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
90055 set_fs(oldfs);
90056
90057@@ -361,7 +362,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
90058 mm_segment_t old_fs = get_fs();
90059
90060 set_fs(KERNEL_DS);
90061- ret = sys_sigpending((old_sigset_t __user *) &s);
90062+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
90063 set_fs(old_fs);
90064 if (ret == 0)
90065 ret = put_user(s, set);
90066@@ -451,7 +452,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
90067 mm_segment_t old_fs = get_fs();
90068
90069 set_fs(KERNEL_DS);
90070- ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
90071+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
90072 set_fs(old_fs);
90073
90074 if (!ret) {
90075@@ -533,8 +534,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
90076 set_fs (KERNEL_DS);
90077 ret = sys_wait4(pid,
90078 (stat_addr ?
90079- (unsigned int __user *) &status : NULL),
90080- options, (struct rusage __user *) &r);
90081+ (unsigned int __force_user *) &status : NULL),
90082+ options, (struct rusage __force_user *) &r);
90083 set_fs (old_fs);
90084
90085 if (ret > 0) {
90086@@ -560,8 +561,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
90087 memset(&info, 0, sizeof(info));
90088
90089 set_fs(KERNEL_DS);
90090- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
90091- uru ? (struct rusage __user *)&ru : NULL);
90092+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
90093+ uru ? (struct rusage __force_user *)&ru : NULL);
90094 set_fs(old_fs);
90095
90096 if ((ret < 0) || (info.si_signo == 0))
90097@@ -695,8 +696,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
90098 oldfs = get_fs();
90099 set_fs(KERNEL_DS);
90100 err = sys_timer_settime(timer_id, flags,
90101- (struct itimerspec __user *) &newts,
90102- (struct itimerspec __user *) &oldts);
90103+ (struct itimerspec __force_user *) &newts,
90104+ (struct itimerspec __force_user *) &oldts);
90105 set_fs(oldfs);
90106 if (!err && old && put_compat_itimerspec(old, &oldts))
90107 return -EFAULT;
90108@@ -713,7 +714,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
90109 oldfs = get_fs();
90110 set_fs(KERNEL_DS);
90111 err = sys_timer_gettime(timer_id,
90112- (struct itimerspec __user *) &ts);
90113+ (struct itimerspec __force_user *) &ts);
90114 set_fs(oldfs);
90115 if (!err && put_compat_itimerspec(setting, &ts))
90116 return -EFAULT;
90117@@ -732,7 +733,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
90118 oldfs = get_fs();
90119 set_fs(KERNEL_DS);
90120 err = sys_clock_settime(which_clock,
90121- (struct timespec __user *) &ts);
90122+ (struct timespec __force_user *) &ts);
90123 set_fs(oldfs);
90124 return err;
90125 }
90126@@ -747,7 +748,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
90127 oldfs = get_fs();
90128 set_fs(KERNEL_DS);
90129 err = sys_clock_gettime(which_clock,
90130- (struct timespec __user *) &ts);
90131+ (struct timespec __force_user *) &ts);
90132 set_fs(oldfs);
90133 if (!err && compat_put_timespec(&ts, tp))
90134 return -EFAULT;
90135@@ -767,7 +768,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
90136
90137 oldfs = get_fs();
90138 set_fs(KERNEL_DS);
90139- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
90140+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
90141 set_fs(oldfs);
90142
90143 err = compat_put_timex(utp, &txc);
90144@@ -787,7 +788,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
90145 oldfs = get_fs();
90146 set_fs(KERNEL_DS);
90147 err = sys_clock_getres(which_clock,
90148- (struct timespec __user *) &ts);
90149+ (struct timespec __force_user *) &ts);
90150 set_fs(oldfs);
90151 if (!err && tp && compat_put_timespec(&ts, tp))
90152 return -EFAULT;
90153@@ -801,7 +802,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
90154 struct timespec tu;
90155 struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
90156
90157- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
90158+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
90159 oldfs = get_fs();
90160 set_fs(KERNEL_DS);
90161 err = clock_nanosleep_restart(restart);
90162@@ -833,8 +834,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
90163 oldfs = get_fs();
90164 set_fs(KERNEL_DS);
90165 err = sys_clock_nanosleep(which_clock, flags,
90166- (struct timespec __user *) &in,
90167- (struct timespec __user *) &out);
90168+ (struct timespec __force_user *) &in,
90169+ (struct timespec __force_user *) &out);
90170 set_fs(oldfs);
90171
90172 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
90173@@ -1128,7 +1129,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
90174 mm_segment_t old_fs = get_fs();
90175
90176 set_fs(KERNEL_DS);
90177- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
90178+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
90179 set_fs(old_fs);
90180 if (compat_put_timespec(&t, interval))
90181 return -EFAULT;
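[Note on the kernel/compat.c hunks above: they all follow one pattern — a 32-bit entry point converts the compat layout, points the native syscall at a kernel-space temporary under set_fs(KERNEL_DS), then converts the result back; __force_user documents that a kernel pointer is knowingly passed where a __user one is declared. A runnable userspace analogue of the conversion half of that pattern follows; the structures and handler are stand-ins, not kernel definitions.]

/* Compat shim analogue: run the native handler against a wide temporary,
 * then narrow the result back to the 32-bit ABI the caller expects. */
#include <stdint.h>
#include <stdio.h>

struct compat_timespec { int32_t tv_sec, tv_nsec; };
struct timespec_native { int64_t tv_sec; long tv_nsec; };

static int native_clock_gettime(struct timespec_native *ts)
{
    ts->tv_sec = 1234567890;           /* stand-in for the real handler */
    ts->tv_nsec = 42;
    return 0;
}

static int compat_clock_gettime(struct compat_timespec *cts)
{
    struct timespec_native ts;
    int err = native_clock_gettime(&ts);   /* "kernel-space" temporary */
    if (err)
        return err;
    cts->tv_sec  = (int32_t)ts.tv_sec;     /* narrow back to compat ABI */
    cts->tv_nsec = (int32_t)ts.tv_nsec;
    return 0;
}

int main(void)
{
    struct compat_timespec cts;
    if (compat_clock_gettime(&cts) == 0)
        printf("%d.%09d\n", cts.tv_sec, cts.tv_nsec);
    return 0;
}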
90182diff --git a/kernel/configs.c b/kernel/configs.c
90183index c18b1f1..b9a0132 100644
90184--- a/kernel/configs.c
90185+++ b/kernel/configs.c
90186@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
90187 struct proc_dir_entry *entry;
90188
90189 /* create the current config file */
90190+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
90191+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
90192+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
90193+ &ikconfig_file_ops);
90194+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
90195+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
90196+ &ikconfig_file_ops);
90197+#endif
90198+#else
90199 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
90200 &ikconfig_file_ops);
90201+#endif
90202+
90203 if (!entry)
90204 return -ENOMEM;
90205
90206diff --git a/kernel/cred.c b/kernel/cred.c
90207index e0573a4..26c0fd3 100644
90208--- a/kernel/cred.c
90209+++ b/kernel/cred.c
90210@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
90211 validate_creds(cred);
90212 alter_cred_subscribers(cred, -1);
90213 put_cred(cred);
90214+
90215+#ifdef CONFIG_GRKERNSEC_SETXID
90216+ cred = (struct cred *) tsk->delayed_cred;
90217+ if (cred != NULL) {
90218+ tsk->delayed_cred = NULL;
90219+ validate_creds(cred);
90220+ alter_cred_subscribers(cred, -1);
90221+ put_cred(cred);
90222+ }
90223+#endif
90224 }
90225
90226 /**
90227@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
90228 * Always returns 0 thus allowing this function to be tail-called at the end
90229 * of, say, sys_setgid().
90230 */
90231-int commit_creds(struct cred *new)
90232+static int __commit_creds(struct cred *new)
90233 {
90234 struct task_struct *task = current;
90235 const struct cred *old = task->real_cred;
90236@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
90237
90238 get_cred(new); /* we will require a ref for the subj creds too */
90239
90240+ gr_set_role_label(task, new->uid, new->gid);
90241+
90242 /* dumpability changes */
90243 if (!uid_eq(old->euid, new->euid) ||
90244 !gid_eq(old->egid, new->egid) ||
90245@@ -479,6 +491,105 @@ int commit_creds(struct cred *new)
90246 put_cred(old);
90247 return 0;
90248 }
90249+#ifdef CONFIG_GRKERNSEC_SETXID
90250+extern int set_user(struct cred *new);
90251+
90252+void gr_delayed_cred_worker(void)
90253+{
90254+ const struct cred *new = current->delayed_cred;
90255+ struct cred *ncred;
90256+
90257+ current->delayed_cred = NULL;
90258+
90259+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
90260+ // from doing get_cred on it when queueing this
90261+ put_cred(new);
90262+ return;
90263+ } else if (new == NULL)
90264+ return;
90265+
90266+ ncred = prepare_creds();
90267+ if (!ncred)
90268+ goto die;
90269+ // uids
90270+ ncred->uid = new->uid;
90271+ ncred->euid = new->euid;
90272+ ncred->suid = new->suid;
90273+ ncred->fsuid = new->fsuid;
90274+ // gids
90275+ ncred->gid = new->gid;
90276+ ncred->egid = new->egid;
90277+ ncred->sgid = new->sgid;
90278+ ncred->fsgid = new->fsgid;
90279+ // groups
90280+ set_groups(ncred, new->group_info);
90281+ // caps
90282+ ncred->securebits = new->securebits;
90283+ ncred->cap_inheritable = new->cap_inheritable;
90284+ ncred->cap_permitted = new->cap_permitted;
90285+ ncred->cap_effective = new->cap_effective;
90286+ ncred->cap_bset = new->cap_bset;
90287+
90288+ if (set_user(ncred)) {
90289+ abort_creds(ncred);
90290+ goto die;
90291+ }
90292+
90293+ // from doing get_cred on it when queueing this
90294+ put_cred(new);
90295+
90296+ __commit_creds(ncred);
90297+ return;
90298+die:
90299+ // from doing get_cred on it when queueing this
90300+ put_cred(new);
90301+ do_group_exit(SIGKILL);
90302+}
90303+#endif
90304+
90305+int commit_creds(struct cred *new)
90306+{
90307+#ifdef CONFIG_GRKERNSEC_SETXID
90308+ int ret;
90309+ int schedule_it = 0;
90310+ struct task_struct *t;
90311+ unsigned oldsecurebits = current_cred()->securebits;
90312+
90313+ /* we won't get called with tasklist_lock held for writing
90314+ and interrupts disabled as the cred struct in that case is
90315+ init_cred
90316+ */
90317+ if (grsec_enable_setxid && !current_is_single_threaded() &&
90318+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
90319+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
90320+ schedule_it = 1;
90321+ }
90322+ ret = __commit_creds(new);
90323+ if (schedule_it) {
90324+ rcu_read_lock();
90325+ read_lock(&tasklist_lock);
90326+ for (t = next_thread(current); t != current;
90327+ t = next_thread(t)) {
90328+ /* we'll check if the thread has uid 0 in
90329+ * the delayed worker routine
90330+ */
90331+ if (task_securebits(t) == oldsecurebits &&
90332+ t->delayed_cred == NULL) {
90333+ t->delayed_cred = get_cred(new);
90334+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
90335+ set_tsk_need_resched(t);
90336+ }
90337+ }
90338+ read_unlock(&tasklist_lock);
90339+ rcu_read_unlock();
90340+ }
90341+
90342+ return ret;
90343+#else
90344+ return __commit_creds(new);
90345+#endif
90346+}
90347+
90348 EXPORT_SYMBOL(commit_creds);
90349
90350 /**
90351diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
90352index 1adf62b..7736e06 100644
90353--- a/kernel/debug/debug_core.c
90354+++ b/kernel/debug/debug_core.c
90355@@ -124,7 +124,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
90356 */
90357 static atomic_t masters_in_kgdb;
90358 static atomic_t slaves_in_kgdb;
90359-static atomic_t kgdb_break_tasklet_var;
90360+static atomic_unchecked_t kgdb_break_tasklet_var;
90361 atomic_t kgdb_setting_breakpoint;
90362
90363 struct task_struct *kgdb_usethread;
90364@@ -134,7 +134,7 @@ int kgdb_single_step;
90365 static pid_t kgdb_sstep_pid;
90366
90367 /* to keep track of the CPU which is doing the single stepping*/
90368-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
90369+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
90370
90371 /*
90372 * If you are debugging a problem where roundup (the collection of
90373@@ -549,7 +549,7 @@ return_normal:
90374 * kernel will only try for the value of sstep_tries before
90375 * giving up and continuing on.
90376 */
90377- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
90378+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
90379 (kgdb_info[cpu].task &&
90380 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
90381 atomic_set(&kgdb_active, -1);
90382@@ -647,8 +647,8 @@ cpu_master_loop:
90383 }
90384
90385 kgdb_restore:
90386- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
90387- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
90388+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
90389+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
90390 if (kgdb_info[sstep_cpu].task)
90391 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
90392 else
90393@@ -925,18 +925,18 @@ static void kgdb_unregister_callbacks(void)
90394 static void kgdb_tasklet_bpt(unsigned long ing)
90395 {
90396 kgdb_breakpoint();
90397- atomic_set(&kgdb_break_tasklet_var, 0);
90398+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
90399 }
90400
90401 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
90402
90403 void kgdb_schedule_breakpoint(void)
90404 {
90405- if (atomic_read(&kgdb_break_tasklet_var) ||
90406+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
90407 atomic_read(&kgdb_active) != -1 ||
90408 atomic_read(&kgdb_setting_breakpoint))
90409 return;
90410- atomic_inc(&kgdb_break_tasklet_var);
90411+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
90412 tasklet_schedule(&kgdb_tasklet_breakpoint);
90413 }
90414 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
90415diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
90416index 2f7c760..95b6a66 100644
90417--- a/kernel/debug/kdb/kdb_main.c
90418+++ b/kernel/debug/kdb/kdb_main.c
90419@@ -1977,7 +1977,7 @@ static int kdb_lsmod(int argc, const char **argv)
90420 continue;
90421
90422 kdb_printf("%-20s%8u 0x%p ", mod->name,
90423- mod->core_size, (void *)mod);
90424+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
90425 #ifdef CONFIG_MODULE_UNLOAD
90426 kdb_printf("%4ld ", module_refcount(mod));
90427 #endif
90428@@ -1987,7 +1987,7 @@ static int kdb_lsmod(int argc, const char **argv)
90429 kdb_printf(" (Loading)");
90430 else
90431 kdb_printf(" (Live)");
90432- kdb_printf(" 0x%p", mod->module_core);
90433+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
90434
90435 #ifdef CONFIG_MODULE_UNLOAD
90436 {
90437diff --git a/kernel/events/core.c b/kernel/events/core.c
90438index 6b17ac1..00fd505 100644
90439--- a/kernel/events/core.c
90440+++ b/kernel/events/core.c
90441@@ -160,8 +160,15 @@ static struct srcu_struct pmus_srcu;
90442 * 0 - disallow raw tracepoint access for unpriv
90443 * 1 - disallow cpu events for unpriv
90444 * 2 - disallow kernel profiling for unpriv
90445+ * 3 - disallow all unpriv perf event use
90446 */
90447-int sysctl_perf_event_paranoid __read_mostly = 1;
90448+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
90449+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
90450+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
90451+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
90452+#else
90453+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
90454+#endif
90455
90456 /* Minimum for 512 kiB + 1 user control page */
90457 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
90458@@ -187,7 +194,7 @@ void update_perf_cpu_limits(void)
90459
90460 tmp *= sysctl_perf_cpu_time_max_percent;
90461 do_div(tmp, 100);
90462- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
90463+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
90464 }
90465
90466 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
90467@@ -293,7 +300,7 @@ void perf_sample_event_took(u64 sample_len_ns)
90468 }
90469 }
90470
90471-static atomic64_t perf_event_id;
90472+static atomic64_unchecked_t perf_event_id;
90473
90474 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
90475 enum event_type_t event_type);
90476@@ -3023,7 +3030,7 @@ static void __perf_event_read(void *info)
90477
90478 static inline u64 perf_event_count(struct perf_event *event)
90479 {
90480- return local64_read(&event->count) + atomic64_read(&event->child_count);
90481+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
90482 }
90483
90484 static u64 perf_event_read(struct perf_event *event)
90485@@ -3399,9 +3406,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
90486 mutex_lock(&event->child_mutex);
90487 total += perf_event_read(event);
90488 *enabled += event->total_time_enabled +
90489- atomic64_read(&event->child_total_time_enabled);
90490+ atomic64_read_unchecked(&event->child_total_time_enabled);
90491 *running += event->total_time_running +
90492- atomic64_read(&event->child_total_time_running);
90493+ atomic64_read_unchecked(&event->child_total_time_running);
90494
90495 list_for_each_entry(child, &event->child_list, child_list) {
90496 total += perf_event_read(child);
90497@@ -3830,10 +3837,10 @@ void perf_event_update_userpage(struct perf_event *event)
90498 userpg->offset -= local64_read(&event->hw.prev_count);
90499
90500 userpg->time_enabled = enabled +
90501- atomic64_read(&event->child_total_time_enabled);
90502+ atomic64_read_unchecked(&event->child_total_time_enabled);
90503
90504 userpg->time_running = running +
90505- atomic64_read(&event->child_total_time_running);
90506+ atomic64_read_unchecked(&event->child_total_time_running);
90507
90508 arch_perf_update_userpage(userpg, now);
90509
90510@@ -4397,7 +4404,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
90511
90512 /* Data. */
90513 sp = perf_user_stack_pointer(regs);
90514- rem = __output_copy_user(handle, (void *) sp, dump_size);
90515+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
90516 dyn_size = dump_size - rem;
90517
90518 perf_output_skip(handle, rem);
90519@@ -4488,11 +4495,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
90520 values[n++] = perf_event_count(event);
90521 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
90522 values[n++] = enabled +
90523- atomic64_read(&event->child_total_time_enabled);
90524+ atomic64_read_unchecked(&event->child_total_time_enabled);
90525 }
90526 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
90527 values[n++] = running +
90528- atomic64_read(&event->child_total_time_running);
90529+ atomic64_read_unchecked(&event->child_total_time_running);
90530 }
90531 if (read_format & PERF_FORMAT_ID)
90532 values[n++] = primary_event_id(event);
90533@@ -6801,7 +6808,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
90534 event->parent = parent_event;
90535
90536 event->ns = get_pid_ns(task_active_pid_ns(current));
90537- event->id = atomic64_inc_return(&perf_event_id);
90538+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
90539
90540 event->state = PERF_EVENT_STATE_INACTIVE;
90541
90542@@ -7080,6 +7087,11 @@ SYSCALL_DEFINE5(perf_event_open,
90543 if (flags & ~PERF_FLAG_ALL)
90544 return -EINVAL;
90545
90546+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
90547+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
90548+ return -EACCES;
90549+#endif
90550+
90551 err = perf_copy_attr(attr_uptr, &attr);
90552 if (err)
90553 return err;
90554@@ -7432,10 +7444,10 @@ static void sync_child_event(struct perf_event *child_event,
90555 /*
90556 * Add back the child's count to the parent's count:
90557 */
90558- atomic64_add(child_val, &parent_event->child_count);
90559- atomic64_add(child_event->total_time_enabled,
90560+ atomic64_add_unchecked(child_val, &parent_event->child_count);
90561+ atomic64_add_unchecked(child_event->total_time_enabled,
90562 &parent_event->child_total_time_enabled);
90563- atomic64_add(child_event->total_time_running,
90564+ atomic64_add_unchecked(child_event->total_time_running,
90565 &parent_event->child_total_time_running);
90566
90567 /*
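
Note: with CONFIG_GRKERNSEC_PERF_HARDEN the paranoia default becomes 3 and perf_event_open() fails early with -EACCES for callers without CAP_SYS_ADMIN. The probe below reads the stock sysctl path from userspace; level 3 itself exists only on kernels carrying this patch.

/* Probe kernel.perf_event_paranoid before attempting perf_event_open().
 * Level meanings follow the comment block patched above; level 3
 * (deny all unprivileged use) is grsecurity-specific. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
	int level;

	if (!f || fscanf(f, "%d", &level) != 1) {
		perror("perf_event_paranoid");
		return 1;
	}
	fclose(f);

	if (level >= 3)
		puts("unprivileged perf_event_open() will fail with EACCES");
	else
		printf("paranoia level %d: some unprivileged use allowed\n", level);
	return 0;
}
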
90568diff --git a/kernel/events/internal.h b/kernel/events/internal.h
90569index 569b2187..19940d9 100644
90570--- a/kernel/events/internal.h
90571+++ b/kernel/events/internal.h
90572@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
90573 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
90574 }
90575
90576-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
90577+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
90578 static inline unsigned long \
90579 func_name(struct perf_output_handle *handle, \
90580- const void *buf, unsigned long len) \
90581+ const void user *buf, unsigned long len) \
90582 { \
90583 unsigned long size, written; \
90584 \
90585@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
90586 return 0;
90587 }
90588
90589-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
90590+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
90591
90592 static inline unsigned long
90593 memcpy_skip(void *dst, const void *src, unsigned long n)
90594@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
90595 return 0;
90596 }
90597
90598-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
90599+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
90600
90601 #ifndef arch_perf_out_copy_user
90602 #define arch_perf_out_copy_user arch_perf_out_copy_user
90603@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
90604 }
90605 #endif
90606
90607-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
90608+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
90609
90610 /* Callchain handling */
90611 extern struct perf_callchain_entry *
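
Note: the extra macro parameter above threads the __user address-space qualifier through to the generated copy helpers, so sparse can type-check __output_copy_user against user pointers while __output_copy keeps kernel pointers. A standalone sketch of the token-pasting technique, with __user reduced to a no-op since the annotation only has effect under sparse in kernel builds.

/* Pasting an optional qualifier token into generated functions,
 * the same trick DEFINE_OUTPUT_COPY uses above. */
#include <stdio.h>
#include <string.h>

#define __user /* sparse-only: __attribute__((address_space(1))) in-kernel */

#define DEFINE_COPY(func_name, copy_impl, user)                     \
static unsigned long                                                \
func_name(void *dst, const void user *buf, unsigned long len)      \
{                                                                   \
	return copy_impl(dst, (const void *)buf, len);              \
}

static unsigned long copy_plain(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0; /* bytes NOT copied, matching the kernel convention */
}

DEFINE_COPY(output_copy, copy_plain, )            /* kernel pointer */
DEFINE_COPY(output_copy_user, copy_plain, __user) /* user pointer   */

int main(void)
{
	char dst[8];
	output_copy(dst, "kern", 5);
	output_copy_user(dst, "user", 5);
	printf("%s\n", dst);
	return 0;
}
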
90612diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
90613index 6f3254e..e4c1fe4 100644
90614--- a/kernel/events/uprobes.c
90615+++ b/kernel/events/uprobes.c
90616@@ -1670,7 +1670,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
90617 {
90618 struct page *page;
90619 uprobe_opcode_t opcode;
90620- int result;
90621+ long result;
90622
90623 pagefault_disable();
90624 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
90625diff --git a/kernel/exit.c b/kernel/exit.c
90626index e5c4668..592d2e5 100644
90627--- a/kernel/exit.c
90628+++ b/kernel/exit.c
90629@@ -173,6 +173,10 @@ void release_task(struct task_struct * p)
90630 struct task_struct *leader;
90631 int zap_leader;
90632 repeat:
90633+#ifdef CONFIG_NET
90634+ gr_del_task_from_ip_table(p);
90635+#endif
90636+
90637 /* don't need to get the RCU readlock here - the process is dead and
90638 * can't be modifying its own credentials. But shut RCU-lockdep up */
90639 rcu_read_lock();
90640@@ -664,6 +668,8 @@ void do_exit(long code)
90641 struct task_struct *tsk = current;
90642 int group_dead;
90643
90644+ set_fs(USER_DS);
90645+
90646 profile_task_exit(tsk);
90647
90648 WARN_ON(blk_needs_flush_plug(tsk));
90649@@ -680,7 +686,6 @@ void do_exit(long code)
90650 * mm_release()->clear_child_tid() from writing to a user-controlled
90651 * kernel address.
90652 */
90653- set_fs(USER_DS);
90654
90655 ptrace_event(PTRACE_EVENT_EXIT, code);
90656
90657@@ -739,6 +744,9 @@ void do_exit(long code)
90658 tsk->exit_code = code;
90659 taskstats_exit(tsk, group_dead);
90660
90661+ gr_acl_handle_psacct(tsk, code);
90662+ gr_acl_handle_exit();
90663+
90664 exit_mm(tsk);
90665
90666 if (group_dead)
90667@@ -858,7 +866,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
90668 * Take down every thread in the group. This is called by fatal signals
90669 * as well as by sys_exit_group (below).
90670 */
90671-void
90672+__noreturn void
90673 do_group_exit(int exit_code)
90674 {
90675 struct signal_struct *sig = current->signal;
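
Note: the do_exit() hunk hoists set_fs(USER_DS) above profile_task_exit() and the ptrace event, so even a task that dies while its address limit is KERNEL_DS has it reset before any exit-path code dereferences user-supplied pointers. The sketch below shows only the general shape; set_fs()/USER_DS are kernel-only, so the stand-ins here are illustrative.

/* Generic shape of the do_exit() change: reset ambient address-limit
 * state as the very first step of teardown. */
#include <stdio.h>

enum addr_limit { USER_DS, KERNEL_DS };
static enum addr_limit fs = KERNEL_DS; /* worst case at exit entry */

static void set_fs(enum addr_limit l) { fs = l; }

static void clear_child_tid(void)
{
	/* writes to a user-controlled pointer: only safe under USER_DS */
	printf("tid write checked against %s\n",
	       fs == USER_DS ? "USER_DS" : "KERNEL_DS (dangerous)");
}

static void do_exit(void)
{
	set_fs(USER_DS);   /* moved first, as in the hunk above */
	/* ... profile_task_exit(), ptrace_event(), accounting ... */
	clear_child_tid();
}

int main(void) { do_exit(); return 0; }
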
90676diff --git a/kernel/fork.c b/kernel/fork.c
90677index 6a13c46..a623c8e 100644
90678--- a/kernel/fork.c
90679+++ b/kernel/fork.c
90680@@ -183,6 +183,48 @@ void thread_info_cache_init(void)
90681 # endif
90682 #endif
90683
90684+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
90685+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
90686+ int node, void **lowmem_stack)
90687+{
90688+ struct page *pages[THREAD_SIZE / PAGE_SIZE];
90689+ void *ret = NULL;
90690+ unsigned int i;
90691+
90692+ *lowmem_stack = alloc_thread_info_node(tsk, node);
90693+ if (*lowmem_stack == NULL)
90694+ goto out;
90695+
90696+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
90697+ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
90698+
90699+ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
90700+ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
90701+ if (ret == NULL) {
90702+ free_thread_info(*lowmem_stack);
90703+ *lowmem_stack = NULL;
90704+ }
90705+
90706+out:
90707+ return ret;
90708+}
90709+
90710+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
90711+{
90712+ unmap_process_stacks(tsk);
90713+}
90714+#else
90715+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
90716+ int node, void **lowmem_stack)
90717+{
90718+ return alloc_thread_info_node(tsk, node);
90719+}
90720+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
90721+{
90722+ free_thread_info(ti);
90723+}
90724+#endif
90725+
90726 /* SLAB cache for signal_struct structures (tsk->signal) */
90727 static struct kmem_cache *signal_cachep;
90728
90729@@ -201,18 +243,22 @@ struct kmem_cache *vm_area_cachep;
90730 /* SLAB cache for mm_struct structures (tsk->mm) */
90731 static struct kmem_cache *mm_cachep;
90732
90733-static void account_kernel_stack(struct thread_info *ti, int account)
90734+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
90735 {
90736+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
90737+ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
90738+#else
90739 struct zone *zone = page_zone(virt_to_page(ti));
90740+#endif
90741
90742 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
90743 }
90744
90745 void free_task(struct task_struct *tsk)
90746 {
90747- account_kernel_stack(tsk->stack, -1);
90748+ account_kernel_stack(tsk, tsk->stack, -1);
90749 arch_release_thread_info(tsk->stack);
90750- free_thread_info(tsk->stack);
90751+ gr_free_thread_info(tsk, tsk->stack);
90752 rt_mutex_debug_task_free(tsk);
90753 ftrace_graph_exit_task(tsk);
90754 put_seccomp_filter(tsk);
90755@@ -299,6 +345,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90756 struct task_struct *tsk;
90757 struct thread_info *ti;
90758 unsigned long *stackend;
90759+ void *lowmem_stack;
90760 int node = tsk_fork_get_node(orig);
90761 int err;
90762
90763@@ -306,7 +353,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90764 if (!tsk)
90765 return NULL;
90766
90767- ti = alloc_thread_info_node(tsk, node);
90768+ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
90769 if (!ti)
90770 goto free_tsk;
90771
90772@@ -315,6 +362,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90773 goto free_ti;
90774
90775 tsk->stack = ti;
90776+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
90777+ tsk->lowmem_stack = lowmem_stack;
90778+#endif
90779
90780 setup_thread_stack(tsk, orig);
90781 clear_user_return_notifier(tsk);
90782@@ -323,7 +373,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90783 *stackend = STACK_END_MAGIC; /* for overflow detection */
90784
90785 #ifdef CONFIG_CC_STACKPROTECTOR
90786- tsk->stack_canary = get_random_int();
90787+ tsk->stack_canary = pax_get_random_long();
90788 #endif
90789
90790 /*
90791@@ -337,24 +387,92 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90792 tsk->splice_pipe = NULL;
90793 tsk->task_frag.page = NULL;
90794
90795- account_kernel_stack(ti, 1);
90796+ account_kernel_stack(tsk, ti, 1);
90797
90798 return tsk;
90799
90800 free_ti:
90801- free_thread_info(ti);
90802+ gr_free_thread_info(tsk, ti);
90803 free_tsk:
90804 free_task_struct(tsk);
90805 return NULL;
90806 }
90807
90808 #ifdef CONFIG_MMU
90809-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90810+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
90811+{
90812+ struct vm_area_struct *tmp;
90813+ unsigned long charge;
90814+ struct file *file;
90815+ int retval;
90816+
90817+ charge = 0;
90818+ if (mpnt->vm_flags & VM_ACCOUNT) {
90819+ unsigned long len = vma_pages(mpnt);
90820+
90821+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
90822+ goto fail_nomem;
90823+ charge = len;
90824+ }
90825+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
90826+ if (!tmp)
90827+ goto fail_nomem;
90828+ *tmp = *mpnt;
90829+ tmp->vm_mm = mm;
90830+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
90831+ retval = vma_dup_policy(mpnt, tmp);
90832+ if (retval)
90833+ goto fail_nomem_policy;
90834+ if (anon_vma_fork(tmp, mpnt))
90835+ goto fail_nomem_anon_vma_fork;
90836+ tmp->vm_flags &= ~VM_LOCKED;
90837+ tmp->vm_next = tmp->vm_prev = NULL;
90838+ tmp->vm_mirror = NULL;
90839+ file = tmp->vm_file;
90840+ if (file) {
90841+ struct inode *inode = file_inode(file);
90842+ struct address_space *mapping = file->f_mapping;
90843+
90844+ get_file(file);
90845+ if (tmp->vm_flags & VM_DENYWRITE)
90846+ atomic_dec(&inode->i_writecount);
90847+ mutex_lock(&mapping->i_mmap_mutex);
90848+ if (tmp->vm_flags & VM_SHARED)
90849+ mapping->i_mmap_writable++;
90850+ flush_dcache_mmap_lock(mapping);
90851+ /* insert tmp into the share list, just after mpnt */
90852+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
90853+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
90854+ else
90855+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
90856+ flush_dcache_mmap_unlock(mapping);
90857+ mutex_unlock(&mapping->i_mmap_mutex);
90858+ }
90859+
90860+ /*
90861+ * Clear hugetlb-related page reserves for children. This only
90862+ * affects MAP_PRIVATE mappings. Faults generated by the child
90863+ * are not guaranteed to succeed, even if read-only
90864+ */
90865+ if (is_vm_hugetlb_page(tmp))
90866+ reset_vma_resv_huge_pages(tmp);
90867+
90868+ return tmp;
90869+
90870+fail_nomem_anon_vma_fork:
90871+ mpol_put(vma_policy(tmp));
90872+fail_nomem_policy:
90873+ kmem_cache_free(vm_area_cachep, tmp);
90874+fail_nomem:
90875+ vm_unacct_memory(charge);
90876+ return NULL;
90877+}
90878+
90879+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90880 {
90881 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
90882 struct rb_node **rb_link, *rb_parent;
90883 int retval;
90884- unsigned long charge;
90885
90886 uprobe_start_dup_mmap();
90887 down_write(&oldmm->mmap_sem);
90888@@ -383,55 +501,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90889
90890 prev = NULL;
90891 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
90892- struct file *file;
90893-
90894 if (mpnt->vm_flags & VM_DONTCOPY) {
90895 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
90896 -vma_pages(mpnt));
90897 continue;
90898 }
90899- charge = 0;
90900- if (mpnt->vm_flags & VM_ACCOUNT) {
90901- unsigned long len = vma_pages(mpnt);
90902-
90903- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
90904- goto fail_nomem;
90905- charge = len;
90906- }
90907- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
90908- if (!tmp)
90909- goto fail_nomem;
90910- *tmp = *mpnt;
90911- INIT_LIST_HEAD(&tmp->anon_vma_chain);
90912- retval = vma_dup_policy(mpnt, tmp);
90913- if (retval)
90914- goto fail_nomem_policy;
90915- tmp->vm_mm = mm;
90916- if (anon_vma_fork(tmp, mpnt))
90917- goto fail_nomem_anon_vma_fork;
90918- tmp->vm_flags &= ~VM_LOCKED;
90919- tmp->vm_next = tmp->vm_prev = NULL;
90920- file = tmp->vm_file;
90921- if (file) {
90922- struct inode *inode = file_inode(file);
90923- struct address_space *mapping = file->f_mapping;
90924-
90925- get_file(file);
90926- if (tmp->vm_flags & VM_DENYWRITE)
90927- atomic_dec(&inode->i_writecount);
90928- mutex_lock(&mapping->i_mmap_mutex);
90929- if (tmp->vm_flags & VM_SHARED)
90930- mapping->i_mmap_writable++;
90931- flush_dcache_mmap_lock(mapping);
90932- /* insert tmp into the share list, just after mpnt */
90933- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
90934- vma_nonlinear_insert(tmp,
90935- &mapping->i_mmap_nonlinear);
90936- else
90937- vma_interval_tree_insert_after(tmp, mpnt,
90938- &mapping->i_mmap);
90939- flush_dcache_mmap_unlock(mapping);
90940- mutex_unlock(&mapping->i_mmap_mutex);
90941+ tmp = dup_vma(mm, oldmm, mpnt);
90942+ if (!tmp) {
90943+ retval = -ENOMEM;
90944+ goto out;
90945 }
90946
90947 /*
90948@@ -463,6 +541,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90949 if (retval)
90950 goto out;
90951 }
90952+
90953+#ifdef CONFIG_PAX_SEGMEXEC
90954+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
90955+ struct vm_area_struct *mpnt_m;
90956+
90957+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
90958+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
90959+
90960+ if (!mpnt->vm_mirror)
90961+ continue;
90962+
90963+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
90964+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
90965+ mpnt->vm_mirror = mpnt_m;
90966+ } else {
90967+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
90968+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
90969+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
90970+ mpnt->vm_mirror->vm_mirror = mpnt;
90971+ }
90972+ }
90973+ BUG_ON(mpnt_m);
90974+ }
90975+#endif
90976+
90977 /* a new mm has just been created */
90978 arch_dup_mmap(oldmm, mm);
90979 retval = 0;
90980@@ -472,14 +575,6 @@ out:
90981 up_write(&oldmm->mmap_sem);
90982 uprobe_end_dup_mmap();
90983 return retval;
90984-fail_nomem_anon_vma_fork:
90985- mpol_put(vma_policy(tmp));
90986-fail_nomem_policy:
90987- kmem_cache_free(vm_area_cachep, tmp);
90988-fail_nomem:
90989- retval = -ENOMEM;
90990- vm_unacct_memory(charge);
90991- goto out;
90992 }
90993
90994 static inline int mm_alloc_pgd(struct mm_struct *mm)
90995@@ -698,8 +793,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
90996 return ERR_PTR(err);
90997
90998 mm = get_task_mm(task);
90999- if (mm && mm != current->mm &&
91000- !ptrace_may_access(task, mode)) {
91001+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
91002+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
91003 mmput(mm);
91004 mm = ERR_PTR(-EACCES);
91005 }
91006@@ -918,13 +1013,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
91007 spin_unlock(&fs->lock);
91008 return -EAGAIN;
91009 }
91010- fs->users++;
91011+ atomic_inc(&fs->users);
91012 spin_unlock(&fs->lock);
91013 return 0;
91014 }
91015 tsk->fs = copy_fs_struct(fs);
91016 if (!tsk->fs)
91017 return -ENOMEM;
91018+ /* Carry through gr_chroot_dentry and is_chrooted instead
91019+	   of recomputing them here. Already copied when the task struct
91020+ is duplicated. This allows pivot_root to not be treated as
91021+ a chroot
91022+ */
91023+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
91024+
91025 return 0;
91026 }
91027
91028@@ -1133,7 +1235,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
91029 * parts of the process environment (as per the clone
91030 * flags). The actual kick-off is left to the caller.
91031 */
91032-static struct task_struct *copy_process(unsigned long clone_flags,
91033+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
91034 unsigned long stack_start,
91035 unsigned long stack_size,
91036 int __user *child_tidptr,
91037@@ -1205,6 +1307,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
91038 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
91039 #endif
91040 retval = -EAGAIN;
91041+
91042+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
91043+
91044 if (atomic_read(&p->real_cred->user->processes) >=
91045 task_rlimit(p, RLIMIT_NPROC)) {
91046 if (p->real_cred->user != INIT_USER &&
91047@@ -1452,6 +1557,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
91048 goto bad_fork_free_pid;
91049 }
91050
91051+ /* synchronizes with gr_set_acls()
91052+ we need to call this past the point of no return for fork()
91053+ */
91054+ gr_copy_label(p);
91055+
91056 if (likely(p->pid)) {
91057 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
91058
91059@@ -1541,6 +1651,8 @@ bad_fork_cleanup_count:
91060 bad_fork_free:
91061 free_task(p);
91062 fork_out:
91063+ gr_log_forkfail(retval);
91064+
91065 return ERR_PTR(retval);
91066 }
91067
91068@@ -1602,6 +1714,7 @@ long do_fork(unsigned long clone_flags,
91069
91070 p = copy_process(clone_flags, stack_start, stack_size,
91071 child_tidptr, NULL, trace);
91072+ add_latent_entropy();
91073 /*
91074 * Do this prior waking up the new thread - the thread pointer
91075 * might get invalid after that point, if the thread exits quickly.
91076@@ -1618,6 +1731,8 @@ long do_fork(unsigned long clone_flags,
91077 if (clone_flags & CLONE_PARENT_SETTID)
91078 put_user(nr, parent_tidptr);
91079
91080+ gr_handle_brute_check();
91081+
91082 if (clone_flags & CLONE_VFORK) {
91083 p->vfork_done = &vfork;
91084 init_completion(&vfork);
91085@@ -1736,7 +1851,7 @@ void __init proc_caches_init(void)
91086 mm_cachep = kmem_cache_create("mm_struct",
91087 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
91088 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
91089- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
91090+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
91091 mmap_init();
91092 nsproxy_cache_init();
91093 }
91094@@ -1776,7 +1891,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
91095 return 0;
91096
91097 /* don't need lock here; in the worst case we'll do useless copy */
91098- if (fs->users == 1)
91099+ if (atomic_read(&fs->users) == 1)
91100 return 0;
91101
91102 *new_fsp = copy_fs_struct(fs);
91103@@ -1883,7 +1998,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
91104 fs = current->fs;
91105 spin_lock(&fs->lock);
91106 current->fs = new_fs;
91107- if (--fs->users)
91108+ gr_set_chroot_entries(current, &current->fs->root);
91109+ if (atomic_dec_return(&fs->users))
91110 new_fs = NULL;
91111 else
91112 new_fs = fs;
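
Note: the GRKERNSEC_KSTACKOVERFLOW hunks keep the lowmem allocation but remap each kernel stack through vmap() (VM_IOREMAP for THREAD_SIZE alignment), so an overrun faults in the virtually-mapped area instead of silently corrupting an adjacent lowmem object. Below is a userspace analogue of that layout using mmap plus a PROT_NONE guard page; the names are illustrative, not kernel APIs.

/* Userspace analogue of a vmap'ed stack with a guard page: writes
 * that run past the stack fault instead of corrupting a neighbour. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define THREAD_SIZE (4 * 4096)

static void *alloc_guarded_stack(void)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	/* one extra page below the stack acts as the guard */
	char *area = mmap(NULL, THREAD_SIZE + page, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED)
		return NULL;
	if (mprotect(area, page, PROT_NONE) != 0) { /* guard page */
		munmap(area, THREAD_SIZE + page);
		return NULL;
	}
	return area + page; /* usable stack starts above the guard */
}

int main(void)
{
	char *stack = alloc_guarded_stack();
	if (!stack)
		return 1;
	memset(stack, 0, THREAD_SIZE);      /* fine: inside the stack */
	printf("stack at %p, guard page just below\n", (void *)stack);
	/* stack[-1] = 0; would SIGSEGV on the guard page */
	return 0;
}
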
91113diff --git a/kernel/futex.c b/kernel/futex.c
91114index b632b5f..ca00da9 100644
91115--- a/kernel/futex.c
91116+++ b/kernel/futex.c
91117@@ -202,7 +202,7 @@ struct futex_pi_state {
91118 atomic_t refcount;
91119
91120 union futex_key key;
91121-};
91122+} __randomize_layout;
91123
91124 /**
91125 * struct futex_q - The hashed futex queue entry, one per waiting task
91126@@ -236,7 +236,7 @@ struct futex_q {
91127 struct rt_mutex_waiter *rt_waiter;
91128 union futex_key *requeue_pi_key;
91129 u32 bitset;
91130-};
91131+} __randomize_layout;
91132
91133 static const struct futex_q futex_q_init = {
91134 /* list gets initialized in queue_me()*/
91135@@ -394,6 +394,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
91136 struct page *page, *page_head;
91137 int err, ro = 0;
91138
91139+#ifdef CONFIG_PAX_SEGMEXEC
91140+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
91141+ return -EFAULT;
91142+#endif
91143+
91144 /*
91145 * The futex address must be "naturally" aligned.
91146 */
91147@@ -593,7 +598,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
91148
91149 static int get_futex_value_locked(u32 *dest, u32 __user *from)
91150 {
91151- int ret;
91152+ unsigned long ret;
91153
91154 pagefault_disable();
91155 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
91156@@ -3033,6 +3038,7 @@ static void __init futex_detect_cmpxchg(void)
91157 {
91158 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
91159 u32 curval;
91160+ mm_segment_t oldfs;
91161
91162 /*
91163 * This will fail and we want it. Some arch implementations do
91164@@ -3044,8 +3050,11 @@ static void __init futex_detect_cmpxchg(void)
91165 * implementation, the non-functional ones will return
91166 * -ENOSYS.
91167 */
91168+ oldfs = get_fs();
91169+ set_fs(USER_DS);
91170 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
91171 futex_cmpxchg_enabled = 1;
91172+ set_fs(oldfs);
91173 #endif
91174 }
91175
91176diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
91177index 55c8c93..9ba7ad6 100644
91178--- a/kernel/futex_compat.c
91179+++ b/kernel/futex_compat.c
91180@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
91181 return 0;
91182 }
91183
91184-static void __user *futex_uaddr(struct robust_list __user *entry,
91185+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
91186 compat_long_t futex_offset)
91187 {
91188 compat_uptr_t base = ptr_to_compat(entry);
91189diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
91190index b358a80..fc25240 100644
91191--- a/kernel/gcov/base.c
91192+++ b/kernel/gcov/base.c
91193@@ -114,11 +114,6 @@ void gcov_enable_events(void)
91194 }
91195
91196 #ifdef CONFIG_MODULES
91197-static inline int within(void *addr, void *start, unsigned long size)
91198-{
91199- return ((addr >= start) && (addr < start + size));
91200-}
91201-
91202 /* Update list and generate events when modules are unloaded. */
91203 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
91204 void *data)
91205@@ -133,7 +128,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
91206
91207 /* Remove entries located in module from linked list. */
91208 while ((info = gcov_info_next(info))) {
91209- if (within(info, mod->module_core, mod->core_size)) {
91210+ if (within_module_core_rw((unsigned long)info, mod)) {
91211 gcov_info_unlink(prev, info);
91212 if (gcov_events_enabled)
91213 gcov_event(GCOV_REMOVE, info);
91214diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
91215index 3ab2899..c6ad010 100644
91216--- a/kernel/hrtimer.c
91217+++ b/kernel/hrtimer.c
91218@@ -1449,7 +1449,7 @@ void hrtimer_peek_ahead_timers(void)
91219 local_irq_restore(flags);
91220 }
91221
91222-static void run_hrtimer_softirq(struct softirq_action *h)
91223+static __latent_entropy void run_hrtimer_softirq(void)
91224 {
91225 hrtimer_peek_ahead_timers();
91226 }
91227diff --git a/kernel/irq_work.c b/kernel/irq_work.c
91228index a82170e..5b01e7f 100644
91229--- a/kernel/irq_work.c
91230+++ b/kernel/irq_work.c
91231@@ -191,12 +191,13 @@ static int irq_work_cpu_notify(struct notifier_block *self,
91232 return NOTIFY_OK;
91233 }
91234
91235-static struct notifier_block cpu_notify;
91236+static struct notifier_block cpu_notify = {
91237+ .notifier_call = irq_work_cpu_notify,
91238+ .priority = 0,
91239+};
91240
91241 static __init int irq_work_init_cpu_notifier(void)
91242 {
91243- cpu_notify.notifier_call = irq_work_cpu_notify;
91244- cpu_notify.priority = 0;
91245 register_cpu_notifier(&cpu_notify);
91246 return 0;
91247 }
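
Note: replacing the runtime field assignments with a designated initializer means cpu_notify is fully formed at build time, the pattern grsecurity relies on so function-pointer structures can be constified into read-only memory. A small sketch of the pattern; notifier_block is re-declared here for illustration, it is not the kernel's definition.

/* Static designated initialization: no writable init window needed,
 * so the struct can carry const and live in a read-only section. */
#include <stdio.h>

struct notifier_block {
	int (*notifier_call)(struct notifier_block *, unsigned long, void *);
	int priority;
};

static int cpu_notify_cb(struct notifier_block *nb, unsigned long action,
			 void *data)
{
	(void)nb; (void)data;
	printf("cpu event %lu\n", action);
	return 0;
}

static const struct notifier_block cpu_notify = {
	.notifier_call = cpu_notify_cb,
	.priority      = 0,
};

int main(void)
{
	return cpu_notify.notifier_call((struct notifier_block *)&cpu_notify,
					1, NULL);
}
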
91248diff --git a/kernel/jump_label.c b/kernel/jump_label.c
91249index 9019f15..9a3c42e 100644
91250--- a/kernel/jump_label.c
91251+++ b/kernel/jump_label.c
91252@@ -14,6 +14,7 @@
91253 #include <linux/err.h>
91254 #include <linux/static_key.h>
91255 #include <linux/jump_label_ratelimit.h>
91256+#include <linux/mm.h>
91257
91258 #ifdef HAVE_JUMP_LABEL
91259
91260@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
91261
91262 size = (((unsigned long)stop - (unsigned long)start)
91263 / sizeof(struct jump_entry));
91264+ pax_open_kernel();
91265 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
91266+ pax_close_kernel();
91267 }
91268
91269 static void jump_label_update(struct static_key *key, int enable);
91270@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
91271 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
91272 struct jump_entry *iter;
91273
91274+ pax_open_kernel();
91275 for (iter = iter_start; iter < iter_stop; iter++) {
91276 if (within_module_init(iter->code, mod))
91277 iter->code = 0;
91278 }
91279+ pax_close_kernel();
91280 }
91281
91282 static int
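
Note: pax_open_kernel()/pax_close_kernel() bracket the in-place sort and the init-entry invalidation because under KERNEXEC the jump_entry tables are read-only; the pair opens a transient write window. The program below is a userspace analogue built on mprotect; the pax_* primitives themselves are kernel-only.

/* Userspace analogue of pax_open_kernel()/pax_close_kernel():
 * data normally lives read-only; writes happen in a short window. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static int cmp_int(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

int main(void)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	int *table = mmap(NULL, page, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (table == MAP_FAILED)
		return 1;
	int init[] = { 5, 1, 4, 2, 3 };
	memcpy(table, init, sizeof(init));

	mprotect(table, page, PROT_READ);              /* steady state: RO */

	mprotect(table, page, PROT_READ | PROT_WRITE); /* "open kernel"    */
	qsort(table, 5, sizeof(int), cmp_int);         /* the only write   */
	mprotect(table, page, PROT_READ);              /* "close kernel"   */

	for (int i = 0; i < 5; i++)
		printf("%d ", table[i]);
	putchar('\n');
	return 0;
}
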
91283diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
91284index cb0cf37..b69e161 100644
91285--- a/kernel/kallsyms.c
91286+++ b/kernel/kallsyms.c
91287@@ -11,6 +11,9 @@
91288 * Changed the compression method from stem compression to "table lookup"
91289 * compression (see scripts/kallsyms.c for a more complete description)
91290 */
91291+#ifdef CONFIG_GRKERNSEC_HIDESYM
91292+#define __INCLUDED_BY_HIDESYM 1
91293+#endif
91294 #include <linux/kallsyms.h>
91295 #include <linux/module.h>
91296 #include <linux/init.h>
91297@@ -54,12 +57,33 @@ extern const unsigned long kallsyms_markers[] __weak;
91298
91299 static inline int is_kernel_inittext(unsigned long addr)
91300 {
91301+ if (system_state != SYSTEM_BOOTING)
91302+ return 0;
91303+
91304 if (addr >= (unsigned long)_sinittext
91305 && addr <= (unsigned long)_einittext)
91306 return 1;
91307 return 0;
91308 }
91309
91310+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91311+#ifdef CONFIG_MODULES
91312+static inline int is_module_text(unsigned long addr)
91313+{
91314+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
91315+ return 1;
91316+
91317+ addr = ktla_ktva(addr);
91318+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
91319+}
91320+#else
91321+static inline int is_module_text(unsigned long addr)
91322+{
91323+ return 0;
91324+}
91325+#endif
91326+#endif
91327+
91328 static inline int is_kernel_text(unsigned long addr)
91329 {
91330 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
91331@@ -70,13 +94,28 @@ static inline int is_kernel_text(unsigned long addr)
91332
91333 static inline int is_kernel(unsigned long addr)
91334 {
91335+
91336+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91337+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
91338+ return 1;
91339+
91340+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
91341+#else
91342 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
91343+#endif
91344+
91345 return 1;
91346 return in_gate_area_no_mm(addr);
91347 }
91348
91349 static int is_ksym_addr(unsigned long addr)
91350 {
91351+
91352+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91353+ if (is_module_text(addr))
91354+ return 0;
91355+#endif
91356+
91357 if (all_var)
91358 return is_kernel(addr);
91359
91360@@ -481,7 +520,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
91361
91362 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
91363 {
91364- iter->name[0] = '\0';
91365 iter->nameoff = get_symbol_offset(new_pos);
91366 iter->pos = new_pos;
91367 }
91368@@ -529,6 +567,11 @@ static int s_show(struct seq_file *m, void *p)
91369 {
91370 struct kallsym_iter *iter = m->private;
91371
91372+#ifdef CONFIG_GRKERNSEC_HIDESYM
91373+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
91374+ return 0;
91375+#endif
91376+
91377 /* Some debugging symbols have no name. Ignore them. */
91378 if (!iter->name[0])
91379 return 0;
91380@@ -542,6 +585,7 @@ static int s_show(struct seq_file *m, void *p)
91381 */
91382 type = iter->exported ? toupper(iter->type) :
91383 tolower(iter->type);
91384+
91385 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
91386 type, iter->name, iter->module_name);
91387 } else
91388@@ -567,7 +611,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
91389 struct kallsym_iter *iter;
91390 int ret;
91391
91392- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
91393+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
91394 if (!iter)
91395 return -ENOMEM;
91396 reset_iter(iter, 0);
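
Note: under GRKERNSEC_HIDESYM, s_show() above returns 0 for non-root readers, so /proc/kallsyms simply appears empty to them; the hunk also switches the format to %pK, which stock kernels censor to zeros when kptr_restrict is set. A quick userspace check distinguishing the two behaviours:

/* Check whether this kernel exposes symbol addresses to us. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/kallsyms", "r");
	char line[256];

	if (!f) {
		perror("/proc/kallsyms");
		return 1;
	}
	if (!fgets(line, sizeof(line), f)) {
		puts("no symbols visible (HIDESYM-style hiding)");
	} else if (!strncmp(line, "0000000000000000", 16) ||
		   !strncmp(line, "00000000", 8)) {
		puts("symbols listed but addresses censored (%pK)");
	} else {
		printf("first symbol: %s", line);
	}
	fclose(f);
	return 0;
}
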
91397diff --git a/kernel/kcmp.c b/kernel/kcmp.c
91398index e30ac0f..3528cac 100644
91399--- a/kernel/kcmp.c
91400+++ b/kernel/kcmp.c
91401@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
91402 struct task_struct *task1, *task2;
91403 int ret;
91404
91405+#ifdef CONFIG_GRKERNSEC
91406+ return -ENOSYS;
91407+#endif
91408+
91409 rcu_read_lock();
91410
91411 /*
91412diff --git a/kernel/kexec.c b/kernel/kexec.c
91413index 4b8f0c9..fffd0df 100644
91414--- a/kernel/kexec.c
91415+++ b/kernel/kexec.c
91416@@ -1045,7 +1045,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
91417 compat_ulong_t, flags)
91418 {
91419 struct compat_kexec_segment in;
91420- struct kexec_segment out, __user *ksegments;
91421+ struct kexec_segment out;
91422+ struct kexec_segment __user *ksegments;
91423 unsigned long i, result;
91424
91425 /* Don't allow clients that don't understand the native
91426diff --git a/kernel/kmod.c b/kernel/kmod.c
91427index 8637e04..8b1d0d8 100644
91428--- a/kernel/kmod.c
91429+++ b/kernel/kmod.c
91430@@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
91431 kfree(info->argv);
91432 }
91433
91434-static int call_modprobe(char *module_name, int wait)
91435+static int call_modprobe(char *module_name, char *module_param, int wait)
91436 {
91437 struct subprocess_info *info;
91438 static char *envp[] = {
91439@@ -85,7 +85,7 @@ static int call_modprobe(char *module_name, int wait)
91440 NULL
91441 };
91442
91443- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
91444+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
91445 if (!argv)
91446 goto out;
91447
91448@@ -97,7 +97,8 @@ static int call_modprobe(char *module_name, int wait)
91449 argv[1] = "-q";
91450 argv[2] = "--";
91451 argv[3] = module_name; /* check free_modprobe_argv() */
91452- argv[4] = NULL;
91453+ argv[4] = module_param;
91454+ argv[5] = NULL;
91455
91456 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
91457 NULL, free_modprobe_argv, NULL);
91458@@ -129,9 +130,8 @@ out:
91459 * If module auto-loading support is disabled then this function
91460 * becomes a no-operation.
91461 */
91462-int __request_module(bool wait, const char *fmt, ...)
91463+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
91464 {
91465- va_list args;
91466 char module_name[MODULE_NAME_LEN];
91467 unsigned int max_modprobes;
91468 int ret;
91469@@ -150,9 +150,7 @@ int __request_module(bool wait, const char *fmt, ...)
91470 if (!modprobe_path[0])
91471 return 0;
91472
91473- va_start(args, fmt);
91474- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
91475- va_end(args);
91476+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
91477 if (ret >= MODULE_NAME_LEN)
91478 return -ENAMETOOLONG;
91479
91480@@ -160,6 +158,20 @@ int __request_module(bool wait, const char *fmt, ...)
91481 if (ret)
91482 return ret;
91483
91484+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91485+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
91486+		/* hack to work around consolekit/udisks stupidity */
91487+ read_lock(&tasklist_lock);
91488+ if (!strcmp(current->comm, "mount") &&
91489+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
91490+ read_unlock(&tasklist_lock);
91491+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
91492+ return -EPERM;
91493+ }
91494+ read_unlock(&tasklist_lock);
91495+ }
91496+#endif
91497+
91498 /* If modprobe needs a service that is in a module, we get a recursive
91499 * loop. Limit the number of running kmod threads to max_threads/2 or
91500 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
91501@@ -188,11 +200,52 @@ int __request_module(bool wait, const char *fmt, ...)
91502
91503 trace_module_request(module_name, wait, _RET_IP_);
91504
91505- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
91506+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
91507
91508 atomic_dec(&kmod_concurrent);
91509 return ret;
91510 }
91511+
91512+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
91513+{
91514+ va_list args;
91515+ int ret;
91516+
91517+ va_start(args, fmt);
91518+ ret = ____request_module(wait, module_param, fmt, args);
91519+ va_end(args);
91520+
91521+ return ret;
91522+}
91523+
91524+int __request_module(bool wait, const char *fmt, ...)
91525+{
91526+ va_list args;
91527+ int ret;
91528+
91529+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91530+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
91531+ char module_param[MODULE_NAME_LEN];
91532+
91533+ memset(module_param, 0, sizeof(module_param));
91534+
91535+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
91536+
91537+ va_start(args, fmt);
91538+ ret = ____request_module(wait, module_param, fmt, args);
91539+ va_end(args);
91540+
91541+ return ret;
91542+ }
91543+#endif
91544+
91545+ va_start(args, fmt);
91546+ ret = ____request_module(wait, NULL, fmt, args);
91547+ va_end(args);
91548+
91549+ return ret;
91550+}
91551+
91552 EXPORT_SYMBOL(__request_module);
91553 #endif /* CONFIG_MODULES */
91554
91555@@ -218,6 +271,20 @@ static int ____call_usermodehelper(void *data)
91556 */
91557 set_user_nice(current, 0);
91558
91559+#ifdef CONFIG_GRKERNSEC
91560+	/* this is race-free as far as userland is concerned because we copied
91561+ out the path to be used prior to this point and are now operating
91562+ on that copy
91563+ */
91564+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
91565+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
91566+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
91567+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path);
91568+ retval = -EPERM;
91569+ goto fail;
91570+ }
91571+#endif
91572+
91573 retval = -ENOMEM;
91574 new = prepare_kernel_cred(current);
91575 if (!new)
91576@@ -240,8 +307,8 @@ static int ____call_usermodehelper(void *data)
91577 commit_creds(new);
91578
91579 retval = do_execve(getname_kernel(sub_info->path),
91580- (const char __user *const __user *)sub_info->argv,
91581- (const char __user *const __user *)sub_info->envp);
91582+ (const char __user *const __force_user *)sub_info->argv,
91583+ (const char __user *const __force_user *)sub_info->envp);
91584 if (!retval)
91585 return 0;
91586
91587@@ -260,6 +327,10 @@ static int call_helper(void *data)
91588
91589 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
91590 {
91591+#ifdef CONFIG_GRKERNSEC
91592+ kfree(info->path);
91593+ info->path = info->origpath;
91594+#endif
91595 if (info->cleanup)
91596 (*info->cleanup)(info);
91597 kfree(info);
91598@@ -300,7 +371,7 @@ static int wait_for_helper(void *data)
91599 *
91600 * Thus the __user pointer cast is valid here.
91601 */
91602- sys_wait4(pid, (int __user *)&ret, 0, NULL);
91603+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
91604
91605 /*
91606 * If ret is 0, either ____call_usermodehelper failed and the
91607@@ -539,7 +610,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
91608 goto out;
91609
91610 INIT_WORK(&sub_info->work, __call_usermodehelper);
91611+#ifdef CONFIG_GRKERNSEC
91612+ sub_info->origpath = path;
91613+ sub_info->path = kstrdup(path, gfp_mask);
91614+#else
91615 sub_info->path = path;
91616+#endif
91617 sub_info->argv = argv;
91618 sub_info->envp = envp;
91619
91620@@ -647,7 +723,7 @@ EXPORT_SYMBOL(call_usermodehelper);
91621 static int proc_cap_handler(struct ctl_table *table, int write,
91622 void __user *buffer, size_t *lenp, loff_t *ppos)
91623 {
91624- struct ctl_table t;
91625+ ctl_table_no_const t;
91626 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
91627 kernel_cap_t new_cap;
91628 int err, i;
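
Note: the MODHARDEN hunks widen call_modprobe()'s argv from 5 to 6 slots so a per-user marker built from "grsec_modharden_normal%u_" can ride along to modprobe, letting policy tell unprivileged-triggered module loads apart from root's. The sketch below reproduces only the argv construction; /bin/echo stands in for /sbin/modprobe, and "af_packet" is just an example module name.

/* Sketch of the widened modprobe argv: one extra slot carries the
 * grsec_modharden marker for loads triggered by a non-root user. */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	char module_param[64];
	uid_t uid = getuid();

	memset(module_param, 0, sizeof(module_param));
	snprintf(module_param, sizeof(module_param) - 1,
		 "grsec_modharden_normal%u_", (unsigned)uid);

	char *argv[6];
	argv[0] = "/bin/echo";               /* stand-in for modprobe_path */
	argv[1] = "-q";
	argv[2] = "--";
	argv[3] = "af_packet";               /* the requested module       */
	argv[4] = uid ? module_param : NULL; /* marker only for non-root   */
	argv[5] = NULL;

	if (fork() == 0)
		execv(argv[0], argv);
	wait(NULL);
	return 0;
}
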
91629diff --git a/kernel/kprobes.c b/kernel/kprobes.c
91630index 734e9a7..0a313b8 100644
91631--- a/kernel/kprobes.c
91632+++ b/kernel/kprobes.c
91633@@ -31,6 +31,9 @@
91634 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
91635 * <prasanna@in.ibm.com> added function-return probes.
91636 */
91637+#ifdef CONFIG_GRKERNSEC_HIDESYM
91638+#define __INCLUDED_BY_HIDESYM 1
91639+#endif
91640 #include <linux/kprobes.h>
91641 #include <linux/hash.h>
91642 #include <linux/init.h>
91643@@ -122,12 +125,12 @@ enum kprobe_slot_state {
91644
91645 static void *alloc_insn_page(void)
91646 {
91647- return module_alloc(PAGE_SIZE);
91648+ return module_alloc_exec(PAGE_SIZE);
91649 }
91650
91651 static void free_insn_page(void *page)
91652 {
91653- module_free(NULL, page);
91654+ module_free_exec(NULL, page);
91655 }
91656
91657 struct kprobe_insn_cache kprobe_insn_slots = {
91658@@ -2176,11 +2179,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
91659 kprobe_type = "k";
91660
91661 if (sym)
91662- seq_printf(pi, "%p %s %s+0x%x %s ",
91663+ seq_printf(pi, "%pK %s %s+0x%x %s ",
91664 p->addr, kprobe_type, sym, offset,
91665 (modname ? modname : " "));
91666 else
91667- seq_printf(pi, "%p %s %p ",
91668+ seq_printf(pi, "%pK %s %pK ",
91669 p->addr, kprobe_type, p->addr);
91670
91671 if (!pp)
91672diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
91673index 6683cce..daf8999 100644
91674--- a/kernel/ksysfs.c
91675+++ b/kernel/ksysfs.c
91676@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
91677 {
91678 if (count+1 > UEVENT_HELPER_PATH_LEN)
91679 return -ENOENT;
91680+ if (!capable(CAP_SYS_ADMIN))
91681+ return -EPERM;
91682 memcpy(uevent_helper, buf, count);
91683 uevent_helper[count] = '\0';
91684 if (count && uevent_helper[count-1] == '\n')
91685@@ -176,7 +178,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
91686 return count;
91687 }
91688
91689-static struct bin_attribute notes_attr = {
91690+static bin_attribute_no_const notes_attr __read_only = {
91691 .attr = {
91692 .name = "notes",
91693 .mode = S_IRUGO,
91694diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
91695index d24e433..fa04fb8 100644
91696--- a/kernel/locking/lockdep.c
91697+++ b/kernel/locking/lockdep.c
91698@@ -597,6 +597,10 @@ static int static_obj(void *obj)
91699 end = (unsigned long) &_end,
91700 addr = (unsigned long) obj;
91701
91702+#ifdef CONFIG_PAX_KERNEXEC
91703+ start = ktla_ktva(start);
91704+#endif
91705+
91706 /*
91707 * static variable?
91708 */
91709@@ -738,6 +742,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
91710 if (!static_obj(lock->key)) {
91711 debug_locks_off();
91712 printk("INFO: trying to register non-static key.\n");
91713+ printk("lock:%pS key:%pS.\n", lock, lock->key);
91714 printk("the code is fine but needs lockdep annotation.\n");
91715 printk("turning off the locking correctness validator.\n");
91716 dump_stack();
91717@@ -3079,7 +3084,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
91718 if (!class)
91719 return 0;
91720 }
91721- atomic_inc((atomic_t *)&class->ops);
91722+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
91723 if (very_verbose(class)) {
91724 printk("\nacquire class [%p] %s", class->key, class->name);
91725 if (class->name_version > 1)
91726diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
91727index ef43ac4..2720dfa 100644
91728--- a/kernel/locking/lockdep_proc.c
91729+++ b/kernel/locking/lockdep_proc.c
91730@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
91731 return 0;
91732 }
91733
91734- seq_printf(m, "%p", class->key);
91735+ seq_printf(m, "%pK", class->key);
91736 #ifdef CONFIG_DEBUG_LOCKDEP
91737 seq_printf(m, " OPS:%8ld", class->ops);
91738 #endif
91739@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
91740
91741 list_for_each_entry(entry, &class->locks_after, entry) {
91742 if (entry->distance == 1) {
91743- seq_printf(m, " -> [%p] ", entry->class->key);
91744+ seq_printf(m, " -> [%pK] ", entry->class->key);
91745 print_name(m, entry->class);
91746 seq_puts(m, "\n");
91747 }
91748@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
91749 if (!class->key)
91750 continue;
91751
91752- seq_printf(m, "[%p] ", class->key);
91753+ seq_printf(m, "[%pK] ", class->key);
91754 print_name(m, class);
91755 seq_puts(m, "\n");
91756 }
91757@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
91758 if (!i)
91759 seq_line(m, '-', 40-namelen, namelen);
91760
91761- snprintf(ip, sizeof(ip), "[<%p>]",
91762+ snprintf(ip, sizeof(ip), "[<%pK>]",
91763 (void *)class->contention_point[i]);
91764 seq_printf(m, "%40s %14lu %29s %pS\n",
91765 name, stats->contention_point[i],
91766@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
91767 if (!i)
91768 seq_line(m, '-', 40-namelen, namelen);
91769
91770- snprintf(ip, sizeof(ip), "[<%p>]",
91771+ snprintf(ip, sizeof(ip), "[<%pK>]",
91772 (void *)class->contending_point[i]);
91773 seq_printf(m, "%40s %14lu %29s %pS\n",
91774 name, stats->contending_point[i],
91775diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
91776index be9ee15..39d6233 100644
91777--- a/kernel/locking/mcs_spinlock.c
91778+++ b/kernel/locking/mcs_spinlock.c
91779@@ -102,7 +102,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
91780
91781 prev = decode_cpu(old);
91782 node->prev = prev;
91783- ACCESS_ONCE(prev->next) = node;
91784+ ACCESS_ONCE_RW(prev->next) = node;
91785
91786 /*
91787 * Normally @prev is untouchable after the above store; because at that
91788@@ -174,8 +174,8 @@ unqueue:
91789 * it will wait in Step-A.
91790 */
91791
91792- ACCESS_ONCE(next->prev) = prev;
91793- ACCESS_ONCE(prev->next) = next;
91794+ ACCESS_ONCE_RW(next->prev) = prev;
91795+ ACCESS_ONCE_RW(prev->next) = next;
91796
91797 return false;
91798 }
91799@@ -197,13 +197,13 @@ void osq_unlock(struct optimistic_spin_queue *lock)
91800 node = this_cpu_ptr(&osq_node);
91801 next = xchg(&node->next, NULL);
91802 if (next) {
91803- ACCESS_ONCE(next->locked) = 1;
91804+ ACCESS_ONCE_RW(next->locked) = 1;
91805 return;
91806 }
91807
91808 next = osq_wait_next(lock, node, NULL);
91809 if (next)
91810- ACCESS_ONCE(next->locked) = 1;
91811+ ACCESS_ONCE_RW(next->locked) = 1;
91812 }
91813
91814 #endif
91815diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
91816index 74356dc..48dd5e1 100644
91817--- a/kernel/locking/mcs_spinlock.h
91818+++ b/kernel/locking/mcs_spinlock.h
91819@@ -81,7 +81,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
91820 */
91821 return;
91822 }
91823- ACCESS_ONCE(prev->next) = node;
91824+ ACCESS_ONCE_RW(prev->next) = node;
91825
91826 /* Wait until the lock holder passes the lock down. */
91827 arch_mcs_spin_lock_contended(&node->locked);
91828diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
91829index 5cf6731..ce3bc5a 100644
91830--- a/kernel/locking/mutex-debug.c
91831+++ b/kernel/locking/mutex-debug.c
91832@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
91833 }
91834
91835 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
91836- struct thread_info *ti)
91837+ struct task_struct *task)
91838 {
91839 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
91840
91841 /* Mark the current thread as blocked on the lock: */
91842- ti->task->blocked_on = waiter;
91843+ task->blocked_on = waiter;
91844 }
91845
91846 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
91847- struct thread_info *ti)
91848+ struct task_struct *task)
91849 {
91850 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
91851- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
91852- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
91853- ti->task->blocked_on = NULL;
91854+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
91855+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
91856+ task->blocked_on = NULL;
91857
91858 list_del_init(&waiter->list);
91859 waiter->task = NULL;
91860diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
91861index 0799fd3..d06ae3b 100644
91862--- a/kernel/locking/mutex-debug.h
91863+++ b/kernel/locking/mutex-debug.h
91864@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
91865 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
91866 extern void debug_mutex_add_waiter(struct mutex *lock,
91867 struct mutex_waiter *waiter,
91868- struct thread_info *ti);
91869+ struct task_struct *task);
91870 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
91871- struct thread_info *ti);
91872+ struct task_struct *task);
91873 extern void debug_mutex_unlock(struct mutex *lock);
91874 extern void debug_mutex_init(struct mutex *lock, const char *name,
91875 struct lock_class_key *key);
91876diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
91877index acca2c1..ddeaea8 100644
91878--- a/kernel/locking/mutex.c
91879+++ b/kernel/locking/mutex.c
91880@@ -490,7 +490,7 @@ slowpath:
91881 goto skip_wait;
91882
91883 debug_mutex_lock_common(lock, &waiter);
91884- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
91885+ debug_mutex_add_waiter(lock, &waiter, task);
91886
91887 /* add waiting tasks to the end of the waitqueue (FIFO): */
91888 list_add_tail(&waiter.list, &lock->wait_list);
91889@@ -534,7 +534,7 @@ slowpath:
91890 schedule_preempt_disabled();
91891 spin_lock_mutex(&lock->wait_lock, flags);
91892 }
91893- mutex_remove_waiter(lock, &waiter, current_thread_info());
91894+ mutex_remove_waiter(lock, &waiter, task);
91895 /* set it to 0 if there are no waiters left: */
91896 if (likely(list_empty(&lock->wait_list)))
91897 atomic_set(&lock->count, 0);
91898@@ -571,7 +571,7 @@ skip_wait:
91899 return 0;
91900
91901 err:
91902- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
91903+ mutex_remove_waiter(lock, &waiter, task);
91904 spin_unlock_mutex(&lock->wait_lock, flags);
91905 debug_mutex_free_waiter(&waiter);
91906 mutex_release(&lock->dep_map, 1, ip);
91907diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
91908index 1d96dd0..994ff19 100644
91909--- a/kernel/locking/rtmutex-tester.c
91910+++ b/kernel/locking/rtmutex-tester.c
91911@@ -22,7 +22,7 @@
91912 #define MAX_RT_TEST_MUTEXES 8
91913
91914 static spinlock_t rttest_lock;
91915-static atomic_t rttest_event;
91916+static atomic_unchecked_t rttest_event;
91917
91918 struct test_thread_data {
91919 int opcode;
91920@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91921
91922 case RTTEST_LOCKCONT:
91923 td->mutexes[td->opdata] = 1;
91924- td->event = atomic_add_return(1, &rttest_event);
91925+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91926 return 0;
91927
91928 case RTTEST_RESET:
91929@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91930 return 0;
91931
91932 case RTTEST_RESETEVENT:
91933- atomic_set(&rttest_event, 0);
91934+ atomic_set_unchecked(&rttest_event, 0);
91935 return 0;
91936
91937 default:
91938@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91939 return ret;
91940
91941 td->mutexes[id] = 1;
91942- td->event = atomic_add_return(1, &rttest_event);
91943+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91944 rt_mutex_lock(&mutexes[id]);
91945- td->event = atomic_add_return(1, &rttest_event);
91946+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91947 td->mutexes[id] = 4;
91948 return 0;
91949
91950@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91951 return ret;
91952
91953 td->mutexes[id] = 1;
91954- td->event = atomic_add_return(1, &rttest_event);
91955+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91956 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
91957- td->event = atomic_add_return(1, &rttest_event);
91958+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91959 td->mutexes[id] = ret ? 0 : 4;
91960 return ret ? -EINTR : 0;
91961
91962@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91963 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
91964 return ret;
91965
91966- td->event = atomic_add_return(1, &rttest_event);
91967+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91968 rt_mutex_unlock(&mutexes[id]);
91969- td->event = atomic_add_return(1, &rttest_event);
91970+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91971 td->mutexes[id] = 0;
91972 return 0;
91973
91974@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
91975 break;
91976
91977 td->mutexes[dat] = 2;
91978- td->event = atomic_add_return(1, &rttest_event);
91979+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91980 break;
91981
91982 default:
91983@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
91984 return;
91985
91986 td->mutexes[dat] = 3;
91987- td->event = atomic_add_return(1, &rttest_event);
91988+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91989 break;
91990
91991 case RTTEST_LOCKNOWAIT:
91992@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
91993 return;
91994
91995 td->mutexes[dat] = 1;
91996- td->event = atomic_add_return(1, &rttest_event);
91997+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91998 return;
91999
92000 default:
92001diff --git a/kernel/module.c b/kernel/module.c
92002index 81e727c..a8ea6f9 100644
92003--- a/kernel/module.c
92004+++ b/kernel/module.c
92005@@ -61,6 +61,7 @@
92006 #include <linux/pfn.h>
92007 #include <linux/bsearch.h>
92008 #include <linux/fips.h>
92009+#include <linux/grsecurity.h>
92010 #include <uapi/linux/module.h>
92011 #include "module-internal.h"
92012
92013@@ -157,7 +158,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
92014
92015 /* Bounds of module allocation, for speeding __module_address.
92016 * Protected by module_mutex. */
92017-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
92018+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
92019+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
92020
92021 int register_module_notifier(struct notifier_block * nb)
92022 {
92023@@ -324,7 +326,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
92024 return true;
92025
92026 list_for_each_entry_rcu(mod, &modules, list) {
92027- struct symsearch arr[] = {
92028+ struct symsearch modarr[] = {
92029 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
92030 NOT_GPL_ONLY, false },
92031 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
92032@@ -349,7 +351,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
92033 if (mod->state == MODULE_STATE_UNFORMED)
92034 continue;
92035
92036- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
92037+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
92038 return true;
92039 }
92040 return false;
92041@@ -489,7 +491,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
92042 if (!pcpusec->sh_size)
92043 return 0;
92044
92045- if (align > PAGE_SIZE) {
92046+ if (align-1 >= PAGE_SIZE) {
92047 pr_warn("%s: per-cpu alignment %li > %li\n",
92048 mod->name, align, PAGE_SIZE);
92049 align = PAGE_SIZE;
92050@@ -1061,7 +1063,7 @@ struct module_attribute module_uevent =
92051 static ssize_t show_coresize(struct module_attribute *mattr,
92052 struct module_kobject *mk, char *buffer)
92053 {
92054- return sprintf(buffer, "%u\n", mk->mod->core_size);
92055+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
92056 }
92057
92058 static struct module_attribute modinfo_coresize =
92059@@ -1070,7 +1072,7 @@ static struct module_attribute modinfo_coresize =
92060 static ssize_t show_initsize(struct module_attribute *mattr,
92061 struct module_kobject *mk, char *buffer)
92062 {
92063- return sprintf(buffer, "%u\n", mk->mod->init_size);
92064+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
92065 }
92066
92067 static struct module_attribute modinfo_initsize =
92068@@ -1162,12 +1164,29 @@ static int check_version(Elf_Shdr *sechdrs,
92069 goto bad_version;
92070 }
92071
92072+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
92073+ /*
92074+ * avoid potentially printing gibberish on attempted load
92075+ * of a module randomized with a different seed
92076+ */
92077+ pr_warn("no symbol version for %s\n", symname);
92078+#else
92079 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
92080+#endif
92081 return 0;
92082
92083 bad_version:
92084+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
92085+ /*
92086+ * avoid potentially printing gibberish on attempted load
92087+ * of a module randomized with a different seed
92088+ */
92089+ printk("attempted module disagrees about version of symbol %s\n",
92090+ symname);
92091+#else
92092 printk("%s: disagrees about version of symbol %s\n",
92093 mod->name, symname);
92094+#endif
92095 return 0;
92096 }
92097
92098@@ -1283,7 +1302,7 @@ resolve_symbol_wait(struct module *mod,
92099 */
92100 #ifdef CONFIG_SYSFS
92101
92102-#ifdef CONFIG_KALLSYMS
92103+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
92104 static inline bool sect_empty(const Elf_Shdr *sect)
92105 {
92106 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
92107@@ -1423,7 +1442,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
92108 {
92109 unsigned int notes, loaded, i;
92110 struct module_notes_attrs *notes_attrs;
92111- struct bin_attribute *nattr;
92112+ bin_attribute_no_const *nattr;
92113
92114 /* failed to create section attributes, so can't create notes */
92115 if (!mod->sect_attrs)
92116@@ -1535,7 +1554,7 @@ static void del_usage_links(struct module *mod)
92117 static int module_add_modinfo_attrs(struct module *mod)
92118 {
92119 struct module_attribute *attr;
92120- struct module_attribute *temp_attr;
92121+ module_attribute_no_const *temp_attr;
92122 int error = 0;
92123 int i;
92124
92125@@ -1756,21 +1775,21 @@ static void set_section_ro_nx(void *base,
92126
92127 static void unset_module_core_ro_nx(struct module *mod)
92128 {
92129- set_page_attributes(mod->module_core + mod->core_text_size,
92130- mod->module_core + mod->core_size,
92131+ set_page_attributes(mod->module_core_rw,
92132+ mod->module_core_rw + mod->core_size_rw,
92133 set_memory_x);
92134- set_page_attributes(mod->module_core,
92135- mod->module_core + mod->core_ro_size,
92136+ set_page_attributes(mod->module_core_rx,
92137+ mod->module_core_rx + mod->core_size_rx,
92138 set_memory_rw);
92139 }
92140
92141 static void unset_module_init_ro_nx(struct module *mod)
92142 {
92143- set_page_attributes(mod->module_init + mod->init_text_size,
92144- mod->module_init + mod->init_size,
92145+ set_page_attributes(mod->module_init_rw,
92146+ mod->module_init_rw + mod->init_size_rw,
92147 set_memory_x);
92148- set_page_attributes(mod->module_init,
92149- mod->module_init + mod->init_ro_size,
92150+ set_page_attributes(mod->module_init_rx,
92151+ mod->module_init_rx + mod->init_size_rx,
92152 set_memory_rw);
92153 }
92154
92155@@ -1783,14 +1802,14 @@ void set_all_modules_text_rw(void)
92156 list_for_each_entry_rcu(mod, &modules, list) {
92157 if (mod->state == MODULE_STATE_UNFORMED)
92158 continue;
92159- if ((mod->module_core) && (mod->core_text_size)) {
92160- set_page_attributes(mod->module_core,
92161- mod->module_core + mod->core_text_size,
92162+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
92163+ set_page_attributes(mod->module_core_rx,
92164+ mod->module_core_rx + mod->core_size_rx,
92165 set_memory_rw);
92166 }
92167- if ((mod->module_init) && (mod->init_text_size)) {
92168- set_page_attributes(mod->module_init,
92169- mod->module_init + mod->init_text_size,
92170+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
92171+ set_page_attributes(mod->module_init_rx,
92172+ mod->module_init_rx + mod->init_size_rx,
92173 set_memory_rw);
92174 }
92175 }
92176@@ -1806,14 +1825,14 @@ void set_all_modules_text_ro(void)
92177 list_for_each_entry_rcu(mod, &modules, list) {
92178 if (mod->state == MODULE_STATE_UNFORMED)
92179 continue;
92180- if ((mod->module_core) && (mod->core_text_size)) {
92181- set_page_attributes(mod->module_core,
92182- mod->module_core + mod->core_text_size,
92183+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
92184+ set_page_attributes(mod->module_core_rx,
92185+ mod->module_core_rx + mod->core_size_rx,
92186 set_memory_ro);
92187 }
92188- if ((mod->module_init) && (mod->init_text_size)) {
92189- set_page_attributes(mod->module_init,
92190- mod->module_init + mod->init_text_size,
92191+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
92192+ set_page_attributes(mod->module_init_rx,
92193+ mod->module_init_rx + mod->init_size_rx,
92194 set_memory_ro);
92195 }
92196 }
92197@@ -1864,16 +1883,19 @@ static void free_module(struct module *mod)
92198
92199 /* This may be NULL, but that's OK */
92200 unset_module_init_ro_nx(mod);
92201- module_free(mod, mod->module_init);
92202+ module_free(mod, mod->module_init_rw);
92203+ module_free_exec(mod, mod->module_init_rx);
92204 kfree(mod->args);
92205 percpu_modfree(mod);
92206
92207 /* Free lock-classes: */
92208- lockdep_free_key_range(mod->module_core, mod->core_size);
92209+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
92210+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
92211
92212 /* Finally, free the core (containing the module structure) */
92213 unset_module_core_ro_nx(mod);
92214- module_free(mod, mod->module_core);
92215+ module_free_exec(mod, mod->module_core_rx);
92216+ module_free(mod, mod->module_core_rw);
92217
92218 #ifdef CONFIG_MPU
92219 update_protections(current->mm);
92220@@ -1942,9 +1964,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
92221 int ret = 0;
92222 const struct kernel_symbol *ksym;
92223
92224+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92225+ int is_fs_load = 0;
92226+ int register_filesystem_found = 0;
92227+ char *p;
92228+
92229+ p = strstr(mod->args, "grsec_modharden_fs");
92230+ if (p) {
92231+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
92232+ /* copy \0 as well */
92233+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
92234+ is_fs_load = 1;
92235+ }
92236+#endif
92237+
92238 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
92239 const char *name = info->strtab + sym[i].st_name;
92240
92241+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92242+ /* it's a real shame this will never get ripped and copied
92243+ upstream! ;(
92244+ */
92245+ if (is_fs_load && !strcmp(name, "register_filesystem"))
92246+ register_filesystem_found = 1;
92247+#endif
92248+
92249 switch (sym[i].st_shndx) {
92250 case SHN_COMMON:
92251 /* Ignore common symbols */
92252@@ -1969,7 +2013,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
92253 ksym = resolve_symbol_wait(mod, info, name);
92254 /* Ok if resolved. */
92255 if (ksym && !IS_ERR(ksym)) {
92256+ pax_open_kernel();
92257 sym[i].st_value = ksym->value;
92258+ pax_close_kernel();
92259 break;
92260 }
92261
92262@@ -1988,11 +2034,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
92263 secbase = (unsigned long)mod_percpu(mod);
92264 else
92265 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
92266+ pax_open_kernel();
92267 sym[i].st_value += secbase;
92268+ pax_close_kernel();
92269 break;
92270 }
92271 }
92272
92273+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92274+ if (is_fs_load && !register_filesystem_found) {
92275+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
92276+ ret = -EPERM;
92277+ }
92278+#endif
92279+
92280 return ret;
92281 }
92282
92283@@ -2076,22 +2131,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
92284 || s->sh_entsize != ~0UL
92285 || strstarts(sname, ".init"))
92286 continue;
92287- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
92288+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
92289+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
92290+ else
92291+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
92292 pr_debug("\t%s\n", sname);
92293 }
92294- switch (m) {
92295- case 0: /* executable */
92296- mod->core_size = debug_align(mod->core_size);
92297- mod->core_text_size = mod->core_size;
92298- break;
92299- case 1: /* RO: text and ro-data */
92300- mod->core_size = debug_align(mod->core_size);
92301- mod->core_ro_size = mod->core_size;
92302- break;
92303- case 3: /* whole core */
92304- mod->core_size = debug_align(mod->core_size);
92305- break;
92306- }
92307 }
92308
92309 pr_debug("Init section allocation order:\n");
92310@@ -2105,23 +2150,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
92311 || s->sh_entsize != ~0UL
92312 || !strstarts(sname, ".init"))
92313 continue;
92314- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
92315- | INIT_OFFSET_MASK);
92316+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
92317+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
92318+ else
92319+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
92320+ s->sh_entsize |= INIT_OFFSET_MASK;
92321 pr_debug("\t%s\n", sname);
92322 }
92323- switch (m) {
92324- case 0: /* executable */
92325- mod->init_size = debug_align(mod->init_size);
92326- mod->init_text_size = mod->init_size;
92327- break;
92328- case 1: /* RO: text and ro-data */
92329- mod->init_size = debug_align(mod->init_size);
92330- mod->init_ro_size = mod->init_size;
92331- break;
92332- case 3: /* whole init */
92333- mod->init_size = debug_align(mod->init_size);
92334- break;
92335- }
92336 }
92337 }
92338
92339@@ -2294,7 +2329,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
92340
92341 /* Put symbol section at end of init part of module. */
92342 symsect->sh_flags |= SHF_ALLOC;
92343- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
92344+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
92345 info->index.sym) | INIT_OFFSET_MASK;
92346 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
92347
92348@@ -2311,13 +2346,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
92349 }
92350
92351 /* Append room for core symbols at end of core part. */
92352- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
92353- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
92354- mod->core_size += strtab_size;
92355+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
92356+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
92357+ mod->core_size_rx += strtab_size;
92358
92359 /* Put string table section at end of init part of module. */
92360 strsect->sh_flags |= SHF_ALLOC;
92361- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
92362+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
92363 info->index.str) | INIT_OFFSET_MASK;
92364 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
92365 }
92366@@ -2335,12 +2370,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
92367 /* Make sure we get permanent strtab: don't use info->strtab. */
92368 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
92369
92370+ pax_open_kernel();
92371+
92372 /* Set types up while we still have access to sections. */
92373 for (i = 0; i < mod->num_symtab; i++)
92374 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
92375
92376- mod->core_symtab = dst = mod->module_core + info->symoffs;
92377- mod->core_strtab = s = mod->module_core + info->stroffs;
92378+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
92379+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
92380 src = mod->symtab;
92381 for (ndst = i = 0; i < mod->num_symtab; i++) {
92382 if (i == 0 ||
92383@@ -2352,6 +2389,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
92384 }
92385 }
92386 mod->core_num_syms = ndst;
92387+
92388+ pax_close_kernel();
92389 }
92390 #else
92391 static inline void layout_symtab(struct module *mod, struct load_info *info)
92392@@ -2385,17 +2424,33 @@ void * __weak module_alloc(unsigned long size)
92393 return vmalloc_exec(size);
92394 }
92395
92396-static void *module_alloc_update_bounds(unsigned long size)
92397+static void *module_alloc_update_bounds_rw(unsigned long size)
92398 {
92399 void *ret = module_alloc(size);
92400
92401 if (ret) {
92402 mutex_lock(&module_mutex);
92403 /* Update module bounds. */
92404- if ((unsigned long)ret < module_addr_min)
92405- module_addr_min = (unsigned long)ret;
92406- if ((unsigned long)ret + size > module_addr_max)
92407- module_addr_max = (unsigned long)ret + size;
92408+ if ((unsigned long)ret < module_addr_min_rw)
92409+ module_addr_min_rw = (unsigned long)ret;
92410+ if ((unsigned long)ret + size > module_addr_max_rw)
92411+ module_addr_max_rw = (unsigned long)ret + size;
92412+ mutex_unlock(&module_mutex);
92413+ }
92414+ return ret;
92415+}
92416+
92417+static void *module_alloc_update_bounds_rx(unsigned long size)
92418+{
92419+ void *ret = module_alloc_exec(size);
92420+
92421+ if (ret) {
92422+ mutex_lock(&module_mutex);
92423+ /* Update module bounds. */
92424+ if ((unsigned long)ret < module_addr_min_rx)
92425+ module_addr_min_rx = (unsigned long)ret;
92426+ if ((unsigned long)ret + size > module_addr_max_rx)
92427+ module_addr_max_rx = (unsigned long)ret + size;
92428 mutex_unlock(&module_mutex);
92429 }
92430 return ret;
92431@@ -2652,7 +2707,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
92432 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
92433
92434 if (info->index.sym == 0) {
92435+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
92436+ /*
92437+ * avoid potentially printing gibberish on attempted load
92438+ * of a module randomized with a different seed
92439+ */
92440+ pr_warn("module has no symbols (stripped?)\n");
92441+#else
92442 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
92443+#endif
92444 return ERR_PTR(-ENOEXEC);
92445 }
92446
92447@@ -2668,8 +2731,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
92448 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
92449 {
92450 const char *modmagic = get_modinfo(info, "vermagic");
92451+ const char *license = get_modinfo(info, "license");
92452 int err;
92453
92454+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
92455+ if (!license || !license_is_gpl_compatible(license))
92456+ return -ENOEXEC;
92457+#endif
92458+
92459 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
92460 modmagic = NULL;
92461
92462@@ -2694,7 +2763,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
92463 }
92464
92465 /* Set up license info based on the info section */
92466- set_license(mod, get_modinfo(info, "license"));
92467+ set_license(mod, license);
92468
92469 return 0;
92470 }
92471@@ -2788,7 +2857,7 @@ static int move_module(struct module *mod, struct load_info *info)
92472 void *ptr;
92473
92474 /* Do the allocs. */
92475- ptr = module_alloc_update_bounds(mod->core_size);
92476+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
92477 /*
92478 * The pointer to this block is stored in the module structure
92479 * which is inside the block. Just mark it as not being a
92480@@ -2798,11 +2867,11 @@ static int move_module(struct module *mod, struct load_info *info)
92481 if (!ptr)
92482 return -ENOMEM;
92483
92484- memset(ptr, 0, mod->core_size);
92485- mod->module_core = ptr;
92486+ memset(ptr, 0, mod->core_size_rw);
92487+ mod->module_core_rw = ptr;
92488
92489- if (mod->init_size) {
92490- ptr = module_alloc_update_bounds(mod->init_size);
92491+ if (mod->init_size_rw) {
92492+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
92493 /*
92494 * The pointer to this block is stored in the module structure
92495 * which is inside the block. This block doesn't need to be
92496@@ -2811,13 +2880,45 @@ static int move_module(struct module *mod, struct load_info *info)
92497 */
92498 kmemleak_ignore(ptr);
92499 if (!ptr) {
92500- module_free(mod, mod->module_core);
92501+ module_free(mod, mod->module_core_rw);
92502 return -ENOMEM;
92503 }
92504- memset(ptr, 0, mod->init_size);
92505- mod->module_init = ptr;
92506+ memset(ptr, 0, mod->init_size_rw);
92507+ mod->module_init_rw = ptr;
92508 } else
92509- mod->module_init = NULL;
92510+ mod->module_init_rw = NULL;
92511+
92512+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
92513+ kmemleak_not_leak(ptr);
92514+ if (!ptr) {
92515+ if (mod->module_init_rw)
92516+ module_free(mod, mod->module_init_rw);
92517+ module_free(mod, mod->module_core_rw);
92518+ return -ENOMEM;
92519+ }
92520+
92521+ pax_open_kernel();
92522+ memset(ptr, 0, mod->core_size_rx);
92523+ pax_close_kernel();
92524+ mod->module_core_rx = ptr;
92525+
92526+ if (mod->init_size_rx) {
92527+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
92528+ kmemleak_ignore(ptr);
92529+ if (!ptr && mod->init_size_rx) {
92530+ module_free_exec(mod, mod->module_core_rx);
92531+ if (mod->module_init_rw)
92532+ module_free(mod, mod->module_init_rw);
92533+ module_free(mod, mod->module_core_rw);
92534+ return -ENOMEM;
92535+ }
92536+
92537+ pax_open_kernel();
92538+ memset(ptr, 0, mod->init_size_rx);
92539+ pax_close_kernel();
92540+ mod->module_init_rx = ptr;
92541+ } else
92542+ mod->module_init_rx = NULL;
92543
92544 /* Transfer each section which specifies SHF_ALLOC */
92545 pr_debug("final section addresses:\n");
92546@@ -2828,16 +2929,45 @@ static int move_module(struct module *mod, struct load_info *info)
92547 if (!(shdr->sh_flags & SHF_ALLOC))
92548 continue;
92549
92550- if (shdr->sh_entsize & INIT_OFFSET_MASK)
92551- dest = mod->module_init
92552- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
92553- else
92554- dest = mod->module_core + shdr->sh_entsize;
92555+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
92556+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
92557+ dest = mod->module_init_rw
92558+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
92559+ else
92560+ dest = mod->module_init_rx
92561+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
92562+ } else {
92563+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
92564+ dest = mod->module_core_rw + shdr->sh_entsize;
92565+ else
92566+ dest = mod->module_core_rx + shdr->sh_entsize;
92567+ }
92568+
92569+ if (shdr->sh_type != SHT_NOBITS) {
92570+
92571+#ifdef CONFIG_PAX_KERNEXEC
92572+#ifdef CONFIG_X86_64
92573+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
92574+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
92575+#endif
92576+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
92577+ pax_open_kernel();
92578+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
92579+ pax_close_kernel();
92580+ } else
92581+#endif
92582
92583- if (shdr->sh_type != SHT_NOBITS)
92584 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
92585+ }
92586 /* Update sh_addr to point to copy in image. */
92587- shdr->sh_addr = (unsigned long)dest;
92588+
92589+#ifdef CONFIG_PAX_KERNEXEC
92590+ if (shdr->sh_flags & SHF_EXECINSTR)
92591+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
92592+ else
92593+#endif
92594+
92595+ shdr->sh_addr = (unsigned long)dest;
92596 pr_debug("\t0x%lx %s\n",
92597 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
92598 }
92599@@ -2894,12 +3024,12 @@ static void flush_module_icache(const struct module *mod)
92600 * Do it before processing of module parameters, so the module
92601 * can provide parameter accessor functions of its own.
92602 */
92603- if (mod->module_init)
92604- flush_icache_range((unsigned long)mod->module_init,
92605- (unsigned long)mod->module_init
92606- + mod->init_size);
92607- flush_icache_range((unsigned long)mod->module_core,
92608- (unsigned long)mod->module_core + mod->core_size);
92609+ if (mod->module_init_rx)
92610+ flush_icache_range((unsigned long)mod->module_init_rx,
92611+ (unsigned long)mod->module_init_rx
92612+ + mod->init_size_rx);
92613+ flush_icache_range((unsigned long)mod->module_core_rx,
92614+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
92615
92616 set_fs(old_fs);
92617 }
92618@@ -2956,8 +3086,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
92619 static void module_deallocate(struct module *mod, struct load_info *info)
92620 {
92621 percpu_modfree(mod);
92622- module_free(mod, mod->module_init);
92623- module_free(mod, mod->module_core);
92624+ module_free_exec(mod, mod->module_init_rx);
92625+ module_free_exec(mod, mod->module_core_rx);
92626+ module_free(mod, mod->module_init_rw);
92627+ module_free(mod, mod->module_core_rw);
92628 }
92629
92630 int __weak module_finalize(const Elf_Ehdr *hdr,
92631@@ -2970,7 +3102,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
92632 static int post_relocation(struct module *mod, const struct load_info *info)
92633 {
92634 /* Sort exception table now relocations are done. */
92635+ pax_open_kernel();
92636 sort_extable(mod->extable, mod->extable + mod->num_exentries);
92637+ pax_close_kernel();
92638
92639 /* Copy relocated percpu area over. */
92640 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
92641@@ -3079,11 +3213,12 @@ static int do_init_module(struct module *mod)
92642 mod->strtab = mod->core_strtab;
92643 #endif
92644 unset_module_init_ro_nx(mod);
92645- module_free(mod, mod->module_init);
92646- mod->module_init = NULL;
92647- mod->init_size = 0;
92648- mod->init_ro_size = 0;
92649- mod->init_text_size = 0;
92650+ module_free(mod, mod->module_init_rw);
92651+ module_free_exec(mod, mod->module_init_rx);
92652+ mod->module_init_rw = NULL;
92653+ mod->module_init_rx = NULL;
92654+ mod->init_size_rw = 0;
92655+ mod->init_size_rx = 0;
92656 mutex_unlock(&module_mutex);
92657 wake_up_all(&module_wq);
92658
92659@@ -3151,16 +3286,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
92660 module_bug_finalize(info->hdr, info->sechdrs, mod);
92661
92662 /* Set RO and NX regions for core */
92663- set_section_ro_nx(mod->module_core,
92664- mod->core_text_size,
92665- mod->core_ro_size,
92666- mod->core_size);
92667+ set_section_ro_nx(mod->module_core_rx,
92668+ mod->core_size_rx,
92669+ mod->core_size_rx,
92670+ mod->core_size_rx);
92671
92672 /* Set RO and NX regions for init */
92673- set_section_ro_nx(mod->module_init,
92674- mod->init_text_size,
92675- mod->init_ro_size,
92676- mod->init_size);
92677+ set_section_ro_nx(mod->module_init_rx,
92678+ mod->init_size_rx,
92679+ mod->init_size_rx,
92680+ mod->init_size_rx);
92681
92682 /* Mark state as coming so strong_try_module_get() ignores us,
92683 * but kallsyms etc. can see us. */
92684@@ -3244,9 +3379,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
92685 if (err)
92686 goto free_unload;
92687
92688+ /* Now copy in args */
92689+ mod->args = strndup_user(uargs, ~0UL >> 1);
92690+ if (IS_ERR(mod->args)) {
92691+ err = PTR_ERR(mod->args);
92692+ goto free_unload;
92693+ }
92694+
92695 /* Set up MODINFO_ATTR fields */
92696 setup_modinfo(mod, info);
92697
92698+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92699+ {
92700+ char *p, *p2;
92701+
92702+ if (strstr(mod->args, "grsec_modharden_netdev")) {
92703+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
92704+ err = -EPERM;
92705+ goto free_modinfo;
92706+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
92707+ p += sizeof("grsec_modharden_normal") - 1;
92708+ p2 = strstr(p, "_");
92709+ if (p2) {
92710+ *p2 = '\0';
92711+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
92712+ *p2 = '_';
92713+ }
92714+ err = -EPERM;
92715+ goto free_modinfo;
92716+ }
92717+ }
92718+#endif
92719+
92720 /* Fix up syms, so that st_value is a pointer to location. */
92721 err = simplify_symbols(mod, info);
92722 if (err < 0)
92723@@ -3262,13 +3426,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
92724
92725 flush_module_icache(mod);
92726
92727- /* Now copy in args */
92728- mod->args = strndup_user(uargs, ~0UL >> 1);
92729- if (IS_ERR(mod->args)) {
92730- err = PTR_ERR(mod->args);
92731- goto free_arch_cleanup;
92732- }
92733-
92734 dynamic_debug_setup(info->debug, info->num_debug);
92735
92736 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
92737@@ -3311,11 +3468,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
92738 ddebug_cleanup:
92739 dynamic_debug_remove(info->debug);
92740 synchronize_sched();
92741- kfree(mod->args);
92742- free_arch_cleanup:
92743 module_arch_cleanup(mod);
92744 free_modinfo:
92745 free_modinfo(mod);
92746+ kfree(mod->args);
92747 free_unload:
92748 module_unload_free(mod);
92749 unlink_mod:
92750@@ -3398,10 +3554,16 @@ static const char *get_ksymbol(struct module *mod,
92751 unsigned long nextval;
92752
92753 /* At worse, next value is at end of module */
92754- if (within_module_init(addr, mod))
92755- nextval = (unsigned long)mod->module_init+mod->init_text_size;
92756+ if (within_module_init_rx(addr, mod))
92757+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
92758+ else if (within_module_init_rw(addr, mod))
92759+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
92760+ else if (within_module_core_rx(addr, mod))
92761+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
92762+ else if (within_module_core_rw(addr, mod))
92763+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
92764 else
92765- nextval = (unsigned long)mod->module_core+mod->core_text_size;
92766+ return NULL;
92767
92768 /* Scan for closest preceding symbol, and next symbol. (ELF
92769 starts real symbols at 1). */
92770@@ -3652,7 +3814,7 @@ static int m_show(struct seq_file *m, void *p)
92771 return 0;
92772
92773 seq_printf(m, "%s %u",
92774- mod->name, mod->init_size + mod->core_size);
92775+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
92776 print_unload_info(m, mod);
92777
92778 /* Informative for users. */
92779@@ -3661,7 +3823,7 @@ static int m_show(struct seq_file *m, void *p)
92780 mod->state == MODULE_STATE_COMING ? "Loading":
92781 "Live");
92782 /* Used by oprofile and other similar tools. */
92783- seq_printf(m, " 0x%pK", mod->module_core);
92784+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
92785
92786 /* Taints info */
92787 if (mod->taints)
92788@@ -3697,7 +3859,17 @@ static const struct file_operations proc_modules_operations = {
92789
92790 static int __init proc_modules_init(void)
92791 {
92792+#ifndef CONFIG_GRKERNSEC_HIDESYM
92793+#ifdef CONFIG_GRKERNSEC_PROC_USER
92794+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
92795+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
92796+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
92797+#else
92798 proc_create("modules", 0, NULL, &proc_modules_operations);
92799+#endif
92800+#else
92801+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
92802+#endif
92803 return 0;
92804 }
92805 module_init(proc_modules_init);
92806@@ -3758,14 +3930,14 @@ struct module *__module_address(unsigned long addr)
92807 {
92808 struct module *mod;
92809
92810- if (addr < module_addr_min || addr > module_addr_max)
92811+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
92812+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
92813 return NULL;
92814
92815 list_for_each_entry_rcu(mod, &modules, list) {
92816 if (mod->state == MODULE_STATE_UNFORMED)
92817 continue;
92818- if (within_module_core(addr, mod)
92819- || within_module_init(addr, mod))
92820+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
92821 return mod;
92822 }
92823 return NULL;
92824@@ -3800,11 +3972,20 @@ bool is_module_text_address(unsigned long addr)
92825 */
92826 struct module *__module_text_address(unsigned long addr)
92827 {
92828- struct module *mod = __module_address(addr);
92829+ struct module *mod;
92830+
92831+#ifdef CONFIG_X86_32
92832+ addr = ktla_ktva(addr);
92833+#endif
92834+
92835+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
92836+ return NULL;
92837+
92838+ mod = __module_address(addr);
92839+
92840 if (mod) {
92841 /* Make sure it's within the text section. */
92842- if (!within(addr, mod->module_init, mod->init_text_size)
92843- && !within(addr, mod->module_core, mod->core_text_size))
92844+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
92845 mod = NULL;
92846 }
92847 return mod;
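
The module.c hunks above split every module image into two mappings: an RX block for code and read-only data (module_core_rx/core_size_rx) and an RW block for writable data (module_core_rw/core_size_rw), with each ELF section routed by its flags. A minimal userspace sketch of that placement rule, assuming only the standard SHF_* flag values (the helper name is ours, not the patch's):

    #include <stdio.h>

    #define SHF_WRITE     0x1  /* section is writable at runtime */
    #define SHF_ALLOC     0x2  /* section occupies memory at runtime */
    #define SHF_EXECINSTR 0x4  /* section contains code */

    /* Mirrors the patch's test: writable or non-allocated sections go to
     * the RW block; everything else (code, rodata) goes to the RX block. */
    static const char *pick_region(unsigned long sh_flags)
    {
        if ((sh_flags & SHF_WRITE) || !(sh_flags & SHF_ALLOC))
            return "rw";
        return "rx";
    }

    int main(void)
    {
        printf(".text   -> %s\n", pick_region(SHF_ALLOC | SHF_EXECINSTR)); /* rx */
        printf(".rodata -> %s\n", pick_region(SHF_ALLOC));                 /* rx */
        printf(".data   -> %s\n", pick_region(SHF_ALLOC | SHF_WRITE));     /* rw */
        return 0;
    }

The same predicate appears in layout_sections() and move_module() above, so layout and copy-in agree on where each section lives.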
92848diff --git a/kernel/notifier.c b/kernel/notifier.c
92849index 4803da6..1c5eea6 100644
92850--- a/kernel/notifier.c
92851+++ b/kernel/notifier.c
92852@@ -5,6 +5,7 @@
92853 #include <linux/rcupdate.h>
92854 #include <linux/vmalloc.h>
92855 #include <linux/reboot.h>
92856+#include <linux/mm.h>
92857
92858 /*
92859 * Notifier list for kernel code which wants to be called
92860@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
92861 while ((*nl) != NULL) {
92862 if (n->priority > (*nl)->priority)
92863 break;
92864- nl = &((*nl)->next);
92865+ nl = (struct notifier_block **)&((*nl)->next);
92866 }
92867- n->next = *nl;
92868+ pax_open_kernel();
92869+ *(const void **)&n->next = *nl;
92870 rcu_assign_pointer(*nl, n);
92871+ pax_close_kernel();
92872 return 0;
92873 }
92874
92875@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
92876 return 0;
92877 if (n->priority > (*nl)->priority)
92878 break;
92879- nl = &((*nl)->next);
92880+ nl = (struct notifier_block **)&((*nl)->next);
92881 }
92882- n->next = *nl;
92883+ pax_open_kernel();
92884+ *(const void **)&n->next = *nl;
92885 rcu_assign_pointer(*nl, n);
92886+ pax_close_kernel();
92887 return 0;
92888 }
92889
92890@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
92891 {
92892 while ((*nl) != NULL) {
92893 if ((*nl) == n) {
92894+ pax_open_kernel();
92895 rcu_assign_pointer(*nl, n->next);
92896+ pax_close_kernel();
92897 return 0;
92898 }
92899- nl = &((*nl)->next);
92900+ nl = (struct notifier_block **)&((*nl)->next);
92901 }
92902 return -ENOENT;
92903 }
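
These notifier.c hunks wrap every pointer update in pax_open_kernel()/pax_close_kernel(), PaX's temporary write window over otherwise read-only kernel data. A rough userspace analogue of that open-write-close discipline, built on mprotect() (the page handling here is ours, not the patch's):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
            return 1;
        strcpy(p, "chain v1");
        mprotect(p, pagesz, PROT_READ);               /* data now read-only */

        mprotect(p, pagesz, PROT_READ | PROT_WRITE);  /* "pax_open_kernel" */
        strcpy(p, "chain v2");                        /* the sanctioned write */
        mprotect(p, pagesz, PROT_READ);               /* "pax_close_kernel" */

        puts(p);                                      /* prints "chain v2" */
        munmap(p, pagesz);
        return 0;
    }

Any write outside the window faults, which is exactly the property the constified notifier chains rely on.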
92904diff --git a/kernel/padata.c b/kernel/padata.c
92905index 161402f..598814c 100644
92906--- a/kernel/padata.c
92907+++ b/kernel/padata.c
92908@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
92909 * seq_nr mod. number of cpus in use.
92910 */
92911
92912- seq_nr = atomic_inc_return(&pd->seq_nr);
92913+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
92914 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
92915
92916 return padata_index_to_cpu(pd, cpu_index);
92917@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
92918 padata_init_pqueues(pd);
92919 padata_init_squeues(pd);
92920 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
92921- atomic_set(&pd->seq_nr, -1);
92922+ atomic_set_unchecked(&pd->seq_nr, -1);
92923 atomic_set(&pd->reorder_objects, 0);
92924 atomic_set(&pd->refcnt, 0);
92925 pd->pinst = pinst;
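
pd->seq_nr is only ever used modulo the number of CPUs in use, so wraparound is harmless by design; switching it to the _unchecked atomic opts it out of PaX's overflow instrumentation. The same reasoning covers the rtmutex-tester, profile, and rcutorture counters patched elsewhere in this file. A standalone sketch of the dispatch arithmetic with C11 atomics (names are ours):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint seq_nr;   /* free-running; wraparound is intended */

    /* Maps the next sequence number onto one of ncpus workers, as
     * padata_cpu_hash() does with cpumask_weight(). */
    static unsigned int next_cpu(unsigned int ncpus)
    {
        unsigned int seq = atomic_fetch_add(&seq_nr, 1) + 1; /* inc_return */
        return seq % ncpus;
    }

    int main(void)
    {
        for (int i = 0; i < 8; i++)
            printf("job %d -> cpu %u\n", i, next_cpu(4));
        return 0;
    }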
92926diff --git a/kernel/panic.c b/kernel/panic.c
92927index 62e16ce..9db5047b 100644
92928--- a/kernel/panic.c
92929+++ b/kernel/panic.c
92930@@ -53,7 +53,7 @@ EXPORT_SYMBOL(panic_blink);
92931 /*
92932 * Stop ourself in panic -- architecture code may override this
92933 */
92934-void __weak panic_smp_self_stop(void)
92935+void __weak __noreturn panic_smp_self_stop(void)
92936 {
92937 while (1)
92938 cpu_relax();
92939@@ -420,7 +420,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
92940 disable_trace_on_warning();
92941
92942 pr_warn("------------[ cut here ]------------\n");
92943- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
92944+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
92945 raw_smp_processor_id(), current->pid, file, line, caller);
92946
92947 if (args)
92948@@ -474,7 +474,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
92949 */
92950 __visible void __stack_chk_fail(void)
92951 {
92952- panic("stack-protector: Kernel stack is corrupted in: %p\n",
92953+ dump_stack();
92954+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
92955 __builtin_return_address(0));
92956 }
92957 EXPORT_SYMBOL(__stack_chk_fail);
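
The __stack_chk_fail() hunk adds a dump_stack() before panicking and prints the return address with %pA so the report survives pointer obfuscation. The function itself is the compiler's landing pad for a failed stack canary; below is a simplified hand-rolled illustration of the check it backs (real canaries are inserted by the compiler prologue/epilogue, not written out like this):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static const unsigned long guard = 0xdeadbeefUL;  /* stand-in canary value */

    static void stack_chk_fail_demo(void)
    {
        /* The patched kernel version also calls dump_stack() here. */
        fprintf(stderr, "stack-protector: stack corrupted in %p\n",
                __builtin_return_address(0));
        abort();
    }

    static void copy_name(const char *src)
    {
        unsigned long canary = guard;       /* prologue: stash the guard */
        char buf[16];

        strncpy(buf, src, sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = '\0';

        if (canary != guard)                /* epilogue: verify before return */
            stack_chk_fail_demo();
    }

    int main(void)
    {
        copy_name("safe input");
        puts("ok");
        return 0;
    }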
92958diff --git a/kernel/pid.c b/kernel/pid.c
92959index 9b9a266..c20ef80 100644
92960--- a/kernel/pid.c
92961+++ b/kernel/pid.c
92962@@ -33,6 +33,7 @@
92963 #include <linux/rculist.h>
92964 #include <linux/bootmem.h>
92965 #include <linux/hash.h>
92966+#include <linux/security.h>
92967 #include <linux/pid_namespace.h>
92968 #include <linux/init_task.h>
92969 #include <linux/syscalls.h>
92970@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
92971
92972 int pid_max = PID_MAX_DEFAULT;
92973
92974-#define RESERVED_PIDS 300
92975+#define RESERVED_PIDS 500
92976
92977 int pid_max_min = RESERVED_PIDS + 1;
92978 int pid_max_max = PID_MAX_LIMIT;
92979@@ -445,10 +446,18 @@ EXPORT_SYMBOL(pid_task);
92980 */
92981 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
92982 {
92983+ struct task_struct *task;
92984+
92985 rcu_lockdep_assert(rcu_read_lock_held(),
92986 "find_task_by_pid_ns() needs rcu_read_lock()"
92987 " protection");
92988- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
92989+
92990+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
92991+
92992+ if (gr_pid_is_chrooted(task))
92993+ return NULL;
92994+
92995+ return task;
92996 }
92997
92998 struct task_struct *find_task_by_vpid(pid_t vnr)
92999@@ -456,6 +465,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
93000 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
93001 }
93002
93003+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
93004+{
93005+ rcu_lockdep_assert(rcu_read_lock_held(),
93006+ "find_task_by_pid_ns() needs rcu_read_lock()"
93007+ " protection");
93008+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
93009+}
93010+
93011 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
93012 {
93013 struct pid *pid;
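
find_task_by_pid_ns() now filters every hit through gr_pid_is_chrooted(), so a chrooted process cannot resolve PIDs outside its jail, while the new find_task_by_vpid_unrestricted() preserves the unfiltered lookup for kernel-internal callers. A generic sketch of that lookup-plus-policy split (the task table and the hidden flag are ours):

    #include <stdio.h>
    #include <stddef.h>

    struct task { int pid; int outside_chroot; };

    static struct task tasks[] = { {1, 0}, {200, 1}, {201, 0} };

    static struct task *find_task_unrestricted(int pid)
    {
        for (size_t i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
            if (tasks[i].pid == pid)
                return &tasks[i];
        return NULL;
    }

    /* Restricted lookup: same search, then a policy veto, mirroring
     * the gr_pid_is_chrooted() check in the patch. */
    static struct task *find_task(int pid)
    {
        struct task *t = find_task_unrestricted(pid);

        if (t && t->outside_chroot)
            return NULL;
        return t;
    }

    int main(void)
    {
        printf("pid 200: %s\n", find_task(200) ? "visible" : "hidden");
        printf("pid 201: %s\n", find_task(201) ? "visible" : "hidden");
        return 0;
    }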
93014diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
93015index db95d8e..a0ca23f 100644
93016--- a/kernel/pid_namespace.c
93017+++ b/kernel/pid_namespace.c
93018@@ -253,7 +253,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
93019 void __user *buffer, size_t *lenp, loff_t *ppos)
93020 {
93021 struct pid_namespace *pid_ns = task_active_pid_ns(current);
93022- struct ctl_table tmp = *table;
93023+ ctl_table_no_const tmp = *table;
93024
93025 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
93026 return -EPERM;
93027diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
93028index 3b89464..5e38379 100644
93029--- a/kernel/posix-cpu-timers.c
93030+++ b/kernel/posix-cpu-timers.c
93031@@ -1464,14 +1464,14 @@ struct k_clock clock_posix_cpu = {
93032
93033 static __init int init_posix_cpu_timers(void)
93034 {
93035- struct k_clock process = {
93036+ static struct k_clock process = {
93037 .clock_getres = process_cpu_clock_getres,
93038 .clock_get = process_cpu_clock_get,
93039 .timer_create = process_cpu_timer_create,
93040 .nsleep = process_cpu_nsleep,
93041 .nsleep_restart = process_cpu_nsleep_restart,
93042 };
93043- struct k_clock thread = {
93044+ static struct k_clock thread = {
93045 .clock_getres = thread_cpu_clock_getres,
93046 .clock_get = thread_cpu_clock_get,
93047 .timer_create = thread_cpu_timer_create,
93048diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
93049index 424c2d4..679242f 100644
93050--- a/kernel/posix-timers.c
93051+++ b/kernel/posix-timers.c
93052@@ -43,6 +43,7 @@
93053 #include <linux/hash.h>
93054 #include <linux/posix-clock.h>
93055 #include <linux/posix-timers.h>
93056+#include <linux/grsecurity.h>
93057 #include <linux/syscalls.h>
93058 #include <linux/wait.h>
93059 #include <linux/workqueue.h>
93060@@ -122,7 +123,7 @@ static DEFINE_SPINLOCK(hash_lock);
93061 * which we beg off on and pass to do_sys_settimeofday().
93062 */
93063
93064-static struct k_clock posix_clocks[MAX_CLOCKS];
93065+static struct k_clock *posix_clocks[MAX_CLOCKS];
93066
93067 /*
93068 * These ones are defined below.
93069@@ -275,7 +276,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
93070 */
93071 static __init int init_posix_timers(void)
93072 {
93073- struct k_clock clock_realtime = {
93074+ static struct k_clock clock_realtime = {
93075 .clock_getres = hrtimer_get_res,
93076 .clock_get = posix_clock_realtime_get,
93077 .clock_set = posix_clock_realtime_set,
93078@@ -287,7 +288,7 @@ static __init int init_posix_timers(void)
93079 .timer_get = common_timer_get,
93080 .timer_del = common_timer_del,
93081 };
93082- struct k_clock clock_monotonic = {
93083+ static struct k_clock clock_monotonic = {
93084 .clock_getres = hrtimer_get_res,
93085 .clock_get = posix_ktime_get_ts,
93086 .nsleep = common_nsleep,
93087@@ -297,19 +298,19 @@ static __init int init_posix_timers(void)
93088 .timer_get = common_timer_get,
93089 .timer_del = common_timer_del,
93090 };
93091- struct k_clock clock_monotonic_raw = {
93092+ static struct k_clock clock_monotonic_raw = {
93093 .clock_getres = hrtimer_get_res,
93094 .clock_get = posix_get_monotonic_raw,
93095 };
93096- struct k_clock clock_realtime_coarse = {
93097+ static struct k_clock clock_realtime_coarse = {
93098 .clock_getres = posix_get_coarse_res,
93099 .clock_get = posix_get_realtime_coarse,
93100 };
93101- struct k_clock clock_monotonic_coarse = {
93102+ static struct k_clock clock_monotonic_coarse = {
93103 .clock_getres = posix_get_coarse_res,
93104 .clock_get = posix_get_monotonic_coarse,
93105 };
93106- struct k_clock clock_tai = {
93107+ static struct k_clock clock_tai = {
93108 .clock_getres = hrtimer_get_res,
93109 .clock_get = posix_get_tai,
93110 .nsleep = common_nsleep,
93111@@ -319,7 +320,7 @@ static __init int init_posix_timers(void)
93112 .timer_get = common_timer_get,
93113 .timer_del = common_timer_del,
93114 };
93115- struct k_clock clock_boottime = {
93116+ static struct k_clock clock_boottime = {
93117 .clock_getres = hrtimer_get_res,
93118 .clock_get = posix_get_boottime,
93119 .nsleep = common_nsleep,
93120@@ -531,7 +532,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
93121 return;
93122 }
93123
93124- posix_clocks[clock_id] = *new_clock;
93125+ posix_clocks[clock_id] = new_clock;
93126 }
93127 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
93128
93129@@ -577,9 +578,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
93130 return (id & CLOCKFD_MASK) == CLOCKFD ?
93131 &clock_posix_dynamic : &clock_posix_cpu;
93132
93133- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
93134+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
93135 return NULL;
93136- return &posix_clocks[id];
93137+ return posix_clocks[id];
93138 }
93139
93140 static int common_timer_create(struct k_itimer *new_timer)
93141@@ -597,7 +598,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
93142 struct k_clock *kc = clockid_to_kclock(which_clock);
93143 struct k_itimer *new_timer;
93144 int error, new_timer_id;
93145- sigevent_t event;
93146+ sigevent_t event = { };
93147 int it_id_set = IT_ID_NOT_SET;
93148
93149 if (!kc)
93150@@ -1011,6 +1012,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
93151 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
93152 return -EFAULT;
93153
93154+ /* only the CLOCK_REALTIME clock can be set; all other clocks
93155+ have their clock_set fptr set to a nosettime dummy function.
93156+ CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
93157+ call common_clock_set, which calls do_sys_settimeofday, which
93158+ we hook.
93159+ */
93160+
93161 return kc->clock_set(which_clock, &new_tp);
93162 }
93163
93164diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
93165index 9a83d78..128bfc0 100644
93166--- a/kernel/power/Kconfig
93167+++ b/kernel/power/Kconfig
93168@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
93169 config HIBERNATION
93170 bool "Hibernation (aka 'suspend to disk')"
93171 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
93172+ depends on !GRKERNSEC_KMEM
93173+ depends on !PAX_MEMORY_SANITIZE
93174 select HIBERNATE_CALLBACKS
93175 select LZO_COMPRESS
93176 select LZO_DECOMPRESS
93177diff --git a/kernel/power/process.c b/kernel/power/process.c
93178index 4ee194e..925778f 100644
93179--- a/kernel/power/process.c
93180+++ b/kernel/power/process.c
93181@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
93182 unsigned int elapsed_msecs;
93183 bool wakeup = false;
93184 int sleep_usecs = USEC_PER_MSEC;
93185+ bool timedout = false;
93186
93187 do_gettimeofday(&start);
93188
93189@@ -45,13 +46,20 @@ static int try_to_freeze_tasks(bool user_only)
93190
93191 while (true) {
93192 todo = 0;
93193+ if (time_after(jiffies, end_time))
93194+ timedout = true;
93195 read_lock(&tasklist_lock);
93196 do_each_thread(g, p) {
93197 if (p == current || !freeze_task(p))
93198 continue;
93199
93200- if (!freezer_should_skip(p))
93201+ if (!freezer_should_skip(p)) {
93202 todo++;
93203+ if (timedout) {
93204+ printk(KERN_ERR "Task refusing to freeze:\n");
93205+ sched_show_task(p);
93206+ }
93207+ }
93208 } while_each_thread(g, p);
93209 read_unlock(&tasklist_lock);
93210
93211@@ -60,7 +68,7 @@ static int try_to_freeze_tasks(bool user_only)
93212 todo += wq_busy;
93213 }
93214
93215- if (!todo || time_after(jiffies, end_time))
93216+ if (!todo || timedout)
93217 break;
93218
93219 if (pm_wakeup_pending()) {
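
try_to_freeze_tasks() now latches the deadline into a timedout flag before each scan, so on the final pass every task still refusing to freeze is named via sched_show_task() instead of the loop silently bailing out. The shape of that latch-then-report loop, with stand-ins for the scheduler pieces:

    #include <stdio.h>

    #define DEADLINE 3

    static int refuses_to_freeze(int task) { return task == 2; } /* stand-in */

    int main(void)
    {
        int tick = 0, todo;

        while (1) {
            int timedout = (tick >= DEADLINE);  /* latch before the scan */

            todo = 0;
            for (int task = 0; task < 4; task++) {
                if (!refuses_to_freeze(task))
                    continue;
                todo++;
                if (timedout)                   /* final pass: name offenders */
                    printf("task %d refusing to freeze\n", task);
            }
            if (!todo || timedout)
                break;
            tick++;
        }
        return todo ? 1 : 0;
    }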
93220diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
93221index 13e839d..8a71f12 100644
93222--- a/kernel/printk/printk.c
93223+++ b/kernel/printk/printk.c
93224@@ -480,6 +480,11 @@ static int check_syslog_permissions(int type, bool from_file)
93225 if (from_file && type != SYSLOG_ACTION_OPEN)
93226 return 0;
93227
93228+#ifdef CONFIG_GRKERNSEC_DMESG
93229+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
93230+ return -EPERM;
93231+#endif
93232+
93233 if (syslog_action_restricted(type)) {
93234 if (capable(CAP_SYSLOG))
93235 return 0;
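
With CONFIG_GRKERNSEC_DMESG enabled, check_syslog_permissions() denies syslog access up front unless the caller holds CAP_SYSLOG or CAP_SYS_ADMIN. A userspace analogue of that deny-early privilege gate, with an euid test standing in for the capability check:

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Stand-in for the patch's capability test: only "privileged"
     * callers may read the kernel log while the restriction is on. */
    static int check_dmesg_access(int restriction_on)
    {
        if (restriction_on && geteuid() != 0)
            return -EPERM;
        return 0;
    }

    int main(void)
    {
        int ret = check_dmesg_access(1);

        if (ret)
            fprintf(stderr, "dmesg: denied (%d)\n", ret);
        else
            puts("dmesg: allowed");
        return 0;
    }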
93236diff --git a/kernel/profile.c b/kernel/profile.c
93237index 54bf5ba..df6e0a2 100644
93238--- a/kernel/profile.c
93239+++ b/kernel/profile.c
93240@@ -37,7 +37,7 @@ struct profile_hit {
93241 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
93242 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
93243
93244-static atomic_t *prof_buffer;
93245+static atomic_unchecked_t *prof_buffer;
93246 static unsigned long prof_len, prof_shift;
93247
93248 int prof_on __read_mostly;
93249@@ -256,7 +256,7 @@ static void profile_flip_buffers(void)
93250 hits[i].pc = 0;
93251 continue;
93252 }
93253- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
93254+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
93255 hits[i].hits = hits[i].pc = 0;
93256 }
93257 }
93258@@ -317,9 +317,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
93259 * Add the current hit(s) and flush the write-queue out
93260 * to the global buffer:
93261 */
93262- atomic_add(nr_hits, &prof_buffer[pc]);
93263+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
93264 for (i = 0; i < NR_PROFILE_HIT; ++i) {
93265- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
93266+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
93267 hits[i].pc = hits[i].hits = 0;
93268 }
93269 out:
93270@@ -394,7 +394,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
93271 {
93272 unsigned long pc;
93273 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
93274- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
93275+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
93276 }
93277 #endif /* !CONFIG_SMP */
93278
93279@@ -490,7 +490,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
93280 return -EFAULT;
93281 buf++; p++; count--; read++;
93282 }
93283- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
93284+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
93285 if (copy_to_user(buf, (void *)pnt, count))
93286 return -EFAULT;
93287 read += count;
93288@@ -521,7 +521,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
93289 }
93290 #endif
93291 profile_discard_flip_buffers();
93292- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
93293+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
93294 return count;
93295 }
93296
93297diff --git a/kernel/ptrace.c b/kernel/ptrace.c
93298index adf9862..9d86345 100644
93299--- a/kernel/ptrace.c
93300+++ b/kernel/ptrace.c
93301@@ -327,7 +327,7 @@ static int ptrace_attach(struct task_struct *task, long request,
93302 if (seize)
93303 flags |= PT_SEIZED;
93304 rcu_read_lock();
93305- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
93306+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
93307 flags |= PT_PTRACE_CAP;
93308 rcu_read_unlock();
93309 task->ptrace = flags;
93310@@ -538,7 +538,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
93311 break;
93312 return -EIO;
93313 }
93314- if (copy_to_user(dst, buf, retval))
93315+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
93316 return -EFAULT;
93317 copied += retval;
93318 src += retval;
93319@@ -806,7 +806,7 @@ int ptrace_request(struct task_struct *child, long request,
93320 bool seized = child->ptrace & PT_SEIZED;
93321 int ret = -EIO;
93322 siginfo_t siginfo, *si;
93323- void __user *datavp = (void __user *) data;
93324+ void __user *datavp = (__force void __user *) data;
93325 unsigned long __user *datalp = datavp;
93326 unsigned long flags;
93327
93328@@ -1052,14 +1052,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
93329 goto out;
93330 }
93331
93332+ if (gr_handle_ptrace(child, request)) {
93333+ ret = -EPERM;
93334+ goto out_put_task_struct;
93335+ }
93336+
93337 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
93338 ret = ptrace_attach(child, request, addr, data);
93339 /*
93340 * Some architectures need to do book-keeping after
93341 * a ptrace attach.
93342 */
93343- if (!ret)
93344+ if (!ret) {
93345 arch_ptrace_attach(child);
93346+ gr_audit_ptrace(child);
93347+ }
93348 goto out_put_task_struct;
93349 }
93350
93351@@ -1087,7 +1094,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
93352 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
93353 if (copied != sizeof(tmp))
93354 return -EIO;
93355- return put_user(tmp, (unsigned long __user *)data);
93356+ return put_user(tmp, (__force unsigned long __user *)data);
93357 }
93358
93359 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
93360@@ -1181,7 +1188,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
93361 }
93362
93363 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
93364- compat_long_t, addr, compat_long_t, data)
93365+ compat_ulong_t, addr, compat_ulong_t, data)
93366 {
93367 struct task_struct *child;
93368 long ret;
93369@@ -1197,14 +1204,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
93370 goto out;
93371 }
93372
93373+ if (gr_handle_ptrace(child, request)) {
93374+ ret = -EPERM;
93375+ goto out_put_task_struct;
93376+ }
93377+
93378 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
93379 ret = ptrace_attach(child, request, addr, data);
93380 /*
93381 * Some architectures need to do book-keeping after
93382 * a ptrace attach.
93383 */
93384- if (!ret)
93385+ if (!ret) {
93386 arch_ptrace_attach(child);
93387+ gr_audit_ptrace(child);
93388+ }
93389 goto out_put_task_struct;
93390 }
93391
93392diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
93393index 948a769..5ca842b 100644
93394--- a/kernel/rcu/rcutorture.c
93395+++ b/kernel/rcu/rcutorture.c
93396@@ -124,12 +124,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
93397 rcu_torture_count) = { 0 };
93398 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
93399 rcu_torture_batch) = { 0 };
93400-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
93401-static atomic_t n_rcu_torture_alloc;
93402-static atomic_t n_rcu_torture_alloc_fail;
93403-static atomic_t n_rcu_torture_free;
93404-static atomic_t n_rcu_torture_mberror;
93405-static atomic_t n_rcu_torture_error;
93406+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
93407+static atomic_unchecked_t n_rcu_torture_alloc;
93408+static atomic_unchecked_t n_rcu_torture_alloc_fail;
93409+static atomic_unchecked_t n_rcu_torture_free;
93410+static atomic_unchecked_t n_rcu_torture_mberror;
93411+static atomic_unchecked_t n_rcu_torture_error;
93412 static long n_rcu_torture_barrier_error;
93413 static long n_rcu_torture_boost_ktrerror;
93414 static long n_rcu_torture_boost_rterror;
93415@@ -200,11 +200,11 @@ rcu_torture_alloc(void)
93416
93417 spin_lock_bh(&rcu_torture_lock);
93418 if (list_empty(&rcu_torture_freelist)) {
93419- atomic_inc(&n_rcu_torture_alloc_fail);
93420+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
93421 spin_unlock_bh(&rcu_torture_lock);
93422 return NULL;
93423 }
93424- atomic_inc(&n_rcu_torture_alloc);
93425+ atomic_inc_unchecked(&n_rcu_torture_alloc);
93426 p = rcu_torture_freelist.next;
93427 list_del_init(p);
93428 spin_unlock_bh(&rcu_torture_lock);
93429@@ -217,7 +217,7 @@ rcu_torture_alloc(void)
93430 static void
93431 rcu_torture_free(struct rcu_torture *p)
93432 {
93433- atomic_inc(&n_rcu_torture_free);
93434+ atomic_inc_unchecked(&n_rcu_torture_free);
93435 spin_lock_bh(&rcu_torture_lock);
93436 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
93437 spin_unlock_bh(&rcu_torture_lock);
93438@@ -301,7 +301,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
93439 i = rp->rtort_pipe_count;
93440 if (i > RCU_TORTURE_PIPE_LEN)
93441 i = RCU_TORTURE_PIPE_LEN;
93442- atomic_inc(&rcu_torture_wcount[i]);
93443+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
93444 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
93445 rp->rtort_mbtest = 0;
93446 return true;
93447@@ -808,7 +808,7 @@ rcu_torture_writer(void *arg)
93448 i = old_rp->rtort_pipe_count;
93449 if (i > RCU_TORTURE_PIPE_LEN)
93450 i = RCU_TORTURE_PIPE_LEN;
93451- atomic_inc(&rcu_torture_wcount[i]);
93452+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
93453 old_rp->rtort_pipe_count++;
93454 switch (synctype[torture_random(&rand) % nsynctypes]) {
93455 case RTWS_DEF_FREE:
93456@@ -926,7 +926,7 @@ static void rcu_torture_timer(unsigned long unused)
93457 return;
93458 }
93459 if (p->rtort_mbtest == 0)
93460- atomic_inc(&n_rcu_torture_mberror);
93461+ atomic_inc_unchecked(&n_rcu_torture_mberror);
93462 spin_lock(&rand_lock);
93463 cur_ops->read_delay(&rand);
93464 n_rcu_torture_timers++;
93465@@ -996,7 +996,7 @@ rcu_torture_reader(void *arg)
93466 continue;
93467 }
93468 if (p->rtort_mbtest == 0)
93469- atomic_inc(&n_rcu_torture_mberror);
93470+ atomic_inc_unchecked(&n_rcu_torture_mberror);
93471 cur_ops->read_delay(&rand);
93472 preempt_disable();
93473 pipe_count = p->rtort_pipe_count;
93474@@ -1054,15 +1054,15 @@ rcu_torture_printk(char *page)
93475 }
93476 page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
93477 page += sprintf(page,
93478- "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
93479+ "rtc: %pP ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
93480 rcu_torture_current,
93481 rcu_torture_current_version,
93482 list_empty(&rcu_torture_freelist),
93483- atomic_read(&n_rcu_torture_alloc),
93484- atomic_read(&n_rcu_torture_alloc_fail),
93485- atomic_read(&n_rcu_torture_free));
93486+ atomic_read_unchecked(&n_rcu_torture_alloc),
93487+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
93488+ atomic_read_unchecked(&n_rcu_torture_free));
93489 page += sprintf(page, "rtmbe: %d rtbke: %ld rtbre: %ld ",
93490- atomic_read(&n_rcu_torture_mberror),
93491+ atomic_read_unchecked(&n_rcu_torture_mberror),
93492 n_rcu_torture_boost_ktrerror,
93493 n_rcu_torture_boost_rterror);
93494 page += sprintf(page, "rtbf: %ld rtb: %ld nt: %ld ",
93495@@ -1075,14 +1075,14 @@ rcu_torture_printk(char *page)
93496 n_barrier_attempts,
93497 n_rcu_torture_barrier_error);
93498 page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
93499- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
93500+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
93501 n_rcu_torture_barrier_error != 0 ||
93502 n_rcu_torture_boost_ktrerror != 0 ||
93503 n_rcu_torture_boost_rterror != 0 ||
93504 n_rcu_torture_boost_failure != 0 ||
93505 i > 1) {
93506 page += sprintf(page, "!!! ");
93507- atomic_inc(&n_rcu_torture_error);
93508+ atomic_inc_unchecked(&n_rcu_torture_error);
93509 WARN_ON_ONCE(1);
93510 }
93511 page += sprintf(page, "Reader Pipe: ");
93512@@ -1096,7 +1096,7 @@ rcu_torture_printk(char *page)
93513 page += sprintf(page, "Free-Block Circulation: ");
93514 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
93515 page += sprintf(page, " %d",
93516- atomic_read(&rcu_torture_wcount[i]));
93517+ atomic_read_unchecked(&rcu_torture_wcount[i]));
93518 }
93519 page += sprintf(page, "\n");
93520 if (cur_ops->stats)
93521@@ -1461,7 +1461,7 @@ rcu_torture_cleanup(void)
93522
93523 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
93524
93525- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
93526+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
93527 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
93528 else if (torture_onoff_failures())
93529 rcu_torture_print_module_parms(cur_ops,
93530@@ -1584,18 +1584,18 @@ rcu_torture_init(void)
93531
93532 rcu_torture_current = NULL;
93533 rcu_torture_current_version = 0;
93534- atomic_set(&n_rcu_torture_alloc, 0);
93535- atomic_set(&n_rcu_torture_alloc_fail, 0);
93536- atomic_set(&n_rcu_torture_free, 0);
93537- atomic_set(&n_rcu_torture_mberror, 0);
93538- atomic_set(&n_rcu_torture_error, 0);
93539+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
93540+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
93541+ atomic_set_unchecked(&n_rcu_torture_free, 0);
93542+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
93543+ atomic_set_unchecked(&n_rcu_torture_error, 0);
93544 n_rcu_torture_barrier_error = 0;
93545 n_rcu_torture_boost_ktrerror = 0;
93546 n_rcu_torture_boost_rterror = 0;
93547 n_rcu_torture_boost_failure = 0;
93548 n_rcu_torture_boosts = 0;
93549 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
93550- atomic_set(&rcu_torture_wcount[i], 0);
93551+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
93552 for_each_possible_cpu(cpu) {
93553 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
93554 per_cpu(rcu_torture_count, cpu)[i] = 0;
93555diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
93556index c639556..cf0a0d5 100644
93557--- a/kernel/rcu/srcu.c
93558+++ b/kernel/rcu/srcu.c
93559@@ -298,9 +298,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
93560
93561 idx = ACCESS_ONCE(sp->completed) & 0x1;
93562 preempt_disable();
93563- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
93564+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
93565 smp_mb(); /* B */ /* Avoid leaking the critical section. */
93566- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
93567+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
93568 preempt_enable();
93569 return idx;
93570 }
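
[Note] Throughout this patch, writes through ACCESS_ONCE() become ACCESS_ONCE_RW(). In the grsecurity tree, ACCESS_ONCE() casts through a const volatile pointer, so an accidental write through it fails to compile; deliberate write sites must opt in via the _RW variant. A simplified rendering (the real definitions live in include/linux/compiler.h):

	#include <stdio.h>

	#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
	#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

	int main(void)
	{
		unsigned long jiffies_stall = 0;

		ACCESS_ONCE_RW(jiffies_stall) = 42;		/* write site: RW form */
		printf("%lu\n", ACCESS_ONCE(jiffies_stall));	/* read: const form */
		/* ACCESS_ONCE(jiffies_stall) = 1; -- rejected: the lvalue is const */
		return 0;
	}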
93571diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
93572index d9efcc1..ea543e9 100644
93573--- a/kernel/rcu/tiny.c
93574+++ b/kernel/rcu/tiny.c
93575@@ -42,7 +42,7 @@
93576 /* Forward declarations for tiny_plugin.h. */
93577 struct rcu_ctrlblk;
93578 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
93579-static void rcu_process_callbacks(struct softirq_action *unused);
93580+static void rcu_process_callbacks(void);
93581 static void __call_rcu(struct rcu_head *head,
93582 void (*func)(struct rcu_head *rcu),
93583 struct rcu_ctrlblk *rcp);
93584@@ -308,7 +308,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
93585 false));
93586 }
93587
93588-static void rcu_process_callbacks(struct softirq_action *unused)
93589+static __latent_entropy void rcu_process_callbacks(void)
93590 {
93591 __rcu_process_callbacks(&rcu_sched_ctrlblk);
93592 __rcu_process_callbacks(&rcu_bh_ctrlblk);
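
[Note] Two things happen in the tiny.c hunk above: the softirq handler loses its never-used struct softirq_action * argument (grsecurity tightens open_softirq() to take a plain void (*)(void), shrinking the set of functions an overwritten pointer could be confused with), and the handler gains __latent_entropy, the GCC-plugin attribute that mixes a little entropy into a global pool each time instrumented code runs. A sketch of how the attribute degrades to a no-op when the plugin is absent (the guard macro follows the grsecurity convention but is shown here as an assumption):

	#include <stdio.h>

	#ifdef LATENT_ENTROPY_PLUGIN
	#define __latent_entropy __attribute__((latent_entropy))
	#else
	#define __latent_entropy	/* plugin not loaded: attribute vanishes */
	#endif

	static __latent_entropy void rcu_process_callbacks_demo(void)
	{
		puts("processing callbacks");
	}

	int main(void)
	{
		rcu_process_callbacks_demo();
		return 0;
	}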
93593diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
93594index 858c565..7efd915 100644
93595--- a/kernel/rcu/tiny_plugin.h
93596+++ b/kernel/rcu/tiny_plugin.h
93597@@ -152,17 +152,17 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
93598 dump_stack();
93599 }
93600 if (*rcp->curtail && ULONG_CMP_GE(j, js))
93601- ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
93602+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies +
93603 3 * rcu_jiffies_till_stall_check() + 3;
93604 else if (ULONG_CMP_GE(j, js))
93605- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
93606+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
93607 }
93608
93609 static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
93610 {
93611 rcp->ticks_this_gp = 0;
93612 rcp->gp_start = jiffies;
93613- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
93614+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
93615 }
93616
93617 static void check_cpu_stalls(void)
93618diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
93619index 625d0b0..0bce4d6 100644
93620--- a/kernel/rcu/tree.c
93621+++ b/kernel/rcu/tree.c
93622@@ -263,7 +263,7 @@ static void rcu_momentary_dyntick_idle(void)
93623 */
93624 rdtp = this_cpu_ptr(&rcu_dynticks);
93625 smp_mb__before_atomic(); /* Earlier stuff before QS. */
93626- atomic_add(2, &rdtp->dynticks); /* QS. */
93627+ atomic_add_unchecked(2, &rdtp->dynticks); /* QS. */
93628 smp_mb__after_atomic(); /* Later stuff after QS. */
93629 break;
93630 }
93631@@ -523,9 +523,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
93632 rcu_prepare_for_idle(smp_processor_id());
93633 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
93634 smp_mb__before_atomic(); /* See above. */
93635- atomic_inc(&rdtp->dynticks);
93636+ atomic_inc_unchecked(&rdtp->dynticks);
93637 smp_mb__after_atomic(); /* Force ordering with next sojourn. */
93638- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
93639+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
93640
93641 /*
93642 * It is illegal to enter an extended quiescent state while
93643@@ -643,10 +643,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
93644 int user)
93645 {
93646 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
93647- atomic_inc(&rdtp->dynticks);
93648+ atomic_inc_unchecked(&rdtp->dynticks);
93649 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
93650 smp_mb__after_atomic(); /* See above. */
93651- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
93652+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
93653 rcu_cleanup_after_idle(smp_processor_id());
93654 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
93655 if (!user && !is_idle_task(current)) {
93656@@ -767,14 +767,14 @@ void rcu_nmi_enter(void)
93657 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
93658
93659 if (rdtp->dynticks_nmi_nesting == 0 &&
93660- (atomic_read(&rdtp->dynticks) & 0x1))
93661+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
93662 return;
93663 rdtp->dynticks_nmi_nesting++;
93664 smp_mb__before_atomic(); /* Force delay from prior write. */
93665- atomic_inc(&rdtp->dynticks);
93666+ atomic_inc_unchecked(&rdtp->dynticks);
93667 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
93668 smp_mb__after_atomic(); /* See above. */
93669- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
93670+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
93671 }
93672
93673 /**
93674@@ -793,9 +793,9 @@ void rcu_nmi_exit(void)
93675 return;
93676 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
93677 smp_mb__before_atomic(); /* See above. */
93678- atomic_inc(&rdtp->dynticks);
93679+ atomic_inc_unchecked(&rdtp->dynticks);
93680 smp_mb__after_atomic(); /* Force delay to next write. */
93681- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
93682+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
93683 }
93684
93685 /**
93686@@ -808,7 +808,7 @@ void rcu_nmi_exit(void)
93687 */
93688 bool notrace __rcu_is_watching(void)
93689 {
93690- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
93691+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
93692 }
93693
93694 /**
93695@@ -891,7 +891,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
93696 static int dyntick_save_progress_counter(struct rcu_data *rdp,
93697 bool *isidle, unsigned long *maxj)
93698 {
93699- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
93700+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
93701 rcu_sysidle_check_cpu(rdp, isidle, maxj);
93702 if ((rdp->dynticks_snap & 0x1) == 0) {
93703 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
93704@@ -920,7 +920,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
93705 int *rcrmp;
93706 unsigned int snap;
93707
93708- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
93709+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
93710 snap = (unsigned int)rdp->dynticks_snap;
93711
93712 /*
93713@@ -983,10 +983,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
93714 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
93715 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
93716 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
93717- ACCESS_ONCE(rdp->cond_resched_completed) =
93718+ ACCESS_ONCE_RW(rdp->cond_resched_completed) =
93719 ACCESS_ONCE(rdp->mynode->completed);
93720 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
93721- ACCESS_ONCE(*rcrmp) =
93722+ ACCESS_ONCE_RW(*rcrmp) =
93723 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
93724 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
93725 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
93726@@ -1008,7 +1008,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
93727 rsp->gp_start = j;
93728 smp_wmb(); /* Record start time before stall time. */
93729 j1 = rcu_jiffies_till_stall_check();
93730- ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
93731+ ACCESS_ONCE_RW(rsp->jiffies_stall) = j + j1;
93732 rsp->jiffies_resched = j + j1 / 2;
93733 }
93734
93735@@ -1052,7 +1052,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
93736 raw_spin_unlock_irqrestore(&rnp->lock, flags);
93737 return;
93738 }
93739- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
93740+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
93741 raw_spin_unlock_irqrestore(&rnp->lock, flags);
93742
93743 /*
93744@@ -1130,7 +1130,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
93745
93746 raw_spin_lock_irqsave(&rnp->lock, flags);
93747 if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
93748- ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
93749+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies +
93750 3 * rcu_jiffies_till_stall_check() + 3;
93751 raw_spin_unlock_irqrestore(&rnp->lock, flags);
93752
93753@@ -1214,7 +1214,7 @@ void rcu_cpu_stall_reset(void)
93754 struct rcu_state *rsp;
93755
93756 for_each_rcu_flavor(rsp)
93757- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
93758+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
93759 }
93760
93761 /*
93762@@ -1594,7 +1594,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
93763 raw_spin_unlock_irq(&rnp->lock);
93764 return 0;
93765 }
93766- ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
93767+ ACCESS_ONCE_RW(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
93768
93769 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
93770 /*
93771@@ -1635,9 +1635,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
93772 rdp = this_cpu_ptr(rsp->rda);
93773 rcu_preempt_check_blocked_tasks(rnp);
93774 rnp->qsmask = rnp->qsmaskinit;
93775- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
93776+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
93777 WARN_ON_ONCE(rnp->completed != rsp->completed);
93778- ACCESS_ONCE(rnp->completed) = rsp->completed;
93779+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
93780 if (rnp == rdp->mynode)
93781 (void)__note_gp_changes(rsp, rnp, rdp);
93782 rcu_preempt_boost_start_gp(rnp);
93783@@ -1687,7 +1687,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
93784 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
93785 raw_spin_lock_irq(&rnp->lock);
93786 smp_mb__after_unlock_lock();
93787- ACCESS_ONCE(rsp->gp_flags) &= ~RCU_GP_FLAG_FQS;
93788+ ACCESS_ONCE_RW(rsp->gp_flags) &= ~RCU_GP_FLAG_FQS;
93789 raw_spin_unlock_irq(&rnp->lock);
93790 }
93791 return fqs_state;
93792@@ -1732,7 +1732,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
93793 rcu_for_each_node_breadth_first(rsp, rnp) {
93794 raw_spin_lock_irq(&rnp->lock);
93795 smp_mb__after_unlock_lock();
93796- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
93797+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
93798 rdp = this_cpu_ptr(rsp->rda);
93799 if (rnp == rdp->mynode)
93800 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
93801@@ -1747,14 +1747,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
93802 rcu_nocb_gp_set(rnp, nocb);
93803
93804 /* Declare grace period done. */
93805- ACCESS_ONCE(rsp->completed) = rsp->gpnum;
93806+ ACCESS_ONCE_RW(rsp->completed) = rsp->gpnum;
93807 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
93808 rsp->fqs_state = RCU_GP_IDLE;
93809 rdp = this_cpu_ptr(rsp->rda);
93810 /* Advance CBs to reduce false positives below. */
93811 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
93812 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
93813- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
93814+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
93815 trace_rcu_grace_period(rsp->name,
93816 ACCESS_ONCE(rsp->gpnum),
93817 TPS("newreq"));
93818@@ -1879,7 +1879,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
93819 */
93820 return false;
93821 }
93822- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
93823+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
93824 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
93825 TPS("newreq"));
93826
93827@@ -2100,7 +2100,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
93828 rsp->qlen += rdp->qlen;
93829 rdp->n_cbs_orphaned += rdp->qlen;
93830 rdp->qlen_lazy = 0;
93831- ACCESS_ONCE(rdp->qlen) = 0;
93832+ ACCESS_ONCE_RW(rdp->qlen) = 0;
93833 }
93834
93835 /*
93836@@ -2347,7 +2347,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
93837 }
93838 smp_mb(); /* List handling before counting for rcu_barrier(). */
93839 rdp->qlen_lazy -= count_lazy;
93840- ACCESS_ONCE(rdp->qlen) -= count;
93841+ ACCESS_ONCE_RW(rdp->qlen) -= count;
93842 rdp->n_cbs_invoked += count;
93843
93844 /* Reinstate batch limit if we have worked down the excess. */
93845@@ -2492,7 +2492,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
93846 if (rnp_old != NULL)
93847 raw_spin_unlock(&rnp_old->fqslock);
93848 if (ret) {
93849- ACCESS_ONCE(rsp->n_force_qs_lh)++;
93850+ ACCESS_ONCE_RW(rsp->n_force_qs_lh)++;
93851 return;
93852 }
93853 rnp_old = rnp;
93854@@ -2504,11 +2504,11 @@ static void force_quiescent_state(struct rcu_state *rsp)
93855 smp_mb__after_unlock_lock();
93856 raw_spin_unlock(&rnp_old->fqslock);
93857 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
93858- ACCESS_ONCE(rsp->n_force_qs_lh)++;
93859+ ACCESS_ONCE_RW(rsp->n_force_qs_lh)++;
93860 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
93861 return; /* Someone beat us to it. */
93862 }
93863- ACCESS_ONCE(rsp->gp_flags) |= RCU_GP_FLAG_FQS;
93864+ ACCESS_ONCE_RW(rsp->gp_flags) |= RCU_GP_FLAG_FQS;
93865 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
93866 wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */
93867 }
93868@@ -2553,7 +2553,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
93869 /*
93870 * Do RCU core processing for the current CPU.
93871 */
93872-static void rcu_process_callbacks(struct softirq_action *unused)
93873+static void rcu_process_callbacks(void)
93874 {
93875 struct rcu_state *rsp;
93876
93877@@ -2665,7 +2665,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
93878 WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
93879 if (debug_rcu_head_queue(head)) {
93880 /* Probable double call_rcu(), so leak the callback. */
93881- ACCESS_ONCE(head->func) = rcu_leak_callback;
93882+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
93883 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
93884 return;
93885 }
93886@@ -2693,7 +2693,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
93887 local_irq_restore(flags);
93888 return;
93889 }
93890- ACCESS_ONCE(rdp->qlen)++;
93891+ ACCESS_ONCE_RW(rdp->qlen)++;
93892 if (lazy)
93893 rdp->qlen_lazy++;
93894 else
93895@@ -2968,11 +2968,11 @@ void synchronize_sched_expedited(void)
93896 * counter wrap on a 32-bit system. Quite a few more CPUs would of
93897 * course be required on a 64-bit system.
93898 */
93899- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
93900+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
93901 (ulong)atomic_long_read(&rsp->expedited_done) +
93902 ULONG_MAX / 8)) {
93903 synchronize_sched();
93904- atomic_long_inc(&rsp->expedited_wrap);
93905+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
93906 return;
93907 }
93908
93909@@ -2980,7 +2980,7 @@ void synchronize_sched_expedited(void)
93910 * Take a ticket. Note that atomic_inc_return() implies a
93911 * full memory barrier.
93912 */
93913- snap = atomic_long_inc_return(&rsp->expedited_start);
93914+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
93915 firstsnap = snap;
93916 get_online_cpus();
93917 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
93918@@ -2993,14 +2993,14 @@ void synchronize_sched_expedited(void)
93919 synchronize_sched_expedited_cpu_stop,
93920 NULL) == -EAGAIN) {
93921 put_online_cpus();
93922- atomic_long_inc(&rsp->expedited_tryfail);
93923+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
93924
93925 /* Check to see if someone else did our work for us. */
93926 s = atomic_long_read(&rsp->expedited_done);
93927 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
93928 /* ensure test happens before caller kfree */
93929 smp_mb__before_atomic(); /* ^^^ */
93930- atomic_long_inc(&rsp->expedited_workdone1);
93931+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
93932 return;
93933 }
93934
93935@@ -3009,7 +3009,7 @@ void synchronize_sched_expedited(void)
93936 udelay(trycount * num_online_cpus());
93937 } else {
93938 wait_rcu_gp(call_rcu_sched);
93939- atomic_long_inc(&rsp->expedited_normal);
93940+ atomic_long_inc_unchecked(&rsp->expedited_normal);
93941 return;
93942 }
93943
93944@@ -3018,7 +3018,7 @@ void synchronize_sched_expedited(void)
93945 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
93946 /* ensure test happens before caller kfree */
93947 smp_mb__before_atomic(); /* ^^^ */
93948- atomic_long_inc(&rsp->expedited_workdone2);
93949+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
93950 return;
93951 }
93952
93953@@ -3030,10 +3030,10 @@ void synchronize_sched_expedited(void)
93954 * period works for us.
93955 */
93956 get_online_cpus();
93957- snap = atomic_long_read(&rsp->expedited_start);
93958+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
93959 smp_mb(); /* ensure read is before try_stop_cpus(). */
93960 }
93961- atomic_long_inc(&rsp->expedited_stoppedcpus);
93962+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
93963
93964 /*
93965 * Everyone up to our most recent fetch is covered by our grace
93966@@ -3042,16 +3042,16 @@ void synchronize_sched_expedited(void)
93967 * than we did already did their update.
93968 */
93969 do {
93970- atomic_long_inc(&rsp->expedited_done_tries);
93971+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
93972 s = atomic_long_read(&rsp->expedited_done);
93973 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
93974 /* ensure test happens before caller kfree */
93975 smp_mb__before_atomic(); /* ^^^ */
93976- atomic_long_inc(&rsp->expedited_done_lost);
93977+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
93978 break;
93979 }
93980 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
93981- atomic_long_inc(&rsp->expedited_done_exit);
93982+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
93983
93984 put_online_cpus();
93985 }
93986@@ -3257,7 +3257,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
93987 * ACCESS_ONCE() to prevent the compiler from speculating
93988 * the increment to precede the early-exit check.
93989 */
93990- ACCESS_ONCE(rsp->n_barrier_done)++;
93991+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
93992 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
93993 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
93994 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
93995@@ -3307,7 +3307,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
93996
93997 /* Increment ->n_barrier_done to prevent duplicate work. */
93998 smp_mb(); /* Keep increment after above mechanism. */
93999- ACCESS_ONCE(rsp->n_barrier_done)++;
94000+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
94001 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
94002 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
94003 smp_mb(); /* Keep increment before caller's subsequent code. */
94004@@ -3352,10 +3352,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
94005 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
94006 init_callback_list(rdp);
94007 rdp->qlen_lazy = 0;
94008- ACCESS_ONCE(rdp->qlen) = 0;
94009+ ACCESS_ONCE_RW(rdp->qlen) = 0;
94010 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
94011 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
94012- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
94013+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
94014 rdp->cpu = cpu;
94015 rdp->rsp = rsp;
94016 rcu_boot_init_nocb_percpu_data(rdp);
94017@@ -3388,8 +3388,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
94018 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
94019 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
94020 rcu_sysidle_init_percpu_data(rdp->dynticks);
94021- atomic_set(&rdp->dynticks->dynticks,
94022- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
94023+ atomic_set_unchecked(&rdp->dynticks->dynticks,
94024+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
94025 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
94026
94027 /* Add CPU to rcu_node bitmasks. */
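
[Note] The tree.c conversions all target rdtp->dynticks and friends, which encode state in the counter's low bit: the value is even while the CPU is in an extended quiescent state (idle) and odd otherwise, which is why the surrounding WARN_ON_ONCE() calls test & 0x1. Overflow is meaningless for such a phase counter, so exempting it from REFCOUNT checking is safe. The protocol in miniature:

	#include <stdio.h>

	int main(void)
	{
		int dynticks = 1;	/* boot state: non-idle, odd */

		dynticks += 1;		/* rcu_eqs_enter_common(): even => idle */
		printf("idle? %s\n", (dynticks & 0x1) ? "no" : "yes");

		dynticks += 1;		/* rcu_eqs_exit_common(): odd => running */
		printf("idle? %s\n", (dynticks & 0x1) ? "no" : "yes");
		return 0;
	}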
94028diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
94029index 0f69a79..c85c2dc 100644
94030--- a/kernel/rcu/tree.h
94031+++ b/kernel/rcu/tree.h
94032@@ -87,11 +87,11 @@ struct rcu_dynticks {
94033 long long dynticks_nesting; /* Track irq/process nesting level. */
94034 /* Process level is worth LLONG_MAX/2. */
94035 int dynticks_nmi_nesting; /* Track NMI nesting level. */
94036- atomic_t dynticks; /* Even value for idle, else odd. */
94037+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
94038 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
94039 long long dynticks_idle_nesting;
94040 /* irq/process nesting level from idle. */
94041- atomic_t dynticks_idle; /* Even value for idle, else odd. */
94042+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
94043 /* "Idle" excludes userspace execution. */
94044 unsigned long dynticks_idle_jiffies;
94045 /* End of last non-NMI non-idle period. */
94046@@ -435,17 +435,17 @@ struct rcu_state {
94047 /* _rcu_barrier(). */
94048 /* End of fields guarded by barrier_mutex. */
94049
94050- atomic_long_t expedited_start; /* Starting ticket. */
94051- atomic_long_t expedited_done; /* Done ticket. */
94052- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
94053- atomic_long_t expedited_tryfail; /* # acquisition failures. */
94054- atomic_long_t expedited_workdone1; /* # done by others #1. */
94055- atomic_long_t expedited_workdone2; /* # done by others #2. */
94056- atomic_long_t expedited_normal; /* # fallbacks to normal. */
94057- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
94058- atomic_long_t expedited_done_tries; /* # tries to update _done. */
94059- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
94060- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
94061+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
94062+ atomic_long_t expedited_done; /* Done ticket. */
94063+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
94064+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
94065+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
94066+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
94067+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
94068+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
94069+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
94070+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
94071+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
94072
94073 unsigned long jiffies_force_qs; /* Time at which to invoke */
94074 /* force_quiescent_state(). */
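
[Note] The expedited_* fields above are tickets and event tallies; the code consuming them already anticipates wraparound, comparing snapshots with the kernel's modular ULONG_CMP_GE() rather than plain >=. That is what makes the unchecked types appropriate here (expedited_done stays atomic_long_t, plausibly because it is updated via atomic_long_cmpxchg()). A small demonstration of the wrap-tolerant comparison, using the definition from include/linux/rcupdate.h:

	#include <stdio.h>
	#include <limits.h>

	/* True iff (a - b), computed modulo ULONG_MAX + 1, is "small",
	 * i.e. a is at or after b even across a wrap. */
	#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

	int main(void)
	{
		unsigned long snap = ULONG_MAX - 1;	/* ticket taken just before wrap */
		unsigned long done = 1;			/* counter has since wrapped */

		printf("done >= snap (modular): %d\n", ULONG_CMP_GE(done, snap));
		printf("done >= snap (plain):   %d\n", done >= snap);
		return 0;
	}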
94075diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
94076index 02ac0fb..4aa4a36 100644
94077--- a/kernel/rcu/tree_plugin.h
94078+++ b/kernel/rcu/tree_plugin.h
94079@@ -735,7 +735,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
94080 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
94081 {
94082 return !rcu_preempted_readers_exp(rnp) &&
94083- ACCESS_ONCE(rnp->expmask) == 0;
94084+ ACCESS_ONCE_RW(rnp->expmask) == 0;
94085 }
94086
94087 /*
94088@@ -897,7 +897,7 @@ void synchronize_rcu_expedited(void)
94089
94090 /* Clean up and exit. */
94091 smp_mb(); /* ensure expedited GP seen before counter increment. */
94092- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
94093+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
94094 unlock_mb_ret:
94095 mutex_unlock(&sync_rcu_preempt_exp_mutex);
94096 mb_ret:
94097@@ -1447,7 +1447,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
94098 free_cpumask_var(cm);
94099 }
94100
94101-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
94102+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
94103 .store = &rcu_cpu_kthread_task,
94104 .thread_should_run = rcu_cpu_kthread_should_run,
94105 .thread_fn = rcu_cpu_kthread,
94106@@ -1926,7 +1926,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
94107 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
94108 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
94109 cpu, ticks_value, ticks_title,
94110- atomic_read(&rdtp->dynticks) & 0xfff,
94111+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
94112 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
94113 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
94114 fast_no_hz);
94115@@ -2079,7 +2079,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
94116
94117 /* Enqueue the callback on the nocb list and update counts. */
94118 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
94119- ACCESS_ONCE(*old_rhpp) = rhp;
94120+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
94121 atomic_long_add(rhcount, &rdp->nocb_q_count);
94122 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
94123
94124@@ -2255,12 +2255,12 @@ static int rcu_nocb_kthread(void *arg)
94125 * Extract queued callbacks, update counts, and wait
94126 * for a grace period to elapse.
94127 */
94128- ACCESS_ONCE(rdp->nocb_head) = NULL;
94129+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
94130 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
94131 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
94132 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
94133- ACCESS_ONCE(rdp->nocb_p_count) += c;
94134- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
94135+ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
94136+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
94137 rcu_nocb_wait_gp(rdp);
94138
94139 /* Each pass through the following loop invokes a callback. */
94140@@ -2286,8 +2286,8 @@ static int rcu_nocb_kthread(void *arg)
94141 list = next;
94142 }
94143 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
94144- ACCESS_ONCE(rdp->nocb_p_count) -= c;
94145- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
94146+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
94147+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
94148 rdp->n_nocbs_invoked += c;
94149 }
94150 return 0;
94151@@ -2304,7 +2304,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
94152 {
94153 if (!rcu_nocb_need_deferred_wakeup(rdp))
94154 return;
94155- ACCESS_ONCE(rdp->nocb_defer_wakeup) = false;
94156+ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = false;
94157 wake_up(&rdp->nocb_wq);
94158 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty"));
94159 }
94160@@ -2330,7 +2330,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
94161 t = kthread_run(rcu_nocb_kthread, rdp,
94162 "rcuo%c/%d", rsp->abbr, cpu);
94163 BUG_ON(IS_ERR(t));
94164- ACCESS_ONCE(rdp->nocb_kthread) = t;
94165+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
94166 }
94167 }
94168
94169@@ -2461,11 +2461,11 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
94170
94171 /* Record start of fully idle period. */
94172 j = jiffies;
94173- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
94174+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
94175 smp_mb__before_atomic();
94176- atomic_inc(&rdtp->dynticks_idle);
94177+ atomic_inc_unchecked(&rdtp->dynticks_idle);
94178 smp_mb__after_atomic();
94179- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
94180+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
94181 }
94182
94183 /*
94184@@ -2530,9 +2530,9 @@ static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
94185
94186 /* Record end of idle period. */
94187 smp_mb__before_atomic();
94188- atomic_inc(&rdtp->dynticks_idle);
94189+ atomic_inc_unchecked(&rdtp->dynticks_idle);
94190 smp_mb__after_atomic();
94191- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
94192+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
94193
94194 /*
94195 * If we are the timekeeping CPU, we are permitted to be non-idle
94196@@ -2573,7 +2573,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
94197 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
94198
94199 /* Pick up current idle and NMI-nesting counter and check. */
94200- cur = atomic_read(&rdtp->dynticks_idle);
94201+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
94202 if (cur & 0x1) {
94203 *isidle = false; /* We are not idle! */
94204 return;
94205@@ -2622,7 +2622,7 @@ static void rcu_sysidle(unsigned long j)
94206 case RCU_SYSIDLE_NOT:
94207
94208 /* First time all are idle, so note a short idle period. */
94209- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
94210+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
94211 break;
94212
94213 case RCU_SYSIDLE_SHORT:
94214@@ -2660,7 +2660,7 @@ static void rcu_sysidle_cancel(void)
94215 {
94216 smp_mb();
94217 if (full_sysidle_state > RCU_SYSIDLE_SHORT)
94218- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
94219+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
94220 }
94221
94222 /*
94223@@ -2708,7 +2708,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
94224 smp_mb(); /* grace period precedes setting inuse. */
94225
94226 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
94227- ACCESS_ONCE(rshp->inuse) = 0;
94228+ ACCESS_ONCE_RW(rshp->inuse) = 0;
94229 }
94230
94231 /*
94232diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
94233index 5cdc62e..cc52e88 100644
94234--- a/kernel/rcu/tree_trace.c
94235+++ b/kernel/rcu/tree_trace.c
94236@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
94237 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
94238 rdp->passed_quiesce, rdp->qs_pending);
94239 seq_printf(m, " dt=%d/%llx/%d df=%lu",
94240- atomic_read(&rdp->dynticks->dynticks),
94241+ atomic_read_unchecked(&rdp->dynticks->dynticks),
94242 rdp->dynticks->dynticks_nesting,
94243 rdp->dynticks->dynticks_nmi_nesting,
94244 rdp->dynticks_fqs);
94245@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
94246 struct rcu_state *rsp = (struct rcu_state *)m->private;
94247
94248 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
94249- atomic_long_read(&rsp->expedited_start),
94250+ atomic_long_read_unchecked(&rsp->expedited_start),
94251 atomic_long_read(&rsp->expedited_done),
94252- atomic_long_read(&rsp->expedited_wrap),
94253- atomic_long_read(&rsp->expedited_tryfail),
94254- atomic_long_read(&rsp->expedited_workdone1),
94255- atomic_long_read(&rsp->expedited_workdone2),
94256- atomic_long_read(&rsp->expedited_normal),
94257- atomic_long_read(&rsp->expedited_stoppedcpus),
94258- atomic_long_read(&rsp->expedited_done_tries),
94259- atomic_long_read(&rsp->expedited_done_lost),
94260- atomic_long_read(&rsp->expedited_done_exit));
94261+ atomic_long_read_unchecked(&rsp->expedited_wrap),
94262+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
94263+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
94264+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
94265+ atomic_long_read_unchecked(&rsp->expedited_normal),
94266+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
94267+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
94268+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
94269+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
94270 return 0;
94271 }
94272
94273diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
94274index bc78835..7691a45 100644
94275--- a/kernel/rcu/update.c
94276+++ b/kernel/rcu/update.c
94277@@ -311,10 +311,10 @@ int rcu_jiffies_till_stall_check(void)
94278 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
94279 */
94280 if (till_stall_check < 3) {
94281- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
94282+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
94283 till_stall_check = 3;
94284 } else if (till_stall_check > 300) {
94285- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
94286+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
94287 till_stall_check = 300;
94288 }
94289 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
94290diff --git a/kernel/resource.c b/kernel/resource.c
94291index 3c2237a..4568d96 100644
94292--- a/kernel/resource.c
94293+++ b/kernel/resource.c
94294@@ -152,8 +152,18 @@ static const struct file_operations proc_iomem_operations = {
94295
94296 static int __init ioresources_init(void)
94297 {
94298+#ifdef CONFIG_GRKERNSEC_PROC_ADD
94299+#ifdef CONFIG_GRKERNSEC_PROC_USER
94300+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
94301+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
94302+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
94303+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
94304+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
94305+#endif
94306+#else
94307 proc_create("ioports", 0, NULL, &proc_ioports_operations);
94308 proc_create("iomem", 0, NULL, &proc_iomem_operations);
94309+#endif
94310 return 0;
94311 }
94312 __initcall(ioresources_init);
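
[Note] The resource.c hunk narrows who can read /proc/ioports and /proc/iomem under the GRKERNSEC_PROC_ADD options: owner-only with PROC_USER, owner plus the configured group with PROC_USERGROUP, falling through to the stock entries otherwise (procfs treats the upstream mode of 0 as world-readable). The resulting modes, spelled out:

	#include <stdio.h>
	#include <sys/stat.h>

	int main(void)
	{
		printf("GRKERNSEC_PROC_USER:      %04o\n", S_IRUSR);		/* 0400 */
		printf("GRKERNSEC_PROC_USERGROUP: %04o\n", S_IRUSR | S_IRGRP);	/* 0440 */
		printf("stock kernel default:     %04o\n",
		       S_IRUSR | S_IRGRP | S_IROTH);				/* 0444 */
		return 0;
	}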
94313diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
94314index e73efba..c9bfbd4 100644
94315--- a/kernel/sched/auto_group.c
94316+++ b/kernel/sched/auto_group.c
94317@@ -11,7 +11,7 @@
94318
94319 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
94320 static struct autogroup autogroup_default;
94321-static atomic_t autogroup_seq_nr;
94322+static atomic_unchecked_t autogroup_seq_nr;
94323
94324 void __init autogroup_init(struct task_struct *init_task)
94325 {
94326@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
94327
94328 kref_init(&ag->kref);
94329 init_rwsem(&ag->lock);
94330- ag->id = atomic_inc_return(&autogroup_seq_nr);
94331+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
94332 ag->tg = tg;
94333 #ifdef CONFIG_RT_GROUP_SCHED
94334 /*
94335diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
94336index a63f4dc..349bbb0 100644
94337--- a/kernel/sched/completion.c
94338+++ b/kernel/sched/completion.c
94339@@ -204,7 +204,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
94340 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
94341 * or number of jiffies left till timeout) if completed.
94342 */
94343-long __sched
94344+long __sched __intentional_overflow(-1)
94345 wait_for_completion_interruptible_timeout(struct completion *x,
94346 unsigned long timeout)
94347 {
94348@@ -221,7 +221,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
94349 *
94350 * Return: -ERESTARTSYS if interrupted, 0 if completed.
94351 */
94352-int __sched wait_for_completion_killable(struct completion *x)
94353+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
94354 {
94355 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
94356 if (t == -ERESTARTSYS)
94357@@ -242,7 +242,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
94358 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
94359 * or number of jiffies left till timeout) if completed.
94360 */
94361-long __sched
94362+long __sched __intentional_overflow(-1)
94363 wait_for_completion_killable_timeout(struct completion *x,
94364 unsigned long timeout)
94365 {
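
[Note] wait_for_completion_killable() and the *_timeout() helpers above return a long that multiplexes -ERESTARTSYS, 0, and a remaining-jiffies count, so arithmetic on it can look like signed overflow to PaX's size_overflow plugin; __intentional_overflow(-1) whitelists the return value. A sketch of how such an annotation can degrade gracefully without the plugin (the guard macro is illustrative):

	#include <stdio.h>

	#ifdef SIZE_OVERFLOW_PLUGIN
	#define __intentional_overflow(...) \
		__attribute__((intentional_overflow(__VA_ARGS__)))
	#else
	#define __intentional_overflow(...)	/* no plugin: annotation is a no-op */
	#endif

	#define ERESTARTSYS 512

	/* Mimics the annotated helpers: <0 error, 0 timed out, >0 jiffies left */
	static long __intentional_overflow(-1) wait_demo(long timeout, int interrupted)
	{
		if (interrupted)
			return -ERESTARTSYS;
		return timeout > 1 ? timeout - 1 : 0;
	}

	int main(void)
	{
		printf("%ld %ld\n", wait_demo(10, 0), wait_demo(10, 1));
		return 0;
	}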
94366diff --git a/kernel/sched/core.c b/kernel/sched/core.c
94367index 0acf96b..80ba955 100644
94368--- a/kernel/sched/core.c
94369+++ b/kernel/sched/core.c
94370@@ -1849,7 +1849,7 @@ void set_numabalancing_state(bool enabled)
94371 int sysctl_numa_balancing(struct ctl_table *table, int write,
94372 void __user *buffer, size_t *lenp, loff_t *ppos)
94373 {
94374- struct ctl_table t;
94375+ ctl_table_no_const t;
94376 int err;
94377 int state = numabalancing_enabled;
94378
94379@@ -2312,8 +2312,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
94380 next->active_mm = oldmm;
94381 atomic_inc(&oldmm->mm_count);
94382 enter_lazy_tlb(oldmm, next);
94383- } else
94384+ } else {
94385 switch_mm(oldmm, mm, next);
94386+ populate_stack();
94387+ }
94388
94389 if (!prev->mm) {
94390 prev->active_mm = NULL;
94391@@ -3081,6 +3083,8 @@ int can_nice(const struct task_struct *p, const int nice)
94392 /* convert nice value [19,-20] to rlimit style value [1,40] */
94393 int nice_rlim = nice_to_rlimit(nice);
94394
94395+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
94396+
94397 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
94398 capable(CAP_SYS_NICE));
94399 }
94400@@ -3107,7 +3111,8 @@ SYSCALL_DEFINE1(nice, int, increment)
94401 nice = task_nice(current) + increment;
94402
94403 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
94404- if (increment < 0 && !can_nice(current, nice))
94405+ if (increment < 0 && (!can_nice(current, nice) ||
94406+ gr_handle_chroot_nice()))
94407 return -EPERM;
94408
94409 retval = security_task_setnice(current, nice);
94410@@ -3380,6 +3385,7 @@ recheck:
94411 if (policy != p->policy && !rlim_rtprio)
94412 return -EPERM;
94413
94414+ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
94415 /* can't increase priority */
94416 if (attr->sched_priority > p->rt_priority &&
94417 attr->sched_priority > rlim_rtprio)
94418@@ -4772,6 +4778,7 @@ void idle_task_exit(void)
94419
94420 if (mm != &init_mm) {
94421 switch_mm(mm, &init_mm, current);
94422+ populate_stack();
94423 finish_arch_post_lock_switch();
94424 }
94425 mmdrop(mm);
94426@@ -4867,7 +4874,7 @@ static void migrate_tasks(unsigned int dead_cpu)
94427
94428 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
94429
94430-static struct ctl_table sd_ctl_dir[] = {
94431+static ctl_table_no_const sd_ctl_dir[] __read_only = {
94432 {
94433 .procname = "sched_domain",
94434 .mode = 0555,
94435@@ -4884,17 +4891,17 @@ static struct ctl_table sd_ctl_root[] = {
94436 {}
94437 };
94438
94439-static struct ctl_table *sd_alloc_ctl_entry(int n)
94440+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
94441 {
94442- struct ctl_table *entry =
94443+ ctl_table_no_const *entry =
94444 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
94445
94446 return entry;
94447 }
94448
94449-static void sd_free_ctl_entry(struct ctl_table **tablep)
94450+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
94451 {
94452- struct ctl_table *entry;
94453+ ctl_table_no_const *entry;
94454
94455 /*
94456 * In the intermediate directories, both the child directory and
94457@@ -4902,22 +4909,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
94458 * will always be set. In the lowest directory the names are
94459 * static strings and all have proc handlers.
94460 */
94461- for (entry = *tablep; entry->mode; entry++) {
94462- if (entry->child)
94463- sd_free_ctl_entry(&entry->child);
94464+ for (entry = tablep; entry->mode; entry++) {
94465+ if (entry->child) {
94466+ sd_free_ctl_entry(entry->child);
94467+ pax_open_kernel();
94468+ entry->child = NULL;
94469+ pax_close_kernel();
94470+ }
94471 if (entry->proc_handler == NULL)
94472 kfree(entry->procname);
94473 }
94474
94475- kfree(*tablep);
94476- *tablep = NULL;
94477+ kfree(tablep);
94478 }
94479
94480 static int min_load_idx = 0;
94481 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
94482
94483 static void
94484-set_table_entry(struct ctl_table *entry,
94485+set_table_entry(ctl_table_no_const *entry,
94486 const char *procname, void *data, int maxlen,
94487 umode_t mode, proc_handler *proc_handler,
94488 bool load_idx)
94489@@ -4937,7 +4947,7 @@ set_table_entry(struct ctl_table *entry,
94490 static struct ctl_table *
94491 sd_alloc_ctl_domain_table(struct sched_domain *sd)
94492 {
94493- struct ctl_table *table = sd_alloc_ctl_entry(14);
94494+ ctl_table_no_const *table = sd_alloc_ctl_entry(14);
94495
94496 if (table == NULL)
94497 return NULL;
94498@@ -4975,9 +4985,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
94499 return table;
94500 }
94501
94502-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
94503+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
94504 {
94505- struct ctl_table *entry, *table;
94506+ ctl_table_no_const *entry, *table;
94507 struct sched_domain *sd;
94508 int domain_num = 0, i;
94509 char buf[32];
94510@@ -5004,11 +5014,13 @@ static struct ctl_table_header *sd_sysctl_header;
94511 static void register_sched_domain_sysctl(void)
94512 {
94513 int i, cpu_num = num_possible_cpus();
94514- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
94515+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
94516 char buf[32];
94517
94518 WARN_ON(sd_ctl_dir[0].child);
94519+ pax_open_kernel();
94520 sd_ctl_dir[0].child = entry;
94521+ pax_close_kernel();
94522
94523 if (entry == NULL)
94524 return;
94525@@ -5031,8 +5043,12 @@ static void unregister_sched_domain_sysctl(void)
94526 if (sd_sysctl_header)
94527 unregister_sysctl_table(sd_sysctl_header);
94528 sd_sysctl_header = NULL;
94529- if (sd_ctl_dir[0].child)
94530- sd_free_ctl_entry(&sd_ctl_dir[0].child);
94531+ if (sd_ctl_dir[0].child) {
94532+ sd_free_ctl_entry(sd_ctl_dir[0].child);
94533+ pax_open_kernel();
94534+ sd_ctl_dir[0].child = NULL;
94535+ pax_close_kernel();
94536+ }
94537 }
94538 #else
94539 static void register_sched_domain_sysctl(void)
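
[Note] sd_ctl_dir is moved into __read_only memory, so the remaining writes to sd_ctl_dir[0].child must be bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift write protection (on x86 by toggling CR0.WP). A userspace analogy using mprotect(), assuming a POSIX system:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long pagesz = sysconf(_SC_PAGESIZE);
		char *table = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (table == MAP_FAILED)
			return EXIT_FAILURE;

		strcpy(table, "sched_domain");
		mprotect(table, pagesz, PROT_READ);		/* __read_only */

		mprotect(table, pagesz, PROT_READ | PROT_WRITE);/* pax_open_kernel() */
		table[0] = 'S';					/* the guarded write */
		mprotect(table, pagesz, PROT_READ);		/* pax_close_kernel() */

		printf("%s\n", table);
		return 0;
	}

The sd_free_ctl_entry() reshuffle exists for the same reason: clearing entry->child is now a write to protected memory, so it has to happen inside such a window.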
94540diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
94541index fea7d33..84faa94 100644
94542--- a/kernel/sched/fair.c
94543+++ b/kernel/sched/fair.c
94544@@ -1857,7 +1857,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
94545
94546 static void reset_ptenuma_scan(struct task_struct *p)
94547 {
94548- ACCESS_ONCE(p->mm->numa_scan_seq)++;
94549+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
94550 p->mm->numa_scan_offset = 0;
94551 }
94552
94553@@ -7289,7 +7289,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
94554 * run_rebalance_domains is triggered when needed from the scheduler tick.
94555 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
94556 */
94557-static void run_rebalance_domains(struct softirq_action *h)
94558+static __latent_entropy void run_rebalance_domains(void)
94559 {
94560 struct rq *this_rq = this_rq();
94561 enum cpu_idle_type idle = this_rq->idle_balance ?
94562diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
94563index 31cc02e..734fb85 100644
94564--- a/kernel/sched/sched.h
94565+++ b/kernel/sched/sched.h
94566@@ -1153,7 +1153,7 @@ struct sched_class {
94567 #ifdef CONFIG_FAIR_GROUP_SCHED
94568 void (*task_move_group) (struct task_struct *p, int on_rq);
94569 #endif
94570-};
94571+} __do_const;
94572
94573 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
94574 {
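
[Note] Tagging struct sched_class with __do_const lets the constify GCC plugin treat every instance as const, landing the scheduler-class method tables in .rodata so their function pointers cannot be rewritten at run time. The effect, approximated here without the plugin by writing the const by hand:

	#include <stdio.h>

	struct sched_class_demo {
		void (*enqueue_task)(int cpu);
	};

	static void enqueue_task_fair_demo(int cpu)
	{
		printf("enqueue on cpu %d\n", cpu);
	}

	/* With __do_const the plugin adds the qualifier for you: */
	static const struct sched_class_demo fair_sched_class_demo = {
		.enqueue_task = enqueue_task_fair_demo,
	};

	int main(void)
	{
		fair_sched_class_demo.enqueue_task(0);
		/* fair_sched_class_demo.enqueue_task = NULL; -- rejected at compile time */
		return 0;
	}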
94575diff --git a/kernel/seccomp.c b/kernel/seccomp.c
94576index 301bbc2..eda2da3 100644
94577--- a/kernel/seccomp.c
94578+++ b/kernel/seccomp.c
94579@@ -39,7 +39,7 @@
94580 * is only needed for handling filters shared across tasks.
94581 * @prev: points to a previously installed, or inherited, filter
94582 * @len: the number of instructions in the program
94583- * @insnsi: the BPF program instructions to evaluate
94584+ * @insns: the BPF program instructions to evaluate
94585 *
94586 * seccomp_filter objects are organized in a tree linked via the @prev
94587 * pointer. For any task, it appears to be a singly-linked list starting
94588@@ -54,32 +54,61 @@
94589 struct seccomp_filter {
94590 atomic_t usage;
94591 struct seccomp_filter *prev;
94592- struct sk_filter *prog;
94593+ unsigned short len; /* Instruction count */
94594+ struct sock_filter insns[];
94595 };
94596
94597 /* Limit any path through the tree to 256KB worth of instructions. */
94598 #define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
94599
94600-/*
94601+/**
94602+ * get_u32 - returns a u32 offset into data
94603+ * @data: a unsigned 64 bit value
94604+ * @index: 0 or 1 to return the first or second 32-bits
94605+ *
94606+ * This inline exists to hide the length of unsigned long. If a 32-bit
94607+ * unsigned long is passed in, it will be extended and the top 32-bits will be
94608+ * 0. If it is a 64-bit unsigned long, then whatever data is resident will be
94609+ * properly returned.
94610+ *
94611 * Endianness is explicitly ignored and left for BPF program authors to manage
94612 * as per the specific architecture.
94613 */
94614-static void populate_seccomp_data(struct seccomp_data *sd)
94615+static inline u32 get_u32(u64 data, int index)
94616 {
94617- struct task_struct *task = current;
94618- struct pt_regs *regs = task_pt_regs(task);
94619- unsigned long args[6];
94620+ return ((u32 *)&data)[index];
94621+}
94622
94623- sd->nr = syscall_get_nr(task, regs);
94624- sd->arch = syscall_get_arch();
94625- syscall_get_arguments(task, regs, 0, 6, args);
94626- sd->args[0] = args[0];
94627- sd->args[1] = args[1];
94628- sd->args[2] = args[2];
94629- sd->args[3] = args[3];
94630- sd->args[4] = args[4];
94631- sd->args[5] = args[5];
94632- sd->instruction_pointer = KSTK_EIP(task);
94633+/* Helper for bpf_load below. */
94634+#define BPF_DATA(_name) offsetof(struct seccomp_data, _name)
94635+/**
94636+ * bpf_load: checks and returns a pointer to the requested offset
94637+ * @off: offset into struct seccomp_data to load from
94638+ *
94639+ * Returns the requested 32-bits of data.
94640+ * seccomp_check_filter() should assure that @off is 32-bit aligned
94641+ * and not out of bounds. Failure to do so is a BUG.
94642+ */
94643+u32 seccomp_bpf_load(int off)
94644+{
94645+ struct pt_regs *regs = task_pt_regs(current);
94646+ if (off == BPF_DATA(nr))
94647+ return syscall_get_nr(current, regs);
94648+ if (off == BPF_DATA(arch))
94649+ return syscall_get_arch();
94650+ if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) {
94651+ unsigned long value;
94652+ int arg = (off - BPF_DATA(args[0])) / sizeof(u64);
94653+ int index = !!(off % sizeof(u64));
94654+ syscall_get_arguments(current, regs, arg, 1, &value);
94655+ return get_u32(value, index);
94656+ }
94657+ if (off == BPF_DATA(instruction_pointer))
94658+ return get_u32(KSTK_EIP(current), 0);
94659+ if (off == BPF_DATA(instruction_pointer) + sizeof(u32))
94660+ return get_u32(KSTK_EIP(current), 1);
94661+ /* seccomp_check_filter should make this impossible. */
94662+ BUG();
94663 }
94664
94665 /**
94666@@ -103,59 +132,59 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
94667 u32 k = ftest->k;
94668
94669 switch (code) {
94670- case BPF_LD | BPF_W | BPF_ABS:
94671- ftest->code = BPF_LDX | BPF_W | BPF_ABS;
94672+ case BPF_S_LD_W_ABS:
94673+ ftest->code = BPF_S_ANC_SECCOMP_LD_W;
94674 /* 32-bit aligned and not out of bounds. */
94675 if (k >= sizeof(struct seccomp_data) || k & 3)
94676 return -EINVAL;
94677 continue;
94678- case BPF_LD | BPF_W | BPF_LEN:
94679- ftest->code = BPF_LD | BPF_IMM;
94680+ case BPF_S_LD_W_LEN:
94681+ ftest->code = BPF_S_LD_IMM;
94682 ftest->k = sizeof(struct seccomp_data);
94683 continue;
94684- case BPF_LDX | BPF_W | BPF_LEN:
94685- ftest->code = BPF_LDX | BPF_IMM;
94686+ case BPF_S_LDX_W_LEN:
94687+ ftest->code = BPF_S_LDX_IMM;
94688 ftest->k = sizeof(struct seccomp_data);
94689 continue;
94690 /* Explicitly include allowed calls. */
94691- case BPF_RET | BPF_K:
94692- case BPF_RET | BPF_A:
94693- case BPF_ALU | BPF_ADD | BPF_K:
94694- case BPF_ALU | BPF_ADD | BPF_X:
94695- case BPF_ALU | BPF_SUB | BPF_K:
94696- case BPF_ALU | BPF_SUB | BPF_X:
94697- case BPF_ALU | BPF_MUL | BPF_K:
94698- case BPF_ALU | BPF_MUL | BPF_X:
94699- case BPF_ALU | BPF_DIV | BPF_K:
94700- case BPF_ALU | BPF_DIV | BPF_X:
94701- case BPF_ALU | BPF_AND | BPF_K:
94702- case BPF_ALU | BPF_AND | BPF_X:
94703- case BPF_ALU | BPF_OR | BPF_K:
94704- case BPF_ALU | BPF_OR | BPF_X:
94705- case BPF_ALU | BPF_XOR | BPF_K:
94706- case BPF_ALU | BPF_XOR | BPF_X:
94707- case BPF_ALU | BPF_LSH | BPF_K:
94708- case BPF_ALU | BPF_LSH | BPF_X:
94709- case BPF_ALU | BPF_RSH | BPF_K:
94710- case BPF_ALU | BPF_RSH | BPF_X:
94711- case BPF_ALU | BPF_NEG:
94712- case BPF_LD | BPF_IMM:
94713- case BPF_LDX | BPF_IMM:
94714- case BPF_MISC | BPF_TAX:
94715- case BPF_MISC | BPF_TXA:
94716- case BPF_LD | BPF_MEM:
94717- case BPF_LDX | BPF_MEM:
94718- case BPF_ST:
94719- case BPF_STX:
94720- case BPF_JMP | BPF_JA:
94721- case BPF_JMP | BPF_JEQ | BPF_K:
94722- case BPF_JMP | BPF_JEQ | BPF_X:
94723- case BPF_JMP | BPF_JGE | BPF_K:
94724- case BPF_JMP | BPF_JGE | BPF_X:
94725- case BPF_JMP | BPF_JGT | BPF_K:
94726- case BPF_JMP | BPF_JGT | BPF_X:
94727- case BPF_JMP | BPF_JSET | BPF_K:
94728- case BPF_JMP | BPF_JSET | BPF_X:
94729+ case BPF_S_RET_K:
94730+ case BPF_S_RET_A:
94731+ case BPF_S_ALU_ADD_K:
94732+ case BPF_S_ALU_ADD_X:
94733+ case BPF_S_ALU_SUB_K:
94734+ case BPF_S_ALU_SUB_X:
94735+ case BPF_S_ALU_MUL_K:
94736+ case BPF_S_ALU_MUL_X:
94737+ case BPF_S_ALU_DIV_X:
94738+ case BPF_S_ALU_AND_K:
94739+ case BPF_S_ALU_AND_X:
94740+ case BPF_S_ALU_OR_K:
94741+ case BPF_S_ALU_OR_X:
94742+ case BPF_S_ALU_XOR_K:
94743+ case BPF_S_ALU_XOR_X:
94744+ case BPF_S_ALU_LSH_K:
94745+ case BPF_S_ALU_LSH_X:
94746+ case BPF_S_ALU_RSH_K:
94747+ case BPF_S_ALU_RSH_X:
94748+ case BPF_S_ALU_NEG:
94749+ case BPF_S_LD_IMM:
94750+ case BPF_S_LDX_IMM:
94751+ case BPF_S_MISC_TAX:
94752+ case BPF_S_MISC_TXA:
94753+ case BPF_S_ALU_DIV_K:
94754+ case BPF_S_LD_MEM:
94755+ case BPF_S_LDX_MEM:
94756+ case BPF_S_ST:
94757+ case BPF_S_STX:
94758+ case BPF_S_JMP_JA:
94759+ case BPF_S_JMP_JEQ_K:
94760+ case BPF_S_JMP_JEQ_X:
94761+ case BPF_S_JMP_JGE_K:
94762+ case BPF_S_JMP_JGE_X:
94763+ case BPF_S_JMP_JGT_K:
94764+ case BPF_S_JMP_JGT_X:
94765+ case BPF_S_JMP_JSET_K:
94766+ case BPF_S_JMP_JSET_X:
94767 continue;
94768 default:
94769 return -EINVAL;
94770@@ -173,22 +202,18 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
94771 static u32 seccomp_run_filters(int syscall)
94772 {
94773 struct seccomp_filter *f;
94774- struct seccomp_data sd;
94775 u32 ret = SECCOMP_RET_ALLOW;
94776
94777 /* Ensure unexpected behavior doesn't result in failing open. */
94778 if (WARN_ON(current->seccomp.filter == NULL))
94779 return SECCOMP_RET_KILL;
94780
94781- populate_seccomp_data(&sd);
94782-
94783 /*
94784 * All filters in the list are evaluated and the lowest BPF return
94785 * value always takes priority (ignoring the DATA).
94786 */
94787 for (f = current->seccomp.filter; f; f = f->prev) {
94788- u32 cur_ret = SK_RUN_FILTER(f->prog, (void *)&sd);
94789-
94790+ u32 cur_ret = sk_run_filter(NULL, f->insns);
94791 if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
94792 ret = cur_ret;
94793 }
94794@@ -206,20 +231,18 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
94795 struct seccomp_filter *filter;
94796 unsigned long fp_size = fprog->len * sizeof(struct sock_filter);
94797 unsigned long total_insns = fprog->len;
94798- struct sock_filter *fp;
94799- int new_len;
94800 long ret;
94801
94802 if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
94803 return -EINVAL;
94804
94805 for (filter = current->seccomp.filter; filter; filter = filter->prev)
94806- total_insns += filter->prog->len + 4; /* include a 4 instr penalty */
94807+ total_insns += filter->len + 4; /* include a 4 instr penalty */
94808 if (total_insns > MAX_INSNS_PER_PATH)
94809 return -ENOMEM;
94810
94811 /*
94812- * Installing a seccomp filter requires that the task has
94813+ * Installing a seccomp filter requires that the task have
94814 * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
94815 * This avoids scenarios where unprivileged tasks can affect the
94816 * behavior of privileged children.
94817@@ -229,51 +252,28 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
94818 CAP_SYS_ADMIN) != 0)
94819 return -EACCES;
94820
94821- fp = kzalloc(fp_size, GFP_KERNEL|__GFP_NOWARN);
94822- if (!fp)
94823- return -ENOMEM;
94824-
94825- /* Copy the instructions from fprog. */
94826- ret = -EFAULT;
94827- if (copy_from_user(fp, fprog->filter, fp_size))
94828- goto free_prog;
94829-
94830- /* Check and rewrite the fprog via the skb checker */
94831- ret = sk_chk_filter(fp, fprog->len);
94832- if (ret)
94833- goto free_prog;
94834-
94835- /* Check and rewrite the fprog for seccomp use */
94836- ret = seccomp_check_filter(fp, fprog->len);
94837- if (ret)
94838- goto free_prog;
94839-
94840- /* Convert 'sock_filter' insns to 'sock_filter_int' insns */
94841- ret = sk_convert_filter(fp, fprog->len, NULL, &new_len);
94842- if (ret)
94843- goto free_prog;
94844-
94845 /* Allocate a new seccomp_filter */
94846- ret = -ENOMEM;
94847- filter = kzalloc(sizeof(struct seccomp_filter),
94848+ filter = kzalloc(sizeof(struct seccomp_filter) + fp_size,
94849 GFP_KERNEL|__GFP_NOWARN);
94850 if (!filter)
94851- goto free_prog;
94852-
94853- filter->prog = kzalloc(sk_filter_size(new_len),
94854- GFP_KERNEL|__GFP_NOWARN);
94855- if (!filter->prog)
94856- goto free_filter;
94857-
94858- ret = sk_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);
94859- if (ret)
94860- goto free_filter_prog;
94861- kfree(fp);
94862-
94863+ return -ENOMEM;
94864 atomic_set(&filter->usage, 1);
94865- filter->prog->len = new_len;
94866+ filter->len = fprog->len;
94867
94868- sk_filter_select_runtime(filter->prog);
94869+ /* Copy the instructions from fprog. */
94870+ ret = -EFAULT;
94871+ if (copy_from_user(filter->insns, fprog->filter, fp_size))
94872+ goto fail;
94873+
94874+ /* Check and rewrite the fprog via the skb checker */
94875+ ret = sk_chk_filter(filter->insns, filter->len);
94876+ if (ret)
94877+ goto fail;
94878+
94879+ /* Check and rewrite the fprog for seccomp use */
94880+ ret = seccomp_check_filter(filter->insns, filter->len);
94881+ if (ret)
94882+ goto fail;
94883
94884 /*
94885 * If there is an existing filter, make it the prev and don't drop its
94886@@ -282,13 +282,8 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
94887 filter->prev = current->seccomp.filter;
94888 current->seccomp.filter = filter;
94889 return 0;
94890-
94891-free_filter_prog:
94892- kfree(filter->prog);
94893-free_filter:
94894+fail:
94895 kfree(filter);
94896-free_prog:
94897- kfree(fp);
94898 return ret;
94899 }
94900
94901@@ -298,7 +293,7 @@ free_prog:
94902 *
94903 * Returns 0 on success and non-zero otherwise.
94904 */
94905-static long seccomp_attach_user_filter(char __user *user_filter)
94906+long seccomp_attach_user_filter(char __user *user_filter)
94907 {
94908 struct sock_fprog fprog;
94909 long ret = -EFAULT;
94910@@ -337,7 +332,6 @@ void put_seccomp_filter(struct task_struct *tsk)
94911 while (orig && atomic_dec_and_test(&orig->usage)) {
94912 struct seccomp_filter *freeme = orig;
94913 orig = orig->prev;
94914- sk_filter_free(freeme->prog);
94915 kfree(freeme);
94916 }
94917 }
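
[Note] The seccomp.c changes are effectively a revert: 3.16 had moved seccomp filters onto the shared internal BPF engine (sk_convert_filter() plus SK_RUN_FILTER()); grsecurity restores the older scheme in which the classic sock_filter program is stored inline in struct seccomp_filter and interpreted directly, with seccomp_bpf_load() materializing struct seccomp_data fields on demand instead of populating a copy up front. The offset arithmetic in seccomp_bpf_load() maps a 32-bit load offset onto a syscall argument and one of its 32-bit words (endianness is left to the filter author, as the restored comment notes). A userspace model of just that index math, with the struct layout matching the UAPI header:

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	struct seccomp_data {
		int32_t nr;
		uint32_t arch;
		uint64_t instruction_pointer;
		uint64_t args[6];
	};

	#define BPF_DATA(name)	offsetof(struct seccomp_data, name)

	int main(void)
	{
		/* A filter asking for the second word of syscall argument 2: */
		unsigned int off = BPF_DATA(args[2]) + sizeof(uint32_t);
		unsigned int arg = (off - BPF_DATA(args[0])) / sizeof(uint64_t);
		unsigned int half = !!(off % sizeof(uint64_t));

		printf("offset %u -> args[%u], 32-bit word %u\n", off, arg, half);
		return 0;
	}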
94918diff --git a/kernel/signal.c b/kernel/signal.c
94919index a4077e9..f0d4e5c 100644
94920--- a/kernel/signal.c
94921+++ b/kernel/signal.c
94922@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
94923
94924 int print_fatal_signals __read_mostly;
94925
94926-static void __user *sig_handler(struct task_struct *t, int sig)
94927+static __sighandler_t sig_handler(struct task_struct *t, int sig)
94928 {
94929 return t->sighand->action[sig - 1].sa.sa_handler;
94930 }
94931
94932-static int sig_handler_ignored(void __user *handler, int sig)
94933+static int sig_handler_ignored(__sighandler_t handler, int sig)
94934 {
94935 /* Is it explicitly or implicitly ignored? */
94936 return handler == SIG_IGN ||
94937@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
94938
94939 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
94940 {
94941- void __user *handler;
94942+ __sighandler_t handler;
94943
94944 handler = sig_handler(t, sig);
94945
94946@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
94947 atomic_inc(&user->sigpending);
94948 rcu_read_unlock();
94949
94950+ if (!override_rlimit)
94951+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
94952+
94953 if (override_rlimit ||
94954 atomic_read(&user->sigpending) <=
94955 task_rlimit(t, RLIMIT_SIGPENDING)) {
94956@@ -499,7 +502,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
94957
94958 int unhandled_signal(struct task_struct *tsk, int sig)
94959 {
94960- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
94961+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
94962 if (is_global_init(tsk))
94963 return 1;
94964 if (handler != SIG_IGN && handler != SIG_DFL)
94965@@ -793,6 +796,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
94966 }
94967 }
94968
94969+ /* allow glibc communication via tgkill to other threads in our
94970+ thread group */
94971+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
94972+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
94973+ && gr_handle_signal(t, sig))
94974+ return -EPERM;
94975+
94976 return security_task_kill(t, info, sig, 0);
94977 }
94978
94979@@ -1176,7 +1186,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
94980 return send_signal(sig, info, p, 1);
94981 }
94982
94983-static int
94984+int
94985 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
94986 {
94987 return send_signal(sig, info, t, 0);
94988@@ -1213,6 +1223,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
94989 unsigned long int flags;
94990 int ret, blocked, ignored;
94991 struct k_sigaction *action;
94992+ int is_unhandled = 0;
94993
94994 spin_lock_irqsave(&t->sighand->siglock, flags);
94995 action = &t->sighand->action[sig-1];
94996@@ -1227,9 +1238,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
94997 }
94998 if (action->sa.sa_handler == SIG_DFL)
94999 t->signal->flags &= ~SIGNAL_UNKILLABLE;
95000+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
95001+ is_unhandled = 1;
95002 ret = specific_send_sig_info(sig, info, t);
95003 spin_unlock_irqrestore(&t->sighand->siglock, flags);
95004
95005+ /* only deal with unhandled signals; Java etc. trigger SIGSEGV during
95006+ normal operation */
95007+ if (is_unhandled) {
95008+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
95009+ gr_handle_crash(t, sig);
95010+ }
95011+
95012 return ret;
95013 }
95014
95015@@ -1296,8 +1316,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
95016 ret = check_kill_permission(sig, info, p);
95017 rcu_read_unlock();
95018
95019- if (!ret && sig)
95020+ if (!ret && sig) {
95021 ret = do_send_sig_info(sig, info, p, true);
95022+ if (!ret)
95023+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
95024+ }
95025
95026 return ret;
95027 }
95028@@ -2903,7 +2926,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
95029 int error = -ESRCH;
95030
95031 rcu_read_lock();
95032- p = find_task_by_vpid(pid);
95033+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
95034+ /* allow glibc communication via tgkill to other threads in our
95035+ thread group */
95036+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
95037+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
95038+ p = find_task_by_vpid_unrestricted(pid);
95039+ else
95040+#endif
95041+ p = find_task_by_vpid(pid);
95042 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
95043 error = check_kill_permission(sig, info, p);
95044 /*
95045@@ -3236,8 +3267,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
95046 }
95047 seg = get_fs();
95048 set_fs(KERNEL_DS);
95049- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
95050- (stack_t __force __user *) &uoss,
95051+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
95052+ (stack_t __force_user *) &uoss,
95053 compat_user_stack_pointer());
95054 set_fs(seg);
95055 if (ret >= 0 && uoss_ptr) {
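The retyping at the top of this file makes the handler checks compare __sighandler_t values instead of void __user * ones. The same SIG_IGN/SIG_DFL test that force_sig_info() now tracks via is_unhandled can be sketched from userspace with plain POSIX sigaction() (illustrative only):

#include <signal.h>
#include <stdio.h>

int main(void)
{
	struct sigaction sa;

	/* query the current disposition without changing it */
	if (sigaction(SIGSEGV, NULL, &sa) == 0) {
		if (sa.sa_handler == SIG_IGN || sa.sa_handler == SIG_DFL)
			/* the patched force_sig_info() treats this case as
			   unhandled and calls gr_log_signal()/gr_handle_crash() */
			puts("SIGSEGV is unhandled");
		else
			puts("SIGSEGV has a handler installed");
	}
	return 0;
}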
95056diff --git a/kernel/smpboot.c b/kernel/smpboot.c
95057index eb89e18..a4e6792 100644
95058--- a/kernel/smpboot.c
95059+++ b/kernel/smpboot.c
95060@@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
95061 }
95062 smpboot_unpark_thread(plug_thread, cpu);
95063 }
95064- list_add(&plug_thread->list, &hotplug_threads);
95065+ pax_list_add(&plug_thread->list, &hotplug_threads);
95066 out:
95067 mutex_unlock(&smpboot_threads_lock);
95068 return ret;
95069@@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
95070 {
95071 get_online_cpus();
95072 mutex_lock(&smpboot_threads_lock);
95073- list_del(&plug_thread->list);
95074+ pax_list_del(&plug_thread->list);
95075 smpboot_destroy_threads(plug_thread);
95076 mutex_unlock(&smpboot_threads_lock);
95077 put_online_cpus();
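pax_list_add()/pax_list_del() are needed here because structures such as the smp_hotplug_thread instances marked __read_only later in this patch embed their list_head in write-protected memory. A plausible sketch of the helpers, assuming the usual PaX pattern of briefly lifting kernel write protection; the real grsecurity implementation may differ in detail:

void pax_list_add(struct list_head *new, struct list_head *head)
{
	pax_open_kernel();	/* e.g. clear CR0.WP on x86 */
	list_add(new, head);
	pax_close_kernel();	/* restore write protection */
}

void pax_list_del(struct list_head *entry)
{
	pax_open_kernel();
	list_del(entry);
	pax_close_kernel();
}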
95078diff --git a/kernel/softirq.c b/kernel/softirq.c
95079index 5918d22..e95d1926 100644
95080--- a/kernel/softirq.c
95081+++ b/kernel/softirq.c
95082@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
95083 EXPORT_SYMBOL(irq_stat);
95084 #endif
95085
95086-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
95087+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
95088
95089 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
95090
95091@@ -266,7 +266,7 @@ restart:
95092 kstat_incr_softirqs_this_cpu(vec_nr);
95093
95094 trace_softirq_entry(vec_nr);
95095- h->action(h);
95096+ h->action();
95097 trace_softirq_exit(vec_nr);
95098 if (unlikely(prev_count != preempt_count())) {
95099 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
95100@@ -426,7 +426,7 @@ void __raise_softirq_irqoff(unsigned int nr)
95101 or_softirq_pending(1UL << nr);
95102 }
95103
95104-void open_softirq(int nr, void (*action)(struct softirq_action *))
95105+void __init open_softirq(int nr, void (*action)(void))
95106 {
95107 softirq_vec[nr].action = action;
95108 }
95109@@ -478,7 +478,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
95110 }
95111 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
95112
95113-static void tasklet_action(struct softirq_action *a)
95114+static void tasklet_action(void)
95115 {
95116 struct tasklet_struct *list;
95117
95118@@ -514,7 +514,7 @@ static void tasklet_action(struct softirq_action *a)
95119 }
95120 }
95121
95122-static void tasklet_hi_action(struct softirq_action *a)
95123+static __latent_entropy void tasklet_hi_action(void)
95124 {
95125 struct tasklet_struct *list;
95126
95127@@ -741,7 +741,7 @@ static struct notifier_block cpu_nfb = {
95128 .notifier_call = cpu_callback
95129 };
95130
95131-static struct smp_hotplug_thread softirq_threads = {
95132+static struct smp_hotplug_thread softirq_threads __read_only = {
95133 .store = &ksoftirqd,
95134 .thread_should_run = ksoftirqd_should_run,
95135 .thread_fn = run_ksoftirqd,
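With softirq_vec made __read_only and page-aligned, open_softirq() becomes __init-only and its handlers lose the unused softirq_action argument. A sketch against the patched prototype (my_softirq_action and my_subsystem_init are illustrative names, and the softirq slot is only a placeholder):

/* patched prototype: void __init open_softirq(int nr, void (*action)(void)) */
static void my_softirq_action(void)
{
	/* bottom-half work; no softirq_action pointer is passed anymore */
}

static int __init my_subsystem_init(void)
{
	/* must run at init time: softirq_vec is write-protected afterwards */
	open_softirq(HI_SOFTIRQ, my_softirq_action);
	return 0;
}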
95136diff --git a/kernel/sys.c b/kernel/sys.c
95137index 66a751e..a42497e 100644
95138--- a/kernel/sys.c
95139+++ b/kernel/sys.c
95140@@ -148,6 +148,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
95141 error = -EACCES;
95142 goto out;
95143 }
95144+
95145+ if (gr_handle_chroot_setpriority(p, niceval)) {
95146+ error = -EACCES;
95147+ goto out;
95148+ }
95149+
95150 no_nice = security_task_setnice(p, niceval);
95151 if (no_nice) {
95152 error = no_nice;
95153@@ -351,6 +357,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
95154 goto error;
95155 }
95156
95157+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
95158+ goto error;
95159+
95160+ if (!gid_eq(new->gid, old->gid)) {
95161+ /* make sure we generate a learn log for what will
95162+ end up being a role transition after a full-learning
95163+ policy is generated.
95164+ CAP_SETGID is required to perform such a transition,
95165+ and we may not have logged a CAP_SETGID check above,
95166+ e.g. in the case where the new rgid equals the old egid.
95167+ */
95168+ gr_learn_cap(current, new, CAP_SETGID);
95169+ }
95170+
95171 if (rgid != (gid_t) -1 ||
95172 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
95173 new->sgid = new->egid;
95174@@ -386,6 +406,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
95175 old = current_cred();
95176
95177 retval = -EPERM;
95178+
95179+ if (gr_check_group_change(kgid, kgid, kgid))
95180+ goto error;
95181+
95182 if (ns_capable(old->user_ns, CAP_SETGID))
95183 new->gid = new->egid = new->sgid = new->fsgid = kgid;
95184 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
95185@@ -403,7 +427,7 @@ error:
95186 /*
95187 * change the user struct in a credentials set to match the new UID
95188 */
95189-static int set_user(struct cred *new)
95190+int set_user(struct cred *new)
95191 {
95192 struct user_struct *new_user;
95193
95194@@ -483,7 +507,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
95195 goto error;
95196 }
95197
95198+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
95199+ goto error;
95200+
95201 if (!uid_eq(new->uid, old->uid)) {
95202+ /* make sure we generate a learn log for what will
95203+ end up being a role transition after a full-learning
95204+ policy is generated.
95205+ CAP_SETUID is required to perform such a transition,
95206+ and we may not have logged a CAP_SETUID check above,
95207+ e.g. in the case where the new ruid equals the old euid.
95208+ */
95209+ gr_learn_cap(current, new, CAP_SETUID);
95210 retval = set_user(new);
95211 if (retval < 0)
95212 goto error;
95213@@ -533,6 +568,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
95214 old = current_cred();
95215
95216 retval = -EPERM;
95217+
95218+ if (gr_check_crash_uid(kuid))
95219+ goto error;
95220+ if (gr_check_user_change(kuid, kuid, kuid))
95221+ goto error;
95222+
95223 if (ns_capable(old->user_ns, CAP_SETUID)) {
95224 new->suid = new->uid = kuid;
95225 if (!uid_eq(kuid, old->uid)) {
95226@@ -602,6 +643,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
95227 goto error;
95228 }
95229
95230+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
95231+ goto error;
95232+
95233 if (ruid != (uid_t) -1) {
95234 new->uid = kruid;
95235 if (!uid_eq(kruid, old->uid)) {
95236@@ -684,6 +728,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
95237 goto error;
95238 }
95239
95240+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
95241+ goto error;
95242+
95243 if (rgid != (gid_t) -1)
95244 new->gid = krgid;
95245 if (egid != (gid_t) -1)
95246@@ -745,12 +792,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
95247 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
95248 ns_capable(old->user_ns, CAP_SETUID)) {
95249 if (!uid_eq(kuid, old->fsuid)) {
95250+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
95251+ goto error;
95252+
95253 new->fsuid = kuid;
95254 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
95255 goto change_okay;
95256 }
95257 }
95258
95259+error:
95260 abort_creds(new);
95261 return old_fsuid;
95262
95263@@ -783,12 +834,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
95264 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
95265 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
95266 ns_capable(old->user_ns, CAP_SETGID)) {
95267+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
95268+ goto error;
95269+
95270 if (!gid_eq(kgid, old->fsgid)) {
95271 new->fsgid = kgid;
95272 goto change_okay;
95273 }
95274 }
95275
95276+error:
95277 abort_creds(new);
95278 return old_fsgid;
95279
95280@@ -1167,19 +1222,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
95281 return -EFAULT;
95282
95283 down_read(&uts_sem);
95284- error = __copy_to_user(&name->sysname, &utsname()->sysname,
95285+ error = __copy_to_user(name->sysname, &utsname()->sysname,
95286 __OLD_UTS_LEN);
95287 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
95288- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
95289+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
95290 __OLD_UTS_LEN);
95291 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
95292- error |= __copy_to_user(&name->release, &utsname()->release,
95293+ error |= __copy_to_user(name->release, &utsname()->release,
95294 __OLD_UTS_LEN);
95295 error |= __put_user(0, name->release + __OLD_UTS_LEN);
95296- error |= __copy_to_user(&name->version, &utsname()->version,
95297+ error |= __copy_to_user(name->version, &utsname()->version,
95298 __OLD_UTS_LEN);
95299 error |= __put_user(0, name->version + __OLD_UTS_LEN);
95300- error |= __copy_to_user(&name->machine, &utsname()->machine,
95301+ error |= __copy_to_user(name->machine, &utsname()->machine,
95302 __OLD_UTS_LEN);
95303 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
95304 up_read(&uts_sem);
95305@@ -1381,6 +1436,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
95306 */
95307 new_rlim->rlim_cur = 1;
95308 }
95309+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
95310+ is changed to a lower value. Since tasks can be created by the same
95311+ user in between this limit change and an execve by this task, force
95312+ a recheck only for this task by setting PF_NPROC_EXCEEDED.
95313+ */
95314+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
95315+ tsk->flags |= PF_NPROC_EXCEEDED;
95316 }
95317 if (!retval) {
95318 if (old_rlim)
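The PF_NPROC_EXCEEDED hunk closes a window in which RLIMIT_NPROC is lowered while the user already has more tasks than the new limit: the limit is then re-checked at this task's next execve() rather than only at fork(). A standard-API userspace illustration of the behavior the patched do_prlimit() arranges for non-root users:

#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 1, .rlim_max = 1 };

	/* lower RLIMIT_NPROC below the number of tasks this user already has */
	if (setrlimit(RLIMIT_NPROC, &rl))
		perror("setrlimit");

	/* the patched kernel flags this task PF_NPROC_EXCEEDED, so the
	   execve() below re-checks the limit and can fail with EAGAIN even
	   though no fork() happened in between */
	execl("/bin/true", "true", (char *)NULL);
	perror("execl");
	return 1;
}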
95319diff --git a/kernel/sysctl.c b/kernel/sysctl.c
95320index 75b22e2..65c0ac8 100644
95321--- a/kernel/sysctl.c
95322+++ b/kernel/sysctl.c
95323@@ -94,7 +94,6 @@
95324
95325
95326 #if defined(CONFIG_SYSCTL)
95327-
95328 /* External variables not in a header file. */
95329 extern int max_threads;
95330 extern int suid_dumpable;
95331@@ -115,19 +114,20 @@ extern int sysctl_nr_trim_pages;
95332
95333 /* Constants used for minimum and maximum */
95334 #ifdef CONFIG_LOCKUP_DETECTOR
95335-static int sixty = 60;
95336+static int sixty __read_only = 60;
95337 #endif
95338
95339-static int __maybe_unused neg_one = -1;
95340+static int __maybe_unused neg_one __read_only = -1;
95341
95342-static int zero;
95343-static int __maybe_unused one = 1;
95344-static int __maybe_unused two = 2;
95345-static int __maybe_unused four = 4;
95346-static unsigned long one_ul = 1;
95347-static int one_hundred = 100;
95348+static int zero __read_only = 0;
95349+static int __maybe_unused one __read_only = 1;
95350+static int __maybe_unused two __read_only = 2;
95351+static int __maybe_unused three __read_only = 3;
95352+static int __maybe_unused four __read_only = 4;
95353+static unsigned long one_ul __read_only = 1;
95354+static int one_hundred __read_only = 100;
95355 #ifdef CONFIG_PRINTK
95356-static int ten_thousand = 10000;
95357+static int ten_thousand __read_only = 10000;
95358 #endif
95359
95360 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
95361@@ -181,10 +181,8 @@ static int proc_taint(struct ctl_table *table, int write,
95362 void __user *buffer, size_t *lenp, loff_t *ppos);
95363 #endif
95364
95365-#ifdef CONFIG_PRINTK
95366 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
95367 void __user *buffer, size_t *lenp, loff_t *ppos);
95368-#endif
95369
95370 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
95371 void __user *buffer, size_t *lenp, loff_t *ppos);
95372@@ -215,6 +213,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
95373
95374 #endif
95375
95376+extern struct ctl_table grsecurity_table[];
95377+
95378 static struct ctl_table kern_table[];
95379 static struct ctl_table vm_table[];
95380 static struct ctl_table fs_table[];
95381@@ -229,6 +229,20 @@ extern struct ctl_table epoll_table[];
95382 int sysctl_legacy_va_layout;
95383 #endif
95384
95385+#ifdef CONFIG_PAX_SOFTMODE
95386+static ctl_table pax_table[] = {
95387+ {
95388+ .procname = "softmode",
95389+ .data = &pax_softmode,
95390+ .maxlen = sizeof(unsigned int),
95391+ .mode = 0600,
95392+ .proc_handler = &proc_dointvec,
95393+ },
95394+
95395+ { }
95396+};
95397+#endif
95398+
95399 /* The default sysctl tables: */
95400
95401 static struct ctl_table sysctl_base_table[] = {
95402@@ -277,6 +291,22 @@ static int max_extfrag_threshold = 1000;
95403 #endif
95404
95405 static struct ctl_table kern_table[] = {
95406+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
95407+ {
95408+ .procname = "grsecurity",
95409+ .mode = 0500,
95410+ .child = grsecurity_table,
95411+ },
95412+#endif
95413+
95414+#ifdef CONFIG_PAX_SOFTMODE
95415+ {
95416+ .procname = "pax",
95417+ .mode = 0500,
95418+ .child = pax_table,
95419+ },
95420+#endif
95421+
95422 {
95423 .procname = "sched_child_runs_first",
95424 .data = &sysctl_sched_child_runs_first,
95425@@ -641,7 +671,7 @@ static struct ctl_table kern_table[] = {
95426 .data = &modprobe_path,
95427 .maxlen = KMOD_PATH_LEN,
95428 .mode = 0644,
95429- .proc_handler = proc_dostring,
95430+ .proc_handler = proc_dostring_modpriv,
95431 },
95432 {
95433 .procname = "modules_disabled",
95434@@ -808,16 +838,20 @@ static struct ctl_table kern_table[] = {
95435 .extra1 = &zero,
95436 .extra2 = &one,
95437 },
95438+#endif
95439 {
95440 .procname = "kptr_restrict",
95441 .data = &kptr_restrict,
95442 .maxlen = sizeof(int),
95443 .mode = 0644,
95444 .proc_handler = proc_dointvec_minmax_sysadmin,
95445+#ifdef CONFIG_GRKERNSEC_HIDESYM
95446+ .extra1 = &two,
95447+#else
95448 .extra1 = &zero,
95449+#endif
95450 .extra2 = &two,
95451 },
95452-#endif
95453 {
95454 .procname = "ngroups_max",
95455 .data = &ngroups_max,
95456@@ -1073,10 +1107,17 @@ static struct ctl_table kern_table[] = {
95457 */
95458 {
95459 .procname = "perf_event_paranoid",
95460- .data = &sysctl_perf_event_paranoid,
95461- .maxlen = sizeof(sysctl_perf_event_paranoid),
95462+ .data = &sysctl_perf_event_legitimately_concerned,
95463+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
95464 .mode = 0644,
95465- .proc_handler = proc_dointvec,
95466+ /* go ahead, be a hero */
95467+ .proc_handler = proc_dointvec_minmax_sysadmin,
95468+ .extra1 = &neg_one,
95469+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
95470+ .extra2 = &three,
95471+#else
95472+ .extra2 = &two,
95473+#endif
95474 },
95475 {
95476 .procname = "perf_event_mlock_kb",
95477@@ -1338,6 +1379,13 @@ static struct ctl_table vm_table[] = {
95478 .proc_handler = proc_dointvec_minmax,
95479 .extra1 = &zero,
95480 },
95481+ {
95482+ .procname = "heap_stack_gap",
95483+ .data = &sysctl_heap_stack_gap,
95484+ .maxlen = sizeof(sysctl_heap_stack_gap),
95485+ .mode = 0644,
95486+ .proc_handler = proc_doulongvec_minmax,
95487+ },
95488 #else
95489 {
95490 .procname = "nr_trim_pages",
95491@@ -1827,6 +1875,16 @@ int proc_dostring(struct ctl_table *table, int write,
95492 (char __user *)buffer, lenp, ppos);
95493 }
95494
95495+int proc_dostring_modpriv(struct ctl_table *table, int write,
95496+ void __user *buffer, size_t *lenp, loff_t *ppos)
95497+{
95498+ if (write && !capable(CAP_SYS_MODULE))
95499+ return -EPERM;
95500+
95501+ return _proc_do_string(table->data, table->maxlen, write,
95502+ buffer, lenp, ppos);
95503+}
95504+
95505 static size_t proc_skip_spaces(char **buf)
95506 {
95507 size_t ret;
95508@@ -1932,6 +1990,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
95509 len = strlen(tmp);
95510 if (len > *size)
95511 len = *size;
95512+ if (len > sizeof(tmp))
95513+ len = sizeof(tmp);
95514 if (copy_to_user(*buf, tmp, len))
95515 return -EFAULT;
95516 *size -= len;
95517@@ -2109,7 +2169,7 @@ int proc_dointvec(struct ctl_table *table, int write,
95518 static int proc_taint(struct ctl_table *table, int write,
95519 void __user *buffer, size_t *lenp, loff_t *ppos)
95520 {
95521- struct ctl_table t;
95522+ ctl_table_no_const t;
95523 unsigned long tmptaint = get_taint();
95524 int err;
95525
95526@@ -2137,7 +2197,6 @@ static int proc_taint(struct ctl_table *table, int write,
95527 return err;
95528 }
95529
95530-#ifdef CONFIG_PRINTK
95531 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
95532 void __user *buffer, size_t *lenp, loff_t *ppos)
95533 {
95534@@ -2146,7 +2205,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
95535
95536 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
95537 }
95538-#endif
95539
95540 struct do_proc_dointvec_minmax_conv_param {
95541 int *min;
95542@@ -2706,6 +2764,12 @@ int proc_dostring(struct ctl_table *table, int write,
95543 return -ENOSYS;
95544 }
95545
95546+int proc_dostring_modpriv(struct ctl_table *table, int write,
95547+ void __user *buffer, size_t *lenp, loff_t *ppos)
95548+{
95549+ return -ENOSYS;
95550+}
95551+
95552 int proc_dointvec(struct ctl_table *table, int write,
95553 void __user *buffer, size_t *lenp, loff_t *ppos)
95554 {
95555@@ -2762,5 +2826,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
95556 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
95557 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
95558 EXPORT_SYMBOL(proc_dostring);
95559+EXPORT_SYMBOL(proc_dostring_modpriv);
95560 EXPORT_SYMBOL(proc_doulongvec_minmax);
95561 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
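proc_dostring_modpriv() above gates writes on CAP_SYS_MODULE, and kernel.modprobe is switched over to it. The sysctl keeps mode 0644, so even a root process that has dropped CAP_SYS_MODULE opens the file fine and only the write() itself fails, observable with nothing more than:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *val = "/sbin/modprobe\n";
	int fd = open("/proc/sys/kernel/modprobe", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* -EPERM from proc_dostring_modpriv() without CAP_SYS_MODULE */
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}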
95562diff --git a/kernel/taskstats.c b/kernel/taskstats.c
95563index 13d2f7c..c93d0b0 100644
95564--- a/kernel/taskstats.c
95565+++ b/kernel/taskstats.c
95566@@ -28,9 +28,12 @@
95567 #include <linux/fs.h>
95568 #include <linux/file.h>
95569 #include <linux/pid_namespace.h>
95570+#include <linux/grsecurity.h>
95571 #include <net/genetlink.h>
95572 #include <linux/atomic.h>
95573
95574+extern int gr_is_taskstats_denied(int pid);
95575+
95576 /*
95577 * Maximum length of a cpumask that can be specified in
95578 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
95579@@ -576,6 +579,9 @@ err:
95580
95581 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
95582 {
95583+ if (gr_is_taskstats_denied(current->pid))
95584+ return -EACCES;
95585+
95586 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
95587 return cmd_attr_register_cpumask(info);
95588 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
95589diff --git a/kernel/time.c b/kernel/time.c
95590index 7c7964c..2a0d412 100644
95591--- a/kernel/time.c
95592+++ b/kernel/time.c
95593@@ -172,6 +172,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
95594 return error;
95595
95596 if (tz) {
95597+ /* do_settimeofday() below logs when tv is set, so only log a
95598+ timezone-only change here to avoid logging twice */
95599+ if (!tv)
95600+ gr_log_timechange();
95601+
95602 sys_tz = *tz;
95603 update_vsyscall_tz();
95604 if (firsttime) {
95605diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
95606index fe75444..190c528 100644
95607--- a/kernel/time/alarmtimer.c
95608+++ b/kernel/time/alarmtimer.c
95609@@ -811,7 +811,7 @@ static int __init alarmtimer_init(void)
95610 struct platform_device *pdev;
95611 int error = 0;
95612 int i;
95613- struct k_clock alarm_clock = {
95614+ static struct k_clock alarm_clock = {
95615 .clock_getres = alarm_clock_getres,
95616 .clock_get = alarm_clock_get,
95617 .timer_create = alarm_timer_create,
95618diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
95619index 32d8d6a..11486af 100644
95620--- a/kernel/time/timekeeping.c
95621+++ b/kernel/time/timekeeping.c
95622@@ -15,6 +15,7 @@
95623 #include <linux/init.h>
95624 #include <linux/mm.h>
95625 #include <linux/sched.h>
95626+#include <linux/grsecurity.h>
95627 #include <linux/syscore_ops.h>
95628 #include <linux/clocksource.h>
95629 #include <linux/jiffies.h>
95630@@ -502,6 +503,8 @@ int do_settimeofday(const struct timespec *tv)
95631 if (!timespec_valid_strict(tv))
95632 return -EINVAL;
95633
95634+ gr_log_timechange();
95635+
95636 raw_spin_lock_irqsave(&timekeeper_lock, flags);
95637 write_seqcount_begin(&timekeeper_seq);
95638
95639diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
95640index 61ed862..3b52c65 100644
95641--- a/kernel/time/timer_list.c
95642+++ b/kernel/time/timer_list.c
95643@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
95644
95645 static void print_name_offset(struct seq_file *m, void *sym)
95646 {
95647+#ifdef CONFIG_GRKERNSEC_HIDESYM
95648+ SEQ_printf(m, "<%p>", NULL);
95649+#else
95650 char symname[KSYM_NAME_LEN];
95651
95652 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
95653 SEQ_printf(m, "<%pK>", sym);
95654 else
95655 SEQ_printf(m, "%s", symname);
95656+#endif
95657 }
95658
95659 static void
95660@@ -119,7 +123,11 @@ next_one:
95661 static void
95662 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
95663 {
95664+#ifdef CONFIG_GRKERNSEC_HIDESYM
95665+ SEQ_printf(m, " .base: %p\n", NULL);
95666+#else
95667 SEQ_printf(m, " .base: %pK\n", base);
95668+#endif
95669 SEQ_printf(m, " .index: %d\n",
95670 base->index);
95671 SEQ_printf(m, " .resolution: %Lu nsecs\n",
95672@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
95673 {
95674 struct proc_dir_entry *pe;
95675
95676+#ifdef CONFIG_GRKERNSEC_PROC_ADD
95677+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
95678+#else
95679 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
95680+#endif
95681 if (!pe)
95682 return -ENOMEM;
95683 return 0;
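Both print_name_offset() paths rely on the kernel's pointer-format conventions; GRKERNSEC_HIDESYM simply short-circuits to printing a literal NULL. A quick reference sketch of those format extensions (standard kernel vsprintf behavior circa 3.16, not introduced by this patch; report_handler is an illustrative name):

static void report_handler(struct seq_file *m, void *fn)
{
	/* %p  prints the raw pointer value;
	 * %pK prints zeroes when kptr_restrict and the reader's
	 *     credentials say the pointer must be hidden;
	 * %pS resolves through kallsyms to symbol+offset */
	seq_printf(m, "handler at %pK (%pS)\n", fn, fn);
}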
95684diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
95685index 1fb08f2..ca4bb1e 100644
95686--- a/kernel/time/timer_stats.c
95687+++ b/kernel/time/timer_stats.c
95688@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
95689 static unsigned long nr_entries;
95690 static struct entry entries[MAX_ENTRIES];
95691
95692-static atomic_t overflow_count;
95693+static atomic_unchecked_t overflow_count;
95694
95695 /*
95696 * The entries are in a hash-table, for fast lookup:
95697@@ -140,7 +140,7 @@ static void reset_entries(void)
95698 nr_entries = 0;
95699 memset(entries, 0, sizeof(entries));
95700 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
95701- atomic_set(&overflow_count, 0);
95702+ atomic_set_unchecked(&overflow_count, 0);
95703 }
95704
95705 static struct entry *alloc_entry(void)
95706@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
95707 if (likely(entry))
95708 entry->count++;
95709 else
95710- atomic_inc(&overflow_count);
95711+ atomic_inc_unchecked(&overflow_count);
95712
95713 out_unlock:
95714 raw_spin_unlock_irqrestore(lock, flags);
95715@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
95716
95717 static void print_name_offset(struct seq_file *m, unsigned long addr)
95718 {
95719+#ifdef CONFIG_GRKERNSEC_HIDESYM
95720+ seq_printf(m, "<%p>", NULL);
95721+#else
95722 char symname[KSYM_NAME_LEN];
95723
95724 if (lookup_symbol_name(addr, symname) < 0)
95725- seq_printf(m, "<%p>", (void *)addr);
95726+ seq_printf(m, "<%pK>", (void *)addr);
95727 else
95728 seq_printf(m, "%s", symname);
95729+#endif
95730 }
95731
95732 static int tstats_show(struct seq_file *m, void *v)
95733@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
95734
95735 seq_puts(m, "Timer Stats Version: v0.3\n");
95736 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
95737- if (atomic_read(&overflow_count))
95738- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
95739+ if (atomic_read_unchecked(&overflow_count))
95740+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
95741 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
95742
95743 for (i = 0; i < nr_entries; i++) {
95744@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
95745 {
95746 struct proc_dir_entry *pe;
95747
95748+#ifdef CONFIG_GRKERNSEC_PROC_ADD
95749+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
95750+#else
95751 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
95752+#endif
95753 if (!pe)
95754 return -ENOMEM;
95755 return 0;
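The conversions in this and the neighboring trace files follow the PaX REFCOUNT split: plain atomic_t gains overflow trapping to stop reference-count overflows, so counters that are pure statistics and may legitimately wrap move to the _unchecked variants. The pattern in isolation (a kernel-context sketch; the helpers are the PaX ones used throughout this diff):

/* reference count: stays atomic_t, overflow traps under PAX_REFCOUNT */
static atomic_t refcnt;

/* statistics counter: wrapping is harmless, so opt out of the trap */
static atomic_unchecked_t overflow_count;

static void sample(void)
{
	atomic_inc(&refcnt);
	atomic_inc_unchecked(&overflow_count);
	pr_info("overflows: %d\n", atomic_read_unchecked(&overflow_count));
}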
95756diff --git a/kernel/timer.c b/kernel/timer.c
95757index 3bb01a3..0e7760e 100644
95758--- a/kernel/timer.c
95759+++ b/kernel/timer.c
95760@@ -1393,7 +1393,7 @@ void update_process_times(int user_tick)
95761 /*
95762 * This function runs timers and the timer-tq in bottom half context.
95763 */
95764-static void run_timer_softirq(struct softirq_action *h)
95765+static __latent_entropy void run_timer_softirq(void)
95766 {
95767 struct tvec_base *base = __this_cpu_read(tvec_bases);
95768
95769@@ -1456,7 +1456,7 @@ static void process_timeout(unsigned long __data)
95770 *
95771 * In all cases the return value is guaranteed to be non-negative.
95772 */
95773-signed long __sched schedule_timeout(signed long timeout)
95774+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
95775 {
95776 struct timer_list timer;
95777 unsigned long expire;
95778diff --git a/kernel/torture.c b/kernel/torture.c
95779index 40bb511..91190b9 100644
95780--- a/kernel/torture.c
95781+++ b/kernel/torture.c
95782@@ -484,7 +484,7 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
95783 mutex_lock(&fullstop_mutex);
95784 if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
95785 VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
95786- ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
95787+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;
95788 } else {
95789 pr_warn("Concurrent rmmod and shutdown illegal!\n");
95790 }
95791@@ -551,14 +551,14 @@ static int torture_stutter(void *arg)
95792 if (!torture_must_stop()) {
95793 if (stutter > 1) {
95794 schedule_timeout_interruptible(stutter - 1);
95795- ACCESS_ONCE(stutter_pause_test) = 2;
95796+ ACCESS_ONCE_RW(stutter_pause_test) = 2;
95797 }
95798 schedule_timeout_interruptible(1);
95799- ACCESS_ONCE(stutter_pause_test) = 1;
95800+ ACCESS_ONCE_RW(stutter_pause_test) = 1;
95801 }
95802 if (!torture_must_stop())
95803 schedule_timeout_interruptible(stutter);
95804- ACCESS_ONCE(stutter_pause_test) = 0;
95805+ ACCESS_ONCE_RW(stutter_pause_test) = 0;
95806 torture_shutdown_absorb("torture_stutter");
95807 } while (!torture_must_stop());
95808 torture_kthread_stopping("torture_stutter");
95809@@ -645,7 +645,7 @@ bool torture_cleanup(void)
95810 schedule_timeout_uninterruptible(10);
95811 return true;
95812 }
95813- ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
95814+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_RMMOD;
95815 mutex_unlock(&fullstop_mutex);
95816 torture_shutdown_cleanup();
95817 torture_shuffle_cleanup();
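ACCESS_ONCE_RW shows up wherever the stock macro is used to write: PaX redefines ACCESS_ONCE to go through a const-qualified lvalue so that stray writes fail to compile, and audited write sites must opt in via the _RW spelling. A sketch of the two macros as assumed by this patch (believed to match the PaX compiler.h change), applied to the hunk above:

#define ACCESS_ONCE(x)		(*(volatile const typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))

static void fullstop_sketch(void)
{
	if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP)		/* read: old spelling */
		ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;	/* write: opt-in */
}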
95818diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
95819index c1bd4ad..4b861dc 100644
95820--- a/kernel/trace/blktrace.c
95821+++ b/kernel/trace/blktrace.c
95822@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
95823 struct blk_trace *bt = filp->private_data;
95824 char buf[16];
95825
95826- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
95827+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
95828
95829 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
95830 }
95831@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
95832 return 1;
95833
95834 bt = buf->chan->private_data;
95835- atomic_inc(&bt->dropped);
95836+ atomic_inc_unchecked(&bt->dropped);
95837 return 0;
95838 }
95839
95840@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
95841
95842 bt->dir = dir;
95843 bt->dev = dev;
95844- atomic_set(&bt->dropped, 0);
95845+ atomic_set_unchecked(&bt->dropped, 0);
95846 INIT_LIST_HEAD(&bt->running_list);
95847
95848 ret = -EIO;
95849diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
95850index ac9d1da..ce98b35 100644
95851--- a/kernel/trace/ftrace.c
95852+++ b/kernel/trace/ftrace.c
95853@@ -1920,12 +1920,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
95854 if (unlikely(ftrace_disabled))
95855 return 0;
95856
95857+ ret = ftrace_arch_code_modify_prepare();
95858+ FTRACE_WARN_ON(ret);
95859+ if (ret)
95860+ return 0;
95861+
95862 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
95863+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
95864 if (ret) {
95865 ftrace_bug(ret, ip);
95866- return 0;
95867 }
95868- return 1;
95869+ return ret ? 0 : 1;
95870 }
95871
95872 /*
95873@@ -4126,8 +4131,10 @@ static int ftrace_process_locs(struct module *mod,
95874 if (!count)
95875 return 0;
95876
95877+ pax_open_kernel();
95878 sort(start, count, sizeof(*start),
95879 ftrace_cmp_ips, ftrace_swap_ips);
95880+ pax_close_kernel();
95881
95882 start_pg = ftrace_allocate_pages(count);
95883 if (!start_pg)
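ftrace_code_disable() now brackets ftrace_make_nop() with the arch modify hooks because, with read-only kernel text (KERNEXEC), the mcount call sites cannot be patched in place otherwise. On x86 those hooks amount to roughly the following; this is a from-memory sketch of the 3.x arch code, so the exact bodies may differ:

int ftrace_arch_code_modify_prepare(void)
{
	set_kernel_text_rw();		/* make .text writable for patching */
	set_all_modules_text_rw();
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	set_all_modules_text_ro();	/* restore W^X */
	set_kernel_text_ro();
	return 0;
}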
95884diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
95885index ff70271..4242e69 100644
95886--- a/kernel/trace/ring_buffer.c
95887+++ b/kernel/trace/ring_buffer.c
95888@@ -352,9 +352,9 @@ struct buffer_data_page {
95889 */
95890 struct buffer_page {
95891 struct list_head list; /* list of buffer pages */
95892- local_t write; /* index for next write */
95893+ local_unchecked_t write; /* index for next write */
95894 unsigned read; /* index for next read */
95895- local_t entries; /* entries on this page */
95896+ local_unchecked_t entries; /* entries on this page */
95897 unsigned long real_end; /* real end of data */
95898 struct buffer_data_page *page; /* Actual data page */
95899 };
95900@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
95901 unsigned long last_overrun;
95902 local_t entries_bytes;
95903 local_t entries;
95904- local_t overrun;
95905- local_t commit_overrun;
95906+ local_unchecked_t overrun;
95907+ local_unchecked_t commit_overrun;
95908 local_t dropped_events;
95909 local_t committing;
95910 local_t commits;
95911@@ -626,8 +626,22 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
95912 work = &cpu_buffer->irq_work;
95913 }
95914
95915- work->waiters_pending = true;
95916 poll_wait(filp, &work->waiters, poll_table);
95917+ work->waiters_pending = true;
95918+ /*
95919+ * There's a tight race between setting the waiters_pending and
95920+ * checking if the ring buffer is empty. Once the waiters_pending bit
95921+ * is set, the next event will wake the task up, but we can get stuck
95922+ * if there's only a single event in the buffer.
95923+ *
95924+ * FIXME: Ideally, we need a memory barrier on the writer side as well,
95925+ * but adding a memory barrier to all events will cause too much of a
95926+ * performance hit in the fast path. We only need a memory barrier when
95927+ * the buffer goes from empty to having content. But as this race is
95928+ * extremely small, and it's not a problem if another event comes in, we
95929+ * will fix it later.
95930+ */
95931+ smp_mb();
95932
95933 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
95934 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
95935@@ -991,8 +1005,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
95936 *
95937 * We add a counter to the write field to denote this.
95938 */
95939- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
95940- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
95941+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
95942+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
95943
95944 /*
95945 * Just make sure we have seen our old_write and synchronize
95946@@ -1020,8 +1034,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
95947 * cmpxchg to only update if an interrupt did not already
95948 * do it for us. If the cmpxchg fails, we don't care.
95949 */
95950- (void)local_cmpxchg(&next_page->write, old_write, val);
95951- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
95952+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
95953+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
95954
95955 /*
95956 * No need to worry about races with clearing out the commit.
95957@@ -1388,12 +1402,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
95958
95959 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
95960 {
95961- return local_read(&bpage->entries) & RB_WRITE_MASK;
95962+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
95963 }
95964
95965 static inline unsigned long rb_page_write(struct buffer_page *bpage)
95966 {
95967- return local_read(&bpage->write) & RB_WRITE_MASK;
95968+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
95969 }
95970
95971 static int
95972@@ -1488,7 +1502,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
95973 * bytes consumed in ring buffer from here.
95974 * Increment overrun to account for the lost events.
95975 */
95976- local_add(page_entries, &cpu_buffer->overrun);
95977+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
95978 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
95979 }
95980
95981@@ -2066,7 +2080,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
95982 * it is our responsibility to update
95983 * the counters.
95984 */
95985- local_add(entries, &cpu_buffer->overrun);
95986+ local_add_unchecked(entries, &cpu_buffer->overrun);
95987 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
95988
95989 /*
95990@@ -2216,7 +2230,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
95991 if (tail == BUF_PAGE_SIZE)
95992 tail_page->real_end = 0;
95993
95994- local_sub(length, &tail_page->write);
95995+ local_sub_unchecked(length, &tail_page->write);
95996 return;
95997 }
95998
95999@@ -2251,7 +2265,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
96000 rb_event_set_padding(event);
96001
96002 /* Set the write back to the previous setting */
96003- local_sub(length, &tail_page->write);
96004+ local_sub_unchecked(length, &tail_page->write);
96005 return;
96006 }
96007
96008@@ -2263,7 +2277,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
96009
96010 /* Set write to end of buffer */
96011 length = (tail + length) - BUF_PAGE_SIZE;
96012- local_sub(length, &tail_page->write);
96013+ local_sub_unchecked(length, &tail_page->write);
96014 }
96015
96016 /*
96017@@ -2289,7 +2303,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
96018 * about it.
96019 */
96020 if (unlikely(next_page == commit_page)) {
96021- local_inc(&cpu_buffer->commit_overrun);
96022+ local_inc_unchecked(&cpu_buffer->commit_overrun);
96023 goto out_reset;
96024 }
96025
96026@@ -2345,7 +2359,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
96027 cpu_buffer->tail_page) &&
96028 (cpu_buffer->commit_page ==
96029 cpu_buffer->reader_page))) {
96030- local_inc(&cpu_buffer->commit_overrun);
96031+ local_inc_unchecked(&cpu_buffer->commit_overrun);
96032 goto out_reset;
96033 }
96034 }
96035@@ -2393,7 +2407,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
96036 length += RB_LEN_TIME_EXTEND;
96037
96038 tail_page = cpu_buffer->tail_page;
96039- write = local_add_return(length, &tail_page->write);
96040+ write = local_add_return_unchecked(length, &tail_page->write);
96041
96042 /* set write to only the index of the write */
96043 write &= RB_WRITE_MASK;
96044@@ -2417,7 +2431,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
96045 kmemcheck_annotate_bitfield(event, bitfield);
96046 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
96047
96048- local_inc(&tail_page->entries);
96049+ local_inc_unchecked(&tail_page->entries);
96050
96051 /*
96052 * If this is the first commit on the page, then update
96053@@ -2450,7 +2464,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
96054
96055 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
96056 unsigned long write_mask =
96057- local_read(&bpage->write) & ~RB_WRITE_MASK;
96058+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
96059 unsigned long event_length = rb_event_length(event);
96060 /*
96061 * This is on the tail page. It is possible that
96062@@ -2460,7 +2474,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
96063 */
96064 old_index += write_mask;
96065 new_index += write_mask;
96066- index = local_cmpxchg(&bpage->write, old_index, new_index);
96067+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
96068 if (index == old_index) {
96069 /* update counters */
96070 local_sub(event_length, &cpu_buffer->entries_bytes);
96071@@ -2852,7 +2866,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
96072
96073 /* Do the likely case first */
96074 if (likely(bpage->page == (void *)addr)) {
96075- local_dec(&bpage->entries);
96076+ local_dec_unchecked(&bpage->entries);
96077 return;
96078 }
96079
96080@@ -2864,7 +2878,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
96081 start = bpage;
96082 do {
96083 if (bpage->page == (void *)addr) {
96084- local_dec(&bpage->entries);
96085+ local_dec_unchecked(&bpage->entries);
96086 return;
96087 }
96088 rb_inc_page(cpu_buffer, &bpage);
96089@@ -3148,7 +3162,7 @@ static inline unsigned long
96090 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
96091 {
96092 return local_read(&cpu_buffer->entries) -
96093- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
96094+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
96095 }
96096
96097 /**
96098@@ -3237,7 +3251,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
96099 return 0;
96100
96101 cpu_buffer = buffer->buffers[cpu];
96102- ret = local_read(&cpu_buffer->overrun);
96103+ ret = local_read_unchecked(&cpu_buffer->overrun);
96104
96105 return ret;
96106 }
96107@@ -3260,7 +3274,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
96108 return 0;
96109
96110 cpu_buffer = buffer->buffers[cpu];
96111- ret = local_read(&cpu_buffer->commit_overrun);
96112+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
96113
96114 return ret;
96115 }
96116@@ -3345,7 +3359,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
96117 /* if you care about this being correct, lock the buffer */
96118 for_each_buffer_cpu(buffer, cpu) {
96119 cpu_buffer = buffer->buffers[cpu];
96120- overruns += local_read(&cpu_buffer->overrun);
96121+ overruns += local_read_unchecked(&cpu_buffer->overrun);
96122 }
96123
96124 return overruns;
96125@@ -3521,8 +3535,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
96126 /*
96127 * Reset the reader page to size zero.
96128 */
96129- local_set(&cpu_buffer->reader_page->write, 0);
96130- local_set(&cpu_buffer->reader_page->entries, 0);
96131+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
96132+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
96133 local_set(&cpu_buffer->reader_page->page->commit, 0);
96134 cpu_buffer->reader_page->real_end = 0;
96135
96136@@ -3556,7 +3570,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
96137 * want to compare with the last_overrun.
96138 */
96139 smp_mb();
96140- overwrite = local_read(&(cpu_buffer->overrun));
96141+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
96142
96143 /*
96144 * Here's the tricky part.
96145@@ -4126,8 +4140,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
96146
96147 cpu_buffer->head_page
96148 = list_entry(cpu_buffer->pages, struct buffer_page, list);
96149- local_set(&cpu_buffer->head_page->write, 0);
96150- local_set(&cpu_buffer->head_page->entries, 0);
96151+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
96152+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
96153 local_set(&cpu_buffer->head_page->page->commit, 0);
96154
96155 cpu_buffer->head_page->read = 0;
96156@@ -4137,14 +4151,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
96157
96158 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
96159 INIT_LIST_HEAD(&cpu_buffer->new_pages);
96160- local_set(&cpu_buffer->reader_page->write, 0);
96161- local_set(&cpu_buffer->reader_page->entries, 0);
96162+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
96163+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
96164 local_set(&cpu_buffer->reader_page->page->commit, 0);
96165 cpu_buffer->reader_page->read = 0;
96166
96167 local_set(&cpu_buffer->entries_bytes, 0);
96168- local_set(&cpu_buffer->overrun, 0);
96169- local_set(&cpu_buffer->commit_overrun, 0);
96170+ local_set_unchecked(&cpu_buffer->overrun, 0);
96171+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
96172 local_set(&cpu_buffer->dropped_events, 0);
96173 local_set(&cpu_buffer->entries, 0);
96174 local_set(&cpu_buffer->committing, 0);
96175@@ -4549,8 +4563,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
96176 rb_init_page(bpage);
96177 bpage = reader->page;
96178 reader->page = *data_page;
96179- local_set(&reader->write, 0);
96180- local_set(&reader->entries, 0);
96181+ local_set_unchecked(&reader->write, 0);
96182+ local_set_unchecked(&reader->entries, 0);
96183 reader->read = 0;
96184 *data_page = bpage;
96185
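The poll_wait()/waiters_pending reordering plus smp_mb() above is the classic store-buffer pairing: the poller's flag store must be visible before it re-checks emptiness, and the writer's event store must be visible before it checks the flag. Schematically (a protocol sketch, not code from the patch):

/* poller (ring_buffer_poll_wait)       writer (commit path)
 *
 * poll_wait(filp, &waiters, pt);       <store new event>
 * waiters_pending = true;
 * smp_mb();                            smp_mb();  (elided; see FIXME above)
 * if (!ring_buffer_empty(...))         if (waiters_pending)
 *         return POLLIN;                       wake_up(&waiters);
 */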
96186diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
96187index 291397e..db3836d 100644
96188--- a/kernel/trace/trace.c
96189+++ b/kernel/trace/trace.c
96190@@ -3510,7 +3510,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
96191 return 0;
96192 }
96193
96194-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
96195+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
96196 {
96197 /* do nothing if flag is already set */
96198 if (!!(trace_flags & mask) == !!enabled)
96199diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
96200index 9258f5a..9b1e41e 100644
96201--- a/kernel/trace/trace.h
96202+++ b/kernel/trace/trace.h
96203@@ -1278,7 +1278,7 @@ extern const char *__stop___tracepoint_str[];
96204 void trace_printk_init_buffers(void);
96205 void trace_printk_start_comm(void);
96206 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
96207-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
96208+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
96209
96210 /*
96211 * Normal trace_printk() and friends allocates special buffers
96212diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
96213index 57b67b1..66082a9 100644
96214--- a/kernel/trace/trace_clock.c
96215+++ b/kernel/trace/trace_clock.c
96216@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void)
96217 return now;
96218 }
96219
96220-static atomic64_t trace_counter;
96221+static atomic64_unchecked_t trace_counter;
96222
96223 /*
96224 * trace_clock_counter(): simply an atomic counter.
96225@@ -133,5 +133,5 @@ static atomic64_t trace_counter;
96226 */
96227 u64 notrace trace_clock_counter(void)
96228 {
96229- return atomic64_add_return(1, &trace_counter);
96230+ return atomic64_inc_return_unchecked(&trace_counter);
96231 }
96232diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
96233index 2de5362..c4c7003 100644
96234--- a/kernel/trace/trace_events.c
96235+++ b/kernel/trace/trace_events.c
96236@@ -1722,7 +1722,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
96237 return 0;
96238 }
96239
96240-struct ftrace_module_file_ops;
96241 static void __add_event_to_tracers(struct ftrace_event_call *call);
96242
96243 /* Add an additional event_call dynamically */
96244diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
96245index 0abd9b8..6a663a2 100644
96246--- a/kernel/trace/trace_mmiotrace.c
96247+++ b/kernel/trace/trace_mmiotrace.c
96248@@ -24,7 +24,7 @@ struct header_iter {
96249 static struct trace_array *mmio_trace_array;
96250 static bool overrun_detected;
96251 static unsigned long prev_overruns;
96252-static atomic_t dropped_count;
96253+static atomic_unchecked_t dropped_count;
96254
96255 static void mmio_reset_data(struct trace_array *tr)
96256 {
96257@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
96258
96259 static unsigned long count_overruns(struct trace_iterator *iter)
96260 {
96261- unsigned long cnt = atomic_xchg(&dropped_count, 0);
96262+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
96263 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
96264
96265 if (over > prev_overruns)
96266@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
96267 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
96268 sizeof(*entry), 0, pc);
96269 if (!event) {
96270- atomic_inc(&dropped_count);
96271+ atomic_inc_unchecked(&dropped_count);
96272 return;
96273 }
96274 entry = ring_buffer_event_data(event);
96275@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
96276 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
96277 sizeof(*entry), 0, pc);
96278 if (!event) {
96279- atomic_inc(&dropped_count);
96280+ atomic_inc_unchecked(&dropped_count);
96281 return;
96282 }
96283 entry = ring_buffer_event_data(event);
96284diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
96285index f3dad80..d291d61 100644
96286--- a/kernel/trace/trace_output.c
96287+++ b/kernel/trace/trace_output.c
96288@@ -322,7 +322,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
96289
96290 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
96291 if (!IS_ERR(p)) {
96292- p = mangle_path(s->buffer + s->len, p, "\n");
96293+ p = mangle_path(s->buffer + s->len, p, "\n\\");
96294 if (p) {
96295 s->len = p - s->buffer;
96296 return 1;
96297@@ -980,14 +980,16 @@ int register_ftrace_event(struct trace_event *event)
96298 goto out;
96299 }
96300
96301+ pax_open_kernel();
96302 if (event->funcs->trace == NULL)
96303- event->funcs->trace = trace_nop_print;
96304+ *(void **)&event->funcs->trace = trace_nop_print;
96305 if (event->funcs->raw == NULL)
96306- event->funcs->raw = trace_nop_print;
96307+ *(void **)&event->funcs->raw = trace_nop_print;
96308 if (event->funcs->hex == NULL)
96309- event->funcs->hex = trace_nop_print;
96310+ *(void **)&event->funcs->hex = trace_nop_print;
96311 if (event->funcs->binary == NULL)
96312- event->funcs->binary = trace_nop_print;
96313+ *(void **)&event->funcs->binary = trace_nop_print;
96314+ pax_close_kernel();
96315
96316 key = event->type & (EVENT_HASHSIZE - 1);
96317
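Under the constify plugin the trace_event function tables become const, so defaulting the callbacks needs both pax_open_kernel() and a cast that strips the qualifier. The idiom in isolation (my_ops and set_default_handler are illustrative names):

struct my_ops {
	void (*handler)(void);	/* constified: direct assignment won't build */
};

static void set_default_handler(struct my_ops *ops, void (*fn)(void))
{
	pax_open_kernel();
	*(void **)&ops->handler = fn;	/* write through a non-const lvalue */
	pax_close_kernel();
}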
96318diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
96319index 8a4e5cb..64f270d 100644
96320--- a/kernel/trace/trace_stack.c
96321+++ b/kernel/trace/trace_stack.c
96322@@ -91,7 +91,7 @@ check_stack(unsigned long ip, unsigned long *stack)
96323 return;
96324
96325 /* we do not handle interrupt stacks yet */
96326- if (!object_is_on_stack(stack))
96327+ if (!object_starts_on_stack(stack))
96328 return;
96329
96330 local_irq_save(flags);
96331diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
96332index fcc0256..aee880f 100644
96333--- a/kernel/user_namespace.c
96334+++ b/kernel/user_namespace.c
96335@@ -82,6 +82,21 @@ int create_user_ns(struct cred *new)
96336 !kgid_has_mapping(parent_ns, group))
96337 return -EPERM;
96338
96339+#ifdef CONFIG_GRKERNSEC
96340+ /*
96341+ * This doesn't really inspire confidence:
96342+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
96343+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
96344+ * Increases kernel attack surface in areas developers
96345+ * previously cared little about ("low importance due
96346+ * to requiring 'root' capability")
96347+ * To be removed when this code receives *proper* review
96348+ */
96349+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
96350+ !capable(CAP_SETGID))
96351+ return -EPERM;
96352+#endif
96353+
96354 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
96355 if (!ns)
96356 return -ENOMEM;
96357@@ -872,7 +887,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
96358 if (atomic_read(&current->mm->mm_users) > 1)
96359 return -EINVAL;
96360
96361- if (current->fs->users != 1)
96362+ if (atomic_read(&current->fs->users) != 1)
96363 return -EINVAL;
96364
96365 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
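The GRKERNSEC gate refuses unprivileged user namespace creation outright, which is observable from userspace with plain unshare(2) (standard API; the EPERM is what the hunk above injects):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	/* without CAP_SYS_ADMIN, CAP_SETUID and CAP_SETGID this now fails */
	if (unshare(CLONE_NEWUSER) != 0)
		perror("unshare(CLONE_NEWUSER)");	/* EPERM under GRKERNSEC */
	else
		puts("user namespace created");
	return 0;
}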
96366diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
96367index c8eac43..4b5f08f 100644
96368--- a/kernel/utsname_sysctl.c
96369+++ b/kernel/utsname_sysctl.c
96370@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
96371 static int proc_do_uts_string(struct ctl_table *table, int write,
96372 void __user *buffer, size_t *lenp, loff_t *ppos)
96373 {
96374- struct ctl_table uts_table;
96375+ ctl_table_no_const uts_table;
96376 int r;
96377 memcpy(&uts_table, table, sizeof(uts_table));
96378 uts_table.data = get_uts(table, write);
96379diff --git a/kernel/watchdog.c b/kernel/watchdog.c
96380index c3319bd..67efc3c 100644
96381--- a/kernel/watchdog.c
96382+++ b/kernel/watchdog.c
96383@@ -518,7 +518,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
96384 static void watchdog_nmi_disable(unsigned int cpu) { return; }
96385 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
96386
96387-static struct smp_hotplug_thread watchdog_threads = {
96388+static struct smp_hotplug_thread watchdog_threads __read_only = {
96389 .store = &softlockup_watchdog,
96390 .thread_should_run = watchdog_should_run,
96391 .thread_fn = watchdog,
96392diff --git a/kernel/workqueue.c b/kernel/workqueue.c
96393index 35974ac..43c9e87 100644
96394--- a/kernel/workqueue.c
96395+++ b/kernel/workqueue.c
96396@@ -4576,7 +4576,7 @@ static void rebind_workers(struct worker_pool *pool)
96397 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
96398 worker_flags |= WORKER_REBOUND;
96399 worker_flags &= ~WORKER_UNBOUND;
96400- ACCESS_ONCE(worker->flags) = worker_flags;
96401+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
96402 }
96403
96404 spin_unlock_irq(&pool->lock);
96405diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
96406index 7a638aa..20db901 100644
96407--- a/lib/Kconfig.debug
96408+++ b/lib/Kconfig.debug
96409@@ -858,7 +858,7 @@ config DEBUG_MUTEXES
96410
96411 config DEBUG_WW_MUTEX_SLOWPATH
96412 bool "Wait/wound mutex debugging: Slowpath testing"
96413- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
96414+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
96415 select DEBUG_LOCK_ALLOC
96416 select DEBUG_SPINLOCK
96417 select DEBUG_MUTEXES
96418@@ -871,7 +871,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
96419
96420 config DEBUG_LOCK_ALLOC
96421 bool "Lock debugging: detect incorrect freeing of live locks"
96422- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
96423+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
96424 select DEBUG_SPINLOCK
96425 select DEBUG_MUTEXES
96426 select LOCKDEP
96427@@ -885,7 +885,7 @@ config DEBUG_LOCK_ALLOC
96428
96429 config PROVE_LOCKING
96430 bool "Lock debugging: prove locking correctness"
96431- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
96432+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
96433 select LOCKDEP
96434 select DEBUG_SPINLOCK
96435 select DEBUG_MUTEXES
96436@@ -936,7 +936,7 @@ config LOCKDEP
96437
96438 config LOCK_STAT
96439 bool "Lock usage statistics"
96440- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
96441+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
96442 select LOCKDEP
96443 select DEBUG_SPINLOCK
96444 select DEBUG_MUTEXES
96445@@ -1418,6 +1418,7 @@ config LATENCYTOP
96446 depends on DEBUG_KERNEL
96447 depends on STACKTRACE_SUPPORT
96448 depends on PROC_FS
96449+ depends on !GRKERNSEC_HIDESYM
96450 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
96451 select KALLSYMS
96452 select KALLSYMS_ALL
96453@@ -1434,7 +1435,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
96454 config DEBUG_STRICT_USER_COPY_CHECKS
96455 bool "Strict user copy size checks"
96456 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
96457- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
96458+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
96459 help
96460 Enabling this option turns a certain set of sanity checks for user
96461 copy operations into compile time failures.
96462@@ -1554,7 +1555,7 @@ endmenu # runtime tests
96463
96464 config PROVIDE_OHCI1394_DMA_INIT
96465 bool "Remote debugging over FireWire early on boot"
96466- depends on PCI && X86
96467+ depends on PCI && X86 && !GRKERNSEC
96468 help
96469 If you want to debug problems which hang or crash the kernel early
96470 on boot and the crashing machine has a FireWire port, you can use
96471diff --git a/lib/Makefile b/lib/Makefile
96472index ba967a1..2cc869a 100644
96473--- a/lib/Makefile
96474+++ b/lib/Makefile
96475@@ -33,7 +33,6 @@ obj-y += kstrtox.o
96476 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
96477 obj-$(CONFIG_TEST_MODULE) += test_module.o
96478 obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
96479-obj-$(CONFIG_TEST_BPF) += test_bpf.o
96480
96481 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
96482 CFLAGS_kobject.o += -DDEBUG
96483@@ -54,7 +53,7 @@ obj-$(CONFIG_BTREE) += btree.o
96484 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
96485 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
96486 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
96487-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
96488+obj-y += list_debug.o
96489 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
96490
96491 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
96492diff --git a/lib/assoc_array.c b/lib/assoc_array.c
96493index c0b1007..ae146f0 100644
96494--- a/lib/assoc_array.c
96495+++ b/lib/assoc_array.c
96496@@ -1735,7 +1735,7 @@ ascend_old_tree:
96497 gc_complete:
96498 edit->set[0].to = new_root;
96499 assoc_array_apply_edit(edit);
96500- edit->array->nr_leaves_on_tree = nr_leaves_on_tree;
96501+ array->nr_leaves_on_tree = nr_leaves_on_tree;
96502 return 0;
96503
96504 enomem:
96505diff --git a/lib/average.c b/lib/average.c
96506index 114d1be..ab0350c 100644
96507--- a/lib/average.c
96508+++ b/lib/average.c
96509@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
96510 {
96511 unsigned long internal = ACCESS_ONCE(avg->internal);
96512
96513- ACCESS_ONCE(avg->internal) = internal ?
96514+ ACCESS_ONCE_RW(avg->internal) = internal ?
96515 (((internal << avg->weight) - internal) +
96516 (val << avg->factor)) >> avg->weight :
96517 (val << avg->factor);
96518diff --git a/lib/bitmap.c b/lib/bitmap.c
96519index 06f7e4f..f3cf2b0 100644
96520--- a/lib/bitmap.c
96521+++ b/lib/bitmap.c
96522@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
96523 {
96524 int c, old_c, totaldigits, ndigits, nchunks, nbits;
96525 u32 chunk;
96526- const char __user __force *ubuf = (const char __user __force *)buf;
96527+ const char __user *ubuf = (const char __force_user *)buf;
96528
96529 bitmap_zero(maskp, nmaskbits);
96530
96531@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
96532 {
96533 if (!access_ok(VERIFY_READ, ubuf, ulen))
96534 return -EFAULT;
96535- return __bitmap_parse((const char __force *)ubuf,
96536+ return __bitmap_parse((const char __force_kernel *)ubuf,
96537 ulen, 1, maskp, nmaskbits);
96538
96539 }
96540@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
96541 {
96542 unsigned a, b;
96543 int c, old_c, totaldigits;
96544- const char __user __force *ubuf = (const char __user __force *)buf;
96545+ const char __user *ubuf = (const char __force_user *)buf;
96546 int exp_digit, in_range;
96547
96548 totaldigits = c = 0;
96549@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
96550 {
96551 if (!access_ok(VERIFY_READ, ubuf, ulen))
96552 return -EFAULT;
96553- return __bitmap_parselist((const char __force *)ubuf,
96554+ return __bitmap_parselist((const char __force_kernel *)ubuf,
96555 ulen, 1, maskp, nmaskbits);
96556 }
96557 EXPORT_SYMBOL(bitmap_parselist_user);
96558diff --git a/lib/bug.c b/lib/bug.c
96559index d1d7c78..b354235 100644
96560--- a/lib/bug.c
96561+++ b/lib/bug.c
96562@@ -137,6 +137,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
96563 return BUG_TRAP_TYPE_NONE;
96564
96565 bug = find_bug(bugaddr);
96566+ if (!bug)
96567+ return BUG_TRAP_TYPE_NONE;
96568
96569 file = NULL;
96570 line = 0;
96571diff --git a/lib/debugobjects.c b/lib/debugobjects.c
96572index 547f7f9..a6d4ba0 100644
96573--- a/lib/debugobjects.c
96574+++ b/lib/debugobjects.c
96575@@ -289,7 +289,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
96576 if (limit > 4)
96577 return;
96578
96579- is_on_stack = object_is_on_stack(addr);
96580+ is_on_stack = object_starts_on_stack(addr);
96581 if (is_on_stack == onstack)
96582 return;
96583
96584diff --git a/lib/div64.c b/lib/div64.c
96585index 4382ad7..08aa558 100644
96586--- a/lib/div64.c
96587+++ b/lib/div64.c
96588@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
96589 EXPORT_SYMBOL(__div64_32);
96590
96591 #ifndef div_s64_rem
96592-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
96593+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
96594 {
96595 u64 quotient;
96596
96597@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
96598 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
96599 */
96600 #ifndef div64_u64
96601-u64 div64_u64(u64 dividend, u64 divisor)
96602+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
96603 {
96604 u32 high = divisor >> 32;
96605 u64 quot;
96606diff --git a/lib/dma-debug.c b/lib/dma-debug.c
96607index 98f2d7e..899da5c 100644
96608--- a/lib/dma-debug.c
96609+++ b/lib/dma-debug.c
96610@@ -971,7 +971,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
96611
96612 void dma_debug_add_bus(struct bus_type *bus)
96613 {
96614- struct notifier_block *nb;
96615+ notifier_block_no_const *nb;
96616
96617 if (global_disable)
96618 return;
96619@@ -1148,7 +1148,7 @@ static void check_unmap(struct dma_debug_entry *ref)
96620
96621 static void check_for_stack(struct device *dev, void *addr)
96622 {
96623- if (object_is_on_stack(addr))
96624+ if (object_starts_on_stack(addr))
96625 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
96626 "stack [addr=%p]\n", addr);
96627 }
96628diff --git a/lib/hash.c b/lib/hash.c
96629index fea973f..386626f 100644
96630--- a/lib/hash.c
96631+++ b/lib/hash.c
96632@@ -14,7 +14,7 @@
96633 #include <linux/hash.h>
96634 #include <linux/cache.h>
96635
96636-static struct fast_hash_ops arch_hash_ops __read_mostly = {
96637+static struct fast_hash_ops arch_hash_ops __read_only = {
96638 .hash = jhash,
96639 .hash2 = jhash2,
96640 };
96641diff --git a/lib/inflate.c b/lib/inflate.c
96642index 013a761..c28f3fc 100644
96643--- a/lib/inflate.c
96644+++ b/lib/inflate.c
96645@@ -269,7 +269,7 @@ static void free(void *where)
96646 malloc_ptr = free_mem_ptr;
96647 }
96648 #else
96649-#define malloc(a) kmalloc(a, GFP_KERNEL)
96650+#define malloc(a) kmalloc((a), GFP_KERNEL)
96651 #define free(a) kfree(a)
96652 #endif
96653
96654diff --git a/lib/ioremap.c b/lib/ioremap.c
96655index 0c9216c..863bd89 100644
96656--- a/lib/ioremap.c
96657+++ b/lib/ioremap.c
96658@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
96659 unsigned long next;
96660
96661 phys_addr -= addr;
96662- pmd = pmd_alloc(&init_mm, pud, addr);
96663+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
96664 if (!pmd)
96665 return -ENOMEM;
96666 do {
96667@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
96668 unsigned long next;
96669
96670 phys_addr -= addr;
96671- pud = pud_alloc(&init_mm, pgd, addr);
96672+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
96673 if (!pud)
96674 return -ENOMEM;
96675 do {
96676diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
96677index bd2bea9..6b3c95e 100644
96678--- a/lib/is_single_threaded.c
96679+++ b/lib/is_single_threaded.c
96680@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
96681 struct task_struct *p, *t;
96682 bool ret;
96683
96684+ if (!mm)
96685+ return true;
96686+
96687 if (atomic_read(&task->signal->live) != 1)
96688 return false;
96689
96690diff --git a/lib/kobject.c b/lib/kobject.c
96691index 58751bb..93a1853 100644
96692--- a/lib/kobject.c
96693+++ b/lib/kobject.c
96694@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
96695
96696
96697 static DEFINE_SPINLOCK(kobj_ns_type_lock);
96698-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
96699+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
96700
96701-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
96702+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
96703 {
96704 enum kobj_ns_type type = ops->type;
96705 int error;
96706diff --git a/lib/list_debug.c b/lib/list_debug.c
96707index c24c2f7..f0296f4 100644
96708--- a/lib/list_debug.c
96709+++ b/lib/list_debug.c
96710@@ -11,7 +11,9 @@
96711 #include <linux/bug.h>
96712 #include <linux/kernel.h>
96713 #include <linux/rculist.h>
96714+#include <linux/mm.h>
96715
96716+#ifdef CONFIG_DEBUG_LIST
96717 /*
96718 * Insert a new entry between two known consecutive entries.
96719 *
96720@@ -19,21 +21,40 @@
96721 * the prev/next entries already!
96722 */
96723
96724+static bool __list_add_debug(struct list_head *new,
96725+ struct list_head *prev,
96726+ struct list_head *next)
96727+{
96728+ if (unlikely(next->prev != prev)) {
96729+ printk(KERN_ERR "list_add corruption. next->prev should be "
96730+ "prev (%p), but was %p. (next=%p).\n",
96731+ prev, next->prev, next);
96732+ BUG();
96733+ return false;
96734+ }
96735+ if (unlikely(prev->next != next)) {
96736+ printk(KERN_ERR "list_add corruption. prev->next should be "
96737+ "next (%p), but was %p. (prev=%p).\n",
96738+ next, prev->next, prev);
96739+ BUG();
96740+ return false;
96741+ }
96742+ if (unlikely(new == prev || new == next)) {
96743+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
96744+ new, prev, next);
96745+ BUG();
96746+ return false;
96747+ }
96748+ return true;
96749+}
96750+
96751 void __list_add(struct list_head *new,
96752- struct list_head *prev,
96753- struct list_head *next)
96754+ struct list_head *prev,
96755+ struct list_head *next)
96756 {
96757- WARN(next->prev != prev,
96758- "list_add corruption. next->prev should be "
96759- "prev (%p), but was %p. (next=%p).\n",
96760- prev, next->prev, next);
96761- WARN(prev->next != next,
96762- "list_add corruption. prev->next should be "
96763- "next (%p), but was %p. (prev=%p).\n",
96764- next, prev->next, prev);
96765- WARN(new == prev || new == next,
96766- "list_add double add: new=%p, prev=%p, next=%p.\n",
96767- new, prev, next);
96768+ if (!__list_add_debug(new, prev, next))
96769+ return;
96770+
96771 next->prev = new;
96772 new->next = next;
96773 new->prev = prev;
96774@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
96775 }
96776 EXPORT_SYMBOL(__list_add);
96777
96778-void __list_del_entry(struct list_head *entry)
96779+static bool __list_del_entry_debug(struct list_head *entry)
96780 {
96781 struct list_head *prev, *next;
96782
96783 prev = entry->prev;
96784 next = entry->next;
96785
96786- if (WARN(next == LIST_POISON1,
96787- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
96788- entry, LIST_POISON1) ||
96789- WARN(prev == LIST_POISON2,
96790- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
96791- entry, LIST_POISON2) ||
96792- WARN(prev->next != entry,
96793- "list_del corruption. prev->next should be %p, "
96794- "but was %p\n", entry, prev->next) ||
96795- WARN(next->prev != entry,
96796- "list_del corruption. next->prev should be %p, "
96797- "but was %p\n", entry, next->prev))
96798+ if (unlikely(next == LIST_POISON1)) {
96799+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
96800+ entry, LIST_POISON1);
96801+ BUG();
96802+ return false;
96803+ }
96804+ if (unlikely(prev == LIST_POISON2)) {
96805+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
96806+ entry, LIST_POISON2);
96807+ BUG();
96808+ return false;
96809+ }
96810+ if (unlikely(entry->prev->next != entry)) {
96811+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
96812+ "but was %p\n", entry, prev->next);
96813+ BUG();
96814+ return false;
96815+ }
96816+ if (unlikely(entry->next->prev != entry)) {
96817+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
96818+ "but was %p\n", entry, next->prev);
96819+ BUG();
96820+ return false;
96821+ }
96822+ return true;
96823+}
96824+
96825+void __list_del_entry(struct list_head *entry)
96826+{
96827+ if (!__list_del_entry_debug(entry))
96828 return;
96829
96830- __list_del(prev, next);
96831+ __list_del(entry->prev, entry->next);
96832 }
96833 EXPORT_SYMBOL(__list_del_entry);
96834
96835@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
96836 void __list_add_rcu(struct list_head *new,
96837 struct list_head *prev, struct list_head *next)
96838 {
96839- WARN(next->prev != prev,
96840- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
96841- prev, next->prev, next);
96842- WARN(prev->next != next,
96843- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
96844- next, prev->next, prev);
96845+ if (!__list_add_debug(new, prev, next))
96846+ return;
96847+
96848 new->next = next;
96849 new->prev = prev;
96850 rcu_assign_pointer(list_next_rcu(prev), new);
96851 next->prev = new;
96852 }
96853 EXPORT_SYMBOL(__list_add_rcu);
96854+#endif
96855+
96856+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
96857+{
96858+#ifdef CONFIG_DEBUG_LIST
96859+ if (!__list_add_debug(new, prev, next))
96860+ return;
96861+#endif
96862+
96863+ pax_open_kernel();
96864+ next->prev = new;
96865+ new->next = next;
96866+ new->prev = prev;
96867+ prev->next = new;
96868+ pax_close_kernel();
96869+}
96870+EXPORT_SYMBOL(__pax_list_add);
96871+
96872+void pax_list_del(struct list_head *entry)
96873+{
96874+#ifdef CONFIG_DEBUG_LIST
96875+ if (!__list_del_entry_debug(entry))
96876+ return;
96877+#endif
96878+
96879+ pax_open_kernel();
96880+ __list_del(entry->prev, entry->next);
96881+ entry->next = LIST_POISON1;
96882+ entry->prev = LIST_POISON2;
96883+ pax_close_kernel();
96884+}
96885+EXPORT_SYMBOL(pax_list_del);
96886+
96887+void pax_list_del_init(struct list_head *entry)
96888+{
96889+ pax_open_kernel();
96890+ __list_del(entry->prev, entry->next);
96891+ INIT_LIST_HEAD(entry);
96892+ pax_close_kernel();
96893+}
96894+EXPORT_SYMBOL(pax_list_del_init);
96895+
96896+void __pax_list_add_rcu(struct list_head *new,
96897+ struct list_head *prev, struct list_head *next)
96898+{
96899+#ifdef CONFIG_DEBUG_LIST
96900+ if (!__list_add_debug(new, prev, next))
96901+ return;
96902+#endif
96903+
96904+ pax_open_kernel();
96905+ new->next = next;
96906+ new->prev = prev;
96907+ rcu_assign_pointer(list_next_rcu(prev), new);
96908+ next->prev = new;
96909+ pax_close_kernel();
96910+}
96911+EXPORT_SYMBOL(__pax_list_add_rcu);
96912+
96913+void pax_list_del_rcu(struct list_head *entry)
96914+{
96915+#ifdef CONFIG_DEBUG_LIST
96916+ if (!__list_del_entry_debug(entry))
96917+ return;
96918+#endif
96919+
96920+ pax_open_kernel();
96921+ __list_del(entry->prev, entry->next);
96922+ entry->next = LIST_POISON1;
96923+ entry->prev = LIST_POISON2;
96924+ pax_close_kernel();
96925+}
96926+EXPORT_SYMBOL(pax_list_del_rcu);
96927diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
96928index 963b703..438bc51 100644
96929--- a/lib/percpu-refcount.c
96930+++ b/lib/percpu-refcount.c
96931@@ -29,7 +29,7 @@
96932 * can't hit 0 before we've added up all the percpu refs.
96933 */
96934
96935-#define PCPU_COUNT_BIAS (1U << 31)
96936+#define PCPU_COUNT_BIAS (1U << 30)
96937
96938 /**
96939 * percpu_ref_init - initialize a percpu refcount
96940diff --git a/lib/radix-tree.c b/lib/radix-tree.c
96941index 3291a8e..346a91e 100644
96942--- a/lib/radix-tree.c
96943+++ b/lib/radix-tree.c
96944@@ -67,7 +67,7 @@ struct radix_tree_preload {
96945 int nr;
96946 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
96947 };
96948-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
96949+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
96950
96951 static inline void *ptr_to_indirect(void *ptr)
96952 {
96953diff --git a/lib/random32.c b/lib/random32.c
96954index fa5da61..35fe9af 100644
96955--- a/lib/random32.c
96956+++ b/lib/random32.c
96957@@ -42,7 +42,7 @@
96958 static void __init prandom_state_selftest(void);
96959 #endif
96960
96961-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
96962+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
96963
96964 /**
96965 * prandom_u32_state - seeded pseudo-random number generator.
96966diff --git a/lib/rbtree.c b/lib/rbtree.c
96967index 65f4eff..2cfa167 100644
96968--- a/lib/rbtree.c
96969+++ b/lib/rbtree.c
96970@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
96971 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
96972
96973 static const struct rb_augment_callbacks dummy_callbacks = {
96974- dummy_propagate, dummy_copy, dummy_rotate
96975+ .propagate = dummy_propagate,
96976+ .copy = dummy_copy,
96977+ .rotate = dummy_rotate
96978 };
96979
96980 void rb_insert_color(struct rb_node *node, struct rb_root *root)
96981diff --git a/lib/show_mem.c b/lib/show_mem.c
96982index 0922579..9d7adb9 100644
96983--- a/lib/show_mem.c
96984+++ b/lib/show_mem.c
96985@@ -44,6 +44,6 @@ void show_mem(unsigned int filter)
96986 quicklist_total_size());
96987 #endif
96988 #ifdef CONFIG_MEMORY_FAILURE
96989- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
96990+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
96991 #endif
96992 }
96993diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
96994index bb2b201..46abaf9 100644
96995--- a/lib/strncpy_from_user.c
96996+++ b/lib/strncpy_from_user.c
96997@@ -21,7 +21,7 @@
96998 */
96999 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
97000 {
97001- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
97002+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
97003 long res = 0;
97004
97005 /*
97006diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
97007index a28df52..3d55877 100644
97008--- a/lib/strnlen_user.c
97009+++ b/lib/strnlen_user.c
97010@@ -26,7 +26,7 @@
97011 */
97012 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
97013 {
97014- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
97015+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
97016 long align, res = 0;
97017 unsigned long c;
97018
97019diff --git a/lib/swiotlb.c b/lib/swiotlb.c
97020index 4abda07..b9d3765 100644
97021--- a/lib/swiotlb.c
97022+++ b/lib/swiotlb.c
97023@@ -682,7 +682,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
97024
97025 void
97026 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
97027- dma_addr_t dev_addr)
97028+ dma_addr_t dev_addr, struct dma_attrs *attrs)
97029 {
97030 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
97031
97032diff --git a/lib/test_bpf.c b/lib/test_bpf.c
97033deleted file mode 100644
97034index c579e0f..0000000
97035--- a/lib/test_bpf.c
97036+++ /dev/null
97037@@ -1,1929 +0,0 @@
97038-/*
97039- * Testsuite for BPF interpreter and BPF JIT compiler
97040- *
97041- * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
97042- *
97043- * This program is free software; you can redistribute it and/or
97044- * modify it under the terms of version 2 of the GNU General Public
97045- * License as published by the Free Software Foundation.
97046- *
97047- * This program is distributed in the hope that it will be useful, but
97048- * WITHOUT ANY WARRANTY; without even the implied warranty of
97049- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
97050- * General Public License for more details.
97051- */
97052-
97053-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
97054-
97055-#include <linux/init.h>
97056-#include <linux/module.h>
97057-#include <linux/filter.h>
97058-#include <linux/skbuff.h>
97059-#include <linux/netdevice.h>
97060-#include <linux/if_vlan.h>
97061-
97062-/* General test specific settings */
97063-#define MAX_SUBTESTS 3
97064-#define MAX_TESTRUNS 10000
97065-#define MAX_DATA 128
97066-#define MAX_INSNS 512
97067-#define MAX_K 0xffffFFFF
97068-
97069-/* Few constants used to init test 'skb' */
97070-#define SKB_TYPE 3
97071-#define SKB_MARK 0x1234aaaa
97072-#define SKB_HASH 0x1234aaab
97073-#define SKB_QUEUE_MAP 123
97074-#define SKB_VLAN_TCI 0xffff
97075-#define SKB_DEV_IFINDEX 577
97076-#define SKB_DEV_TYPE 588
97077-
97078-/* Redefine REGs to make tests less verbose */
97079-#define R0 BPF_REG_0
97080-#define R1 BPF_REG_1
97081-#define R2 BPF_REG_2
97082-#define R3 BPF_REG_3
97083-#define R4 BPF_REG_4
97084-#define R5 BPF_REG_5
97085-#define R6 BPF_REG_6
97086-#define R7 BPF_REG_7
97087-#define R8 BPF_REG_8
97088-#define R9 BPF_REG_9
97089-#define R10 BPF_REG_10
97090-
97091-/* Flags that can be passed to test cases */
97092-#define FLAG_NO_DATA BIT(0)
97093-#define FLAG_EXPECTED_FAIL BIT(1)
97094-
97095-enum {
97096- CLASSIC = BIT(6), /* Old BPF instructions only. */
97097- INTERNAL = BIT(7), /* Extended instruction set. */
97098-};
97099-
97100-#define TEST_TYPE_MASK (CLASSIC | INTERNAL)
97101-
97102-struct bpf_test {
97103- const char *descr;
97104- union {
97105- struct sock_filter insns[MAX_INSNS];
97106- struct sock_filter_int insns_int[MAX_INSNS];
97107- } u;
97108- __u8 aux;
97109- __u8 data[MAX_DATA];
97110- struct {
97111- int data_size;
97112- __u32 result;
97113- } test[MAX_SUBTESTS];
97114-};
97115-
97116-static struct bpf_test tests[] = {
97117- {
97118- "TAX",
97119- .u.insns = {
97120- BPF_STMT(BPF_LD | BPF_IMM, 1),
97121- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97122- BPF_STMT(BPF_LD | BPF_IMM, 2),
97123- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
97124- BPF_STMT(BPF_ALU | BPF_NEG, 0), /* A == -3 */
97125- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97126- BPF_STMT(BPF_LD | BPF_LEN, 0),
97127- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
97128- BPF_STMT(BPF_MISC | BPF_TAX, 0), /* X == len - 3 */
97129- BPF_STMT(BPF_LD | BPF_B | BPF_IND, 1),
97130- BPF_STMT(BPF_RET | BPF_A, 0)
97131- },
97132- CLASSIC,
97133- { 10, 20, 30, 40, 50 },
97134- { { 2, 10 }, { 3, 20 }, { 4, 30 } },
97135- },
97136- {
97137- "TXA",
97138- .u.insns = {
97139- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97140- BPF_STMT(BPF_MISC | BPF_TXA, 0),
97141- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
97142- BPF_STMT(BPF_RET | BPF_A, 0) /* A == len * 2 */
97143- },
97144- CLASSIC,
97145- { 10, 20, 30, 40, 50 },
97146- { { 1, 2 }, { 3, 6 }, { 4, 8 } },
97147- },
97148- {
97149- "ADD_SUB_MUL_K",
97150- .u.insns = {
97151- BPF_STMT(BPF_LD | BPF_IMM, 1),
97152- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 2),
97153- BPF_STMT(BPF_LDX | BPF_IMM, 3),
97154- BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
97155- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0xffffffff),
97156- BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 3),
97157- BPF_STMT(BPF_RET | BPF_A, 0)
97158- },
97159- CLASSIC | FLAG_NO_DATA,
97160- { },
97161- { { 0, 0xfffffffd } }
97162- },
97163- {
97164- "DIV_KX",
97165- .u.insns = {
97166- BPF_STMT(BPF_LD | BPF_IMM, 8),
97167- BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 2),
97168- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97169- BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
97170- BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),
97171- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97172- BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
97173- BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x70000000),
97174- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
97175- BPF_STMT(BPF_RET | BPF_A, 0)
97176- },
97177- CLASSIC | FLAG_NO_DATA,
97178- { },
97179- { { 0, 0x40000001 } }
97180- },
97181- {
97182- "AND_OR_LSH_K",
97183- .u.insns = {
97184- BPF_STMT(BPF_LD | BPF_IMM, 0xff),
97185- BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
97186- BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 27),
97187- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97188- BPF_STMT(BPF_LD | BPF_IMM, 0xf),
97189- BPF_STMT(BPF_ALU | BPF_OR | BPF_K, 0xf0),
97190- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
97191- BPF_STMT(BPF_RET | BPF_A, 0)
97192- },
97193- CLASSIC | FLAG_NO_DATA,
97194- { },
97195- { { 0, 0x800000ff }, { 1, 0x800000ff } },
97196- },
97197- {
97198- "LD_IMM_0",
97199- .u.insns = {
97200- BPF_STMT(BPF_LD | BPF_IMM, 0), /* ld #0 */
97201- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 1, 0),
97202- BPF_STMT(BPF_RET | BPF_K, 0),
97203- BPF_STMT(BPF_RET | BPF_K, 1),
97204- },
97205- CLASSIC,
97206- { },
97207- { { 1, 1 } },
97208- },
97209- {
97210- "LD_IND",
97211- .u.insns = {
97212- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97213- BPF_STMT(BPF_LD | BPF_H | BPF_IND, MAX_K),
97214- BPF_STMT(BPF_RET | BPF_K, 1)
97215- },
97216- CLASSIC,
97217- { },
97218- { { 1, 0 }, { 10, 0 }, { 60, 0 } },
97219- },
97220- {
97221- "LD_ABS",
97222- .u.insns = {
97223- BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 1000),
97224- BPF_STMT(BPF_RET | BPF_K, 1)
97225- },
97226- CLASSIC,
97227- { },
97228- { { 1, 0 }, { 10, 0 }, { 60, 0 } },
97229- },
97230- {
97231- "LD_ABS_LL",
97232- .u.insns = {
97233- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF),
97234- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97235- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF + 1),
97236- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
97237- BPF_STMT(BPF_RET | BPF_A, 0)
97238- },
97239- CLASSIC,
97240- { 1, 2, 3 },
97241- { { 1, 0 }, { 2, 3 } },
97242- },
97243- {
97244- "LD_IND_LL",
97245- .u.insns = {
97246- BPF_STMT(BPF_LD | BPF_IMM, SKF_LL_OFF - 1),
97247- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97248- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
97249- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97250- BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
97251- BPF_STMT(BPF_RET | BPF_A, 0)
97252- },
97253- CLASSIC,
97254- { 1, 2, 3, 0xff },
97255- { { 1, 1 }, { 3, 3 }, { 4, 0xff } },
97256- },
97257- {
97258- "LD_ABS_NET",
97259- .u.insns = {
97260- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF),
97261- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97262- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 1),
97263- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
97264- BPF_STMT(BPF_RET | BPF_A, 0)
97265- },
97266- CLASSIC,
97267- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
97268- { { 15, 0 }, { 16, 3 } },
97269- },
97270- {
97271- "LD_IND_NET",
97272- .u.insns = {
97273- BPF_STMT(BPF_LD | BPF_IMM, SKF_NET_OFF - 15),
97274- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97275- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
97276- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97277- BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
97278- BPF_STMT(BPF_RET | BPF_A, 0)
97279- },
97280- CLASSIC,
97281- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
97282- { { 14, 0 }, { 15, 1 }, { 17, 3 } },
97283- },
97284- {
97285- "LD_PKTTYPE",
97286- .u.insns = {
97287- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97288- SKF_AD_OFF + SKF_AD_PKTTYPE),
97289- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
97290- BPF_STMT(BPF_RET | BPF_K, 1),
97291- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97292- SKF_AD_OFF + SKF_AD_PKTTYPE),
97293- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
97294- BPF_STMT(BPF_RET | BPF_K, 1),
97295- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97296- SKF_AD_OFF + SKF_AD_PKTTYPE),
97297- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
97298- BPF_STMT(BPF_RET | BPF_K, 1),
97299- BPF_STMT(BPF_RET | BPF_A, 0)
97300- },
97301- CLASSIC,
97302- { },
97303- { { 1, 3 }, { 10, 3 } },
97304- },
97305- {
97306- "LD_MARK",
97307- .u.insns = {
97308- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97309- SKF_AD_OFF + SKF_AD_MARK),
97310- BPF_STMT(BPF_RET | BPF_A, 0)
97311- },
97312- CLASSIC,
97313- { },
97314- { { 1, SKB_MARK}, { 10, SKB_MARK} },
97315- },
97316- {
97317- "LD_RXHASH",
97318- .u.insns = {
97319- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97320- SKF_AD_OFF + SKF_AD_RXHASH),
97321- BPF_STMT(BPF_RET | BPF_A, 0)
97322- },
97323- CLASSIC,
97324- { },
97325- { { 1, SKB_HASH}, { 10, SKB_HASH} },
97326- },
97327- {
97328- "LD_QUEUE",
97329- .u.insns = {
97330- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97331- SKF_AD_OFF + SKF_AD_QUEUE),
97332- BPF_STMT(BPF_RET | BPF_A, 0)
97333- },
97334- CLASSIC,
97335- { },
97336- { { 1, SKB_QUEUE_MAP }, { 10, SKB_QUEUE_MAP } },
97337- },
97338- {
97339- "LD_PROTOCOL",
97340- .u.insns = {
97341- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 1),
97342- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 20, 1, 0),
97343- BPF_STMT(BPF_RET | BPF_K, 0),
97344- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97345- SKF_AD_OFF + SKF_AD_PROTOCOL),
97346- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97347- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
97348- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 30, 1, 0),
97349- BPF_STMT(BPF_RET | BPF_K, 0),
97350- BPF_STMT(BPF_MISC | BPF_TXA, 0),
97351- BPF_STMT(BPF_RET | BPF_A, 0)
97352- },
97353- CLASSIC,
97354- { 10, 20, 30 },
97355- { { 10, ETH_P_IP }, { 100, ETH_P_IP } },
97356- },
97357- {
97358- "LD_VLAN_TAG",
97359- .u.insns = {
97360- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97361- SKF_AD_OFF + SKF_AD_VLAN_TAG),
97362- BPF_STMT(BPF_RET | BPF_A, 0)
97363- },
97364- CLASSIC,
97365- { },
97366- {
97367- { 1, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT },
97368- { 10, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT }
97369- },
97370- },
97371- {
97372- "LD_VLAN_TAG_PRESENT",
97373- .u.insns = {
97374- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97375- SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT),
97376- BPF_STMT(BPF_RET | BPF_A, 0)
97377- },
97378- CLASSIC,
97379- { },
97380- {
97381- { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
97382- { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
97383- },
97384- },
97385- {
97386- "LD_IFINDEX",
97387- .u.insns = {
97388- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97389- SKF_AD_OFF + SKF_AD_IFINDEX),
97390- BPF_STMT(BPF_RET | BPF_A, 0)
97391- },
97392- CLASSIC,
97393- { },
97394- { { 1, SKB_DEV_IFINDEX }, { 10, SKB_DEV_IFINDEX } },
97395- },
97396- {
97397- "LD_HATYPE",
97398- .u.insns = {
97399- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97400- SKF_AD_OFF + SKF_AD_HATYPE),
97401- BPF_STMT(BPF_RET | BPF_A, 0)
97402- },
97403- CLASSIC,
97404- { },
97405- { { 1, SKB_DEV_TYPE }, { 10, SKB_DEV_TYPE } },
97406- },
97407- {
97408- "LD_CPU",
97409- .u.insns = {
97410- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97411- SKF_AD_OFF + SKF_AD_CPU),
97412- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97413- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97414- SKF_AD_OFF + SKF_AD_CPU),
97415- BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
97416- BPF_STMT(BPF_RET | BPF_A, 0)
97417- },
97418- CLASSIC,
97419- { },
97420- { { 1, 0 }, { 10, 0 } },
97421- },
97422- {
97423- "LD_NLATTR",
97424- .u.insns = {
97425- BPF_STMT(BPF_LDX | BPF_IMM, 2),
97426- BPF_STMT(BPF_MISC | BPF_TXA, 0),
97427- BPF_STMT(BPF_LDX | BPF_IMM, 3),
97428- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97429- SKF_AD_OFF + SKF_AD_NLATTR),
97430- BPF_STMT(BPF_RET | BPF_A, 0)
97431- },
97432- CLASSIC,
97433-#ifdef __BIG_ENDIAN
97434- { 0xff, 0xff, 0, 4, 0, 2, 0, 4, 0, 3 },
97435-#else
97436- { 0xff, 0xff, 4, 0, 2, 0, 4, 0, 3, 0 },
97437-#endif
97438- { { 4, 0 }, { 20, 6 } },
97439- },
97440- {
97441- "LD_NLATTR_NEST",
97442- .u.insns = {
97443- BPF_STMT(BPF_LD | BPF_IMM, 2),
97444- BPF_STMT(BPF_LDX | BPF_IMM, 3),
97445- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97446- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
97447- BPF_STMT(BPF_LD | BPF_IMM, 2),
97448- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97449- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
97450- BPF_STMT(BPF_LD | BPF_IMM, 2),
97451- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97452- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
97453- BPF_STMT(BPF_LD | BPF_IMM, 2),
97454- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97455- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
97456- BPF_STMT(BPF_LD | BPF_IMM, 2),
97457- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97458- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
97459- BPF_STMT(BPF_LD | BPF_IMM, 2),
97460- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97461- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
97462- BPF_STMT(BPF_LD | BPF_IMM, 2),
97463- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97464- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
97465- BPF_STMT(BPF_LD | BPF_IMM, 2),
97466- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97467- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
97468- BPF_STMT(BPF_RET | BPF_A, 0)
97469- },
97470- CLASSIC,
97471-#ifdef __BIG_ENDIAN
97472- { 0xff, 0xff, 0, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3 },
97473-#else
97474- { 0xff, 0xff, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3, 0 },
97475-#endif
97476- { { 4, 0 }, { 20, 10 } },
97477- },
97478- {
97479- "LD_PAYLOAD_OFF",
97480- .u.insns = {
97481- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97482- SKF_AD_OFF + SKF_AD_PAY_OFFSET),
97483- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97484- SKF_AD_OFF + SKF_AD_PAY_OFFSET),
97485- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97486- SKF_AD_OFF + SKF_AD_PAY_OFFSET),
97487- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97488- SKF_AD_OFF + SKF_AD_PAY_OFFSET),
97489- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97490- SKF_AD_OFF + SKF_AD_PAY_OFFSET),
97491- BPF_STMT(BPF_RET | BPF_A, 0)
97492- },
97493- CLASSIC,
97494- /* 00:00:00:00:00:00 > 00:00:00:00:00:00, ethtype IPv4 (0x0800),
97495- * length 98: 127.0.0.1 > 127.0.0.1: ICMP echo request,
97496- * id 9737, seq 1, length 64
97497- */
97498- { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
97499- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
97500- 0x08, 0x00,
97501- 0x45, 0x00, 0x00, 0x54, 0xac, 0x8b, 0x40, 0x00, 0x40,
97502- 0x01, 0x90, 0x1b, 0x7f, 0x00, 0x00, 0x01 },
97503- { { 30, 0 }, { 100, 42 } },
97504- },
97505- {
97506- "LD_ANC_XOR",
97507- .u.insns = {
97508- BPF_STMT(BPF_LD | BPF_IMM, 10),
97509- BPF_STMT(BPF_LDX | BPF_IMM, 300),
97510- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97511- SKF_AD_OFF + SKF_AD_ALU_XOR_X),
97512- BPF_STMT(BPF_RET | BPF_A, 0)
97513- },
97514- CLASSIC,
97515- { },
97516- { { 4, 10 ^ 300 }, { 20, 10 ^ 300 } },
97517- },
97518- {
97519- "SPILL_FILL",
97520- .u.insns = {
97521- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97522- BPF_STMT(BPF_LD | BPF_IMM, 2),
97523- BPF_STMT(BPF_ALU | BPF_RSH, 1),
97524- BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
97525- BPF_STMT(BPF_ST, 1), /* M1 = 1 ^ len */
97526- BPF_STMT(BPF_ALU | BPF_XOR | BPF_K, 0x80000000),
97527- BPF_STMT(BPF_ST, 2), /* M2 = 1 ^ len ^ 0x80000000 */
97528- BPF_STMT(BPF_STX, 15), /* M3 = len */
97529- BPF_STMT(BPF_LDX | BPF_MEM, 1),
97530- BPF_STMT(BPF_LD | BPF_MEM, 2),
97531- BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
97532- BPF_STMT(BPF_LDX | BPF_MEM, 15),
97533- BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
97534- BPF_STMT(BPF_RET | BPF_A, 0)
97535- },
97536- CLASSIC,
97537- { },
97538- { { 1, 0x80000001 }, { 2, 0x80000002 }, { 60, 0x80000000 ^ 60 } }
97539- },
97540- {
97541- "JEQ",
97542- .u.insns = {
97543- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97544- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
97545- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 1),
97546- BPF_STMT(BPF_RET | BPF_K, 1),
97547- BPF_STMT(BPF_RET | BPF_K, MAX_K)
97548- },
97549- CLASSIC,
97550- { 3, 3, 3, 3, 3 },
97551- { { 1, 0 }, { 3, 1 }, { 4, MAX_K } },
97552- },
97553- {
97554- "JGT",
97555- .u.insns = {
97556- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97557- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
97558- BPF_JUMP(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 1),
97559- BPF_STMT(BPF_RET | BPF_K, 1),
97560- BPF_STMT(BPF_RET | BPF_K, MAX_K)
97561- },
97562- CLASSIC,
97563- { 4, 4, 4, 3, 3 },
97564- { { 2, 0 }, { 3, 1 }, { 4, MAX_K } },
97565- },
97566- {
97567- "JGE",
97568- .u.insns = {
97569- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97570- BPF_STMT(BPF_LD | BPF_B | BPF_IND, MAX_K),
97571- BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 1, 1, 0),
97572- BPF_STMT(BPF_RET | BPF_K, 10),
97573- BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 2, 1, 0),
97574- BPF_STMT(BPF_RET | BPF_K, 20),
97575- BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 3, 1, 0),
97576- BPF_STMT(BPF_RET | BPF_K, 30),
97577- BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 4, 1, 0),
97578- BPF_STMT(BPF_RET | BPF_K, 40),
97579- BPF_STMT(BPF_RET | BPF_K, MAX_K)
97580- },
97581- CLASSIC,
97582- { 1, 2, 3, 4, 5 },
97583- { { 1, 20 }, { 3, 40 }, { 5, MAX_K } },
97584- },
97585- {
97586- "JSET",
97587- .u.insns = {
97588- BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
97589- BPF_JUMP(BPF_JMP | BPF_JA, 1, 1, 1),
97590- BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
97591- BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
97592- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97593- BPF_STMT(BPF_MISC | BPF_TXA, 0),
97594- BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, 4),
97595- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97596- BPF_STMT(BPF_LD | BPF_W | BPF_IND, 0),
97597- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 1, 0, 1),
97598- BPF_STMT(BPF_RET | BPF_K, 10),
97599- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x80000000, 0, 1),
97600- BPF_STMT(BPF_RET | BPF_K, 20),
97601- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
97602- BPF_STMT(BPF_RET | BPF_K, 30),
97603- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
97604- BPF_STMT(BPF_RET | BPF_K, 30),
97605- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
97606- BPF_STMT(BPF_RET | BPF_K, 30),
97607- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
97608- BPF_STMT(BPF_RET | BPF_K, 30),
97609- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
97610- BPF_STMT(BPF_RET | BPF_K, 30),
97611- BPF_STMT(BPF_RET | BPF_K, MAX_K)
97612- },
97613- CLASSIC,
97614- { 0, 0xAA, 0x55, 1 },
97615- { { 4, 10 }, { 5, 20 }, { 6, MAX_K } },
97616- },
97617- {
97618- "tcpdump port 22",
97619- .u.insns = {
97620- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
97621- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 0, 8), /* IPv6 */
97622- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 20),
97623- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
97624- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
97625- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 17),
97626- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 54),
97627- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 14, 0),
97628- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 56),
97629- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 12, 13),
97630- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 12), /* IPv4 */
97631- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
97632- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
97633- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
97634- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 8),
97635- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
97636- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 6, 0),
97637- BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
97638- BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
97639- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
97640- BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
97641- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 1),
97642- BPF_STMT(BPF_RET | BPF_K, 0xffff),
97643- BPF_STMT(BPF_RET | BPF_K, 0),
97644- },
97645- CLASSIC,
97646- /* 3c:07:54:43:e5:76 > 10:bf:48:d6:43:d6, ethertype IPv4(0x0800)
97647- * length 114: 10.1.1.149.49700 > 10.1.2.10.22: Flags [P.],
97648- * seq 1305692979:1305693027, ack 3650467037, win 65535,
97649- * options [nop,nop,TS val 2502645400 ecr 3971138], length 48
97650- */
97651- { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
97652- 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
97653- 0x08, 0x00,
97654- 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
97655- 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
97656- 0x0a, 0x01, 0x01, 0x95, /* ip src */
97657- 0x0a, 0x01, 0x02, 0x0a, /* ip dst */
97658- 0xc2, 0x24,
97659- 0x00, 0x16 /* dst port */ },
97660- { { 10, 0 }, { 30, 0 }, { 100, 65535 } },
97661- },
97662- {
97663- "tcpdump complex",
97664- .u.insns = {
97665- /* tcpdump -nei eth0 'tcp port 22 and (((ip[2:2] -
97666- * ((ip[0]&0xf)<<2)) - ((tcp[12]&0xf0)>>2)) != 0) and
97667- * (len > 115 or len < 30000000000)' -d
97668- */
97669- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
97670- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 30, 0),
97671- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x800, 0, 29),
97672- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
97673- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 0, 27),
97674- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
97675- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 25, 0),
97676- BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
97677- BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
97678- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
97679- BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
97680- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 20),
97681- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 16),
97682- BPF_STMT(BPF_ST, 1),
97683- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 14),
97684- BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf),
97685- BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 2),
97686- BPF_STMT(BPF_MISC | BPF_TAX, 0x5), /* libpcap emits K on TAX */
97687- BPF_STMT(BPF_LD | BPF_MEM, 1),
97688- BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
97689- BPF_STMT(BPF_ST, 5),
97690- BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
97691- BPF_STMT(BPF_LD | BPF_B | BPF_IND, 26),
97692- BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
97693- BPF_STMT(BPF_ALU | BPF_RSH | BPF_K, 2),
97694- BPF_STMT(BPF_MISC | BPF_TAX, 0x9), /* libpcap emits K on TAX */
97695- BPF_STMT(BPF_LD | BPF_MEM, 5),
97696- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 4, 0),
97697- BPF_STMT(BPF_LD | BPF_LEN, 0),
97698- BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, 0x73, 1, 0),
97699- BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 0xfc23ac00, 1, 0),
97700- BPF_STMT(BPF_RET | BPF_K, 0xffff),
97701- BPF_STMT(BPF_RET | BPF_K, 0),
97702- },
97703- CLASSIC,
97704- { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
97705- 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
97706- 0x08, 0x00,
97707- 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
97708- 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
97709- 0x0a, 0x01, 0x01, 0x95, /* ip src */
97710- 0x0a, 0x01, 0x02, 0x0a, /* ip dst */
97711- 0xc2, 0x24,
97712- 0x00, 0x16 /* dst port */ },
97713- { { 10, 0 }, { 30, 0 }, { 100, 65535 } },
97714- },
97715- {
97716- "RET_A",
97717- .u.insns = {
97718-		/* check that uninitialized X and A contain zeros */
97719- BPF_STMT(BPF_MISC | BPF_TXA, 0),
97720- BPF_STMT(BPF_RET | BPF_A, 0)
97721- },
97722- CLASSIC,
97723- { },
97724- { {1, 0}, {2, 0} },
97725- },
97726- {
97727- "INT: ADD trivial",
97728- .u.insns_int = {
97729- BPF_ALU64_IMM(BPF_MOV, R1, 1),
97730- BPF_ALU64_IMM(BPF_ADD, R1, 2),
97731- BPF_ALU64_IMM(BPF_MOV, R2, 3),
97732- BPF_ALU64_REG(BPF_SUB, R1, R2),
97733- BPF_ALU64_IMM(BPF_ADD, R1, -1),
97734- BPF_ALU64_IMM(BPF_MUL, R1, 3),
97735- BPF_ALU64_REG(BPF_MOV, R0, R1),
97736- BPF_EXIT_INSN(),
97737- },
97738- INTERNAL,
97739- { },
97740- { { 0, 0xfffffffd } }
97741- },
97742- {
97743- "INT: MUL_X",
97744- .u.insns_int = {
97745- BPF_ALU64_IMM(BPF_MOV, R0, -1),
97746- BPF_ALU64_IMM(BPF_MOV, R1, -1),
97747- BPF_ALU64_IMM(BPF_MOV, R2, 3),
97748- BPF_ALU64_REG(BPF_MUL, R1, R2),
97749- BPF_JMP_IMM(BPF_JEQ, R1, 0xfffffffd, 1),
97750- BPF_EXIT_INSN(),
97751- BPF_ALU64_IMM(BPF_MOV, R0, 1),
97752- BPF_EXIT_INSN(),
97753- },
97754- INTERNAL,
97755- { },
97756- { { 0, 1 } }
97757- },
97758- {
97759- "INT: MUL_X2",
97760- .u.insns_int = {
97761- BPF_ALU32_IMM(BPF_MOV, R0, -1),
97762- BPF_ALU32_IMM(BPF_MOV, R1, -1),
97763- BPF_ALU32_IMM(BPF_MOV, R2, 3),
97764- BPF_ALU64_REG(BPF_MUL, R1, R2),
97765- BPF_ALU64_IMM(BPF_RSH, R1, 8),
97766- BPF_JMP_IMM(BPF_JEQ, R1, 0x2ffffff, 1),
97767- BPF_EXIT_INSN(),
97768- BPF_ALU32_IMM(BPF_MOV, R0, 1),
97769- BPF_EXIT_INSN(),
97770- },
97771- INTERNAL,
97772- { },
97773- { { 0, 1 } }
97774- },
97775- {
97776- "INT: MUL32_X",
97777- .u.insns_int = {
97778- BPF_ALU32_IMM(BPF_MOV, R0, -1),
97779- BPF_ALU64_IMM(BPF_MOV, R1, -1),
97780- BPF_ALU32_IMM(BPF_MOV, R2, 3),
97781- BPF_ALU32_REG(BPF_MUL, R1, R2),
97782- BPF_ALU64_IMM(BPF_RSH, R1, 8),
97783- BPF_JMP_IMM(BPF_JEQ, R1, 0xffffff, 1),
97784- BPF_EXIT_INSN(),
97785- BPF_ALU32_IMM(BPF_MOV, R0, 1),
97786- BPF_EXIT_INSN(),
97787- },
97788- INTERNAL,
97789- { },
97790- { { 0, 1 } }
97791- },
97792- {
97793- /* Have to test all register combinations, since
97794- * JITing of different registers will produce
97795- * different asm code.
97796- */
97797- "INT: ADD 64-bit",
97798- .u.insns_int = {
97799- BPF_ALU64_IMM(BPF_MOV, R0, 0),
97800- BPF_ALU64_IMM(BPF_MOV, R1, 1),
97801- BPF_ALU64_IMM(BPF_MOV, R2, 2),
97802- BPF_ALU64_IMM(BPF_MOV, R3, 3),
97803- BPF_ALU64_IMM(BPF_MOV, R4, 4),
97804- BPF_ALU64_IMM(BPF_MOV, R5, 5),
97805- BPF_ALU64_IMM(BPF_MOV, R6, 6),
97806- BPF_ALU64_IMM(BPF_MOV, R7, 7),
97807- BPF_ALU64_IMM(BPF_MOV, R8, 8),
97808- BPF_ALU64_IMM(BPF_MOV, R9, 9),
97809- BPF_ALU64_IMM(BPF_ADD, R0, 20),
97810- BPF_ALU64_IMM(BPF_ADD, R1, 20),
97811- BPF_ALU64_IMM(BPF_ADD, R2, 20),
97812- BPF_ALU64_IMM(BPF_ADD, R3, 20),
97813- BPF_ALU64_IMM(BPF_ADD, R4, 20),
97814- BPF_ALU64_IMM(BPF_ADD, R5, 20),
97815- BPF_ALU64_IMM(BPF_ADD, R6, 20),
97816- BPF_ALU64_IMM(BPF_ADD, R7, 20),
97817- BPF_ALU64_IMM(BPF_ADD, R8, 20),
97818- BPF_ALU64_IMM(BPF_ADD, R9, 20),
97819- BPF_ALU64_IMM(BPF_SUB, R0, 10),
97820- BPF_ALU64_IMM(BPF_SUB, R1, 10),
97821- BPF_ALU64_IMM(BPF_SUB, R2, 10),
97822- BPF_ALU64_IMM(BPF_SUB, R3, 10),
97823- BPF_ALU64_IMM(BPF_SUB, R4, 10),
97824- BPF_ALU64_IMM(BPF_SUB, R5, 10),
97825- BPF_ALU64_IMM(BPF_SUB, R6, 10),
97826- BPF_ALU64_IMM(BPF_SUB, R7, 10),
97827- BPF_ALU64_IMM(BPF_SUB, R8, 10),
97828- BPF_ALU64_IMM(BPF_SUB, R9, 10),
97829- BPF_ALU64_REG(BPF_ADD, R0, R0),
97830- BPF_ALU64_REG(BPF_ADD, R0, R1),
97831- BPF_ALU64_REG(BPF_ADD, R0, R2),
97832- BPF_ALU64_REG(BPF_ADD, R0, R3),
97833- BPF_ALU64_REG(BPF_ADD, R0, R4),
97834- BPF_ALU64_REG(BPF_ADD, R0, R5),
97835- BPF_ALU64_REG(BPF_ADD, R0, R6),
97836- BPF_ALU64_REG(BPF_ADD, R0, R7),
97837- BPF_ALU64_REG(BPF_ADD, R0, R8),
97838- BPF_ALU64_REG(BPF_ADD, R0, R9), /* R0 == 155 */
97839- BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
97840- BPF_EXIT_INSN(),
97841- BPF_ALU64_REG(BPF_ADD, R1, R0),
97842- BPF_ALU64_REG(BPF_ADD, R1, R1),
97843- BPF_ALU64_REG(BPF_ADD, R1, R2),
97844- BPF_ALU64_REG(BPF_ADD, R1, R3),
97845- BPF_ALU64_REG(BPF_ADD, R1, R4),
97846- BPF_ALU64_REG(BPF_ADD, R1, R5),
97847- BPF_ALU64_REG(BPF_ADD, R1, R6),
97848- BPF_ALU64_REG(BPF_ADD, R1, R7),
97849- BPF_ALU64_REG(BPF_ADD, R1, R8),
97850- BPF_ALU64_REG(BPF_ADD, R1, R9), /* R1 == 456 */
97851- BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
97852- BPF_EXIT_INSN(),
97853- BPF_ALU64_REG(BPF_ADD, R2, R0),
97854- BPF_ALU64_REG(BPF_ADD, R2, R1),
97855- BPF_ALU64_REG(BPF_ADD, R2, R2),
97856- BPF_ALU64_REG(BPF_ADD, R2, R3),
97857- BPF_ALU64_REG(BPF_ADD, R2, R4),
97858- BPF_ALU64_REG(BPF_ADD, R2, R5),
97859- BPF_ALU64_REG(BPF_ADD, R2, R6),
97860- BPF_ALU64_REG(BPF_ADD, R2, R7),
97861- BPF_ALU64_REG(BPF_ADD, R2, R8),
97862- BPF_ALU64_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
97863- BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
97864- BPF_EXIT_INSN(),
97865- BPF_ALU64_REG(BPF_ADD, R3, R0),
97866- BPF_ALU64_REG(BPF_ADD, R3, R1),
97867- BPF_ALU64_REG(BPF_ADD, R3, R2),
97868- BPF_ALU64_REG(BPF_ADD, R3, R3),
97869- BPF_ALU64_REG(BPF_ADD, R3, R4),
97870- BPF_ALU64_REG(BPF_ADD, R3, R5),
97871- BPF_ALU64_REG(BPF_ADD, R3, R6),
97872- BPF_ALU64_REG(BPF_ADD, R3, R7),
97873- BPF_ALU64_REG(BPF_ADD, R3, R8),
97874- BPF_ALU64_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
97875- BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
97876- BPF_EXIT_INSN(),
97877- BPF_ALU64_REG(BPF_ADD, R4, R0),
97878- BPF_ALU64_REG(BPF_ADD, R4, R1),
97879- BPF_ALU64_REG(BPF_ADD, R4, R2),
97880- BPF_ALU64_REG(BPF_ADD, R4, R3),
97881- BPF_ALU64_REG(BPF_ADD, R4, R4),
97882- BPF_ALU64_REG(BPF_ADD, R4, R5),
97883- BPF_ALU64_REG(BPF_ADD, R4, R6),
97884- BPF_ALU64_REG(BPF_ADD, R4, R7),
97885- BPF_ALU64_REG(BPF_ADD, R4, R8),
97886- BPF_ALU64_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
97887- BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
97888- BPF_EXIT_INSN(),
97889- BPF_ALU64_REG(BPF_ADD, R5, R0),
97890- BPF_ALU64_REG(BPF_ADD, R5, R1),
97891- BPF_ALU64_REG(BPF_ADD, R5, R2),
97892- BPF_ALU64_REG(BPF_ADD, R5, R3),
97893- BPF_ALU64_REG(BPF_ADD, R5, R4),
97894- BPF_ALU64_REG(BPF_ADD, R5, R5),
97895- BPF_ALU64_REG(BPF_ADD, R5, R6),
97896- BPF_ALU64_REG(BPF_ADD, R5, R7),
97897- BPF_ALU64_REG(BPF_ADD, R5, R8),
97898- BPF_ALU64_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
97899- BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
97900- BPF_EXIT_INSN(),
97901- BPF_ALU64_REG(BPF_ADD, R6, R0),
97902- BPF_ALU64_REG(BPF_ADD, R6, R1),
97903- BPF_ALU64_REG(BPF_ADD, R6, R2),
97904- BPF_ALU64_REG(BPF_ADD, R6, R3),
97905- BPF_ALU64_REG(BPF_ADD, R6, R4),
97906- BPF_ALU64_REG(BPF_ADD, R6, R5),
97907- BPF_ALU64_REG(BPF_ADD, R6, R6),
97908- BPF_ALU64_REG(BPF_ADD, R6, R7),
97909- BPF_ALU64_REG(BPF_ADD, R6, R8),
97910- BPF_ALU64_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
97911- BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
97912- BPF_EXIT_INSN(),
97913- BPF_ALU64_REG(BPF_ADD, R7, R0),
97914- BPF_ALU64_REG(BPF_ADD, R7, R1),
97915- BPF_ALU64_REG(BPF_ADD, R7, R2),
97916- BPF_ALU64_REG(BPF_ADD, R7, R3),
97917- BPF_ALU64_REG(BPF_ADD, R7, R4),
97918- BPF_ALU64_REG(BPF_ADD, R7, R5),
97919- BPF_ALU64_REG(BPF_ADD, R7, R6),
97920- BPF_ALU64_REG(BPF_ADD, R7, R7),
97921- BPF_ALU64_REG(BPF_ADD, R7, R8),
97922- BPF_ALU64_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
97923- BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
97924- BPF_EXIT_INSN(),
97925- BPF_ALU64_REG(BPF_ADD, R8, R0),
97926- BPF_ALU64_REG(BPF_ADD, R8, R1),
97927- BPF_ALU64_REG(BPF_ADD, R8, R2),
97928- BPF_ALU64_REG(BPF_ADD, R8, R3),
97929- BPF_ALU64_REG(BPF_ADD, R8, R4),
97930- BPF_ALU64_REG(BPF_ADD, R8, R5),
97931- BPF_ALU64_REG(BPF_ADD, R8, R6),
97932- BPF_ALU64_REG(BPF_ADD, R8, R7),
97933- BPF_ALU64_REG(BPF_ADD, R8, R8),
97934- BPF_ALU64_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
97935- BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
97936- BPF_EXIT_INSN(),
97937- BPF_ALU64_REG(BPF_ADD, R9, R0),
97938- BPF_ALU64_REG(BPF_ADD, R9, R1),
97939- BPF_ALU64_REG(BPF_ADD, R9, R2),
97940- BPF_ALU64_REG(BPF_ADD, R9, R3),
97941- BPF_ALU64_REG(BPF_ADD, R9, R4),
97942- BPF_ALU64_REG(BPF_ADD, R9, R5),
97943- BPF_ALU64_REG(BPF_ADD, R9, R6),
97944- BPF_ALU64_REG(BPF_ADD, R9, R7),
97945- BPF_ALU64_REG(BPF_ADD, R9, R8),
97946- BPF_ALU64_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
97947- BPF_ALU64_REG(BPF_MOV, R0, R9),
97948- BPF_EXIT_INSN(),
97949- },
97950- INTERNAL,
97951- { },
97952- { { 0, 2957380 } }
97953- },
97954- {
97955- "INT: ADD 32-bit",
97956- .u.insns_int = {
97957- BPF_ALU32_IMM(BPF_MOV, R0, 20),
97958- BPF_ALU32_IMM(BPF_MOV, R1, 1),
97959- BPF_ALU32_IMM(BPF_MOV, R2, 2),
97960- BPF_ALU32_IMM(BPF_MOV, R3, 3),
97961- BPF_ALU32_IMM(BPF_MOV, R4, 4),
97962- BPF_ALU32_IMM(BPF_MOV, R5, 5),
97963- BPF_ALU32_IMM(BPF_MOV, R6, 6),
97964- BPF_ALU32_IMM(BPF_MOV, R7, 7),
97965- BPF_ALU32_IMM(BPF_MOV, R8, 8),
97966- BPF_ALU32_IMM(BPF_MOV, R9, 9),
97967- BPF_ALU64_IMM(BPF_ADD, R1, 10),
97968- BPF_ALU64_IMM(BPF_ADD, R2, 10),
97969- BPF_ALU64_IMM(BPF_ADD, R3, 10),
97970- BPF_ALU64_IMM(BPF_ADD, R4, 10),
97971- BPF_ALU64_IMM(BPF_ADD, R5, 10),
97972- BPF_ALU64_IMM(BPF_ADD, R6, 10),
97973- BPF_ALU64_IMM(BPF_ADD, R7, 10),
97974- BPF_ALU64_IMM(BPF_ADD, R8, 10),
97975- BPF_ALU64_IMM(BPF_ADD, R9, 10),
97976- BPF_ALU32_REG(BPF_ADD, R0, R1),
97977- BPF_ALU32_REG(BPF_ADD, R0, R2),
97978- BPF_ALU32_REG(BPF_ADD, R0, R3),
97979- BPF_ALU32_REG(BPF_ADD, R0, R4),
97980- BPF_ALU32_REG(BPF_ADD, R0, R5),
97981- BPF_ALU32_REG(BPF_ADD, R0, R6),
97982- BPF_ALU32_REG(BPF_ADD, R0, R7),
97983- BPF_ALU32_REG(BPF_ADD, R0, R8),
97984- BPF_ALU32_REG(BPF_ADD, R0, R9), /* R0 == 155 */
97985- BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
97986- BPF_EXIT_INSN(),
97987- BPF_ALU32_REG(BPF_ADD, R1, R0),
97988- BPF_ALU32_REG(BPF_ADD, R1, R1),
97989- BPF_ALU32_REG(BPF_ADD, R1, R2),
97990- BPF_ALU32_REG(BPF_ADD, R1, R3),
97991- BPF_ALU32_REG(BPF_ADD, R1, R4),
97992- BPF_ALU32_REG(BPF_ADD, R1, R5),
97993- BPF_ALU32_REG(BPF_ADD, R1, R6),
97994- BPF_ALU32_REG(BPF_ADD, R1, R7),
97995- BPF_ALU32_REG(BPF_ADD, R1, R8),
97996- BPF_ALU32_REG(BPF_ADD, R1, R9), /* R1 == 456 */
97997- BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
97998- BPF_EXIT_INSN(),
97999- BPF_ALU32_REG(BPF_ADD, R2, R0),
98000- BPF_ALU32_REG(BPF_ADD, R2, R1),
98001- BPF_ALU32_REG(BPF_ADD, R2, R2),
98002- BPF_ALU32_REG(BPF_ADD, R2, R3),
98003- BPF_ALU32_REG(BPF_ADD, R2, R4),
98004- BPF_ALU32_REG(BPF_ADD, R2, R5),
98005- BPF_ALU32_REG(BPF_ADD, R2, R6),
98006- BPF_ALU32_REG(BPF_ADD, R2, R7),
98007- BPF_ALU32_REG(BPF_ADD, R2, R8),
98008- BPF_ALU32_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
98009- BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
98010- BPF_EXIT_INSN(),
98011- BPF_ALU32_REG(BPF_ADD, R3, R0),
98012- BPF_ALU32_REG(BPF_ADD, R3, R1),
98013- BPF_ALU32_REG(BPF_ADD, R3, R2),
98014- BPF_ALU32_REG(BPF_ADD, R3, R3),
98015- BPF_ALU32_REG(BPF_ADD, R3, R4),
98016- BPF_ALU32_REG(BPF_ADD, R3, R5),
98017- BPF_ALU32_REG(BPF_ADD, R3, R6),
98018- BPF_ALU32_REG(BPF_ADD, R3, R7),
98019- BPF_ALU32_REG(BPF_ADD, R3, R8),
98020- BPF_ALU32_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
98021- BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
98022- BPF_EXIT_INSN(),
98023- BPF_ALU32_REG(BPF_ADD, R4, R0),
98024- BPF_ALU32_REG(BPF_ADD, R4, R1),
98025- BPF_ALU32_REG(BPF_ADD, R4, R2),
98026- BPF_ALU32_REG(BPF_ADD, R4, R3),
98027- BPF_ALU32_REG(BPF_ADD, R4, R4),
98028- BPF_ALU32_REG(BPF_ADD, R4, R5),
98029- BPF_ALU32_REG(BPF_ADD, R4, R6),
98030- BPF_ALU32_REG(BPF_ADD, R4, R7),
98031- BPF_ALU32_REG(BPF_ADD, R4, R8),
98032- BPF_ALU32_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
98033- BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
98034- BPF_EXIT_INSN(),
98035- BPF_ALU32_REG(BPF_ADD, R5, R0),
98036- BPF_ALU32_REG(BPF_ADD, R5, R1),
98037- BPF_ALU32_REG(BPF_ADD, R5, R2),
98038- BPF_ALU32_REG(BPF_ADD, R5, R3),
98039- BPF_ALU32_REG(BPF_ADD, R5, R4),
98040- BPF_ALU32_REG(BPF_ADD, R5, R5),
98041- BPF_ALU32_REG(BPF_ADD, R5, R6),
98042- BPF_ALU32_REG(BPF_ADD, R5, R7),
98043- BPF_ALU32_REG(BPF_ADD, R5, R8),
98044- BPF_ALU32_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
98045- BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
98046- BPF_EXIT_INSN(),
98047- BPF_ALU32_REG(BPF_ADD, R6, R0),
98048- BPF_ALU32_REG(BPF_ADD, R6, R1),
98049- BPF_ALU32_REG(BPF_ADD, R6, R2),
98050- BPF_ALU32_REG(BPF_ADD, R6, R3),
98051- BPF_ALU32_REG(BPF_ADD, R6, R4),
98052- BPF_ALU32_REG(BPF_ADD, R6, R5),
98053- BPF_ALU32_REG(BPF_ADD, R6, R6),
98054- BPF_ALU32_REG(BPF_ADD, R6, R7),
98055- BPF_ALU32_REG(BPF_ADD, R6, R8),
98056- BPF_ALU32_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
98057- BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
98058- BPF_EXIT_INSN(),
98059- BPF_ALU32_REG(BPF_ADD, R7, R0),
98060- BPF_ALU32_REG(BPF_ADD, R7, R1),
98061- BPF_ALU32_REG(BPF_ADD, R7, R2),
98062- BPF_ALU32_REG(BPF_ADD, R7, R3),
98063- BPF_ALU32_REG(BPF_ADD, R7, R4),
98064- BPF_ALU32_REG(BPF_ADD, R7, R5),
98065- BPF_ALU32_REG(BPF_ADD, R7, R6),
98066- BPF_ALU32_REG(BPF_ADD, R7, R7),
98067- BPF_ALU32_REG(BPF_ADD, R7, R8),
98068- BPF_ALU32_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
98069- BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
98070- BPF_EXIT_INSN(),
98071- BPF_ALU32_REG(BPF_ADD, R8, R0),
98072- BPF_ALU32_REG(BPF_ADD, R8, R1),
98073- BPF_ALU32_REG(BPF_ADD, R8, R2),
98074- BPF_ALU32_REG(BPF_ADD, R8, R3),
98075- BPF_ALU32_REG(BPF_ADD, R8, R4),
98076- BPF_ALU32_REG(BPF_ADD, R8, R5),
98077- BPF_ALU32_REG(BPF_ADD, R8, R6),
98078- BPF_ALU32_REG(BPF_ADD, R8, R7),
98079- BPF_ALU32_REG(BPF_ADD, R8, R8),
98080- BPF_ALU32_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
98081- BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
98082- BPF_EXIT_INSN(),
98083- BPF_ALU32_REG(BPF_ADD, R9, R0),
98084- BPF_ALU32_REG(BPF_ADD, R9, R1),
98085- BPF_ALU32_REG(BPF_ADD, R9, R2),
98086- BPF_ALU32_REG(BPF_ADD, R9, R3),
98087- BPF_ALU32_REG(BPF_ADD, R9, R4),
98088- BPF_ALU32_REG(BPF_ADD, R9, R5),
98089- BPF_ALU32_REG(BPF_ADD, R9, R6),
98090- BPF_ALU32_REG(BPF_ADD, R9, R7),
98091- BPF_ALU32_REG(BPF_ADD, R9, R8),
98092- BPF_ALU32_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
98093- BPF_ALU32_REG(BPF_MOV, R0, R9),
98094- BPF_EXIT_INSN(),
98095- },
98096- INTERNAL,
98097- { },
98098- { { 0, 2957380 } }
98099- },
98100- { /* Mainly checking JIT here. */
98101- "INT: SUB",
98102- .u.insns_int = {
98103- BPF_ALU64_IMM(BPF_MOV, R0, 0),
98104- BPF_ALU64_IMM(BPF_MOV, R1, 1),
98105- BPF_ALU64_IMM(BPF_MOV, R2, 2),
98106- BPF_ALU64_IMM(BPF_MOV, R3, 3),
98107- BPF_ALU64_IMM(BPF_MOV, R4, 4),
98108- BPF_ALU64_IMM(BPF_MOV, R5, 5),
98109- BPF_ALU64_IMM(BPF_MOV, R6, 6),
98110- BPF_ALU64_IMM(BPF_MOV, R7, 7),
98111- BPF_ALU64_IMM(BPF_MOV, R8, 8),
98112- BPF_ALU64_IMM(BPF_MOV, R9, 9),
98113- BPF_ALU64_REG(BPF_SUB, R0, R0),
98114- BPF_ALU64_REG(BPF_SUB, R0, R1),
98115- BPF_ALU64_REG(BPF_SUB, R0, R2),
98116- BPF_ALU64_REG(BPF_SUB, R0, R3),
98117- BPF_ALU64_REG(BPF_SUB, R0, R4),
98118- BPF_ALU64_REG(BPF_SUB, R0, R5),
98119- BPF_ALU64_REG(BPF_SUB, R0, R6),
98120- BPF_ALU64_REG(BPF_SUB, R0, R7),
98121- BPF_ALU64_REG(BPF_SUB, R0, R8),
98122- BPF_ALU64_REG(BPF_SUB, R0, R9),
98123- BPF_ALU64_IMM(BPF_SUB, R0, 10),
98124- BPF_JMP_IMM(BPF_JEQ, R0, -55, 1),
98125- BPF_EXIT_INSN(),
98126- BPF_ALU64_REG(BPF_SUB, R1, R0),
98127- BPF_ALU64_REG(BPF_SUB, R1, R2),
98128- BPF_ALU64_REG(BPF_SUB, R1, R3),
98129- BPF_ALU64_REG(BPF_SUB, R1, R4),
98130- BPF_ALU64_REG(BPF_SUB, R1, R5),
98131- BPF_ALU64_REG(BPF_SUB, R1, R6),
98132- BPF_ALU64_REG(BPF_SUB, R1, R7),
98133- BPF_ALU64_REG(BPF_SUB, R1, R8),
98134- BPF_ALU64_REG(BPF_SUB, R1, R9),
98135- BPF_ALU64_IMM(BPF_SUB, R1, 10),
98136- BPF_ALU64_REG(BPF_SUB, R2, R0),
98137- BPF_ALU64_REG(BPF_SUB, R2, R1),
98138- BPF_ALU64_REG(BPF_SUB, R2, R3),
98139- BPF_ALU64_REG(BPF_SUB, R2, R4),
98140- BPF_ALU64_REG(BPF_SUB, R2, R5),
98141- BPF_ALU64_REG(BPF_SUB, R2, R6),
98142- BPF_ALU64_REG(BPF_SUB, R2, R7),
98143- BPF_ALU64_REG(BPF_SUB, R2, R8),
98144- BPF_ALU64_REG(BPF_SUB, R2, R9),
98145- BPF_ALU64_IMM(BPF_SUB, R2, 10),
98146- BPF_ALU64_REG(BPF_SUB, R3, R0),
98147- BPF_ALU64_REG(BPF_SUB, R3, R1),
98148- BPF_ALU64_REG(BPF_SUB, R3, R2),
98149- BPF_ALU64_REG(BPF_SUB, R3, R4),
98150- BPF_ALU64_REG(BPF_SUB, R3, R5),
98151- BPF_ALU64_REG(BPF_SUB, R3, R6),
98152- BPF_ALU64_REG(BPF_SUB, R3, R7),
98153- BPF_ALU64_REG(BPF_SUB, R3, R8),
98154- BPF_ALU64_REG(BPF_SUB, R3, R9),
98155- BPF_ALU64_IMM(BPF_SUB, R3, 10),
98156- BPF_ALU64_REG(BPF_SUB, R4, R0),
98157- BPF_ALU64_REG(BPF_SUB, R4, R1),
98158- BPF_ALU64_REG(BPF_SUB, R4, R2),
98159- BPF_ALU64_REG(BPF_SUB, R4, R3),
98160- BPF_ALU64_REG(BPF_SUB, R4, R5),
98161- BPF_ALU64_REG(BPF_SUB, R4, R6),
98162- BPF_ALU64_REG(BPF_SUB, R4, R7),
98163- BPF_ALU64_REG(BPF_SUB, R4, R8),
98164- BPF_ALU64_REG(BPF_SUB, R4, R9),
98165- BPF_ALU64_IMM(BPF_SUB, R4, 10),
98166- BPF_ALU64_REG(BPF_SUB, R5, R0),
98167- BPF_ALU64_REG(BPF_SUB, R5, R1),
98168- BPF_ALU64_REG(BPF_SUB, R5, R2),
98169- BPF_ALU64_REG(BPF_SUB, R5, R3),
98170- BPF_ALU64_REG(BPF_SUB, R5, R4),
98171- BPF_ALU64_REG(BPF_SUB, R5, R6),
98172- BPF_ALU64_REG(BPF_SUB, R5, R7),
98173- BPF_ALU64_REG(BPF_SUB, R5, R8),
98174- BPF_ALU64_REG(BPF_SUB, R5, R9),
98175- BPF_ALU64_IMM(BPF_SUB, R5, 10),
98176- BPF_ALU64_REG(BPF_SUB, R6, R0),
98177- BPF_ALU64_REG(BPF_SUB, R6, R1),
98178- BPF_ALU64_REG(BPF_SUB, R6, R2),
98179- BPF_ALU64_REG(BPF_SUB, R6, R3),
98180- BPF_ALU64_REG(BPF_SUB, R6, R4),
98181- BPF_ALU64_REG(BPF_SUB, R6, R5),
98182- BPF_ALU64_REG(BPF_SUB, R6, R7),
98183- BPF_ALU64_REG(BPF_SUB, R6, R8),
98184- BPF_ALU64_REG(BPF_SUB, R6, R9),
98185- BPF_ALU64_IMM(BPF_SUB, R6, 10),
98186- BPF_ALU64_REG(BPF_SUB, R7, R0),
98187- BPF_ALU64_REG(BPF_SUB, R7, R1),
98188- BPF_ALU64_REG(BPF_SUB, R7, R2),
98189- BPF_ALU64_REG(BPF_SUB, R7, R3),
98190- BPF_ALU64_REG(BPF_SUB, R7, R4),
98191- BPF_ALU64_REG(BPF_SUB, R7, R5),
98192- BPF_ALU64_REG(BPF_SUB, R7, R6),
98193- BPF_ALU64_REG(BPF_SUB, R7, R8),
98194- BPF_ALU64_REG(BPF_SUB, R7, R9),
98195- BPF_ALU64_IMM(BPF_SUB, R7, 10),
98196- BPF_ALU64_REG(BPF_SUB, R8, R0),
98197- BPF_ALU64_REG(BPF_SUB, R8, R1),
98198- BPF_ALU64_REG(BPF_SUB, R8, R2),
98199- BPF_ALU64_REG(BPF_SUB, R8, R3),
98200- BPF_ALU64_REG(BPF_SUB, R8, R4),
98201- BPF_ALU64_REG(BPF_SUB, R8, R5),
98202- BPF_ALU64_REG(BPF_SUB, R8, R6),
98203- BPF_ALU64_REG(BPF_SUB, R8, R7),
98204- BPF_ALU64_REG(BPF_SUB, R8, R9),
98205- BPF_ALU64_IMM(BPF_SUB, R8, 10),
98206- BPF_ALU64_REG(BPF_SUB, R9, R0),
98207- BPF_ALU64_REG(BPF_SUB, R9, R1),
98208- BPF_ALU64_REG(BPF_SUB, R9, R2),
98209- BPF_ALU64_REG(BPF_SUB, R9, R3),
98210- BPF_ALU64_REG(BPF_SUB, R9, R4),
98211- BPF_ALU64_REG(BPF_SUB, R9, R5),
98212- BPF_ALU64_REG(BPF_SUB, R9, R6),
98213- BPF_ALU64_REG(BPF_SUB, R9, R7),
98214- BPF_ALU64_REG(BPF_SUB, R9, R8),
98215- BPF_ALU64_IMM(BPF_SUB, R9, 10),
98216- BPF_ALU64_IMM(BPF_SUB, R0, 10),
98217- BPF_ALU64_IMM(BPF_NEG, R0, 0),
98218- BPF_ALU64_REG(BPF_SUB, R0, R1),
98219- BPF_ALU64_REG(BPF_SUB, R0, R2),
98220- BPF_ALU64_REG(BPF_SUB, R0, R3),
98221- BPF_ALU64_REG(BPF_SUB, R0, R4),
98222- BPF_ALU64_REG(BPF_SUB, R0, R5),
98223- BPF_ALU64_REG(BPF_SUB, R0, R6),
98224- BPF_ALU64_REG(BPF_SUB, R0, R7),
98225- BPF_ALU64_REG(BPF_SUB, R0, R8),
98226- BPF_ALU64_REG(BPF_SUB, R0, R9),
98227- BPF_EXIT_INSN(),
98228- },
98229- INTERNAL,
98230- { },
98231- { { 0, 11 } }
98232- },
98233- { /* Mainly checking JIT here. */
98234- "INT: XOR",
98235- .u.insns_int = {
98236- BPF_ALU64_REG(BPF_SUB, R0, R0),
98237- BPF_ALU64_REG(BPF_XOR, R1, R1),
98238- BPF_JMP_REG(BPF_JEQ, R0, R1, 1),
98239- BPF_EXIT_INSN(),
98240- BPF_ALU64_IMM(BPF_MOV, R0, 10),
98241- BPF_ALU64_IMM(BPF_MOV, R1, -1),
98242- BPF_ALU64_REG(BPF_SUB, R1, R1),
98243- BPF_ALU64_REG(BPF_XOR, R2, R2),
98244- BPF_JMP_REG(BPF_JEQ, R1, R2, 1),
98245- BPF_EXIT_INSN(),
98246- BPF_ALU64_REG(BPF_SUB, R2, R2),
98247- BPF_ALU64_REG(BPF_XOR, R3, R3),
98248- BPF_ALU64_IMM(BPF_MOV, R0, 10),
98249- BPF_ALU64_IMM(BPF_MOV, R1, -1),
98250- BPF_JMP_REG(BPF_JEQ, R2, R3, 1),
98251- BPF_EXIT_INSN(),
98252- BPF_ALU64_REG(BPF_SUB, R3, R3),
98253- BPF_ALU64_REG(BPF_XOR, R4, R4),
98254- BPF_ALU64_IMM(BPF_MOV, R2, 1),
98255- BPF_ALU64_IMM(BPF_MOV, R5, -1),
98256- BPF_JMP_REG(BPF_JEQ, R3, R4, 1),
98257- BPF_EXIT_INSN(),
98258- BPF_ALU64_REG(BPF_SUB, R4, R4),
98259- BPF_ALU64_REG(BPF_XOR, R5, R5),
98260- BPF_ALU64_IMM(BPF_MOV, R3, 1),
98261- BPF_ALU64_IMM(BPF_MOV, R7, -1),
98262- BPF_JMP_REG(BPF_JEQ, R5, R4, 1),
98263- BPF_EXIT_INSN(),
98264- BPF_ALU64_IMM(BPF_MOV, R5, 1),
98265- BPF_ALU64_REG(BPF_SUB, R5, R5),
98266- BPF_ALU64_REG(BPF_XOR, R6, R6),
98267- BPF_ALU64_IMM(BPF_MOV, R1, 1),
98268- BPF_ALU64_IMM(BPF_MOV, R8, -1),
98269- BPF_JMP_REG(BPF_JEQ, R5, R6, 1),
98270- BPF_EXIT_INSN(),
98271- BPF_ALU64_REG(BPF_SUB, R6, R6),
98272- BPF_ALU64_REG(BPF_XOR, R7, R7),
98273- BPF_JMP_REG(BPF_JEQ, R7, R6, 1),
98274- BPF_EXIT_INSN(),
98275- BPF_ALU64_REG(BPF_SUB, R7, R7),
98276- BPF_ALU64_REG(BPF_XOR, R8, R8),
98277- BPF_JMP_REG(BPF_JEQ, R7, R8, 1),
98278- BPF_EXIT_INSN(),
98279- BPF_ALU64_REG(BPF_SUB, R8, R8),
98280- BPF_ALU64_REG(BPF_XOR, R9, R9),
98281- BPF_JMP_REG(BPF_JEQ, R9, R8, 1),
98282- BPF_EXIT_INSN(),
98283- BPF_ALU64_REG(BPF_SUB, R9, R9),
98284- BPF_ALU64_REG(BPF_XOR, R0, R0),
98285- BPF_JMP_REG(BPF_JEQ, R9, R0, 1),
98286- BPF_EXIT_INSN(),
98287- BPF_ALU64_REG(BPF_SUB, R1, R1),
98288- BPF_ALU64_REG(BPF_XOR, R0, R0),
98289- BPF_JMP_REG(BPF_JEQ, R9, R0, 2),
98290- BPF_ALU64_IMM(BPF_MOV, R0, 0),
98291- BPF_EXIT_INSN(),
98292- BPF_ALU64_IMM(BPF_MOV, R0, 1),
98293- BPF_EXIT_INSN(),
98294- },
98295- INTERNAL,
98296- { },
98297- { { 0, 1 } }
98298- },
98299- { /* Mainly checking JIT here. */
98300- "INT: MUL",
98301- .u.insns_int = {
98302- BPF_ALU64_IMM(BPF_MOV, R0, 11),
98303- BPF_ALU64_IMM(BPF_MOV, R1, 1),
98304- BPF_ALU64_IMM(BPF_MOV, R2, 2),
98305- BPF_ALU64_IMM(BPF_MOV, R3, 3),
98306- BPF_ALU64_IMM(BPF_MOV, R4, 4),
98307- BPF_ALU64_IMM(BPF_MOV, R5, 5),
98308- BPF_ALU64_IMM(BPF_MOV, R6, 6),
98309- BPF_ALU64_IMM(BPF_MOV, R7, 7),
98310- BPF_ALU64_IMM(BPF_MOV, R8, 8),
98311- BPF_ALU64_IMM(BPF_MOV, R9, 9),
98312- BPF_ALU64_REG(BPF_MUL, R0, R0),
98313- BPF_ALU64_REG(BPF_MUL, R0, R1),
98314- BPF_ALU64_REG(BPF_MUL, R0, R2),
98315- BPF_ALU64_REG(BPF_MUL, R0, R3),
98316- BPF_ALU64_REG(BPF_MUL, R0, R4),
98317- BPF_ALU64_REG(BPF_MUL, R0, R5),
98318- BPF_ALU64_REG(BPF_MUL, R0, R6),
98319- BPF_ALU64_REG(BPF_MUL, R0, R7),
98320- BPF_ALU64_REG(BPF_MUL, R0, R8),
98321- BPF_ALU64_REG(BPF_MUL, R0, R9),
98322- BPF_ALU64_IMM(BPF_MUL, R0, 10),
98323- BPF_JMP_IMM(BPF_JEQ, R0, 439084800, 1),
98324- BPF_EXIT_INSN(),
98325- BPF_ALU64_REG(BPF_MUL, R1, R0),
98326- BPF_ALU64_REG(BPF_MUL, R1, R2),
98327- BPF_ALU64_REG(BPF_MUL, R1, R3),
98328- BPF_ALU64_REG(BPF_MUL, R1, R4),
98329- BPF_ALU64_REG(BPF_MUL, R1, R5),
98330- BPF_ALU64_REG(BPF_MUL, R1, R6),
98331- BPF_ALU64_REG(BPF_MUL, R1, R7),
98332- BPF_ALU64_REG(BPF_MUL, R1, R8),
98333- BPF_ALU64_REG(BPF_MUL, R1, R9),
98334- BPF_ALU64_IMM(BPF_MUL, R1, 10),
98335- BPF_ALU64_REG(BPF_MOV, R2, R1),
98336- BPF_ALU64_IMM(BPF_RSH, R2, 32),
98337- BPF_JMP_IMM(BPF_JEQ, R2, 0x5a924, 1),
98338- BPF_EXIT_INSN(),
98339- BPF_ALU64_IMM(BPF_LSH, R1, 32),
98340- BPF_ALU64_IMM(BPF_ARSH, R1, 32),
98341- BPF_JMP_IMM(BPF_JEQ, R1, 0xebb90000, 1),
98342- BPF_EXIT_INSN(),
98343- BPF_ALU64_REG(BPF_MUL, R2, R0),
98344- BPF_ALU64_REG(BPF_MUL, R2, R1),
98345- BPF_ALU64_REG(BPF_MUL, R2, R3),
98346- BPF_ALU64_REG(BPF_MUL, R2, R4),
98347- BPF_ALU64_REG(BPF_MUL, R2, R5),
98348- BPF_ALU64_REG(BPF_MUL, R2, R6),
98349- BPF_ALU64_REG(BPF_MUL, R2, R7),
98350- BPF_ALU64_REG(BPF_MUL, R2, R8),
98351- BPF_ALU64_REG(BPF_MUL, R2, R9),
98352- BPF_ALU64_IMM(BPF_MUL, R2, 10),
98353- BPF_ALU64_IMM(BPF_RSH, R2, 32),
98354- BPF_ALU64_REG(BPF_MOV, R0, R2),
98355- BPF_EXIT_INSN(),
98356- },
98357- INTERNAL,
98358- { },
98359- { { 0, 0x35d97ef2 } }
98360- },
98361- {
98362- "INT: ALU MIX",
98363- .u.insns_int = {
98364- BPF_ALU64_IMM(BPF_MOV, R0, 11),
98365- BPF_ALU64_IMM(BPF_ADD, R0, -1),
98366- BPF_ALU64_IMM(BPF_MOV, R2, 2),
98367- BPF_ALU64_IMM(BPF_XOR, R2, 3),
98368- BPF_ALU64_REG(BPF_DIV, R0, R2),
98369- BPF_JMP_IMM(BPF_JEQ, R0, 10, 1),
98370- BPF_EXIT_INSN(),
98371- BPF_ALU64_IMM(BPF_MOD, R0, 3),
98372- BPF_JMP_IMM(BPF_JEQ, R0, 1, 1),
98373- BPF_EXIT_INSN(),
98374- BPF_ALU64_IMM(BPF_MOV, R0, -1),
98375- BPF_EXIT_INSN(),
98376- },
98377- INTERNAL,
98378- { },
98379- { { 0, -1 } }
98380- },
98381- {
98382- "INT: DIV + ABS",
98383- .u.insns_int = {
98384- BPF_ALU64_REG(BPF_MOV, R6, R1),
98385- BPF_LD_ABS(BPF_B, 3),
98386- BPF_ALU64_IMM(BPF_MOV, R2, 2),
98387- BPF_ALU32_REG(BPF_DIV, R0, R2),
98388- BPF_ALU64_REG(BPF_MOV, R8, R0),
98389- BPF_LD_ABS(BPF_B, 4),
98390- BPF_ALU64_REG(BPF_ADD, R8, R0),
98391- BPF_LD_IND(BPF_B, R8, -70),
98392- BPF_EXIT_INSN(),
98393- },
98394- INTERNAL,
98395- { 10, 20, 30, 40, 50 },
98396- { { 4, 0 }, { 5, 10 } }
98397- },
98398- {
98399- "INT: DIV by zero",
98400- .u.insns_int = {
98401- BPF_ALU64_REG(BPF_MOV, R6, R1),
98402- BPF_ALU64_IMM(BPF_MOV, R7, 0),
98403- BPF_LD_ABS(BPF_B, 3),
98404- BPF_ALU32_REG(BPF_DIV, R0, R7),
98405- BPF_EXIT_INSN(),
98406- },
98407- INTERNAL,
98408- { 10, 20, 30, 40, 50 },
98409- { { 3, 0 }, { 4, 0 } }
98410- },
98411- {
98412- "check: missing ret",
98413- .u.insns = {
98414- BPF_STMT(BPF_LD | BPF_IMM, 1),
98415- },
98416- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
98417- { },
98418- { }
98419- },
98420- {
98421- "check: div_k_0",
98422- .u.insns = {
98423- BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0),
98424- BPF_STMT(BPF_RET | BPF_K, 0)
98425- },
98426- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
98427- { },
98428- { }
98429- },
98430- {
98431- "check: unknown insn",
98432- .u.insns = {
98433- /* seccomp insn, rejected in socket filter */
98434- BPF_STMT(BPF_LDX | BPF_W | BPF_ABS, 0),
98435- BPF_STMT(BPF_RET | BPF_K, 0)
98436- },
98437- CLASSIC | FLAG_EXPECTED_FAIL,
98438- { },
98439- { }
98440- },
98441- {
98442- "check: out of range spill/fill",
98443- .u.insns = {
98444- BPF_STMT(BPF_STX, 16),
98445- BPF_STMT(BPF_RET | BPF_K, 0)
98446- },
98447- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
98448- { },
98449- { }
98450- },
98451- {
98452- "JUMPS + HOLES",
98453- .u.insns = {
98454- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98455- BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 15),
98456- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98457- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98458- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98459- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98460- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98461- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98462- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98463- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98464- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98465- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98466- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98467- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98468- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98469- BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 3, 4),
98470- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98471- BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 1, 2),
98472- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98473- BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
98474- BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
98475- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98476- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98477- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98478- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98479- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98480- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98481- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98482- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98483- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98484- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98485- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98486- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98487- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98488- BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 2, 3),
98489- BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 1, 2),
98490- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98491- BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
98492- BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
98493- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98494- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98495- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98496- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98497- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98498- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98499- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98500- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98501- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98502- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98503- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98504- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98505- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98506- BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 2, 3),
98507- BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 1, 2),
98508- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98509- BPF_STMT(BPF_RET | BPF_A, 0),
98510- BPF_STMT(BPF_RET | BPF_A, 0),
98511- },
98512- CLASSIC,
98513- { 0x00, 0x1b, 0x21, 0x3c, 0x9d, 0xf8,
98514- 0x90, 0xe2, 0xba, 0x0a, 0x56, 0xb4,
98515- 0x08, 0x00,
98516- 0x45, 0x00, 0x00, 0x28, 0x00, 0x00,
98517- 0x20, 0x00, 0x40, 0x11, 0x00, 0x00, /* IP header */
98518- 0xc0, 0xa8, 0x33, 0x01,
98519- 0xc0, 0xa8, 0x33, 0x02,
98520- 0xbb, 0xb6,
98521- 0xa9, 0xfa,
98522- 0x00, 0x14, 0x00, 0x00,
98523- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
98524- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
98525- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
98526- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
98527- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
98528- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
98529- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
98530- 0xcc, 0xcc, 0xcc, 0xcc },
98531- { { 88, 0x001b } }
98532- },
98533- {
98534- "check: RET X",
98535- .u.insns = {
98536- BPF_STMT(BPF_RET | BPF_X, 0),
98537- },
98538- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
98539- { },
98540- { },
98541- },
98542- {
98543- "check: LDX + RET X",
98544- .u.insns = {
98545- BPF_STMT(BPF_LDX | BPF_IMM, 42),
98546- BPF_STMT(BPF_RET | BPF_X, 0),
98547- },
98548- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
98549- { },
98550- { },
98551- },
98552- { /* Mainly checking JIT here. */
98553- "M[]: alt STX + LDX",
98554- .u.insns = {
98555- BPF_STMT(BPF_LDX | BPF_IMM, 100),
98556- BPF_STMT(BPF_STX, 0),
98557- BPF_STMT(BPF_LDX | BPF_MEM, 0),
98558- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98559- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98560- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98561- BPF_STMT(BPF_STX, 1),
98562- BPF_STMT(BPF_LDX | BPF_MEM, 1),
98563- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98564- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98565- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98566- BPF_STMT(BPF_STX, 2),
98567- BPF_STMT(BPF_LDX | BPF_MEM, 2),
98568- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98569- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98570- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98571- BPF_STMT(BPF_STX, 3),
98572- BPF_STMT(BPF_LDX | BPF_MEM, 3),
98573- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98574- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98575- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98576- BPF_STMT(BPF_STX, 4),
98577- BPF_STMT(BPF_LDX | BPF_MEM, 4),
98578- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98579- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98580- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98581- BPF_STMT(BPF_STX, 5),
98582- BPF_STMT(BPF_LDX | BPF_MEM, 5),
98583- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98584- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98585- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98586- BPF_STMT(BPF_STX, 6),
98587- BPF_STMT(BPF_LDX | BPF_MEM, 6),
98588- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98589- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98590- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98591- BPF_STMT(BPF_STX, 7),
98592- BPF_STMT(BPF_LDX | BPF_MEM, 7),
98593- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98594- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98595- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98596- BPF_STMT(BPF_STX, 8),
98597- BPF_STMT(BPF_LDX | BPF_MEM, 8),
98598- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98599- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98600- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98601- BPF_STMT(BPF_STX, 9),
98602- BPF_STMT(BPF_LDX | BPF_MEM, 9),
98603- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98604- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98605- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98606- BPF_STMT(BPF_STX, 10),
98607- BPF_STMT(BPF_LDX | BPF_MEM, 10),
98608- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98609- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98610- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98611- BPF_STMT(BPF_STX, 11),
98612- BPF_STMT(BPF_LDX | BPF_MEM, 11),
98613- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98614- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98615- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98616- BPF_STMT(BPF_STX, 12),
98617- BPF_STMT(BPF_LDX | BPF_MEM, 12),
98618- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98619- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98620- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98621- BPF_STMT(BPF_STX, 13),
98622- BPF_STMT(BPF_LDX | BPF_MEM, 13),
98623- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98624- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98625- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98626- BPF_STMT(BPF_STX, 14),
98627- BPF_STMT(BPF_LDX | BPF_MEM, 14),
98628- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98629- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98630- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98631- BPF_STMT(BPF_STX, 15),
98632- BPF_STMT(BPF_LDX | BPF_MEM, 15),
98633- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98634- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98635- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98636- BPF_STMT(BPF_RET | BPF_A, 0),
98637- },
98638- CLASSIC | FLAG_NO_DATA,
98639- { },
98640- { { 0, 116 } },
98641- },
98642- { /* Mainly checking JIT here. */
98643- "M[]: full STX + full LDX",
98644- .u.insns = {
98645- BPF_STMT(BPF_LDX | BPF_IMM, 0xbadfeedb),
98646- BPF_STMT(BPF_STX, 0),
98647- BPF_STMT(BPF_LDX | BPF_IMM, 0xecabedae),
98648- BPF_STMT(BPF_STX, 1),
98649- BPF_STMT(BPF_LDX | BPF_IMM, 0xafccfeaf),
98650- BPF_STMT(BPF_STX, 2),
98651- BPF_STMT(BPF_LDX | BPF_IMM, 0xbffdcedc),
98652- BPF_STMT(BPF_STX, 3),
98653- BPF_STMT(BPF_LDX | BPF_IMM, 0xfbbbdccb),
98654- BPF_STMT(BPF_STX, 4),
98655- BPF_STMT(BPF_LDX | BPF_IMM, 0xfbabcbda),
98656- BPF_STMT(BPF_STX, 5),
98657- BPF_STMT(BPF_LDX | BPF_IMM, 0xaedecbdb),
98658- BPF_STMT(BPF_STX, 6),
98659- BPF_STMT(BPF_LDX | BPF_IMM, 0xadebbade),
98660- BPF_STMT(BPF_STX, 7),
98661- BPF_STMT(BPF_LDX | BPF_IMM, 0xfcfcfaec),
98662- BPF_STMT(BPF_STX, 8),
98663- BPF_STMT(BPF_LDX | BPF_IMM, 0xbcdddbdc),
98664- BPF_STMT(BPF_STX, 9),
98665- BPF_STMT(BPF_LDX | BPF_IMM, 0xfeefdfac),
98666- BPF_STMT(BPF_STX, 10),
98667- BPF_STMT(BPF_LDX | BPF_IMM, 0xcddcdeea),
98668- BPF_STMT(BPF_STX, 11),
98669- BPF_STMT(BPF_LDX | BPF_IMM, 0xaccfaebb),
98670- BPF_STMT(BPF_STX, 12),
98671- BPF_STMT(BPF_LDX | BPF_IMM, 0xbdcccdcf),
98672- BPF_STMT(BPF_STX, 13),
98673- BPF_STMT(BPF_LDX | BPF_IMM, 0xaaedecde),
98674- BPF_STMT(BPF_STX, 14),
98675- BPF_STMT(BPF_LDX | BPF_IMM, 0xfaeacdad),
98676- BPF_STMT(BPF_STX, 15),
98677- BPF_STMT(BPF_LDX | BPF_MEM, 0),
98678- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98679- BPF_STMT(BPF_LDX | BPF_MEM, 1),
98680- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98681- BPF_STMT(BPF_LDX | BPF_MEM, 2),
98682- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98683- BPF_STMT(BPF_LDX | BPF_MEM, 3),
98684- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98685- BPF_STMT(BPF_LDX | BPF_MEM, 4),
98686- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98687- BPF_STMT(BPF_LDX | BPF_MEM, 5),
98688- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98689- BPF_STMT(BPF_LDX | BPF_MEM, 6),
98690- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98691- BPF_STMT(BPF_LDX | BPF_MEM, 7),
98692- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98693- BPF_STMT(BPF_LDX | BPF_MEM, 8),
98694- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98695- BPF_STMT(BPF_LDX | BPF_MEM, 9),
98696- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98697- BPF_STMT(BPF_LDX | BPF_MEM, 10),
98698- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98699- BPF_STMT(BPF_LDX | BPF_MEM, 11),
98700- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98701- BPF_STMT(BPF_LDX | BPF_MEM, 12),
98702- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98703- BPF_STMT(BPF_LDX | BPF_MEM, 13),
98704- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98705- BPF_STMT(BPF_LDX | BPF_MEM, 14),
98706- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98707- BPF_STMT(BPF_LDX | BPF_MEM, 15),
98708- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98709- BPF_STMT(BPF_RET | BPF_A, 0),
98710- },
98711- CLASSIC | FLAG_NO_DATA,
98712- { },
98713- { { 0, 0x2a5a5e5 } },
98714- },
98715- {
98716- "check: SKF_AD_MAX",
98717- .u.insns = {
98718- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98719- SKF_AD_OFF + SKF_AD_MAX),
98720- BPF_STMT(BPF_RET | BPF_A, 0),
98721- },
98722- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
98723- { },
98724- { },
98725- },
98726- { /* Passes checker but fails during runtime. */
98727- "LD [SKF_AD_OFF-1]",
98728- .u.insns = {
98729- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
98730- SKF_AD_OFF - 1),
98731- BPF_STMT(BPF_RET | BPF_K, 1),
98732- },
98733- CLASSIC,
98734- { },
98735- { { 1, 0 } },
98736- },
98737-};
98738-
98739-static struct net_device dev;
98740-
98741-static struct sk_buff *populate_skb(char *buf, int size)
98742-{
98743- struct sk_buff *skb;
98744-
98745- if (size >= MAX_DATA)
98746- return NULL;
98747-
98748- skb = alloc_skb(MAX_DATA, GFP_KERNEL);
98749- if (!skb)
98750- return NULL;
98751-
98752- memcpy(__skb_put(skb, size), buf, size);
98753-
98754- /* Initialize a fake skb with test pattern. */
98755- skb_reset_mac_header(skb);
98756- skb->protocol = htons(ETH_P_IP);
98757- skb->pkt_type = SKB_TYPE;
98758- skb->mark = SKB_MARK;
98759- skb->hash = SKB_HASH;
98760- skb->queue_mapping = SKB_QUEUE_MAP;
98761- skb->vlan_tci = SKB_VLAN_TCI;
98762- skb->dev = &dev;
98763- skb->dev->ifindex = SKB_DEV_IFINDEX;
98764- skb->dev->type = SKB_DEV_TYPE;
98765- skb_set_network_header(skb, min(size, ETH_HLEN));
98766-
98767- return skb;
98768-}
98769-
98770-static void *generate_test_data(struct bpf_test *test, int sub)
98771-{
98772- if (test->aux & FLAG_NO_DATA)
98773- return NULL;
98774-
98775- /* Test case expects an skb, so populate one. Various
98776- * subtests generate skbs of different sizes based on
98777- * the same data.
98778- */
98779- return populate_skb(test->data, test->test[sub].data_size);
98780-}
98781-
98782-static void release_test_data(const struct bpf_test *test, void *data)
98783-{
98784- if (test->aux & FLAG_NO_DATA)
98785- return;
98786-
98787- kfree_skb(data);
98788-}
98789-
98790-static int probe_filter_length(struct sock_filter *fp)
98791-{
98792- int len = 0;
98793-
98794- for (len = MAX_INSNS - 1; len > 0; --len)
98795- if (fp[len].code != 0 || fp[len].k != 0)
98796- break;
98797-
98798- return len + 1;
98799-}
98800-
98801-static struct sk_filter *generate_filter(int which, int *err)
98802-{
98803- struct sk_filter *fp;
98804- struct sock_fprog_kern fprog;
98805- unsigned int flen = probe_filter_length(tests[which].u.insns);
98806- __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
98807-
98808- switch (test_type) {
98809- case CLASSIC:
98810- fprog.filter = tests[which].u.insns;
98811- fprog.len = flen;
98812-
98813- *err = sk_unattached_filter_create(&fp, &fprog);
98814- if (tests[which].aux & FLAG_EXPECTED_FAIL) {
98815- if (*err == -EINVAL) {
98816- pr_cont("PASS\n");
98817- /* Verifier rejected filter as expected. */
98818- *err = 0;
98819- return NULL;
98820- } else {
98821- pr_cont("UNEXPECTED_PASS\n");
98822- /* Verifier didn't reject the test that's
98823- * bad enough, just return!
98824- */
98825- *err = -EINVAL;
98826- return NULL;
98827- }
98828- }
98829- /* We don't expect to fail. */
98830- if (*err) {
98831- pr_cont("FAIL to attach err=%d len=%d\n",
98832- *err, fprog.len);
98833- return NULL;
98834- }
98835- break;
98836-
98837- case INTERNAL:
98838- fp = kzalloc(sk_filter_size(flen), GFP_KERNEL);
98839- if (fp == NULL) {
98840- pr_cont("UNEXPECTED_FAIL no memory left\n");
98841- *err = -ENOMEM;
98842- return NULL;
98843- }
98844-
98845- fp->len = flen;
98846- memcpy(fp->insnsi, tests[which].u.insns_int,
98847- fp->len * sizeof(struct sock_filter_int));
98848-
98849- sk_filter_select_runtime(fp);
98850- break;
98851- }
98852-
98853- *err = 0;
98854- return fp;
98855-}
98856-
98857-static void release_filter(struct sk_filter *fp, int which)
98858-{
98859- __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
98860-
98861- switch (test_type) {
98862- case CLASSIC:
98863- sk_unattached_filter_destroy(fp);
98864- break;
98865- case INTERNAL:
98866- sk_filter_free(fp);
98867- break;
98868- }
98869-}
98870-
98871-static int __run_one(const struct sk_filter *fp, const void *data,
98872- int runs, u64 *duration)
98873-{
98874- u64 start, finish;
98875- int ret, i;
98876-
98877- start = ktime_to_us(ktime_get());
98878-
98879- for (i = 0; i < runs; i++)
98880- ret = SK_RUN_FILTER(fp, data);
98881-
98882- finish = ktime_to_us(ktime_get());
98883-
98884- *duration = (finish - start) * 1000ULL;
98885- do_div(*duration, runs);
98886-
98887- return ret;
98888-}
98889-
98890-static int run_one(const struct sk_filter *fp, struct bpf_test *test)
98891-{
98892- int err_cnt = 0, i, runs = MAX_TESTRUNS;
98893-
98894- for (i = 0; i < MAX_SUBTESTS; i++) {
98895- void *data;
98896- u64 duration;
98897- u32 ret;
98898-
98899- if (test->test[i].data_size == 0 &&
98900- test->test[i].result == 0)
98901- break;
98902-
98903- data = generate_test_data(test, i);
98904- ret = __run_one(fp, data, runs, &duration);
98905- release_test_data(test, data);
98906-
98907- if (ret == test->test[i].result) {
98908- pr_cont("%lld ", duration);
98909- } else {
98910- pr_cont("ret %d != %d ", ret,
98911- test->test[i].result);
98912- err_cnt++;
98913- }
98914- }
98915-
98916- return err_cnt;
98917-}
98918-
98919-static __init int test_bpf(void)
98920-{
98921- int i, err_cnt = 0, pass_cnt = 0;
98922-
98923- for (i = 0; i < ARRAY_SIZE(tests); i++) {
98924- struct sk_filter *fp;
98925- int err;
98926-
98927- pr_info("#%d %s ", i, tests[i].descr);
98928-
98929- fp = generate_filter(i, &err);
98930- if (fp == NULL) {
98931- if (err == 0) {
98932- pass_cnt++;
98933- continue;
98934- }
98935-
98936- return err;
98937- }
98938- err = run_one(fp, &tests[i]);
98939- release_filter(fp, i);
98940-
98941- if (err) {
98942- pr_cont("FAIL (%d times)\n", err);
98943- err_cnt++;
98944- } else {
98945- pr_cont("PASS\n");
98946- pass_cnt++;
98947- }
98948- }
98949-
98950- pr_info("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);
98951- return err_cnt ? -EINVAL : 0;
98952-}
98953-
98954-static int __init test_bpf_init(void)
98955-{
98956- return test_bpf();
98957-}
98958-
98959-static void __exit test_bpf_exit(void)
98960-{
98961-}
98962-
98963-module_init(test_bpf_init);
98964-module_exit(test_bpf_exit);
98965-
98966-MODULE_LICENSE("GPL");
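
The classic-BPF entries in the table above are built with the same BPF_STMT()/BPF_JUMP() macros that userspace uses for socket filters, so their behaviour is easy to reproduce outside the test module. A minimal, runnable userspace sketch using the standard SO_ATTACH_FILTER API (the filter itself is illustrative, not one of the test cases):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/filter.h>

int main(void)
{
	/* Accept a datagram only if its first byte is 0x45 (IPv4, IHL=5). */
	struct sock_filter insns[] = {
		BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x45, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* accept */
		BPF_STMT(BPF_RET | BPF_K, 0),		/* drop */
	};
	struct sock_fprog prog = { .len = 4, .filter = insns };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
				 &prog, sizeof(prog)) < 0)
		perror("SO_ATTACH_FILTER");
	return 0;
}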
98967diff --git a/lib/usercopy.c b/lib/usercopy.c
98968index 4f5b1dd..7cab418 100644
98969--- a/lib/usercopy.c
98970+++ b/lib/usercopy.c
98971@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
98972 WARN(1, "Buffer overflow detected!\n");
98973 }
98974 EXPORT_SYMBOL(copy_from_user_overflow);
98975+
98976+void copy_to_user_overflow(void)
98977+{
98978+ WARN(1, "Buffer overflow detected!\n");
98979+}
98980+EXPORT_SYMBOL(copy_to_user_overflow);
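
The copy_*_user_overflow() stubs are the reporting side of grsecurity's compile-time object-size checking: when the instrumented uaccess helpers can prove the requested length exceeds the destination (or source) object, they branch to one of these WARN stubs instead of copying. A sketch of the call-site shape, under assumptions — copy_to_user_checked() and obj_size are illustrative names, not the patch's actual helpers:

/* Hypothetical sketch of the checked-copy pattern; the real checks are
 * generated from __builtin_object_size() in the uaccess headers. */
static inline unsigned long
copy_to_user_checked(void __user *to, const void *from,
		     unsigned long n, size_t obj_size)
{
	if (n > obj_size) {
		copy_to_user_overflow();	/* WARN and refuse to copy */
		return n;			/* report all bytes uncopied */
	}
	return copy_to_user(to, from, n);
}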
98981diff --git a/lib/vsprintf.c b/lib/vsprintf.c
98982index 6fe2c84..2fe5ec6 100644
98983--- a/lib/vsprintf.c
98984+++ b/lib/vsprintf.c
98985@@ -16,6 +16,9 @@
98986 * - scnprintf and vscnprintf
98987 */
98988
98989+#ifdef CONFIG_GRKERNSEC_HIDESYM
98990+#define __INCLUDED_BY_HIDESYM 1
98991+#endif
98992 #include <stdarg.h>
98993 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
98994 #include <linux/types.h>
98995@@ -624,7 +627,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
98996 #ifdef CONFIG_KALLSYMS
98997 if (*fmt == 'B')
98998 sprint_backtrace(sym, value);
98999- else if (*fmt != 'f' && *fmt != 's')
99000+ else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
99001 sprint_symbol(sym, value);
99002 else
99003 sprint_symbol_no_offset(sym, value);
99004@@ -1183,7 +1186,11 @@ char *address_val(char *buf, char *end, const void *addr,
99005 return number(buf, end, num, spec);
99006 }
99007
99008+#ifdef CONFIG_GRKERNSEC_HIDESYM
99009+int kptr_restrict __read_mostly = 2;
99010+#else
99011 int kptr_restrict __read_mostly;
99012+#endif
99013
99014 /*
99015 * Show a '%p' thing. A kernel extension is that the '%p' is followed
99016@@ -1194,8 +1201,10 @@ int kptr_restrict __read_mostly;
99017 *
99018 * - 'F' For symbolic function descriptor pointers with offset
99019 * - 'f' For simple symbolic function names without offset
99020+ * - 'X' For simple symbolic function names without offset approved for use with GRKERNSEC_HIDESYM
99021 * - 'S' For symbolic direct pointers with offset
99022 * - 's' For symbolic direct pointers without offset
99023+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
99024 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
99025 * - 'B' For backtraced symbolic direct pointers with offset
99026 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
99027@@ -1263,12 +1272,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
99028
99029 if (!ptr && *fmt != 'K') {
99030 /*
99031- * Print (null) with the same width as a pointer so it makes
99032+ * Print (nil) with the same width as a pointer so it makes
99033 * tabular output look nice.
99034 */
99035 if (spec.field_width == -1)
99036 spec.field_width = default_width;
99037- return string(buf, end, "(null)", spec);
99038+ return string(buf, end, "(nil)", spec);
99039 }
99040
99041 switch (*fmt) {
99042@@ -1278,6 +1287,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
99043 /* Fallthrough */
99044 case 'S':
99045 case 's':
99046+#ifdef CONFIG_GRKERNSEC_HIDESYM
99047+ break;
99048+#else
99049+ return symbol_string(buf, end, ptr, spec, fmt);
99050+#endif
99051+ case 'X':
99052+ ptr = dereference_function_descriptor(ptr);
99053+ case 'A':
99054 case 'B':
99055 return symbol_string(buf, end, ptr, spec, fmt);
99056 case 'R':
99057@@ -1333,6 +1350,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
99058 va_end(va);
99059 return buf;
99060 }
99061+ case 'P':
99062+ break;
99063 case 'K':
99064 /*
99065 * %pK cannot be used in IRQ context because its test
99066@@ -1390,6 +1409,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
99067 ((const struct file *)ptr)->f_path.dentry,
99068 spec, fmt);
99069 }
99070+
99071+#ifdef CONFIG_GRKERNSEC_HIDESYM
99072+ /* 'P' = approved pointers to copy to userland,
99073+ as in the /proc/kallsyms case, as we make it display nothing
99074+ for non-root users, and the real contents for root users
99075+ 'X' = approved simple symbols
99076+ Also ignore 'K' pointers, since we force their NULLing for non-root users
99077+ above
99078+ */
99079+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
99080+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
99081+ dump_stack();
99082+ ptr = NULL;
99083+ }
99084+#endif
99085+
99086 spec.flags |= SMALL;
99087 if (spec.field_width == -1) {
99088 spec.field_width = default_width;
99089@@ -2089,11 +2124,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
99090 typeof(type) value; \
99091 if (sizeof(type) == 8) { \
99092 args = PTR_ALIGN(args, sizeof(u32)); \
99093- *(u32 *)&value = *(u32 *)args; \
99094- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
99095+ *(u32 *)&value = *(const u32 *)args; \
99096+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
99097 } else { \
99098 args = PTR_ALIGN(args, sizeof(type)); \
99099- value = *(typeof(type) *)args; \
99100+ value = *(const typeof(type) *)args; \
99101 } \
99102 args += sizeof(type); \
99103 value; \
99104@@ -2156,7 +2191,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
99105 case FORMAT_TYPE_STR: {
99106 const char *str_arg = args;
99107 args += strlen(str_arg) + 1;
99108- str = string(str, end, (char *)str_arg, spec);
99109+ str = string(str, end, str_arg, spec);
99110 break;
99111 }
99112
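
With the vsprintf.c changes above, %pX and %pA behave like %ps/%pS but remain permitted under GRKERNSEC_HIDESYM, and %pP marks a raw pointer as approved for userland-visible output; any other %p variant whose result lands in a userland-bound buffer now trips the infoleak detector. Usage sketch, mirroring the kmemleak conversion later in this patch (some_function_ptr is a placeholder name):

/* Kernel-style usage of the specifiers added above. */
seq_printf(seq, "  [<%pP>] %pA\n", ptr, ptr);	/* approved raw ptr + symbol */
printk(KERN_DEBUG "handler: %pX\n", some_function_ptr);	/* symbol, no offset */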
99113diff --git a/localversion-grsec b/localversion-grsec
99114new file mode 100644
99115index 0000000..7cd6065
99116--- /dev/null
99117+++ b/localversion-grsec
99118@@ -0,0 +1 @@
99119+-grsec
99120diff --git a/mm/Kconfig b/mm/Kconfig
99121index 3e9977a..205074f 100644
99122--- a/mm/Kconfig
99123+++ b/mm/Kconfig
99124@@ -333,10 +333,11 @@ config KSM
99125 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
99126
99127 config DEFAULT_MMAP_MIN_ADDR
99128- int "Low address space to protect from user allocation"
99129+ int "Low address space to protect from user allocation"
99130 depends on MMU
99131- default 4096
99132- help
99133+ default 32768 if ALPHA || ARM || PARISC || SPARC32
99134+ default 65536
99135+ help
99136 This is the portion of low virtual memory which should be protected
99137 from userspace allocation. Keeping a user from writing to low pages
99138 can help reduce the impact of kernel NULL pointer bugs.
99139@@ -367,7 +368,7 @@ config MEMORY_FAILURE
99140
99141 config HWPOISON_INJECT
99142 tristate "HWPoison pages injector"
99143- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
99144+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
99145 select PROC_PAGE_MONITOR
99146
99147 config NOMMU_INITIAL_TRIM_EXCESS
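
Raising DEFAULT_MMAP_MIN_ADDR from 4096 to 65536 (32768 on architectures whose ABIs need lower mappings) widens the unmappable low region that blunts NULL-pointer-dereference exploits. The effective value stays runtime-tunable; a small runnable userspace check:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	unsigned long min_addr = 0;
	FILE *f = fopen("/proc/sys/vm/mmap_min_addr", "r");

	if (f && fscanf(f, "%lu", &min_addr) == 1)
		printf("vm.mmap_min_addr = %lu\n", min_addr);
	if (f)
		fclose(f);

	/* Mapping page 0 should fail for unprivileged users. */
	void *p = mmap((void *)0, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	if (p == MAP_FAILED)
		perror("mmap(0)");	/* expect EPERM */
	return 0;
}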
99148diff --git a/mm/backing-dev.c b/mm/backing-dev.c
99149index 1706cbb..f89dbca 100644
99150--- a/mm/backing-dev.c
99151+++ b/mm/backing-dev.c
99152@@ -12,7 +12,7 @@
99153 #include <linux/device.h>
99154 #include <trace/events/writeback.h>
99155
99156-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
99157+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
99158
99159 struct backing_dev_info default_backing_dev_info = {
99160 .name = "default",
99161@@ -533,7 +533,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
99162 return err;
99163
99164 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
99165- atomic_long_inc_return(&bdi_seq));
99166+ atomic_long_inc_return_unchecked(&bdi_seq));
99167 if (err) {
99168 bdi_destroy(bdi);
99169 return err;
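
bdi_seq only hands out monotonically increasing device names, so wraparound is harmless; converting it to atomic_long_unchecked_t exempts it from PaX's REFCOUNT overflow trap, which would otherwise saturate and WARN on overflow. The conversion pattern in isolation (the _unchecked types and ops are grsecurity additions; on non-PaX configs they alias the plain atomics):

/* Sketch of the pattern applied throughout this patch. */
static atomic_long_unchecked_t seq = ATOMIC_LONG_INIT(0);

static long next_id(void)
{
	/* _unchecked: overflow of a naming counter is benign, so it is
	 * deliberately excluded from REFCOUNT overflow detection. */
	return atomic_long_inc_return_unchecked(&seq);
}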
99170diff --git a/mm/filemap.c b/mm/filemap.c
99171index 900edfa..ff056b1 100644
99172--- a/mm/filemap.c
99173+++ b/mm/filemap.c
99174@@ -2074,7 +2074,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
99175 struct address_space *mapping = file->f_mapping;
99176
99177 if (!mapping->a_ops->readpage)
99178- return -ENOEXEC;
99179+ return -ENODEV;
99180 file_accessed(file);
99181 vma->vm_ops = &generic_file_vm_ops;
99182 return 0;
99183@@ -2252,6 +2252,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
99184 *pos = i_size_read(inode);
99185
99186 if (limit != RLIM_INFINITY) {
99187+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
99188 if (*pos >= limit) {
99189 send_sig(SIGXFSZ, current, 0);
99190 return -EFBIG;
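
gr_learn_resource() feeds the attempted file size into gracl's resource-learning mode just before the RLIMIT_FSIZE check fires. The enforcement path it observes is easy to exercise from userspace (runnable; writes a scratch file in the current directory):

#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 4096, .rlim_max = 4096 };
	char buf[8192];

	signal(SIGXFSZ, SIG_IGN);	/* get a short write instead of dying */
	setrlimit(RLIMIT_FSIZE, &rl);
	memset(buf, 'x', sizeof(buf));

	int fd = open("scratch.tmp", O_CREAT | O_WRONLY | O_TRUNC, 0600);
	ssize_t n = write(fd, buf, sizeof(buf));	/* truncated at 4096 */
	printf("wrote %zd of %zu bytes\n", n, sizeof(buf));
	close(fd);
	unlink("scratch.tmp");
	return 0;
}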
99191diff --git a/mm/fremap.c b/mm/fremap.c
99192index 72b8fa3..c5b39f1 100644
99193--- a/mm/fremap.c
99194+++ b/mm/fremap.c
99195@@ -180,6 +180,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
99196 retry:
99197 vma = find_vma(mm, start);
99198
99199+#ifdef CONFIG_PAX_SEGMEXEC
99200+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
99201+ goto out;
99202+#endif
99203+
99204 /*
99205 * Make sure the vma is shared, that it supports prefaulting,
99206 * and that the remapped range is valid and fully within
99207diff --git a/mm/gup.c b/mm/gup.c
99208index cc5a9e7..d496acf 100644
99209--- a/mm/gup.c
99210+++ b/mm/gup.c
99211@@ -265,11 +265,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
99212 unsigned int fault_flags = 0;
99213 int ret;
99214
99215- /* For mlock, just skip the stack guard page. */
99216- if ((*flags & FOLL_MLOCK) &&
99217- (stack_guard_page_start(vma, address) ||
99218- stack_guard_page_end(vma, address + PAGE_SIZE)))
99219- return -ENOENT;
99220 if (*flags & FOLL_WRITE)
99221 fault_flags |= FAULT_FLAG_WRITE;
99222 if (nonblocking)
99223@@ -424,14 +419,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
99224 if (!(gup_flags & FOLL_FORCE))
99225 gup_flags |= FOLL_NUMA;
99226
99227- do {
99228+ while (nr_pages) {
99229 struct page *page;
99230 unsigned int foll_flags = gup_flags;
99231 unsigned int page_increm;
99232
99233 /* first iteration or cross vma bound */
99234 if (!vma || start >= vma->vm_end) {
99235- vma = find_extend_vma(mm, start);
99236+ vma = find_vma(mm, start);
99237 if (!vma && in_gate_area(mm, start)) {
99238 int ret;
99239 ret = get_gate_page(mm, start & PAGE_MASK,
99240@@ -443,7 +438,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
99241 goto next_page;
99242 }
99243
99244- if (!vma || check_vma_flags(vma, gup_flags))
99245+ if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
99246 return i ? : -EFAULT;
99247 if (is_vm_hugetlb_page(vma)) {
99248 i = follow_hugetlb_page(mm, vma, pages, vmas,
99249@@ -498,7 +493,7 @@ next_page:
99250 i += page_increm;
99251 start += page_increm * PAGE_SIZE;
99252 nr_pages -= page_increm;
99253- } while (nr_pages);
99254+ }
99255 return i;
99256 }
99257 EXPORT_SYMBOL(__get_user_pages);
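
The gup change above is subtle: find_extend_vma() will grow a VM_GROWSDOWN stack VMA downward to cover the requested address, so get_user_pages() could implicitly expand a task's stack. Switching to find_vma() with an explicit lower-bound check removes that side effect:

/* Illustration of the semantic difference, not the kernel's code.
 * find_vma() returns the first VMA with vm_end > start, which may lie
 * entirely above start; the added check rejects such a hole rather
 * than letting a stack VMA be extended down to cover it. */
vma = find_vma(mm, start);
if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
	return i ? : -EFAULT;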
99258diff --git a/mm/highmem.c b/mm/highmem.c
99259index b32b70c..e512eb0 100644
99260--- a/mm/highmem.c
99261+++ b/mm/highmem.c
99262@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
99263 * So no dangers, even with speculative execution.
99264 */
99265 page = pte_page(pkmap_page_table[i]);
99266+ pax_open_kernel();
99267 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
99268-
99269+ pax_close_kernel();
99270 set_page_address(page, NULL);
99271 need_flush = 1;
99272 }
99273@@ -198,9 +199,11 @@ start:
99274 }
99275 }
99276 vaddr = PKMAP_ADDR(last_pkmap_nr);
99277+
99278+ pax_open_kernel();
99279 set_pte_at(&init_mm, vaddr,
99280 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
99281-
99282+ pax_close_kernel();
99283 pkmap_count[last_pkmap_nr] = 1;
99284 set_page_address(page, (void *)vaddr);
99285
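
Under PaX KERNEXEC, kernel page tables live in read-only memory, so direct PTE writes like the pkmap updates above must be bracketed by pax_open_kernel()/pax_close_kernel(). A simplified sketch of what such a pair might do on x86, assuming the CR0.WP-toggling approach (the real PaX helpers also handle per-arch and PCID details):

/* Hypothetical simplified sketch, not the patch's implementation. */
static inline void my_open_kernel(void)
{
	preempt_disable();
	write_cr0(read_cr0() & ~X86_CR0_WP);	/* allow writes to RO pages */
	barrier();
}

static inline void my_close_kernel(void)
{
	barrier();
	write_cr0(read_cr0() | X86_CR0_WP);	/* restore write protection */
	preempt_enable();
}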
99286diff --git a/mm/hugetlb.c b/mm/hugetlb.c
99287index 7a0a73d..d583cca 100644
99288--- a/mm/hugetlb.c
99289+++ b/mm/hugetlb.c
99290@@ -2250,6 +2250,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
99291 struct hstate *h = &default_hstate;
99292 unsigned long tmp;
99293 int ret;
99294+ ctl_table_no_const hugetlb_table;
99295
99296 if (!hugepages_supported())
99297 return -ENOTSUPP;
99298@@ -2259,9 +2260,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
99299 if (write && hstate_is_gigantic(h) && !gigantic_page_supported())
99300 return -EINVAL;
99301
99302- table->data = &tmp;
99303- table->maxlen = sizeof(unsigned long);
99304- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
99305+ hugetlb_table = *table;
99306+ hugetlb_table.data = &tmp;
99307+ hugetlb_table.maxlen = sizeof(unsigned long);
99308+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
99309 if (ret)
99310 goto out;
99311
99312@@ -2306,6 +2308,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
99313 struct hstate *h = &default_hstate;
99314 unsigned long tmp;
99315 int ret;
99316+ ctl_table_no_const hugetlb_table;
99317
99318 if (!hugepages_supported())
99319 return -ENOTSUPP;
99320@@ -2315,9 +2318,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
99321 if (write && hstate_is_gigantic(h))
99322 return -EINVAL;
99323
99324- table->data = &tmp;
99325- table->maxlen = sizeof(unsigned long);
99326- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
99327+ hugetlb_table = *table;
99328+ hugetlb_table.data = &tmp;
99329+ hugetlb_table.maxlen = sizeof(unsigned long);
99330+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
99331 if (ret)
99332 goto out;
99333
99334@@ -2798,6 +2802,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
99335 return 1;
99336 }
99337
99338+#ifdef CONFIG_PAX_SEGMEXEC
99339+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
99340+{
99341+ struct mm_struct *mm = vma->vm_mm;
99342+ struct vm_area_struct *vma_m;
99343+ unsigned long address_m;
99344+ pte_t *ptep_m;
99345+
99346+ vma_m = pax_find_mirror_vma(vma);
99347+ if (!vma_m)
99348+ return;
99349+
99350+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
99351+ address_m = address + SEGMEXEC_TASK_SIZE;
99352+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
99353+ get_page(page_m);
99354+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
99355+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
99356+}
99357+#endif
99358+
99359 /*
99360 * Hugetlb_cow() should be called with page lock of the original hugepage held.
99361 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
99362@@ -2915,6 +2940,11 @@ retry_avoidcopy:
99363 make_huge_pte(vma, new_page, 1));
99364 page_remove_rmap(old_page);
99365 hugepage_add_new_anon_rmap(new_page, vma, address);
99366+
99367+#ifdef CONFIG_PAX_SEGMEXEC
99368+ pax_mirror_huge_pte(vma, address, new_page);
99369+#endif
99370+
99371 /* Make the old page be freed below */
99372 new_page = old_page;
99373 }
99374@@ -3074,6 +3104,10 @@ retry:
99375 && (vma->vm_flags & VM_SHARED)));
99376 set_huge_pte_at(mm, address, ptep, new_pte);
99377
99378+#ifdef CONFIG_PAX_SEGMEXEC
99379+ pax_mirror_huge_pte(vma, address, page);
99380+#endif
99381+
99382 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
99383 /* Optimization, do the COW without a second fault */
99384 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
99385@@ -3140,6 +3174,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
99386 struct hstate *h = hstate_vma(vma);
99387 struct address_space *mapping;
99388
99389+#ifdef CONFIG_PAX_SEGMEXEC
99390+ struct vm_area_struct *vma_m;
99391+#endif
99392+
99393 address &= huge_page_mask(h);
99394
99395 ptep = huge_pte_offset(mm, address);
99396@@ -3153,6 +3191,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
99397 VM_FAULT_SET_HINDEX(hstate_index(h));
99398 }
99399
99400+#ifdef CONFIG_PAX_SEGMEXEC
99401+ vma_m = pax_find_mirror_vma(vma);
99402+ if (vma_m) {
99403+ unsigned long address_m;
99404+
99405+ if (vma->vm_start > vma_m->vm_start) {
99406+ address_m = address;
99407+ address -= SEGMEXEC_TASK_SIZE;
99408+ vma = vma_m;
99409+ h = hstate_vma(vma);
99410+ } else
99411+ address_m = address + SEGMEXEC_TASK_SIZE;
99412+
99413+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
99414+ return VM_FAULT_OOM;
99415+ address_m &= HPAGE_MASK;
99416+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
99417+ }
99418+#endif
99419+
99420 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
99421 if (!ptep)
99422 return VM_FAULT_OOM;
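
The hugetlb sysctl hunks above show the standard response to the constify plugin: once ctl_table instances become const, a handler can no longer repoint table->data, so it works on a writable stack copy instead. The pattern in isolation (ctl_table_no_const is the grsecurity typedef for a non-const struct ctl_table; read_current_value() is a placeholder):

/* Sketch of the copy-to-local pattern used by the constified handlers. */
static int my_ulong_handler(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned long tmp = read_current_value();	/* hypothetical */
	ctl_table_no_const local = *table;		/* writable shadow */

	local.data = &tmp;
	local.maxlen = sizeof(tmp);
	return proc_doulongvec_minmax(&local, write, buffer, lenp, ppos);
}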
99423diff --git a/mm/internal.h b/mm/internal.h
99424index 7f22a11f..f3c207f 100644
99425--- a/mm/internal.h
99426+++ b/mm/internal.h
99427@@ -109,6 +109,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
99428 * in mm/page_alloc.c
99429 */
99430 extern void __free_pages_bootmem(struct page *page, unsigned int order);
99431+extern void free_compound_page(struct page *page);
99432 extern void prep_compound_page(struct page *page, unsigned long order);
99433 #ifdef CONFIG_MEMORY_FAILURE
99434 extern bool is_free_buddy_page(struct page *page);
99435@@ -351,7 +352,7 @@ extern u32 hwpoison_filter_enable;
99436
99437 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
99438 unsigned long, unsigned long,
99439- unsigned long, unsigned long);
99440+ unsigned long, unsigned long) __intentional_overflow(-1);
99441
99442 extern void set_pageblock_order(void);
99443 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
99444diff --git a/mm/iov_iter.c b/mm/iov_iter.c
99445index 7b5dbd1..af0e329 100644
99446--- a/mm/iov_iter.c
99447+++ b/mm/iov_iter.c
99448@@ -173,7 +173,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
99449
99450 while (bytes) {
99451 char __user *buf = iov->iov_base + base;
99452- int copy = min(bytes, iov->iov_len - base);
99453+ size_t copy = min(bytes, iov->iov_len - base);
99454
99455 base = 0;
99456 left = __copy_from_user_inatomic(vaddr, buf, copy);
99457@@ -201,7 +201,7 @@ static size_t copy_from_user_atomic_iovec(struct page *page,
99458
99459 kaddr = kmap_atomic(page);
99460 if (likely(i->nr_segs == 1)) {
99461- int left;
99462+ size_t left;
99463 char __user *buf = i->iov->iov_base + i->iov_offset;
99464 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
99465 copied = bytes - left;
99466@@ -231,7 +231,7 @@ static void advance_iovec(struct iov_iter *i, size_t bytes)
99467 * zero-length segments (without overruning the iovec).
99468 */
99469 while (bytes || unlikely(i->count && !iov->iov_len)) {
99470- int copy;
99471+ size_t copy;
99472
99473 copy = min(bytes, iov->iov_len - base);
99474 BUG_ON(!i->count || i->count < copy);
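
The int-to-size_t conversions above close a truncation hazard: min(bytes, iov->iov_len - base) is a size_t-sized value, and assigning it to int silently narrows once a segment crosses 2 GiB on 64-bit. A runnable demonstration of the narrowing:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t bytes = (size_t)3 << 30;			/* 3 GiB request */
	size_t seg = (size_t)4 << 30;			/* 4 GiB segment */
	size_t copy = bytes < seg ? bytes : seg;	/* correct */
	int bad = bytes < seg ? bytes : seg;		/* narrows on LP64 */

	printf("size_t copy = %zu\n", copy);	/* 3221225472 */
	printf("int    copy = %d\n", bad);	/* typically -1073741824 */
	return 0;
}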
99475diff --git a/mm/kmemleak.c b/mm/kmemleak.c
99476index 3cda50c..032ba634 100644
99477--- a/mm/kmemleak.c
99478+++ b/mm/kmemleak.c
99479@@ -364,7 +364,7 @@ static void print_unreferenced(struct seq_file *seq,
99480
99481 for (i = 0; i < object->trace_len; i++) {
99482 void *ptr = (void *)object->trace[i];
99483- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
99484+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
99485 }
99486 }
99487
99488@@ -1905,7 +1905,7 @@ static int __init kmemleak_late_init(void)
99489 return -ENOMEM;
99490 }
99491
99492- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
99493+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
99494 &kmemleak_fops);
99495 if (!dentry)
99496 pr_warning("Failed to create the debugfs kmemleak file\n");
99497diff --git a/mm/maccess.c b/mm/maccess.c
99498index d53adf9..03a24bf 100644
99499--- a/mm/maccess.c
99500+++ b/mm/maccess.c
99501@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
99502 set_fs(KERNEL_DS);
99503 pagefault_disable();
99504 ret = __copy_from_user_inatomic(dst,
99505- (__force const void __user *)src, size);
99506+ (const void __force_user *)src, size);
99507 pagefault_enable();
99508 set_fs(old_fs);
99509
99510@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
99511
99512 set_fs(KERNEL_DS);
99513 pagefault_disable();
99514- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
99515+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
99516 pagefault_enable();
99517 set_fs(old_fs);
99518
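
The maccess changes are sparse-annotation fixes: __force_user marks the deliberate kernel-to-user pointer reinterpretation performed inside the set_fs(KERNEL_DS)/pagefault_disable() window, which grsecurity's stricter address-space checking would otherwise flag. Typical caller usage of the probing helpers, as a kernel-style sketch:

/* Sketch: safely peek at an address that may fault (tracer/debugger use). */
static int peek_word(const void *addr, unsigned long *out)
{
	/* probe_kernel_read() returns 0 on success, -EFAULT on fault. */
	return probe_kernel_read(out, addr, sizeof(*out));
}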
99519diff --git a/mm/madvise.c b/mm/madvise.c
99520index a402f8f..f5e5daa 100644
99521--- a/mm/madvise.c
99522+++ b/mm/madvise.c
99523@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
99524 pgoff_t pgoff;
99525 unsigned long new_flags = vma->vm_flags;
99526
99527+#ifdef CONFIG_PAX_SEGMEXEC
99528+ struct vm_area_struct *vma_m;
99529+#endif
99530+
99531 switch (behavior) {
99532 case MADV_NORMAL:
99533 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
99534@@ -126,6 +130,13 @@ success:
99535 /*
99536 * vm_flags is protected by the mmap_sem held in write mode.
99537 */
99538+
99539+#ifdef CONFIG_PAX_SEGMEXEC
99540+ vma_m = pax_find_mirror_vma(vma);
99541+ if (vma_m)
99542+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
99543+#endif
99544+
99545 vma->vm_flags = new_flags;
99546
99547 out:
99548@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma,
99549 struct vm_area_struct **prev,
99550 unsigned long start, unsigned long end)
99551 {
99552+
99553+#ifdef CONFIG_PAX_SEGMEXEC
99554+ struct vm_area_struct *vma_m;
99555+#endif
99556+
99557 *prev = vma;
99558 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
99559 return -EINVAL;
99560@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma,
99561 zap_page_range(vma, start, end - start, &details);
99562 } else
99563 zap_page_range(vma, start, end - start, NULL);
99564+
99565+#ifdef CONFIG_PAX_SEGMEXEC
99566+ vma_m = pax_find_mirror_vma(vma);
99567+ if (vma_m) {
99568+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
99569+ struct zap_details details = {
99570+ .nonlinear_vma = vma_m,
99571+ .last_index = ULONG_MAX,
99572+ };
99573+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
99574+ } else
99575+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
99576+ }
99577+#endif
99578+
99579 return 0;
99580 }
99581
99582@@ -491,6 +522,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
99583 if (end < start)
99584 return error;
99585
99586+#ifdef CONFIG_PAX_SEGMEXEC
99587+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
99588+ if (end > SEGMEXEC_TASK_SIZE)
99589+ return error;
99590+ } else
99591+#endif
99592+
99593+ if (end > TASK_SIZE)
99594+ return error;
99595+
99596 error = 0;
99597 if (end == start)
99598 return error;
99599diff --git a/mm/memory-failure.c b/mm/memory-failure.c
99600index a013bc9..a897a14 100644
99601--- a/mm/memory-failure.c
99602+++ b/mm/memory-failure.c
99603@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
99604
99605 int sysctl_memory_failure_recovery __read_mostly = 1;
99606
99607-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
99608+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
99609
99610 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
99611
99612@@ -198,7 +198,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
99613 pfn, t->comm, t->pid);
99614 si.si_signo = SIGBUS;
99615 si.si_errno = 0;
99616- si.si_addr = (void *)addr;
99617+ si.si_addr = (void __user *)addr;
99618 #ifdef __ARCH_SI_TRAPNO
99619 si.si_trapno = trapno;
99620 #endif
99621@@ -791,7 +791,7 @@ static struct page_state {
99622 unsigned long res;
99623 char *msg;
99624 int (*action)(struct page *p, unsigned long pfn);
99625-} error_states[] = {
99626+} __do_const error_states[] = {
99627 { reserved, reserved, "reserved kernel", me_kernel },
99628 /*
99629 * free pages are specially detected outside this table:
99630@@ -1099,7 +1099,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
99631 nr_pages = 1 << compound_order(hpage);
99632 else /* normal page or thp */
99633 nr_pages = 1;
99634- atomic_long_add(nr_pages, &num_poisoned_pages);
99635+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
99636
99637 /*
99638 * We need/can do nothing about count=0 pages.
99639@@ -1128,7 +1128,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
99640 if (PageHWPoison(hpage)) {
99641 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
99642 || (p != hpage && TestSetPageHWPoison(hpage))) {
99643- atomic_long_sub(nr_pages, &num_poisoned_pages);
99644+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
99645 unlock_page(hpage);
99646 return 0;
99647 }
99648@@ -1186,14 +1186,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
99649 */
99650 if (!PageHWPoison(p)) {
99651 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
99652- atomic_long_sub(nr_pages, &num_poisoned_pages);
99653+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
99654 put_page(hpage);
99655 res = 0;
99656 goto out;
99657 }
99658 if (hwpoison_filter(p)) {
99659 if (TestClearPageHWPoison(p))
99660- atomic_long_sub(nr_pages, &num_poisoned_pages);
99661+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
99662 unlock_page(hpage);
99663 put_page(hpage);
99664 return 0;
99665@@ -1423,7 +1423,7 @@ int unpoison_memory(unsigned long pfn)
99666 return 0;
99667 }
99668 if (TestClearPageHWPoison(p))
99669- atomic_long_dec(&num_poisoned_pages);
99670+ atomic_long_dec_unchecked(&num_poisoned_pages);
99671 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
99672 return 0;
99673 }
99674@@ -1437,7 +1437,7 @@ int unpoison_memory(unsigned long pfn)
99675 */
99676 if (TestClearPageHWPoison(page)) {
99677 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
99678- atomic_long_sub(nr_pages, &num_poisoned_pages);
99679+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
99680 freeit = 1;
99681 if (PageHuge(page))
99682 clear_page_hwpoison_huge_page(page);
99683@@ -1562,11 +1562,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
99684 if (PageHuge(page)) {
99685 set_page_hwpoison_huge_page(hpage);
99686 dequeue_hwpoisoned_huge_page(hpage);
99687- atomic_long_add(1 << compound_order(hpage),
99688+ atomic_long_add_unchecked(1 << compound_order(hpage),
99689 &num_poisoned_pages);
99690 } else {
99691 SetPageHWPoison(page);
99692- atomic_long_inc(&num_poisoned_pages);
99693+ atomic_long_inc_unchecked(&num_poisoned_pages);
99694 }
99695 }
99696 return ret;
99697@@ -1605,7 +1605,7 @@ static int __soft_offline_page(struct page *page, int flags)
99698 put_page(page);
99699 pr_info("soft_offline: %#lx: invalidated\n", pfn);
99700 SetPageHWPoison(page);
99701- atomic_long_inc(&num_poisoned_pages);
99702+ atomic_long_inc_unchecked(&num_poisoned_pages);
99703 return 0;
99704 }
99705
99706@@ -1656,7 +1656,7 @@ static int __soft_offline_page(struct page *page, int flags)
99707 if (!is_free_buddy_page(page))
99708 pr_info("soft offline: %#lx: page leaked\n",
99709 pfn);
99710- atomic_long_inc(&num_poisoned_pages);
99711+ atomic_long_inc_unchecked(&num_poisoned_pages);
99712 }
99713 } else {
99714 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
99715@@ -1726,11 +1726,11 @@ int soft_offline_page(struct page *page, int flags)
99716 if (PageHuge(page)) {
99717 set_page_hwpoison_huge_page(hpage);
99718 dequeue_hwpoisoned_huge_page(hpage);
99719- atomic_long_add(1 << compound_order(hpage),
99720+ atomic_long_add_unchecked(1 << compound_order(hpage),
99721 &num_poisoned_pages);
99722 } else {
99723 SetPageHWPoison(page);
99724- atomic_long_inc(&num_poisoned_pages);
99725+ atomic_long_inc_unchecked(&num_poisoned_pages);
99726 }
99727 }
99728 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
99729diff --git a/mm/memory.c b/mm/memory.c
99730index 0a21f3d..babeaec 100644
99731--- a/mm/memory.c
99732+++ b/mm/memory.c
99733@@ -413,6 +413,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
99734 free_pte_range(tlb, pmd, addr);
99735 } while (pmd++, addr = next, addr != end);
99736
99737+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
99738 start &= PUD_MASK;
99739 if (start < floor)
99740 return;
99741@@ -427,6 +428,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
99742 pmd = pmd_offset(pud, start);
99743 pud_clear(pud);
99744 pmd_free_tlb(tlb, pmd, start);
99745+#endif
99746+
99747 }
99748
99749 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
99750@@ -446,6 +449,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
99751 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
99752 } while (pud++, addr = next, addr != end);
99753
99754+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
99755 start &= PGDIR_MASK;
99756 if (start < floor)
99757 return;
99758@@ -460,6 +464,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
99759 pud = pud_offset(pgd, start);
99760 pgd_clear(pgd);
99761 pud_free_tlb(tlb, pud, start);
99762+#endif
99763+
99764 }
99765
99766 /*
99767@@ -1500,6 +1506,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
99768 page_add_file_rmap(page);
99769 set_pte_at(mm, addr, pte, mk_pte(page, prot));
99770
99771+#ifdef CONFIG_PAX_SEGMEXEC
99772+ pax_mirror_file_pte(vma, addr, page, ptl);
99773+#endif
99774+
99775 retval = 0;
99776 pte_unmap_unlock(pte, ptl);
99777 return retval;
99778@@ -1544,9 +1554,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
99779 if (!page_count(page))
99780 return -EINVAL;
99781 if (!(vma->vm_flags & VM_MIXEDMAP)) {
99782+
99783+#ifdef CONFIG_PAX_SEGMEXEC
99784+ struct vm_area_struct *vma_m;
99785+#endif
99786+
99787 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
99788 BUG_ON(vma->vm_flags & VM_PFNMAP);
99789 vma->vm_flags |= VM_MIXEDMAP;
99790+
99791+#ifdef CONFIG_PAX_SEGMEXEC
99792+ vma_m = pax_find_mirror_vma(vma);
99793+ if (vma_m)
99794+ vma_m->vm_flags |= VM_MIXEDMAP;
99795+#endif
99796+
99797 }
99798 return insert_page(vma, addr, page, vma->vm_page_prot);
99799 }
99800@@ -1629,6 +1651,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
99801 unsigned long pfn)
99802 {
99803 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
99804+ BUG_ON(vma->vm_mirror);
99805
99806 if (addr < vma->vm_start || addr >= vma->vm_end)
99807 return -EFAULT;
99808@@ -1876,7 +1899,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
99809
99810 BUG_ON(pud_huge(*pud));
99811
99812- pmd = pmd_alloc(mm, pud, addr);
99813+ pmd = (mm == &init_mm) ?
99814+ pmd_alloc_kernel(mm, pud, addr) :
99815+ pmd_alloc(mm, pud, addr);
99816 if (!pmd)
99817 return -ENOMEM;
99818 do {
99819@@ -1896,7 +1921,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
99820 unsigned long next;
99821 int err;
99822
99823- pud = pud_alloc(mm, pgd, addr);
99824+ pud = (mm == &init_mm) ?
99825+ pud_alloc_kernel(mm, pgd, addr) :
99826+ pud_alloc(mm, pgd, addr);
99827 if (!pud)
99828 return -ENOMEM;
99829 do {
99830@@ -2018,6 +2045,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
99831 return ret;
99832 }
99833
99834+#ifdef CONFIG_PAX_SEGMEXEC
99835+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
99836+{
99837+ struct mm_struct *mm = vma->vm_mm;
99838+ spinlock_t *ptl;
99839+ pte_t *pte, entry;
99840+
99841+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
99842+ entry = *pte;
99843+ if (!pte_present(entry)) {
99844+ if (!pte_none(entry)) {
99845+ BUG_ON(pte_file(entry));
99846+ free_swap_and_cache(pte_to_swp_entry(entry));
99847+ pte_clear_not_present_full(mm, address, pte, 0);
99848+ }
99849+ } else {
99850+ struct page *page;
99851+
99852+ flush_cache_page(vma, address, pte_pfn(entry));
99853+ entry = ptep_clear_flush(vma, address, pte);
99854+ BUG_ON(pte_dirty(entry));
99855+ page = vm_normal_page(vma, address, entry);
99856+ if (page) {
99857+ update_hiwater_rss(mm);
99858+ if (PageAnon(page))
99859+ dec_mm_counter_fast(mm, MM_ANONPAGES);
99860+ else
99861+ dec_mm_counter_fast(mm, MM_FILEPAGES);
99862+ page_remove_rmap(page);
99863+ page_cache_release(page);
99864+ }
99865+ }
99866+ pte_unmap_unlock(pte, ptl);
99867+}
99868+
99869+/* PaX: if vma is mirrored, synchronize the mirror's PTE
99870+ *
99871+ * the ptl of the lower mapped page is held on entry and is released neither on
99872+ * exit nor inside, so that PTE state changes (swapout, mremap, munmap, etc.) stay atomic
99873+ */
99874+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
99875+{
99876+ struct mm_struct *mm = vma->vm_mm;
99877+ unsigned long address_m;
99878+ spinlock_t *ptl_m;
99879+ struct vm_area_struct *vma_m;
99880+ pmd_t *pmd_m;
99881+ pte_t *pte_m, entry_m;
99882+
99883+ BUG_ON(!page_m || !PageAnon(page_m));
99884+
99885+ vma_m = pax_find_mirror_vma(vma);
99886+ if (!vma_m)
99887+ return;
99888+
99889+ BUG_ON(!PageLocked(page_m));
99890+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
99891+ address_m = address + SEGMEXEC_TASK_SIZE;
99892+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
99893+ pte_m = pte_offset_map(pmd_m, address_m);
99894+ ptl_m = pte_lockptr(mm, pmd_m);
99895+ if (ptl != ptl_m) {
99896+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
99897+ if (!pte_none(*pte_m))
99898+ goto out;
99899+ }
99900+
99901+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
99902+ page_cache_get(page_m);
99903+ page_add_anon_rmap(page_m, vma_m, address_m);
99904+ inc_mm_counter_fast(mm, MM_ANONPAGES);
99905+ set_pte_at(mm, address_m, pte_m, entry_m);
99906+ update_mmu_cache(vma_m, address_m, pte_m);
99907+out:
99908+ if (ptl != ptl_m)
99909+ spin_unlock(ptl_m);
99910+ pte_unmap(pte_m);
99911+ unlock_page(page_m);
99912+}
99913+
99914+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
99915+{
99916+ struct mm_struct *mm = vma->vm_mm;
99917+ unsigned long address_m;
99918+ spinlock_t *ptl_m;
99919+ struct vm_area_struct *vma_m;
99920+ pmd_t *pmd_m;
99921+ pte_t *pte_m, entry_m;
99922+
99923+ BUG_ON(!page_m || PageAnon(page_m));
99924+
99925+ vma_m = pax_find_mirror_vma(vma);
99926+ if (!vma_m)
99927+ return;
99928+
99929+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
99930+ address_m = address + SEGMEXEC_TASK_SIZE;
99931+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
99932+ pte_m = pte_offset_map(pmd_m, address_m);
99933+ ptl_m = pte_lockptr(mm, pmd_m);
99934+ if (ptl != ptl_m) {
99935+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
99936+ if (!pte_none(*pte_m))
99937+ goto out;
99938+ }
99939+
99940+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
99941+ page_cache_get(page_m);
99942+ page_add_file_rmap(page_m);
99943+ inc_mm_counter_fast(mm, MM_FILEPAGES);
99944+ set_pte_at(mm, address_m, pte_m, entry_m);
99945+ update_mmu_cache(vma_m, address_m, pte_m);
99946+out:
99947+ if (ptl != ptl_m)
99948+ spin_unlock(ptl_m);
99949+ pte_unmap(pte_m);
99950+}
99951+
99952+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
99953+{
99954+ struct mm_struct *mm = vma->vm_mm;
99955+ unsigned long address_m;
99956+ spinlock_t *ptl_m;
99957+ struct vm_area_struct *vma_m;
99958+ pmd_t *pmd_m;
99959+ pte_t *pte_m, entry_m;
99960+
99961+ vma_m = pax_find_mirror_vma(vma);
99962+ if (!vma_m)
99963+ return;
99964+
99965+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
99966+ address_m = address + SEGMEXEC_TASK_SIZE;
99967+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
99968+ pte_m = pte_offset_map(pmd_m, address_m);
99969+ ptl_m = pte_lockptr(mm, pmd_m);
99970+ if (ptl != ptl_m) {
99971+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
99972+ if (!pte_none(*pte_m))
99973+ goto out;
99974+ }
99975+
99976+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
99977+ set_pte_at(mm, address_m, pte_m, entry_m);
99978+out:
99979+ if (ptl != ptl_m)
99980+ spin_unlock(ptl_m);
99981+ pte_unmap(pte_m);
99982+}
99983+
99984+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
99985+{
99986+ struct page *page_m;
99987+ pte_t entry;
99988+
99989+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
99990+ goto out;
99991+
99992+ entry = *pte;
99993+ page_m = vm_normal_page(vma, address, entry);
99994+ if (!page_m)
99995+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
99996+ else if (PageAnon(page_m)) {
99997+ if (pax_find_mirror_vma(vma)) {
99998+ pte_unmap_unlock(pte, ptl);
99999+ lock_page(page_m);
100000+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
100001+ if (pte_same(entry, *pte))
100002+ pax_mirror_anon_pte(vma, address, page_m, ptl);
100003+ else
100004+ unlock_page(page_m);
100005+ }
100006+ } else
100007+ pax_mirror_file_pte(vma, address, page_m, ptl);
100008+
100009+out:
100010+ pte_unmap_unlock(pte, ptl);
100011+}
100012+#endif
100013+
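Note on the SEGMEXEC helpers above: under CONFIG_PAX_SEGMEXEC the user address space is split in half at SEGMEXEC_TASK_SIZE. The lower half holds the normal (data) view of every mapping; each VM_EXEC mapping additionally gets a mirror vma in the upper half, and instruction fetches are translated through a code segment based at SEGMEXEC_TASK_SIZE, so they can only resolve where such a mirror exists. pax_mirror_{anon,file,pfn}_pte() keep the two PTEs of a pair in sync on every fault path, and pax_unmap_mirror_pte() tears down the stale upper-half PTE first. A minimal userspace sketch of the address arithmetic, assuming the i386 value of SEGMEXEC_TASK_SIZE under the default 3GB split (1.5GB); mirror_of() is a hypothetical name:

    #include <assert.h>
    #include <stdio.h>

    #define SEGMEXEC_TASK_SIZE 0x60000000UL   /* 1.5GB, i386 value */

    static unsigned long mirror_of(unsigned long addr)
    {
            assert(addr < SEGMEXEC_TASK_SIZE); /* data view: lower half */
            return addr + SEGMEXEC_TASK_SIZE;  /* exec view: upper half */
    }

    int main(void)
    {
            printf("%#lx\n", mirror_of(0x08048000UL)); /* 0x68048000 */
            return 0;
    }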
100014 /*
100015 * This routine handles present pages, when users try to write
100016 * to a shared page. It is done by copying the page to a new address
100017@@ -2215,6 +2422,12 @@ gotten:
100018 */
100019 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
100020 if (likely(pte_same(*page_table, orig_pte))) {
100021+
100022+#ifdef CONFIG_PAX_SEGMEXEC
100023+ if (pax_find_mirror_vma(vma))
100024+ BUG_ON(!trylock_page(new_page));
100025+#endif
100026+
100027 if (old_page) {
100028 if (!PageAnon(old_page)) {
100029 dec_mm_counter_fast(mm, MM_FILEPAGES);
100030@@ -2266,6 +2479,10 @@ gotten:
100031 page_remove_rmap(old_page);
100032 }
100033
100034+#ifdef CONFIG_PAX_SEGMEXEC
100035+ pax_mirror_anon_pte(vma, address, new_page, ptl);
100036+#endif
100037+
100038 /* Free the old page.. */
100039 new_page = old_page;
100040 ret |= VM_FAULT_WRITE;
100041@@ -2539,6 +2756,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
100042 swap_free(entry);
100043 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
100044 try_to_free_swap(page);
100045+
100046+#ifdef CONFIG_PAX_SEGMEXEC
100047+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
100048+#endif
100049+
100050 unlock_page(page);
100051 if (page != swapcache) {
100052 /*
100053@@ -2562,6 +2784,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
100054
100055 /* No need to invalidate - it was non-present before */
100056 update_mmu_cache(vma, address, page_table);
100057+
100058+#ifdef CONFIG_PAX_SEGMEXEC
100059+ pax_mirror_anon_pte(vma, address, page, ptl);
100060+#endif
100061+
100062 unlock:
100063 pte_unmap_unlock(page_table, ptl);
100064 out:
100065@@ -2581,40 +2808,6 @@ out_release:
100066 }
100067
100068 /*
100069- * This is like a special single-page "expand_{down|up}wards()",
100070- * except we must first make sure that 'address{-|+}PAGE_SIZE'
100071- * doesn't hit another vma.
100072- */
100073-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
100074-{
100075- address &= PAGE_MASK;
100076- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
100077- struct vm_area_struct *prev = vma->vm_prev;
100078-
100079- /*
100080- * Is there a mapping abutting this one below?
100081- *
100082- * That's only ok if it's the same stack mapping
100083- * that has gotten split..
100084- */
100085- if (prev && prev->vm_end == address)
100086- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
100087-
100088- expand_downwards(vma, address - PAGE_SIZE);
100089- }
100090- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
100091- struct vm_area_struct *next = vma->vm_next;
100092-
100093- /* As VM_GROWSDOWN but s/below/above/ */
100094- if (next && next->vm_start == address + PAGE_SIZE)
100095- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
100096-
100097- expand_upwards(vma, address + PAGE_SIZE);
100098- }
100099- return 0;
100100-}
100101-
100102-/*
100103 * We enter with non-exclusive mmap_sem (to exclude vma changes,
100104 * but allow concurrent faults), and pte mapped but not yet locked.
100105 * We return with mmap_sem still held, but pte unmapped and unlocked.
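The deleted check_stack_guard_page() grew a single guard page on first touch next to a growing stack. This patch drops it in favor of a configurable heap-stack gap (sysctl_heap_stack_gap, added in mm/mmap.c below with a 64KB default) that is enforced when addresses are handed out and when stacks expand; see check_heap_stack_gap() further down. A small runnable model of why the wider gap is the stronger property (4KB pages and the addresses below are assumed for illustration):

    #include <stdio.h>

    int main(void)
    {
            unsigned long stack_low = 0xbf000000UL; /* lowest stack page */
            unsigned long access    = 0xbeffd000UL; /* wild write, 12KB below */

            /* one 4KB guard page is simply skipped by a >4KB jump... */
            printf("%d\n", stack_low - access > 0x1000);  /* 1: skipped */
            /* ...but a 64KB enforced gap needs a >64KB jump to clear */
            printf("%d\n", stack_low - access > 0x10000); /* 0: blocked */
            return 0;
    }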
100106@@ -2623,27 +2816,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
100107 unsigned long address, pte_t *page_table, pmd_t *pmd,
100108 unsigned int flags)
100109 {
100110- struct page *page;
100111+ struct page *page = NULL;
100112 spinlock_t *ptl;
100113 pte_t entry;
100114
100115- pte_unmap(page_table);
100116-
100117- /* Check if we need to add a guard page to the stack */
100118- if (check_stack_guard_page(vma, address) < 0)
100119- return VM_FAULT_SIGBUS;
100120-
100121- /* Use the zero-page for reads */
100122 if (!(flags & FAULT_FLAG_WRITE)) {
100123 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
100124 vma->vm_page_prot));
100125- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
100126+ ptl = pte_lockptr(mm, pmd);
100127+ spin_lock(ptl);
100128 if (!pte_none(*page_table))
100129 goto unlock;
100130 goto setpte;
100131 }
100132
100133 /* Allocate our own private page. */
100134+ pte_unmap(page_table);
100135+
100136 if (unlikely(anon_vma_prepare(vma)))
100137 goto oom;
100138 page = alloc_zeroed_user_highpage_movable(vma, address);
100139@@ -2667,6 +2856,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
100140 if (!pte_none(*page_table))
100141 goto release;
100142
100143+#ifdef CONFIG_PAX_SEGMEXEC
100144+ if (pax_find_mirror_vma(vma))
100145+ BUG_ON(!trylock_page(page));
100146+#endif
100147+
100148 inc_mm_counter_fast(mm, MM_ANONPAGES);
100149 page_add_new_anon_rmap(page, vma, address);
100150 setpte:
100151@@ -2674,6 +2868,12 @@ setpte:
100152
100153 /* No need to invalidate - it was non-present before */
100154 update_mmu_cache(vma, address, page_table);
100155+
100156+#ifdef CONFIG_PAX_SEGMEXEC
100157+ if (page)
100158+ pax_mirror_anon_pte(vma, address, page, ptl);
100159+#endif
100160+
100161 unlock:
100162 pte_unmap_unlock(page_table, ptl);
100163 return 0;
100164@@ -2905,6 +3105,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
100165 return ret;
100166 }
100167 do_set_pte(vma, address, fault_page, pte, false, false);
100168+
100169+#ifdef CONFIG_PAX_SEGMEXEC
100170+ pax_mirror_file_pte(vma, address, fault_page, ptl);
100171+#endif
100172+
100173 unlock_page(fault_page);
100174 unlock_out:
100175 pte_unmap_unlock(pte, ptl);
100176@@ -2946,7 +3151,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
100177 page_cache_release(fault_page);
100178 goto uncharge_out;
100179 }
100180+
100181+#ifdef CONFIG_PAX_SEGMEXEC
100182+ if (pax_find_mirror_vma(vma))
100183+ BUG_ON(!trylock_page(new_page));
100184+#endif
100185+
100186 do_set_pte(vma, address, new_page, pte, true, true);
100187+
100188+#ifdef CONFIG_PAX_SEGMEXEC
100189+ pax_mirror_anon_pte(vma, address, new_page, ptl);
100190+#endif
100191+
100192 pte_unmap_unlock(pte, ptl);
100193 unlock_page(fault_page);
100194 page_cache_release(fault_page);
100195@@ -2994,6 +3210,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
100196 return ret;
100197 }
100198 do_set_pte(vma, address, fault_page, pte, true, false);
100199+
100200+#ifdef CONFIG_PAX_SEGMEXEC
100201+ pax_mirror_file_pte(vma, address, fault_page, ptl);
100202+#endif
100203+
100204 pte_unmap_unlock(pte, ptl);
100205
100206 if (set_page_dirty(fault_page))
100207@@ -3224,6 +3445,12 @@ static int handle_pte_fault(struct mm_struct *mm,
100208 if (flags & FAULT_FLAG_WRITE)
100209 flush_tlb_fix_spurious_fault(vma, address);
100210 }
100211+
100212+#ifdef CONFIG_PAX_SEGMEXEC
100213+ pax_mirror_pte(vma, address, pte, pmd, ptl);
100214+ return 0;
100215+#endif
100216+
100217 unlock:
100218 pte_unmap_unlock(pte, ptl);
100219 return 0;
100220@@ -3240,9 +3467,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
100221 pmd_t *pmd;
100222 pte_t *pte;
100223
100224+#ifdef CONFIG_PAX_SEGMEXEC
100225+ struct vm_area_struct *vma_m;
100226+#endif
100227+
100228 if (unlikely(is_vm_hugetlb_page(vma)))
100229 return hugetlb_fault(mm, vma, address, flags);
100230
100231+#ifdef CONFIG_PAX_SEGMEXEC
100232+ vma_m = pax_find_mirror_vma(vma);
100233+ if (vma_m) {
100234+ unsigned long address_m;
100235+ pgd_t *pgd_m;
100236+ pud_t *pud_m;
100237+ pmd_t *pmd_m;
100238+
100239+ if (vma->vm_start > vma_m->vm_start) {
100240+ address_m = address;
100241+ address -= SEGMEXEC_TASK_SIZE;
100242+ vma = vma_m;
100243+ } else
100244+ address_m = address + SEGMEXEC_TASK_SIZE;
100245+
100246+ pgd_m = pgd_offset(mm, address_m);
100247+ pud_m = pud_alloc(mm, pgd_m, address_m);
100248+ if (!pud_m)
100249+ return VM_FAULT_OOM;
100250+ pmd_m = pmd_alloc(mm, pud_m, address_m);
100251+ if (!pmd_m)
100252+ return VM_FAULT_OOM;
100253+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
100254+ return VM_FAULT_OOM;
100255+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
100256+ }
100257+#endif
100258+
100259 pgd = pgd_offset(mm, address);
100260 pud = pud_alloc(mm, pgd, address);
100261 if (!pud)
100262@@ -3370,6 +3629,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
100263 spin_unlock(&mm->page_table_lock);
100264 return 0;
100265 }
100266+
100267+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
100268+{
100269+ pud_t *new = pud_alloc_one(mm, address);
100270+ if (!new)
100271+ return -ENOMEM;
100272+
100273+ smp_wmb(); /* See comment in __pte_alloc */
100274+
100275+ spin_lock(&mm->page_table_lock);
100276+ if (pgd_present(*pgd)) /* Another has populated it */
100277+ pud_free(mm, new);
100278+ else
100279+ pgd_populate_kernel(mm, pgd, new);
100280+ spin_unlock(&mm->page_table_lock);
100281+ return 0;
100282+}
100283 #endif /* __PAGETABLE_PUD_FOLDED */
100284
100285 #ifndef __PAGETABLE_PMD_FOLDED
100286@@ -3400,6 +3676,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
100287 spin_unlock(&mm->page_table_lock);
100288 return 0;
100289 }
100290+
100291+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
100292+{
100293+ pmd_t *new = pmd_alloc_one(mm, address);
100294+ if (!new)
100295+ return -ENOMEM;
100296+
100297+ smp_wmb(); /* See comment in __pte_alloc */
100298+
100299+ spin_lock(&mm->page_table_lock);
100300+#ifndef __ARCH_HAS_4LEVEL_HACK
100301+ if (pud_present(*pud)) /* Another has populated it */
100302+ pmd_free(mm, new);
100303+ else
100304+ pud_populate_kernel(mm, pud, new);
100305+#else
100306+ if (pgd_present(*pud)) /* Another has populated it */
100307+ pmd_free(mm, new);
100308+ else
100309+ pgd_populate_kernel(mm, pud, new);
100310+#endif /* __ARCH_HAS_4LEVEL_HACK */
100311+ spin_unlock(&mm->page_table_lock);
100312+ return 0;
100313+}
100314 #endif /* __PAGETABLE_PMD_FOLDED */
100315
100316 #if !defined(__HAVE_ARCH_GATE_AREA)
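The __pud_alloc_kernel()/__pmd_alloc_kernel() variants above are the kernel-side counterparts of __pud_alloc()/__pmd_alloc(): identical logic, but the upper level is populated with pgd_populate_kernel()/pud_populate_kernel(), which matters once CONFIG_PAX_PER_CPU_PGD gives each CPU its own PGD and kernel entries must land in the kernel half of the tables. Callers select them by table owner, recapping the apply_to_p{m,u}d_range() hunks earlier in this file:

    pud = (mm == &init_mm) ? pud_alloc_kernel(mm, pgd, addr)
                           : pud_alloc(mm, pgd, addr);
    pmd = (mm == &init_mm) ? pmd_alloc_kernel(mm, pud, addr)
                           : pmd_alloc(mm, pud, addr);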
100317@@ -3413,7 +3713,7 @@ static int __init gate_vma_init(void)
100318 gate_vma.vm_start = FIXADDR_USER_START;
100319 gate_vma.vm_end = FIXADDR_USER_END;
100320 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
100321- gate_vma.vm_page_prot = __P101;
100322+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
100323
100324 return 0;
100325 }
100326@@ -3547,8 +3847,8 @@ out:
100327 return ret;
100328 }
100329
100330-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
100331- void *buf, int len, int write)
100332+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
100333+ void *buf, size_t len, int write)
100334 {
100335 resource_size_t phys_addr;
100336 unsigned long prot = 0;
100337@@ -3574,8 +3874,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
100338 * Access another process' address space as given in mm. If non-NULL, use the
100339 * given task for page fault accounting.
100340 */
100341-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
100342- unsigned long addr, void *buf, int len, int write)
100343+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
100344+ unsigned long addr, void *buf, size_t len, int write)
100345 {
100346 struct vm_area_struct *vma;
100347 void *old_buf = buf;
100348@@ -3583,7 +3883,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
100349 down_read(&mm->mmap_sem);
100350 /* ignore errors, just check how much was successfully transferred */
100351 while (len) {
100352- int bytes, ret, offset;
100353+ ssize_t bytes, ret, offset;
100354 void *maddr;
100355 struct page *page = NULL;
100356
100357@@ -3642,8 +3942,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
100358 *
100359 * The caller must hold a reference on @mm.
100360 */
100361-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
100362- void *buf, int len, int write)
100363+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
100364+ void *buf, size_t len, int write)
100365 {
100366 return __access_remote_vm(NULL, mm, addr, buf, len, write);
100367 }
100368@@ -3653,11 +3953,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
100369 * Source/target buffer must be kernel space,
100370 * Do not walk the page table directly, use get_user_pages
100371 */
100372-int access_process_vm(struct task_struct *tsk, unsigned long addr,
100373- void *buf, int len, int write)
100374+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
100375+ void *buf, size_t len, int write)
100376 {
100377 struct mm_struct *mm;
100378- int ret;
100379+ ssize_t ret;
100380
100381 mm = get_task_mm(tsk);
100382 if (!mm)
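The signature changes above (int to size_t/ssize_t for generic_access_phys(), __access_remote_vm(), access_remote_vm() and access_process_vm()) close a class of length bugs: a 64-bit length silently truncates when squeezed through int, and a huge value can even turn negative. A standalone demonstration, assuming an LP64 ABI where int is 32-bit:

    #include <stdio.h>
    #include <sys/types.h>

    int main(void)
    {
            size_t len = 0x100000004UL;  /* 4GB + 4 bytes */
            int as_int = (int)len;       /* silently truncates */

            printf("%d\n", as_int);          /* 4 */
            printf("%zd\n", (ssize_t)len);   /* 4294967300, preserved */
            return 0;
    }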
100383diff --git a/mm/mempolicy.c b/mm/mempolicy.c
100384index 8f5330d..b41914b 100644
100385--- a/mm/mempolicy.c
100386+++ b/mm/mempolicy.c
100387@@ -750,6 +750,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
100388 unsigned long vmstart;
100389 unsigned long vmend;
100390
100391+#ifdef CONFIG_PAX_SEGMEXEC
100392+ struct vm_area_struct *vma_m;
100393+#endif
100394+
100395 vma = find_vma(mm, start);
100396 if (!vma || vma->vm_start > start)
100397 return -EFAULT;
100398@@ -793,6 +797,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
100399 err = vma_replace_policy(vma, new_pol);
100400 if (err)
100401 goto out;
100402+
100403+#ifdef CONFIG_PAX_SEGMEXEC
100404+ vma_m = pax_find_mirror_vma(vma);
100405+ if (vma_m) {
100406+ err = vma_replace_policy(vma_m, new_pol);
100407+ if (err)
100408+ goto out;
100409+ }
100410+#endif
100411+
100412 }
100413
100414 out:
100415@@ -1225,6 +1239,17 @@ static long do_mbind(unsigned long start, unsigned long len,
100416
100417 if (end < start)
100418 return -EINVAL;
100419+
100420+#ifdef CONFIG_PAX_SEGMEXEC
100421+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
100422+ if (end > SEGMEXEC_TASK_SIZE)
100423+ return -EINVAL;
100424+ } else
100425+#endif
100426+
100427+ if (end > TASK_SIZE)
100428+ return -EINVAL;
100429+
100430 if (end == start)
100431 return 0;
100432
100433@@ -1450,8 +1475,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
100434 */
100435 tcred = __task_cred(task);
100436 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
100437- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
100438- !capable(CAP_SYS_NICE)) {
100439+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
100440 rcu_read_unlock();
100441 err = -EPERM;
100442 goto out_put;
100443@@ -1482,6 +1506,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
100444 goto out;
100445 }
100446
100447+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
100448+ if (mm != current->mm &&
100449+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
100450+ mmput(mm);
100451+ err = -EPERM;
100452+ goto out;
100453+ }
100454+#endif
100455+
100456 err = do_migrate_pages(mm, old, new,
100457 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
100458
100459diff --git a/mm/migrate.c b/mm/migrate.c
100460index be6dbf9..febb8ec 100644
100461--- a/mm/migrate.c
100462+++ b/mm/migrate.c
100463@@ -1506,8 +1506,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
100464 */
100465 tcred = __task_cred(task);
100466 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
100467- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
100468- !capable(CAP_SYS_NICE)) {
100469+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
100470 rcu_read_unlock();
100471 err = -EPERM;
100472 goto out;
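This hunk and the matching one in mm/mempolicy.c above tighten the permission check for acting on another task's pages: the cred->uid == tcred->uid clause is dropped, so sharing a real uid with the target no longer grants access by itself; the caller's euid must match the target's saved or real uid, its real uid must match the target's saved uid, or it must hold CAP_SYS_NICE. A boolean model of the resulting predicate (may_move_pages() is a hypothetical name, kuid_t simplified to an integer):

    #include <stdbool.h>

    typedef unsigned int uid_model;   /* stand-in for kuid_t */

    static bool may_move_pages(uid_model euid, uid_model uid,
                               uid_model t_suid, uid_model t_uid,
                               bool cap_sys_nice)
    {
            return euid == t_suid || euid == t_uid ||
                   uid == t_suid  || cap_sys_nice;
    }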
100473diff --git a/mm/mlock.c b/mm/mlock.c
100474index b1eb536..091d154 100644
100475--- a/mm/mlock.c
100476+++ b/mm/mlock.c
100477@@ -14,6 +14,7 @@
100478 #include <linux/pagevec.h>
100479 #include <linux/mempolicy.h>
100480 #include <linux/syscalls.h>
100481+#include <linux/security.h>
100482 #include <linux/sched.h>
100483 #include <linux/export.h>
100484 #include <linux/rmap.h>
100485@@ -606,7 +607,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
100486 {
100487 unsigned long nstart, end, tmp;
100488 struct vm_area_struct * vma, * prev;
100489- int error;
100490+ int error = 0;
100491
100492 VM_BUG_ON(start & ~PAGE_MASK);
100493 VM_BUG_ON(len != PAGE_ALIGN(len));
100494@@ -615,6 +616,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
100495 return -EINVAL;
100496 if (end == start)
100497 return 0;
100498+ if (end > TASK_SIZE)
100499+ return -EINVAL;
100500+
100501 vma = find_vma(current->mm, start);
100502 if (!vma || vma->vm_start > start)
100503 return -ENOMEM;
100504@@ -626,6 +630,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
100505 for (nstart = start ; ; ) {
100506 vm_flags_t newflags;
100507
100508+#ifdef CONFIG_PAX_SEGMEXEC
100509+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
100510+ break;
100511+#endif
100512+
100513 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
100514
100515 newflags = vma->vm_flags & ~VM_LOCKED;
100516@@ -739,6 +748,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
100517 locked += current->mm->locked_vm;
100518
100519 /* check against resource limits */
100520+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
100521 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
100522 error = do_mlock(start, len, 1);
100523
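gr_learn_resource() calls like the two added in this file feed grsecurity's RBAC learning mode: the prospective usage is reported just before the limit check, so a learning run can record the peak a workload actually needs. The meaning of the final argument is not spelled out here; from these call sites it is taken, as an assumption, to mark a high-water-style sample. Call pattern, recapping the mlock() hunk above:

    gr_learn_resource(current, RLIMIT_MEMLOCK,
                      (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
    if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
            error = do_mlock(start, len, 1);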
100524@@ -776,6 +786,11 @@ static int do_mlockall(int flags)
100525 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
100526 vm_flags_t newflags;
100527
100528+#ifdef CONFIG_PAX_SEGMEXEC
100529+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
100530+ break;
100531+#endif
100532+
100533 newflags = vma->vm_flags & ~VM_LOCKED;
100534 if (flags & MCL_CURRENT)
100535 newflags |= VM_LOCKED;
100536@@ -807,8 +822,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
100537 lock_limit >>= PAGE_SHIFT;
100538
100539 ret = -ENOMEM;
100540+
100541+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
100542+
100543 down_write(&current->mm->mmap_sem);
100544-
100545 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
100546 capable(CAP_IPC_LOCK))
100547 ret = do_mlockall(flags);
100548diff --git a/mm/mmap.c b/mm/mmap.c
100549index 129b847..fbed804 100644
100550--- a/mm/mmap.c
100551+++ b/mm/mmap.c
100552@@ -40,6 +40,7 @@
100553 #include <linux/notifier.h>
100554 #include <linux/memory.h>
100555 #include <linux/printk.h>
100556+#include <linux/random.h>
100557
100558 #include <asm/uaccess.h>
100559 #include <asm/cacheflush.h>
100560@@ -56,6 +57,16 @@
100561 #define arch_rebalance_pgtables(addr, len) (addr)
100562 #endif
100563
100564+static inline void verify_mm_writelocked(struct mm_struct *mm)
100565+{
100566+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
100567+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
100568+ up_read(&mm->mmap_sem);
100569+ BUG();
100570+ }
100571+#endif
100572+}
100573+
100574 static void unmap_region(struct mm_struct *mm,
100575 struct vm_area_struct *vma, struct vm_area_struct *prev,
100576 unsigned long start, unsigned long end);
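verify_mm_writelocked(), moved up here and strengthened from the old WARN_ON variant (removed further down) to a hard BUG(), asserts that mmap_sem is held for writing: if a read trylock succeeds, no writer can be holding the semaphore, which is exactly the bug being trapped. Usage sketch with a hypothetical caller:

    static void mutate_vma_tree(struct mm_struct *mm)
    {
            verify_mm_writelocked(mm);  /* BUG() unless write-locked */
            /* ...safe to touch mm->mmap and mm->mm_rb here... */
    }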
100577@@ -75,16 +86,25 @@ static void unmap_region(struct mm_struct *mm,
100578 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
100579 *
100580 */
100581-pgprot_t protection_map[16] = {
100582+pgprot_t protection_map[16] __read_only = {
100583 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
100584 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
100585 };
100586
100587-pgprot_t vm_get_page_prot(unsigned long vm_flags)
100588+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
100589 {
100590- return __pgprot(pgprot_val(protection_map[vm_flags &
100591+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
100592 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
100593 pgprot_val(arch_vm_get_page_prot(vm_flags)));
100594+
100595+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
100596+ if (!(__supported_pte_mask & _PAGE_NX) &&
100597+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
100598+ (vm_flags & (VM_READ | VM_WRITE)))
100599+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
100600+#endif
100601+
100602+ return prot;
100603 }
100604 EXPORT_SYMBOL(vm_get_page_prot);
100605
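The vm_get_page_prot() change covers i386 PAGEEXEC on CPUs without hardware NX: a vma tracked as VM_PAGEEXEC but not genuinely executable, and actually usable as data, gets its exec bit stripped with pte_exprotect() so non-executability can be emulated. The added predicate, restated on its own as a sketch:

    bool strip_exec = !(__supported_pte_mask & _PAGE_NX) &&
                      (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
                      (vm_flags & (VM_READ | VM_WRITE)) != 0;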
100606@@ -94,6 +114,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
100607 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
100608 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
100609 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
100610+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
100611 /*
100612 * Make sure vm_committed_as in one cacheline and not cacheline shared with
100613 * other variables. It can be updated by several CPUs frequently.
100614@@ -250,6 +271,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
100615 struct vm_area_struct *next = vma->vm_next;
100616
100617 might_sleep();
100618+ BUG_ON(vma->vm_mirror);
100619 if (vma->vm_ops && vma->vm_ops->close)
100620 vma->vm_ops->close(vma);
100621 if (vma->vm_file)
100622@@ -294,6 +316,12 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
100623 * not page aligned -Ram Gupta
100624 */
100625 rlim = rlimit(RLIMIT_DATA);
100626+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
100627+ /* force a minimum 16MB brk heap on setuid/setgid binaries */
100628+ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
100629+ rlim = 4096 * PAGE_SIZE;
100630+#endif
100631+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
100632 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
100633 (mm->end_data - mm->start_data) > rlim)
100634 goto out;
100635@@ -944,6 +972,12 @@ static int
100636 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
100637 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
100638 {
100639+
100640+#ifdef CONFIG_PAX_SEGMEXEC
100641+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
100642+ return 0;
100643+#endif
100644+
100645 if (is_mergeable_vma(vma, file, vm_flags) &&
100646 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
100647 if (vma->vm_pgoff == vm_pgoff)
100648@@ -963,6 +997,12 @@ static int
100649 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
100650 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
100651 {
100652+
100653+#ifdef CONFIG_PAX_SEGMEXEC
100654+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
100655+ return 0;
100656+#endif
100657+
100658 if (is_mergeable_vma(vma, file, vm_flags) &&
100659 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
100660 pgoff_t vm_pglen;
100661@@ -1005,13 +1045,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
100662 struct vm_area_struct *vma_merge(struct mm_struct *mm,
100663 struct vm_area_struct *prev, unsigned long addr,
100664 unsigned long end, unsigned long vm_flags,
100665- struct anon_vma *anon_vma, struct file *file,
100666+ struct anon_vma *anon_vma, struct file *file,
100667 pgoff_t pgoff, struct mempolicy *policy)
100668 {
100669 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
100670 struct vm_area_struct *area, *next;
100671 int err;
100672
100673+#ifdef CONFIG_PAX_SEGMEXEC
100674+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
100675+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
100676+
100677+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
100678+#endif
100679+
100680 /*
100681 * We later require that vma->vm_flags == vm_flags,
100682 * so this tests vma->vm_flags & VM_SPECIAL, too.
100683@@ -1027,6 +1074,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
100684 if (next && next->vm_end == end) /* cases 6, 7, 8 */
100685 next = next->vm_next;
100686
100687+#ifdef CONFIG_PAX_SEGMEXEC
100688+ if (prev)
100689+ prev_m = pax_find_mirror_vma(prev);
100690+ if (area)
100691+ area_m = pax_find_mirror_vma(area);
100692+ if (next)
100693+ next_m = pax_find_mirror_vma(next);
100694+#endif
100695+
100696 /*
100697 * Can it merge with the predecessor?
100698 */
100699@@ -1046,9 +1102,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
100700 /* cases 1, 6 */
100701 err = vma_adjust(prev, prev->vm_start,
100702 next->vm_end, prev->vm_pgoff, NULL);
100703- } else /* cases 2, 5, 7 */
100704+
100705+#ifdef CONFIG_PAX_SEGMEXEC
100706+ if (!err && prev_m)
100707+ err = vma_adjust(prev_m, prev_m->vm_start,
100708+ next_m->vm_end, prev_m->vm_pgoff, NULL);
100709+#endif
100710+
100711+ } else { /* cases 2, 5, 7 */
100712 err = vma_adjust(prev, prev->vm_start,
100713 end, prev->vm_pgoff, NULL);
100714+
100715+#ifdef CONFIG_PAX_SEGMEXEC
100716+ if (!err && prev_m)
100717+ err = vma_adjust(prev_m, prev_m->vm_start,
100718+ end_m, prev_m->vm_pgoff, NULL);
100719+#endif
100720+
100721+ }
100722 if (err)
100723 return NULL;
100724 khugepaged_enter_vma_merge(prev);
100725@@ -1062,12 +1133,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
100726 mpol_equal(policy, vma_policy(next)) &&
100727 can_vma_merge_before(next, vm_flags,
100728 anon_vma, file, pgoff+pglen)) {
100729- if (prev && addr < prev->vm_end) /* case 4 */
100730+ if (prev && addr < prev->vm_end) { /* case 4 */
100731 err = vma_adjust(prev, prev->vm_start,
100732 addr, prev->vm_pgoff, NULL);
100733- else /* cases 3, 8 */
100734+
100735+#ifdef CONFIG_PAX_SEGMEXEC
100736+ if (!err && prev_m)
100737+ err = vma_adjust(prev_m, prev_m->vm_start,
100738+ addr_m, prev_m->vm_pgoff, NULL);
100739+#endif
100740+
100741+ } else { /* cases 3, 8 */
100742 err = vma_adjust(area, addr, next->vm_end,
100743 next->vm_pgoff - pglen, NULL);
100744+
100745+#ifdef CONFIG_PAX_SEGMEXEC
100746+ if (!err && area_m)
100747+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
100748+ next_m->vm_pgoff - pglen, NULL);
100749+#endif
100750+
100751+ }
100752 if (err)
100753 return NULL;
100754 khugepaged_enter_vma_merge(area);
100755@@ -1176,8 +1262,10 @@ none:
100756 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
100757 struct file *file, long pages)
100758 {
100759- const unsigned long stack_flags
100760- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
100761+
100762+#ifdef CONFIG_PAX_RANDMMAP
100763+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
100764+#endif
100765
100766 mm->total_vm += pages;
100767
100768@@ -1185,7 +1273,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
100769 mm->shared_vm += pages;
100770 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
100771 mm->exec_vm += pages;
100772- } else if (flags & stack_flags)
100773+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
100774 mm->stack_vm += pages;
100775 }
100776 #endif /* CONFIG_PROC_FS */
100777@@ -1215,6 +1303,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
100778 locked += mm->locked_vm;
100779 lock_limit = rlimit(RLIMIT_MEMLOCK);
100780 lock_limit >>= PAGE_SHIFT;
100781+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
100782 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
100783 return -EAGAIN;
100784 }
100785@@ -1241,7 +1330,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
100786 * (the exception is when the underlying filesystem is noexec
100787 	 * mounted, in which case we don't add PROT_EXEC.)
100788 */
100789- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
100790+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
100791 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
100792 prot |= PROT_EXEC;
100793
100794@@ -1267,7 +1356,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
100795 /* Obtain the address to map to. we verify (or select) it and ensure
100796 * that it represents a valid section of the address space.
100797 */
100798- addr = get_unmapped_area(file, addr, len, pgoff, flags);
100799+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
100800 if (addr & ~PAGE_MASK)
100801 return addr;
100802
100803@@ -1278,6 +1367,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
100804 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
100805 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
100806
100807+#ifdef CONFIG_PAX_MPROTECT
100808+ if (mm->pax_flags & MF_PAX_MPROTECT) {
100809+
100810+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
100811+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
100812+ mm->binfmt->handle_mmap)
100813+ mm->binfmt->handle_mmap(file);
100814+#endif
100815+
100816+#ifndef CONFIG_PAX_MPROTECT_COMPAT
100817+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
100818+ gr_log_rwxmmap(file);
100819+
100820+#ifdef CONFIG_PAX_EMUPLT
100821+ vm_flags &= ~VM_EXEC;
100822+#else
100823+ return -EPERM;
100824+#endif
100825+
100826+ }
100827+
100828+ if (!(vm_flags & VM_EXEC))
100829+ vm_flags &= ~VM_MAYEXEC;
100830+#else
100831+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
100832+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
100833+#endif
100834+ else
100835+ vm_flags &= ~VM_MAYWRITE;
100836+ }
100837+#endif
100838+
100839+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
100840+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
100841+ vm_flags &= ~VM_PAGEEXEC;
100842+#endif
100843+
100844 if (flags & MAP_LOCKED)
100845 if (!can_do_mlock())
100846 return -EPERM;
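The CONFIG_PAX_MPROTECT block above enforces W^X at mmap() time: a writable-plus-executable request is logged and either demoted (under CONFIG_PAX_EMUPLT) or refused outright, and the VM_MAY* bits are pruned so a later mprotect() can never re-add the missing permission. A userspace model of the non-COMPAT branch in its EMUPLT form (mprotect_policy() is a hypothetical name; flag values copied from the kernel's definitions):

    #include <stdio.h>

    #define VM_WRITE    0x00000002UL
    #define VM_EXEC     0x00000004UL
    #define VM_MAYWRITE 0x00000020UL
    #define VM_MAYEXEC  0x00000040UL

    static unsigned long mprotect_policy(unsigned long f)
    {
            if ((f & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
                    f &= ~VM_EXEC;    /* never writable+executable */
            if (!(f & VM_EXEC))
                    f &= ~VM_MAYEXEC; /* can never gain PROT_EXEC  */
            else
                    f &= ~VM_MAYWRITE;/* can never gain PROT_WRITE */
            return f;
    }

    int main(void)
    {
            /* W|X request ends up as plain writable: 0x22 */
            printf("%#lx\n", mprotect_policy(VM_WRITE | VM_EXEC |
                                             VM_MAYWRITE | VM_MAYEXEC));
            return 0;
    }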
100847@@ -1365,6 +1491,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
100848 vm_flags |= VM_NORESERVE;
100849 }
100850
100851+ if (!gr_acl_handle_mmap(file, prot))
100852+ return -EACCES;
100853+
100854 addr = mmap_region(file, addr, len, vm_flags, pgoff);
100855 if (!IS_ERR_VALUE(addr) &&
100856 ((vm_flags & VM_LOCKED) ||
100857@@ -1458,7 +1587,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
100858 vm_flags_t vm_flags = vma->vm_flags;
100859
100860 /* If it was private or non-writable, the write bit is already clear */
100861- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
100862+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
100863 return 0;
100864
100865 /* The backer wishes to know when pages are first written to? */
100866@@ -1504,7 +1633,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
100867 struct rb_node **rb_link, *rb_parent;
100868 unsigned long charged = 0;
100869
100870+#ifdef CONFIG_PAX_SEGMEXEC
100871+ struct vm_area_struct *vma_m = NULL;
100872+#endif
100873+
100874+ /*
100875+ * mm->mmap_sem is required to protect against another thread
100876+ * changing the mappings in case we sleep.
100877+ */
100878+ verify_mm_writelocked(mm);
100879+
100880 /* Check against address space limit. */
100881+
100882+#ifdef CONFIG_PAX_RANDMMAP
100883+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
100884+#endif
100885+
100886 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
100887 unsigned long nr_pages;
100888
100889@@ -1523,11 +1667,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
100890
100891 /* Clear old maps */
100892 error = -ENOMEM;
100893-munmap_back:
100894 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
100895 if (do_munmap(mm, addr, len))
100896 return -ENOMEM;
100897- goto munmap_back;
100898+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
100899 }
100900
100901 /*
100902@@ -1558,6 +1701,16 @@ munmap_back:
100903 goto unacct_error;
100904 }
100905
100906+#ifdef CONFIG_PAX_SEGMEXEC
100907+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
100908+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
100909+ if (!vma_m) {
100910+ error = -ENOMEM;
100911+ goto free_vma;
100912+ }
100913+ }
100914+#endif
100915+
100916 vma->vm_mm = mm;
100917 vma->vm_start = addr;
100918 vma->vm_end = addr + len;
100919@@ -1577,6 +1730,13 @@ munmap_back:
100920 if (error)
100921 goto unmap_and_free_vma;
100922
100923+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
100924+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
100925+ vma->vm_flags |= VM_PAGEEXEC;
100926+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
100927+ }
100928+#endif
100929+
100930 /* Can addr have changed??
100931 *
100932 * Answer: Yes, several device drivers can do it in their
100933@@ -1610,6 +1770,12 @@ munmap_back:
100934 }
100935
100936 vma_link(mm, vma, prev, rb_link, rb_parent);
100937+
100938+#ifdef CONFIG_PAX_SEGMEXEC
100939+ if (vma_m)
100940+ BUG_ON(pax_mirror_vma(vma_m, vma));
100941+#endif
100942+
100943 /* Once vma denies write, undo our temporary denial count */
100944 if (vm_flags & VM_DENYWRITE)
100945 allow_write_access(file);
100946@@ -1618,6 +1784,7 @@ out:
100947 perf_event_mmap(vma);
100948
100949 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
100950+ track_exec_limit(mm, addr, addr + len, vm_flags);
100951 if (vm_flags & VM_LOCKED) {
100952 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
100953 vma == get_gate_vma(current->mm)))
100954@@ -1650,6 +1817,12 @@ unmap_and_free_vma:
100955 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
100956 charged = 0;
100957 free_vma:
100958+
100959+#ifdef CONFIG_PAX_SEGMEXEC
100960+ if (vma_m)
100961+ kmem_cache_free(vm_area_cachep, vma_m);
100962+#endif
100963+
100964 kmem_cache_free(vm_area_cachep, vma);
100965 unacct_error:
100966 if (charged)
100967@@ -1657,7 +1830,63 @@ unacct_error:
100968 return error;
100969 }
100970
100971-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
100972+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
100973+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
100974+{
100975+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
100976+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
100977+
100978+ return 0;
100979+}
100980+#endif
100981+
100982+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
100983+{
100984+ if (!vma) {
100985+#ifdef CONFIG_STACK_GROWSUP
100986+ if (addr > sysctl_heap_stack_gap)
100987+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
100988+ else
100989+ vma = find_vma(current->mm, 0);
100990+ if (vma && (vma->vm_flags & VM_GROWSUP))
100991+ return false;
100992+#endif
100993+ return true;
100994+ }
100995+
100996+ if (addr + len > vma->vm_start)
100997+ return false;
100998+
100999+ if (vma->vm_flags & VM_GROWSDOWN)
101000+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
101001+#ifdef CONFIG_STACK_GROWSUP
101002+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
101003+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
101004+#endif
101005+ else if (offset)
101006+ return offset <= vma->vm_start - addr - len;
101007+
101008+ return true;
101009+}
101010+
101011+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
101012+{
101013+ if (vma->vm_start < len)
101014+ return -ENOMEM;
101015+
101016+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
101017+ if (offset <= vma->vm_start - len)
101018+ return vma->vm_start - len - offset;
101019+ else
101020+ return -ENOMEM;
101021+ }
101022+
101023+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
101024+ return vma->vm_start - len - sysctl_heap_stack_gap;
101025+ return -ENOMEM;
101026+}
101027+
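check_heap_stack_gap() and skip_heap_stack_gap() above implement the gap promised back in mm/memory.c: a candidate range may not end closer than sysctl_heap_stack_gap below a downward-growing stack (nor, with CONFIG_STACK_GROWSUP, start closer than that above an upward-growing one). A runnable model of the VM_GROWSDOWN branch, using the 64KB default from earlier in this file; gap_ok() and the addresses are illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    #define GAP (64UL * 1024)   /* sysctl_heap_stack_gap default */

    static bool gap_ok(unsigned long stack_start, unsigned long addr,
                       unsigned long len)
    {
            if (addr + len > stack_start)
                    return false;                   /* overlaps stack */
            return GAP <= stack_start - addr - len; /* >= 64KB left   */
    }

    int main(void)
    {
            /* stack vma begins at 0xbf000000 and grows down */
            printf("%d\n", gap_ok(0xbf000000UL, 0xbeffb000UL, 0x4000)); /* 0: 4KB left */
            printf("%d\n", gap_ok(0xbf000000UL, 0xbefec000UL, 0x4000)); /* 1: 64KB left */
            return 0;
    }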
101028+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
101029 {
101030 /*
101031 * We implement the search by looking for an rbtree node that
101032@@ -1705,11 +1934,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
101033 }
101034 }
101035
101036- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
101037+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
101038 check_current:
101039 /* Check if current node has a suitable gap */
101040 if (gap_start > high_limit)
101041 return -ENOMEM;
101042+
101043+ if (gap_end - gap_start > info->threadstack_offset)
101044+ gap_start += info->threadstack_offset;
101045+ else
101046+ gap_start = gap_end;
101047+
101048+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
101049+ if (gap_end - gap_start > sysctl_heap_stack_gap)
101050+ gap_start += sysctl_heap_stack_gap;
101051+ else
101052+ gap_start = gap_end;
101053+ }
101054+ if (vma->vm_flags & VM_GROWSDOWN) {
101055+ if (gap_end - gap_start > sysctl_heap_stack_gap)
101056+ gap_end -= sysctl_heap_stack_gap;
101057+ else
101058+ gap_end = gap_start;
101059+ }
101060 if (gap_end >= low_limit && gap_end - gap_start >= length)
101061 goto found;
101062
101063@@ -1759,7 +2006,7 @@ found:
101064 return gap_start;
101065 }
101066
101067-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
101068+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
101069 {
101070 struct mm_struct *mm = current->mm;
101071 struct vm_area_struct *vma;
101072@@ -1813,6 +2060,24 @@ check_current:
101073 gap_end = vma->vm_start;
101074 if (gap_end < low_limit)
101075 return -ENOMEM;
101076+
101077+ if (gap_end - gap_start > info->threadstack_offset)
101078+ gap_end -= info->threadstack_offset;
101079+ else
101080+ gap_end = gap_start;
101081+
101082+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
101083+ if (gap_end - gap_start > sysctl_heap_stack_gap)
101084+ gap_start += sysctl_heap_stack_gap;
101085+ else
101086+ gap_start = gap_end;
101087+ }
101088+ if (vma->vm_flags & VM_GROWSDOWN) {
101089+ if (gap_end - gap_start > sysctl_heap_stack_gap)
101090+ gap_end -= sysctl_heap_stack_gap;
101091+ else
101092+ gap_end = gap_start;
101093+ }
101094 if (gap_start <= high_limit && gap_end - gap_start >= length)
101095 goto found;
101096
101097@@ -1876,6 +2141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
101098 struct mm_struct *mm = current->mm;
101099 struct vm_area_struct *vma;
101100 struct vm_unmapped_area_info info;
101101+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
101102
101103 if (len > TASK_SIZE - mmap_min_addr)
101104 return -ENOMEM;
101105@@ -1883,11 +2149,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
101106 if (flags & MAP_FIXED)
101107 return addr;
101108
101109+#ifdef CONFIG_PAX_RANDMMAP
101110+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
101111+#endif
101112+
101113 if (addr) {
101114 addr = PAGE_ALIGN(addr);
101115 vma = find_vma(mm, addr);
101116 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
101117- (!vma || addr + len <= vma->vm_start))
101118+ check_heap_stack_gap(vma, addr, len, offset))
101119 return addr;
101120 }
101121
101122@@ -1896,6 +2166,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
101123 info.low_limit = mm->mmap_base;
101124 info.high_limit = TASK_SIZE;
101125 info.align_mask = 0;
101126+ info.threadstack_offset = offset;
101127 return vm_unmapped_area(&info);
101128 }
101129 #endif
101130@@ -1914,6 +2185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
101131 struct mm_struct *mm = current->mm;
101132 unsigned long addr = addr0;
101133 struct vm_unmapped_area_info info;
101134+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
101135
101136 /* requested length too big for entire address space */
101137 if (len > TASK_SIZE - mmap_min_addr)
101138@@ -1922,12 +2194,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
101139 if (flags & MAP_FIXED)
101140 return addr;
101141
101142+#ifdef CONFIG_PAX_RANDMMAP
101143+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
101144+#endif
101145+
101146 /* requesting a specific address */
101147 if (addr) {
101148 addr = PAGE_ALIGN(addr);
101149 vma = find_vma(mm, addr);
101150 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
101151- (!vma || addr + len <= vma->vm_start))
101152+ check_heap_stack_gap(vma, addr, len, offset))
101153 return addr;
101154 }
101155
101156@@ -1936,6 +2212,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
101157 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
101158 info.high_limit = mm->mmap_base;
101159 info.align_mask = 0;
101160+ info.threadstack_offset = offset;
101161 addr = vm_unmapped_area(&info);
101162
101163 /*
101164@@ -1948,6 +2225,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
101165 VM_BUG_ON(addr != -ENOMEM);
101166 info.flags = 0;
101167 info.low_limit = TASK_UNMAPPED_BASE;
101168+
101169+#ifdef CONFIG_PAX_RANDMMAP
101170+ if (mm->pax_flags & MF_PAX_RANDMMAP)
101171+ info.low_limit += mm->delta_mmap;
101172+#endif
101173+
101174 info.high_limit = TASK_SIZE;
101175 addr = vm_unmapped_area(&info);
101176 }
101177@@ -2048,6 +2331,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
101178 return vma;
101179 }
101180
101181+#ifdef CONFIG_PAX_SEGMEXEC
101182+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
101183+{
101184+ struct vm_area_struct *vma_m;
101185+
101186+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
101187+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
101188+ BUG_ON(vma->vm_mirror);
101189+ return NULL;
101190+ }
101191+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
101192+ vma_m = vma->vm_mirror;
101193+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
101194+ BUG_ON(vma->vm_file != vma_m->vm_file);
101195+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
101196+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
101197+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
101198+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
101199+ return vma_m;
101200+}
101201+#endif
101202+
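pax_find_mirror_vma() is the single lookup the rest of the patch leans on, and its BUG_ONs double as the mirror invariants: same file, size and pgoff, a shared anon_vma root, and flags differing only in the write/account/lock bits. Any state change made to a lower-half vma must be replayed on its mirror, as the vm_insert_page() hunk in mm/memory.c does by hand; a sketch of that pattern as a hypothetical helper:

    static void vma_set_flags_mirrored(struct vm_area_struct *vma,
                                       vm_flags_t flags)
    {
            struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);

            vma->vm_flags |= flags;
            if (vma_m)                  /* keep the exec view in step */
                    vma_m->vm_flags |= flags;
    }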
101203 /*
101204 * Verify that the stack growth is acceptable and
101205 * update accounting. This is shared with both the
101206@@ -2064,6 +2369,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
101207 return -ENOMEM;
101208
101209 /* Stack limit test */
101210+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
101211 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
101212 return -ENOMEM;
101213
101214@@ -2074,6 +2380,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
101215 locked = mm->locked_vm + grow;
101216 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
101217 limit >>= PAGE_SHIFT;
101218+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
101219 if (locked > limit && !capable(CAP_IPC_LOCK))
101220 return -ENOMEM;
101221 }
101222@@ -2103,37 +2410,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
101223 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
101224 * vma is the last one with address > vma->vm_end. Have to extend vma.
101225 */
101226+#ifndef CONFIG_IA64
101227+static
101228+#endif
101229 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
101230 {
101231 int error;
101232+ bool locknext;
101233
101234 if (!(vma->vm_flags & VM_GROWSUP))
101235 return -EFAULT;
101236
101237+ /* Also guard against wrapping around to address 0. */
101238+ if (address < PAGE_ALIGN(address+1))
101239+ address = PAGE_ALIGN(address+1);
101240+ else
101241+ return -ENOMEM;
101242+
101243 /*
101244 * We must make sure the anon_vma is allocated
101245 * so that the anon_vma locking is not a noop.
101246 */
101247 if (unlikely(anon_vma_prepare(vma)))
101248 return -ENOMEM;
101249+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
101250+ if (locknext && anon_vma_prepare(vma->vm_next))
101251+ return -ENOMEM;
101252 vma_lock_anon_vma(vma);
101253+ if (locknext)
101254+ vma_lock_anon_vma(vma->vm_next);
101255
101256 /*
101257 * vma->vm_start/vm_end cannot change under us because the caller
101258 * is required to hold the mmap_sem in read mode. We need the
101259- * anon_vma lock to serialize against concurrent expand_stacks.
101260- * Also guard against wrapping around to address 0.
101261+ * anon_vma locks to serialize against concurrent expand_stacks
101262+ * and expand_upwards.
101263 */
101264- if (address < PAGE_ALIGN(address+4))
101265- address = PAGE_ALIGN(address+4);
101266- else {
101267- vma_unlock_anon_vma(vma);
101268- return -ENOMEM;
101269- }
101270 error = 0;
101271
101272 /* Somebody else might have raced and expanded it already */
101273- if (address > vma->vm_end) {
101274+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
101275+ error = -ENOMEM;
101276+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
101277 unsigned long size, grow;
101278
101279 size = address - vma->vm_start;
101280@@ -2168,6 +2486,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
101281 }
101282 }
101283 }
101284+ if (locknext)
101285+ vma_unlock_anon_vma(vma->vm_next);
101286 vma_unlock_anon_vma(vma);
101287 khugepaged_enter_vma_merge(vma);
101288 validate_mm(vma->vm_mm);
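expand_upwards() now refuses growth that would come within sysctl_heap_stack_gap of a mapped next vma, takes the next vma's anon_vma lock when that vma can grow down toward us, and replaces the old PAGE_ALIGN(address+4) overflow check with a PAGE_ALIGN(address+1) wrap guard taken before any locking. The guard, worked through in a runnable snippet (4KB pages assumed):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long ok  = 0x7fff0000UL;
            unsigned long top = ~0UL - 100;   /* inside the last page */

            /* rounds up to the next boundary: growth proceeds there */
            printf("%#lx\n", PAGE_ALIGN(ok + 1));  /* 0x7fff1000 */
            /* wraps to 0, which is not above address: -ENOMEM */
            printf("%#lx\n", PAGE_ALIGN(top + 1)); /* 0 */
            return 0;
    }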
101289@@ -2182,6 +2502,8 @@ int expand_downwards(struct vm_area_struct *vma,
101290 unsigned long address)
101291 {
101292 int error;
101293+ bool lockprev = false;
101294+ struct vm_area_struct *prev;
101295
101296 /*
101297 * We must make sure the anon_vma is allocated
101298@@ -2195,6 +2517,15 @@ int expand_downwards(struct vm_area_struct *vma,
101299 if (error)
101300 return error;
101301
101302+ prev = vma->vm_prev;
101303+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
101304+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
101305+#endif
101306+ if (lockprev && anon_vma_prepare(prev))
101307+ return -ENOMEM;
101308+ if (lockprev)
101309+ vma_lock_anon_vma(prev);
101310+
101311 vma_lock_anon_vma(vma);
101312
101313 /*
101314@@ -2204,9 +2535,17 @@ int expand_downwards(struct vm_area_struct *vma,
101315 */
101316
101317 /* Somebody else might have raced and expanded it already */
101318- if (address < vma->vm_start) {
101319+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
101320+ error = -ENOMEM;
101321+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
101322 unsigned long size, grow;
101323
101324+#ifdef CONFIG_PAX_SEGMEXEC
101325+ struct vm_area_struct *vma_m;
101326+
101327+ vma_m = pax_find_mirror_vma(vma);
101328+#endif
101329+
101330 size = vma->vm_end - address;
101331 grow = (vma->vm_start - address) >> PAGE_SHIFT;
101332
101333@@ -2231,13 +2570,27 @@ int expand_downwards(struct vm_area_struct *vma,
101334 vma->vm_pgoff -= grow;
101335 anon_vma_interval_tree_post_update_vma(vma);
101336 vma_gap_update(vma);
101337+
101338+#ifdef CONFIG_PAX_SEGMEXEC
101339+ if (vma_m) {
101340+ anon_vma_interval_tree_pre_update_vma(vma_m);
101341+ vma_m->vm_start -= grow << PAGE_SHIFT;
101342+ vma_m->vm_pgoff -= grow;
101343+ anon_vma_interval_tree_post_update_vma(vma_m);
101344+ vma_gap_update(vma_m);
101345+ }
101346+#endif
101347+
101348 spin_unlock(&vma->vm_mm->page_table_lock);
101349
101350+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
101351 perf_event_mmap(vma);
101352 }
101353 }
101354 }
101355 vma_unlock_anon_vma(vma);
101356+ if (lockprev)
101357+ vma_unlock_anon_vma(prev);
101358 khugepaged_enter_vma_merge(vma);
101359 validate_mm(vma->vm_mm);
101360 return error;
101361@@ -2335,6 +2688,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
101362 do {
101363 long nrpages = vma_pages(vma);
101364
101365+#ifdef CONFIG_PAX_SEGMEXEC
101366+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
101367+ vma = remove_vma(vma);
101368+ continue;
101369+ }
101370+#endif
101371+
101372 if (vma->vm_flags & VM_ACCOUNT)
101373 nr_accounted += nrpages;
101374 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
101375@@ -2379,6 +2739,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
101376 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
101377 vma->vm_prev = NULL;
101378 do {
101379+
101380+#ifdef CONFIG_PAX_SEGMEXEC
101381+ if (vma->vm_mirror) {
101382+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
101383+ vma->vm_mirror->vm_mirror = NULL;
101384+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
101385+ vma->vm_mirror = NULL;
101386+ }
101387+#endif
101388+
101389 vma_rb_erase(vma, &mm->mm_rb);
101390 mm->map_count--;
101391 tail_vma = vma;
101392@@ -2406,14 +2776,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
101393 struct vm_area_struct *new;
101394 int err = -ENOMEM;
101395
101396+#ifdef CONFIG_PAX_SEGMEXEC
101397+ struct vm_area_struct *vma_m, *new_m = NULL;
101398+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
101399+#endif
101400+
101401 if (is_vm_hugetlb_page(vma) && (addr &
101402 ~(huge_page_mask(hstate_vma(vma)))))
101403 return -EINVAL;
101404
101405+#ifdef CONFIG_PAX_SEGMEXEC
101406+ vma_m = pax_find_mirror_vma(vma);
101407+#endif
101408+
101409 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
101410 if (!new)
101411 goto out_err;
101412
101413+#ifdef CONFIG_PAX_SEGMEXEC
101414+ if (vma_m) {
101415+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
101416+ if (!new_m) {
101417+ kmem_cache_free(vm_area_cachep, new);
101418+ goto out_err;
101419+ }
101420+ }
101421+#endif
101422+
101423 /* most fields are the same, copy all, and then fixup */
101424 *new = *vma;
101425
101426@@ -2426,6 +2815,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
101427 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
101428 }
101429
101430+#ifdef CONFIG_PAX_SEGMEXEC
101431+ if (vma_m) {
101432+ *new_m = *vma_m;
101433+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
101434+ new_m->vm_mirror = new;
101435+ new->vm_mirror = new_m;
101436+
101437+ if (new_below)
101438+ new_m->vm_end = addr_m;
101439+ else {
101440+ new_m->vm_start = addr_m;
101441+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
101442+ }
101443+ }
101444+#endif
101445+
101446 err = vma_dup_policy(vma, new);
101447 if (err)
101448 goto out_free_vma;
101449@@ -2445,6 +2850,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
101450 else
101451 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
101452
101453+#ifdef CONFIG_PAX_SEGMEXEC
101454+ if (!err && vma_m) {
101455+ struct mempolicy *pol = vma_policy(new);
101456+
101457+ if (anon_vma_clone(new_m, vma_m))
101458+ goto out_free_mpol;
101459+
101460+ mpol_get(pol);
101461+ set_vma_policy(new_m, pol);
101462+
101463+ if (new_m->vm_file)
101464+ get_file(new_m->vm_file);
101465+
101466+ if (new_m->vm_ops && new_m->vm_ops->open)
101467+ new_m->vm_ops->open(new_m);
101468+
101469+ if (new_below)
101470+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
101471+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
101472+ else
101473+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
101474+
101475+ if (err) {
101476+ if (new_m->vm_ops && new_m->vm_ops->close)
101477+ new_m->vm_ops->close(new_m);
101478+ if (new_m->vm_file)
101479+ fput(new_m->vm_file);
101480+ mpol_put(pol);
101481+ }
101482+ }
101483+#endif
101484+
101485 /* Success. */
101486 if (!err)
101487 return 0;
101488@@ -2454,10 +2891,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
101489 new->vm_ops->close(new);
101490 if (new->vm_file)
101491 fput(new->vm_file);
101492- unlink_anon_vmas(new);
101493 out_free_mpol:
101494 mpol_put(vma_policy(new));
101495 out_free_vma:
101496+
101497+#ifdef CONFIG_PAX_SEGMEXEC
101498+ if (new_m) {
101499+ unlink_anon_vmas(new_m);
101500+ kmem_cache_free(vm_area_cachep, new_m);
101501+ }
101502+#endif
101503+
101504+ unlink_anon_vmas(new);
101505 kmem_cache_free(vm_area_cachep, new);
101506 out_err:
101507 return err;
101508@@ -2470,6 +2915,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
101509 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
101510 unsigned long addr, int new_below)
101511 {
101512+
101513+#ifdef CONFIG_PAX_SEGMEXEC
101514+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
101515+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
101516+ if (mm->map_count >= sysctl_max_map_count-1)
101517+ return -ENOMEM;
101518+ } else
101519+#endif
101520+
101521 if (mm->map_count >= sysctl_max_map_count)
101522 return -ENOMEM;
101523
101524@@ -2481,11 +2935,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
101525 * work. This now handles partial unmappings.
101526 * Jeremy Fitzhardinge <jeremy@goop.org>
101527 */
101528+#ifdef CONFIG_PAX_SEGMEXEC
101529 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
101530 {
101531+ int ret = __do_munmap(mm, start, len);
101532+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
101533+ return ret;
101534+
101535+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
101536+}
101537+
101538+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
101539+#else
101540+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
101541+#endif
101542+{
101543 unsigned long end;
101544 struct vm_area_struct *vma, *prev, *last;
101545
101546+ /*
101547+ * mm->mmap_sem is required to protect against another thread
101548+ * changing the mappings in case we sleep.
101549+ */
101550+ verify_mm_writelocked(mm);
101551+
101552 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
101553 return -EINVAL;
101554
101555@@ -2560,6 +3033,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
101556 /* Fix up all other VM information */
101557 remove_vma_list(mm, vma);
101558
101559+ track_exec_limit(mm, start, end, 0UL);
101560+
101561 return 0;
101562 }
101563
101564@@ -2568,6 +3043,13 @@ int vm_munmap(unsigned long start, size_t len)
101565 int ret;
101566 struct mm_struct *mm = current->mm;
101567
101568+
101569+#ifdef CONFIG_PAX_SEGMEXEC
101570+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
101571+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
101572+ return -EINVAL;
101573+#endif
101574+
101575 down_write(&mm->mmap_sem);
101576 ret = do_munmap(mm, start, len);
101577 up_write(&mm->mmap_sem);
101578@@ -2581,16 +3063,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
101579 return vm_munmap(addr, len);
101580 }
101581
101582-static inline void verify_mm_writelocked(struct mm_struct *mm)
101583-{
101584-#ifdef CONFIG_DEBUG_VM
101585- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
101586- WARN_ON(1);
101587- up_read(&mm->mmap_sem);
101588- }
101589-#endif
101590-}
101591-
101592 /*
101593 * this is really a simplified "do_mmap". it only handles
101594 * anonymous maps. eventually we may be able to do some
101595@@ -2604,6 +3076,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
101596 struct rb_node ** rb_link, * rb_parent;
101597 pgoff_t pgoff = addr >> PAGE_SHIFT;
101598 int error;
101599+ unsigned long charged;
101600
101601 len = PAGE_ALIGN(len);
101602 if (!len)
101603@@ -2611,10 +3084,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
101604
101605 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
101606
101607+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
101608+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
101609+ flags &= ~VM_EXEC;
101610+
101611+#ifdef CONFIG_PAX_MPROTECT
101612+ if (mm->pax_flags & MF_PAX_MPROTECT)
101613+ flags &= ~VM_MAYEXEC;
101614+#endif
101615+
101616+ }
101617+#endif
101618+
101619 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
101620 if (error & ~PAGE_MASK)
101621 return error;
101622
101623+ charged = len >> PAGE_SHIFT;
101624+
101625 error = mlock_future_check(mm, mm->def_flags, len);
101626 if (error)
101627 return error;
101628@@ -2628,21 +3115,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
101629 /*
101630 * Clear old maps. this also does some error checking for us
101631 */
101632- munmap_back:
101633 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
101634 if (do_munmap(mm, addr, len))
101635 return -ENOMEM;
101636- goto munmap_back;
101637+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
101638 }
101639
101640 /* Check against address space limits *after* clearing old maps... */
101641- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
101642+ if (!may_expand_vm(mm, charged))
101643 return -ENOMEM;
101644
101645 if (mm->map_count > sysctl_max_map_count)
101646 return -ENOMEM;
101647
101648- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
101649+ if (security_vm_enough_memory_mm(mm, charged))
101650 return -ENOMEM;
101651
101652 /* Can we just expand an old private anonymous mapping? */
101653@@ -2656,7 +3142,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
101654 */
101655 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
101656 if (!vma) {
101657- vm_unacct_memory(len >> PAGE_SHIFT);
101658+ vm_unacct_memory(charged);
101659 return -ENOMEM;
101660 }
101661
101662@@ -2670,10 +3156,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
101663 vma_link(mm, vma, prev, rb_link, rb_parent);
101664 out:
101665 perf_event_mmap(vma);
101666- mm->total_vm += len >> PAGE_SHIFT;
101667+ mm->total_vm += charged;
101668 if (flags & VM_LOCKED)
101669- mm->locked_vm += (len >> PAGE_SHIFT);
101670+ mm->locked_vm += charged;
101671 vma->vm_flags |= VM_SOFTDIRTY;
101672+ track_exec_limit(mm, addr, addr + len, flags);
101673 return addr;
101674 }
101675
101676@@ -2735,6 +3222,7 @@ void exit_mmap(struct mm_struct *mm)
101677 while (vma) {
101678 if (vma->vm_flags & VM_ACCOUNT)
101679 nr_accounted += vma_pages(vma);
101680+ vma->vm_mirror = NULL;
101681 vma = remove_vma(vma);
101682 }
101683 vm_unacct_memory(nr_accounted);
101684@@ -2752,6 +3240,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
101685 struct vm_area_struct *prev;
101686 struct rb_node **rb_link, *rb_parent;
101687
101688+#ifdef CONFIG_PAX_SEGMEXEC
101689+ struct vm_area_struct *vma_m = NULL;
101690+#endif
101691+
101692+ if (security_mmap_addr(vma->vm_start))
101693+ return -EPERM;
101694+
101695 /*
101696 * The vm_pgoff of a purely anonymous vma should be irrelevant
101697 * until its first write fault, when page's anon_vma and index
101698@@ -2775,7 +3270,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
101699 security_vm_enough_memory_mm(mm, vma_pages(vma)))
101700 return -ENOMEM;
101701
101702+#ifdef CONFIG_PAX_SEGMEXEC
101703+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
101704+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
101705+ if (!vma_m)
101706+ return -ENOMEM;
101707+ }
101708+#endif
101709+
101710 vma_link(mm, vma, prev, rb_link, rb_parent);
101711+
101712+#ifdef CONFIG_PAX_SEGMEXEC
101713+ if (vma_m)
101714+ BUG_ON(pax_mirror_vma(vma_m, vma));
101715+#endif
101716+
101717 return 0;
101718 }
101719
101720@@ -2794,6 +3303,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
101721 struct rb_node **rb_link, *rb_parent;
101722 bool faulted_in_anon_vma = true;
101723
101724+ BUG_ON(vma->vm_mirror);
101725+
101726 /*
101727 * If anonymous vma has not yet been faulted, update new pgoff
101728 * to match new location, to increase its chance of merging.
101729@@ -2858,6 +3369,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
101730 return NULL;
101731 }
101732
101733+#ifdef CONFIG_PAX_SEGMEXEC
101734+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
101735+{
101736+ struct vm_area_struct *prev_m;
101737+ struct rb_node **rb_link_m, *rb_parent_m;
101738+ struct mempolicy *pol_m;
101739+
101740+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
101741+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
101742+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
101743+ *vma_m = *vma;
101744+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
101745+ if (anon_vma_clone(vma_m, vma))
101746+ return -ENOMEM;
101747+ pol_m = vma_policy(vma_m);
101748+ mpol_get(pol_m);
101749+ set_vma_policy(vma_m, pol_m);
101750+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
101751+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
101752+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
101753+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
101754+ if (vma_m->vm_file)
101755+ get_file(vma_m->vm_file);
101756+ if (vma_m->vm_ops && vma_m->vm_ops->open)
101757+ vma_m->vm_ops->open(vma_m);
101758+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
101759+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
101760+ vma_m->vm_mirror = vma;
101761+ vma->vm_mirror = vma_m;
101762+ return 0;
101763+}
101764+#endif
101765+
101766 /*
101767 * Return true if the calling process may expand its vm space by the passed
101768 * number of pages
101769@@ -2869,6 +3413,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
101770
101771 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
101772
101773+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
101774 if (cur + npages > lim)
101775 return 0;
101776 return 1;
101777@@ -2951,6 +3496,22 @@ static struct vm_area_struct *__install_special_mapping(
101778 vma->vm_start = addr;
101779 vma->vm_end = addr + len;
101780
101781+#ifdef CONFIG_PAX_MPROTECT
101782+ if (mm->pax_flags & MF_PAX_MPROTECT) {
101783+#ifndef CONFIG_PAX_MPROTECT_COMPAT
101784+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
101785+ return ERR_PTR(-EPERM);
101786+ if (!(vm_flags & VM_EXEC))
101787+ vm_flags &= ~VM_MAYEXEC;
101788+#else
101789+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
101790+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
101791+#endif
101792+ else
101793+ vm_flags &= ~VM_MAYWRITE;
101794+ }
101795+#endif
101796+
101797 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
101798 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
101799
101800diff --git a/mm/mprotect.c b/mm/mprotect.c
101801index c43d557..0b7ccd2 100644
101802--- a/mm/mprotect.c
101803+++ b/mm/mprotect.c
101804@@ -24,10 +24,18 @@
101805 #include <linux/migrate.h>
101806 #include <linux/perf_event.h>
101807 #include <linux/ksm.h>
101808+#include <linux/sched/sysctl.h>
101809+
101810+#ifdef CONFIG_PAX_MPROTECT
101811+#include <linux/elf.h>
101812+#include <linux/binfmts.h>
101813+#endif
101814+
101815 #include <asm/uaccess.h>
101816 #include <asm/pgtable.h>
101817 #include <asm/cacheflush.h>
101818 #include <asm/tlbflush.h>
101819+#include <asm/mmu_context.h>
101820
101821 #ifndef pgprot_modify
101822 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
101823@@ -256,6 +264,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
101824 return pages;
101825 }
101826
101827+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
101828+/* called while holding the mmap semaphore for writing, except during stack expansion */
101829+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
101830+{
101831+ unsigned long oldlimit, newlimit = 0UL;
101832+
101833+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
101834+ return;
101835+
101836+ spin_lock(&mm->page_table_lock);
101837+ oldlimit = mm->context.user_cs_limit;
101838+ if ((prot & VM_EXEC) && oldlimit < end)
101839+ /* USER_CS limit moved up */
101840+ newlimit = end;
101841+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
101842+ /* USER_CS limit moved down */
101843+ newlimit = start;
101844+
101845+ if (newlimit) {
101846+ mm->context.user_cs_limit = newlimit;
101847+
101848+#ifdef CONFIG_SMP
101849+ wmb();
101850+ cpus_clear(mm->context.cpu_user_cs_mask);
101851+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
101852+#endif
101853+
101854+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
101855+ }
101856+ spin_unlock(&mm->page_table_lock);
101857+ if (newlimit == end) {
101858+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
101859+
101860+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
101861+ if (is_vm_hugetlb_page(vma))
101862+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
101863+ else
101864+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
101865+ }
101866+}
101867+#endif
101868+
101869 int
101870 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
101871 unsigned long start, unsigned long end, unsigned long newflags)
101872@@ -268,11 +318,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
101873 int error;
101874 int dirty_accountable = 0;
101875
101876+#ifdef CONFIG_PAX_SEGMEXEC
101877+ struct vm_area_struct *vma_m = NULL;
101878+ unsigned long start_m, end_m;
101879+
101880+ start_m = start + SEGMEXEC_TASK_SIZE;
101881+ end_m = end + SEGMEXEC_TASK_SIZE;
101882+#endif
101883+
101884 if (newflags == oldflags) {
101885 *pprev = vma;
101886 return 0;
101887 }
101888
101889+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
101890+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
101891+
101892+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
101893+ return -ENOMEM;
101894+
101895+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
101896+ return -ENOMEM;
101897+ }
101898+
101899 /*
101900 * If we make a private mapping writable we increase our commit;
101901 * but (without finer accounting) cannot reduce our commit if we
101902@@ -289,6 +357,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
101903 }
101904 }
101905
101906+#ifdef CONFIG_PAX_SEGMEXEC
101907+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
101908+ if (start != vma->vm_start) {
101909+ error = split_vma(mm, vma, start, 1);
101910+ if (error)
101911+ goto fail;
101912+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
101913+ *pprev = (*pprev)->vm_next;
101914+ }
101915+
101916+ if (end != vma->vm_end) {
101917+ error = split_vma(mm, vma, end, 0);
101918+ if (error)
101919+ goto fail;
101920+ }
101921+
101922+ if (pax_find_mirror_vma(vma)) {
101923+ error = __do_munmap(mm, start_m, end_m - start_m);
101924+ if (error)
101925+ goto fail;
101926+ } else {
101927+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
101928+ if (!vma_m) {
101929+ error = -ENOMEM;
101930+ goto fail;
101931+ }
101932+ vma->vm_flags = newflags;
101933+ error = pax_mirror_vma(vma_m, vma);
101934+ if (error) {
101935+ vma->vm_flags = oldflags;
101936+ goto fail;
101937+ }
101938+ }
101939+ }
101940+#endif
101941+
101942 /*
101943 * First try to merge with previous and/or next vma.
101944 */
101945@@ -319,9 +423,21 @@ success:
101946 * vm_flags and vm_page_prot are protected by the mmap_sem
101947 * held in write mode.
101948 */
101949+
101950+#ifdef CONFIG_PAX_SEGMEXEC
101951+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
101952+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
101953+#endif
101954+
101955 vma->vm_flags = newflags;
101956+
101957+#ifdef CONFIG_PAX_MPROTECT
101958+ if (mm->binfmt && mm->binfmt->handle_mprotect)
101959+ mm->binfmt->handle_mprotect(vma, newflags);
101960+#endif
101961+
101962 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
101963- vm_get_page_prot(newflags));
101964+ vm_get_page_prot(vma->vm_flags));
101965
101966 if (vma_wants_writenotify(vma)) {
101967 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
101968@@ -360,6 +476,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
101969 end = start + len;
101970 if (end <= start)
101971 return -ENOMEM;
101972+
101973+#ifdef CONFIG_PAX_SEGMEXEC
101974+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
101975+ if (end > SEGMEXEC_TASK_SIZE)
101976+ return -EINVAL;
101977+ } else
101978+#endif
101979+
101980+ if (end > TASK_SIZE)
101981+ return -EINVAL;
101982+
101983 if (!arch_validate_prot(prot))
101984 return -EINVAL;
101985
101986@@ -367,7 +494,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
101987 /*
101988 * Does the application expect PROT_READ to imply PROT_EXEC:
101989 */
101990- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
101991+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
101992 prot |= PROT_EXEC;
101993
101994 vm_flags = calc_vm_prot_bits(prot);
101995@@ -399,6 +526,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
101996 if (start > vma->vm_start)
101997 prev = vma;
101998
101999+#ifdef CONFIG_PAX_MPROTECT
102000+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
102001+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
102002+#endif
102003+
102004 for (nstart = start ; ; ) {
102005 unsigned long newflags;
102006
102007@@ -409,6 +541,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
102008
102009 /* newflags >> 4 shift VM_MAY% in place of VM_% */
102010 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
102011+ if (prot & (PROT_WRITE | PROT_EXEC))
102012+ gr_log_rwxmprotect(vma);
102013+
102014+ error = -EACCES;
102015+ goto out;
102016+ }
102017+
102018+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
102019 error = -EACCES;
102020 goto out;
102021 }
102022@@ -423,6 +563,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
102023 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
102024 if (error)
102025 goto out;
102026+
102027+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
102028+
102029 nstart = tmp;
102030
102031 if (nstart < prev->vm_end)
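
track_exec_limit(), added above under CONFIG_ARCH_TRACK_EXEC_LIMIT, emulates NX on legacy x86 by keeping the USER_CS segment limit just above the highest executable mapping. Its raise/lower rule is compact enough to lift out; the sketch below restates it standalone, purely for illustration, with an assumed VM_EXEC bit value.

/* Illustrative sketch only: the limit-update rule used by track_exec_limit().
 * The CS segment covers [0, user_cs_limit); it grows when an executable range
 * ends above the limit and shrinks when exec rights are dropped on a range
 * that straddles it. VM_EXEC's value is assumed here. */
#include <stdio.h>

#define VM_EXEC 0x4UL /* assumed, matches the usual kernel value */

static unsigned long new_cs_limit(unsigned long oldlimit, unsigned long start,
				  unsigned long end, unsigned long prot)
{
	if ((prot & VM_EXEC) && oldlimit < end)
		return end;	/* USER_CS limit moves up */
	if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
		return start;	/* USER_CS limit moves down */
	return oldlimit;	/* unchanged */
}

int main(void)
{
	/* mprotect(PROT_EXEC) above the current limit raises it ... */
	printf("%#lx\n", new_cs_limit(0x0a000000, 0x0b000000, 0x0c000000, VM_EXEC));
	/* ... and unmapping the topmost executable range lowers it. */
	printf("%#lx\n", new_cs_limit(0x0c000000, 0x0b000000, 0x0c000000, 0));
	return 0;
}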
102032diff --git a/mm/mremap.c b/mm/mremap.c
102033index 05f1180..c3cde48 100644
102034--- a/mm/mremap.c
102035+++ b/mm/mremap.c
102036@@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
102037 continue;
102038 pte = ptep_get_and_clear(mm, old_addr, old_pte);
102039 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
102040+
102041+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
102042+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
102043+ pte = pte_exprotect(pte);
102044+#endif
102045+
102046 pte = move_soft_dirty_pte(pte);
102047 set_pte_at(mm, new_addr, new_pte, pte);
102048 }
102049@@ -344,6 +350,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
102050 if (is_vm_hugetlb_page(vma))
102051 goto Einval;
102052
102053+#ifdef CONFIG_PAX_SEGMEXEC
102054+ if (pax_find_mirror_vma(vma))
102055+ goto Einval;
102056+#endif
102057+
102058 /* We can't remap across vm area boundaries */
102059 if (old_len > vma->vm_end - addr)
102060 goto Efault;
102061@@ -399,20 +410,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
102062 unsigned long ret = -EINVAL;
102063 unsigned long charged = 0;
102064 unsigned long map_flags;
102065+ unsigned long pax_task_size = TASK_SIZE;
102066
102067 if (new_addr & ~PAGE_MASK)
102068 goto out;
102069
102070- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
102071+#ifdef CONFIG_PAX_SEGMEXEC
102072+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
102073+ pax_task_size = SEGMEXEC_TASK_SIZE;
102074+#endif
102075+
102076+ pax_task_size -= PAGE_SIZE;
102077+
102078+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
102079 goto out;
102080
102081 /* Check if the location we're moving into overlaps the
102082 * old location at all, and fail if it does.
102083 */
102084- if ((new_addr <= addr) && (new_addr+new_len) > addr)
102085- goto out;
102086-
102087- if ((addr <= new_addr) && (addr+old_len) > new_addr)
102088+ if (addr + old_len > new_addr && new_addr + new_len > addr)
102089 goto out;
102090
102091 ret = do_munmap(mm, new_addr, new_len);
102092@@ -481,6 +497,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
102093 unsigned long ret = -EINVAL;
102094 unsigned long charged = 0;
102095 bool locked = false;
102096+ unsigned long pax_task_size = TASK_SIZE;
102097
102098 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
102099 return ret;
102100@@ -502,6 +519,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
102101 if (!new_len)
102102 return ret;
102103
102104+#ifdef CONFIG_PAX_SEGMEXEC
102105+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
102106+ pax_task_size = SEGMEXEC_TASK_SIZE;
102107+#endif
102108+
102109+ pax_task_size -= PAGE_SIZE;
102110+
102111+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
102112+ old_len > pax_task_size || addr > pax_task_size-old_len)
102113+ return ret;
102114+
102115 down_write(&current->mm->mmap_sem);
102116
102117 if (flags & MREMAP_FIXED) {
102118@@ -552,6 +580,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
102119 new_addr = addr;
102120 }
102121 ret = addr;
102122+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
102123 goto out;
102124 }
102125 }
102126@@ -575,7 +604,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
102127 goto out;
102128 }
102129
102130+ map_flags = vma->vm_flags;
102131 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
102132+ if (!(ret & ~PAGE_MASK)) {
102133+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
102134+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
102135+ }
102136 }
102137 out:
102138 if (ret & ~PAGE_MASK)
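
The mremap_to() hunk above folds two one-sided overlap tests into the single half-open interval-overlap predicate. For positive lengths the two forms reject exactly the same inputs; the sketch below checks this exhaustively over small made-up ranges.

/* Illustrative sketch only: the old pair of one-sided tests and the new
 * single predicate in mremap_to() are equivalent for nonzero lengths. */
#include <assert.h>
#include <stdio.h>

static int old_overlap(unsigned long addr, unsigned long old_len,
		       unsigned long new_addr, unsigned long new_len)
{
	return (new_addr <= addr && new_addr + new_len > addr) ||
	       (addr <= new_addr && addr + old_len > new_addr);
}

static int new_overlap(unsigned long addr, unsigned long old_len,
		       unsigned long new_addr, unsigned long new_len)
{
	return addr + old_len > new_addr && new_addr + new_len > addr;
}

int main(void)
{
	unsigned long a, n;

	/* exhaustive check over small page-like units, fixed length 4 */
	for (a = 0; a < 16; a++)
		for (n = 0; n < 16; n++)
			assert(old_overlap(a, 4, n, 4) == new_overlap(a, 4, n, 4));
	puts("predicates agree");
	return 0;
}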
102139diff --git a/mm/nommu.c b/mm/nommu.c
102140index 4a852f6..4371a6b 100644
102141--- a/mm/nommu.c
102142+++ b/mm/nommu.c
102143@@ -70,7 +70,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
102144 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
102145 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
102146 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
102147-int heap_stack_gap = 0;
102148
102149 atomic_long_t mmap_pages_allocated;
102150
102151@@ -857,15 +856,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
102152 EXPORT_SYMBOL(find_vma);
102153
102154 /*
102155- * find a VMA
102156- * - we don't extend stack VMAs under NOMMU conditions
102157- */
102158-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
102159-{
102160- return find_vma(mm, addr);
102161-}
102162-
102163-/*
102164 * expand a stack to a given address
102165 * - not supported under NOMMU conditions
102166 */
102167@@ -1572,6 +1562,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
102168
102169 /* most fields are the same, copy all, and then fixup */
102170 *new = *vma;
102171+ INIT_LIST_HEAD(&new->anon_vma_chain);
102172 *region = *vma->vm_region;
102173 new->vm_region = region;
102174
102175@@ -2007,8 +1998,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
102176 }
102177 EXPORT_SYMBOL(generic_file_remap_pages);
102178
102179-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
102180- unsigned long addr, void *buf, int len, int write)
102181+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
102182+ unsigned long addr, void *buf, size_t len, int write)
102183 {
102184 struct vm_area_struct *vma;
102185
102186@@ -2049,8 +2040,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
102187 *
102188 * The caller must hold a reference on @mm.
102189 */
102190-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
102191- void *buf, int len, int write)
102192+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
102193+ void *buf, size_t len, int write)
102194 {
102195 return __access_remote_vm(NULL, mm, addr, buf, len, write);
102196 }
102197@@ -2059,7 +2050,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
102198 * Access another process' address space.
102199 * - source/target buffer must be kernel space
102200 */
102201-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
102202+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
102203 {
102204 struct mm_struct *mm;
102205
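
The mm/nommu.c prototype changes above widen the access_*_vm() length and return types from int to size_t/ssize_t. The motivation is the usual truncation hazard: on LP64 an int length silently wraps for requests of 2 GiB and beyond. A tiny sketch of the failure mode, for illustration only:

/* Illustrative sketch only: what an int-typed length does to a large request.
 * The exact int result is implementation-defined; on common two's-complement
 * ABIs it comes out negative. */
#include <stdio.h>

int main(void)
{
	unsigned long request = 3UL << 30;	/* 3 GiB */
	int as_int = (int)request;		/* implementation-defined, typically negative */
	long as_ssize = (long)request;		/* preserved on LP64 */

	printf("int: %d, ssize_t-sized: %ld\n", as_int, as_ssize);
	return 0;
}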
102206diff --git a/mm/page-writeback.c b/mm/page-writeback.c
102207index e0c9430..3c6bf79 100644
102208--- a/mm/page-writeback.c
102209+++ b/mm/page-writeback.c
102210@@ -667,7 +667,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
102211 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
102212 * - the bdi dirty thresh drops quickly due to change of JBOD workload
102213 */
102214-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
102215+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
102216 unsigned long thresh,
102217 unsigned long bg_thresh,
102218 unsigned long dirty,
102219diff --git a/mm/page_alloc.c b/mm/page_alloc.c
102220index ef44ad7..1056bc7 100644
102221--- a/mm/page_alloc.c
102222+++ b/mm/page_alloc.c
102223@@ -61,6 +61,7 @@
102224 #include <linux/page-debug-flags.h>
102225 #include <linux/hugetlb.h>
102226 #include <linux/sched/rt.h>
102227+#include <linux/random.h>
102228
102229 #include <asm/sections.h>
102230 #include <asm/tlbflush.h>
102231@@ -357,7 +358,7 @@ out:
102232 * This usage means that zero-order pages may not be compound.
102233 */
102234
102235-static void free_compound_page(struct page *page)
102236+void free_compound_page(struct page *page)
102237 {
102238 __free_pages_ok(page, compound_order(page));
102239 }
102240@@ -745,6 +746,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
102241 int i;
102242 int bad = 0;
102243
102244+#ifdef CONFIG_PAX_MEMORY_SANITIZE
102245+ unsigned long index = 1UL << order;
102246+#endif
102247+
102248 trace_mm_page_free(page, order);
102249 kmemcheck_free_shadow(page, order);
102250
102251@@ -761,6 +766,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
102252 debug_check_no_obj_freed(page_address(page),
102253 PAGE_SIZE << order);
102254 }
102255+
102256+#ifdef CONFIG_PAX_MEMORY_SANITIZE
102257+ for (; index; --index)
102258+ sanitize_highpage(page + index - 1);
102259+#endif
102260+
102261 arch_free_page(page, order);
102262 kernel_map_pages(page, 1 << order, 0);
102263
102264@@ -784,6 +795,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
102265 local_irq_restore(flags);
102266 }
102267
102268+#ifdef CONFIG_PAX_LATENT_ENTROPY
102269+bool __meminitdata extra_latent_entropy;
102270+
102271+static int __init setup_pax_extra_latent_entropy(char *str)
102272+{
102273+ extra_latent_entropy = true;
102274+ return 0;
102275+}
102276+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
102277+
102278+volatile u64 latent_entropy __latent_entropy;
102279+EXPORT_SYMBOL(latent_entropy);
102280+#endif
102281+
102282 void __init __free_pages_bootmem(struct page *page, unsigned int order)
102283 {
102284 unsigned int nr_pages = 1 << order;
102285@@ -799,6 +824,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
102286 __ClearPageReserved(p);
102287 set_page_count(p, 0);
102288
102289+#ifdef CONFIG_PAX_LATENT_ENTROPY
102290+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
102291+ u64 hash = 0;
102292+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
102293+ const u64 *data = lowmem_page_address(page);
102294+
102295+ for (index = 0; index < end; index++)
102296+ hash ^= hash + data[index];
102297+ latent_entropy ^= hash;
102298+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
102299+ }
102300+#endif
102301+
102302 page_zone(page)->managed_pages += nr_pages;
102303 set_page_refcounted(page);
102304 __free_pages(page, order);
102305@@ -927,8 +965,10 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
102306 arch_alloc_page(page, order);
102307 kernel_map_pages(page, 1 << order, 1);
102308
102309+#ifndef CONFIG_PAX_MEMORY_SANITIZE
102310 if (gfp_flags & __GFP_ZERO)
102311 prep_zero_page(page, order, gfp_flags);
102312+#endif
102313
102314 if (order && (gfp_flags & __GFP_COMP))
102315 prep_compound_page(page, order);
102316@@ -2427,7 +2467,7 @@ static void reset_alloc_batches(struct zonelist *zonelist,
102317 continue;
102318 mod_zone_page_state(zone, NR_ALLOC_BATCH,
102319 high_wmark_pages(zone) - low_wmark_pages(zone) -
102320- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
102321+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
102322 }
102323 }
102324
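
The PAX_LATENT_ENTROPY hunk in __free_pages_bootmem() above harvests boot-time entropy by folding the raw contents of low memory pages into a running hash. The fold itself is a one-liner, `hash ^= hash + data[index]`, which mixes both the word's value and its position. A standalone restatement, with a stack buffer standing in for the page:

/* Illustrative sketch only: the xor/add fold applied to low pages when the
 * "pax_extra_latent_entropy" boot parameter is set. */
#include <stdint.h>
#include <stdio.h>

static uint64_t fold(const uint64_t *data, size_t words)
{
	uint64_t hash = 0;
	size_t i;

	for (i = 0; i < words; i++)
		hash ^= hash + data[i];	/* order-sensitive: position matters */
	return hash;
}

int main(void)
{
	uint64_t page[512] = { [0] = 0xdeadbeef, [511] = 0x1234 }; /* 4 KiB worth */

	printf("contribution to latent_entropy pool: %#llx\n",
	       (unsigned long long)fold(page, 512));
	return 0;
}

Note also that with CONFIG_PAX_MEMORY_SANITIZE the __GFP_ZERO prep in prep_new_page() is compiled out: pages are already wiped on the free path, so zeroing again at allocation would be redundant work.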
102325diff --git a/mm/percpu.c b/mm/percpu.c
102326index 2ddf9a9..f8fc075 100644
102327--- a/mm/percpu.c
102328+++ b/mm/percpu.c
102329@@ -123,7 +123,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
102330 static unsigned int pcpu_high_unit_cpu __read_mostly;
102331
102332 /* the address of the first chunk which starts with the kernel static area */
102333-void *pcpu_base_addr __read_mostly;
102334+void *pcpu_base_addr __read_only;
102335 EXPORT_SYMBOL_GPL(pcpu_base_addr);
102336
102337 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
102338diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
102339index a8b9199..dfb79e0 100644
102340--- a/mm/pgtable-generic.c
102341+++ b/mm/pgtable-generic.c
102342@@ -195,7 +195,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
102343 pmd_t entry = *pmdp;
102344 if (pmd_numa(entry))
102345 entry = pmd_mknonnuma(entry);
102346- set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
102347+ set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
102348 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
102349 }
102350 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
102351diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
102352index 5077afc..846c9ef 100644
102353--- a/mm/process_vm_access.c
102354+++ b/mm/process_vm_access.c
102355@@ -13,6 +13,7 @@
102356 #include <linux/uio.h>
102357 #include <linux/sched.h>
102358 #include <linux/highmem.h>
102359+#include <linux/security.h>
102360 #include <linux/ptrace.h>
102361 #include <linux/slab.h>
102362 #include <linux/syscalls.h>
102363@@ -157,19 +158,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
102364 ssize_t iov_len;
102365 size_t total_len = iov_iter_count(iter);
102366
102367+ return -ENOSYS; /* PaX: until properly audited */
102368+
102369 /*
102370 * Work out how many pages of struct pages we're going to need
102371 * when eventually calling get_user_pages
102372 */
102373 for (i = 0; i < riovcnt; i++) {
102374 iov_len = rvec[i].iov_len;
102375- if (iov_len > 0) {
102376- nr_pages_iov = ((unsigned long)rvec[i].iov_base
102377- + iov_len)
102378- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
102379- / PAGE_SIZE + 1;
102380- nr_pages = max(nr_pages, nr_pages_iov);
102381- }
102382+ if (iov_len <= 0)
102383+ continue;
102384+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
102385+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
102386+ nr_pages = max(nr_pages, nr_pages_iov);
102387 }
102388
102389 if (nr_pages == 0)
102390@@ -197,6 +198,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
102391 goto free_proc_pages;
102392 }
102393
102394+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
102395+ rc = -EPERM;
102396+ goto put_task_struct;
102397+ }
102398+
102399 mm = mm_access(task, PTRACE_MODE_ATTACH);
102400 if (!mm || IS_ERR(mm)) {
102401 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
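
Two things happen in the process_vm_access.c hunk above: process_vm_rw_core() now bails out with -ENOSYS before doing any work (so the iovec-sizing loop after it is effectively dead until the PaX audit note is resolved), and that loop's page-count estimate is restructured into the usual spanned-pages formula. A standalone sketch of the formula, with an assumed 4 KiB page size:

/* Illustrative sketch only: pages touched by an iovec [base, base + len).
 * The estimate can overcount by one when the range ends exactly on a page
 * boundary, which is harmless for sizing a struct page array. */
#include <stdio.h>

#define PAGE_SIZE 4096UL /* assumed */

static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
	return (base + len) / PAGE_SIZE - base / PAGE_SIZE + 1;
}

int main(void)
{
	printf("%lu\n", pages_spanned(4096 - 50, 100)); /* crosses a boundary: 2 */
	printf("%lu\n", pages_spanned(0, 4096));        /* exact page: 2 (upper bound) */
	return 0;
}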
102402diff --git a/mm/rmap.c b/mm/rmap.c
102403index 22a4a76..9551288 100644
102404--- a/mm/rmap.c
102405+++ b/mm/rmap.c
102406@@ -164,6 +164,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
102407 struct anon_vma *anon_vma = vma->anon_vma;
102408 struct anon_vma_chain *avc;
102409
102410+#ifdef CONFIG_PAX_SEGMEXEC
102411+ struct anon_vma_chain *avc_m = NULL;
102412+#endif
102413+
102414 might_sleep();
102415 if (unlikely(!anon_vma)) {
102416 struct mm_struct *mm = vma->vm_mm;
102417@@ -173,6 +177,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
102418 if (!avc)
102419 goto out_enomem;
102420
102421+#ifdef CONFIG_PAX_SEGMEXEC
102422+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
102423+ if (!avc_m)
102424+ goto out_enomem_free_avc;
102425+#endif
102426+
102427 anon_vma = find_mergeable_anon_vma(vma);
102428 allocated = NULL;
102429 if (!anon_vma) {
102430@@ -186,6 +196,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
102431 /* page_table_lock to protect against threads */
102432 spin_lock(&mm->page_table_lock);
102433 if (likely(!vma->anon_vma)) {
102434+
102435+#ifdef CONFIG_PAX_SEGMEXEC
102436+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
102437+
102438+ if (vma_m) {
102439+ BUG_ON(vma_m->anon_vma);
102440+ vma_m->anon_vma = anon_vma;
102441+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
102442+ avc_m = NULL;
102443+ }
102444+#endif
102445+
102446 vma->anon_vma = anon_vma;
102447 anon_vma_chain_link(vma, avc, anon_vma);
102448 allocated = NULL;
102449@@ -196,12 +218,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
102450
102451 if (unlikely(allocated))
102452 put_anon_vma(allocated);
102453+
102454+#ifdef CONFIG_PAX_SEGMEXEC
102455+ if (unlikely(avc_m))
102456+ anon_vma_chain_free(avc_m);
102457+#endif
102458+
102459 if (unlikely(avc))
102460 anon_vma_chain_free(avc);
102461 }
102462 return 0;
102463
102464 out_enomem_free_avc:
102465+
102466+#ifdef CONFIG_PAX_SEGMEXEC
102467+ if (avc_m)
102468+ anon_vma_chain_free(avc_m);
102469+#endif
102470+
102471 anon_vma_chain_free(avc);
102472 out_enomem:
102473 return -ENOMEM;
102474@@ -237,7 +271,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
102475 * Attach the anon_vmas from src to dst.
102476 * Returns 0 on success, -ENOMEM on failure.
102477 */
102478-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
102479+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
102480 {
102481 struct anon_vma_chain *avc, *pavc;
102482 struct anon_vma *root = NULL;
102483@@ -270,7 +304,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
102484 * the corresponding VMA in the parent process is attached to.
102485 * Returns 0 on success, non-zero on failure.
102486 */
102487-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
102488+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
102489 {
102490 struct anon_vma_chain *avc;
102491 struct anon_vma *anon_vma;
102492@@ -374,8 +408,10 @@ static void anon_vma_ctor(void *data)
102493 void __init anon_vma_init(void)
102494 {
102495 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
102496- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
102497- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
102498+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
102499+ anon_vma_ctor);
102500+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
102501+ SLAB_PANIC|SLAB_NO_SANITIZE);
102502 }
102503
102504 /*
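
The anon_vma_prepare() hunks above follow a classic pattern for the mirror chain avc_m: the GFP_KERNEL allocation may sleep, so it happens before page_table_lock is taken; under the lock the object is either handed off to the mirror VMA or left alone, and the unused object is freed after the lock drops. A generic userspace sketch of the pattern (the names here are stand-ins, not kernel APIs):

/* Illustrative sketch only: allocate early, hand off under the lock,
 * free late if the hand-off did not happen. */
#include <stdio.h>
#include <stdlib.h>

struct chain { int dummy; };

static struct chain *install_point; /* stands in for vma_m->anon_vma linkage */

static int prepare(int need_mirror)
{
	struct chain *avc_m = NULL;

	if (need_mirror) {
		avc_m = malloc(sizeof(*avc_m)); /* may "sleep": before the lock */
		if (!avc_m)
			return -1;
	}

	/* --- spinlock held in the kernel version --- */
	if (avc_m && !install_point) {
		install_point = avc_m;	/* handed off */
		avc_m = NULL;
	}
	/* --- spinlock dropped --- */

	free(avc_m);			/* not needed after all: give it back */
	return 0;
}

int main(void)
{
	prepare(1);
	printf("installed: %s\n", install_point ? "yes" : "no");
	return 0;
}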
102505diff --git a/mm/shmem.c b/mm/shmem.c
102506index af68b15..1227320 100644
102507--- a/mm/shmem.c
102508+++ b/mm/shmem.c
102509@@ -33,7 +33,7 @@
102510 #include <linux/swap.h>
102511 #include <linux/aio.h>
102512
102513-static struct vfsmount *shm_mnt;
102514+struct vfsmount *shm_mnt;
102515
102516 #ifdef CONFIG_SHMEM
102517 /*
102518@@ -77,7 +77,7 @@ static struct vfsmount *shm_mnt;
102519 #define BOGO_DIRENT_SIZE 20
102520
102521 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
102522-#define SHORT_SYMLINK_LEN 128
102523+#define SHORT_SYMLINK_LEN 64
102524
102525 /*
102526 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
102527@@ -2219,6 +2219,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
102528 static int shmem_xattr_validate(const char *name)
102529 {
102530 struct { const char *prefix; size_t len; } arr[] = {
102531+
102532+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
102533+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
102534+#endif
102535+
102536 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
102537 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
102538 };
102539@@ -2274,6 +2279,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
102540 if (err)
102541 return err;
102542
102543+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
102544+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
102545+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
102546+ return -EOPNOTSUPP;
102547+ if (size > 8)
102548+ return -EINVAL;
102549+ }
102550+#endif
102551+
102552 return simple_xattr_set(&info->xattrs, name, value, size, flags);
102553 }
102554
102555@@ -2586,8 +2600,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
102556 int err = -ENOMEM;
102557
102558 /* Round up to L1_CACHE_BYTES to resist false sharing */
102559- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
102560- L1_CACHE_BYTES), GFP_KERNEL);
102561+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
102562 if (!sbinfo)
102563 return -ENOMEM;
102564
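
The shmem xattr hunks above open the "user." namespace only wide enough to carry PaX flags: anything else under that prefix is rejected with -EOPNOTSUPP, and the flags value itself is capped at 8 bytes. A standalone restatement of the filter; XATTR_NAME_PAX_FLAGS is assumed to expand to "user.pax.flags", as defined elsewhere in this patch.

/* Illustrative sketch only: the user-namespace xattr filter added to
 * shmem_setxattr(). */
#include <stdio.h>
#include <string.h>

#define XATTR_USER_PREFIX "user."
#define XATTR_NAME_PAX_FLAGS "user.pax.flags" /* assumed expansion */

static int user_xattr_allowed(const char *name, size_t size)
{
	if (strncmp(name, XATTR_USER_PREFIX, strlen(XATTR_USER_PREFIX)))
		return 1;	/* not in the user namespace: not filtered here */
	if (strcmp(name, XATTR_NAME_PAX_FLAGS))
		return 0;	/* -EOPNOTSUPP in the kernel */
	return size <= 8;	/* flags string is at most 8 bytes */
}

int main(void)
{
	printf("%d\n", user_xattr_allowed("user.pax.flags", 5)); /* 1: allowed */
	printf("%d\n", user_xattr_allowed("user.foo", 5));       /* 0: rejected */
	return 0;
}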
102565diff --git a/mm/slab.c b/mm/slab.c
102566index 3070b92..bcfff83 100644
102567--- a/mm/slab.c
102568+++ b/mm/slab.c
102569@@ -311,10 +311,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
102570 if ((x)->max_freeable < i) \
102571 (x)->max_freeable = i; \
102572 } while (0)
102573-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
102574-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
102575-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
102576-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
102577+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
102578+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
102579+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
102580+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
102581+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
102582+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
102583 #else
102584 #define STATS_INC_ACTIVE(x) do { } while (0)
102585 #define STATS_DEC_ACTIVE(x) do { } while (0)
102586@@ -331,6 +333,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
102587 #define STATS_INC_ALLOCMISS(x) do { } while (0)
102588 #define STATS_INC_FREEHIT(x) do { } while (0)
102589 #define STATS_INC_FREEMISS(x) do { } while (0)
102590+#define STATS_INC_SANITIZED(x) do { } while (0)
102591+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
102592 #endif
102593
102594 #if DEBUG
102595@@ -447,7 +451,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
102596 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
102597 */
102598 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
102599- const struct page *page, void *obj)
102600+ const struct page *page, const void *obj)
102601 {
102602 u32 offset = (obj - page->s_mem);
102603 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
102604@@ -1558,12 +1562,12 @@ void __init kmem_cache_init(void)
102605 */
102606
102607 kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
102608- kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
102609+ kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
102610
102611 if (INDEX_AC != INDEX_NODE)
102612 kmalloc_caches[INDEX_NODE] =
102613 create_kmalloc_cache("kmalloc-node",
102614- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
102615+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
102616
102617 slab_early_init = 0;
102618
102619@@ -3512,6 +3516,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
102620 struct array_cache *ac = cpu_cache_get(cachep);
102621
102622 check_irq_off();
102623+
102624+#ifdef CONFIG_PAX_MEMORY_SANITIZE
102625+ if (pax_sanitize_slab) {
102626+ if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
102627+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
102628+
102629+ if (cachep->ctor)
102630+ cachep->ctor(objp);
102631+
102632+ STATS_INC_SANITIZED(cachep);
102633+ } else
102634+ STATS_INC_NOT_SANITIZED(cachep);
102635+ }
102636+#endif
102637+
102638 kmemleak_free_recursive(objp, cachep->flags);
102639 objp = cache_free_debugcheck(cachep, objp, caller);
102640
102641@@ -3735,6 +3754,7 @@ void kfree(const void *objp)
102642
102643 if (unlikely(ZERO_OR_NULL_PTR(objp)))
102644 return;
102645+ VM_BUG_ON(!virt_addr_valid(objp));
102646 local_irq_save(flags);
102647 kfree_debugcheck(objp);
102648 c = virt_to_cache(objp);
102649@@ -4176,14 +4196,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
102650 }
102651 /* cpu stats */
102652 {
102653- unsigned long allochit = atomic_read(&cachep->allochit);
102654- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
102655- unsigned long freehit = atomic_read(&cachep->freehit);
102656- unsigned long freemiss = atomic_read(&cachep->freemiss);
102657+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
102658+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
102659+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
102660+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
102661
102662 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
102663 allochit, allocmiss, freehit, freemiss);
102664 }
102665+#ifdef CONFIG_PAX_MEMORY_SANITIZE
102666+ {
102667+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
102668+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
102669+
102670+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
102671+ }
102672+#endif
102673 #endif
102674 }
102675
102676@@ -4404,13 +4432,69 @@ static const struct file_operations proc_slabstats_operations = {
102677 static int __init slab_proc_init(void)
102678 {
102679 #ifdef CONFIG_DEBUG_SLAB_LEAK
102680- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
102681+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
102682 #endif
102683 return 0;
102684 }
102685 module_init(slab_proc_init);
102686 #endif
102687
102688+bool is_usercopy_object(const void *ptr)
102689+{
102690+ struct page *page;
102691+ struct kmem_cache *cachep;
102692+
102693+ if (ZERO_OR_NULL_PTR(ptr))
102694+ return false;
102695+
102696+ if (!slab_is_available())
102697+ return false;
102698+
102699+ if (!virt_addr_valid(ptr))
102700+ return false;
102701+
102702+ page = virt_to_head_page(ptr);
102703+
102704+ if (!PageSlab(page))
102705+ return false;
102706+
102707+ cachep = page->slab_cache;
102708+ return cachep->flags & SLAB_USERCOPY;
102709+}
102710+
102711+#ifdef CONFIG_PAX_USERCOPY
102712+const char *check_heap_object(const void *ptr, unsigned long n)
102713+{
102714+ struct page *page;
102715+ struct kmem_cache *cachep;
102716+ unsigned int objnr;
102717+ unsigned long offset;
102718+
102719+ if (ZERO_OR_NULL_PTR(ptr))
102720+ return "<null>";
102721+
102722+ if (!virt_addr_valid(ptr))
102723+ return NULL;
102724+
102725+ page = virt_to_head_page(ptr);
102726+
102727+ if (!PageSlab(page))
102728+ return NULL;
102729+
102730+ cachep = page->slab_cache;
102731+ if (!(cachep->flags & SLAB_USERCOPY))
102732+ return cachep->name;
102733+
102734+ objnr = obj_to_index(cachep, page, ptr);
102735+ BUG_ON(objnr >= cachep->num);
102736+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
102737+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
102738+ return NULL;
102739+
102740+ return cachep->name;
102741+}
102742+#endif
102743+
102744 /**
102745 * ksize - get the actual amount of memory allocated for a given object
102746 * @objp: Pointer to the object
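
Once check_heap_object() above has located the containing slab object, the USERCOPY decision reduces to a single bounds test: the copy must start inside the object and fit in the bytes remaining after the start offset. Note the operand order: because `offset <= object_size` is checked first, the unsigned subtraction `object_size - offset` can never wrap. A standalone restatement with made-up sizes:

/* Illustrative sketch only: the object-relative bounds test at the heart of
 * check_heap_object(), with the cache geometry reduced to one number. */
#include <stdio.h>

/* Returns 1 if copying n bytes starting at object-relative byte offset
 * stays inside a single object of object_size bytes. */
static int usercopy_ok(unsigned long offset, unsigned long n,
		       unsigned long object_size)
{
	return offset <= object_size && n <= object_size - offset;
}

int main(void)
{
	/* a 192-byte kmalloc object */
	printf("%d\n", usercopy_ok(0, 192, 192));   /* whole object: ok */
	printf("%d\n", usercopy_ok(64, 128, 192));  /* tail: ok */
	printf("%d\n", usercopy_ok(64, 129, 192));  /* one byte past: refused */
	return 0;
}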
102747diff --git a/mm/slab.h b/mm/slab.h
102748index 961a3fb..6b12514 100644
102749--- a/mm/slab.h
102750+++ b/mm/slab.h
102751@@ -32,6 +32,15 @@ extern struct list_head slab_caches;
102752 /* The slab cache that manages slab cache information */
102753 extern struct kmem_cache *kmem_cache;
102754
102755+#ifdef CONFIG_PAX_MEMORY_SANITIZE
102756+#ifdef CONFIG_X86_64
102757+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
102758+#else
102759+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
102760+#endif
102761+extern bool pax_sanitize_slab;
102762+#endif
102763+
102764 unsigned long calculate_alignment(unsigned long flags,
102765 unsigned long align, unsigned long size);
102766
102767@@ -67,7 +76,8 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
102768
102769 /* Legal flag mask for kmem_cache_create(), for various configurations */
102770 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
102771- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
102772+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
102773+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
102774
102775 #if defined(CONFIG_DEBUG_SLAB)
102776 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
102777@@ -251,6 +261,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
102778 return s;
102779
102780 page = virt_to_head_page(x);
102781+
102782+ BUG_ON(!PageSlab(page));
102783+
102784 cachep = page->slab_cache;
102785 if (slab_equal_or_root(cachep, s))
102786 return cachep;
102787diff --git a/mm/slab_common.c b/mm/slab_common.c
102788index d31c4ba..1121296 100644
102789--- a/mm/slab_common.c
102790+++ b/mm/slab_common.c
102791@@ -23,11 +23,22 @@
102792
102793 #include "slab.h"
102794
102795-enum slab_state slab_state;
102796+enum slab_state slab_state __read_only;
102797 LIST_HEAD(slab_caches);
102798 DEFINE_MUTEX(slab_mutex);
102799 struct kmem_cache *kmem_cache;
102800
102801+#ifdef CONFIG_PAX_MEMORY_SANITIZE
102802+bool pax_sanitize_slab __read_only = true;
102803+static int __init pax_sanitize_slab_setup(char *str)
102804+{
102805+ pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
102806+ printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
102807+ return 1;
102808+}
102809+__setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
102810+#endif
102811+
102812 #ifdef CONFIG_DEBUG_VM
102813 static int kmem_cache_sanity_check(const char *name, size_t size)
102814 {
102815@@ -158,7 +169,7 @@ do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
102816 if (err)
102817 goto out_free_cache;
102818
102819- s->refcount = 1;
102820+ atomic_set(&s->refcount, 1);
102821 list_add(&s->list, &slab_caches);
102822 out:
102823 if (err)
102824@@ -339,8 +350,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
102825
102826 mutex_lock(&slab_mutex);
102827
102828- s->refcount--;
102829- if (s->refcount)
102830+ if (!atomic_dec_and_test(&s->refcount))
102831 goto out_unlock;
102832
102833 if (memcg_cleanup_cache_params(s) != 0)
102834@@ -360,7 +370,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
102835 rcu_barrier();
102836
102837 memcg_free_cache_params(s);
102838-#ifdef SLAB_SUPPORTS_SYSFS
102839+#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
102840 sysfs_slab_remove(s);
102841 #else
102842 slab_kmem_cache_release(s);
102843@@ -416,7 +426,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
102844 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
102845 name, size, err);
102846
102847- s->refcount = -1; /* Exempt from merging for now */
102848+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
102849 }
102850
102851 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
102852@@ -429,7 +439,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
102853
102854 create_boot_cache(s, name, size, flags);
102855 list_add(&s->list, &slab_caches);
102856- s->refcount = 1;
102857+ atomic_set(&s->refcount, 1);
102858 return s;
102859 }
102860
102861@@ -441,6 +451,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
102862 EXPORT_SYMBOL(kmalloc_dma_caches);
102863 #endif
102864
102865+#ifdef CONFIG_PAX_USERCOPY_SLABS
102866+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
102867+EXPORT_SYMBOL(kmalloc_usercopy_caches);
102868+#endif
102869+
102870 /*
102871 * Conversion table for small slabs sizes / 8 to the index in the
102872 * kmalloc array. This is necessary for slabs < 192 since we have non power
102873@@ -505,6 +520,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
102874 return kmalloc_dma_caches[index];
102875
102876 #endif
102877+
102878+#ifdef CONFIG_PAX_USERCOPY_SLABS
102879+ if (unlikely((flags & GFP_USERCOPY)))
102880+ return kmalloc_usercopy_caches[index];
102881+
102882+#endif
102883+
102884 return kmalloc_caches[index];
102885 }
102886
102887@@ -561,7 +583,7 @@ void __init create_kmalloc_caches(unsigned long flags)
102888 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
102889 if (!kmalloc_caches[i]) {
102890 kmalloc_caches[i] = create_kmalloc_cache(NULL,
102891- 1 << i, flags);
102892+ 1 << i, SLAB_USERCOPY | flags);
102893 }
102894
102895 /*
102896@@ -570,10 +592,10 @@ void __init create_kmalloc_caches(unsigned long flags)
102897 * earlier power of two caches
102898 */
102899 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
102900- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
102901+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
102902
102903 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
102904- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
102905+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
102906 }
102907
102908 /* Kmalloc array is now usable */
102909@@ -606,6 +628,23 @@ void __init create_kmalloc_caches(unsigned long flags)
102910 }
102911 }
102912 #endif
102913+
102914+#ifdef CONFIG_PAX_USERCOPY_SLABS
102915+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
102916+ struct kmem_cache *s = kmalloc_caches[i];
102917+
102918+ if (s) {
102919+ int size = kmalloc_size(i);
102920+ char *n = kasprintf(GFP_NOWAIT,
102921+ "usercopy-kmalloc-%d", size);
102922+
102923+ BUG_ON(!n);
102924+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
102925+ size, SLAB_USERCOPY | flags);
102926+ }
102927+ }
102928+#endif
102929+
102930 }
102931 #endif /* !CONFIG_SLOB */
102932
102933@@ -664,6 +703,9 @@ void print_slabinfo_header(struct seq_file *m)
102934 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
102935 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
102936 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
102937+#ifdef CONFIG_PAX_MEMORY_SANITIZE
102938+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
102939+#endif
102940 #endif
102941 seq_putc(m, '\n');
102942 }
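
The slab_common.c hunks above make slab sanitization a boot-time toggle: pax_sanitize_slab defaults to on and the `pax_sanitize_slab=` kernel parameter flips it via simple_strtol(). A userspace mirror of the handler's parsing, for illustration only:

/* Illustrative sketch only: how the pax_sanitize_slab= boot handler treats
 * its argument — any value strtol() parses as nonzero enables wiping. */
#include <stdio.h>
#include <stdlib.h>

static int pax_sanitize_slab = 1; /* default on, as in the hunk above */

static void parse(const char *str)
{
	pax_sanitize_slab = !!strtol(str, NULL, 0);
	printf("%sabled PaX slab sanitization\n",
	       pax_sanitize_slab ? "En" : "Dis");
}

int main(void)
{
	parse("0");	/* booting with pax_sanitize_slab=0 */
	parse("1");	/* booting with pax_sanitize_slab=1 */
	return 0;
}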
102943diff --git a/mm/slob.c b/mm/slob.c
102944index 21980e0..ed9a648 100644
102945--- a/mm/slob.c
102946+++ b/mm/slob.c
102947@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
102948 /*
102949 * Return the size of a slob block.
102950 */
102951-static slobidx_t slob_units(slob_t *s)
102952+static slobidx_t slob_units(const slob_t *s)
102953 {
102954 if (s->units > 0)
102955 return s->units;
102956@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
102957 /*
102958 * Return the next free slob block pointer after this one.
102959 */
102960-static slob_t *slob_next(slob_t *s)
102961+static slob_t *slob_next(const slob_t *s)
102962 {
102963 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
102964 slobidx_t next;
102965@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
102966 /*
102967 * Returns true if s is the last free block in its page.
102968 */
102969-static int slob_last(slob_t *s)
102970+static int slob_last(const slob_t *s)
102971 {
102972 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
102973 }
102974
102975-static void *slob_new_pages(gfp_t gfp, int order, int node)
102976+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
102977 {
102978- void *page;
102979+ struct page *page;
102980
102981 #ifdef CONFIG_NUMA
102982 if (node != NUMA_NO_NODE)
102983@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
102984 if (!page)
102985 return NULL;
102986
102987- return page_address(page);
102988+ __SetPageSlab(page);
102989+ return page;
102990 }
102991
102992-static void slob_free_pages(void *b, int order)
102993+static void slob_free_pages(struct page *sp, int order)
102994 {
102995 if (current->reclaim_state)
102996 current->reclaim_state->reclaimed_slab += 1 << order;
102997- free_pages((unsigned long)b, order);
102998+ __ClearPageSlab(sp);
102999+ page_mapcount_reset(sp);
103000+ sp->private = 0;
103001+ __free_pages(sp, order);
103002 }
103003
103004 /*
103005@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
103006
103007 /* Not enough space: must allocate a new page */
103008 if (!b) {
103009- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
103010- if (!b)
103011+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
103012+ if (!sp)
103013 return NULL;
103014- sp = virt_to_page(b);
103015- __SetPageSlab(sp);
103016+ b = page_address(sp);
103017
103018 spin_lock_irqsave(&slob_lock, flags);
103019 sp->units = SLOB_UNITS(PAGE_SIZE);
103020 sp->freelist = b;
103021+ sp->private = 0;
103022 INIT_LIST_HEAD(&sp->lru);
103023 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
103024 set_slob_page_free(sp, slob_list);
103025@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
103026 if (slob_page_free(sp))
103027 clear_slob_page_free(sp);
103028 spin_unlock_irqrestore(&slob_lock, flags);
103029- __ClearPageSlab(sp);
103030- page_mapcount_reset(sp);
103031- slob_free_pages(b, 0);
103032+ slob_free_pages(sp, 0);
103033 return;
103034 }
103035
103036+#ifdef CONFIG_PAX_MEMORY_SANITIZE
103037+ if (pax_sanitize_slab)
103038+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
103039+#endif
103040+
103041 if (!slob_page_free(sp)) {
103042 /* This slob page is about to become partially free. Easy! */
103043 sp->units = units;
103044@@ -424,11 +431,10 @@ out:
103045 */
103046
103047 static __always_inline void *
103048-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
103049+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
103050 {
103051- unsigned int *m;
103052- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
103053- void *ret;
103054+ slob_t *m;
103055+ void *ret = NULL;
103056
103057 gfp &= gfp_allowed_mask;
103058
103059@@ -442,23 +448,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
103060
103061 if (!m)
103062 return NULL;
103063- *m = size;
103064+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
103065+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
103066+ m[0].units = size;
103067+ m[1].units = align;
103068 ret = (void *)m + align;
103069
103070 trace_kmalloc_node(caller, ret,
103071 size, size + align, gfp, node);
103072 } else {
103073 unsigned int order = get_order(size);
103074+ struct page *page;
103075
103076 if (likely(order))
103077 gfp |= __GFP_COMP;
103078- ret = slob_new_pages(gfp, order, node);
103079+ page = slob_new_pages(gfp, order, node);
103080+ if (page) {
103081+ ret = page_address(page);
103082+ page->private = size;
103083+ }
103084
103085 trace_kmalloc_node(caller, ret,
103086 size, PAGE_SIZE << order, gfp, node);
103087 }
103088
103089- kmemleak_alloc(ret, size, 1, gfp);
103090+ return ret;
103091+}
103092+
103093+static __always_inline void *
103094+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
103095+{
103096+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
103097+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
103098+
103099+ if (!ZERO_OR_NULL_PTR(ret))
103100+ kmemleak_alloc(ret, size, 1, gfp);
103101 return ret;
103102 }
103103
103104@@ -493,34 +517,112 @@ void kfree(const void *block)
103105 return;
103106 kmemleak_free(block);
103107
103108+ VM_BUG_ON(!virt_addr_valid(block));
103109 sp = virt_to_page(block);
103110- if (PageSlab(sp)) {
103111+ VM_BUG_ON(!PageSlab(sp));
103112+ if (!sp->private) {
103113 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
103114- unsigned int *m = (unsigned int *)(block - align);
103115- slob_free(m, *m + align);
103116- } else
103117+ slob_t *m = (slob_t *)(block - align);
103118+ slob_free(m, m[0].units + align);
103119+ } else {
103120+ __ClearPageSlab(sp);
103121+ page_mapcount_reset(sp);
103122+ sp->private = 0;
103123 __free_pages(sp, compound_order(sp));
103124+ }
103125 }
103126 EXPORT_SYMBOL(kfree);
103127
103128+bool is_usercopy_object(const void *ptr)
103129+{
103130+ if (!slab_is_available())
103131+ return false;
103132+
103133+ // PAX: TODO
103134+
103135+ return false;
103136+}
103137+
103138+#ifdef CONFIG_PAX_USERCOPY
103139+const char *check_heap_object(const void *ptr, unsigned long n)
103140+{
103141+ struct page *page;
103142+ const slob_t *free;
103143+ const void *base;
103144+ unsigned long flags;
103145+
103146+ if (ZERO_OR_NULL_PTR(ptr))
103147+ return "<null>";
103148+
103149+ if (!virt_addr_valid(ptr))
103150+ return NULL;
103151+
103152+ page = virt_to_head_page(ptr);
103153+ if (!PageSlab(page))
103154+ return NULL;
103155+
103156+ if (page->private) {
103157+ base = page;
103158+ if (base <= ptr && n <= page->private - (ptr - base))
103159+ return NULL;
103160+ return "<slob>";
103161+ }
103162+
103163+ /* some tricky double walking to find the chunk */
103164+ spin_lock_irqsave(&slob_lock, flags);
103165+ base = (void *)((unsigned long)ptr & PAGE_MASK);
103166+ free = page->freelist;
103167+
103168+ while (!slob_last(free) && (void *)free <= ptr) {
103169+ base = free + slob_units(free);
103170+ free = slob_next(free);
103171+ }
103172+
103173+ while (base < (void *)free) {
103174+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
103175+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
103176+ int offset;
103177+
103178+ if (ptr < base + align)
103179+ break;
103180+
103181+ offset = ptr - base - align;
103182+ if (offset >= m) {
103183+ base += size;
103184+ continue;
103185+ }
103186+
103187+ if (n > m - offset)
103188+ break;
103189+
103190+ spin_unlock_irqrestore(&slob_lock, flags);
103191+ return NULL;
103192+ }
103193+
103194+ spin_unlock_irqrestore(&slob_lock, flags);
103195+ return "<slob>";
103196+}
103197+#endif
103198+
103199 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
103200 size_t ksize(const void *block)
103201 {
103202 struct page *sp;
103203 int align;
103204- unsigned int *m;
103205+ slob_t *m;
103206
103207 BUG_ON(!block);
103208 if (unlikely(block == ZERO_SIZE_PTR))
103209 return 0;
103210
103211 sp = virt_to_page(block);
103212- if (unlikely(!PageSlab(sp)))
103213- return PAGE_SIZE << compound_order(sp);
103214+ VM_BUG_ON(!PageSlab(sp));
103215+ if (sp->private)
103216+ return sp->private;
103217
103218 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
103219- m = (unsigned int *)(block - align);
103220- return SLOB_UNITS(*m) * SLOB_UNIT;
103221+ m = (slob_t *)(block - align);
103222+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
103223 }
103224 EXPORT_SYMBOL(ksize);
103225
103226@@ -536,23 +638,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
103227
103228 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
103229 {
103230- void *b;
103231+ void *b = NULL;
103232
103233 flags &= gfp_allowed_mask;
103234
103235 lockdep_trace_alloc(flags);
103236
103237+#ifdef CONFIG_PAX_USERCOPY_SLABS
103238+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
103239+#else
103240 if (c->size < PAGE_SIZE) {
103241 b = slob_alloc(c->size, flags, c->align, node);
103242 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
103243 SLOB_UNITS(c->size) * SLOB_UNIT,
103244 flags, node);
103245 } else {
103246- b = slob_new_pages(flags, get_order(c->size), node);
103247+ struct page *sp;
103248+
103249+ sp = slob_new_pages(flags, get_order(c->size), node);
103250+ if (sp) {
103251+ b = page_address(sp);
103252+ sp->private = c->size;
103253+ }
103254 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
103255 PAGE_SIZE << get_order(c->size),
103256 flags, node);
103257 }
103258+#endif
103259
103260 if (b && c->ctor)
103261 c->ctor(b);
103262@@ -584,10 +696,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
103263
103264 static void __kmem_cache_free(void *b, int size)
103265 {
103266- if (size < PAGE_SIZE)
103267+ struct page *sp;
103268+
103269+ sp = virt_to_page(b);
103270+ BUG_ON(!PageSlab(sp));
103271+ if (!sp->private)
103272 slob_free(b, size);
103273 else
103274- slob_free_pages(b, get_order(size));
103275+ slob_free_pages(sp, get_order(size));
103276 }
103277
103278 static void kmem_rcu_free(struct rcu_head *head)
103279@@ -600,17 +716,31 @@ static void kmem_rcu_free(struct rcu_head *head)
103280
103281 void kmem_cache_free(struct kmem_cache *c, void *b)
103282 {
103283+ int size = c->size;
103284+
103285+#ifdef CONFIG_PAX_USERCOPY_SLABS
103286+ if (size + c->align < PAGE_SIZE) {
103287+ size += c->align;
103288+ b -= c->align;
103289+ }
103290+#endif
103291+
103292 kmemleak_free_recursive(b, c->flags);
103293 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
103294 struct slob_rcu *slob_rcu;
103295- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
103296- slob_rcu->size = c->size;
103297+ slob_rcu = b + (size - sizeof(struct slob_rcu));
103298+ slob_rcu->size = size;
103299 call_rcu(&slob_rcu->head, kmem_rcu_free);
103300 } else {
103301- __kmem_cache_free(b, c->size);
103302+ __kmem_cache_free(b, size);
103303 }
103304
103305+#ifdef CONFIG_PAX_USERCOPY_SLABS
103306+ trace_kfree(_RET_IP_, b);
103307+#else
103308 trace_kmem_cache_free(_RET_IP_, b);
103309+#endif
103310+
103311 }
103312 EXPORT_SYMBOL(kmem_cache_free);
103313
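
The slob.c hunks above replace the old single unsigned int size header with two slob_t units (requested size in m[0].units, alignment in m[1].units) and stash the size of page-order allocations in page->private, so kfree(), ksize() and the PAX_USERCOPY walker can all recover exact object bounds. A minimal userspace model of just that header arithmetic, with a hypothetical stand-in allocator (model_kmalloc/model_ksize are made-up names, not kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Userspace model of the patched SLOB header: two slob_t metadata
 * units sit in front of each small allocation. The allocator around
 * them is a simplified stand-in for slob_alloc(). */
typedef struct { long units; } slob_t;

static void *model_kmalloc(size_t size, size_t align)
{
	slob_t *m = malloc(size + align);	/* header lives in the first 'align' bytes */
	if (!m)
		return NULL;
	m[0].units = (long)size;	/* patch: m[0].units = size  */
	m[1].units = (long)align;	/* patch: m[1].units = align */
	return (char *)m + align;
}

static size_t model_ksize(const void *block, size_t align)
{
	const slob_t *m = (const slob_t *)((const char *)block - align);
	return (size_t)m[0].units;	/* patched ksize() reads m[0].units */
}

int main(void)
{
	size_t align = 2 * sizeof(slob_t);	/* >= 2*SLOB_UNIT, per the BUILD_BUG_ONs */
	void *p = model_kmalloc(100, align);
	printf("ksize = %zu\n", model_ksize(p, align));	/* prints 100 */
	free((char *)p - align);
	return 0;
}
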
103314diff --git a/mm/slub.c b/mm/slub.c
103315index 7300480..cb92846 100644
103316--- a/mm/slub.c
103317+++ b/mm/slub.c
103318@@ -207,7 +207,7 @@ struct track {
103319
103320 enum track_item { TRACK_ALLOC, TRACK_FREE };
103321
103322-#ifdef CONFIG_SYSFS
103323+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
103324 static int sysfs_slab_add(struct kmem_cache *);
103325 static int sysfs_slab_alias(struct kmem_cache *, const char *);
103326 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
103327@@ -546,7 +546,7 @@ static void print_track(const char *s, struct track *t)
103328 if (!t->addr)
103329 return;
103330
103331- pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
103332+ pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
103333 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
103334 #ifdef CONFIG_STACKTRACE
103335 {
103336@@ -2673,6 +2673,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
103337
103338 slab_free_hook(s, x);
103339
103340+#ifdef CONFIG_PAX_MEMORY_SANITIZE
103341+ if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
103342+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
103343+ if (s->ctor)
103344+ s->ctor(x);
103345+ }
103346+#endif
103347+
103348 redo:
103349 /*
103350 * Determine the currently cpus per cpu slab.
103351@@ -2740,7 +2748,7 @@ static int slub_min_objects;
103352 * Merge control. If this is set then no merging of slab caches will occur.
103353 * (Could be removed. This was introduced to pacify the merge skeptics.)
103354 */
103355-static int slub_nomerge;
103356+static int slub_nomerge = 1;
103357
103358 /*
103359 * Calculate the order of allocation given an slab object size.
103360@@ -3019,6 +3027,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
103361 s->inuse = size;
103362
103363 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
103364+#ifdef CONFIG_PAX_MEMORY_SANITIZE
103365+ (pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) ||
103366+#endif
103367 s->ctor)) {
103368 /*
103369 * Relocate free pointer after the object if it is not
103370@@ -3347,6 +3358,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
103371 EXPORT_SYMBOL(__kmalloc_node);
103372 #endif
103373
103374+bool is_usercopy_object(const void *ptr)
103375+{
103376+ struct page *page;
103377+ struct kmem_cache *s;
103378+
103379+ if (ZERO_OR_NULL_PTR(ptr))
103380+ return false;
103381+
103382+ if (!slab_is_available())
103383+ return false;
103384+
103385+ if (!virt_addr_valid(ptr))
103386+ return false;
103387+
103388+ page = virt_to_head_page(ptr);
103389+
103390+ if (!PageSlab(page))
103391+ return false;
103392+
103393+ s = page->slab_cache;
103394+ return s->flags & SLAB_USERCOPY;
103395+}
103396+
103397+#ifdef CONFIG_PAX_USERCOPY
103398+const char *check_heap_object(const void *ptr, unsigned long n)
103399+{
103400+ struct page *page;
103401+ struct kmem_cache *s;
103402+ unsigned long offset;
103403+
103404+ if (ZERO_OR_NULL_PTR(ptr))
103405+ return "<null>";
103406+
103407+ if (!virt_addr_valid(ptr))
103408+ return NULL;
103409+
103410+ page = virt_to_head_page(ptr);
103411+
103412+ if (!PageSlab(page))
103413+ return NULL;
103414+
103415+ s = page->slab_cache;
103416+ if (!(s->flags & SLAB_USERCOPY))
103417+ return s->name;
103418+
103419+ offset = (ptr - page_address(page)) % s->size;
103420+ if (offset <= s->object_size && n <= s->object_size - offset)
103421+ return NULL;
103422+
103423+ return s->name;
103424+}
103425+#endif
103426+
103427 size_t ksize(const void *object)
103428 {
103429 struct page *page;
103430@@ -3375,6 +3439,7 @@ void kfree(const void *x)
103431 if (unlikely(ZERO_OR_NULL_PTR(x)))
103432 return;
103433
103434+ VM_BUG_ON(!virt_addr_valid(x));
103435 page = virt_to_head_page(x);
103436 if (unlikely(!PageSlab(page))) {
103437 BUG_ON(!PageCompound(page));
103438@@ -3680,7 +3745,7 @@ static int slab_unmergeable(struct kmem_cache *s)
103439 /*
103440 * We may have set a slab to be unmergeable during bootstrap.
103441 */
103442- if (s->refcount < 0)
103443+ if (atomic_read(&s->refcount) < 0)
103444 return 1;
103445
103446 return 0;
103447@@ -3737,7 +3802,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
103448 int i;
103449 struct kmem_cache *c;
103450
103451- s->refcount++;
103452+ atomic_inc(&s->refcount);
103453
103454 /*
103455 * Adjust the object sizes so that we clear
103456@@ -3756,7 +3821,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
103457 }
103458
103459 if (sysfs_slab_alias(s, name)) {
103460- s->refcount--;
103461+ atomic_dec(&s->refcount);
103462 s = NULL;
103463 }
103464 }
103465@@ -3873,7 +3938,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
103466 }
103467 #endif
103468
103469-#ifdef CONFIG_SYSFS
103470+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
103471 static int count_inuse(struct page *page)
103472 {
103473 return page->inuse;
103474@@ -4156,7 +4221,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
103475 len += sprintf(buf + len, "%7ld ", l->count);
103476
103477 if (l->addr)
103478+#ifdef CONFIG_GRKERNSEC_HIDESYM
103479+ len += sprintf(buf + len, "%pS", NULL);
103480+#else
103481 len += sprintf(buf + len, "%pS", (void *)l->addr);
103482+#endif
103483 else
103484 len += sprintf(buf + len, "<not-available>");
103485
103486@@ -4258,12 +4327,12 @@ static void resiliency_test(void)
103487 validate_slab_cache(kmalloc_caches[9]);
103488 }
103489 #else
103490-#ifdef CONFIG_SYSFS
103491+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
103492 static void resiliency_test(void) {};
103493 #endif
103494 #endif
103495
103496-#ifdef CONFIG_SYSFS
103497+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
103498 enum slab_stat_type {
103499 SL_ALL, /* All slabs */
103500 SL_PARTIAL, /* Only partially allocated slabs */
103501@@ -4503,13 +4572,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
103502 {
103503 if (!s->ctor)
103504 return 0;
103505+#ifdef CONFIG_GRKERNSEC_HIDESYM
103506+ return sprintf(buf, "%pS\n", NULL);
103507+#else
103508 return sprintf(buf, "%pS\n", s->ctor);
103509+#endif
103510 }
103511 SLAB_ATTR_RO(ctor);
103512
103513 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
103514 {
103515- return sprintf(buf, "%d\n", s->refcount - 1);
103516+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
103517 }
103518 SLAB_ATTR_RO(aliases);
103519
103520@@ -4597,6 +4670,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
103521 SLAB_ATTR_RO(cache_dma);
103522 #endif
103523
103524+#ifdef CONFIG_PAX_USERCOPY_SLABS
103525+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
103526+{
103527+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
103528+}
103529+SLAB_ATTR_RO(usercopy);
103530+#endif
103531+
103532 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
103533 {
103534 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
103535@@ -4931,6 +5012,9 @@ static struct attribute *slab_attrs[] = {
103536 #ifdef CONFIG_ZONE_DMA
103537 &cache_dma_attr.attr,
103538 #endif
103539+#ifdef CONFIG_PAX_USERCOPY_SLABS
103540+ &usercopy_attr.attr,
103541+#endif
103542 #ifdef CONFIG_NUMA
103543 &remote_node_defrag_ratio_attr.attr,
103544 #endif
103545@@ -5181,6 +5265,7 @@ static char *create_unique_id(struct kmem_cache *s)
103546 return name;
103547 }
103548
103549+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
103550 static int sysfs_slab_add(struct kmem_cache *s)
103551 {
103552 int err;
103553@@ -5254,6 +5339,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
103554 kobject_del(&s->kobj);
103555 kobject_put(&s->kobj);
103556 }
103557+#endif
103558
103559 /*
103560 * Need to buffer aliases during bootup until sysfs becomes
103561@@ -5267,6 +5353,7 @@ struct saved_alias {
103562
103563 static struct saved_alias *alias_list;
103564
103565+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
103566 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
103567 {
103568 struct saved_alias *al;
103569@@ -5289,6 +5376,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
103570 alias_list = al;
103571 return 0;
103572 }
103573+#endif
103574
103575 static int __init slab_sysfs_init(void)
103576 {
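
In the slub.c version of check_heap_object() above, the object-relative offset falls out of a modulo over the cache stride (s->size), and the copy is allowed only when it stays inside the usable payload (s->object_size). A hedged model of just that bounds arithmetic, with illustrative sizes rather than real cache geometry:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

/* Model of the bounds test in the patched check_heap_object():
 * 'stride' plays the role of s->size, 'object_size' of
 * s->object_size. Values below are illustrative. */
static bool copy_ok(size_t ptr_off_in_page, size_t n,
		    size_t stride, size_t object_size)
{
	size_t offset = ptr_off_in_page % stride;	/* offset within one object */
	/* allow only if the copy stays inside the payload */
	return offset <= object_size && n <= object_size - offset;
}

int main(void)
{
	/* 192-byte stride, 160-byte payload (metadata/red zone after it) */
	printf("%d\n", copy_ok(200, 100, 192, 160));	/* offset 8, fits: 1 */
	printf("%d\n", copy_ok(200, 160, 192, 160));	/* would overrun: 0 */
	return 0;
}
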
103577diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
103578index 4cba9c2..b4f9fcc 100644
103579--- a/mm/sparse-vmemmap.c
103580+++ b/mm/sparse-vmemmap.c
103581@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
103582 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
103583 if (!p)
103584 return NULL;
103585- pud_populate(&init_mm, pud, p);
103586+ pud_populate_kernel(&init_mm, pud, p);
103587 }
103588 return pud;
103589 }
103590@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
103591 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
103592 if (!p)
103593 return NULL;
103594- pgd_populate(&init_mm, pgd, p);
103595+ pgd_populate_kernel(&init_mm, pgd, p);
103596 }
103597 return pgd;
103598 }
103599diff --git a/mm/sparse.c b/mm/sparse.c
103600index d1b48b6..6e8590e 100644
103601--- a/mm/sparse.c
103602+++ b/mm/sparse.c
103603@@ -750,7 +750,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
103604
103605 for (i = 0; i < PAGES_PER_SECTION; i++) {
103606 if (PageHWPoison(&memmap[i])) {
103607- atomic_long_sub(1, &num_poisoned_pages);
103608+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
103609 ClearPageHWPoison(&memmap[i]);
103610 }
103611 }
103612diff --git a/mm/swap.c b/mm/swap.c
103613index 9e8e347..3c22e0f 100644
103614--- a/mm/swap.c
103615+++ b/mm/swap.c
103616@@ -31,6 +31,7 @@
103617 #include <linux/memcontrol.h>
103618 #include <linux/gfp.h>
103619 #include <linux/uio.h>
103620+#include <linux/hugetlb.h>
103621
103622 #include "internal.h"
103623
103624@@ -76,6 +77,8 @@ static void __put_compound_page(struct page *page)
103625
103626 __page_cache_release(page);
103627 dtor = get_compound_page_dtor(page);
103628+ if (!PageHuge(page))
103629+ BUG_ON(dtor != free_compound_page);
103630 (*dtor)(page);
103631 }
103632
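
The swap.c hunk hardens __put_compound_page(): before invoking a destructor pointer derived from page state, it BUG_ONs unless the page is a hugepage or the pointer is exactly free_compound_page, reducing the value of a corrupted compound-page dtor to an attacker. A tiny userspace analogue of that allowlist check (all names here are illustrative):

#include <assert.h>
#include <stddef.h>

typedef void (*dtor_t)(void *);
static void free_compound_page_model(void *p) { (void)p; }

/* Before calling a function pointer read from object state, assert
 * it is the single expected value unless the hugepage exception
 * applies - the BUG_ON analogue from the hunk above. */
static void put_compound(void *page, dtor_t dtor, int is_huge)
{
	if (!is_huge)
		assert(dtor == free_compound_page_model);
	dtor(page);
}

int main(void)
{
	put_compound(NULL, free_compound_page_model, 0);	/* passes */
	return 0;
}
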
103633diff --git a/mm/swapfile.c b/mm/swapfile.c
103634index 4c524f7..f7601f17 100644
103635--- a/mm/swapfile.c
103636+++ b/mm/swapfile.c
103637@@ -84,7 +84,7 @@ static DEFINE_MUTEX(swapon_mutex);
103638
103639 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
103640 /* Activity counter to indicate that a swapon or swapoff has occurred */
103641-static atomic_t proc_poll_event = ATOMIC_INIT(0);
103642+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
103643
103644 static inline unsigned char swap_count(unsigned char ent)
103645 {
103646@@ -1945,7 +1945,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
103647 spin_unlock(&swap_lock);
103648
103649 err = 0;
103650- atomic_inc(&proc_poll_event);
103651+ atomic_inc_unchecked(&proc_poll_event);
103652 wake_up_interruptible(&proc_poll_wait);
103653
103654 out_dput:
103655@@ -1962,8 +1962,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
103656
103657 poll_wait(file, &proc_poll_wait, wait);
103658
103659- if (seq->poll_event != atomic_read(&proc_poll_event)) {
103660- seq->poll_event = atomic_read(&proc_poll_event);
103661+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
103662+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
103663 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
103664 }
103665
103666@@ -2061,7 +2061,7 @@ static int swaps_open(struct inode *inode, struct file *file)
103667 return ret;
103668
103669 seq = file->private_data;
103670- seq->poll_event = atomic_read(&proc_poll_event);
103671+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
103672 return 0;
103673 }
103674
103675@@ -2521,7 +2521,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
103676 (frontswap_map) ? "FS" : "");
103677
103678 mutex_unlock(&swapon_mutex);
103679- atomic_inc(&proc_poll_event);
103680+ atomic_inc_unchecked(&proc_poll_event);
103681 wake_up_interruptible(&proc_poll_wait);
103682
103683 if (S_ISREG(inode->i_mode))
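
proc_poll_event here, vm_stat in the vmstat.c hunks, and the batman-adv/caif sequence counters later in this series all move to atomic_unchecked_t: under PAX_REFCOUNT a plain atomic_t that overflows triggers a report, so counters whose wraparound is harmless are opted out. A sketch of the policy from the caller's side (the real primitives are arch assembly; this only models the behavior, and the wrap result is the common two's-complement one):

#include <stdio.h>
#include <limits.h>

/* Model of the PAX_REFCOUNT split: checked increments report and
 * refuse to overflow, unchecked ones wrap silently. */
static int checked_inc(int *v)
{
	if (*v == INT_MAX)
		return -1;	/* stand-in for the kernel's overflow report */
	(*v)++;
	return 0;
}

static void unchecked_inc(int *v)
{
	/* wraparound is fine for event/sequence counters */
	*v = (int)((unsigned int)*v + 1u);
}

int main(void)
{
	int refcount = INT_MAX, events = INT_MAX;
	printf("checked_inc: %d\n", checked_inc(&refcount));	/* -1: blocked */
	unchecked_inc(&events);
	printf("events wrapped to %d\n", events);		/* INT_MIN on common ABIs */
	return 0;
}
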
103684diff --git a/mm/util.c b/mm/util.c
103685index 33e9f44..be026b2 100644
103686--- a/mm/util.c
103687+++ b/mm/util.c
103688@@ -296,6 +296,12 @@ done:
103689 void arch_pick_mmap_layout(struct mm_struct *mm)
103690 {
103691 mm->mmap_base = TASK_UNMAPPED_BASE;
103692+
103693+#ifdef CONFIG_PAX_RANDMMAP
103694+ if (mm->pax_flags & MF_PAX_RANDMMAP)
103695+ mm->mmap_base += mm->delta_mmap;
103696+#endif
103697+
103698 mm->get_unmapped_area = arch_get_unmapped_area;
103699 }
103700 #endif
103701@@ -472,6 +478,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
103702 if (!mm->arg_end)
103703 goto out_mm; /* Shh! No looking before we're done */
103704
103705+ if (gr_acl_handle_procpidmem(task))
103706+ goto out_mm;
103707+
103708 len = mm->arg_end - mm->arg_start;
103709
103710 if (len > buflen)
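
The util.c hunk is the generic-arch half of PAX_RANDMMAP: when MF_PAX_RANDMMAP is set, mmap_base is displaced by a per-exec random delta before get_unmapped_area ever runs. A hedged model of the displacement; the constants and entropy source below are made up, the kernel derives delta_mmap from its own entropy at exec time:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define TASK_UNMAPPED_BASE	0x40000000UL	/* illustrative */
#define PAGE_SHIFT		12

int main(void)
{
	srand((unsigned)time(NULL));
	/* e.g. up to 16 bits of page-granular randomness */
	unsigned long delta_mmap = ((unsigned long)rand() & 0xffffUL) << PAGE_SHIFT;
	unsigned long mmap_base = TASK_UNMAPPED_BASE + delta_mmap;
	printf("mmap_base = %#lx (delta %#lx)\n", mmap_base, delta_mmap);
	return 0;
}
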
103711diff --git a/mm/vmalloc.c b/mm/vmalloc.c
103712index f64632b..e8c52e7 100644
103713--- a/mm/vmalloc.c
103714+++ b/mm/vmalloc.c
103715@@ -40,6 +40,21 @@ struct vfree_deferred {
103716 };
103717 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
103718
103719+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
103720+struct stack_deferred_llist {
103721+ struct llist_head list;
103722+ void *stack;
103723+ void *lowmem_stack;
103724+};
103725+
103726+struct stack_deferred {
103727+ struct stack_deferred_llist list;
103728+ struct work_struct wq;
103729+};
103730+
103731+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
103732+#endif
103733+
103734 static void __vunmap(const void *, int);
103735
103736 static void free_work(struct work_struct *w)
103737@@ -47,12 +62,30 @@ static void free_work(struct work_struct *w)
103738 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
103739 struct llist_node *llnode = llist_del_all(&p->list);
103740 while (llnode) {
103741- void *p = llnode;
103742+ void *x = llnode;
103743 llnode = llist_next(llnode);
103744- __vunmap(p, 1);
103745+ __vunmap(x, 1);
103746 }
103747 }
103748
103749+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
103750+static void unmap_work(struct work_struct *w)
103751+{
103752+ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
103753+ struct llist_node *llnode = llist_del_all(&p->list.list);
103754+ while (llnode) {
103755+ struct stack_deferred_llist *x =
103756+ llist_entry((struct llist_head *)llnode,
103757+ struct stack_deferred_llist, list);
103758+ void *stack = ACCESS_ONCE(x->stack);
103759+ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
103760+ llnode = llist_next(llnode);
103761+ __vunmap(stack, 0);
103762+ free_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
103763+ }
103764+}
103765+#endif
103766+
103767 /*** Page table manipulation functions ***/
103768
103769 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
103770@@ -61,8 +94,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
103771
103772 pte = pte_offset_kernel(pmd, addr);
103773 do {
103774- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
103775- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
103776+
103777+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
103778+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
103779+ BUG_ON(!pte_exec(*pte));
103780+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
103781+ continue;
103782+ }
103783+#endif
103784+
103785+ {
103786+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
103787+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
103788+ }
103789 } while (pte++, addr += PAGE_SIZE, addr != end);
103790 }
103791
103792@@ -122,16 +166,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
103793 pte = pte_alloc_kernel(pmd, addr);
103794 if (!pte)
103795 return -ENOMEM;
103796+
103797+ pax_open_kernel();
103798 do {
103799 struct page *page = pages[*nr];
103800
103801- if (WARN_ON(!pte_none(*pte)))
103802+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
103803+ if (pgprot_val(prot) & _PAGE_NX)
103804+#endif
103805+
103806+ if (!pte_none(*pte)) {
103807+ pax_close_kernel();
103808+ WARN_ON(1);
103809 return -EBUSY;
103810- if (WARN_ON(!page))
103811+ }
103812+ if (!page) {
103813+ pax_close_kernel();
103814+ WARN_ON(1);
103815 return -ENOMEM;
103816+ }
103817 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
103818 (*nr)++;
103819 } while (pte++, addr += PAGE_SIZE, addr != end);
103820+ pax_close_kernel();
103821 return 0;
103822 }
103823
103824@@ -141,7 +198,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
103825 pmd_t *pmd;
103826 unsigned long next;
103827
103828- pmd = pmd_alloc(&init_mm, pud, addr);
103829+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
103830 if (!pmd)
103831 return -ENOMEM;
103832 do {
103833@@ -158,7 +215,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
103834 pud_t *pud;
103835 unsigned long next;
103836
103837- pud = pud_alloc(&init_mm, pgd, addr);
103838+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
103839 if (!pud)
103840 return -ENOMEM;
103841 do {
103842@@ -218,6 +275,12 @@ int is_vmalloc_or_module_addr(const void *x)
103843 if (addr >= MODULES_VADDR && addr < MODULES_END)
103844 return 1;
103845 #endif
103846+
103847+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
103848+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
103849+ return 1;
103850+#endif
103851+
103852 return is_vmalloc_addr(x);
103853 }
103854
103855@@ -238,8 +301,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
103856
103857 if (!pgd_none(*pgd)) {
103858 pud_t *pud = pud_offset(pgd, addr);
103859+#ifdef CONFIG_X86
103860+ if (!pud_large(*pud))
103861+#endif
103862 if (!pud_none(*pud)) {
103863 pmd_t *pmd = pmd_offset(pud, addr);
103864+#ifdef CONFIG_X86
103865+ if (!pmd_large(*pmd))
103866+#endif
103867 if (!pmd_none(*pmd)) {
103868 pte_t *ptep, pte;
103869
103870@@ -1183,13 +1252,23 @@ void __init vmalloc_init(void)
103871 for_each_possible_cpu(i) {
103872 struct vmap_block_queue *vbq;
103873 struct vfree_deferred *p;
103874+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
103875+ struct stack_deferred *p2;
103876+#endif
103877
103878 vbq = &per_cpu(vmap_block_queue, i);
103879 spin_lock_init(&vbq->lock);
103880 INIT_LIST_HEAD(&vbq->free);
103881+
103882 p = &per_cpu(vfree_deferred, i);
103883 init_llist_head(&p->list);
103884 INIT_WORK(&p->wq, free_work);
103885+
103886+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
103887+ p2 = &per_cpu(stack_deferred, i);
103888+ init_llist_head(&p2->list.list);
103889+ INIT_WORK(&p2->wq, unmap_work);
103890+#endif
103891 }
103892
103893 /* Import existing vmlist entries. */
103894@@ -1318,6 +1397,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
103895 struct vm_struct *area;
103896
103897 BUG_ON(in_interrupt());
103898+
103899+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
103900+ if (flags & VM_KERNEXEC) {
103901+ if (start != VMALLOC_START || end != VMALLOC_END)
103902+ return NULL;
103903+ start = (unsigned long)MODULES_EXEC_VADDR;
103904+ end = (unsigned long)MODULES_EXEC_END;
103905+ }
103906+#endif
103907+
103908 if (flags & VM_IOREMAP)
103909 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
103910
103911@@ -1523,6 +1612,23 @@ void vunmap(const void *addr)
103912 }
103913 EXPORT_SYMBOL(vunmap);
103914
103915+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
103916+void unmap_process_stacks(struct task_struct *task)
103917+{
103918+ if (unlikely(in_interrupt())) {
103919+ struct stack_deferred *p = &__get_cpu_var(stack_deferred);
103920+ struct stack_deferred_llist *list = task->stack;
103921+ list->stack = task->stack;
103922+ list->lowmem_stack = task->lowmem_stack;
103923+ if (llist_add((struct llist_node *)&list->list, &p->list.list))
103924+ schedule_work(&p->wq);
103925+ } else {
103926+ __vunmap(task->stack, 0);
103927+ free_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
103928+ }
103929+}
103930+#endif
103931+
103932 /**
103933 * vmap - map an array of pages into virtually contiguous space
103934 * @pages: array of page pointers
103935@@ -1543,6 +1649,11 @@ void *vmap(struct page **pages, unsigned int count,
103936 if (count > totalram_pages)
103937 return NULL;
103938
103939+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
103940+ if (!(pgprot_val(prot) & _PAGE_NX))
103941+ flags |= VM_KERNEXEC;
103942+#endif
103943+
103944 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
103945 __builtin_return_address(0));
103946 if (!area)
103947@@ -1643,6 +1754,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
103948 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
103949 goto fail;
103950
103951+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
103952+ if (!(pgprot_val(prot) & _PAGE_NX))
103953+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
103954+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
103955+ else
103956+#endif
103957+
103958 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
103959 start, end, node, gfp_mask, caller);
103960 if (!area)
103961@@ -1819,10 +1937,9 @@ EXPORT_SYMBOL(vzalloc_node);
103962 * For tight control over page level allocator and protection flags
103963 * use __vmalloc() instead.
103964 */
103965-
103966 void *vmalloc_exec(unsigned long size)
103967 {
103968- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
103969+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
103970 NUMA_NO_NODE, __builtin_return_address(0));
103971 }
103972
103973@@ -2129,6 +2246,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
103974 {
103975 struct vm_struct *area;
103976
103977+ BUG_ON(vma->vm_mirror);
103978+
103979 size = PAGE_ALIGN(size);
103980
103981 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
103982@@ -2611,7 +2730,11 @@ static int s_show(struct seq_file *m, void *p)
103983 v->addr, v->addr + v->size, v->size);
103984
103985 if (v->caller)
103986+#ifdef CONFIG_GRKERNSEC_HIDESYM
103987+ seq_printf(m, " %pK", v->caller);
103988+#else
103989 seq_printf(m, " %pS", v->caller);
103990+#endif
103991
103992 if (v->nr_pages)
103993 seq_printf(m, " pages=%d", v->nr_pages);
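
unmap_process_stacks() in the vmalloc.c hunk cannot call __vunmap() from interrupt context, so it reuses the head of the dead stack itself as a list node, pushes it onto a per-CPU llist, and schedules a worker to do the real unmap. A simplified, single-threaded userspace model of that defer-or-free-now pattern (plain linked list standing in for llist + schedule_work):

#include <stdio.h>
#include <stdlib.h>

struct deferred { struct deferred *next; void *stack; };

static struct deferred *pending;

static void free_stack(void *stack) { free(stack); }

static void unmap_stack(void *stack, int in_interrupt)
{
	if (in_interrupt) {
		/* Reuse the stack memory itself as the list node, as the
		 * patch does with stack_deferred_llist. */
		struct deferred *node = stack;
		node->stack = stack;
		node->next = pending;
		pending = node;		/* kernel: llist_add + schedule_work */
	} else {
		free_stack(stack);	/* kernel: __vunmap + free_kmem_pages */
	}
}

static void worker_drain(void)
{
	while (pending) {
		struct deferred *node = pending;
		pending = node->next;
		free_stack(node->stack);
	}
}

int main(void)
{
	unmap_stack(malloc(64), 1);	/* "interrupt" context: deferred */
	unmap_stack(malloc(64), 0);	/* process context: freed now */
	worker_drain();
	return 0;
}
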
103994diff --git a/mm/vmstat.c b/mm/vmstat.c
103995index b37bd49..4d7b3da 100644
103996--- a/mm/vmstat.c
103997+++ b/mm/vmstat.c
103998@@ -20,6 +20,7 @@
103999 #include <linux/writeback.h>
104000 #include <linux/compaction.h>
104001 #include <linux/mm_inline.h>
104002+#include <linux/grsecurity.h>
104003
104004 #include "internal.h"
104005
104006@@ -79,7 +80,7 @@ void vm_events_fold_cpu(int cpu)
104007 *
104008 * vm_stat contains the global counters
104009 */
104010-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
104011+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
104012 EXPORT_SYMBOL(vm_stat);
104013
104014 #ifdef CONFIG_SMP
104015@@ -425,7 +426,7 @@ static inline void fold_diff(int *diff)
104016
104017 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
104018 if (diff[i])
104019- atomic_long_add(diff[i], &vm_stat[i]);
104020+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
104021 }
104022
104023 /*
104024@@ -457,7 +458,7 @@ static void refresh_cpu_vm_stats(void)
104025 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
104026 if (v) {
104027
104028- atomic_long_add(v, &zone->vm_stat[i]);
104029+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
104030 global_diff[i] += v;
104031 #ifdef CONFIG_NUMA
104032 /* 3 seconds idle till flush */
104033@@ -519,7 +520,7 @@ void cpu_vm_stats_fold(int cpu)
104034
104035 v = p->vm_stat_diff[i];
104036 p->vm_stat_diff[i] = 0;
104037- atomic_long_add(v, &zone->vm_stat[i]);
104038+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
104039 global_diff[i] += v;
104040 }
104041 }
104042@@ -539,8 +540,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
104043 if (pset->vm_stat_diff[i]) {
104044 int v = pset->vm_stat_diff[i];
104045 pset->vm_stat_diff[i] = 0;
104046- atomic_long_add(v, &zone->vm_stat[i]);
104047- atomic_long_add(v, &vm_stat[i]);
104048+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
104049+ atomic_long_add_unchecked(v, &vm_stat[i]);
104050 }
104051 }
104052 #endif
104053@@ -1162,10 +1163,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
104054 stat_items_size += sizeof(struct vm_event_state);
104055 #endif
104056
104057- v = kmalloc(stat_items_size, GFP_KERNEL);
104058+ v = kzalloc(stat_items_size, GFP_KERNEL);
104059 m->private = v;
104060 if (!v)
104061 return ERR_PTR(-ENOMEM);
104062+
104063+#ifdef CONFIG_GRKERNSEC_PROC_ADD
104064+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
104065+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
104066+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
104067+ && !in_group_p(grsec_proc_gid)
104068+#endif
104069+ )
104070+ return (unsigned long *)m->private + *pos;
104071+#endif
104072+#endif
104073+
104074 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
104075 v[i] = global_page_state(i);
104076 v += NR_VM_ZONE_STAT_ITEMS;
104077@@ -1314,10 +1327,16 @@ static int __init setup_vmstat(void)
104078 cpu_notifier_register_done();
104079 #endif
104080 #ifdef CONFIG_PROC_FS
104081- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
104082- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
104083- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
104084- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
104085+ {
104086+ mode_t gr_mode = S_IRUGO;
104087+#ifdef CONFIG_GRKERNSEC_PROC_ADD
104088+ gr_mode = S_IRUSR;
104089+#endif
104090+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
104091+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
104092+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
104093+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
104094+ }
104095 #endif
104096 return 0;
104097 }
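
Two things happen in the vmstat_start() hunk: the buffer switches from kmalloc to kzalloc, and GRKERNSEC_PROC_ADD makes unprivileged readers return early with that buffer instead of live counters, so the zeroing is what keeps the early path from leaking heap contents. A model of why the allocation must be zeroed (the privilege test is a stand-in):

#include <stdio.h>
#include <stdlib.h>

static long *vmstat_start_model(size_t items, int privileged)
{
	long *v = calloc(items, sizeof(*v));	/* kzalloc analogue */
	if (!v)
		return NULL;
	if (!privileged)
		return v;	/* early return: buffer must already be zeroed */
	for (size_t i = 0; i < items; i++)
		v[i] = (long)(i + 1);	/* real counters filled only for root */
	return v;
}

int main(void)
{
	long *v = vmstat_start_model(4, 0);
	printf("unprivileged sees: %ld %ld\n", v[0], v[3]);	/* 0 0 */
	free(v);
	return 0;
}
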
104098diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
104099index 44ebd5c..1f732bae 100644
104100--- a/net/8021q/vlan.c
104101+++ b/net/8021q/vlan.c
104102@@ -475,7 +475,7 @@ out:
104103 return NOTIFY_DONE;
104104 }
104105
104106-static struct notifier_block vlan_notifier_block __read_mostly = {
104107+static struct notifier_block vlan_notifier_block = {
104108 .notifier_call = vlan_device_event,
104109 };
104110
104111@@ -550,8 +550,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
104112 err = -EPERM;
104113 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
104114 break;
104115- if ((args.u.name_type >= 0) &&
104116- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
104117+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
104118 struct vlan_net *vn;
104119
104120 vn = net_generic(net, vlan_net_id);
104121diff --git a/net/9p/client.c b/net/9p/client.c
104122index 0004cba..feba240 100644
104123--- a/net/9p/client.c
104124+++ b/net/9p/client.c
104125@@ -596,7 +596,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
104126 len - inline_len);
104127 } else {
104128 err = copy_from_user(ename + inline_len,
104129- uidata, len - inline_len);
104130+ (char __force_user *)uidata, len - inline_len);
104131 if (err) {
104132 err = -EFAULT;
104133 goto out_err;
104134@@ -1571,7 +1571,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
104135 kernel_buf = 1;
104136 indata = data;
104137 } else
104138- indata = (__force char *)udata;
104139+ indata = (__force_kernel char *)udata;
104140 /*
104141 * response header len is 11
104142 * PDU Header(7) + IO Size (4)
104143@@ -1646,7 +1646,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
104144 kernel_buf = 1;
104145 odata = data;
104146 } else
104147- odata = (char *)udata;
104148+ odata = (char __force_kernel *)udata;
104149 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
104150 P9_ZC_HDR_SZ, kernel_buf, "dqd",
104151 fid->fid, offset, rsize);
104152diff --git a/net/9p/mod.c b/net/9p/mod.c
104153index 6ab36ae..6f1841b 100644
104154--- a/net/9p/mod.c
104155+++ b/net/9p/mod.c
104156@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
104157 void v9fs_register_trans(struct p9_trans_module *m)
104158 {
104159 spin_lock(&v9fs_trans_lock);
104160- list_add_tail(&m->list, &v9fs_trans_list);
104161+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
104162 spin_unlock(&v9fs_trans_lock);
104163 }
104164 EXPORT_SYMBOL(v9fs_register_trans);
104165@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
104166 void v9fs_unregister_trans(struct p9_trans_module *m)
104167 {
104168 spin_lock(&v9fs_trans_lock);
104169- list_del_init(&m->list);
104170+ pax_list_del_init((struct list_head *)&m->list);
104171 spin_unlock(&v9fs_trans_lock);
104172 }
104173 EXPORT_SYMBOL(v9fs_unregister_trans);
104174diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
104175index 80d08f6..de63fd1 100644
104176--- a/net/9p/trans_fd.c
104177+++ b/net/9p/trans_fd.c
104178@@ -428,7 +428,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
104179 oldfs = get_fs();
104180 set_fs(get_ds());
104181 /* The cast to a user pointer is valid due to the set_fs() */
104182- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
104183+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
104184 set_fs(oldfs);
104185
104186 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
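
The 9p hunks annotate pointers crossing the set_fs() boundary with __force_user/__force_kernel so the address-space checker can keep user and kernel pointers in distinct spaces even where the code deliberately mixes them. A sketch of how such annotations are typically wired up; the macro definitions below are stand-ins patterned after the kernel's, not copied from it:

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user	__force __user

static long model_vfs_write(void __user *buf, unsigned long len)
{
	(void)buf;
	return (long)len;	/* pretend we wrote len bytes */
}

int main(void)
{
	char kbuf[8] = "data";
	/* Inside a set_fs(get_ds()) window a kernel pointer is legal as a
	 * "user" pointer, but the checker still wants an explicit cast,
	 * hence the __force_user the patch adds to these call sites. */
	return model_vfs_write((void __force_user *)kbuf, sizeof(kbuf)) == 8 ? 0 : 1;
}
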
104187diff --git a/net/Kconfig b/net/Kconfig
104188index d92afe4..ab63892 100644
104189--- a/net/Kconfig
104190+++ b/net/Kconfig
104191@@ -89,12 +89,8 @@ config NETWORK_SECMARK
104192 to nfmark, but designated for security purposes.
104193 If you are unsure how to answer this question, answer N.
104194
104195-config NET_PTP_CLASSIFY
104196- def_bool n
104197-
104198 config NETWORK_PHY_TIMESTAMPING
104199 bool "Timestamping in PHY devices"
104200- select NET_PTP_CLASSIFY
104201 help
104202 This allows timestamping of network packets by PHYs with
104203 hardware timestamping capabilities. This option adds some
104204@@ -269,7 +265,7 @@ config BQL
104205 config BPF_JIT
104206 bool "enable BPF Just In Time compiler"
104207 depends on HAVE_BPF_JIT
104208- depends on MODULES
104209+ depends on MODULES && X86
104210 ---help---
104211 Berkeley Packet Filter filtering capabilities are normally handled
104212 by an interpreter. This option allows kernel to generate a native
104213diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
104214index af46bc4..f9adfcd 100644
104215--- a/net/appletalk/atalk_proc.c
104216+++ b/net/appletalk/atalk_proc.c
104217@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
104218 struct proc_dir_entry *p;
104219 int rc = -ENOMEM;
104220
104221- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
104222+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
104223 if (!atalk_proc_dir)
104224 goto out;
104225
104226diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
104227index 876fbe8..8bbea9f 100644
104228--- a/net/atm/atm_misc.c
104229+++ b/net/atm/atm_misc.c
104230@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
104231 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
104232 return 1;
104233 atm_return(vcc, truesize);
104234- atomic_inc(&vcc->stats->rx_drop);
104235+ atomic_inc_unchecked(&vcc->stats->rx_drop);
104236 return 0;
104237 }
104238 EXPORT_SYMBOL(atm_charge);
104239@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
104240 }
104241 }
104242 atm_return(vcc, guess);
104243- atomic_inc(&vcc->stats->rx_drop);
104244+ atomic_inc_unchecked(&vcc->stats->rx_drop);
104245 return NULL;
104246 }
104247 EXPORT_SYMBOL(atm_alloc_charge);
104248@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
104249
104250 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
104251 {
104252-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
104253+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
104254 __SONET_ITEMS
104255 #undef __HANDLE_ITEM
104256 }
104257@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
104258
104259 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
104260 {
104261-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
104262+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
104263 __SONET_ITEMS
104264 #undef __HANDLE_ITEM
104265 }
104266diff --git a/net/atm/lec.c b/net/atm/lec.c
104267index 4c5b8ba..95f7005 100644
104268--- a/net/atm/lec.c
104269+++ b/net/atm/lec.c
104270@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
104271 }
104272
104273 static struct lane2_ops lane2_ops = {
104274- lane2_resolve, /* resolve, spec 3.1.3 */
104275- lane2_associate_req, /* associate_req, spec 3.1.4 */
104276- NULL /* associate indicator, spec 3.1.5 */
104277+ .resolve = lane2_resolve,
104278+ .associate_req = lane2_associate_req,
104279+ .associate_indicator = NULL
104280 };
104281
104282 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
104283diff --git a/net/atm/lec.h b/net/atm/lec.h
104284index 4149db1..f2ab682 100644
104285--- a/net/atm/lec.h
104286+++ b/net/atm/lec.h
104287@@ -48,7 +48,7 @@ struct lane2_ops {
104288 const u8 *tlvs, u32 sizeoftlvs);
104289 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
104290 const u8 *tlvs, u32 sizeoftlvs);
104291-};
104292+} __no_const;
104293
104294 /*
104295 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
104296diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
104297index d1b2d9a..d549f7f 100644
104298--- a/net/atm/mpoa_caches.c
104299+++ b/net/atm/mpoa_caches.c
104300@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
104301
104302
104303 static struct in_cache_ops ingress_ops = {
104304- in_cache_add_entry, /* add_entry */
104305- in_cache_get, /* get */
104306- in_cache_get_with_mask, /* get_with_mask */
104307- in_cache_get_by_vcc, /* get_by_vcc */
104308- in_cache_put, /* put */
104309- in_cache_remove_entry, /* remove_entry */
104310- cache_hit, /* cache_hit */
104311- clear_count_and_expired, /* clear_count */
104312- check_resolving_entries, /* check_resolving */
104313- refresh_entries, /* refresh */
104314- in_destroy_cache /* destroy_cache */
104315+ .add_entry = in_cache_add_entry,
104316+ .get = in_cache_get,
104317+ .get_with_mask = in_cache_get_with_mask,
104318+ .get_by_vcc = in_cache_get_by_vcc,
104319+ .put = in_cache_put,
104320+ .remove_entry = in_cache_remove_entry,
104321+ .cache_hit = cache_hit,
104322+ .clear_count = clear_count_and_expired,
104323+ .check_resolving = check_resolving_entries,
104324+ .refresh = refresh_entries,
104325+ .destroy_cache = in_destroy_cache
104326 };
104327
104328 static struct eg_cache_ops egress_ops = {
104329- eg_cache_add_entry, /* add_entry */
104330- eg_cache_get_by_cache_id, /* get_by_cache_id */
104331- eg_cache_get_by_tag, /* get_by_tag */
104332- eg_cache_get_by_vcc, /* get_by_vcc */
104333- eg_cache_get_by_src_ip, /* get_by_src_ip */
104334- eg_cache_put, /* put */
104335- eg_cache_remove_entry, /* remove_entry */
104336- update_eg_cache_entry, /* update */
104337- clear_expired, /* clear_expired */
104338- eg_destroy_cache /* destroy_cache */
104339+ .add_entry = eg_cache_add_entry,
104340+ .get_by_cache_id = eg_cache_get_by_cache_id,
104341+ .get_by_tag = eg_cache_get_by_tag,
104342+ .get_by_vcc = eg_cache_get_by_vcc,
104343+ .get_by_src_ip = eg_cache_get_by_src_ip,
104344+ .put = eg_cache_put,
104345+ .remove_entry = eg_cache_remove_entry,
104346+ .update = update_eg_cache_entry,
104347+ .clear_expired = clear_expired,
104348+ .destroy_cache = eg_destroy_cache
104349 };
104350
104351
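
The lec.c and mpoa_caches.c hunks rewrite the ops tables from positional to C99 designated initializers, which the constification side of the patch relies on and which no longer misbind silently if a member is added or reordered. A minimal illustration with a made-up ops struct:

#include <stdio.h>

struct cache_ops {
	int (*add_entry)(int);
	int (*get)(int);
};

static int add_entry_impl(int x) { return x + 1; }
static int get_impl(int x)       { return x * 2; }

/* Designated init binds by name, not position: reordering the
 * struct members above would not silently swap the callbacks. */
static const struct cache_ops ops = {
	.add_entry = add_entry_impl,
	.get       = get_impl,
};

int main(void)
{
	printf("%d %d\n", ops.add_entry(1), ops.get(2));	/* 2 4 */
	return 0;
}
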
104352diff --git a/net/atm/proc.c b/net/atm/proc.c
104353index bbb6461..cf04016 100644
104354--- a/net/atm/proc.c
104355+++ b/net/atm/proc.c
104356@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
104357 const struct k_atm_aal_stats *stats)
104358 {
104359 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
104360- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
104361- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
104362- atomic_read(&stats->rx_drop));
104363+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
104364+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
104365+ atomic_read_unchecked(&stats->rx_drop));
104366 }
104367
104368 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
104369diff --git a/net/atm/resources.c b/net/atm/resources.c
104370index 0447d5d..3cf4728 100644
104371--- a/net/atm/resources.c
104372+++ b/net/atm/resources.c
104373@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
104374 static void copy_aal_stats(struct k_atm_aal_stats *from,
104375 struct atm_aal_stats *to)
104376 {
104377-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
104378+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
104379 __AAL_STAT_ITEMS
104380 #undef __HANDLE_ITEM
104381 }
104382@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
104383 static void subtract_aal_stats(struct k_atm_aal_stats *from,
104384 struct atm_aal_stats *to)
104385 {
104386-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
104387+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
104388 __AAL_STAT_ITEMS
104389 #undef __HANDLE_ITEM
104390 }
104391diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
104392index 919a5ce..cc6b444 100644
104393--- a/net/ax25/sysctl_net_ax25.c
104394+++ b/net/ax25/sysctl_net_ax25.c
104395@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
104396 {
104397 char path[sizeof("net/ax25/") + IFNAMSIZ];
104398 int k;
104399- struct ctl_table *table;
104400+ ctl_table_no_const *table;
104401
104402 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
104403 if (!table)
104404diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
104405index f04224c..f326579 100644
104406--- a/net/batman-adv/bat_iv_ogm.c
104407+++ b/net/batman-adv/bat_iv_ogm.c
104408@@ -312,7 +312,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
104409
104410 /* randomize initial seqno to avoid collision */
104411 get_random_bytes(&random_seqno, sizeof(random_seqno));
104412- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
104413+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
104414
104415 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
104416 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
104417@@ -917,9 +917,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
104418 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
104419
104420 /* change sequence number to network order */
104421- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
104422+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
104423 batadv_ogm_packet->seqno = htonl(seqno);
104424- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
104425+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
104426
104427 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
104428
104429@@ -1596,7 +1596,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
104430 return;
104431
104432 /* could be changed by schedule_own_packet() */
104433- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
104434+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
104435
104436 if (ogm_packet->flags & BATADV_DIRECTLINK)
104437 has_directlink_flag = true;
104438diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
104439index 022d18a..919daff 100644
104440--- a/net/batman-adv/fragmentation.c
104441+++ b/net/batman-adv/fragmentation.c
104442@@ -450,7 +450,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
104443 frag_header.packet_type = BATADV_UNICAST_FRAG;
104444 frag_header.version = BATADV_COMPAT_VERSION;
104445 frag_header.ttl = BATADV_TTL;
104446- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
104447+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
104448 frag_header.reserved = 0;
104449 frag_header.no = 0;
104450 frag_header.total_size = htons(skb->len);
104451diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
104452index cbd677f..b783347 100644
104453--- a/net/batman-adv/soft-interface.c
104454+++ b/net/batman-adv/soft-interface.c
104455@@ -296,7 +296,7 @@ send:
104456 primary_if->net_dev->dev_addr);
104457
104458 /* set broadcast sequence number */
104459- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
104460+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
104461 bcast_packet->seqno = htonl(seqno);
104462
104463 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
104464@@ -761,7 +761,7 @@ static int batadv_softif_init_late(struct net_device *dev)
104465 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
104466
104467 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
104468- atomic_set(&bat_priv->bcast_seqno, 1);
104469+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
104470 atomic_set(&bat_priv->tt.vn, 0);
104471 atomic_set(&bat_priv->tt.local_changes, 0);
104472 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
104473@@ -775,7 +775,7 @@ static int batadv_softif_init_late(struct net_device *dev)
104474
104475 /* randomize initial seqno to avoid collision */
104476 get_random_bytes(&random_seqno, sizeof(random_seqno));
104477- atomic_set(&bat_priv->frag_seqno, random_seqno);
104478+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
104479
104480 bat_priv->primary_if = NULL;
104481 bat_priv->num_ifaces = 0;
104482diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
104483index 8854c05..ee5d5497 100644
104484--- a/net/batman-adv/types.h
104485+++ b/net/batman-adv/types.h
104486@@ -67,7 +67,7 @@ enum batadv_dhcp_recipient {
104487 struct batadv_hard_iface_bat_iv {
104488 unsigned char *ogm_buff;
104489 int ogm_buff_len;
104490- atomic_t ogm_seqno;
104491+ atomic_unchecked_t ogm_seqno;
104492 };
104493
104494 /**
104495@@ -768,7 +768,7 @@ struct batadv_priv {
104496 atomic_t bonding;
104497 atomic_t fragmentation;
104498 atomic_t packet_size_max;
104499- atomic_t frag_seqno;
104500+ atomic_unchecked_t frag_seqno;
104501 #ifdef CONFIG_BATMAN_ADV_BLA
104502 atomic_t bridge_loop_avoidance;
104503 #endif
104504@@ -787,7 +787,7 @@ struct batadv_priv {
104505 #endif
104506 uint32_t isolation_mark;
104507 uint32_t isolation_mark_mask;
104508- atomic_t bcast_seqno;
104509+ atomic_unchecked_t bcast_seqno;
104510 atomic_t bcast_queue_left;
104511 atomic_t batman_queue_left;
104512 char num_ifaces;
104513diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
104514index 80d25c1..aa99a98 100644
104515--- a/net/bluetooth/hci_sock.c
104516+++ b/net/bluetooth/hci_sock.c
104517@@ -1044,7 +1044,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
104518 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
104519 }
104520
104521- len = min_t(unsigned int, len, sizeof(uf));
104522+ len = min((size_t)len, sizeof(uf));
104523 if (copy_from_user(&uf, optval, len)) {
104524 err = -EFAULT;
104525 break;
104526diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
104527index 323f23c..5e27529 100644
104528--- a/net/bluetooth/l2cap_core.c
104529+++ b/net/bluetooth/l2cap_core.c
104530@@ -3548,8 +3548,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
104531 break;
104532
104533 case L2CAP_CONF_RFC:
104534- if (olen == sizeof(rfc))
104535- memcpy(&rfc, (void *)val, olen);
104536+ if (olen != sizeof(rfc))
104537+ break;
104538+
104539+ memcpy(&rfc, (void *)val, olen);
104540
104541 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
104542 rfc.mode != chan->mode)
104543diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
104544index e137869..33f3ebd 100644
104545--- a/net/bluetooth/l2cap_sock.c
104546+++ b/net/bluetooth/l2cap_sock.c
104547@@ -628,7 +628,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
104548 struct sock *sk = sock->sk;
104549 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
104550 struct l2cap_options opts;
104551- int len, err = 0;
104552+ int err = 0;
104553+ size_t len = optlen;
104554 u32 opt;
104555
104556 BT_DBG("sk %p", sk);
104557@@ -655,7 +656,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
104558 opts.max_tx = chan->max_tx;
104559 opts.txwin_size = chan->tx_win;
104560
104561- len = min_t(unsigned int, sizeof(opts), optlen);
104562+ len = min(sizeof(opts), len);
104563 if (copy_from_user((char *) &opts, optval, len)) {
104564 err = -EFAULT;
104565 break;
104566@@ -742,7 +743,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
104567 struct bt_security sec;
104568 struct bt_power pwr;
104569 struct l2cap_conn *conn;
104570- int len, err = 0;
104571+ int err = 0;
104572+ size_t len = optlen;
104573 u32 opt;
104574
104575 BT_DBG("sk %p", sk);
104576@@ -766,7 +768,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
104577
104578 sec.level = BT_SECURITY_LOW;
104579
104580- len = min_t(unsigned int, sizeof(sec), optlen);
104581+ len = min(sizeof(sec), len);
104582 if (copy_from_user((char *) &sec, optval, len)) {
104583 err = -EFAULT;
104584 break;
104585@@ -861,7 +863,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
104586
104587 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
104588
104589- len = min_t(unsigned int, sizeof(pwr), optlen);
104590+ len = min(sizeof(pwr), len);
104591 if (copy_from_user((char *) &pwr, optval, len)) {
104592 err = -EFAULT;
104593 break;
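
The l2cap/rfcomm setsockopt hunks retype len from int to size_t and replace min_t(unsigned int, ...) with plain min(), so the clamp and the eventual copy_from_user() length live in one unsigned type end to end, with no signed intermediate to reinterpret a stray value. A small model of the normalized clamp:

#include <stdio.h>
#include <stddef.h>

/* The clamp the patched paths perform, kept entirely in size_t. */
static size_t clamp_len(size_t optlen, size_t optsize)
{
	return optlen < optsize ? optlen : optsize;	/* min(sizeof(x), len) */
}

int main(void)
{
	size_t huge = (size_t)-1;	/* what a stray -1 becomes as size_t */
	printf("clamped: %zu\n", clamp_len(huge, 16));	/* still 16 */
	printf("clamped: %zu\n", clamp_len(8, 16));	/* 8 */
	return 0;
}
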
104594diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
104595index c603a5e..7f08991 100644
104596--- a/net/bluetooth/rfcomm/sock.c
104597+++ b/net/bluetooth/rfcomm/sock.c
104598@@ -687,7 +687,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
104599 struct sock *sk = sock->sk;
104600 struct bt_security sec;
104601 int err = 0;
104602- size_t len;
104603+ size_t len = optlen;
104604 u32 opt;
104605
104606 BT_DBG("sk %p", sk);
104607@@ -709,7 +709,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
104608
104609 sec.level = BT_SECURITY_LOW;
104610
104611- len = min_t(unsigned int, sizeof(sec), optlen);
104612+ len = min(sizeof(sec), len);
104613 if (copy_from_user((char *) &sec, optval, len)) {
104614 err = -EFAULT;
104615 break;
104616diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
104617index 8e385a0..a5bdd8e 100644
104618--- a/net/bluetooth/rfcomm/tty.c
104619+++ b/net/bluetooth/rfcomm/tty.c
104620@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
104621 BT_DBG("tty %p id %d", tty, tty->index);
104622
104623 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
104624- dev->channel, dev->port.count);
104625+ dev->channel, atomic_read(&dev->port.count));
104626
104627 err = tty_port_open(&dev->port, tty, filp);
104628 if (err)
104629@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
104630 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
104631
104632 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
104633- dev->port.count);
104634+ atomic_read(&dev->port.count));
104635
104636 tty_port_close(&dev->port, tty, filp);
104637 }
104638diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
104639index 1059ed3..d70846a 100644
104640--- a/net/bridge/netfilter/ebtables.c
104641+++ b/net/bridge/netfilter/ebtables.c
104642@@ -1524,7 +1524,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
104643 tmp.valid_hooks = t->table->valid_hooks;
104644 }
104645 mutex_unlock(&ebt_mutex);
104646- if (copy_to_user(user, &tmp, *len) != 0) {
104647+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
104648 BUGPRINT("c2u Didn't work\n");
104649 ret = -EFAULT;
104650 break;
104651@@ -2330,7 +2330,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
104652 goto out;
104653 tmp.valid_hooks = t->valid_hooks;
104654
104655- if (copy_to_user(user, &tmp, *len) != 0) {
104656+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
104657 ret = -EFAULT;
104658 break;
104659 }
104660@@ -2341,7 +2341,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
104661 tmp.entries_size = t->table->entries_size;
104662 tmp.valid_hooks = t->table->valid_hooks;
104663
104664- if (copy_to_user(user, &tmp, *len) != 0) {
104665+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
104666 ret = -EFAULT;
104667 break;
104668 }
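
Each copy_to_user() in the ebtables get-ctl paths gains a `*len > sizeof(tmp)` guard, closing an infoleak where userland could ask for more bytes than tmp holds and read adjacent kernel stack. A userspace model of the guard (struct layout and return codes are illustrative):

#include <stdio.h>
#include <string.h>

struct info { unsigned valid_hooks; unsigned entries_size; };

static int get_ctl(void *user, size_t ulen)
{
	struct info tmp = { 0x5, 128 };

	if (ulen > sizeof(tmp))		/* the added bound check */
		return -1;		/* kernel path sets -EFAULT */
	memcpy(user, &tmp, ulen);	/* copy_to_user analogue */
	return 0;
}

int main(void)
{
	char buf[64];
	printf("%d\n", get_ctl(buf, sizeof(struct info)));	/* 0: ok */
	printf("%d\n", get_ctl(buf, 64));			/* -1: blocked */
	return 0;
}
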
104669diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
104670index 0f45522..dab651f 100644
104671--- a/net/caif/cfctrl.c
104672+++ b/net/caif/cfctrl.c
104673@@ -10,6 +10,7 @@
104674 #include <linux/spinlock.h>
104675 #include <linux/slab.h>
104676 #include <linux/pkt_sched.h>
104677+#include <linux/sched.h>
104678 #include <net/caif/caif_layer.h>
104679 #include <net/caif/cfpkt.h>
104680 #include <net/caif/cfctrl.h>
104681@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
104682 memset(&dev_info, 0, sizeof(dev_info));
104683 dev_info.id = 0xff;
104684 cfsrvl_init(&this->serv, 0, &dev_info, false);
104685- atomic_set(&this->req_seq_no, 1);
104686- atomic_set(&this->rsp_seq_no, 1);
104687+ atomic_set_unchecked(&this->req_seq_no, 1);
104688+ atomic_set_unchecked(&this->rsp_seq_no, 1);
104689 this->serv.layer.receive = cfctrl_recv;
104690 sprintf(this->serv.layer.name, "ctrl");
104691 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
104692@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
104693 struct cfctrl_request_info *req)
104694 {
104695 spin_lock_bh(&ctrl->info_list_lock);
104696- atomic_inc(&ctrl->req_seq_no);
104697- req->sequence_no = atomic_read(&ctrl->req_seq_no);
104698+ atomic_inc_unchecked(&ctrl->req_seq_no);
104699+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
104700 list_add_tail(&req->list, &ctrl->list);
104701 spin_unlock_bh(&ctrl->info_list_lock);
104702 }
104703@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
104704 if (p != first)
104705 pr_warn("Requests are not received in order\n");
104706
104707- atomic_set(&ctrl->rsp_seq_no,
104708+ atomic_set_unchecked(&ctrl->rsp_seq_no,
104709 p->sequence_no);
104710 list_del(&p->list);
104711 goto out;
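
atomic_set_unchecked() and friends are grsecurity/PaX vocabulary: with
PAX_REFCOUNT enabled, the plain atomic_t operations trap on overflow to
defeat reference-count exploits, so counters where wraparound is harmless,
such as these CAIF sequence numbers, are moved to the _unchecked variants to
avoid false positives. A rough single-threaded analogy; the real operations
are atomic, and the names and abort() policy below are illustrative only:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Checked flavour: overflowing a reference count is treated as a
     * bug (PAX_REFCOUNT traps in the kernel; abort() stands in here). */
    static int inc_checked(int *v)
    {
        if (*v == INT_MAX)
            abort();    /* refcount overflow: likely exploitable */
        return ++*v;
    }

    /* Unchecked flavour: wraparound is fine for sequence numbers and
     * statistics, so no trap is wanted. */
    static unsigned int inc_unchecked(unsigned int *v)
    {
        return ++*v;    /* wraps modulo 2^32 by definition */
    }

    int main(void)
    {
        int refs = 1;
        unsigned int seq = UINT_MAX;

        printf("refs=%d seq=%u\n", inc_checked(&refs),
               inc_unchecked(&seq));    /* seq wraps to 0, no trap */
        return 0;
    }
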
104712diff --git a/net/can/af_can.c b/net/can/af_can.c
104713index ce82337..5d17b4d 100644
104714--- a/net/can/af_can.c
104715+++ b/net/can/af_can.c
104716@@ -884,7 +884,7 @@ static const struct net_proto_family can_family_ops = {
104717 };
104718
104719 /* notifier block for netdevice event */
104720-static struct notifier_block can_netdev_notifier __read_mostly = {
104721+static struct notifier_block can_netdev_notifier = {
104722 .notifier_call = can_notifier,
104723 };
104724
104725diff --git a/net/can/bcm.c b/net/can/bcm.c
104726index dcb75c0..24b1b43 100644
104727--- a/net/can/bcm.c
104728+++ b/net/can/bcm.c
104729@@ -1624,7 +1624,7 @@ static int __init bcm_module_init(void)
104730 }
104731
104732 /* create /proc/net/can-bcm directory */
104733- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
104734+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
104735 return 0;
104736 }
104737
104738diff --git a/net/can/gw.c b/net/can/gw.c
104739index 050a211..bb9fe33 100644
104740--- a/net/can/gw.c
104741+++ b/net/can/gw.c
104742@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
104743 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
104744
104745 static HLIST_HEAD(cgw_list);
104746-static struct notifier_block notifier;
104747
104748 static struct kmem_cache *cgw_cache __read_mostly;
104749
104750@@ -947,6 +946,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
104751 return err;
104752 }
104753
104754+static struct notifier_block notifier = {
104755+ .notifier_call = cgw_notifier
104756+};
104757+
104758 static __init int cgw_module_init(void)
104759 {
104760 /* sanitize given module parameter */
104761@@ -962,7 +965,6 @@ static __init int cgw_module_init(void)
104762 return -ENOMEM;
104763
104764 /* set notifier */
104765- notifier.notifier_call = cgw_notifier;
104766 register_netdevice_notifier(&notifier);
104767
104768 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
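
Rather than patching notifier.notifier_call in cgw_module_init(), the hunks
define the notifier_block with a static initializer. Function-pointer
structures that are only written at build time can then be placed in
read-only memory by this patch set's constification plugin, shrinking the
writable attack surface. A compilable sketch; the struct mirrors the kernel's
layout but the callback body is invented:

    #include <stdio.h>

    struct notifier_block {
        int (*notifier_call)(struct notifier_block *nb,
                             unsigned long event, void *ptr);
    };

    static int cgw_notifier(struct notifier_block *nb,
                            unsigned long event, void *ptr)
    {
        (void)nb; (void)ptr;
        printf("netdev event %lu\n", event);
        return 0;
    }

    /* Initialized at build time: never needs a runtime write, so it can
     * live in a read-only section (the point of the hunks above). */
    static struct notifier_block notifier = {
        .notifier_call = cgw_notifier,
    };

    int main(void)
    {
        return notifier.notifier_call(&notifier, 1UL, NULL);
    }
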
104769diff --git a/net/can/proc.c b/net/can/proc.c
104770index 1a19b98..df2b4ec 100644
104771--- a/net/can/proc.c
104772+++ b/net/can/proc.c
104773@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
104774 void can_init_proc(void)
104775 {
104776 /* create /proc/net/can directory */
104777- can_dir = proc_mkdir("can", init_net.proc_net);
104778+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
104779
104780 if (!can_dir) {
104781 printk(KERN_INFO "can: failed to create /proc/net/can . "
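
proc_mkdir_restrict() is a helper added elsewhere in this patch; judging from
its use for /proc/net/can here and /proc/net/can-bcm above, it creates the
directory with restrictive permissions so unprivileged users cannot enumerate
networking state, in the spirit of GRKERNSEC_PROC. A kernel-side sketch of
the idea in terms of the stock proc_mkdir_mode() API; the 0500 mode is an
assumption, not taken from the patch:

    #include <linux/proc_fs.h>
    #include <linux/stat.h>

    /* Sketch only: approximate a restricted /proc directory with the
     * in-tree proc_mkdir_mode(); root-only read/search access. */
    static struct proc_dir_entry *mkdir_restricted(const char *name,
                                                   struct proc_dir_entry *parent)
    {
        return proc_mkdir_mode(name, S_IRUSR | S_IXUSR, parent);
    }
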
104782diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
104783index 1948d59..9e854d5 100644
104784--- a/net/ceph/messenger.c
104785+++ b/net/ceph/messenger.c
104786@@ -187,7 +187,7 @@ static void con_fault(struct ceph_connection *con);
104787 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
104788
104789 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
104790-static atomic_t addr_str_seq = ATOMIC_INIT(0);
104791+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
104792
104793 static struct page *zero_page; /* used in certain error cases */
104794
104795@@ -198,7 +198,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
104796 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
104797 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
104798
104799- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
104800+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
104801 s = addr_str[i];
104802
104803 switch (ss->ss_family) {
104804diff --git a/net/compat.c b/net/compat.c
104805index bc8aeef..f9c070c 100644
104806--- a/net/compat.c
104807+++ b/net/compat.c
104808@@ -73,9 +73,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
104809 return -EFAULT;
104810 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
104811 kmsg->msg_namelen = sizeof(struct sockaddr_storage);
104812- kmsg->msg_name = compat_ptr(tmp1);
104813- kmsg->msg_iov = compat_ptr(tmp2);
104814- kmsg->msg_control = compat_ptr(tmp3);
104815+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
104816+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
104817+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
104818 return 0;
104819 }
104820
104821@@ -87,7 +87,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
104822
104823 if (kern_msg->msg_name && kern_msg->msg_namelen) {
104824 if (mode == VERIFY_READ) {
104825- int err = move_addr_to_kernel(kern_msg->msg_name,
104826+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
104827 kern_msg->msg_namelen,
104828 kern_address);
104829 if (err < 0)
104830@@ -100,7 +100,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
104831 }
104832
104833 tot_len = iov_from_user_compat_to_kern(kern_iov,
104834- (struct compat_iovec __user *)kern_msg->msg_iov,
104835+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
104836 kern_msg->msg_iovlen);
104837 if (tot_len >= 0)
104838 kern_msg->msg_iov = kern_iov;
104839@@ -120,20 +120,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
104840
104841 #define CMSG_COMPAT_FIRSTHDR(msg) \
104842 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
104843- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
104844+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
104845 (struct compat_cmsghdr __user *)NULL)
104846
104847 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
104848 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
104849 (ucmlen) <= (unsigned long) \
104850 ((mhdr)->msg_controllen - \
104851- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
104852+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
104853
104854 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
104855 struct compat_cmsghdr __user *cmsg, int cmsg_len)
104856 {
104857 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
104858- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
104859+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
104860 msg->msg_controllen)
104861 return NULL;
104862 return (struct compat_cmsghdr __user *)ptr;
104863@@ -223,7 +223,7 @@ Efault:
104864
104865 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
104866 {
104867- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
104868+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
104869 struct compat_cmsghdr cmhdr;
104870 struct compat_timeval ctv;
104871 struct compat_timespec cts[3];
104872@@ -279,7 +279,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
104873
104874 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
104875 {
104876- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
104877+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
104878 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
104879 int fdnum = scm->fp->count;
104880 struct file **fp = scm->fp->fp;
104881@@ -367,7 +367,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
104882 return -EFAULT;
104883 old_fs = get_fs();
104884 set_fs(KERNEL_DS);
104885- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
104886+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
104887 set_fs(old_fs);
104888
104889 return err;
104890@@ -428,7 +428,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
104891 len = sizeof(ktime);
104892 old_fs = get_fs();
104893 set_fs(KERNEL_DS);
104894- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
104895+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
104896 set_fs(old_fs);
104897
104898 if (!err) {
104899@@ -571,7 +571,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
104900 case MCAST_JOIN_GROUP:
104901 case MCAST_LEAVE_GROUP:
104902 {
104903- struct compat_group_req __user *gr32 = (void *)optval;
104904+ struct compat_group_req __user *gr32 = (void __user *)optval;
104905 struct group_req __user *kgr =
104906 compat_alloc_user_space(sizeof(struct group_req));
104907 u32 interface;
104908@@ -592,7 +592,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
104909 case MCAST_BLOCK_SOURCE:
104910 case MCAST_UNBLOCK_SOURCE:
104911 {
104912- struct compat_group_source_req __user *gsr32 = (void *)optval;
104913+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
104914 struct group_source_req __user *kgsr = compat_alloc_user_space(
104915 sizeof(struct group_source_req));
104916 u32 interface;
104917@@ -613,7 +613,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
104918 }
104919 case MCAST_MSFILTER:
104920 {
104921- struct compat_group_filter __user *gf32 = (void *)optval;
104922+ struct compat_group_filter __user *gf32 = (void __user *)optval;
104923 struct group_filter __user *kgf;
104924 u32 interface, fmode, numsrc;
104925
104926@@ -651,7 +651,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
104927 char __user *optval, int __user *optlen,
104928 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
104929 {
104930- struct compat_group_filter __user *gf32 = (void *)optval;
104931+ struct compat_group_filter __user *gf32 = (void __user *)optval;
104932 struct group_filter __user *kgf;
104933 int __user *koptlen;
104934 u32 interface, fmode, numsrc;
104935@@ -804,7 +804,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
104936
104937 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
104938 return -EINVAL;
104939- if (copy_from_user(a, args, nas[call]))
104940+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
104941 return -EFAULT;
104942 a0 = a[0];
104943 a1 = a[1];
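
nas[] is compat socketcall's table of argument bytes per call number; the
added nas[call] > sizeof a test guarantees that the copy_from_user() into the
on-stack a[] array stays in bounds even if the table and the array ever drift
apart. A compilable sketch with an invented table and a copy_in() stub:

    #include <stddef.h>
    #include <string.h>

    #define EFAULT 14
    #define EINVAL 22

    /* Bytes of argument data per (illustrative) call number. */
    static const unsigned char nas[3] = { 12, 16, 24 };

    /* Stand-in for copy_from_user(); returns 0 on success. */
    static int copy_in(void *dst, const void *src, size_t n)
    {
        memcpy(dst, src, n);
        return 0;
    }

    static int socketcall(unsigned int call, const void *args)
    {
        unsigned long a[6];

        if (call >= sizeof(nas) / sizeof(nas[0]))
            return -EINVAL;
        /* Defensive bound, as in the hunk: never trust the table to
         * stay in sync with the destination array's size. */
        if (nas[call] > sizeof(a) || copy_in(a, args, nas[call]))
            return -EFAULT;
        /* ... dispatch on call using a[0..] ... */
        return 0;
    }

    int main(void)
    {
        unsigned long args[6] = { 1, 2, 3 };
        return socketcall(0, args);    /* copies nas[0] == 12 bytes */
    }
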
104944diff --git a/net/core/Makefile b/net/core/Makefile
104945index 71093d9..a8a035b 100644
104946--- a/net/core/Makefile
104947+++ b/net/core/Makefile
104948@@ -21,6 +21,5 @@ obj-$(CONFIG_FIB_RULES) += fib_rules.o
104949 obj-$(CONFIG_TRACEPOINTS) += net-traces.o
104950 obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o
104951 obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += timestamping.o
104952-obj-$(CONFIG_NET_PTP_CLASSIFY) += ptp_classifier.o
104953 obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o
104954 obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
104955diff --git a/net/core/datagram.c b/net/core/datagram.c
104956index 488dd1a..7179f0f 100644
104957--- a/net/core/datagram.c
104958+++ b/net/core/datagram.c
104959@@ -301,7 +301,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
104960 }
104961
104962 kfree_skb(skb);
104963- atomic_inc(&sk->sk_drops);
104964+ atomic_inc_unchecked(&sk->sk_drops);
104965 sk_mem_reclaim_partial(sk);
104966
104967 return err;
104968diff --git a/net/core/dev.c b/net/core/dev.c
104969index 367a586..ef2fe17 100644
104970--- a/net/core/dev.c
104971+++ b/net/core/dev.c
104972@@ -1672,14 +1672,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
104973 {
104974 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
104975 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
104976- atomic_long_inc(&dev->rx_dropped);
104977+ atomic_long_inc_unchecked(&dev->rx_dropped);
104978 kfree_skb(skb);
104979 return NET_RX_DROP;
104980 }
104981 }
104982
104983 if (unlikely(!is_skb_forwardable(dev, skb))) {
104984- atomic_long_inc(&dev->rx_dropped);
104985+ atomic_long_inc_unchecked(&dev->rx_dropped);
104986 kfree_skb(skb);
104987 return NET_RX_DROP;
104988 }
104989@@ -2476,7 +2476,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
104990
104991 struct dev_gso_cb {
104992 void (*destructor)(struct sk_buff *skb);
104993-};
104994+} __no_const;
104995
104996 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
104997
104998@@ -2932,7 +2932,7 @@ recursion_alert:
104999 rc = -ENETDOWN;
105000 rcu_read_unlock_bh();
105001
105002- atomic_long_inc(&dev->tx_dropped);
105003+ atomic_long_inc_unchecked(&dev->tx_dropped);
105004 kfree_skb(skb);
105005 return rc;
105006 out:
105007@@ -3276,7 +3276,7 @@ enqueue:
105008
105009 local_irq_restore(flags);
105010
105011- atomic_long_inc(&skb->dev->rx_dropped);
105012+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
105013 kfree_skb(skb);
105014 return NET_RX_DROP;
105015 }
105016@@ -3353,7 +3353,7 @@ int netif_rx_ni(struct sk_buff *skb)
105017 }
105018 EXPORT_SYMBOL(netif_rx_ni);
105019
105020-static void net_tx_action(struct softirq_action *h)
105021+static __latent_entropy void net_tx_action(void)
105022 {
105023 struct softnet_data *sd = &__get_cpu_var(softnet_data);
105024
105025@@ -3686,7 +3686,7 @@ ncls:
105026 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
105027 } else {
105028 drop:
105029- atomic_long_inc(&skb->dev->rx_dropped);
105030+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
105031 kfree_skb(skb);
105032 /* Jamal, now you will not able to escape explaining
105033 * me how you were going to use this. :-)
105034@@ -4406,7 +4406,7 @@ void netif_napi_del(struct napi_struct *napi)
105035 }
105036 EXPORT_SYMBOL(netif_napi_del);
105037
105038-static void net_rx_action(struct softirq_action *h)
105039+static __latent_entropy void net_rx_action(void)
105040 {
105041 struct softnet_data *sd = &__get_cpu_var(softnet_data);
105042 unsigned long time_limit = jiffies + 2;
105043@@ -6403,8 +6403,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
105044 } else {
105045 netdev_stats_to_stats64(storage, &dev->stats);
105046 }
105047- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
105048- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
105049+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
105050+ storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
105051 return storage;
105052 }
105053 EXPORT_SYMBOL(dev_get_stats);
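
Two grsecurity annotations appear in this file: __no_const exempts
dev_gso_cb, whose destructor pointer is assigned at runtime, from the
constification plugin that would otherwise push ops-style structures into
read-only memory, while __latent_entropy (with the narrowed handler
prototype) has the latent-entropy plugin mix compiler-generated randomness
into the pool whenever the softirq handlers run. A sketch of the constify
side only, with the attribute macros reduced to no-ops so it compiles
anywhere:

    /* In a grsecurity build these expand to plugin-visible attributes;
     * plain no-ops here so the sketch builds with any compiler. */
    #define __do_const    /* force a never-written struct into .rodata */
    #define __no_const    /* opt a runtime-written struct back out */

    struct net_ops {
        int (*open)(void);
    } __do_const;          /* only ever initialized statically */

    struct gso_cb {
        void (*destructor)(void *skb);
    } __no_const;          /* assigned per-skb, must stay writable */

    static int dummy_open(void) { return 0; }

    static const struct net_ops ops = { .open = dummy_open };

    int main(void)
    {
        struct gso_cb cb;
        cb.destructor = 0;    /* the runtime write __no_const permits */
        return ops.open() + (cb.destructor ? 1 : 0);
    }
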
105054diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
105055index cf999e0..c59a975 100644
105056--- a/net/core/dev_ioctl.c
105057+++ b/net/core/dev_ioctl.c
105058@@ -366,9 +366,13 @@ void dev_load(struct net *net, const char *name)
105059 if (no_module && capable(CAP_NET_ADMIN))
105060 no_module = request_module("netdev-%s", name);
105061 if (no_module && capable(CAP_SYS_MODULE)) {
105062+#ifdef CONFIG_GRKERNSEC_MODHARDEN
105063+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
105064+#else
105065 if (!request_module("%s", name))
105066 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
105067 name);
105068+#endif
105069 }
105070 }
105071 EXPORT_SYMBOL(dev_load);
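
Under GRKERNSEC_MODHARDEN, module auto-loading performed on behalf of
less-privileged callers is funneled through ___request_module() with the
grsec_modharden_netdev marker, letting the module loader apply its policy
instead of honoring a raw name lookup. A toy sketch of that split; the
logging stub and function names are mine, and the marker handling is
simplified relative to the real ___request_module():

    #include <stdio.h>

    /* Stand-in for the kernel's module loader entry point: the marker
     * tells it the request came from a hardened call site. */
    static int request_module_hardened(const char *marker, const char *name)
    {
        printf("%s: %s\n", marker, name);
        return 0;
    }

    /* Shape of the hunk's logic in dev_load(). */
    static void dev_load(const char *name, int modharden)
    {
        if (modharden)
            request_module_hardened("grsec_modharden_netdev", name);
        else
            printf("request_module(%s)\n", name);
    }

    int main(void)
    {
        dev_load("dummy0", 1);
        return 0;
    }
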
105072diff --git a/net/core/filter.c b/net/core/filter.c
105073index 1dbf646..0f95703 100644
105074--- a/net/core/filter.c
105075+++ b/net/core/filter.c
105076@@ -1,16 +1,11 @@
105077 /*
105078 * Linux Socket Filter - Kernel level socket filtering
105079 *
105080- * Based on the design of the Berkeley Packet Filter. The new
105081- * internal format has been designed by PLUMgrid:
105082+ * Author:
105083+ * Jay Schulist <jschlst@samba.org>
105084 *
105085- * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
105086- *
105087- * Authors:
105088- *
105089- * Jay Schulist <jschlst@samba.org>
105090- * Alexei Starovoitov <ast@plumgrid.com>
105091- * Daniel Borkmann <dborkman@redhat.com>
105092+ * Based on the design of:
105093+ * - The Berkeley Packet Filter
105094 *
105095 * This program is free software; you can redistribute it and/or
105096 * modify it under the terms of the GNU General Public License
105097@@ -45,27 +40,6 @@
105098 #include <linux/seccomp.h>
105099 #include <linux/if_vlan.h>
105100
105101-/* Registers */
105102-#define BPF_R0 regs[BPF_REG_0]
105103-#define BPF_R1 regs[BPF_REG_1]
105104-#define BPF_R2 regs[BPF_REG_2]
105105-#define BPF_R3 regs[BPF_REG_3]
105106-#define BPF_R4 regs[BPF_REG_4]
105107-#define BPF_R5 regs[BPF_REG_5]
105108-#define BPF_R6 regs[BPF_REG_6]
105109-#define BPF_R7 regs[BPF_REG_7]
105110-#define BPF_R8 regs[BPF_REG_8]
105111-#define BPF_R9 regs[BPF_REG_9]
105112-#define BPF_R10 regs[BPF_REG_10]
105113-
105114-/* Named registers */
105115-#define DST regs[insn->dst_reg]
105116-#define SRC regs[insn->src_reg]
105117-#define FP regs[BPF_REG_FP]
105118-#define ARG1 regs[BPF_REG_ARG1]
105119-#define CTX regs[BPF_REG_CTX]
105120-#define IMM insn->imm
105121-
105122 /* No hurry in this branch
105123 *
105124 * Exported for the bpf jit load helper.
105125@@ -78,9 +52,9 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
105126 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
105127 else if (k >= SKF_LL_OFF)
105128 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
105129+
105130 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
105131 return ptr;
105132-
105133 return NULL;
105134 }
105135
105136@@ -89,7 +63,6 @@ static inline void *load_pointer(const struct sk_buff *skb, int k,
105137 {
105138 if (k >= 0)
105139 return skb_header_pointer(skb, k, size, buffer);
105140-
105141 return bpf_internal_load_pointer_neg_helper(skb, k, size);
105142 }
105143
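
Everything below in this file reverts the 3.15-era internal eBPF engine
(__sk_run_filter(), the sock_filter_int format, and sk_convert_filter()) back
to the classic sk_run_filter() accumulator machine: one 32-bit accumulator A,
an index register X, sixteen scratch words, and a switch per opcode. A toy
interpreter in that classic style, using a reduced opcode set of my own
rather than the kernel's BPF_S_* values:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy opcodes, not the kernel's. */
    enum { OP_LD_IMM, OP_ADD_K, OP_TAX, OP_ADD_X, OP_RET_A };

    struct insn { int code; uint32_t k; };

    static uint32_t run_filter(const struct insn *f)
    {
        uint32_t A = 0, X = 0;    /* accumulator and index register */

        for (;; f++) {
            switch (f->code) {
            case OP_LD_IMM: A = f->k;  continue;
            case OP_ADD_K:  A += f->k; continue;
            case OP_TAX:    X = A;     continue;
            case OP_ADD_X:  A += X;    continue;
            case OP_RET_A:  return A;
            }
        }
    }

    int main(void)
    {
        const struct insn prog[] = {
            { OP_LD_IMM, 20 }, { OP_TAX, 0 },
            { OP_ADD_K, 2 },   { OP_ADD_X, 0 },
            { OP_RET_A, 0 },    /* returns 20 + 2 + 20 = 42 */
        };
        printf("%u\n", run_filter(prog));
        return 0;
    }
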
105144@@ -135,960 +108,309 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
105145 }
105146 EXPORT_SYMBOL(sk_filter);
105147
105148-/* Base function for offset calculation. Needs to go into .text section,
105149- * therefore keeping it non-static as well; will also be used by JITs
105150- * anyway later on, so do not let the compiler omit it.
105151- */
105152-noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
105153-{
105154- return 0;
105155-}
105156-
105157 /**
105158- * __sk_run_filter - run a filter on a given context
105159- * @ctx: buffer to run the filter on
105160- * @insn: filter to apply
105161+ * sk_run_filter - run a filter on a socket
105162+ * @skb: buffer to run the filter on
105163+ * @fentry: filter to apply
105164 *
105165- * Decode and apply filter instructions to the skb->data. Return length to
105166- * keep, 0 for none. @ctx is the data we are operating on, @insn is the
105167- * array of filter instructions.
105168+ * Decode and apply filter instructions to the skb->data.
105169+ * Return length to keep, 0 for none. @skb is the data we are
105170+ * filtering, @filter is the array of filter instructions.
105171+ * Because all jumps are guaranteed to be before last instruction,
105172+ * and last instruction guaranteed to be a RET, we dont need to check
105173+ * flen. (We used to pass to this function the length of filter)
105174 */
105175-static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
105176+unsigned int sk_run_filter(const struct sk_buff *skb,
105177+ const struct sock_filter *fentry)
105178 {
105179- u64 stack[MAX_BPF_STACK / sizeof(u64)];
105180- u64 regs[MAX_BPF_REG], tmp;
105181- static const void *jumptable[256] = {
105182- [0 ... 255] = &&default_label,
105183- /* Now overwrite non-defaults ... */
105184- /* 32 bit ALU operations */
105185- [BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
105186- [BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
105187- [BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
105188- [BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
105189- [BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
105190- [BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
105191- [BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
105192- [BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
105193- [BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
105194- [BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
105195- [BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
105196- [BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
105197- [BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
105198- [BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
105199- [BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
105200- [BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
105201- [BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
105202- [BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
105203- [BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
105204- [BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
105205- [BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
105206- [BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
105207- [BPF_ALU | BPF_NEG] = &&ALU_NEG,
105208- [BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
105209- [BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
105210- /* 64 bit ALU operations */
105211- [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
105212- [BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
105213- [BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
105214- [BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
105215- [BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
105216- [BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
105217- [BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
105218- [BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
105219- [BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
105220- [BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
105221- [BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
105222- [BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
105223- [BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
105224- [BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
105225- [BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
105226- [BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
105227- [BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
105228- [BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
105229- [BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
105230- [BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
105231- [BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
105232- [BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
105233- [BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
105234- [BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
105235- [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
105236- /* Call instruction */
105237- [BPF_JMP | BPF_CALL] = &&JMP_CALL,
105238- /* Jumps */
105239- [BPF_JMP | BPF_JA] = &&JMP_JA,
105240- [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
105241- [BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
105242- [BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
105243- [BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
105244- [BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
105245- [BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
105246- [BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
105247- [BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
105248- [BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
105249- [BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
105250- [BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
105251- [BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
105252- [BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
105253- [BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
105254- /* Program return */
105255- [BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
105256- /* Store instructions */
105257- [BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
105258- [BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
105259- [BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
105260- [BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
105261- [BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
105262- [BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
105263- [BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
105264- [BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
105265- [BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
105266- [BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
105267- /* Load instructions */
105268- [BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
105269- [BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
105270- [BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
105271- [BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
105272- [BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
105273- [BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
105274- [BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
105275- [BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
105276- [BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
105277- [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
105278- };
105279 void *ptr;
105280- int off;
105281-
105282-#define CONT ({ insn++; goto select_insn; })
105283-#define CONT_JMP ({ insn++; goto select_insn; })
105284-
105285- FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
105286- ARG1 = (u64) (unsigned long) ctx;
105287-
105288- /* Registers used in classic BPF programs need to be reset first. */
105289- regs[BPF_REG_A] = 0;
105290- regs[BPF_REG_X] = 0;
105291-
105292-select_insn:
105293- goto *jumptable[insn->code];
105294-
105295- /* ALU */
105296-#define ALU(OPCODE, OP) \
105297- ALU64_##OPCODE##_X: \
105298- DST = DST OP SRC; \
105299- CONT; \
105300- ALU_##OPCODE##_X: \
105301- DST = (u32) DST OP (u32) SRC; \
105302- CONT; \
105303- ALU64_##OPCODE##_K: \
105304- DST = DST OP IMM; \
105305- CONT; \
105306- ALU_##OPCODE##_K: \
105307- DST = (u32) DST OP (u32) IMM; \
105308- CONT;
105309-
105310- ALU(ADD, +)
105311- ALU(SUB, -)
105312- ALU(AND, &)
105313- ALU(OR, |)
105314- ALU(LSH, <<)
105315- ALU(RSH, >>)
105316- ALU(XOR, ^)
105317- ALU(MUL, *)
105318-#undef ALU
105319- ALU_NEG:
105320- DST = (u32) -DST;
105321- CONT;
105322- ALU64_NEG:
105323- DST = -DST;
105324- CONT;
105325- ALU_MOV_X:
105326- DST = (u32) SRC;
105327- CONT;
105328- ALU_MOV_K:
105329- DST = (u32) IMM;
105330- CONT;
105331- ALU64_MOV_X:
105332- DST = SRC;
105333- CONT;
105334- ALU64_MOV_K:
105335- DST = IMM;
105336- CONT;
105337- ALU64_ARSH_X:
105338- (*(s64 *) &DST) >>= SRC;
105339- CONT;
105340- ALU64_ARSH_K:
105341- (*(s64 *) &DST) >>= IMM;
105342- CONT;
105343- ALU64_MOD_X:
105344- if (unlikely(SRC == 0))
105345- return 0;
105346- tmp = DST;
105347- DST = do_div(tmp, SRC);
105348- CONT;
105349- ALU_MOD_X:
105350- if (unlikely(SRC == 0))
105351- return 0;
105352- tmp = (u32) DST;
105353- DST = do_div(tmp, (u32) SRC);
105354- CONT;
105355- ALU64_MOD_K:
105356- tmp = DST;
105357- DST = do_div(tmp, IMM);
105358- CONT;
105359- ALU_MOD_K:
105360- tmp = (u32) DST;
105361- DST = do_div(tmp, (u32) IMM);
105362- CONT;
105363- ALU64_DIV_X:
105364- if (unlikely(SRC == 0))
105365- return 0;
105366- do_div(DST, SRC);
105367- CONT;
105368- ALU_DIV_X:
105369- if (unlikely(SRC == 0))
105370- return 0;
105371- tmp = (u32) DST;
105372- do_div(tmp, (u32) SRC);
105373- DST = (u32) tmp;
105374- CONT;
105375- ALU64_DIV_K:
105376- do_div(DST, IMM);
105377- CONT;
105378- ALU_DIV_K:
105379- tmp = (u32) DST;
105380- do_div(tmp, (u32) IMM);
105381- DST = (u32) tmp;
105382- CONT;
105383- ALU_END_TO_BE:
105384- switch (IMM) {
105385- case 16:
105386- DST = (__force u16) cpu_to_be16(DST);
105387- break;
105388- case 32:
105389- DST = (__force u32) cpu_to_be32(DST);
105390- break;
105391- case 64:
105392- DST = (__force u64) cpu_to_be64(DST);
105393- break;
105394- }
105395- CONT;
105396- ALU_END_TO_LE:
105397- switch (IMM) {
105398- case 16:
105399- DST = (__force u16) cpu_to_le16(DST);
105400- break;
105401- case 32:
105402- DST = (__force u32) cpu_to_le32(DST);
105403- break;
105404- case 64:
105405- DST = (__force u64) cpu_to_le64(DST);
105406- break;
105407- }
105408- CONT;
105409-
105410- /* CALL */
105411- JMP_CALL:
105412- /* Function call scratches BPF_R1-BPF_R5 registers,
105413- * preserves BPF_R6-BPF_R9, and stores return value
105414- * into BPF_R0.
105415- */
105416- BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
105417- BPF_R4, BPF_R5);
105418- CONT;
105419-
105420- /* JMP */
105421- JMP_JA:
105422- insn += insn->off;
105423- CONT;
105424- JMP_JEQ_X:
105425- if (DST == SRC) {
105426- insn += insn->off;
105427- CONT_JMP;
105428- }
105429- CONT;
105430- JMP_JEQ_K:
105431- if (DST == IMM) {
105432- insn += insn->off;
105433- CONT_JMP;
105434- }
105435- CONT;
105436- JMP_JNE_X:
105437- if (DST != SRC) {
105438- insn += insn->off;
105439- CONT_JMP;
105440- }
105441- CONT;
105442- JMP_JNE_K:
105443- if (DST != IMM) {
105444- insn += insn->off;
105445- CONT_JMP;
105446- }
105447- CONT;
105448- JMP_JGT_X:
105449- if (DST > SRC) {
105450- insn += insn->off;
105451- CONT_JMP;
105452- }
105453- CONT;
105454- JMP_JGT_K:
105455- if (DST > IMM) {
105456- insn += insn->off;
105457- CONT_JMP;
105458- }
105459- CONT;
105460- JMP_JGE_X:
105461- if (DST >= SRC) {
105462- insn += insn->off;
105463- CONT_JMP;
105464- }
105465- CONT;
105466- JMP_JGE_K:
105467- if (DST >= IMM) {
105468- insn += insn->off;
105469- CONT_JMP;
105470- }
105471- CONT;
105472- JMP_JSGT_X:
105473- if (((s64) DST) > ((s64) SRC)) {
105474- insn += insn->off;
105475- CONT_JMP;
105476- }
105477- CONT;
105478- JMP_JSGT_K:
105479- if (((s64) DST) > ((s64) IMM)) {
105480- insn += insn->off;
105481- CONT_JMP;
105482- }
105483- CONT;
105484- JMP_JSGE_X:
105485- if (((s64) DST) >= ((s64) SRC)) {
105486- insn += insn->off;
105487- CONT_JMP;
105488- }
105489- CONT;
105490- JMP_JSGE_K:
105491- if (((s64) DST) >= ((s64) IMM)) {
105492- insn += insn->off;
105493- CONT_JMP;
105494- }
105495- CONT;
105496- JMP_JSET_X:
105497- if (DST & SRC) {
105498- insn += insn->off;
105499- CONT_JMP;
105500- }
105501- CONT;
105502- JMP_JSET_K:
105503- if (DST & IMM) {
105504- insn += insn->off;
105505- CONT_JMP;
105506- }
105507- CONT;
105508- JMP_EXIT:
105509- return BPF_R0;
105510-
105511- /* STX and ST and LDX*/
105512-#define LDST(SIZEOP, SIZE) \
105513- STX_MEM_##SIZEOP: \
105514- *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
105515- CONT; \
105516- ST_MEM_##SIZEOP: \
105517- *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
105518- CONT; \
105519- LDX_MEM_##SIZEOP: \
105520- DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
105521- CONT;
105522-
105523- LDST(B, u8)
105524- LDST(H, u16)
105525- LDST(W, u32)
105526- LDST(DW, u64)
105527-#undef LDST
105528- STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
105529- atomic_add((u32) SRC, (atomic_t *)(unsigned long)
105530- (DST + insn->off));
105531- CONT;
105532- STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
105533- atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
105534- (DST + insn->off));
105535- CONT;
105536- LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
105537- off = IMM;
105538-load_word:
105539- /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are
105540- * only appearing in the programs where ctx ==
105541- * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
105542- * == BPF_R6, sk_convert_filter() saves it in BPF_R6,
105543- * internal BPF verifier will check that BPF_R6 ==
105544- * ctx.
105545- *
105546- * BPF_ABS and BPF_IND are wrappers of function calls,
105547- * so they scratch BPF_R1-BPF_R5 registers, preserve
105548- * BPF_R6-BPF_R9, and store return value into BPF_R0.
105549- *
105550- * Implicit input:
105551- * ctx == skb == BPF_R6 == CTX
105552- *
105553- * Explicit input:
105554- * SRC == any register
105555- * IMM == 32-bit immediate
105556- *
105557- * Output:
105558- * BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
105559- */
105560-
105561- ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
105562- if (likely(ptr != NULL)) {
105563- BPF_R0 = get_unaligned_be32(ptr);
105564- CONT;
105565- }
105566-
105567- return 0;
105568- LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
105569- off = IMM;
105570-load_half:
105571- ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
105572- if (likely(ptr != NULL)) {
105573- BPF_R0 = get_unaligned_be16(ptr);
105574- CONT;
105575- }
105576-
105577- return 0;
105578- LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
105579- off = IMM;
105580-load_byte:
105581- ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
105582- if (likely(ptr != NULL)) {
105583- BPF_R0 = *(u8 *)ptr;
105584- CONT;
105585- }
105586-
105587- return 0;
105588- LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
105589- off = IMM + SRC;
105590- goto load_word;
105591- LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
105592- off = IMM + SRC;
105593- goto load_half;
105594- LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
105595- off = IMM + SRC;
105596- goto load_byte;
105597-
105598- default_label:
105599- /* If we ever reach this, we have a bug somewhere. */
105600- WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
105601- return 0;
105602-}
105603-
105604-/* Helper to find the offset of pkt_type in sk_buff structure. We want
105605- * to make sure its still a 3bit field starting at a byte boundary;
105606- * taken from arch/x86/net/bpf_jit_comp.c.
105607- */
105608-#ifdef __BIG_ENDIAN_BITFIELD
105609-#define PKT_TYPE_MAX (7 << 5)
105610+ u32 A = 0; /* Accumulator */
105611+ u32 X = 0; /* Index Register */
105612+ u32 mem[BPF_MEMWORDS] = {}; /* Scratch Memory Store */
105613+ u32 tmp;
105614+ int k;
105615+
105616+ /*
105617+ * Process array of filter instructions.
105618+ */
105619+ for (;; fentry++) {
105620+#if defined(CONFIG_X86_32)
105621+#define K (fentry->k)
105622 #else
105623-#define PKT_TYPE_MAX 7
105624+ const u32 K = fentry->k;
105625 #endif
105626-static unsigned int pkt_type_offset(void)
105627-{
105628- struct sk_buff skb_probe = { .pkt_type = ~0, };
105629- u8 *ct = (u8 *) &skb_probe;
105630- unsigned int off;
105631
105632- for (off = 0; off < sizeof(struct sk_buff); off++) {
105633- if (ct[off] == PKT_TYPE_MAX)
105634- return off;
105635- }
105636-
105637- pr_err_once("Please fix %s, as pkt_type couldn't be found!\n", __func__);
105638- return -1;
105639-}
105640-
105641-static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
105642-{
105643- return __skb_get_poff((struct sk_buff *)(unsigned long) ctx);
105644-}
105645-
105646-static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
105647-{
105648- struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
105649- struct nlattr *nla;
105650-
105651- if (skb_is_nonlinear(skb))
105652- return 0;
105653-
105654- if (skb->len < sizeof(struct nlattr))
105655- return 0;
105656-
105657- if (a > skb->len - sizeof(struct nlattr))
105658- return 0;
105659-
105660- nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
105661- if (nla)
105662- return (void *) nla - (void *) skb->data;
105663-
105664- return 0;
105665-}
105666-
105667-static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
105668-{
105669- struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
105670- struct nlattr *nla;
105671-
105672- if (skb_is_nonlinear(skb))
105673- return 0;
105674-
105675- if (skb->len < sizeof(struct nlattr))
105676- return 0;
105677-
105678- if (a > skb->len - sizeof(struct nlattr))
105679- return 0;
105680-
105681- nla = (struct nlattr *) &skb->data[a];
105682- if (nla->nla_len > skb->len - a)
105683- return 0;
105684-
105685- nla = nla_find_nested(nla, x);
105686- if (nla)
105687- return (void *) nla - (void *) skb->data;
105688-
105689- return 0;
105690-}
105691-
105692-static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
105693-{
105694- return raw_smp_processor_id();
105695-}
105696-
105697-/* note that this only generates 32-bit random numbers */
105698-static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
105699-{
105700- return prandom_u32();
105701-}
105702-
105703-static bool convert_bpf_extensions(struct sock_filter *fp,
105704- struct sock_filter_int **insnp)
105705-{
105706- struct sock_filter_int *insn = *insnp;
105707-
105708- switch (fp->k) {
105709- case SKF_AD_OFF + SKF_AD_PROTOCOL:
105710- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
105711-
105712- /* A = *(u16 *) (CTX + offsetof(protocol)) */
105713- *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
105714- offsetof(struct sk_buff, protocol));
105715- /* A = ntohs(A) [emitting a nop or swap16] */
105716- *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
105717- break;
105718-
105719- case SKF_AD_OFF + SKF_AD_PKTTYPE:
105720- *insn = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX,
105721- pkt_type_offset());
105722- if (insn->off < 0)
105723- return false;
105724- insn++;
105725- *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX);
105726-#ifdef __BIG_ENDIAN_BITFIELD
105727- insn++;
105728- *insn = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 5);
105729-#endif
105730- break;
105731-
105732- case SKF_AD_OFF + SKF_AD_IFINDEX:
105733- case SKF_AD_OFF + SKF_AD_HATYPE:
105734- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
105735- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
105736- BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)) < 0);
105737-
105738- *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
105739- BPF_REG_TMP, BPF_REG_CTX,
105740- offsetof(struct sk_buff, dev));
105741- /* if (tmp != 0) goto pc + 1 */
105742- *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
105743- *insn++ = BPF_EXIT_INSN();
105744- if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
105745- *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
105746- offsetof(struct net_device, ifindex));
105747- else
105748- *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
105749- offsetof(struct net_device, type));
105750- break;
105751-
105752- case SKF_AD_OFF + SKF_AD_MARK:
105753- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
105754-
105755- *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
105756- offsetof(struct sk_buff, mark));
105757- break;
105758-
105759- case SKF_AD_OFF + SKF_AD_RXHASH:
105760- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
105761-
105762- *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
105763- offsetof(struct sk_buff, hash));
105764- break;
105765-
105766- case SKF_AD_OFF + SKF_AD_QUEUE:
105767- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
105768-
105769- *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
105770- offsetof(struct sk_buff, queue_mapping));
105771- break;
105772-
105773- case SKF_AD_OFF + SKF_AD_VLAN_TAG:
105774- case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
105775- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
105776- BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
105777-
105778- /* A = *(u16 *) (CTX + offsetof(vlan_tci)) */
105779- *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
105780- offsetof(struct sk_buff, vlan_tci));
105781- if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
105782- *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A,
105783- ~VLAN_TAG_PRESENT);
105784- } else {
105785- /* A >>= 12 */
105786- *insn++ = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12);
105787- /* A &= 1 */
105788- *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 1);
105789- }
105790- break;
105791-
105792- case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
105793- case SKF_AD_OFF + SKF_AD_NLATTR:
105794- case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
105795- case SKF_AD_OFF + SKF_AD_CPU:
105796- case SKF_AD_OFF + SKF_AD_RANDOM:
105797- /* arg1 = CTX */
105798- *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
105799- /* arg2 = A */
105800- *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
105801- /* arg3 = X */
105802- *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
105803- /* Emit call(arg1=CTX, arg2=A, arg3=X) */
105804- switch (fp->k) {
105805- case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
105806- *insn = BPF_EMIT_CALL(__skb_get_pay_offset);
105807- break;
105808- case SKF_AD_OFF + SKF_AD_NLATTR:
105809- *insn = BPF_EMIT_CALL(__skb_get_nlattr);
105810- break;
105811- case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
105812- *insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
105813- break;
105814- case SKF_AD_OFF + SKF_AD_CPU:
105815- *insn = BPF_EMIT_CALL(__get_raw_cpu_id);
105816- break;
105817- case SKF_AD_OFF + SKF_AD_RANDOM:
105818- *insn = BPF_EMIT_CALL(__get_random_u32);
105819- break;
105820- }
105821- break;
105822-
105823- case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
105824- /* A ^= X */
105825- *insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
105826- break;
105827-
105828- default:
105829- /* This is just a dummy call to avoid letting the compiler
105830- * evict __bpf_call_base() as an optimization. Placed here
105831- * where no-one bothers.
105832- */
105833- BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
105834- return false;
105835- }
105836-
105837- *insnp = insn;
105838- return true;
105839-}
105840-
105841-/**
105842- * sk_convert_filter - convert filter program
105843- * @prog: the user passed filter program
105844- * @len: the length of the user passed filter program
105845- * @new_prog: buffer where converted program will be stored
105846- * @new_len: pointer to store length of converted program
105847- *
105848- * Remap 'sock_filter' style BPF instruction set to 'sock_filter_ext' style.
105849- * Conversion workflow:
105850- *
105851- * 1) First pass for calculating the new program length:
105852- * sk_convert_filter(old_prog, old_len, NULL, &new_len)
105853- *
105854- * 2) 2nd pass to remap in two passes: 1st pass finds new
105855- * jump offsets, 2nd pass remapping:
105856- * new_prog = kmalloc(sizeof(struct sock_filter_int) * new_len);
105857- * sk_convert_filter(old_prog, old_len, new_prog, &new_len);
105858- *
105859- * User BPF's register A is mapped to our BPF register 6, user BPF
105860- * register X is mapped to BPF register 7; frame pointer is always
105861- * register 10; Context 'void *ctx' is stored in register 1, that is,
105862- * for socket filters: ctx == 'struct sk_buff *', for seccomp:
105863- * ctx == 'struct seccomp_data *'.
105864- */
105865-int sk_convert_filter(struct sock_filter *prog, int len,
105866- struct sock_filter_int *new_prog, int *new_len)
105867-{
105868- int new_flen = 0, pass = 0, target, i;
105869- struct sock_filter_int *new_insn;
105870- struct sock_filter *fp;
105871- int *addrs = NULL;
105872- u8 bpf_src;
105873-
105874- BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
105875- BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
105876-
105877- if (len <= 0 || len > BPF_MAXINSNS)
105878- return -EINVAL;
105879-
105880- if (new_prog) {
105881- addrs = kcalloc(len, sizeof(*addrs), GFP_KERNEL);
105882- if (!addrs)
105883- return -ENOMEM;
105884- }
105885-
105886-do_pass:
105887- new_insn = new_prog;
105888- fp = prog;
105889-
105890- if (new_insn)
105891- *new_insn = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
105892- new_insn++;
105893-
105894- for (i = 0; i < len; fp++, i++) {
105895- struct sock_filter_int tmp_insns[6] = { };
105896- struct sock_filter_int *insn = tmp_insns;
105897-
105898- if (addrs)
105899- addrs[i] = new_insn - new_prog;
105900-
105901- switch (fp->code) {
105902- /* All arithmetic insns and skb loads map as-is. */
105903- case BPF_ALU | BPF_ADD | BPF_X:
105904- case BPF_ALU | BPF_ADD | BPF_K:
105905- case BPF_ALU | BPF_SUB | BPF_X:
105906- case BPF_ALU | BPF_SUB | BPF_K:
105907- case BPF_ALU | BPF_AND | BPF_X:
105908- case BPF_ALU | BPF_AND | BPF_K:
105909- case BPF_ALU | BPF_OR | BPF_X:
105910- case BPF_ALU | BPF_OR | BPF_K:
105911- case BPF_ALU | BPF_LSH | BPF_X:
105912- case BPF_ALU | BPF_LSH | BPF_K:
105913- case BPF_ALU | BPF_RSH | BPF_X:
105914- case BPF_ALU | BPF_RSH | BPF_K:
105915- case BPF_ALU | BPF_XOR | BPF_X:
105916- case BPF_ALU | BPF_XOR | BPF_K:
105917- case BPF_ALU | BPF_MUL | BPF_X:
105918- case BPF_ALU | BPF_MUL | BPF_K:
105919- case BPF_ALU | BPF_DIV | BPF_X:
105920- case BPF_ALU | BPF_DIV | BPF_K:
105921- case BPF_ALU | BPF_MOD | BPF_X:
105922- case BPF_ALU | BPF_MOD | BPF_K:
105923- case BPF_ALU | BPF_NEG:
105924- case BPF_LD | BPF_ABS | BPF_W:
105925- case BPF_LD | BPF_ABS | BPF_H:
105926- case BPF_LD | BPF_ABS | BPF_B:
105927- case BPF_LD | BPF_IND | BPF_W:
105928- case BPF_LD | BPF_IND | BPF_H:
105929- case BPF_LD | BPF_IND | BPF_B:
105930- /* Check for overloaded BPF extension and
105931- * directly convert it if found, otherwise
105932- * just move on with mapping.
105933- */
105934- if (BPF_CLASS(fp->code) == BPF_LD &&
105935- BPF_MODE(fp->code) == BPF_ABS &&
105936- convert_bpf_extensions(fp, &insn))
105937- break;
105938-
105939- *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
105940- break;
105941-
105942- /* Jump transformation cannot use BPF block macros
105943- * everywhere as offset calculation and target updates
105944- * require a bit more work than the rest, i.e. jump
105945- * opcodes map as-is, but offsets need adjustment.
105946- */
105947-
105948-#define BPF_EMIT_JMP \
105949- do { \
105950- if (target >= len || target < 0) \
105951- goto err; \
105952- insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0; \
105953- /* Adjust pc relative offset for 2nd or 3rd insn. */ \
105954- insn->off -= insn - tmp_insns; \
105955- } while (0)
105956-
105957- case BPF_JMP | BPF_JA:
105958- target = i + fp->k + 1;
105959- insn->code = fp->code;
105960- BPF_EMIT_JMP;
105961- break;
105962-
105963- case BPF_JMP | BPF_JEQ | BPF_K:
105964- case BPF_JMP | BPF_JEQ | BPF_X:
105965- case BPF_JMP | BPF_JSET | BPF_K:
105966- case BPF_JMP | BPF_JSET | BPF_X:
105967- case BPF_JMP | BPF_JGT | BPF_K:
105968- case BPF_JMP | BPF_JGT | BPF_X:
105969- case BPF_JMP | BPF_JGE | BPF_K:
105970- case BPF_JMP | BPF_JGE | BPF_X:
105971- if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
105972- /* BPF immediates are signed, zero extend
105973- * immediate into tmp register and use it
105974- * in compare insn.
105975- */
105976- *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);
105977-
105978- insn->dst_reg = BPF_REG_A;
105979- insn->src_reg = BPF_REG_TMP;
105980- bpf_src = BPF_X;
105981- } else {
105982- insn->dst_reg = BPF_REG_A;
105983- insn->src_reg = BPF_REG_X;
105984- insn->imm = fp->k;
105985- bpf_src = BPF_SRC(fp->code);
105986+ switch (fentry->code) {
105987+ case BPF_S_ALU_ADD_X:
105988+ A += X;
105989+ continue;
105990+ case BPF_S_ALU_ADD_K:
105991+ A += K;
105992+ continue;
105993+ case BPF_S_ALU_SUB_X:
105994+ A -= X;
105995+ continue;
105996+ case BPF_S_ALU_SUB_K:
105997+ A -= K;
105998+ continue;
105999+ case BPF_S_ALU_MUL_X:
106000+ A *= X;
106001+ continue;
106002+ case BPF_S_ALU_MUL_K:
106003+ A *= K;
106004+ continue;
106005+ case BPF_S_ALU_DIV_X:
106006+ if (X == 0)
106007+ return 0;
106008+ A /= X;
106009+ continue;
106010+ case BPF_S_ALU_DIV_K:
106011+ A /= K;
106012+ continue;
106013+ case BPF_S_ALU_MOD_X:
106014+ if (X == 0)
106015+ return 0;
106016+ A %= X;
106017+ continue;
106018+ case BPF_S_ALU_MOD_K:
106019+ A %= K;
106020+ continue;
106021+ case BPF_S_ALU_AND_X:
106022+ A &= X;
106023+ continue;
106024+ case BPF_S_ALU_AND_K:
106025+ A &= K;
106026+ continue;
106027+ case BPF_S_ALU_OR_X:
106028+ A |= X;
106029+ continue;
106030+ case BPF_S_ALU_OR_K:
106031+ A |= K;
106032+ continue;
106033+ case BPF_S_ANC_ALU_XOR_X:
106034+ case BPF_S_ALU_XOR_X:
106035+ A ^= X;
106036+ continue;
106037+ case BPF_S_ALU_XOR_K:
106038+ A ^= K;
106039+ continue;
106040+ case BPF_S_ALU_LSH_X:
106041+ A <<= X;
106042+ continue;
106043+ case BPF_S_ALU_LSH_K:
106044+ A <<= K;
106045+ continue;
106046+ case BPF_S_ALU_RSH_X:
106047+ A >>= X;
106048+ continue;
106049+ case BPF_S_ALU_RSH_K:
106050+ A >>= K;
106051+ continue;
106052+ case BPF_S_ALU_NEG:
106053+ A = -A;
106054+ continue;
106055+ case BPF_S_JMP_JA:
106056+ fentry += K;
106057+ continue;
106058+ case BPF_S_JMP_JGT_K:
106059+ fentry += (A > K) ? fentry->jt : fentry->jf;
106060+ continue;
106061+ case BPF_S_JMP_JGE_K:
106062+ fentry += (A >= K) ? fentry->jt : fentry->jf;
106063+ continue;
106064+ case BPF_S_JMP_JEQ_K:
106065+ fentry += (A == K) ? fentry->jt : fentry->jf;
106066+ continue;
106067+ case BPF_S_JMP_JSET_K:
106068+ fentry += (A & K) ? fentry->jt : fentry->jf;
106069+ continue;
106070+ case BPF_S_JMP_JGT_X:
106071+ fentry += (A > X) ? fentry->jt : fentry->jf;
106072+ continue;
106073+ case BPF_S_JMP_JGE_X:
106074+ fentry += (A >= X) ? fentry->jt : fentry->jf;
106075+ continue;
106076+ case BPF_S_JMP_JEQ_X:
106077+ fentry += (A == X) ? fentry->jt : fentry->jf;
106078+ continue;
106079+ case BPF_S_JMP_JSET_X:
106080+ fentry += (A & X) ? fentry->jt : fentry->jf;
106081+ continue;
106082+ case BPF_S_LD_W_ABS:
106083+ k = K;
106084+load_w:
106085+ ptr = load_pointer(skb, k, 4, &tmp);
106086+ if (ptr != NULL) {
106087+ A = get_unaligned_be32(ptr);
106088+ continue;
106089 }
106090-
106091- /* Common case where 'jump_false' is next insn. */
106092- if (fp->jf == 0) {
106093- insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
106094- target = i + fp->jt + 1;
106095- BPF_EMIT_JMP;
106096- break;
106097+ return 0;
106098+ case BPF_S_LD_H_ABS:
106099+ k = K;
106100+load_h:
106101+ ptr = load_pointer(skb, k, 2, &tmp);
106102+ if (ptr != NULL) {
106103+ A = get_unaligned_be16(ptr);
106104+ continue;
106105 }
106106-
106107- /* Convert JEQ into JNE when 'jump_true' is next insn. */
106108- if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
106109- insn->code = BPF_JMP | BPF_JNE | bpf_src;
106110- target = i + fp->jf + 1;
106111- BPF_EMIT_JMP;
106112- break;
106113+ return 0;
106114+ case BPF_S_LD_B_ABS:
106115+ k = K;
106116+load_b:
106117+ ptr = load_pointer(skb, k, 1, &tmp);
106118+ if (ptr != NULL) {
106119+ A = *(u8 *)ptr;
106120+ continue;
106121 }
106122-
106123- /* Other jumps are mapped into two insns: Jxx and JA. */
106124- target = i + fp->jt + 1;
106125- insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
106126- BPF_EMIT_JMP;
106127- insn++;
106128-
106129- insn->code = BPF_JMP | BPF_JA;
106130- target = i + fp->jf + 1;
106131- BPF_EMIT_JMP;
106132- break;
106133-
106134- /* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */
106135- case BPF_LDX | BPF_MSH | BPF_B:
106136- /* tmp = A */
106137- *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
106138- /* A = BPF_R0 = *(u8 *) (skb->data + K) */
106139- *insn++ = BPF_LD_ABS(BPF_B, fp->k);
106140- /* A &= 0xf */
106141- *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
106142- /* A <<= 2 */
106143- *insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
106144- /* X = A */
106145- *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
106146- /* A = tmp */
106147- *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
106148- break;
106149-
106150- /* RET_K, RET_A are remaped into 2 insns. */
106151- case BPF_RET | BPF_A:
106152- case BPF_RET | BPF_K:
106153- *insn++ = BPF_MOV32_RAW(BPF_RVAL(fp->code) == BPF_K ?
106154- BPF_K : BPF_X, BPF_REG_0,
106155- BPF_REG_A, fp->k);
106156- *insn = BPF_EXIT_INSN();
106157- break;
106158-
106159- /* Store to stack. */
106160- case BPF_ST:
106161- case BPF_STX:
106162- *insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
106163- BPF_ST ? BPF_REG_A : BPF_REG_X,
106164- -(BPF_MEMWORDS - fp->k) * 4);
106165- break;
106166-
106167- /* Load from stack. */
106168- case BPF_LD | BPF_MEM:
106169- case BPF_LDX | BPF_MEM:
106170- *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
106171- BPF_REG_A : BPF_REG_X, BPF_REG_FP,
106172- -(BPF_MEMWORDS - fp->k) * 4);
106173- break;
106174-
106175- /* A = K or X = K */
106176- case BPF_LD | BPF_IMM:
106177- case BPF_LDX | BPF_IMM:
106178- *insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
106179- BPF_REG_A : BPF_REG_X, fp->k);
106180- break;
106181-
106182- /* X = A */
106183- case BPF_MISC | BPF_TAX:
106184- *insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
106185- break;
106186-
106187- /* A = X */
106188- case BPF_MISC | BPF_TXA:
106189- *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
106190- break;
106191-
106192- /* A = skb->len or X = skb->len */
106193- case BPF_LD | BPF_W | BPF_LEN:
106194- case BPF_LDX | BPF_W | BPF_LEN:
106195- *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
106196- BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
106197- offsetof(struct sk_buff, len));
106198- break;
106199-
106200- /* Access seccomp_data fields. */
106201- case BPF_LDX | BPF_ABS | BPF_W:
106202- /* A = *(u32 *) (ctx + K) */
106203- *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
106204- break;
106205-
106206- /* Unkown instruction. */
106207+ return 0;
106208+ case BPF_S_LD_W_LEN:
106209+ A = skb->len;
106210+ continue;
106211+ case BPF_S_LDX_W_LEN:
106212+ X = skb->len;
106213+ continue;
106214+ case BPF_S_LD_W_IND:
106215+ k = X + K;
106216+ goto load_w;
106217+ case BPF_S_LD_H_IND:
106218+ k = X + K;
106219+ goto load_h;
106220+ case BPF_S_LD_B_IND:
106221+ k = X + K;
106222+ goto load_b;
106223+ case BPF_S_LDX_B_MSH:
106224+ ptr = load_pointer(skb, K, 1, &tmp);
106225+ if (ptr != NULL) {
106226+ X = (*(u8 *)ptr & 0xf) << 2;
106227+ continue;
106228+ }
106229+ return 0;
106230+ case BPF_S_LD_IMM:
106231+ A = K;
106232+ continue;
106233+ case BPF_S_LDX_IMM:
106234+ X = K;
106235+ continue;
106236+ case BPF_S_LD_MEM:
106237+ A = mem[K&15];
106238+ continue;
106239+ case BPF_S_LDX_MEM:
106240+ X = mem[K&15];
106241+ continue;
106242+ case BPF_S_MISC_TAX:
106243+ X = A;
106244+ continue;
106245+ case BPF_S_MISC_TXA:
106246+ A = X;
106247+ continue;
106248+ case BPF_S_RET_K:
106249+ return K;
106250+ case BPF_S_RET_A:
106251+ return A;
106252+ case BPF_S_ST:
106253+ mem[K&15] = A;
106254+ continue;
106255+ case BPF_S_STX:
106256+ mem[K&15] = X;
106257+ continue;
106258+ case BPF_S_ANC_PROTOCOL:
106259+ A = ntohs(skb->protocol);
106260+ continue;
106261+ case BPF_S_ANC_PKTTYPE:
106262+ A = skb->pkt_type;
106263+ continue;
106264+ case BPF_S_ANC_IFINDEX:
106265+ if (!skb->dev)
106266+ return 0;
106267+ A = skb->dev->ifindex;
106268+ continue;
106269+ case BPF_S_ANC_MARK:
106270+ A = skb->mark;
106271+ continue;
106272+ case BPF_S_ANC_QUEUE:
106273+ A = skb->queue_mapping;
106274+ continue;
106275+ case BPF_S_ANC_HATYPE:
106276+ if (!skb->dev)
106277+ return 0;
106278+ A = skb->dev->type;
106279+ continue;
106280+ case BPF_S_ANC_RXHASH:
106281+ A = skb->hash;
106282+ continue;
106283+ case BPF_S_ANC_CPU:
106284+ A = raw_smp_processor_id();
106285+ continue;
106286+ case BPF_S_ANC_VLAN_TAG:
106287+ A = vlan_tx_tag_get(skb);
106288+ continue;
106289+ case BPF_S_ANC_VLAN_TAG_PRESENT:
106290+ A = !!vlan_tx_tag_present(skb);
106291+ continue;
106292+ case BPF_S_ANC_PAY_OFFSET:
106293+ A = __skb_get_poff(skb);
106294+ continue;
106295+ case BPF_S_ANC_NLATTR: {
106296+ struct nlattr *nla;
106297+
106298+ if (skb_is_nonlinear(skb))
106299+ return 0;
106300+ if (skb->len < sizeof(struct nlattr))
106301+ return 0;
106302+ if (A > skb->len - sizeof(struct nlattr))
106303+ return 0;
106304+
106305+ nla = nla_find((struct nlattr *)&skb->data[A],
106306+ skb->len - A, X);
106307+ if (nla)
106308+ A = (void *)nla - (void *)skb->data;
106309+ else
106310+ A = 0;
106311+ continue;
106312+ }
106313+ case BPF_S_ANC_NLATTR_NEST: {
106314+ struct nlattr *nla;
106315+
106316+ if (skb_is_nonlinear(skb))
106317+ return 0;
106318+ if (skb->len < sizeof(struct nlattr))
106319+ return 0;
106320+ if (A > skb->len - sizeof(struct nlattr))
106321+ return 0;
106322+
106323+ nla = (struct nlattr *)&skb->data[A];
106324+ if (nla->nla_len > skb->len - A)
106325+ return 0;
106326+
106327+ nla = nla_find_nested(nla, X);
106328+ if (nla)
106329+ A = (void *)nla - (void *)skb->data;
106330+ else
106331+ A = 0;
106332+ continue;
106333+ }
106334+#ifdef CONFIG_SECCOMP_FILTER
106335+ case BPF_S_ANC_SECCOMP_LD_W:
106336+ A = seccomp_bpf_load(fentry->k);
106337+ continue;
106338+#endif
106339 default:
106340- goto err;
106341+ WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u tf:%u k:%u\n",
106342+ fentry->code, fentry->jt,
106343+ fentry->jf, fentry->k);
106344+ BUG();
106345+ return 0;
106346 }
106347-
106348- insn++;
106349- if (new_prog)
106350- memcpy(new_insn, tmp_insns,
106351- sizeof(*insn) * (insn - tmp_insns));
106352- new_insn += insn - tmp_insns;
106353- }
106354-
106355- if (!new_prog) {
106356- /* Only calculating new length. */
106357- *new_len = new_insn - new_prog;
106358- return 0;
106359- }
106360-
106361- pass++;
106362- if (new_flen != new_insn - new_prog) {
106363- new_flen = new_insn - new_prog;
106364- if (pass > 2)
106365- goto err;
106366- goto do_pass;
106367 }
106368
106369- kfree(addrs);
106370- BUG_ON(*new_len != new_flen);
106371 return 0;
106372-err:
106373- kfree(addrs);
106374- return -EINVAL;
106375 }
106376+EXPORT_SYMBOL(sk_run_filter);
106377
106378-/* Security:
106379- *
106380+/*
106381+ * Security :
106382 * A BPF program is able to use 16 cells of memory to store intermediate
106383- * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
106384- *
106385+ * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
106386 * As we don't want to clear the mem[] array for each packet going through
106387 * sk_run_filter(), we check that a filter loaded by the user never tries to read
106388 * a cell that was not previously written, and we check all branches to be sure
106389@@ -1096,46 +418,44 @@ err:
106390 */
106391 static int check_load_and_stores(struct sock_filter *filter, int flen)
106392 {
106393- u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
106394+ u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
106395 int pc, ret = 0;
106396
106397- BUILD_BUG_ON(BPF_MEMWORDS > 16);
106398-
106399- masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
106400+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
106401+ masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
106402 if (!masks)
106403 return -ENOMEM;
106404-
106405 memset(masks, 0xff, flen * sizeof(*masks));
106406
106407 for (pc = 0; pc < flen; pc++) {
106408 memvalid &= masks[pc];
106409
106410 switch (filter[pc].code) {
106411- case BPF_ST:
106412- case BPF_STX:
106413+ case BPF_S_ST:
106414+ case BPF_S_STX:
106415 memvalid |= (1 << filter[pc].k);
106416 break;
106417- case BPF_LD | BPF_MEM:
106418- case BPF_LDX | BPF_MEM:
106419+ case BPF_S_LD_MEM:
106420+ case BPF_S_LDX_MEM:
106421 if (!(memvalid & (1 << filter[pc].k))) {
106422 ret = -EINVAL;
106423 goto error;
106424 }
106425 break;
106426- case BPF_JMP | BPF_JA:
106427- /* A jump must set masks on target */
106428+ case BPF_S_JMP_JA:
106429+ /* a jump must set masks on target */
106430 masks[pc + 1 + filter[pc].k] &= memvalid;
106431 memvalid = ~0;
106432 break;
106433- case BPF_JMP | BPF_JEQ | BPF_K:
106434- case BPF_JMP | BPF_JEQ | BPF_X:
106435- case BPF_JMP | BPF_JGE | BPF_K:
106436- case BPF_JMP | BPF_JGE | BPF_X:
106437- case BPF_JMP | BPF_JGT | BPF_K:
106438- case BPF_JMP | BPF_JGT | BPF_X:
106439- case BPF_JMP | BPF_JSET | BPF_K:
106440- case BPF_JMP | BPF_JSET | BPF_X:
106441- /* A jump must set masks on targets */
106442+ case BPF_S_JMP_JEQ_K:
106443+ case BPF_S_JMP_JEQ_X:
106444+ case BPF_S_JMP_JGE_K:
106445+ case BPF_S_JMP_JGE_X:
106446+ case BPF_S_JMP_JGT_K:
106447+ case BPF_S_JMP_JGT_X:
106448+ case BPF_S_JMP_JSET_X:
106449+ case BPF_S_JMP_JSET_K:
106450+ /* a jump must set masks on targets */
106451 masks[pc + 1 + filter[pc].jt] &= memvalid;
106452 masks[pc + 1 + filter[pc].jf] &= memvalid;
106453 memvalid = ~0;
106454@@ -1147,72 +467,6 @@ error:
106455 return ret;
106456 }
106457
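For context, the invariant check_load_and_stores() enforces above is easy to state from the uapi side: every load from a scratch cell (BPF_LD|BPF_MEM) must be dominated by a store (BPF_ST or BPF_STX) to that same cell on all paths. A minimal userspace sketch using only the standard macros from <linux/filter.h>; the program itself is illustrative:

#include <linux/filter.h>

/* Passes check_load_and_stores(): mem[0] is written before it is
 * read.  Swapping the ST and the LD|MEM instructions would make
 * sk_chk_filter() return -EINVAL.
 */
static struct sock_filter len_via_mem[] = {
	BPF_STMT(BPF_LD  | BPF_W | BPF_LEN, 0),	/* A = skb->len      */
	BPF_STMT(BPF_ST, 0),			/* mem[0] = A        */
	BPF_STMT(BPF_LD  | BPF_MEM, 0),		/* A = mem[0], valid */
	BPF_STMT(BPF_RET | BPF_A, 0),		/* accept A bytes    */
};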
106458-static bool chk_code_allowed(u16 code_to_probe)
106459-{
106460- static const bool codes[] = {
106461- /* 32 bit ALU operations */
106462- [BPF_ALU | BPF_ADD | BPF_K] = true,
106463- [BPF_ALU | BPF_ADD | BPF_X] = true,
106464- [BPF_ALU | BPF_SUB | BPF_K] = true,
106465- [BPF_ALU | BPF_SUB | BPF_X] = true,
106466- [BPF_ALU | BPF_MUL | BPF_K] = true,
106467- [BPF_ALU | BPF_MUL | BPF_X] = true,
106468- [BPF_ALU | BPF_DIV | BPF_K] = true,
106469- [BPF_ALU | BPF_DIV | BPF_X] = true,
106470- [BPF_ALU | BPF_MOD | BPF_K] = true,
106471- [BPF_ALU | BPF_MOD | BPF_X] = true,
106472- [BPF_ALU | BPF_AND | BPF_K] = true,
106473- [BPF_ALU | BPF_AND | BPF_X] = true,
106474- [BPF_ALU | BPF_OR | BPF_K] = true,
106475- [BPF_ALU | BPF_OR | BPF_X] = true,
106476- [BPF_ALU | BPF_XOR | BPF_K] = true,
106477- [BPF_ALU | BPF_XOR | BPF_X] = true,
106478- [BPF_ALU | BPF_LSH | BPF_K] = true,
106479- [BPF_ALU | BPF_LSH | BPF_X] = true,
106480- [BPF_ALU | BPF_RSH | BPF_K] = true,
106481- [BPF_ALU | BPF_RSH | BPF_X] = true,
106482- [BPF_ALU | BPF_NEG] = true,
106483- /* Load instructions */
106484- [BPF_LD | BPF_W | BPF_ABS] = true,
106485- [BPF_LD | BPF_H | BPF_ABS] = true,
106486- [BPF_LD | BPF_B | BPF_ABS] = true,
106487- [BPF_LD | BPF_W | BPF_LEN] = true,
106488- [BPF_LD | BPF_W | BPF_IND] = true,
106489- [BPF_LD | BPF_H | BPF_IND] = true,
106490- [BPF_LD | BPF_B | BPF_IND] = true,
106491- [BPF_LD | BPF_IMM] = true,
106492- [BPF_LD | BPF_MEM] = true,
106493- [BPF_LDX | BPF_W | BPF_LEN] = true,
106494- [BPF_LDX | BPF_B | BPF_MSH] = true,
106495- [BPF_LDX | BPF_IMM] = true,
106496- [BPF_LDX | BPF_MEM] = true,
106497- /* Store instructions */
106498- [BPF_ST] = true,
106499- [BPF_STX] = true,
106500- /* Misc instructions */
106501- [BPF_MISC | BPF_TAX] = true,
106502- [BPF_MISC | BPF_TXA] = true,
106503- /* Return instructions */
106504- [BPF_RET | BPF_K] = true,
106505- [BPF_RET | BPF_A] = true,
106506- /* Jump instructions */
106507- [BPF_JMP | BPF_JA] = true,
106508- [BPF_JMP | BPF_JEQ | BPF_K] = true,
106509- [BPF_JMP | BPF_JEQ | BPF_X] = true,
106510- [BPF_JMP | BPF_JGE | BPF_K] = true,
106511- [BPF_JMP | BPF_JGE | BPF_X] = true,
106512- [BPF_JMP | BPF_JGT | BPF_K] = true,
106513- [BPF_JMP | BPF_JGT | BPF_X] = true,
106514- [BPF_JMP | BPF_JSET | BPF_K] = true,
106515- [BPF_JMP | BPF_JSET | BPF_X] = true,
106516- };
106517-
106518- if (code_to_probe >= ARRAY_SIZE(codes))
106519- return false;
106520-
106521- return codes[code_to_probe];
106522-}
106523-
106524 /**
106525 * sk_chk_filter - verify socket filter code
106526 * @filter: filter to verify
106527@@ -1229,303 +483,187 @@ static bool chk_code_allowed(u16 code_to_probe)
106528 */
106529 int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
106530 {
106531- bool anc_found;
106532+ /*
106533+ * Valid instructions are initialized to non-0.
106534+ * Invalid instructions are initialized to 0.
106535+ */
106536+ static const u8 codes[] = {
106537+ [BPF_ALU|BPF_ADD|BPF_K] = BPF_S_ALU_ADD_K,
106538+ [BPF_ALU|BPF_ADD|BPF_X] = BPF_S_ALU_ADD_X,
106539+ [BPF_ALU|BPF_SUB|BPF_K] = BPF_S_ALU_SUB_K,
106540+ [BPF_ALU|BPF_SUB|BPF_X] = BPF_S_ALU_SUB_X,
106541+ [BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K,
106542+ [BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X,
106543+ [BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X,
106544+ [BPF_ALU|BPF_MOD|BPF_K] = BPF_S_ALU_MOD_K,
106545+ [BPF_ALU|BPF_MOD|BPF_X] = BPF_S_ALU_MOD_X,
106546+ [BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K,
106547+ [BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X,
106548+ [BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K,
106549+ [BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X,
106550+ [BPF_ALU|BPF_XOR|BPF_K] = BPF_S_ALU_XOR_K,
106551+ [BPF_ALU|BPF_XOR|BPF_X] = BPF_S_ALU_XOR_X,
106552+ [BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K,
106553+ [BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X,
106554+ [BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K,
106555+ [BPF_ALU|BPF_RSH|BPF_X] = BPF_S_ALU_RSH_X,
106556+ [BPF_ALU|BPF_NEG] = BPF_S_ALU_NEG,
106557+ [BPF_LD|BPF_W|BPF_ABS] = BPF_S_LD_W_ABS,
106558+ [BPF_LD|BPF_H|BPF_ABS] = BPF_S_LD_H_ABS,
106559+ [BPF_LD|BPF_B|BPF_ABS] = BPF_S_LD_B_ABS,
106560+ [BPF_LD|BPF_W|BPF_LEN] = BPF_S_LD_W_LEN,
106561+ [BPF_LD|BPF_W|BPF_IND] = BPF_S_LD_W_IND,
106562+ [BPF_LD|BPF_H|BPF_IND] = BPF_S_LD_H_IND,
106563+ [BPF_LD|BPF_B|BPF_IND] = BPF_S_LD_B_IND,
106564+ [BPF_LD|BPF_IMM] = BPF_S_LD_IMM,
106565+ [BPF_LDX|BPF_W|BPF_LEN] = BPF_S_LDX_W_LEN,
106566+ [BPF_LDX|BPF_B|BPF_MSH] = BPF_S_LDX_B_MSH,
106567+ [BPF_LDX|BPF_IMM] = BPF_S_LDX_IMM,
106568+ [BPF_MISC|BPF_TAX] = BPF_S_MISC_TAX,
106569+ [BPF_MISC|BPF_TXA] = BPF_S_MISC_TXA,
106570+ [BPF_RET|BPF_K] = BPF_S_RET_K,
106571+ [BPF_RET|BPF_A] = BPF_S_RET_A,
106572+ [BPF_ALU|BPF_DIV|BPF_K] = BPF_S_ALU_DIV_K,
106573+ [BPF_LD|BPF_MEM] = BPF_S_LD_MEM,
106574+ [BPF_LDX|BPF_MEM] = BPF_S_LDX_MEM,
106575+ [BPF_ST] = BPF_S_ST,
106576+ [BPF_STX] = BPF_S_STX,
106577+ [BPF_JMP|BPF_JA] = BPF_S_JMP_JA,
106578+ [BPF_JMP|BPF_JEQ|BPF_K] = BPF_S_JMP_JEQ_K,
106579+ [BPF_JMP|BPF_JEQ|BPF_X] = BPF_S_JMP_JEQ_X,
106580+ [BPF_JMP|BPF_JGE|BPF_K] = BPF_S_JMP_JGE_K,
106581+ [BPF_JMP|BPF_JGE|BPF_X] = BPF_S_JMP_JGE_X,
106582+ [BPF_JMP|BPF_JGT|BPF_K] = BPF_S_JMP_JGT_K,
106583+ [BPF_JMP|BPF_JGT|BPF_X] = BPF_S_JMP_JGT_X,
106584+ [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
106585+ [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
106586+ };
106587 int pc;
106588+ bool anc_found;
106589
106590 if (flen == 0 || flen > BPF_MAXINSNS)
106591 return -EINVAL;
106592
106593- /* Check the filter code now */
106594+ /* check the filter code now */
106595 for (pc = 0; pc < flen; pc++) {
106596 struct sock_filter *ftest = &filter[pc];
106597+ u16 code = ftest->code;
106598
106599- /* May we actually operate on this code? */
106600- if (!chk_code_allowed(ftest->code))
106601+ if (code >= ARRAY_SIZE(codes))
106602+ return -EINVAL;
106603+ code = codes[code];
106604+ if (!code)
106605 return -EINVAL;
106606-
106607 /* Some instructions need special checks */
106608- switch (ftest->code) {
106609- case BPF_ALU | BPF_DIV | BPF_K:
106610- case BPF_ALU | BPF_MOD | BPF_K:
106611- /* Check for division by zero */
106612+ switch (code) {
106613+ case BPF_S_ALU_DIV_K:
106614+ case BPF_S_ALU_MOD_K:
106615+ /* check for division by zero */
106616 if (ftest->k == 0)
106617 return -EINVAL;
106618 break;
106619- case BPF_LD | BPF_MEM:
106620- case BPF_LDX | BPF_MEM:
106621- case BPF_ST:
106622- case BPF_STX:
106623- /* Check for invalid memory addresses */
106624+ case BPF_S_LD_MEM:
106625+ case BPF_S_LDX_MEM:
106626+ case BPF_S_ST:
106627+ case BPF_S_STX:
106628+ /* check for invalid memory addresses */
106629 if (ftest->k >= BPF_MEMWORDS)
106630 return -EINVAL;
106631 break;
106632- case BPF_JMP | BPF_JA:
106633- /* Note, the large ftest->k might cause loops.
106634+ case BPF_S_JMP_JA:
106635+ /*
106636+ * Note, the large ftest->k might cause loops.
106637 * Compare this with conditional jumps below,
106638 * where offsets are limited. --ANK (981016)
106639 */
106640- if (ftest->k >= (unsigned int)(flen - pc - 1))
106641+ if (ftest->k >= (unsigned int)(flen-pc-1))
106642 return -EINVAL;
106643 break;
106644- case BPF_JMP | BPF_JEQ | BPF_K:
106645- case BPF_JMP | BPF_JEQ | BPF_X:
106646- case BPF_JMP | BPF_JGE | BPF_K:
106647- case BPF_JMP | BPF_JGE | BPF_X:
106648- case BPF_JMP | BPF_JGT | BPF_K:
106649- case BPF_JMP | BPF_JGT | BPF_X:
106650- case BPF_JMP | BPF_JSET | BPF_K:
106651- case BPF_JMP | BPF_JSET | BPF_X:
106652- /* Both conditionals must be safe */
106653+ case BPF_S_JMP_JEQ_K:
106654+ case BPF_S_JMP_JEQ_X:
106655+ case BPF_S_JMP_JGE_K:
106656+ case BPF_S_JMP_JGE_X:
106657+ case BPF_S_JMP_JGT_K:
106658+ case BPF_S_JMP_JGT_X:
106659+ case BPF_S_JMP_JSET_X:
106660+ case BPF_S_JMP_JSET_K:
106661+ /* for conditionals both must be safe */
106662 if (pc + ftest->jt + 1 >= flen ||
106663 pc + ftest->jf + 1 >= flen)
106664 return -EINVAL;
106665 break;
106666- case BPF_LD | BPF_W | BPF_ABS:
106667- case BPF_LD | BPF_H | BPF_ABS:
106668- case BPF_LD | BPF_B | BPF_ABS:
106669+ case BPF_S_LD_W_ABS:
106670+ case BPF_S_LD_H_ABS:
106671+ case BPF_S_LD_B_ABS:
106672 anc_found = false;
106673- if (bpf_anc_helper(ftest) & BPF_ANC)
106674- anc_found = true;
106675- /* Ancillary operation unknown or unsupported */
106676+#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \
106677+ code = BPF_S_ANC_##CODE; \
106678+ anc_found = true; \
106679+ break
106680+ switch (ftest->k) {
106681+ ANCILLARY(PROTOCOL);
106682+ ANCILLARY(PKTTYPE);
106683+ ANCILLARY(IFINDEX);
106684+ ANCILLARY(NLATTR);
106685+ ANCILLARY(NLATTR_NEST);
106686+ ANCILLARY(MARK);
106687+ ANCILLARY(QUEUE);
106688+ ANCILLARY(HATYPE);
106689+ ANCILLARY(RXHASH);
106690+ ANCILLARY(CPU);
106691+ ANCILLARY(ALU_XOR_X);
106692+ ANCILLARY(VLAN_TAG);
106693+ ANCILLARY(VLAN_TAG_PRESENT);
106694+ ANCILLARY(PAY_OFFSET);
106695+ }
106696+
106697+ /* ancillary operation unknown or unsupported */
106698 if (anc_found == false && ftest->k >= SKF_AD_OFF)
106699 return -EINVAL;
106700 }
106701+ ftest->code = code;
106702 }
106703
106704- /* Last instruction must be a RET code */
106705+ /* last instruction must be a RET code */
106706 switch (filter[flen - 1].code) {
106707- case BPF_RET | BPF_K:
106708- case BPF_RET | BPF_A:
106709+ case BPF_S_RET_K:
106710+ case BPF_S_RET_A:
106711 return check_load_and_stores(filter, flen);
106712 }
106713-
106714 return -EINVAL;
106715 }
106716 EXPORT_SYMBOL(sk_chk_filter);
106717
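The ANCILLARY() remapping above is what turns a "magic offset" absolute load into one of the BPF_S_ANC_* cases handled earlier in sk_run_filter(). A sketch of what such a filter looks like from userspace, using the standard constants from <linux/filter.h> (the accept/drop return values are illustrative):

#include <linux/filter.h>

/* An absolute halfword load at SKF_AD_OFF + SKF_AD_PROTOCOL is not a
 * packet access: sk_chk_filter() rewrites it to BPF_S_ANC_PROTOCOL,
 * and sk_run_filter() then loads ntohs(skb->protocol) instead.
 */
static struct sock_filter match_ipv4[] = {
	BPF_STMT(BPF_LD  | BPF_H | BPF_ABS, SKF_AD_OFF + SKF_AD_PROTOCOL),
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800 /* ETH_P_IP */, 0, 1),
	BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	/* IPv4: accept */
	BPF_STMT(BPF_RET | BPF_K, 0),		/* else: drop   */
};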
106718-static int sk_store_orig_filter(struct sk_filter *fp,
106719- const struct sock_fprog *fprog)
106720-{
106721- unsigned int fsize = sk_filter_proglen(fprog);
106722- struct sock_fprog_kern *fkprog;
106723-
106724- fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
106725- if (!fp->orig_prog)
106726- return -ENOMEM;
106727-
106728- fkprog = fp->orig_prog;
106729- fkprog->len = fprog->len;
106730- fkprog->filter = kmemdup(fp->insns, fsize, GFP_KERNEL);
106731- if (!fkprog->filter) {
106732- kfree(fp->orig_prog);
106733- return -ENOMEM;
106734- }
106735-
106736- return 0;
106737-}
106738-
106739-static void sk_release_orig_filter(struct sk_filter *fp)
106740-{
106741- struct sock_fprog_kern *fprog = fp->orig_prog;
106742-
106743- if (fprog) {
106744- kfree(fprog->filter);
106745- kfree(fprog);
106746- }
106747-}
106748-
106749 /**
106750 * sk_filter_release_rcu - Release a socket filter by rcu_head
106751 * @rcu: rcu_head that contains the sk_filter to free
106752 */
106753-static void sk_filter_release_rcu(struct rcu_head *rcu)
106754+void sk_filter_release_rcu(struct rcu_head *rcu)
106755 {
106756 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
106757
106758- sk_release_orig_filter(fp);
106759- sk_filter_free(fp);
106760-}
106761-
106762-/**
106763- * sk_filter_release - release a socket filter
106764- * @fp: filter to remove
106765- *
106766- * Remove a filter from a socket and release its resources.
106767- */
106768-static void sk_filter_release(struct sk_filter *fp)
106769-{
106770- if (atomic_dec_and_test(&fp->refcnt))
106771- call_rcu(&fp->rcu, sk_filter_release_rcu);
106772-}
106773-
106774-void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
106775-{
106776- atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
106777- sk_filter_release(fp);
106778-}
106779-
106780-void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
106781-{
106782- atomic_inc(&fp->refcnt);
106783- atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
106784-}
106785-
106786-static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
106787- struct sock *sk,
106788- unsigned int len)
106789-{
106790- struct sk_filter *fp_new;
106791-
106792- if (sk == NULL)
106793- return krealloc(fp, len, GFP_KERNEL);
106794-
106795- fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
106796- if (fp_new) {
106797- *fp_new = *fp;
106798- /* As we're keeping orig_prog in fp_new along,
106799- * we need to make sure we're not evicting it
106800- * from the old fp.
106801- */
106802- fp->orig_prog = NULL;
106803- sk_filter_uncharge(sk, fp);
106804- }
106805-
106806- return fp_new;
106807-}
106808-
106809-static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
106810- struct sock *sk)
106811-{
106812- struct sock_filter *old_prog;
106813- struct sk_filter *old_fp;
106814- int err, new_len, old_len = fp->len;
106815-
106816- /* We are free to overwrite insns et al right here as it
106817- * won't be used at this point in time anymore internally
106818- * after the migration to the internal BPF instruction
106819- * representation.
106820- */
106821- BUILD_BUG_ON(sizeof(struct sock_filter) !=
106822- sizeof(struct sock_filter_int));
106823-
106824- /* Conversion cannot happen on overlapping memory areas,
106825- * so we need to keep the user BPF around until the 2nd
106826- * pass. At this time, the user BPF is stored in fp->insns.
106827- */
106828- old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
106829- GFP_KERNEL);
106830- if (!old_prog) {
106831- err = -ENOMEM;
106832- goto out_err;
106833- }
106834-
106835- /* 1st pass: calculate the new program length. */
106836- err = sk_convert_filter(old_prog, old_len, NULL, &new_len);
106837- if (err)
106838- goto out_err_free;
106839-
106840- /* Expand fp for appending the new filter representation. */
106841- old_fp = fp;
106842- fp = __sk_migrate_realloc(old_fp, sk, sk_filter_size(new_len));
106843- if (!fp) {
106844- /* The old_fp is still around in case we couldn't
106845- * allocate new memory, so uncharge on that one.
106846- */
106847- fp = old_fp;
106848- err = -ENOMEM;
106849- goto out_err_free;
106850- }
106851-
106852- fp->len = new_len;
106853-
106854- /* 2nd pass: remap sock_filter insns into sock_filter_int insns. */
106855- err = sk_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
106856- if (err)
106857- /* 2nd sk_convert_filter() can fail only if it fails
106858- * to allocate memory, remapping must succeed. Note,
106859- * that at this time old_fp has already been released
106860- * by __sk_migrate_realloc().
106861- */
106862- goto out_err_free;
106863-
106864- sk_filter_select_runtime(fp);
106865-
106866- kfree(old_prog);
106867- return fp;
106868-
106869-out_err_free:
106870- kfree(old_prog);
106871-out_err:
106872- /* Rollback filter setup. */
106873- if (sk != NULL)
106874- sk_filter_uncharge(sk, fp);
106875- else
106876- kfree(fp);
106877- return ERR_PTR(err);
106878-}
106879-
106880-void __weak bpf_int_jit_compile(struct sk_filter *prog)
106881-{
106882-}
106883-
106884-/**
106885- * sk_filter_select_runtime - select execution runtime for BPF program
106886- * @fp: sk_filter populated with internal BPF program
106887- *
106888- * try to JIT internal BPF program, if JIT is not available select interpreter
106889- * BPF program will be executed via SK_RUN_FILTER() macro
106890- */
106891-void sk_filter_select_runtime(struct sk_filter *fp)
106892-{
106893- fp->bpf_func = (void *) __sk_run_filter;
106894-
106895- /* Probe if internal BPF can be JITed */
106896- bpf_int_jit_compile(fp);
106897-}
106898-EXPORT_SYMBOL_GPL(sk_filter_select_runtime);
106899-
106900-/* free internal BPF program */
106901-void sk_filter_free(struct sk_filter *fp)
106902-{
106903 bpf_jit_free(fp);
106904 }
106905-EXPORT_SYMBOL_GPL(sk_filter_free);
106906+EXPORT_SYMBOL(sk_filter_release_rcu);
106907
106908-static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
106909- struct sock *sk)
106910+static int __sk_prepare_filter(struct sk_filter *fp)
106911 {
106912 int err;
106913
106914- fp->bpf_func = NULL;
106915- fp->jited = 0;
106916+ fp->bpf_func = sk_run_filter;
106917
106918 err = sk_chk_filter(fp->insns, fp->len);
106919- if (err) {
106920- if (sk != NULL)
106921- sk_filter_uncharge(sk, fp);
106922- else
106923- kfree(fp);
106924- return ERR_PTR(err);
106925- }
106926+ if (err)
106927+ return err;
106928
106929- /* Probe if we can JIT compile the filter and if so, do
106930- * the compilation of the filter.
106931- */
106932 bpf_jit_compile(fp);
106933-
106934- /* JIT compiler couldn't process this filter, so do the
106935- * internal BPF translation for the optimized interpreter.
106936- */
106937- if (!fp->jited)
106938- fp = __sk_migrate_filter(fp, sk);
106939-
106940- return fp;
106941+ return 0;
106942 }
106943
106944 /**
106945 * sk_unattached_filter_create - create an unattached filter
106946+ * @fprog: the filter program
106947 * @pfp: the unattached filter that is created
106948- * @fprog: the filter program
106949 *
106950 * Create a filter independent of any socket. We first run some
106951 * sanity checks on it to make sure it does not explode on us later.
106952@@ -1533,10 +671,11 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
106953 * a negative errno code is returned. On success the return is zero.
106954 */
106955 int sk_unattached_filter_create(struct sk_filter **pfp,
106956- struct sock_fprog_kern *fprog)
106957+ struct sock_fprog *fprog)
106958 {
106959- unsigned int fsize = sk_filter_proglen(fprog);
106960 struct sk_filter *fp;
106961+ unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
106962+ int err;
106963
106964 /* Make sure new filter is there and in the right amounts. */
106965 if (fprog->filter == NULL)
106966@@ -1545,26 +684,20 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
106967 fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
106968 if (!fp)
106969 return -ENOMEM;
106970-
106971- memcpy(fp->insns, fprog->filter, fsize);
106972+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
106973
106974 atomic_set(&fp->refcnt, 1);
106975 fp->len = fprog->len;
106976- /* Since unattached filters are not copied back to user
106977- * space through sk_get_filter(), we do not need to hold
106978- * a copy here, and can spare us the work.
106979- */
106980- fp->orig_prog = NULL;
106981
106982- /* __sk_prepare_filter() already takes care of uncharging
106983- * memory in case something goes wrong.
106984- */
106985- fp = __sk_prepare_filter(fp, NULL);
106986- if (IS_ERR(fp))
106987- return PTR_ERR(fp);
106988+ err = __sk_prepare_filter(fp);
106989+ if (err)
106990+ goto free_mem;
106991
106992 *pfp = fp;
106993 return 0;
106994+free_mem:
106995+ kfree(fp);
106996+ return err;
106997 }
106998 EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
106999
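In-kernel callers use this the way the (deleted) ptp_classifier did; a minimal sketch against the reverted signature, which takes a plain struct sock_fprog rather than upstream's sock_fprog_kern. The filter, names, and the __force_user annotation (defined by this patch) are illustrative:

static struct sock_filter accept_all[] = {
	BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
};

static struct sk_filter *demo_filter;

static int __init demo_filter_init(void)
{
	struct sock_fprog prog = {
		.len	= ARRAY_SIZE(accept_all),
		/* kernel pointer; the memcpy above uses __force_kernel */
		.filter	= (void __force_user *)accept_all,
	};

	/* Runs sk_chk_filter() and bpf_jit_compile() internally. */
	return sk_unattached_filter_create(&demo_filter, &prog);
}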
107000@@ -1587,7 +720,7 @@ EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
107001 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
107002 {
107003 struct sk_filter *fp, *old_fp;
107004- unsigned int fsize = sk_filter_proglen(fprog);
107005+ unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
107006 unsigned int sk_fsize = sk_filter_size(fprog->len);
107007 int err;
107008
107009@@ -1601,7 +734,6 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
107010 fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
107011 if (!fp)
107012 return -ENOMEM;
107013-
107014 if (copy_from_user(fp->insns, fprog->filter, fsize)) {
107015 sock_kfree_s(sk, fp, sk_fsize);
107016 return -EFAULT;
107017@@ -1610,26 +742,18 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
107018 atomic_set(&fp->refcnt, 1);
107019 fp->len = fprog->len;
107020
107021- err = sk_store_orig_filter(fp, fprog);
107022+ err = __sk_prepare_filter(fp);
107023 if (err) {
107024 sk_filter_uncharge(sk, fp);
107025- return -ENOMEM;
107026+ return err;
107027 }
107028
107029- /* __sk_prepare_filter() already takes care of uncharging
107030- * memory in case something goes wrong.
107031- */
107032- fp = __sk_prepare_filter(fp, sk);
107033- if (IS_ERR(fp))
107034- return PTR_ERR(fp);
107035-
107036 old_fp = rcu_dereference_protected(sk->sk_filter,
107037 sock_owned_by_user(sk));
107038 rcu_assign_pointer(sk->sk_filter, fp);
107039
107040 if (old_fp)
107041 sk_filter_uncharge(sk, old_fp);
107042-
107043 return 0;
107044 }
107045 EXPORT_SYMBOL_GPL(sk_attach_filter);
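Userspace reaches sk_attach_filter() through setsockopt(); a short sketch, assuming fd is any open socket (the one-instruction filter is illustrative):

#include <sys/socket.h>
#include <linux/filter.h>

int attach_demo(int fd)
{
	static struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	/* accept all */
	};
	struct sock_fprog prog = {
		.len	= sizeof(insns) / sizeof(insns[0]),
		.filter	= insns,
	};

	/* Kernel side: copy_from_user() + __sk_prepare_filter() above. */
	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}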
107046@@ -1649,46 +773,116 @@ int sk_detach_filter(struct sock *sk)
107047 sk_filter_uncharge(sk, filter);
107048 ret = 0;
107049 }
107050-
107051 return ret;
107052 }
107053 EXPORT_SYMBOL_GPL(sk_detach_filter);
107054
107055-int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
107056- unsigned int len)
107057+void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
107058+{
107059+ static const u16 decodes[] = {
107060+ [BPF_S_ALU_ADD_K] = BPF_ALU|BPF_ADD|BPF_K,
107061+ [BPF_S_ALU_ADD_X] = BPF_ALU|BPF_ADD|BPF_X,
107062+ [BPF_S_ALU_SUB_K] = BPF_ALU|BPF_SUB|BPF_K,
107063+ [BPF_S_ALU_SUB_X] = BPF_ALU|BPF_SUB|BPF_X,
107064+ [BPF_S_ALU_MUL_K] = BPF_ALU|BPF_MUL|BPF_K,
107065+ [BPF_S_ALU_MUL_X] = BPF_ALU|BPF_MUL|BPF_X,
107066+ [BPF_S_ALU_DIV_X] = BPF_ALU|BPF_DIV|BPF_X,
107067+ [BPF_S_ALU_MOD_K] = BPF_ALU|BPF_MOD|BPF_K,
107068+ [BPF_S_ALU_MOD_X] = BPF_ALU|BPF_MOD|BPF_X,
107069+ [BPF_S_ALU_AND_K] = BPF_ALU|BPF_AND|BPF_K,
107070+ [BPF_S_ALU_AND_X] = BPF_ALU|BPF_AND|BPF_X,
107071+ [BPF_S_ALU_OR_K] = BPF_ALU|BPF_OR|BPF_K,
107072+ [BPF_S_ALU_OR_X] = BPF_ALU|BPF_OR|BPF_X,
107073+ [BPF_S_ALU_XOR_K] = BPF_ALU|BPF_XOR|BPF_K,
107074+ [BPF_S_ALU_XOR_X] = BPF_ALU|BPF_XOR|BPF_X,
107075+ [BPF_S_ALU_LSH_K] = BPF_ALU|BPF_LSH|BPF_K,
107076+ [BPF_S_ALU_LSH_X] = BPF_ALU|BPF_LSH|BPF_X,
107077+ [BPF_S_ALU_RSH_K] = BPF_ALU|BPF_RSH|BPF_K,
107078+ [BPF_S_ALU_RSH_X] = BPF_ALU|BPF_RSH|BPF_X,
107079+ [BPF_S_ALU_NEG] = BPF_ALU|BPF_NEG,
107080+ [BPF_S_LD_W_ABS] = BPF_LD|BPF_W|BPF_ABS,
107081+ [BPF_S_LD_H_ABS] = BPF_LD|BPF_H|BPF_ABS,
107082+ [BPF_S_LD_B_ABS] = BPF_LD|BPF_B|BPF_ABS,
107083+ [BPF_S_ANC_PROTOCOL] = BPF_LD|BPF_B|BPF_ABS,
107084+ [BPF_S_ANC_PKTTYPE] = BPF_LD|BPF_B|BPF_ABS,
107085+ [BPF_S_ANC_IFINDEX] = BPF_LD|BPF_B|BPF_ABS,
107086+ [BPF_S_ANC_NLATTR] = BPF_LD|BPF_B|BPF_ABS,
107087+ [BPF_S_ANC_NLATTR_NEST] = BPF_LD|BPF_B|BPF_ABS,
107088+ [BPF_S_ANC_MARK] = BPF_LD|BPF_B|BPF_ABS,
107089+ [BPF_S_ANC_QUEUE] = BPF_LD|BPF_B|BPF_ABS,
107090+ [BPF_S_ANC_HATYPE] = BPF_LD|BPF_B|BPF_ABS,
107091+ [BPF_S_ANC_RXHASH] = BPF_LD|BPF_B|BPF_ABS,
107092+ [BPF_S_ANC_CPU] = BPF_LD|BPF_B|BPF_ABS,
107093+ [BPF_S_ANC_ALU_XOR_X] = BPF_LD|BPF_B|BPF_ABS,
107094+ [BPF_S_ANC_SECCOMP_LD_W] = BPF_LD|BPF_B|BPF_ABS,
107095+ [BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS,
107096+ [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
107097+ [BPF_S_ANC_PAY_OFFSET] = BPF_LD|BPF_B|BPF_ABS,
107098+ [BPF_S_LD_W_LEN] = BPF_LD|BPF_W|BPF_LEN,
107099+ [BPF_S_LD_W_IND] = BPF_LD|BPF_W|BPF_IND,
107100+ [BPF_S_LD_H_IND] = BPF_LD|BPF_H|BPF_IND,
107101+ [BPF_S_LD_B_IND] = BPF_LD|BPF_B|BPF_IND,
107102+ [BPF_S_LD_IMM] = BPF_LD|BPF_IMM,
107103+ [BPF_S_LDX_W_LEN] = BPF_LDX|BPF_W|BPF_LEN,
107104+ [BPF_S_LDX_B_MSH] = BPF_LDX|BPF_B|BPF_MSH,
107105+ [BPF_S_LDX_IMM] = BPF_LDX|BPF_IMM,
107106+ [BPF_S_MISC_TAX] = BPF_MISC|BPF_TAX,
107107+ [BPF_S_MISC_TXA] = BPF_MISC|BPF_TXA,
107108+ [BPF_S_RET_K] = BPF_RET|BPF_K,
107109+ [BPF_S_RET_A] = BPF_RET|BPF_A,
107110+ [BPF_S_ALU_DIV_K] = BPF_ALU|BPF_DIV|BPF_K,
107111+ [BPF_S_LD_MEM] = BPF_LD|BPF_MEM,
107112+ [BPF_S_LDX_MEM] = BPF_LDX|BPF_MEM,
107113+ [BPF_S_ST] = BPF_ST,
107114+ [BPF_S_STX] = BPF_STX,
107115+ [BPF_S_JMP_JA] = BPF_JMP|BPF_JA,
107116+ [BPF_S_JMP_JEQ_K] = BPF_JMP|BPF_JEQ|BPF_K,
107117+ [BPF_S_JMP_JEQ_X] = BPF_JMP|BPF_JEQ|BPF_X,
107118+ [BPF_S_JMP_JGE_K] = BPF_JMP|BPF_JGE|BPF_K,
107119+ [BPF_S_JMP_JGE_X] = BPF_JMP|BPF_JGE|BPF_X,
107120+ [BPF_S_JMP_JGT_K] = BPF_JMP|BPF_JGT|BPF_K,
107121+ [BPF_S_JMP_JGT_X] = BPF_JMP|BPF_JGT|BPF_X,
107122+ [BPF_S_JMP_JSET_K] = BPF_JMP|BPF_JSET|BPF_K,
107123+ [BPF_S_JMP_JSET_X] = BPF_JMP|BPF_JSET|BPF_X,
107124+ };
107125+ u16 code;
107126+
107127+ code = filt->code;
107128+
107129+ to->code = decodes[code];
107130+ to->jt = filt->jt;
107131+ to->jf = filt->jf;
107132+ to->k = filt->k;
107133+}
107134+
107135+int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len)
107136 {
107137- struct sock_fprog_kern *fprog;
107138 struct sk_filter *filter;
107139- int ret = 0;
107140+ int i, ret;
107141
107142 lock_sock(sk);
107143 filter = rcu_dereference_protected(sk->sk_filter,
107144- sock_owned_by_user(sk));
107145+ sock_owned_by_user(sk));
107146+ ret = 0;
107147 if (!filter)
107148 goto out;
107149-
107150- /* We're copying the filter that has been originally attached,
107151- * so no conversion/decode needed anymore.
107152- */
107153- fprog = filter->orig_prog;
107154-
107155- ret = fprog->len;
107156+ ret = filter->len;
107157 if (!len)
107158- /* User space only enquires number of filter blocks. */
107159 goto out;
107160-
107161 ret = -EINVAL;
107162- if (len < fprog->len)
107163+ if (len < filter->len)
107164 goto out;
107165
107166 ret = -EFAULT;
107167- if (copy_to_user(ubuf, fprog->filter, sk_filter_proglen(fprog)))
107168- goto out;
107169+ for (i = 0; i < filter->len; i++) {
107170+ struct sock_filter fb;
107171
107172- /* Instead of bytes, the API requests to return the number
107173- * of filter blocks.
107174- */
107175- ret = fprog->len;
107176+ sk_decode_filter(&filter->insns[i], &fb);
107177+ if (copy_to_user(&ubuf[i], &fb, sizeof(fb)))
107178+ goto out;
107179+ }
107180+
107181+ ret = filter->len;
107182 out:
107183 release_sock(sk);
107184 return ret;
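The read side is the two-step SO_GET_FILTER protocol served by sk_get_filter() above; note that optlen counts sock_filter blocks here, not bytes, and that every instruction passes through sk_decode_filter() on the way out. A hedged userspace sketch:

#include <stdlib.h>
#include <sys/socket.h>
#include <linux/filter.h>

int dump_filter(int fd)
{
	struct sock_filter *insns;
	socklen_t cnt = 0;

	/* First call: optlen 0, kernel replies with the block count. */
	if (getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &cnt) < 0)
		return -1;
	if (cnt == 0)
		return 0;	/* no filter attached */

	insns = calloc(cnt, sizeof(*insns));
	if (!insns)
		return -1;

	/* Second call: kernel decodes cnt instructions into insns[]. */
	if (getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, insns, &cnt) < 0) {
		free(insns);
		return -1;
	}
	/* ... inspect insns[0..cnt-1] ... */
	free(insns);
	return 0;
}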
107185diff --git a/net/core/flow.c b/net/core/flow.c
107186index a0348fd..6951c76 100644
107187--- a/net/core/flow.c
107188+++ b/net/core/flow.c
107189@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
107190 static int flow_entry_valid(struct flow_cache_entry *fle,
107191 struct netns_xfrm *xfrm)
107192 {
107193- if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
107194+ if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
107195 return 0;
107196 if (fle->object && !fle->object->ops->check(fle->object))
107197 return 0;
107198@@ -242,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
107199 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
107200 fcp->hash_count++;
107201 }
107202- } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
107203+ } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
107204 flo = fle->object;
107205 if (!flo)
107206 goto ret_object;
107207@@ -263,7 +263,7 @@ nocache:
107208 }
107209 flo = resolver(net, key, family, dir, flo, ctx);
107210 if (fle) {
107211- fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
107212+ fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
107213 if (!IS_ERR(flo))
107214 fle->object = flo;
107215 else
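The genid here is a wrapping generation counter, not a reference count, which is why the patch moves it to the _unchecked atomic family: under PAX_REFCOUNT, plain atomic_t traps on overflow, and a counter that wraps by design must opt out. A sketch of the intent (atomic_unchecked_t and its helpers are defined by the PaX side of this patch; the names below are illustrative):

static atomic_unchecked_t demo_genid = ATOMIC_INIT(0);

static void demo_invalidate_all(void)
{
	atomic_inc_unchecked(&demo_genid);	/* may wrap, by design */
}

static int demo_entry_valid(int cached_genid)
{
	return cached_genid == atomic_read_unchecked(&demo_genid);
}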
107216diff --git a/net/core/iovec.c b/net/core/iovec.c
107217index e1ec45a..e5c6f16 100644
107218--- a/net/core/iovec.c
107219+++ b/net/core/iovec.c
107220@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
107221 if (m->msg_name && m->msg_namelen) {
107222 if (mode == VERIFY_READ) {
107223 void __user *namep;
107224- namep = (void __user __force *) m->msg_name;
107225+ namep = (void __force_user *) m->msg_name;
107226 err = move_addr_to_kernel(namep, m->msg_namelen,
107227 address);
107228 if (err < 0)
107229@@ -55,7 +55,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
107230 }
107231
107232 size = m->msg_iovlen * sizeof(struct iovec);
107233- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
107234+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
107235 return -EFAULT;
107236
107237 m->msg_iov = iov;
107238diff --git a/net/core/neighbour.c b/net/core/neighbour.c
107239index ef31fef..8be66d9 100644
107240--- a/net/core/neighbour.c
107241+++ b/net/core/neighbour.c
107242@@ -2825,7 +2825,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
107243 void __user *buffer, size_t *lenp, loff_t *ppos)
107244 {
107245 int size, ret;
107246- struct ctl_table tmp = *ctl;
107247+ ctl_table_no_const tmp = *ctl;
107248
107249 tmp.extra1 = &zero;
107250 tmp.extra2 = &unres_qlen_max;
107251@@ -2887,7 +2887,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
107252 void __user *buffer,
107253 size_t *lenp, loff_t *ppos)
107254 {
107255- struct ctl_table tmp = *ctl;
107256+ ctl_table_no_const tmp = *ctl;
107257 int ret;
107258
107259 tmp.extra1 = &zero;
107260diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
107261index 2bf8329..2eb1423 100644
107262--- a/net/core/net-procfs.c
107263+++ b/net/core/net-procfs.c
107264@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
107265 struct rtnl_link_stats64 temp;
107266 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
107267
107268- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
107269+ if (gr_proc_is_restricted())
107270+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
107271+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
107272+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
107273+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
107274+ else
107275+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
107276 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
107277 dev->name, stats->rx_bytes, stats->rx_packets,
107278 stats->rx_errors,
107279@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
107280 return 0;
107281 }
107282
107283-static const struct seq_operations dev_seq_ops = {
107284+const struct seq_operations dev_seq_ops = {
107285 .start = dev_seq_start,
107286 .next = dev_seq_next,
107287 .stop = dev_seq_stop,
107288@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
107289
107290 static int softnet_seq_open(struct inode *inode, struct file *file)
107291 {
107292- return seq_open(file, &softnet_seq_ops);
107293+ return seq_open_restrict(file, &softnet_seq_ops);
107294 }
107295
107296 static const struct file_operations softnet_seq_fops = {
107297@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
107298 else
107299 seq_printf(seq, "%04x", ntohs(pt->type));
107300
107301+#ifdef CONFIG_GRKERNSEC_HIDESYM
107302+ seq_printf(seq, " %-8s %pf\n",
107303+ pt->dev ? pt->dev->name : "", NULL);
107304+#else
107305 seq_printf(seq, " %-8s %pf\n",
107306 pt->dev ? pt->dev->name : "", pt->func);
107307+#endif
107308 }
107309
107310 return 0;
107311diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
107312index 1cac29e..fb482f3 100644
107313--- a/net/core/net-sysfs.c
107314+++ b/net/core/net-sysfs.c
107315@@ -259,7 +259,7 @@ static ssize_t carrier_changes_show(struct device *dev,
107316 {
107317 struct net_device *netdev = to_net_dev(dev);
107318 return sprintf(buf, fmt_dec,
107319- atomic_read(&netdev->carrier_changes));
107320+ atomic_read_unchecked(&netdev->carrier_changes));
107321 }
107322 static DEVICE_ATTR_RO(carrier_changes);
107323
107324diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
107325index 85b6269..fc77ea0 100644
107326--- a/net/core/net_namespace.c
107327+++ b/net/core/net_namespace.c
107328@@ -443,7 +443,7 @@ static int __register_pernet_operations(struct list_head *list,
107329 int error;
107330 LIST_HEAD(net_exit_list);
107331
107332- list_add_tail(&ops->list, list);
107333+ pax_list_add_tail((struct list_head *)&ops->list, list);
107334 if (ops->init || (ops->id && ops->size)) {
107335 for_each_net(net) {
107336 error = ops_init(ops, net);
107337@@ -456,7 +456,7 @@ static int __register_pernet_operations(struct list_head *list,
107338
107339 out_undo:
107340 /* If I have an error cleanup all namespaces I initialized */
107341- list_del(&ops->list);
107342+ pax_list_del((struct list_head *)&ops->list);
107343 ops_exit_list(ops, &net_exit_list);
107344 ops_free_list(ops, &net_exit_list);
107345 return error;
107346@@ -467,7 +467,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
107347 struct net *net;
107348 LIST_HEAD(net_exit_list);
107349
107350- list_del(&ops->list);
107351+ pax_list_del((struct list_head *)&ops->list);
107352 for_each_net(net)
107353 list_add_tail(&net->exit_list, &net_exit_list);
107354 ops_exit_list(ops, &net_exit_list);
107355@@ -601,7 +601,7 @@ int register_pernet_device(struct pernet_operations *ops)
107356 mutex_lock(&net_mutex);
107357 error = register_pernet_operations(&pernet_list, ops);
107358 if (!error && (first_device == &pernet_list))
107359- first_device = &ops->list;
107360+ first_device = (struct list_head *)&ops->list;
107361 mutex_unlock(&net_mutex);
107362 return error;
107363 }
107364diff --git a/net/core/netpoll.c b/net/core/netpoll.c
107365index e33937f..b2b4981 100644
107366--- a/net/core/netpoll.c
107367+++ b/net/core/netpoll.c
107368@@ -382,7 +382,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
107369 struct udphdr *udph;
107370 struct iphdr *iph;
107371 struct ethhdr *eth;
107372- static atomic_t ip_ident;
107373+ static atomic_unchecked_t ip_ident;
107374 struct ipv6hdr *ip6h;
107375
107376 udp_len = len + sizeof(*udph);
107377@@ -453,7 +453,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
107378 put_unaligned(0x45, (unsigned char *)iph);
107379 iph->tos = 0;
107380 put_unaligned(htons(ip_len), &(iph->tot_len));
107381- iph->id = htons(atomic_inc_return(&ip_ident));
107382+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
107383 iph->frag_off = 0;
107384 iph->ttl = 64;
107385 iph->protocol = IPPROTO_UDP;
107386diff --git a/net/core/pktgen.c b/net/core/pktgen.c
107387index fc17a9d..d4a3d88 100644
107388--- a/net/core/pktgen.c
107389+++ b/net/core/pktgen.c
107390@@ -3725,7 +3725,7 @@ static int __net_init pg_net_init(struct net *net)
107391 pn->net = net;
107392 INIT_LIST_HEAD(&pn->pktgen_threads);
107393 pn->pktgen_exiting = false;
107394- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
107395+ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
107396 if (!pn->proc_dir) {
107397 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
107398 return -ENODEV;
107399diff --git a/net/core/ptp_classifier.c b/net/core/ptp_classifier.c
107400deleted file mode 100644
107401index d3027a7..0000000
107402--- a/net/core/ptp_classifier.c
107403+++ /dev/null
107404@@ -1,141 +0,0 @@
107405-/* PTP classifier
107406- *
107407- * This program is free software; you can redistribute it and/or
107408- * modify it under the terms of version 2 of the GNU General Public
107409- * License as published by the Free Software Foundation.
107410- *
107411- * This program is distributed in the hope that it will be useful, but
107412- * WITHOUT ANY WARRANTY; without even the implied warranty of
107413- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
107414- * General Public License for more details.
107415- */
107416-
107417-/* The below program is the bpf_asm (tools/net/) representation of
107418- * the opcode array in the ptp_filter structure.
107419- *
107420- * For convenience, this can easily be altered and reviewed with
107421- * bpf_asm and bpf_dbg, e.g. `./bpf_asm -c prog` where prog is a
107422- * simple file containing the below program:
107423- *
107424- * ldh [12] ; load ethertype
107425- *
107426- * ; PTP over UDP over IPv4 over Ethernet
107427- * test_ipv4:
107428- * jneq #0x800, test_ipv6 ; ETH_P_IP ?
107429- * ldb [23] ; load proto
107430- * jneq #17, drop_ipv4 ; IPPROTO_UDP ?
107431- * ldh [20] ; load frag offset field
107432- * jset #0x1fff, drop_ipv4 ; don't allow fragments
107433- * ldxb 4*([14]&0xf) ; load IP header len
107434- * ldh [x + 16] ; load UDP dst port
107435- * jneq #319, drop_ipv4 ; is port PTP_EV_PORT ?
107436- * ldh [x + 22] ; load payload
107437- * and #0xf ; mask PTP_CLASS_VMASK
107438- * or #0x10 ; PTP_CLASS_IPV4
107439- * ret a ; return PTP class
107440- * drop_ipv4: ret #0x0 ; PTP_CLASS_NONE
107441- *
107442- * ; PTP over UDP over IPv6 over Ethernet
107443- * test_ipv6:
107444- * jneq #0x86dd, test_8021q ; ETH_P_IPV6 ?
107445- * ldb [20] ; load proto
107446- * jneq #17, drop_ipv6 ; IPPROTO_UDP ?
107447- * ldh [56] ; load UDP dst port
107448- * jneq #319, drop_ipv6 ; is port PTP_EV_PORT ?
107449- * ldh [62] ; load payload
107450- * and #0xf ; mask PTP_CLASS_VMASK
107451- * or #0x20 ; PTP_CLASS_IPV6
107452- * ret a ; return PTP class
107453- * drop_ipv6: ret #0x0 ; PTP_CLASS_NONE
107454- *
107455- * ; PTP over 802.1Q over Ethernet
107456- * test_8021q:
107457- * jneq #0x8100, test_ieee1588 ; ETH_P_8021Q ?
107458- * ldh [16] ; load inner type
107459- * jneq #0x88f7, drop_ieee1588 ; ETH_P_1588 ?
107460- * ldb [18] ; load payload
107461- * and #0x8 ; as we don't have ports here, test
107462- * jneq #0x0, drop_ieee1588 ; for PTP_GEN_BIT and drop these
107463- * ldh [18] ; reload payload
107464- * and #0xf ; mask PTP_CLASS_VMASK
107465- * or #0x40 ; PTP_CLASS_V2_VLAN
107466- * ret a ; return PTP class
107467- *
107468- * ; PTP over Ethernet
107469- * test_ieee1588:
107470- * jneq #0x88f7, drop_ieee1588 ; ETH_P_1588 ?
107471- * ldb [14] ; load payload
107472- * and #0x8 ; as we don't have ports here, test
107473- * jneq #0x0, drop_ieee1588 ; for PTP_GEN_BIT and drop these
107474- * ldh [14] ; reload payload
107475- * and #0xf ; mask PTP_CLASS_VMASK
107476- * or #0x30 ; PTP_CLASS_L2
107477- * ret a ; return PTP class
107478- * drop_ieee1588: ret #0x0 ; PTP_CLASS_NONE
107479- */
107480-
107481-#include <linux/skbuff.h>
107482-#include <linux/filter.h>
107483-#include <linux/ptp_classify.h>
107484-
107485-static struct sk_filter *ptp_insns __read_mostly;
107486-
107487-unsigned int ptp_classify_raw(const struct sk_buff *skb)
107488-{
107489- return SK_RUN_FILTER(ptp_insns, skb);
107490-}
107491-EXPORT_SYMBOL_GPL(ptp_classify_raw);
107492-
107493-void __init ptp_classifier_init(void)
107494-{
107495- static struct sock_filter ptp_filter[] __initdata = {
107496- { 0x28, 0, 0, 0x0000000c },
107497- { 0x15, 0, 12, 0x00000800 },
107498- { 0x30, 0, 0, 0x00000017 },
107499- { 0x15, 0, 9, 0x00000011 },
107500- { 0x28, 0, 0, 0x00000014 },
107501- { 0x45, 7, 0, 0x00001fff },
107502- { 0xb1, 0, 0, 0x0000000e },
107503- { 0x48, 0, 0, 0x00000010 },
107504- { 0x15, 0, 4, 0x0000013f },
107505- { 0x48, 0, 0, 0x00000016 },
107506- { 0x54, 0, 0, 0x0000000f },
107507- { 0x44, 0, 0, 0x00000010 },
107508- { 0x16, 0, 0, 0x00000000 },
107509- { 0x06, 0, 0, 0x00000000 },
107510- { 0x15, 0, 9, 0x000086dd },
107511- { 0x30, 0, 0, 0x00000014 },
107512- { 0x15, 0, 6, 0x00000011 },
107513- { 0x28, 0, 0, 0x00000038 },
107514- { 0x15, 0, 4, 0x0000013f },
107515- { 0x28, 0, 0, 0x0000003e },
107516- { 0x54, 0, 0, 0x0000000f },
107517- { 0x44, 0, 0, 0x00000020 },
107518- { 0x16, 0, 0, 0x00000000 },
107519- { 0x06, 0, 0, 0x00000000 },
107520- { 0x15, 0, 9, 0x00008100 },
107521- { 0x28, 0, 0, 0x00000010 },
107522- { 0x15, 0, 15, 0x000088f7 },
107523- { 0x30, 0, 0, 0x00000012 },
107524- { 0x54, 0, 0, 0x00000008 },
107525- { 0x15, 0, 12, 0x00000000 },
107526- { 0x28, 0, 0, 0x00000012 },
107527- { 0x54, 0, 0, 0x0000000f },
107528- { 0x44, 0, 0, 0x00000040 },
107529- { 0x16, 0, 0, 0x00000000 },
107530- { 0x15, 0, 7, 0x000088f7 },
107531- { 0x30, 0, 0, 0x0000000e },
107532- { 0x54, 0, 0, 0x00000008 },
107533- { 0x15, 0, 4, 0x00000000 },
107534- { 0x28, 0, 0, 0x0000000e },
107535- { 0x54, 0, 0, 0x0000000f },
107536- { 0x44, 0, 0, 0x00000030 },
107537- { 0x16, 0, 0, 0x00000000 },
107538- { 0x06, 0, 0, 0x00000000 },
107539- };
107540- struct sock_fprog_kern ptp_prog = {
107541- .len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter,
107542- };
107543-
107544- BUG_ON(sk_unattached_filter_create(&ptp_insns, &ptp_prog));
107545-}
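The raw { code, jt, jf, k } quadruples in the deleted array map one-to-one onto the bpf_asm listing in its header comment. Decoding the first two entries with the uapi macros, as a sanity check (0x28 == BPF_LD|BPF_H|BPF_ABS and 0x15 == BPF_JMP|BPF_JEQ|BPF_K):

#include <linux/filter.h>

/* { 0x28, 0, 0, 0x0000000c } and { 0x15, 0, 12, 0x00000800 },
 * i.e. "ldh [12]" then "jneq #0x800, test_ipv6" (jf skips 12
 * instructions forward to the IPv6 branch).
 */
static struct sock_filter ptp_head[] = {
	BPF_STMT(BPF_LD  | BPF_H   | BPF_ABS, 12),
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 12),
};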
107546diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
107547index 1063996..0729c19 100644
107548--- a/net/core/rtnetlink.c
107549+++ b/net/core/rtnetlink.c
107550@@ -58,7 +58,7 @@ struct rtnl_link {
107551 rtnl_doit_func doit;
107552 rtnl_dumpit_func dumpit;
107553 rtnl_calcit_func calcit;
107554-};
107555+} __no_const;
107556
107557 static DEFINE_MUTEX(rtnl_mutex);
107558
107559@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
107560 if (rtnl_link_ops_get(ops->kind))
107561 return -EEXIST;
107562
107563- if (!ops->dellink)
107564- ops->dellink = unregister_netdevice_queue;
107565+ if (!ops->dellink) {
107566+ pax_open_kernel();
107567+ *(void **)&ops->dellink = unregister_netdevice_queue;
107568+ pax_close_kernel();
107569+ }
107570
107571- list_add_tail(&ops->list, &link_ops);
107572+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
107573 return 0;
107574 }
107575 EXPORT_SYMBOL_GPL(__rtnl_link_register);
107576@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
107577 for_each_net(net) {
107578 __rtnl_kill_links(net, ops);
107579 }
107580- list_del(&ops->list);
107581+ pax_list_del((struct list_head *)&ops->list);
107582 }
107583 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
107584
107585@@ -1008,7 +1011,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
107586 (dev->ifalias &&
107587 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
107588 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
107589- atomic_read(&dev->carrier_changes)))
107590+ atomic_read_unchecked(&dev->carrier_changes)))
107591 goto nla_put_failure;
107592
107593 if (1) {
107594diff --git a/net/core/scm.c b/net/core/scm.c
107595index b442e7e..6f5b5a2 100644
107596--- a/net/core/scm.c
107597+++ b/net/core/scm.c
107598@@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
107599 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
107600 {
107601 struct cmsghdr __user *cm
107602- = (__force struct cmsghdr __user *)msg->msg_control;
107603+ = (struct cmsghdr __force_user *)msg->msg_control;
107604 struct cmsghdr cmhdr;
107605 int cmlen = CMSG_LEN(len);
107606 int err;
107607@@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
107608 err = -EFAULT;
107609 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
107610 goto out;
107611- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
107612+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
107613 goto out;
107614 cmlen = CMSG_SPACE(len);
107615 if (msg->msg_controllen < cmlen)
107616@@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg);
107617 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
107618 {
107619 struct cmsghdr __user *cm
107620- = (__force struct cmsghdr __user*)msg->msg_control;
107621+ = (struct cmsghdr __force_user *)msg->msg_control;
107622
107623 int fdmax = 0;
107624 int fdnum = scm->fp->count;
107625@@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
107626 if (fdnum < fdmax)
107627 fdmax = fdnum;
107628
107629- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
107630+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
107631 i++, cmfptr++)
107632 {
107633 struct socket *sock;
107634diff --git a/net/core/skbuff.c b/net/core/skbuff.c
107635index 58ff88e..af9b458 100644
107636--- a/net/core/skbuff.c
107637+++ b/net/core/skbuff.c
107638@@ -2010,7 +2010,7 @@ EXPORT_SYMBOL(__skb_checksum);
107639 __wsum skb_checksum(const struct sk_buff *skb, int offset,
107640 int len, __wsum csum)
107641 {
107642- const struct skb_checksum_ops ops = {
107643+ static const struct skb_checksum_ops ops = {
107644 .update = csum_partial_ext,
107645 .combine = csum_block_add_ext,
107646 };
107647@@ -3233,13 +3233,15 @@ void __init skb_init(void)
107648 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
107649 sizeof(struct sk_buff),
107650 0,
107651- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
107652+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
107653+ SLAB_NO_SANITIZE,
107654 NULL);
107655 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
107656 (2*sizeof(struct sk_buff)) +
107657 sizeof(atomic_t),
107658 0,
107659- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
107660+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
107661+ SLAB_NO_SANITIZE,
107662 NULL);
107663 }
107664
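PAX_MEMORY_SANITIZE wipes objects as they are freed; the skb head caches are hot enough that the patch exempts them with SLAB_NO_SANITIZE. A sketch of the pattern (the flag is added elsewhere in this patch; cache name and size are illustrative):

static struct kmem_cache *demo_cache __read_mostly;

static void __init demo_cache_init(void)
{
	/* SLAB_PANIC: no error path needed.  SLAB_NO_SANITIZE skips
	 * the wipe-on-free pass for this performance-critical cache.
	 */
	demo_cache = kmem_cache_create("demo_cache", 256, 0,
				       SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				       SLAB_NO_SANITIZE,
				       NULL);
}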
107665diff --git a/net/core/sock.c b/net/core/sock.c
107666index 026e01f..f54f908 100644
107667--- a/net/core/sock.c
107668+++ b/net/core/sock.c
107669@@ -442,7 +442,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
107670 struct sk_buff_head *list = &sk->sk_receive_queue;
107671
107672 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
107673- atomic_inc(&sk->sk_drops);
107674+ atomic_inc_unchecked(&sk->sk_drops);
107675 trace_sock_rcvqueue_full(sk, skb);
107676 return -ENOMEM;
107677 }
107678@@ -452,7 +452,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
107679 return err;
107680
107681 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
107682- atomic_inc(&sk->sk_drops);
107683+ atomic_inc_unchecked(&sk->sk_drops);
107684 return -ENOBUFS;
107685 }
107686
107687@@ -472,7 +472,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
107688 skb_dst_force(skb);
107689
107690 spin_lock_irqsave(&list->lock, flags);
107691- skb->dropcount = atomic_read(&sk->sk_drops);
107692+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
107693 __skb_queue_tail(list, skb);
107694 spin_unlock_irqrestore(&list->lock, flags);
107695
107696@@ -492,7 +492,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
107697 skb->dev = NULL;
107698
107699 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
107700- atomic_inc(&sk->sk_drops);
107701+ atomic_inc_unchecked(&sk->sk_drops);
107702 goto discard_and_relse;
107703 }
107704 if (nested)
107705@@ -510,7 +510,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
107706 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
107707 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
107708 bh_unlock_sock(sk);
107709- atomic_inc(&sk->sk_drops);
107710+ atomic_inc_unchecked(&sk->sk_drops);
107711 goto discard_and_relse;
107712 }
107713
107714@@ -998,12 +998,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
107715 struct timeval tm;
107716 } v;
107717
107718- int lv = sizeof(int);
107719- int len;
107720+ unsigned int lv = sizeof(int);
107721+ unsigned int len;
107722
107723 if (get_user(len, optlen))
107724 return -EFAULT;
107725- if (len < 0)
107726+ if (len > INT_MAX)
107727 return -EINVAL;
107728
107729 memset(&v, 0, sizeof(v));
107730@@ -1155,11 +1155,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
107731
107732 case SO_PEERNAME:
107733 {
107734- char address[128];
107735+ char address[_K_SS_MAXSIZE];
107736
107737 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
107738 return -ENOTCONN;
107739- if (lv < len)
107740+ if (lv < len || sizeof address < len)
107741 return -EINVAL;
107742 if (copy_to_user(optval, address, len))
107743 return -EFAULT;
107744@@ -1240,7 +1240,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
107745
107746 if (len > lv)
107747 len = lv;
107748- if (copy_to_user(optval, &v, len))
107749+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
107750 return -EFAULT;
107751 lenout:
107752 if (put_user(len, optlen))
107753@@ -2375,7 +2375,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
107754 */
107755 smp_wmb();
107756 atomic_set(&sk->sk_refcnt, 1);
107757- atomic_set(&sk->sk_drops, 0);
107758+ atomic_set_unchecked(&sk->sk_drops, 0);
107759 }
107760 EXPORT_SYMBOL(sock_init_data);
107761
107762@@ -2503,6 +2503,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
107763 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
107764 int level, int type)
107765 {
107766+ struct sock_extended_err ee;
107767 struct sock_exterr_skb *serr;
107768 struct sk_buff *skb, *skb2;
107769 int copied, err;
107770@@ -2524,7 +2525,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
107771 sock_recv_timestamp(msg, sk, skb);
107772
107773 serr = SKB_EXT_ERR(skb);
107774- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
107775+ ee = serr->ee;
107776+ put_cmsg(msg, level, type, sizeof ee, &ee);
107777
107778 msg->msg_flags |= MSG_ERRQUEUE;
107779 err = copied;
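The sock.c hunks above share one defensive pattern: bound every copy_to_user()/put_cmsg() by the size of the kernel object as well as the user-supplied length, and stage data in a plain local (the on-stack struct sock_extended_err) before copying it out. A condensed sketch of that clamp-then-copy shape (names are illustrative):

static int demo_copy_out(void __user *optval, unsigned int len,
			 const void *v, unsigned int lv)
{
	if (len > lv)
		len = lv;		/* never copy past the object */
	if (copy_to_user(optval, v, len))
		return -EFAULT;
	return 0;
}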
107780diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
107781index a4216a4..773e3d7 100644
107782--- a/net/core/sock_diag.c
107783+++ b/net/core/sock_diag.c
107784@@ -9,26 +9,33 @@
107785 #include <linux/inet_diag.h>
107786 #include <linux/sock_diag.h>
107787
107788-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
107789+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
107790 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
107791 static DEFINE_MUTEX(sock_diag_table_mutex);
107792
107793 int sock_diag_check_cookie(void *sk, __u32 *cookie)
107794 {
107795+#ifndef CONFIG_GRKERNSEC_HIDESYM
107796 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
107797 cookie[1] != INET_DIAG_NOCOOKIE) &&
107798 ((u32)(unsigned long)sk != cookie[0] ||
107799 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
107800 return -ESTALE;
107801 else
107802+#endif
107803 return 0;
107804 }
107805 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
107806
107807 void sock_diag_save_cookie(void *sk, __u32 *cookie)
107808 {
107809+#ifdef CONFIG_GRKERNSEC_HIDESYM
107810+ cookie[0] = 0;
107811+ cookie[1] = 0;
107812+#else
107813 cookie[0] = (u32)(unsigned long)sk;
107814 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
107815+#endif
107816 }
107817 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
107818
107819@@ -52,10 +59,9 @@ EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
107820 int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
107821 struct sk_buff *skb, int attrtype)
107822 {
107823- struct sock_fprog_kern *fprog;
107824- struct sk_filter *filter;
107825 struct nlattr *attr;
107826- unsigned int flen;
107827+ struct sk_filter *filter;
107828+ unsigned int len;
107829 int err = 0;
107830
107831 if (!may_report_filterinfo) {
107832@@ -64,20 +70,24 @@ int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
107833 }
107834
107835 rcu_read_lock();
107836+
107837 filter = rcu_dereference(sk->sk_filter);
107838- if (!filter)
107839- goto out;
107840+ len = filter ? filter->len * sizeof(struct sock_filter) : 0;
107841
107842- fprog = filter->orig_prog;
107843- flen = sk_filter_proglen(fprog);
107844-
107845- attr = nla_reserve(skb, attrtype, flen);
107846+ attr = nla_reserve(skb, attrtype, len);
107847 if (attr == NULL) {
107848 err = -EMSGSIZE;
107849 goto out;
107850 }
107851
107852- memcpy(nla_data(attr), fprog->filter, flen);
107853+ if (filter) {
107854+ struct sock_filter *fb = (struct sock_filter *)nla_data(attr);
107855+ int i;
107856+
107857+ for (i = 0; i < filter->len; i++, fb++)
107858+ sk_decode_filter(&filter->insns[i], fb);
107859+ }
107860+
107861 out:
107862 rcu_read_unlock();
107863 return err;
107864@@ -110,8 +120,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
107865 mutex_lock(&sock_diag_table_mutex);
107866 if (sock_diag_handlers[hndl->family])
107867 err = -EBUSY;
107868- else
107869+ else {
107870+ pax_open_kernel();
107871 sock_diag_handlers[hndl->family] = hndl;
107872+ pax_close_kernel();
107873+ }
107874 mutex_unlock(&sock_diag_table_mutex);
107875
107876 return err;
107877@@ -127,7 +140,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
107878
107879 mutex_lock(&sock_diag_table_mutex);
107880 BUG_ON(sock_diag_handlers[family] != hnld);
107881+ pax_open_kernel();
107882 sock_diag_handlers[family] = NULL;
107883+ pax_close_kernel();
107884 mutex_unlock(&sock_diag_table_mutex);
107885 }
107886 EXPORT_SYMBOL_GPL(sock_diag_unregister);
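Making sock_diag_handlers[] __read_only means the table lives in write-protected memory once the kernel has booted; the legitimate writers above must bracket their stores with pax_open_kernel()/pax_close_kernel(), which briefly lift the protection on the current CPU. A sketch of the pattern (both helpers come from the PaX side of this patch; the demo names are illustrative):

static const struct sock_diag_handler *demo_handlers[AF_MAX] __read_only;

static void demo_register(int family, const struct sock_diag_handler *hndl)
{
	pax_open_kernel();
	demo_handlers[family] = hndl;	/* a plain store would fault */
	pax_close_kernel();
}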
107887diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
107888index cf9cd13..8b56af3 100644
107889--- a/net/core/sysctl_net_core.c
107890+++ b/net/core/sysctl_net_core.c
107891@@ -32,7 +32,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
107892 {
107893 unsigned int orig_size, size;
107894 int ret, i;
107895- struct ctl_table tmp = {
107896+ ctl_table_no_const tmp = {
107897 .data = &size,
107898 .maxlen = sizeof(size),
107899 .mode = table->mode
107900@@ -200,7 +200,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
107901 void __user *buffer, size_t *lenp, loff_t *ppos)
107902 {
107903 char id[IFNAMSIZ];
107904- struct ctl_table tbl = {
107905+ ctl_table_no_const tbl = {
107906 .data = id,
107907 .maxlen = IFNAMSIZ,
107908 };
107909@@ -379,13 +379,12 @@ static struct ctl_table netns_core_table[] = {
107910
107911 static __net_init int sysctl_core_net_init(struct net *net)
107912 {
107913- struct ctl_table *tbl;
107914+ ctl_table_no_const *tbl = NULL;
107915
107916 net->core.sysctl_somaxconn = SOMAXCONN;
107917
107918- tbl = netns_core_table;
107919 if (!net_eq(net, &init_net)) {
107920- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
107921+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
107922 if (tbl == NULL)
107923 goto err_dup;
107924
107925@@ -395,17 +394,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
107926 if (net->user_ns != &init_user_ns) {
107927 tbl[0].procname = NULL;
107928 }
107929- }
107930-
107931- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
107932+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
107933+ } else
107934+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
107935 if (net->core.sysctl_hdr == NULL)
107936 goto err_reg;
107937
107938 return 0;
107939
107940 err_reg:
107941- if (tbl != netns_core_table)
107942- kfree(tbl);
107943+ kfree(tbl);
107944 err_dup:
107945 return -ENOMEM;
107946 }
107947@@ -420,7 +418,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
107948 kfree(tbl);
107949 }
107950
107951-static __net_initdata struct pernet_operations sysctl_core_ops = {
107952+static __net_initconst struct pernet_operations sysctl_core_ops = {
107953 .init = sysctl_core_net_init,
107954 .exit = sysctl_core_net_exit,
107955 };
107956diff --git a/net/core/timestamping.c b/net/core/timestamping.c
107957index 6521dfd..661b5a4 100644
107958--- a/net/core/timestamping.c
107959+++ b/net/core/timestamping.c
107960@@ -23,11 +23,16 @@
107961 #include <linux/skbuff.h>
107962 #include <linux/export.h>
107963
107964+static struct sock_filter ptp_filter[] = {
107965+ PTP_FILTER
107966+};
107967+
107968 static unsigned int classify(const struct sk_buff *skb)
107969 {
107970- if (likely(skb->dev && skb->dev->phydev &&
107971+ if (likely(skb->dev &&
107972+ skb->dev->phydev &&
107973 skb->dev->phydev->drv))
107974- return ptp_classify_raw(skb);
107975+ return sk_run_filter(skb, ptp_filter);
107976 else
107977 return PTP_CLASS_NONE;
107978 }
107979@@ -55,13 +60,11 @@ void skb_clone_tx_timestamp(struct sk_buff *skb)
107980 if (likely(phydev->drv->txtstamp)) {
107981 if (!atomic_inc_not_zero(&sk->sk_refcnt))
107982 return;
107983-
107984 clone = skb_clone(skb, GFP_ATOMIC);
107985 if (!clone) {
107986 sock_put(sk);
107987 return;
107988 }
107989-
107990 clone->sk = sk;
107991 phydev->drv->txtstamp(phydev, clone, type);
107992 }
107993@@ -86,15 +89,12 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
107994 }
107995
107996 *skb_hwtstamps(skb) = *hwtstamps;
107997-
107998 serr = SKB_EXT_ERR(skb);
107999 memset(serr, 0, sizeof(*serr));
108000 serr->ee.ee_errno = ENOMSG;
108001 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
108002 skb->sk = NULL;
108003-
108004 err = sock_queue_err_skb(sk, skb);
108005-
108006 sock_put(sk);
108007 if (err)
108008 kfree_skb(skb);
108009@@ -132,3 +132,8 @@ bool skb_defer_rx_timestamp(struct sk_buff *skb)
108010 return false;
108011 }
108012 EXPORT_SYMBOL_GPL(skb_defer_rx_timestamp);
108013+
108014+void __init skb_timestamping_init(void)
108015+{
108016+ BUG_ON(sk_chk_filter(ptp_filter, ARRAY_SIZE(ptp_filter)));
108017+}
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index ae011b4..d2d18bf 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -465,6 +465,7 @@ static struct proto dn_proto = {
 .sysctl_rmem = sysctl_decnet_rmem,
 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
 .obj_size = sizeof(struct dn_sock),
+ .slab_flags = SLAB_USERCOPY,
 };

 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 3b726f3..1af6368 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -200,7 +200,7 @@ static struct dn_dev_sysctl_table {
 .extra1 = &min_t3,
 .extra2 = &max_t3
 },
- {0}
+ { }
 },
 };

diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
index 5325b54..a0d4d69 100644
--- a/net/decnet/sysctl_net_decnet.c
+++ b/net/decnet/sysctl_net_decnet.c
@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,

 if (len > *lenp) len = *lenp;

- if (copy_to_user(buffer, addr, len))
+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
 return -EFAULT;

 *lenp = len;
@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,

 if (len > *lenp) len = *lenp;

- if (copy_to_user(buffer, devname, len))
+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
 return -EFAULT;

 *lenp = len;
diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
index 6f1428c..9586b83 100644
--- a/net/ieee802154/reassembly.c
+++ b/net/ieee802154/reassembly.c
@@ -438,14 +438,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {

 static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
 {
- struct ctl_table *table;
+ ctl_table_no_const *table = NULL;
 struct ctl_table_header *hdr;
 struct netns_ieee802154_lowpan *ieee802154_lowpan =
 net_ieee802154_lowpan(net);

- table = lowpan_frags_ns_ctl_table;
 if (!net_eq(net, &init_net)) {
- table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
+ table = kmemdup(lowpan_frags_ns_ctl_table, sizeof(lowpan_frags_ns_ctl_table),
 GFP_KERNEL);
 if (table == NULL)
 goto err_alloc;
@@ -458,9 +457,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
 /* Don't export sysctls to unprivileged users */
 if (net->user_ns != &init_user_ns)
 table[0].procname = NULL;
- }
-
- hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
+ } else
+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", lowpan_frags_ns_ctl_table);
 if (hdr == NULL)
 goto err_reg;

@@ -468,8 +467,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
 return 0;

 err_reg:
- if (!net_eq(net, &init_net))
- kfree(table);
+ kfree(table);
 err_alloc:
 return -ENOMEM;
 }
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index e944937..368fe78 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1540,7 +1540,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
 idx = 0;
 head = &net->dev_index_head[h];
 rcu_read_lock();
- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
 net->dev_base_seq;
 hlist_for_each_entry_rcu(dev, head, index_hlist) {
 if (idx < s_idx)
@@ -1858,7 +1858,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
 idx = 0;
 head = &net->dev_index_head[h];
 rcu_read_lock();
- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
 net->dev_base_seq;
 hlist_for_each_entry_rcu(dev, head, index_hlist) {
 if (idx < s_idx)
@@ -2093,7 +2093,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)

-static struct devinet_sysctl_table {
+static const struct devinet_sysctl_table {
 struct ctl_table_header *sysctl_header;
 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
 } devinet_sysctl = {
@@ -2215,7 +2215,7 @@ static __net_init int devinet_init_net(struct net *net)
 int err;
 struct ipv4_devconf *all, *dflt;
 #ifdef CONFIG_SYSCTL
- struct ctl_table *tbl = ctl_forward_entry;
+ ctl_table_no_const *tbl = NULL;
 struct ctl_table_header *forw_hdr;
 #endif

@@ -2233,7 +2233,7 @@ static __net_init int devinet_init_net(struct net *net)
 goto err_alloc_dflt;

 #ifdef CONFIG_SYSCTL
- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
 if (tbl == NULL)
 goto err_alloc_ctl;

@@ -2253,7 +2253,10 @@ static __net_init int devinet_init_net(struct net *net)
 goto err_reg_dflt;

 err = -ENOMEM;
- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
+ if (!net_eq(net, &init_net))
+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
+ else
+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
 if (forw_hdr == NULL)
 goto err_reg_ctl;
 net->ipv4.forw_hdr = forw_hdr;
@@ -2269,8 +2272,7 @@ err_reg_ctl:
 err_reg_dflt:
 __devinet_sysctl_unregister(all);
 err_reg_all:
- if (tbl != ctl_forward_entry)
- kfree(tbl);
+ kfree(tbl);
 err_alloc_ctl:
 #endif
 if (dflt != &ipv4_devconf_dflt)
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 255aa99..45c78f8 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1015,12 +1015,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 fib_sync_up(dev);
 #endif
- atomic_inc(&net->ipv4.dev_addr_genid);
+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
 rt_cache_flush(dev_net(dev));
 break;
 case NETDEV_DOWN:
 fib_del_ifaddr(ifa, NULL);
- atomic_inc(&net->ipv4.dev_addr_genid);
+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
 if (ifa->ifa_dev->ifa_list == NULL) {
 /* Last address was deleted from this interface.
 * Disable IP.
@@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 fib_sync_up(dev);
 #endif
- atomic_inc(&net->ipv4.dev_addr_genid);
+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
 rt_cache_flush(net);
 break;
 case NETDEV_DOWN:
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index b10cd43a..22327f9 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -768,7 +768,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
 nh->nh_saddr = inet_select_addr(nh->nh_dev,
 nh->nh_gw,
 nh->nh_parent->fib_scope);
- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);

 return nh->nh_saddr;
 }
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 43116e8..e3e6159 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -18,6 +18,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/wait.h>
+#include <linux/security.h>

 #include <net/inet_connection_sock.h>
 #include <net/inet_hashtables.h>
@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
 return inet_ehashfn(net, laddr, lport, faddr, fport);
 }

+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
+
 /*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
@@ -554,6 +557,8 @@ ok:
 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
 spin_unlock(&head->lock);

+ gr_update_task_in_ip_table(current, inet_sk(sk));
+
 if (tw) {
 inet_twsk_deschedule(tw, death_row);
 while (twrefcnt) {
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index bd5f592..e80e605 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -482,7 +482,7 @@ relookup:
 if (p) {
 p->daddr = *daddr;
 atomic_set(&p->refcnt, 1);
- atomic_set(&p->rid, 0);
+ atomic_set_unchecked(&p->rid, 0);
 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
 p->rate_tokens = 0;
 /* 60*HZ is arbitrary, but chosen enough high so that the first
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index ed32313..3762abe 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -284,7 +284,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
 return 0;

 start = qp->rid;
- end = atomic_inc_return(&peer->rid);
+ end = atomic_inc_return_unchecked(&peer->rid);
 qp->rid = end;

 rc = qp->q.fragments && (end - start) > max;
@@ -761,12 +761,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {

 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
 {
- struct ctl_table *table;
+ ctl_table_no_const *table = NULL;
 struct ctl_table_header *hdr;

- table = ip4_frags_ns_ctl_table;
 if (!net_eq(net, &init_net)) {
- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
 if (table == NULL)
 goto err_alloc;

@@ -777,9 +776,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
 /* Don't export sysctls to unprivileged users */
 if (net->user_ns != &init_user_ns)
 table[0].procname = NULL;
- }
+ hdr = register_net_sysctl(net, "net/ipv4", table);
+ } else
+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);

- hdr = register_net_sysctl(net, "net/ipv4", table);
 if (hdr == NULL)
 goto err_reg;

@@ -787,8 +787,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
 return 0;

 err_reg:
- if (!net_eq(net, &init_net))
- kfree(table);
+ kfree(table);
 err_alloc:
 return -ENOMEM;
 }
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 9b84254..c776611 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
 module_param(log_ecn_error, bool, 0644);
 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
+static struct rtnl_link_ops ipgre_link_ops;
 static int ipgre_tunnel_init(struct net_device *dev);

 static int ipgre_net_id __read_mostly;
@@ -733,7 +733,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
 };

-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
+static struct rtnl_link_ops ipgre_link_ops = {
 .kind = "gre",
 .maxtype = IFLA_GRE_MAX,
 .policy = ipgre_policy,
@@ -747,7 +747,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
 .fill_info = ipgre_fill_info,
 };

-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
+static struct rtnl_link_ops ipgre_tap_ops = {
 .kind = "gretap",
 .maxtype = IFLA_GRE_MAX,
 .policy = ipgre_policy,
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 64741b9..6f334a2 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1188,7 +1188,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
 len = min_t(unsigned int, len, opt->optlen);
 if (put_user(len, optlen))
 return -EFAULT;
- if (copy_to_user(optval, opt->__data, len))
+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
+ copy_to_user(optval, opt->__data, len))
 return -EFAULT;
 return 0;
 }
@@ -1319,7 +1320,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
 if (sk->sk_type != SOCK_STREAM)
 return -ENOPROTOOPT;

- msg.msg_control = optval;
+ msg.msg_control = (void __force_kernel *)optval;
 msg.msg_controllen = len;
 msg.msg_flags = flags;

diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index b8960f3..0f025db 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -45,7 +45,7 @@
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>

-static struct rtnl_link_ops vti_link_ops __read_mostly;
+static struct rtnl_link_ops vti_link_ops;

 static int vti_net_id __read_mostly;
 static int vti_tunnel_init(struct net_device *dev);
@@ -519,7 +519,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
 };

-static struct rtnl_link_ops vti_link_ops __read_mostly = {
+static struct rtnl_link_ops vti_link_ops = {
 .kind = "vti",
 .maxtype = IFLA_VTI_MAX,
 .policy = vti_policy,
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index b3e86ea..18ce98c 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -334,7 +334,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)

 mm_segment_t oldfs = get_fs();
 set_fs(get_ds());
- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
 set_fs(oldfs);
 return res;
 }
@@ -345,7 +345,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)

 mm_segment_t oldfs = get_fs();
 set_fs(get_ds());
- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
 set_fs(oldfs);
 return res;
 }
@@ -356,7 +356,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)

 mm_segment_t oldfs = get_fs();
 set_fs(get_ds());
- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
 set_fs(oldfs);
 return res;
 }
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 62eaa00..29b2dc2 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 static int ipip_net_id __read_mostly;

 static int ipip_tunnel_init(struct net_device *dev);
-static struct rtnl_link_ops ipip_link_ops __read_mostly;
+static struct rtnl_link_ops ipip_link_ops;

 static int ipip_err(struct sk_buff *skb, u32 info)
 {
@@ -409,7 +409,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
 };

-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
+static struct rtnl_link_ops ipip_link_ops = {
 .kind = "ipip",
 .maxtype = IFLA_IPTUN_MAX,
 .policy = ipip_policy,
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index f95b6f9..2ee2097 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
 #endif

 static int get_info(struct net *net, void __user *user,
- const int *len, int compat)
+ int len, int compat)
 {
 char name[XT_TABLE_MAXNAMELEN];
 struct xt_table *t;
 int ret;

- if (*len != sizeof(struct arpt_getinfo)) {
- duprintf("length %u != %Zu\n", *len,
+ if (len != sizeof(struct arpt_getinfo)) {
+ duprintf("length %u != %Zu\n", len,
 sizeof(struct arpt_getinfo));
 return -EINVAL;
 }
@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
 info.size = private->size;
 strcpy(info.name, name);

- if (copy_to_user(user, &info, *len) != 0)
+ if (copy_to_user(user, &info, len) != 0)
 ret = -EFAULT;
 else
 ret = 0;
@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,

 switch (cmd) {
 case ARPT_SO_GET_INFO:
- ret = get_info(sock_net(sk), user, len, 1);
+ ret = get_info(sock_net(sk), user, *len, 1);
 break;
 case ARPT_SO_GET_ENTRIES:
 ret = compat_get_entries(sock_net(sk), user, len);
@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len

 switch (cmd) {
 case ARPT_SO_GET_INFO:
- ret = get_info(sock_net(sk), user, len, 0);
+ ret = get_info(sock_net(sk), user, *len, 0);
 break;

 case ARPT_SO_GET_ENTRIES:
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 99e810f..3711b81 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
 #endif

 static int get_info(struct net *net, void __user *user,
- const int *len, int compat)
+ int len, int compat)
 {
 char name[XT_TABLE_MAXNAMELEN];
 struct xt_table *t;
 int ret;

- if (*len != sizeof(struct ipt_getinfo)) {
- duprintf("length %u != %zu\n", *len,
+ if (len != sizeof(struct ipt_getinfo)) {
+ duprintf("length %u != %zu\n", len,
 sizeof(struct ipt_getinfo));
 return -EINVAL;
 }
@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
 info.size = private->size;
 strcpy(info.name, name);

- if (copy_to_user(user, &info, *len) != 0)
+ if (copy_to_user(user, &info, len) != 0)
 ret = -EFAULT;
 else
 ret = 0;
@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)

 switch (cmd) {
 case IPT_SO_GET_INFO:
- ret = get_info(sock_net(sk), user, len, 1);
+ ret = get_info(sock_net(sk), user, *len, 1);
 break;
 case IPT_SO_GET_ENTRIES:
 ret = compat_get_entries(sock_net(sk), user, len);
@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)

 switch (cmd) {
 case IPT_SO_GET_INFO:
- ret = get_info(sock_net(sk), user, len, 0);
+ ret = get_info(sock_net(sk), user, *len, 0);
 break;

 case IPT_SO_GET_ENTRIES:
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 2510c02..cfb34fa 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
 spin_lock_init(&cn->lock);

 #ifdef CONFIG_PROC_FS
- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
+ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
 if (!cn->procdir) {
 pr_err("Unable to proc dir entry\n");
 return -ENOMEM;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 044a0dd..3399751 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -59,7 +59,7 @@ struct ping_table {
 };

 static struct ping_table ping_table;
-struct pingv6_ops pingv6_ops;
+struct pingv6_ops *pingv6_ops;
 EXPORT_SYMBOL_GPL(pingv6_ops);

 static u16 ping_port_rover;
@@ -348,7 +348,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
 return -ENODEV;
 }
 }
- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
 scoped);
 rcu_read_unlock();

@@ -556,7 +556,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
 }
 #if IS_ENABLED(CONFIG_IPV6)
 } else if (skb->protocol == htons(ETH_P_IPV6)) {
- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
 #endif
 }

@@ -574,7 +574,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
 info, (u8 *)icmph);
 #if IS_ENABLED(CONFIG_IPV6)
 } else if (family == AF_INET6) {
- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
 info, (u8 *)icmph);
 #endif
 }
@@ -858,7 +858,7 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 return ip_recv_error(sk, msg, len, addr_len);
 #if IS_ENABLED(CONFIG_IPV6)
 } else if (family == AF_INET6) {
- return pingv6_ops.ipv6_recv_error(sk, msg, len,
+ return pingv6_ops->ipv6_recv_error(sk, msg, len,
 addr_len);
 #endif
 }
@@ -916,10 +916,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 }

 if (inet6_sk(sk)->rxopt.all)
- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
+ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
 if (skb->protocol == htons(ETH_P_IPV6) &&
 inet6_sk(sk)->rxopt.all)
- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
+ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
 ip_cmsg_recv(msg, skb);
 #endif
@@ -1111,7 +1111,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
 0, sock_i_ino(sp),
 atomic_read(&sp->sk_refcnt), sp,
- atomic_read(&sp->sk_drops));
+ atomic_read_unchecked(&sp->sk_drops));
 }

 static int ping_v4_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 2c65160..213ecdf 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
 int raw_rcv(struct sock *sk, struct sk_buff *skb)
 {
 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
- atomic_inc(&sk->sk_drops);
+ atomic_inc_unchecked(&sk->sk_drops);
 kfree_skb(skb);
 return NET_RX_DROP;
 }
@@ -748,16 +748,20 @@ static int raw_init(struct sock *sk)

 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
 {
+ struct icmp_filter filter;
+
 if (optlen > sizeof(struct icmp_filter))
 optlen = sizeof(struct icmp_filter);
- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
+ if (copy_from_user(&filter, optval, optlen))
 return -EFAULT;
+ raw_sk(sk)->filter = filter;
 return 0;
 }

 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
 {
 int len, ret = -EFAULT;
+ struct icmp_filter filter;

 if (get_user(len, optlen))
 goto out;
@@ -767,8 +771,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
 if (len > sizeof(struct icmp_filter))
 len = sizeof(struct icmp_filter);
 ret = -EFAULT;
- if (put_user(len, optlen) ||
- copy_to_user(optval, &raw_sk(sk)->filter, len))
+ filter = raw_sk(sk)->filter;
+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
 goto out;
 ret = 0;
 out: return ret;
@@ -997,7 +1001,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
 0, 0L, 0,
 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
 0, sock_i_ino(sp),
- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
 }

 static int raw_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 1901998..a9a850a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {

 static int rt_cache_seq_open(struct inode *inode, struct file *file)
 {
- return seq_open(file, &rt_cache_seq_ops);
+ return seq_open_restrict(file, &rt_cache_seq_ops);
 }

 static const struct file_operations rt_cache_seq_fops = {
@@ -319,7 +319,7 @@ static const struct seq_operations rt_cpu_seq_ops = {

 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
 {
- return seq_open(file, &rt_cpu_seq_ops);
+ return seq_open_restrict(file, &rt_cpu_seq_ops);
 }

 static const struct file_operations rt_cpu_seq_fops = {
@@ -357,7 +357,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)

 static int rt_acct_proc_open(struct inode *inode, struct file *file)
 {
- return single_open(file, rt_acct_proc_show, NULL);
+ return single_open_restrict(file, rt_acct_proc_show, NULL);
 }

 static const struct file_operations rt_acct_proc_fops = {
@@ -459,11 +459,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,

 #define IP_IDENTS_SZ 2048u
 struct ip_ident_bucket {
- atomic_t id;
+ atomic_unchecked_t id;
 u32 stamp32;
 };

-static struct ip_ident_bucket *ip_idents __read_mostly;
+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;

 /* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used. This makes hard for an attacker
@@ -479,7 +479,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
 delta = prandom_u32_max(now - old);

- return atomic_add_return(segs + delta, &bucket->id) - segs;
+ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
 }
 EXPORT_SYMBOL(ip_idents_reserve);

@@ -2625,34 +2625,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
 .maxlen = sizeof(int),
 .mode = 0200,
 .proc_handler = ipv4_sysctl_rtcache_flush,
+ .extra1 = &init_net,
 },
 { },
 };

 static __net_init int sysctl_route_net_init(struct net *net)
 {
- struct ctl_table *tbl;
+ ctl_table_no_const *tbl = NULL;

- tbl = ipv4_route_flush_table;
 if (!net_eq(net, &init_net)) {
- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
 if (tbl == NULL)
 goto err_dup;

 /* Don't export sysctls to unprivileged users */
 if (net->user_ns != &init_user_ns)
 tbl[0].procname = NULL;
- }
- tbl[0].extra1 = net;
+ tbl[0].extra1 = net;
+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
+ } else
+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);

- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
 if (net->ipv4.route_hdr == NULL)
 goto err_reg;
 return 0;

 err_reg:
- if (tbl != ipv4_route_flush_table)
- kfree(tbl);
+ kfree(tbl);
 err_dup:
 return -ENOMEM;
 }
@@ -2675,8 +2675,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {

 static __net_init int rt_genid_init(struct net *net)
 {
- atomic_set(&net->ipv4.rt_genid, 0);
- atomic_set(&net->fnhe_genid, 0);
+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
+ atomic_set_unchecked(&net->fnhe_genid, 0);
 get_random_bytes(&net->ipv4.dev_addr_genid,
 sizeof(net->ipv4.dev_addr_genid));
 return 0;
@@ -2719,11 +2719,7 @@ int __init ip_rt_init(void)
 {
 int rc = 0;

- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
- if (!ip_idents)
- panic("IP: failed to allocate ip_idents\n");
-
- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
+ prandom_bytes(ip_idents, sizeof(ip_idents));

 #ifdef CONFIG_IP_ROUTE_CLASSID
 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 79a007c..5023029 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
 container_of(table->data, struct net, ipv4.ip_local_ports.range);
 int ret;
 int range[2];
- struct ctl_table tmp = {
+ ctl_table_no_const tmp = {
 .data = &range,
 .maxlen = sizeof(range),
 .mode = table->mode,
@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
 int ret;
 gid_t urange[2];
 kgid_t low, high;
- struct ctl_table tmp = {
+ ctl_table_no_const tmp = {
 .data = &urange,
 .maxlen = sizeof(urange),
 .mode = table->mode,
@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
 void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 char val[TCP_CA_NAME_MAX];
- struct ctl_table tbl = {
+ ctl_table_no_const tbl = {
 .data = val,
 .maxlen = TCP_CA_NAME_MAX,
 };
@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
 void __user *buffer, size_t *lenp,
 loff_t *ppos)
 {
- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
 int ret;

 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
 void __user *buffer, size_t *lenp,
 loff_t *ppos)
 {
- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
 int ret;

 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
 void __user *buffer, size_t *lenp,
 loff_t *ppos)
 {
- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
 struct tcp_fastopen_context *ctxt;
 int ret;
 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
@@ -857,13 +857,12 @@ static struct ctl_table ipv4_net_table[] = {

 static __net_init int ipv4_sysctl_init_net(struct net *net)
 {
- struct ctl_table *table;
+ ctl_table_no_const *table = NULL;

- table = ipv4_net_table;
 if (!net_eq(net, &init_net)) {
 int i;

- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
 if (table == NULL)
 goto err_alloc;

@@ -872,7 +871,10 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
 table[i].data += (void *)net - (void *)&init_net;
 }

- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
+ if (!net_eq(net, &init_net))
+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
+ else
+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
 if (net->ipv4.ipv4_hdr == NULL)
 goto err_reg;

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 40639c2..dfc86b2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -754,7 +754,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
 * without any lock. We want to make sure compiler wont store
 * intermediate values in this location.
 */
- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
 sk->sk_max_pacing_rate);
 }

@@ -4478,7 +4478,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
 * simplifies code)
 */
 static void
-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 struct sk_buff *head, struct sk_buff *tail,
 u32 start, u32 end)
 {
@@ -5536,6 +5536,7 @@ discard:
 tcp_paws_reject(&tp->rx_opt, 0))
 goto discard_and_undo;

+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
 if (th->syn) {
 /* We see SYN without ACK. It is attempt of
 * simultaneous connect with crossed SYNs.
@@ -5586,6 +5587,7 @@ discard:
 goto discard;
 #endif
 }
+#endif
 /* "fifth, if neither of the SYN or RST bits is set then
 * drop the segment and return."
 */
@@ -5632,7 +5634,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 goto discard;

 if (th->syn) {
- if (th->fin)
+ if (th->fin || th->urg || th->psh)
 goto discard;
 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
 return 1;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 77cccda..10122c4 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -91,6 +91,10 @@ int sysctl_tcp_low_latency __read_mostly;
 EXPORT_SYMBOL(sysctl_tcp_low_latency);

+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+extern int grsec_enable_blackhole;
+#endif
+
 #ifdef CONFIG_TCP_MD5SIG
 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
 __be32 daddr, __be32 saddr, const struct tcphdr *th);
@@ -1591,6 +1595,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 return 0;

 reset:
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+ if (!grsec_enable_blackhole)
+#endif
 tcp_v4_send_reset(rsk, skb);
 discard:
 kfree_skb(skb);
@@ -1737,12 +1744,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
 TCP_SKB_CB(skb)->sacked = 0;

 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
- if (!sk)
+ if (!sk) {
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+ ret = 1;
+#endif
 goto no_tcp_socket;
-
+ }
 process:
- if (sk->sk_state == TCP_TIME_WAIT)
+ if (sk->sk_state == TCP_TIME_WAIT) {
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+ ret = 2;
+#endif
 goto do_time_wait;
+ }

 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
@@ -1796,6 +1810,10 @@ csum_error:
 bad_packet:
 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
 } else {
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+ if (!grsec_enable_blackhole || (ret == 1 &&
+ (skb->dev->flags & IFF_LOOPBACK)))
+#endif
 tcp_v4_send_reset(NULL, skb);
 }

diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index e68e0d4..0334263 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -27,6 +27,10 @@
 #include <net/inet_common.h>
 #include <net/xfrm.h>

+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+extern int grsec_enable_blackhole;
+#endif
+
 int sysctl_tcp_syncookies __read_mostly = 1;
 EXPORT_SYMBOL(sysctl_tcp_syncookies);

@@ -740,7 +744,10 @@ embryonic_reset:
 * avoid becoming vulnerable to outside attack aiming at
 * resetting legit local connections.
 */
- req->rsk_ops->send_reset(sk, skb);
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+ if (!grsec_enable_blackhole)
+#endif
+ req->rsk_ops->send_reset(sk, skb);
 } else if (fastopen) { /* received a valid RST pkt */
 reqsk_fastopen_remove(sk, req, true);
 tcp_reset(sk);
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 3b66610..bfbe23a 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -238,7 +238,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
 if (cnt + width >= len)
 break;

- if (copy_to_user(buf + cnt, tbuf, width))
+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
 return -EFAULT;
 cnt += width;
 }
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 286227a..c495a76 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -22,6 +22,10 @@
 #include <linux/gfp.h>
 #include <net/tcp.h>

+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+extern int grsec_lastack_retries;
+#endif
+
 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
@@ -192,6 +196,13 @@ static int tcp_write_timeout(struct sock *sk)
 }
 }

+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+ if ((sk->sk_state == TCP_LAST_ACK) &&
+ (grsec_lastack_retries > 0) &&
+ (grsec_lastack_retries < retry_until))
+ retry_until = grsec_lastack_retries;
+#endif
+
 if (retransmits_timed_out(sk, retry_until,
 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
 /* Has it gone just too far? */
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 7d5a866..4874211 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -87,6 +87,7 @@
 #include <linux/types.h>
 #include <linux/fcntl.h>
 #include <linux/module.h>
+#include <linux/security.h>
 #include <linux/socket.h>
 #include <linux/sockios.h>
 #include <linux/igmp.h>
@@ -113,6 +114,10 @@
 #include <net/busy_poll.h>
 #include "udp_impl.h"

+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+extern int grsec_enable_blackhole;
+#endif
+
 struct udp_table udp_table __read_mostly;
 EXPORT_SYMBOL(udp_table);

@@ -615,6 +620,9 @@ found:
 return s;
 }

+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
+
 /*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition. If err < 0 then the socket should
@@ -952,9 +960,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 dport = usin->sin_port;
 if (dport == 0)
 return -EINVAL;
+
+ err = gr_search_udp_sendmsg(sk, usin);
+ if (err)
+ return err;
 } else {
 if (sk->sk_state != TCP_ESTABLISHED)
 return -EDESTADDRREQ;
+
+ err = gr_search_udp_sendmsg(sk, NULL);
+ if (err)
+ return err;
+
 daddr = inet->inet_daddr;
 dport = inet->inet_dport;
 /* Open fast path for connected socket.
@@ -1202,7 +1219,7 @@ static unsigned int first_packet_length(struct sock *sk)
 IS_UDPLITE(sk));
 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
 IS_UDPLITE(sk));
- atomic_inc(&sk->sk_drops);
+ atomic_inc_unchecked(&sk->sk_drops);
 __skb_unlink(skb, rcvq);
 __skb_queue_tail(&list_kill, skb);
 }
@@ -1282,6 +1299,10 @@ try_again:
 if (!skb)
 goto out;

+ err = gr_search_udp_recvmsg(sk, skb);
+ if (err)
+ goto out_free;
+
 ulen = skb->len - sizeof(struct udphdr);
 copied = len;
 if (copied > ulen)
@@ -1315,7 +1336,7 @@ try_again:
 if (unlikely(err)) {
 trace_kfree_skb(skb, udp_recvmsg);
 if (!peeked) {
- atomic_inc(&sk->sk_drops);
+ atomic_inc_unchecked(&sk->sk_drops);
 UDP_INC_STATS_USER(sock_net(sk),
 UDP_MIB_INERRORS, is_udplite);
 }
@@ -1612,7 +1633,7 @@ csum_error:
 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
- atomic_inc(&sk->sk_drops);
+ atomic_inc_unchecked(&sk->sk_drops);
 kfree_skb(skb);
 return -1;
 }
@@ -1631,7 +1652,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);

 if (!skb1) {
- atomic_inc(&sk->sk_drops);
+ atomic_inc_unchecked(&sk->sk_drops);
 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
 IS_UDPLITE(sk));
 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
@@ -1817,6 +1838,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 goto csum_error;

 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
+#endif
 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

 /*
@@ -2403,7 +2427,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
 0, sock_i_ino(sp),
 atomic_read(&sp->sk_refcnt), sp,
- atomic_read(&sp->sk_drops));
+ atomic_read_unchecked(&sp->sk_drops));
 }

 int udp4_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 6156f68..d6ab46d 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 fl4->flowi4_tos = iph->tos;
 }

-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
+static int xfrm4_garbage_collect(struct dst_ops *ops)
 {
 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);

- xfrm4_policy_afinfo.garbage_collect(net);
+ xfrm_garbage_collect_deferred(net);
 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
 }

@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {

 static int __net_init xfrm4_net_init(struct net *net)
 {
- struct ctl_table *table;
+ ctl_table_no_const *table = NULL;
 struct ctl_table_header *hdr;

- table = xfrm4_policy_table;
 if (!net_eq(net, &init_net)) {
- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
 if (!table)
 goto err_alloc;

 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
- }
-
- hdr = register_net_sysctl(net, "net/ipv4", table);
+ hdr = register_net_sysctl(net, "net/ipv4", table);
+ } else
+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
 if (!hdr)
 goto err_reg;

@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
 return 0;

 err_reg:
- if (!net_eq(net, &init_net))
- kfree(table);
+ kfree(table);
 err_alloc:
 return -ENOMEM;
 }
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 5667b30..2044f61 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -593,7 +593,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
 idx = 0;
 head = &net->dev_index_head[h];
 rcu_read_lock();
- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
 net->dev_base_seq;
 hlist_for_each_entry_rcu(dev, head, index_hlist) {
 if (idx < s_idx)
@@ -2390,7 +2390,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
 p.iph.ihl = 5;
 p.iph.protocol = IPPROTO_IPV6;
 p.iph.ttl = 64;
- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;

 if (ops->ndo_do_ioctl) {
 mm_segment_t oldfs = get_fs();
@@ -3516,16 +3516,23 @@ static const struct file_operations if6_fops = {
 .release = seq_release_net,
 };

+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
+extern void unregister_ipv6_seq_ops_addr(void);
+
 static int __net_init if6_proc_net_init(struct net *net)
 {
- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
+ register_ipv6_seq_ops_addr(&if6_seq_ops);
+ if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) {
+ unregister_ipv6_seq_ops_addr();
 return -ENOMEM;
+ }
 return 0;
 }

 static void __net_exit if6_proc_net_exit(struct net *net)
 {
 remove_proc_entry("if_inet6", net->proc_net);
+ unregister_ipv6_seq_ops_addr();
 }

 static struct pernet_operations if6_proc_net_ops = {
@@ -4141,7 +4148,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
 s_ip_idx = ip_idx = cb->args[2];

 rcu_read_lock();
- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
 idx = 0;
 head = &net->dev_index_head[h];
@@ -4741,11 +4748,8 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)

 rt = rt6_lookup(dev_net(dev), &ifp->peer_addr, NULL,
 dev->ifindex, 1);
- if (rt) {
- dst_hold(&rt->dst);
- if (ip6_del_rt(rt))
- dst_free(&rt->dst);
- }
+ if (rt && ip6_del_rt(rt))
+ dst_free(&rt->dst);
 }
 dst_hold(&ifp->rt->dst);

@@ -4753,7 +4757,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 dst_free(&ifp->rt->dst);
 break;
 }
- atomic_inc(&net->ipv6.dev_addr_genid);
+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
 rt_genid_bump_ipv6(net);
 }

@@ -4774,7 +4778,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
 int *valp = ctl->data;
 int val = *valp;
 loff_t pos = *ppos;
- struct ctl_table lctl;
+ ctl_table_no_const lctl;
 int ret;

 /*
@@ -4859,7 +4863,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
 int *valp = ctl->data;
 int val = *valp;
 loff_t pos = *ppos;
- struct ctl_table lctl;
+ ctl_table_no_const lctl;
 int ret;

 /*
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 7cb4392..dc96d28 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -765,7 +765,7 @@ static int __net_init inet6_net_init(struct net *net)
 net->ipv6.sysctl.bindv6only = 0;
 net->ipv6.sysctl.icmpv6_time = 1*HZ;
 net->ipv6.sysctl.flowlabel_consistency = 1;
- atomic_set(&net->ipv6.rt_genid, 0);
+ atomic_set_unchecked(&net->ipv6.rt_genid, 0);

 err = ipv6_init_mibs(net);
 if (err)
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index c3bf2d2..1f00573 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -938,5 +938,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
 0,
 sock_i_ino(sp),
 atomic_read(&sp->sk_refcnt), sp,
- atomic_read(&sp->sk_drops));
+ atomic_read_unchecked(&sp->sk_drops));
 }
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index f6c84a6..9f2084e 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -990,7 +990,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {

 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
 {
- struct ctl_table *table;
+ ctl_table_no_const *table;

 table = kmemdup(ipv6_icmp_table_template,
 sizeof(ipv6_icmp_table_template),
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 3873181..220ad3f 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -71,8 +71,8 @@ struct ip6gre_net {
 struct net_device *fb_tunnel_dev;
 };

-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
+static struct rtnl_link_ops ip6gre_link_ops;
+static struct rtnl_link_ops ip6gre_tap_ops;
 static int ip6gre_tunnel_init(struct net_device *dev);
 static void ip6gre_tunnel_setup(struct net_device *dev);
 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
@@ -1280,7 +1280,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
 }


-static struct inet6_protocol ip6gre_protocol __read_mostly = {
+static struct inet6_protocol ip6gre_protocol = {
 .handler = ip6gre_rcv,
 .err_handler = ip6gre_err,
 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
@@ -1638,7 +1638,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
 };

-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
+static struct rtnl_link_ops ip6gre_link_ops = {
 .kind = "ip6gre",
 .maxtype = IFLA_GRE_MAX,
 .policy = ip6gre_policy,
@@ -1652,7 +1652,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
 .fill_info = ip6gre_fill_info,
 };

-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
+static struct rtnl_link_ops ip6gre_tap_ops = {
 .kind = "ip6gretap",
 .maxtype = IFLA_GRE_MAX,
 .policy = ip6gre_policy,
109410diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
109411index afa0824..04ba530 100644
109412--- a/net/ipv6/ip6_tunnel.c
109413+++ b/net/ipv6/ip6_tunnel.c
109414@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
109415
109416 static int ip6_tnl_dev_init(struct net_device *dev);
109417 static void ip6_tnl_dev_setup(struct net_device *dev);
109418-static struct rtnl_link_ops ip6_link_ops __read_mostly;
109419+static struct rtnl_link_ops ip6_link_ops;
109420
109421 static int ip6_tnl_net_id __read_mostly;
109422 struct ip6_tnl_net {
109423@@ -1708,7 +1708,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
109424 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
109425 };
109426
109427-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
109428+static struct rtnl_link_ops ip6_link_ops = {
109429 .kind = "ip6tnl",
109430 .maxtype = IFLA_IPTUN_MAX,
109431 .policy = ip6_tnl_policy,
109432diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
109433index 9aaa6bb..5c13e57 100644
109434--- a/net/ipv6/ip6_vti.c
109435+++ b/net/ipv6/ip6_vti.c
109436@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
109437
109438 static int vti6_dev_init(struct net_device *dev);
109439 static void vti6_dev_setup(struct net_device *dev);
109440-static struct rtnl_link_ops vti6_link_ops __read_mostly;
109441+static struct rtnl_link_ops vti6_link_ops;
109442
109443 static int vti6_net_id __read_mostly;
109444 struct vti6_net {
109445@@ -977,7 +977,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
109446 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
109447 };
109448
109449-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
109450+static struct rtnl_link_ops vti6_link_ops = {
109451 .kind = "vti6",
109452 .maxtype = IFLA_VTI_MAX,
109453 .policy = vti6_policy,
109454diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
109455index edb58af..78de133 100644
109456--- a/net/ipv6/ipv6_sockglue.c
109457+++ b/net/ipv6/ipv6_sockglue.c
109458@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
109459 if (sk->sk_type != SOCK_STREAM)
109460 return -ENOPROTOOPT;
109461
109462- msg.msg_control = optval;
109463+ msg.msg_control = (void __force_kernel *)optval;
109464 msg.msg_controllen = len;
109465 msg.msg_flags = flags;
109466
109467diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
109468index e080fbb..412b3cf 100644
109469--- a/net/ipv6/netfilter/ip6_tables.c
109470+++ b/net/ipv6/netfilter/ip6_tables.c
109471@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
109472 #endif
109473
109474 static int get_info(struct net *net, void __user *user,
109475- const int *len, int compat)
109476+ int len, int compat)
109477 {
109478 char name[XT_TABLE_MAXNAMELEN];
109479 struct xt_table *t;
109480 int ret;
109481
109482- if (*len != sizeof(struct ip6t_getinfo)) {
109483- duprintf("length %u != %zu\n", *len,
109484+ if (len != sizeof(struct ip6t_getinfo)) {
109485+ duprintf("length %u != %zu\n", len,
109486 sizeof(struct ip6t_getinfo));
109487 return -EINVAL;
109488 }
109489@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
109490 info.size = private->size;
109491 strcpy(info.name, name);
109492
109493- if (copy_to_user(user, &info, *len) != 0)
109494+ if (copy_to_user(user, &info, len) != 0)
109495 ret = -EFAULT;
109496 else
109497 ret = 0;
109498@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
109499
109500 switch (cmd) {
109501 case IP6T_SO_GET_INFO:
109502- ret = get_info(sock_net(sk), user, len, 1);
109503+ ret = get_info(sock_net(sk), user, *len, 1);
109504 break;
109505 case IP6T_SO_GET_ENTRIES:
109506 ret = compat_get_entries(sock_net(sk), user, len);
109507@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
109508
109509 switch (cmd) {
109510 case IP6T_SO_GET_INFO:
109511- ret = get_info(sock_net(sk), user, len, 0);
109512+ ret = get_info(sock_net(sk), user, *len, 0);
109513 break;
109514
109515 case IP6T_SO_GET_ENTRIES:
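The get_info() rework above changes the length parameter from const int * to a plain int passed by value, so the value validated against sizeof(struct ip6t_getinfo) is by construction the same value later handed to copy_to_user(); the callers now dereference exactly once at the call site. The shape of the pattern in plain C, with memcpy standing in for the user copy:

        #include <stddef.h>
        #include <string.h>

        struct getinfo { char name[32]; unsigned int size; };

        static int get_info_sketch(void *dst, const struct getinfo *info, size_t len)
        {
                if (len != sizeof(struct getinfo))
                        return -1;              /* -EINVAL in the kernel */
                memcpy(dst, info, len);         /* copy_to_user() stand-in */
                return 0;                       /* len cannot have changed */
        }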
109516diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
109517index 0d5279f..89d9f6f 100644
109518--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
109519+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
109520@@ -90,12 +90,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
109521
109522 static int nf_ct_frag6_sysctl_register(struct net *net)
109523 {
109524- struct ctl_table *table;
109525+ ctl_table_no_const *table = NULL;
109526 struct ctl_table_header *hdr;
109527
109528- table = nf_ct_frag6_sysctl_table;
109529 if (!net_eq(net, &init_net)) {
109530- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
109531+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
109532 GFP_KERNEL);
109533 if (table == NULL)
109534 goto err_alloc;
109535@@ -103,9 +102,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
109536 table[0].data = &net->nf_frag.frags.timeout;
109537 table[1].data = &net->nf_frag.frags.low_thresh;
109538 table[2].data = &net->nf_frag.frags.high_thresh;
109539- }
109540-
109541- hdr = register_net_sysctl(net, "net/netfilter", table);
109542+ hdr = register_net_sysctl(net, "net/netfilter", table);
109543+ } else
109544+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
109545 if (hdr == NULL)
109546 goto err_reg;
109547
109548@@ -113,8 +112,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
109549 return 0;
109550
109551 err_reg:
109552- if (!net_eq(net, &init_net))
109553- kfree(table);
109554+ kfree(table);
109555 err_alloc:
109556 return -ENOMEM;
109557 }
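This hunk follows grsecurity's sysctl constification recipe, repeated below for reassembly.c and xfrm6_policy.c: the template table stays read-only, a writable copy (typed ctl_table_no_const) is kmemdup()'d only for non-init namespaces whose entries need per-net data pointers patched in, each branch registers its own table, and the error path can kfree(table) unconditionally because table is NULL on the init_net branch. A standalone sketch of that split, with illustrative names:

        #include <stdlib.h>
        #include <string.h>

        struct entry { const char *procname; void *data; };

        static const struct entry frag_template[] = {   /* may live read-only */
                { "timeout", NULL },
                { NULL,      NULL },
        };

        static struct entry *table_for_netns(void *timeout, int is_init_ns)
        {
                struct entry *t;

                if (is_init_ns)                 /* template registered as-is */
                        return (struct entry *)frag_template;
                t = malloc(sizeof(frag_template));
                if (!t)
                        return NULL;
                memcpy(t, frag_template, sizeof(frag_template));
                t[0].data = timeout;            /* only the copy is written */
                return t;
        }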
109558diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
109559index 5b7a1ed..d9da205 100644
109560--- a/net/ipv6/ping.c
109561+++ b/net/ipv6/ping.c
109562@@ -240,6 +240,24 @@ static struct pernet_operations ping_v6_net_ops = {
109563 };
109564 #endif
109565
109566+static struct pingv6_ops real_pingv6_ops = {
109567+ .ipv6_recv_error = ipv6_recv_error,
109568+ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl,
109569+ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
109570+ .icmpv6_err_convert = icmpv6_err_convert,
109571+ .ipv6_icmp_error = ipv6_icmp_error,
109572+ .ipv6_chk_addr = ipv6_chk_addr,
109573+};
109574+
109575+static struct pingv6_ops dummy_pingv6_ops = {
109576+ .ipv6_recv_error = dummy_ipv6_recv_error,
109577+ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl,
109578+ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
109579+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
109580+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
109581+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
109582+};
109583+
109584 int __init pingv6_init(void)
109585 {
109586 #ifdef CONFIG_PROC_FS
109587@@ -247,13 +265,7 @@ int __init pingv6_init(void)
109588 if (ret)
109589 return ret;
109590 #endif
109591- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
109592- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
109593- pingv6_ops.ip6_datagram_recv_specific_ctl =
109594- ip6_datagram_recv_specific_ctl;
109595- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
109596- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
109597- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
109598+ pingv6_ops = &real_pingv6_ops;
109599 return inet6_register_protosw(&pingv6_protosw);
109600 }
109601
109602@@ -262,14 +274,9 @@ int __init pingv6_init(void)
109603 */
109604 void pingv6_exit(void)
109605 {
109606- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
109607- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
109608- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
109609- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
109610- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
109611- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
109612 #ifdef CONFIG_PROC_FS
109613 unregister_pernet_subsys(&ping_v6_net_ops);
109614 #endif
109615+ pingv6_ops = &dummy_pingv6_ops;
109616 inet6_unregister_protosw(&pingv6_protosw);
109617 }
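Rather than writing six function pointers into a global pingv6_ops structure one by one at init and exit, the patch keeps two fully populated static tables and flips a single pointer between them (pingv6_ops evidently becomes a pointer in a companion header change not shown in this excerpt), shrinking the writable state to one word. The swap in miniature:

        struct ping_ops {
                int (*recv_error)(void);
                int (*err_convert)(void);
        };

        static int real_recv_error(void)   { return 0; }
        static int real_err_convert(void)  { return 0; }
        static int dummy_recv_error(void)  { return -1; }
        static int dummy_err_convert(void) { return -1; }

        static const struct ping_ops real_ops  = { real_recv_error,  real_err_convert };
        static const struct ping_ops dummy_ops = { dummy_recv_error, dummy_err_convert };

        /* one writable pointer instead of N writable function pointers */
        static const struct ping_ops *ping_ops = &dummy_ops;

        static void ping_init(void) { ping_ops = &real_ops; }
        static void ping_exit(void) { ping_ops = &dummy_ops; }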
109618diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
109619index 3317440..201764e 100644
109620--- a/net/ipv6/proc.c
109621+++ b/net/ipv6/proc.c
109622@@ -309,7 +309,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
109623 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
109624 goto proc_snmp6_fail;
109625
109626- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
109627+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
109628 if (!net->mib.proc_net_devsnmp6)
109629 goto proc_dev_snmp6_fail;
109630 return 0;
109631diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
109632index b2dc60b..a6b6c10 100644
109633--- a/net/ipv6/raw.c
109634+++ b/net/ipv6/raw.c
109635@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
109636 {
109637 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
109638 skb_checksum_complete(skb)) {
109639- atomic_inc(&sk->sk_drops);
109640+ atomic_inc_unchecked(&sk->sk_drops);
109641 kfree_skb(skb);
109642 return NET_RX_DROP;
109643 }
109644@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
109645 struct raw6_sock *rp = raw6_sk(sk);
109646
109647 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
109648- atomic_inc(&sk->sk_drops);
109649+ atomic_inc_unchecked(&sk->sk_drops);
109650 kfree_skb(skb);
109651 return NET_RX_DROP;
109652 }
109653@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
109654
109655 if (inet->hdrincl) {
109656 if (skb_checksum_complete(skb)) {
109657- atomic_inc(&sk->sk_drops);
109658+ atomic_inc_unchecked(&sk->sk_drops);
109659 kfree_skb(skb);
109660 return NET_RX_DROP;
109661 }
109662@@ -610,7 +610,7 @@ out:
109663 return err;
109664 }
109665
109666-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
109667+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
109668 struct flowi6 *fl6, struct dst_entry **dstp,
109669 unsigned int flags)
109670 {
109671@@ -916,12 +916,15 @@ do_confirm:
109672 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
109673 char __user *optval, int optlen)
109674 {
109675+ struct icmp6_filter filter;
109676+
109677 switch (optname) {
109678 case ICMPV6_FILTER:
109679 if (optlen > sizeof(struct icmp6_filter))
109680 optlen = sizeof(struct icmp6_filter);
109681- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
109682+ if (copy_from_user(&filter, optval, optlen))
109683 return -EFAULT;
109684+ raw6_sk(sk)->filter = filter;
109685 return 0;
109686 default:
109687 return -ENOPROTOOPT;
109688@@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
109689 char __user *optval, int __user *optlen)
109690 {
109691 int len;
109692+ struct icmp6_filter filter;
109693
109694 switch (optname) {
109695 case ICMPV6_FILTER:
109696@@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
109697 len = sizeof(struct icmp6_filter);
109698 if (put_user(len, optlen))
109699 return -EFAULT;
109700- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
109701+ filter = raw6_sk(sk)->filter;
109702+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
109703 return -EFAULT;
109704 return 0;
109705 default:
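Both ICMPV6_FILTER paths above now stage the filter through a stack copy: the user copy is clamped to sizeof(struct icmp6_filter), and the socket's live filter is updated in a single assignment only after the copy succeeded, so a partial or oversized copy can never touch socket state. The pattern, sketched with memcpy standing in for copy_from_user():

        #include <stddef.h>
        #include <string.h>

        struct filter6 { unsigned int data[8]; };

        static int set_filter(struct filter6 *live, const void *optval, size_t optlen)
        {
                struct filter6 tmp;

                if (optlen > sizeof(tmp))
                        optlen = sizeof(tmp);   /* clamp before copying */
                memcpy(&tmp, optval, optlen);   /* copy_from_user() stand-in */
                *live = tmp;                    /* commit only after success */
                return 0;
        }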
109706diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
109707index cc85a9b..526a133 100644
109708--- a/net/ipv6/reassembly.c
109709+++ b/net/ipv6/reassembly.c
109710@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
109711
109712 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
109713 {
109714- struct ctl_table *table;
109715+ ctl_table_no_const *table = NULL;
109716 struct ctl_table_header *hdr;
109717
109718- table = ip6_frags_ns_ctl_table;
109719 if (!net_eq(net, &init_net)) {
109720- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
109721+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
109722 if (table == NULL)
109723 goto err_alloc;
109724
109725@@ -642,9 +641,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
109726 /* Don't export sysctls to unprivileged users */
109727 if (net->user_ns != &init_user_ns)
109728 table[0].procname = NULL;
109729- }
109730+ hdr = register_net_sysctl(net, "net/ipv6", table);
109731+ } else
109732+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
109733
109734- hdr = register_net_sysctl(net, "net/ipv6", table);
109735 if (hdr == NULL)
109736 goto err_reg;
109737
109738@@ -652,8 +652,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
109739 return 0;
109740
109741 err_reg:
109742- if (!net_eq(net, &init_net))
109743- kfree(table);
109744+ kfree(table);
109745 err_alloc:
109746 return -ENOMEM;
109747 }
109748diff --git a/net/ipv6/route.c b/net/ipv6/route.c
109749index f23fbd2..7868241 100644
109750--- a/net/ipv6/route.c
109751+++ b/net/ipv6/route.c
109752@@ -2971,7 +2971,7 @@ struct ctl_table ipv6_route_table_template[] = {
109753
109754 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
109755 {
109756- struct ctl_table *table;
109757+ ctl_table_no_const *table;
109758
109759 table = kmemdup(ipv6_route_table_template,
109760 sizeof(ipv6_route_table_template),
109761diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
109762index 4f40817..54dcbef 100644
109763--- a/net/ipv6/sit.c
109764+++ b/net/ipv6/sit.c
109765@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
109766 static void ipip6_dev_free(struct net_device *dev);
109767 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
109768 __be32 *v4dst);
109769-static struct rtnl_link_ops sit_link_ops __read_mostly;
109770+static struct rtnl_link_ops sit_link_ops;
109771
109772 static int sit_net_id __read_mostly;
109773 struct sit_net {
109774@@ -1661,7 +1661,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
109775 unregister_netdevice_queue(dev, head);
109776 }
109777
109778-static struct rtnl_link_ops sit_link_ops __read_mostly = {
109779+static struct rtnl_link_ops sit_link_ops = {
109780 .kind = "sit",
109781 .maxtype = IFLA_IPTUN_MAX,
109782 .policy = ipip6_policy,
109783diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
109784index 058f3ec..dec973d 100644
109785--- a/net/ipv6/sysctl_net_ipv6.c
109786+++ b/net/ipv6/sysctl_net_ipv6.c
109787@@ -61,7 +61,7 @@ static struct ctl_table ipv6_rotable[] = {
109788
109789 static int __net_init ipv6_sysctl_net_init(struct net *net)
109790 {
109791- struct ctl_table *ipv6_table;
109792+ ctl_table_no_const *ipv6_table;
109793 struct ctl_table *ipv6_route_table;
109794 struct ctl_table *ipv6_icmp_table;
109795 int err;
109796diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
109797index 229239ad..ee2802f 100644
109798--- a/net/ipv6/tcp_ipv6.c
109799+++ b/net/ipv6/tcp_ipv6.c
109800@@ -102,6 +102,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
109801 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
109802 }
109803
109804+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109805+extern int grsec_enable_blackhole;
109806+#endif
109807+
109808 static void tcp_v6_hash(struct sock *sk)
109809 {
109810 if (sk->sk_state != TCP_CLOSE) {
109811@@ -1424,6 +1428,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
109812 return 0;
109813
109814 reset:
109815+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109816+ if (!grsec_enable_blackhole)
109817+#endif
109818 tcp_v6_send_reset(sk, skb);
109819 discard:
109820 if (opt_skb)
109821@@ -1508,12 +1515,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
109822 TCP_SKB_CB(skb)->sacked = 0;
109823
109824 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
109825- if (!sk)
109826+ if (!sk) {
109827+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109828+ ret = 1;
109829+#endif
109830 goto no_tcp_socket;
109831+ }
109832
109833 process:
109834- if (sk->sk_state == TCP_TIME_WAIT)
109835+ if (sk->sk_state == TCP_TIME_WAIT) {
109836+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109837+ ret = 2;
109838+#endif
109839 goto do_time_wait;
109840+ }
109841
109842 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
109843 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
109844@@ -1565,6 +1580,10 @@ csum_error:
109845 bad_packet:
109846 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
109847 } else {
109848+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109849+ if (!grsec_enable_blackhole || (ret == 1 &&
109850+ (skb->dev->flags & IFF_LOOPBACK)))
109851+#endif
109852 tcp_v6_send_reset(NULL, skb);
109853 }
109854
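With CONFIG_GRKERNSEC_BLACKHOLE, tcp_v6_rcv() stops answering unmatched segments with a RST; ret records where the lookup failed (1: no socket, 2: TIME_WAIT) so a reset can still be sent when the miss happened on loopback, and the same gate guards icmpv6_send() in the UDP hunk below. The decision reduces to:

        /* sketch of the reset gate exactly as the hunk wires it up */
        static int should_send_reset(int blackhole_enabled, int ret, int on_loopback)
        {
                return !blackhole_enabled || (ret == 1 && on_loopback);
        }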
109855diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
109856index 7092ff7..3fd0eb4 100644
109857--- a/net/ipv6/udp.c
109858+++ b/net/ipv6/udp.c
109859@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
109860 udp_ipv6_hash_secret + net_hash_mix(net));
109861 }
109862
109863+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109864+extern int grsec_enable_blackhole;
109865+#endif
109866+
109867 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
109868 {
109869 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
109870@@ -435,7 +439,7 @@ try_again:
109871 if (unlikely(err)) {
109872 trace_kfree_skb(skb, udpv6_recvmsg);
109873 if (!peeked) {
109874- atomic_inc(&sk->sk_drops);
109875+ atomic_inc_unchecked(&sk->sk_drops);
109876 if (is_udp4)
109877 UDP_INC_STATS_USER(sock_net(sk),
109878 UDP_MIB_INERRORS,
109879@@ -698,7 +702,7 @@ csum_error:
109880 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
109881 drop:
109882 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
109883- atomic_inc(&sk->sk_drops);
109884+ atomic_inc_unchecked(&sk->sk_drops);
109885 kfree_skb(skb);
109886 return -1;
109887 }
109888@@ -754,7 +758,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
109889 if (likely(skb1 == NULL))
109890 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
109891 if (!skb1) {
109892- atomic_inc(&sk->sk_drops);
109893+ atomic_inc_unchecked(&sk->sk_drops);
109894 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
109895 IS_UDPLITE(sk));
109896 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
109897@@ -920,6 +924,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
109898 goto csum_error;
109899
109900 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
109901+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109902+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
109903+#endif
109904 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
109905
109906 kfree_skb(skb);
109907diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
109908index 2a0bbda..d75ca57 100644
109909--- a/net/ipv6/xfrm6_policy.c
109910+++ b/net/ipv6/xfrm6_policy.c
109911@@ -212,11 +212,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
109912 }
109913 }
109914
109915-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
109916+static int xfrm6_garbage_collect(struct dst_ops *ops)
109917 {
109918 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
109919
109920- xfrm6_policy_afinfo.garbage_collect(net);
109921+ xfrm_garbage_collect_deferred(net);
109922 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
109923 }
109924
109925@@ -329,19 +329,19 @@ static struct ctl_table xfrm6_policy_table[] = {
109926
109927 static int __net_init xfrm6_net_init(struct net *net)
109928 {
109929- struct ctl_table *table;
109930+ ctl_table_no_const *table = NULL;
109931 struct ctl_table_header *hdr;
109932
109933- table = xfrm6_policy_table;
109934 if (!net_eq(net, &init_net)) {
109935- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
109936+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
109937 if (!table)
109938 goto err_alloc;
109939
109940 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
109941- }
109942+ hdr = register_net_sysctl(net, "net/ipv6", table);
109943+ } else
109944+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
109945
109946- hdr = register_net_sysctl(net, "net/ipv6", table);
109947 if (!hdr)
109948 goto err_reg;
109949
109950@@ -349,8 +349,7 @@ static int __net_init xfrm6_net_init(struct net *net)
109951 return 0;
109952
109953 err_reg:
109954- if (!net_eq(net, &init_net))
109955- kfree(table);
109956+ kfree(table);
109957 err_alloc:
109958 return -ENOMEM;
109959 }
109960diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
109961index e15c16a..7cf07aa 100644
109962--- a/net/ipx/ipx_proc.c
109963+++ b/net/ipx/ipx_proc.c
109964@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
109965 struct proc_dir_entry *p;
109966 int rc = -ENOMEM;
109967
109968- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
109969+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
109970
109971 if (!ipx_proc_dir)
109972 goto out;
109973diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
109974index 2ba8b97..6d33010 100644
109975--- a/net/irda/ircomm/ircomm_tty.c
109976+++ b/net/irda/ircomm/ircomm_tty.c
109977@@ -317,11 +317,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
109978 add_wait_queue(&port->open_wait, &wait);
109979
109980 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
109981- __FILE__, __LINE__, tty->driver->name, port->count);
109982+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
109983
109984 spin_lock_irqsave(&port->lock, flags);
109985 if (!tty_hung_up_p(filp))
109986- port->count--;
109987+ atomic_dec(&port->count);
109988 port->blocked_open++;
109989 spin_unlock_irqrestore(&port->lock, flags);
109990
109991@@ -356,7 +356,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
109992 }
109993
109994 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
109995- __FILE__, __LINE__, tty->driver->name, port->count);
109996+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
109997
109998 schedule();
109999 }
110000@@ -366,12 +366,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
110001
110002 spin_lock_irqsave(&port->lock, flags);
110003 if (!tty_hung_up_p(filp))
110004- port->count++;
110005+ atomic_inc(&port->count);
110006 port->blocked_open--;
110007 spin_unlock_irqrestore(&port->lock, flags);
110008
110009 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
110010- __FILE__, __LINE__, tty->driver->name, port->count);
110011+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
110012
110013 if (!retval)
110014 port->flags |= ASYNC_NORMAL_ACTIVE;
110015@@ -445,12 +445,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
110016
110017 /* ++ is not atomic, so this should be protected - Jean II */
110018 spin_lock_irqsave(&self->port.lock, flags);
110019- self->port.count++;
110020+ atomic_inc(&self->port.count);
110021 spin_unlock_irqrestore(&self->port.lock, flags);
110022 tty_port_tty_set(&self->port, tty);
110023
110024 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
110025- self->line, self->port.count);
110026+ self->line, atomic_read(&self->port.count));
110027
110028 /* Not really used by us, but lets do it anyway */
110029 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
110030@@ -987,7 +987,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
110031 tty_kref_put(port->tty);
110032 }
110033 port->tty = NULL;
110034- port->count = 0;
110035+ atomic_set(&port->count, 0);
110036 spin_unlock_irqrestore(&port->lock, flags);
110037
110038 wake_up_interruptible(&port->open_wait);
110039@@ -1344,7 +1344,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
110040 seq_putc(m, '\n');
110041
110042 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
110043- seq_printf(m, "Open count: %d\n", self->port.count);
110044+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
110045 seq_printf(m, "Max data size: %d\n", self->max_data_size);
110046 seq_printf(m, "Max header size: %d\n", self->max_header_size);
110047
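tty_port.count is converted to an atomic_t by this series (the struct change lives elsewhere in the patch), so every increment, decrement and read in ircomm_tty goes through atomic helpers and the debug printouts no longer read a plain int that a concurrent open/close could be updating. The same accessor discipline in portable C11:

        #include <stdatomic.h>

        struct port { atomic_int count; };

        static void port_get(struct port *p)   { atomic_fetch_add(&p->count, 1); }
        static void port_put(struct port *p)   { atomic_fetch_sub(&p->count, 1); }
        static int  port_users(struct port *p) { return atomic_load(&p->count); }
        static void port_reset(struct port *p) { atomic_store(&p->count, 0); }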
110048diff --git a/net/irda/irproc.c b/net/irda/irproc.c
110049index b9ac598..f88cc56 100644
110050--- a/net/irda/irproc.c
110051+++ b/net/irda/irproc.c
110052@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
110053 {
110054 int i;
110055
110056- proc_irda = proc_mkdir("irda", init_net.proc_net);
110057+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
110058 if (proc_irda == NULL)
110059 return;
110060
110061diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
110062index 7a95fa4..57be196 100644
110063--- a/net/iucv/af_iucv.c
110064+++ b/net/iucv/af_iucv.c
110065@@ -686,10 +686,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
110066 {
110067 char name[12];
110068
110069- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
110070+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
110071 while (__iucv_get_sock_by_name(name)) {
110072 sprintf(name, "%08x",
110073- atomic_inc_return(&iucv_sk_list.autobind_name));
110074+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
110075 }
110076 memcpy(iucv->src_name, name, 8);
110077 }
110078diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
110079index da78793..bdd78cf 100644
110080--- a/net/iucv/iucv.c
110081+++ b/net/iucv/iucv.c
110082@@ -702,7 +702,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
110083 return NOTIFY_OK;
110084 }
110085
110086-static struct notifier_block __refdata iucv_cpu_notifier = {
110087+static struct notifier_block iucv_cpu_notifier = {
110088 .notifier_call = iucv_cpu_notify,
110089 };
110090
110091diff --git a/net/key/af_key.c b/net/key/af_key.c
110092index ba2a2f9..b658bc3 100644
110093--- a/net/key/af_key.c
110094+++ b/net/key/af_key.c
110095@@ -3052,10 +3052,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
110096 static u32 get_acqseq(void)
110097 {
110098 u32 res;
110099- static atomic_t acqseq;
110100+ static atomic_unchecked_t acqseq;
110101
110102 do {
110103- res = atomic_inc_return(&acqseq);
110104+ res = atomic_inc_return_unchecked(&acqseq);
110105 } while (!res);
110106 return res;
110107 }
110108diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
110109index 76125c5..e474828 100644
110110--- a/net/l2tp/l2tp_eth.c
110111+++ b/net/l2tp/l2tp_eth.c
110112@@ -42,12 +42,12 @@ struct l2tp_eth {
110113 struct sock *tunnel_sock;
110114 struct l2tp_session *session;
110115 struct list_head list;
110116- atomic_long_t tx_bytes;
110117- atomic_long_t tx_packets;
110118- atomic_long_t tx_dropped;
110119- atomic_long_t rx_bytes;
110120- atomic_long_t rx_packets;
110121- atomic_long_t rx_errors;
110122+ atomic_long_unchecked_t tx_bytes;
110123+ atomic_long_unchecked_t tx_packets;
110124+ atomic_long_unchecked_t tx_dropped;
110125+ atomic_long_unchecked_t rx_bytes;
110126+ atomic_long_unchecked_t rx_packets;
110127+ atomic_long_unchecked_t rx_errors;
110128 };
110129
110130 /* via l2tp_session_priv() */
110131@@ -98,10 +98,10 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
110132 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
110133
110134 if (likely(ret == NET_XMIT_SUCCESS)) {
110135- atomic_long_add(len, &priv->tx_bytes);
110136- atomic_long_inc(&priv->tx_packets);
110137+ atomic_long_add_unchecked(len, &priv->tx_bytes);
110138+ atomic_long_inc_unchecked(&priv->tx_packets);
110139 } else {
110140- atomic_long_inc(&priv->tx_dropped);
110141+ atomic_long_inc_unchecked(&priv->tx_dropped);
110142 }
110143 return NETDEV_TX_OK;
110144 }
110145@@ -111,12 +111,12 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
110146 {
110147 struct l2tp_eth *priv = netdev_priv(dev);
110148
110149- stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
110150- stats->tx_packets = atomic_long_read(&priv->tx_packets);
110151- stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
110152- stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
110153- stats->rx_packets = atomic_long_read(&priv->rx_packets);
110154- stats->rx_errors = atomic_long_read(&priv->rx_errors);
110155+ stats->tx_bytes = atomic_long_read_unchecked(&priv->tx_bytes);
110156+ stats->tx_packets = atomic_long_read_unchecked(&priv->tx_packets);
110157+ stats->tx_dropped = atomic_long_read_unchecked(&priv->tx_dropped);
110158+ stats->rx_bytes = atomic_long_read_unchecked(&priv->rx_bytes);
110159+ stats->rx_packets = atomic_long_read_unchecked(&priv->rx_packets);
110160+ stats->rx_errors = atomic_long_read_unchecked(&priv->rx_errors);
110161 return stats;
110162 }
110163
110164@@ -166,15 +166,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
110165 nf_reset(skb);
110166
110167 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
110168- atomic_long_inc(&priv->rx_packets);
110169- atomic_long_add(data_len, &priv->rx_bytes);
110170+ atomic_long_inc_unchecked(&priv->rx_packets);
110171+ atomic_long_add_unchecked(data_len, &priv->rx_bytes);
110172 } else {
110173- atomic_long_inc(&priv->rx_errors);
110174+ atomic_long_inc_unchecked(&priv->rx_errors);
110175 }
110176 return;
110177
110178 error:
110179- atomic_long_inc(&priv->rx_errors);
110180+ atomic_long_inc_unchecked(&priv->rx_errors);
110181 kfree_skb(skb);
110182 }
110183
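The *_unchecked atomic variants used for the l2tp_eth counters opt them out of PAX_REFCOUNT's overflow detection: byte and packet statistics may legitimately wrap around, unlike reference counts, where a wrap is an exploitable bug. The two flavors, caricatured:

        #include <assert.h>
        #include <limits.h>

        /* refcount flavor: overflow is a bug, so it must trap */
        static long ref_inc(long v)
        {
                assert(v < LONG_MAX);
                return v + 1;
        }

        /* statistics flavor ("unchecked"): silent wraparound is fine */
        static unsigned long stat_inc(unsigned long v)
        {
                return v + 1;
        }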
110184diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
110185index 13752d9..b704a93 100644
110186--- a/net/l2tp/l2tp_ppp.c
110187+++ b/net/l2tp/l2tp_ppp.c
110188@@ -755,7 +755,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
110189 /* If PMTU discovery was enabled, use the MTU that was discovered */
110190 dst = sk_dst_get(tunnel->sock);
110191 if (dst != NULL) {
110192- u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock));
110193+ u32 pmtu = dst_mtu(dst);
110194+
110195 if (pmtu != 0)
110196 session->mtu = session->mru = pmtu -
110197 PPPOL2TP_HEADER_OVERHEAD;
110198diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
110199index 1a3c7e0..80f8b0c 100644
110200--- a/net/llc/llc_proc.c
110201+++ b/net/llc/llc_proc.c
110202@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
110203 int rc = -ENOMEM;
110204 struct proc_dir_entry *p;
110205
110206- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
110207+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
110208 if (!llc_proc_dir)
110209 goto out;
110210
110211diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
110212index 592f4b1..efa7aa9 100644
110213--- a/net/mac80211/cfg.c
110214+++ b/net/mac80211/cfg.c
110215@@ -864,7 +864,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
110216 ret = ieee80211_vif_use_channel(sdata, chandef,
110217 IEEE80211_CHANCTX_EXCLUSIVE);
110218 }
110219- } else if (local->open_count == local->monitors) {
110220+ } else if (local_read(&local->open_count) == local->monitors) {
110221 local->_oper_chandef = *chandef;
110222 ieee80211_hw_config(local, 0);
110223 }
110224@@ -3574,7 +3574,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
110225 else
110226 local->probe_req_reg--;
110227
110228- if (!local->open_count)
110229+ if (!local_read(&local->open_count))
110230 break;
110231
110232 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
110233@@ -3723,8 +3723,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
110234 if (chanctx_conf) {
110235 *chandef = chanctx_conf->def;
110236 ret = 0;
110237- } else if (local->open_count > 0 &&
110238- local->open_count == local->monitors &&
110239+ } else if (local_read(&local->open_count) > 0 &&
110240+ local_read(&local->open_count) == local->monitors &&
110241 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
110242 if (local->use_chanctx)
110243 *chandef = local->monitor_chandef;
110244diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
110245index ac9836e..32613c1 100644
110246--- a/net/mac80211/ieee80211_i.h
110247+++ b/net/mac80211/ieee80211_i.h
110248@@ -28,6 +28,7 @@
110249 #include <net/ieee80211_radiotap.h>
110250 #include <net/cfg80211.h>
110251 #include <net/mac80211.h>
110252+#include <asm/local.h>
110253 #include "key.h"
110254 #include "sta_info.h"
110255 #include "debug.h"
110256@@ -1011,7 +1012,7 @@ struct ieee80211_local {
110257 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
110258 spinlock_t queue_stop_reason_lock;
110259
110260- int open_count;
110261+ local_t open_count;
110262 int monitors, cooked_mntrs;
110263 /* number of interfaces with corresponding FIF_ flags */
110264 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
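open_count becomes a local_t, with <asm/local.h> pulled in for the type, and every reader in cfg.c, iface.c, main.c, pm.c, rate.c and util.c is rewritten as local_read(); a local_t is essentially an atomic long that stays cheap while updates remain on one CPU. A rough equivalent of the accessor set, using GCC builtins:

        typedef struct { long v; } local_like_t;

        static long local_like_read(const local_like_t *l)
        {
                return __atomic_load_n(&l->v, __ATOMIC_RELAXED);
        }
        static void local_like_inc(local_like_t *l)
        {
                __atomic_fetch_add(&l->v, 1, __ATOMIC_RELAXED);
        }
        static void local_like_dec(local_like_t *l)
        {
                __atomic_fetch_sub(&l->v, 1, __ATOMIC_RELAXED);
        }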
110265diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
110266index 388b863..6575b55 100644
110267--- a/net/mac80211/iface.c
110268+++ b/net/mac80211/iface.c
110269@@ -531,7 +531,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
110270 break;
110271 }
110272
110273- if (local->open_count == 0) {
110274+ if (local_read(&local->open_count) == 0) {
110275 res = drv_start(local);
110276 if (res)
110277 goto err_del_bss;
110278@@ -578,7 +578,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
110279 res = drv_add_interface(local, sdata);
110280 if (res)
110281 goto err_stop;
110282- } else if (local->monitors == 0 && local->open_count == 0) {
110283+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
110284 res = ieee80211_add_virtual_monitor(local);
110285 if (res)
110286 goto err_stop;
110287@@ -687,7 +687,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
110288 atomic_inc(&local->iff_promiscs);
110289
110290 if (coming_up)
110291- local->open_count++;
110292+ local_inc(&local->open_count);
110293
110294 if (hw_reconf_flags)
110295 ieee80211_hw_config(local, hw_reconf_flags);
110296@@ -725,7 +725,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
110297 err_del_interface:
110298 drv_remove_interface(local, sdata);
110299 err_stop:
110300- if (!local->open_count)
110301+ if (!local_read(&local->open_count))
110302 drv_stop(local);
110303 err_del_bss:
110304 sdata->bss = NULL;
110305@@ -888,7 +888,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
110306 }
110307
110308 if (going_down)
110309- local->open_count--;
110310+ local_dec(&local->open_count);
110311
110312 switch (sdata->vif.type) {
110313 case NL80211_IFTYPE_AP_VLAN:
110314@@ -949,7 +949,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
110315 }
110316 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
110317
110318- if (local->open_count == 0)
110319+ if (local_read(&local->open_count) == 0)
110320 ieee80211_clear_tx_pending(local);
110321
110322 /*
110323@@ -989,7 +989,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
110324
110325 ieee80211_recalc_ps(local, -1);
110326
110327- if (local->open_count == 0) {
110328+ if (local_read(&local->open_count) == 0) {
110329 ieee80211_stop_device(local);
110330
110331 /* no reconfiguring after stop! */
110332@@ -1000,7 +1000,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
110333 ieee80211_configure_filter(local);
110334 ieee80211_hw_config(local, hw_reconf_flags);
110335
110336- if (local->monitors == local->open_count)
110337+ if (local->monitors == local_read(&local->open_count))
110338 ieee80211_add_virtual_monitor(local);
110339 }
110340
110341diff --git a/net/mac80211/main.c b/net/mac80211/main.c
110342index d17c26d..43d6bfb 100644
110343--- a/net/mac80211/main.c
110344+++ b/net/mac80211/main.c
110345@@ -174,7 +174,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
110346 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
110347 IEEE80211_CONF_CHANGE_POWER);
110348
110349- if (changed && local->open_count) {
110350+ if (changed && local_read(&local->open_count)) {
110351 ret = drv_config(local, changed);
110352 /*
110353 * Goal:
110354diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
110355index d478b88..8c8d157 100644
110356--- a/net/mac80211/pm.c
110357+++ b/net/mac80211/pm.c
110358@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
110359 struct ieee80211_sub_if_data *sdata;
110360 struct sta_info *sta;
110361
110362- if (!local->open_count)
110363+ if (!local_read(&local->open_count))
110364 goto suspend;
110365
110366 ieee80211_scan_cancel(local);
110367@@ -58,7 +58,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
110368 cancel_work_sync(&local->dynamic_ps_enable_work);
110369 del_timer_sync(&local->dynamic_ps_timer);
110370
110371- local->wowlan = wowlan && local->open_count;
110372+ local->wowlan = wowlan && local_read(&local->open_count);
110373 if (local->wowlan) {
110374 int err = drv_suspend(local, wowlan);
110375 if (err < 0) {
110376@@ -123,7 +123,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
110377 WARN_ON(!list_empty(&local->chanctx_list));
110378
110379 /* stop hardware - this must stop RX */
110380- if (local->open_count)
110381+ if (local_read(&local->open_count))
110382 ieee80211_stop_device(local);
110383
110384 suspend:
110385diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
110386index 8fdadfd..a4f72b8 100644
110387--- a/net/mac80211/rate.c
110388+++ b/net/mac80211/rate.c
110389@@ -720,7 +720,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
110390
110391 ASSERT_RTNL();
110392
110393- if (local->open_count)
110394+ if (local_read(&local->open_count))
110395 return -EBUSY;
110396
110397 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
110398diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
110399index 6ff1346..936ca9a 100644
110400--- a/net/mac80211/rc80211_pid_debugfs.c
110401+++ b/net/mac80211/rc80211_pid_debugfs.c
110402@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
110403
110404 spin_unlock_irqrestore(&events->lock, status);
110405
110406- if (copy_to_user(buf, pb, p))
110407+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
110408 return -EFAULT;
110409
110410 return p;
110411diff --git a/net/mac80211/util.c b/net/mac80211/util.c
110412index a6cda52..f3b6776 100644
110413--- a/net/mac80211/util.c
110414+++ b/net/mac80211/util.c
110415@@ -1548,7 +1548,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
110416 }
110417 #endif
110418 /* everything else happens only if HW was up & running */
110419- if (!local->open_count)
110420+ if (!local_read(&local->open_count))
110421 goto wake_up;
110422
110423 /*
110424@@ -1772,7 +1772,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
110425 local->in_reconfig = false;
110426 barrier();
110427
110428- if (local->monitors == local->open_count && local->monitors > 0)
110429+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
110430 ieee80211_add_virtual_monitor(local);
110431
110432 /*
110433diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
110434index e9410d1..77b6378 100644
110435--- a/net/netfilter/Kconfig
110436+++ b/net/netfilter/Kconfig
110437@@ -1081,6 +1081,16 @@ config NETFILTER_XT_MATCH_ESP
110438
110439 To compile it as a module, choose M here. If unsure, say N.
110440
110441+config NETFILTER_XT_MATCH_GRADM
110442+ tristate '"gradm" match support'
110443+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
110444+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
110445+ ---help---
110446 +	  The gradm match allows matching on whether grsecurity RBAC is enabled.
110447 +	  It is useful when iptables rules are applied early during boot to
110448+ prevent connections to the machine (except from a trusted host)
110449+ while the RBAC system is disabled.
110450+
110451 config NETFILTER_XT_MATCH_HASHLIMIT
110452 tristate '"hashlimit" match support'
110453 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
110454diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
110455index bffdad7..f9317d1 100644
110456--- a/net/netfilter/Makefile
110457+++ b/net/netfilter/Makefile
110458@@ -133,6 +133,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
110459 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
110460 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
110461 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
110462+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
110463 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
110464 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
110465 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
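The Kconfig entry and Makefile line above wire the new xt_gradm match into the xtables build; the source file itself begins at the end of this excerpt and is cut off by the scrape. For orientation, a generic skeleton of what an xtables match module of this kernel vintage needs — the body is illustrative only, not the patch's actual implementation:

        #include <linux/module.h>
        #include <linux/skbuff.h>
        #include <linux/netfilter/x_tables.h>

        static bool gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
        {
                /* illustrative: the real match tests whether the RBAC
                 * system is currently enabled */
                return false;
        }

        static struct xt_match gradm_mt_reg __read_mostly = {
                .name     = "gradm",
                .revision = 0,
                .family   = NFPROTO_UNSPEC,
                .match    = gradm_mt,
                .me       = THIS_MODULE,
        };

        static int __init gradm_mt_init(void)
        {
                return xt_register_match(&gradm_mt_reg);
        }

        static void __exit gradm_mt_exit(void)
        {
                xt_unregister_match(&gradm_mt_reg);
        }

        module_init(gradm_mt_init);
        module_exit(gradm_mt_exit);
        MODULE_LICENSE("GPL");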
110466diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
110467index ec8114f..6b2bfba 100644
110468--- a/net/netfilter/ipset/ip_set_core.c
110469+++ b/net/netfilter/ipset/ip_set_core.c
110470@@ -1921,7 +1921,7 @@ done:
110471 return ret;
110472 }
110473
110474-static struct nf_sockopt_ops so_set __read_mostly = {
110475+static struct nf_sockopt_ops so_set = {
110476 .pf = PF_INET,
110477 .get_optmin = SO_IP_SET,
110478 .get_optmax = SO_IP_SET + 1,
110479diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
110480index 610e19c..08d0c3f 100644
110481--- a/net/netfilter/ipvs/ip_vs_conn.c
110482+++ b/net/netfilter/ipvs/ip_vs_conn.c
110483@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
110484 /* Increase the refcnt counter of the dest */
110485 ip_vs_dest_hold(dest);
110486
110487- conn_flags = atomic_read(&dest->conn_flags);
110488+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
110489 if (cp->protocol != IPPROTO_UDP)
110490 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
110491 flags = cp->flags;
110492@@ -899,7 +899,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
110493
110494 cp->control = NULL;
110495 atomic_set(&cp->n_control, 0);
110496- atomic_set(&cp->in_pkts, 0);
110497+ atomic_set_unchecked(&cp->in_pkts, 0);
110498
110499 cp->packet_xmit = NULL;
110500 cp->app = NULL;
110501@@ -1187,7 +1187,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
110502
110503 /* Don't drop the entry if its number of incoming packets is not
110504 located in [0, 8] */
110505- i = atomic_read(&cp->in_pkts);
110506+ i = atomic_read_unchecked(&cp->in_pkts);
110507 if (i > 8 || i < 0) return 0;
110508
110509 if (!todrop_rate[i]) return 0;
110510diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
110511index e683675..67cb16b 100644
110512--- a/net/netfilter/ipvs/ip_vs_core.c
110513+++ b/net/netfilter/ipvs/ip_vs_core.c
110514@@ -567,7 +567,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
110515 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
110516 /* do not touch skb anymore */
110517
110518- atomic_inc(&cp->in_pkts);
110519+ atomic_inc_unchecked(&cp->in_pkts);
110520 ip_vs_conn_put(cp);
110521 return ret;
110522 }
110523@@ -1711,7 +1711,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
110524 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
110525 pkts = sysctl_sync_threshold(ipvs);
110526 else
110527- pkts = atomic_add_return(1, &cp->in_pkts);
110528+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
110529
110530 if (ipvs->sync_state & IP_VS_STATE_MASTER)
110531 ip_vs_sync_conn(net, cp, pkts);
110532diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
110533index 581a658..910e112 100644
110534--- a/net/netfilter/ipvs/ip_vs_ctl.c
110535+++ b/net/netfilter/ipvs/ip_vs_ctl.c
110536@@ -794,7 +794,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
110537 */
110538 ip_vs_rs_hash(ipvs, dest);
110539 }
110540- atomic_set(&dest->conn_flags, conn_flags);
110541+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
110542
110543 /* bind the service */
110544 old_svc = rcu_dereference_protected(dest->svc, 1);
110545@@ -1654,7 +1654,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
110546 * align with netns init in ip_vs_control_net_init()
110547 */
110548
110549-static struct ctl_table vs_vars[] = {
110550+static ctl_table_no_const vs_vars[] __read_only = {
110551 {
110552 .procname = "amemthresh",
110553 .maxlen = sizeof(int),
110554@@ -2075,7 +2075,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
110555 " %-7s %-6d %-10d %-10d\n",
110556 &dest->addr.in6,
110557 ntohs(dest->port),
110558- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
110559+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
110560 atomic_read(&dest->weight),
110561 atomic_read(&dest->activeconns),
110562 atomic_read(&dest->inactconns));
110563@@ -2086,7 +2086,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
110564 "%-7s %-6d %-10d %-10d\n",
110565 ntohl(dest->addr.ip),
110566 ntohs(dest->port),
110567- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
110568+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
110569 atomic_read(&dest->weight),
110570 atomic_read(&dest->activeconns),
110571 atomic_read(&dest->inactconns));
110572@@ -2564,7 +2564,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
110573
110574 entry.addr = dest->addr.ip;
110575 entry.port = dest->port;
110576- entry.conn_flags = atomic_read(&dest->conn_flags);
110577+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
110578 entry.weight = atomic_read(&dest->weight);
110579 entry.u_threshold = dest->u_threshold;
110580 entry.l_threshold = dest->l_threshold;
110581@@ -3107,7 +3107,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
110582 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
110583 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
110584 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
110585- (atomic_read(&dest->conn_flags) &
110586+ (atomic_read_unchecked(&dest->conn_flags) &
110587 IP_VS_CONN_F_FWD_MASK)) ||
110588 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
110589 atomic_read(&dest->weight)) ||
110590@@ -3697,7 +3697,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
110591 {
110592 int idx;
110593 struct netns_ipvs *ipvs = net_ipvs(net);
110594- struct ctl_table *tbl;
110595+ ctl_table_no_const *tbl;
110596
110597 atomic_set(&ipvs->dropentry, 0);
110598 spin_lock_init(&ipvs->dropentry_lock);
110599diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
110600index 547ff33..c8c8117 100644
110601--- a/net/netfilter/ipvs/ip_vs_lblc.c
110602+++ b/net/netfilter/ipvs/ip_vs_lblc.c
110603@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
110604 * IPVS LBLC sysctl table
110605 */
110606 #ifdef CONFIG_SYSCTL
110607-static struct ctl_table vs_vars_table[] = {
110608+static ctl_table_no_const vs_vars_table[] __read_only = {
110609 {
110610 .procname = "lblc_expiration",
110611 .data = NULL,
110612diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
110613index 3f21a2f..a112e85 100644
110614--- a/net/netfilter/ipvs/ip_vs_lblcr.c
110615+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
110616@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
110617 * IPVS LBLCR sysctl table
110618 */
110619
110620-static struct ctl_table vs_vars_table[] = {
110621+static ctl_table_no_const vs_vars_table[] __read_only = {
110622 {
110623 .procname = "lblcr_expiration",
110624 .data = NULL,
110625diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
110626index db80126..ef7110e 100644
110627--- a/net/netfilter/ipvs/ip_vs_sync.c
110628+++ b/net/netfilter/ipvs/ip_vs_sync.c
110629@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
110630 cp = cp->control;
110631 if (cp) {
110632 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
110633- pkts = atomic_add_return(1, &cp->in_pkts);
110634+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
110635 else
110636 pkts = sysctl_sync_threshold(ipvs);
110637 ip_vs_sync_conn(net, cp->control, pkts);
110638@@ -771,7 +771,7 @@ control:
110639 if (!cp)
110640 return;
110641 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
110642- pkts = atomic_add_return(1, &cp->in_pkts);
110643+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
110644 else
110645 pkts = sysctl_sync_threshold(ipvs);
110646 goto sloop;
110647@@ -895,7 +895,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
110648
110649 if (opt)
110650 memcpy(&cp->in_seq, opt, sizeof(*opt));
110651- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
110652+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
110653 cp->state = state;
110654 cp->old_state = cp->state;
110655 /*
110656diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
110657index 73ba1cc..1adfc7a 100644
110658--- a/net/netfilter/ipvs/ip_vs_xmit.c
110659+++ b/net/netfilter/ipvs/ip_vs_xmit.c
110660@@ -1102,7 +1102,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
110661 else
110662 rc = NF_ACCEPT;
110663 /* do not touch skb anymore */
110664- atomic_inc(&cp->in_pkts);
110665+ atomic_inc_unchecked(&cp->in_pkts);
110666 goto out;
110667 }
110668
110669@@ -1194,7 +1194,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
110670 else
110671 rc = NF_ACCEPT;
110672 /* do not touch skb anymore */
110673- atomic_inc(&cp->in_pkts);
110674+ atomic_inc_unchecked(&cp->in_pkts);
110675 goto out;
110676 }
110677
110678diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
110679index a4b5e2a..13b1de3 100644
110680--- a/net/netfilter/nf_conntrack_acct.c
110681+++ b/net/netfilter/nf_conntrack_acct.c
110682@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
110683 #ifdef CONFIG_SYSCTL
110684 static int nf_conntrack_acct_init_sysctl(struct net *net)
110685 {
110686- struct ctl_table *table;
110687+ ctl_table_no_const *table;
110688
110689 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
110690 GFP_KERNEL);
110691diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
110692index 1f4f954..e364ad7 100644
110693--- a/net/netfilter/nf_conntrack_core.c
110694+++ b/net/netfilter/nf_conntrack_core.c
110695@@ -1789,6 +1789,10 @@ void nf_conntrack_init_end(void)
110696 #define DYING_NULLS_VAL ((1<<30)+1)
110697 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
110698
110699+#ifdef CONFIG_GRKERNSEC_HIDESYM
110700+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
110701+#endif
110702+
110703 int nf_conntrack_init_net(struct net *net)
110704 {
110705 int ret = -ENOMEM;
110706@@ -1814,7 +1818,11 @@ int nf_conntrack_init_net(struct net *net)
110707 if (!net->ct.stat)
110708 goto err_pcpu_lists;
110709
110710+#ifdef CONFIG_GRKERNSEC_HIDESYM
110711+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
110712+#else
110713 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
110714+#endif
110715 if (!net->ct.slabname)
110716 goto err_slabname;
110717
110718diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
110719index 1df1761..ce8b88a 100644
110720--- a/net/netfilter/nf_conntrack_ecache.c
110721+++ b/net/netfilter/nf_conntrack_ecache.c
110722@@ -188,7 +188,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
110723 #ifdef CONFIG_SYSCTL
110724 static int nf_conntrack_event_init_sysctl(struct net *net)
110725 {
110726- struct ctl_table *table;
110727+ ctl_table_no_const *table;
110728
110729 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
110730 GFP_KERNEL);
110731diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
110732index 5b3eae7..dd4b8fe 100644
110733--- a/net/netfilter/nf_conntrack_helper.c
110734+++ b/net/netfilter/nf_conntrack_helper.c
110735@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
110736
110737 static int nf_conntrack_helper_init_sysctl(struct net *net)
110738 {
110739- struct ctl_table *table;
110740+ ctl_table_no_const *table;
110741
110742 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
110743 GFP_KERNEL);
110744diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
110745index b65d586..beec902 100644
110746--- a/net/netfilter/nf_conntrack_proto.c
110747+++ b/net/netfilter/nf_conntrack_proto.c
110748@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
110749
110750 static void
110751 nf_ct_unregister_sysctl(struct ctl_table_header **header,
110752- struct ctl_table **table,
110753+ ctl_table_no_const **table,
110754 unsigned int users)
110755 {
110756 if (users > 0)
110757diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
110758index f641751..d3c5b51 100644
110759--- a/net/netfilter/nf_conntrack_standalone.c
110760+++ b/net/netfilter/nf_conntrack_standalone.c
110761@@ -471,7 +471,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
110762
110763 static int nf_conntrack_standalone_init_sysctl(struct net *net)
110764 {
110765- struct ctl_table *table;
110766+ ctl_table_no_const *table;
110767
110768 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
110769 GFP_KERNEL);
110770diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
110771index 7a394df..bd91a8a 100644
110772--- a/net/netfilter/nf_conntrack_timestamp.c
110773+++ b/net/netfilter/nf_conntrack_timestamp.c
110774@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
110775 #ifdef CONFIG_SYSCTL
110776 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
110777 {
110778- struct ctl_table *table;
110779+ ctl_table_no_const *table;
110780
110781 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
110782 GFP_KERNEL);
110783diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
110784index 85296d4..8becdec 100644
110785--- a/net/netfilter/nf_log.c
110786+++ b/net/netfilter/nf_log.c
110787@@ -243,7 +243,7 @@ static const struct file_operations nflog_file_ops = {
110788
110789 #ifdef CONFIG_SYSCTL
110790 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
110791-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
110792+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
110793
110794 static int nf_log_proc_dostring(struct ctl_table *table, int write,
110795 void __user *buffer, size_t *lenp, loff_t *ppos)
110796@@ -274,14 +274,16 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
110797 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
110798 mutex_unlock(&nf_log_mutex);
110799 } else {
110800+ ctl_table_no_const nf_log_table = *table;
110801+
110802 mutex_lock(&nf_log_mutex);
110803 logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
110804 lockdep_is_held(&nf_log_mutex));
110805 if (!logger)
110806- table->data = "NONE";
110807+ nf_log_table.data = "NONE";
110808 else
110809- table->data = logger->name;
110810- r = proc_dostring(table, write, buffer, lenp, ppos);
110811+ nf_log_table.data = logger->name;
110812+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
110813 mutex_unlock(&nf_log_mutex);
110814 }
110815
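Because nf_log_sysctl_table is now read-only, the read path of nf_log_proc_dostring() can no longer point table->data at the logger name in place; it works on a stack copy of the single entry instead, and only that copy is handed to proc_dostring(). The move in miniature:

        struct ctl { const char *procname; const void *data; };

        static int show_logger(const struct ctl *table, const char *logger_name)
        {
                struct ctl local = *table;      /* shared entry stays untouched */

                local.data = logger_name ? logger_name : "NONE";
                /* ... hand &local to the generic string handler ... */
                return 0;
        }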
110816diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
110817index f042ae5..30ea486 100644
110818--- a/net/netfilter/nf_sockopt.c
110819+++ b/net/netfilter/nf_sockopt.c
110820@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
110821 }
110822 }
110823
110824- list_add(&reg->list, &nf_sockopts);
110825+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
110826 out:
110827 mutex_unlock(&nf_sockopt_mutex);
110828 return ret;
110829@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
110830 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
110831 {
110832 mutex_lock(&nf_sockopt_mutex);
110833- list_del(&reg->list);
110834+ pax_list_del((struct list_head *)&reg->list);
110835 mutex_unlock(&nf_sockopt_mutex);
110836 }
110837 EXPORT_SYMBOL(nf_unregister_sockopt);
110838diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
110839index d292c8d..9f1e166 100644
110840--- a/net/netfilter/nfnetlink_log.c
110841+++ b/net/netfilter/nfnetlink_log.c
110842@@ -79,7 +79,7 @@ static int nfnl_log_net_id __read_mostly;
110843 struct nfnl_log_net {
110844 spinlock_t instances_lock;
110845 struct hlist_head instance_table[INSTANCE_BUCKETS];
110846- atomic_t global_seq;
110847+ atomic_unchecked_t global_seq;
110848 };
110849
110850 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
110851@@ -561,7 +561,7 @@ __build_packet_message(struct nfnl_log_net *log,
110852 /* global sequence number */
110853 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
110854 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
110855- htonl(atomic_inc_return(&log->global_seq))))
110856+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
110857 goto nla_put_failure;
110858
110859 if (data_len) {
110860diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
110861index 1840989..6895744 100644
110862--- a/net/netfilter/nft_compat.c
110863+++ b/net/netfilter/nft_compat.c
110864@@ -225,7 +225,7 @@ target_dump_info(struct sk_buff *skb, const struct xt_target *t, const void *in)
110865 /* We want to reuse existing compat_to_user */
110866 old_fs = get_fs();
110867 set_fs(KERNEL_DS);
110868- t->compat_to_user(out, in);
110869+ t->compat_to_user((void __force_user *)out, in);
110870 set_fs(old_fs);
110871 ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), out);
110872 kfree(out);
110873@@ -421,7 +421,7 @@ match_dump_info(struct sk_buff *skb, const struct xt_match *m, const void *in)
110874 /* We want to reuse existing compat_to_user */
110875 old_fs = get_fs();
110876 set_fs(KERNEL_DS);
110877- m->compat_to_user(out, in);
110878+ m->compat_to_user((void __force_user *)out, in);
110879 set_fs(old_fs);
110880 ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), out);
110881 kfree(out);
110882diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c
110883index bbffdbda..12d4da8 100644
110884--- a/net/netfilter/xt_bpf.c
110885+++ b/net/netfilter/xt_bpf.c
110886@@ -23,11 +23,10 @@ MODULE_ALIAS("ip6t_bpf");
110887 static int bpf_mt_check(const struct xt_mtchk_param *par)
110888 {
110889 struct xt_bpf_info *info = par->matchinfo;
110890- struct sock_fprog_kern program;
110891+ struct sock_fprog program;
110892
110893 program.len = info->bpf_program_num_elem;
110894- program.filter = info->bpf_program;
110895-
110896+ program.filter = (struct sock_filter __user *) info->bpf_program;
110897 if (sk_unattached_filter_create(&info->filter, &program)) {
110898 pr_info("bpf: check failed: parse error\n");
110899 return -EINVAL;
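The xt_bpf hunk is fallout from constifying sock_fprog_kern elsewhere in the patch: the check routine is rebuilt against a sock_fprog whose filter field is nominally __user, so the kernel-resident program array needs one explicit, audited cast at this call site. The idea, stripped down:

        /* an interface that nominally takes a user pointer being fed a
         * kernel buffer through one deliberate cast */
        struct prog { unsigned short len; const void *insns /* __user */; };

        static void prog_from_kernel(struct prog *p, const void *kbuf, unsigned short n)
        {
                p->len   = n;
                p->insns = kbuf;        /* the __force cast's role in the hunk */
        }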
110900diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
110901new file mode 100644
110902index 0000000..c566332
110903--- /dev/null
110904+++ b/net/netfilter/xt_gradm.c
110905@@ -0,0 +1,51 @@
110906+/*
110907+ * gradm match for netfilter
110908