4.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 15 Jul 2018 11:41:50 +0000 (13:41 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 15 Jul 2018 11:41:50 +0000 (13:41 +0200)
added patches:
crypto-x86-salsa20-remove-x86-salsa20-implementations.patch
f2fs-give-message-and-set-need_fsck-given-broken-node-id.patch
f2fs-sanity-check-on-sit-entry.patch
kvm-vmx-nested-vm-entry-prereqs-for-event-inj.patch
loop-add-recursion-validation-to-loop_change_fd.patch
loop-remember-whether-sysfs_create_group-was-done.patch
netfilter-nf_queue-augment-nfqa_cfg_policy.patch
netfilter-x_tables-initialise-match-target-check-parameter-struct.patch
nvme-pci-remap-cmb-sq-entries-on-every-controller-reset.patch
pm-hibernate-fix-oops-at-snapshot_write.patch
rdma-ucm-mark-ucm-interface-as-broken.patch
uprobes-x86-remove-incorrect-warn_on-in-uprobe_init_insn.patch

13 files changed:
queue-4.14/crypto-x86-salsa20-remove-x86-salsa20-implementations.patch [new file with mode: 0644]
queue-4.14/f2fs-give-message-and-set-need_fsck-given-broken-node-id.patch [new file with mode: 0644]
queue-4.14/f2fs-sanity-check-on-sit-entry.patch [new file with mode: 0644]
queue-4.14/kvm-vmx-nested-vm-entry-prereqs-for-event-inj.patch [new file with mode: 0644]
queue-4.14/loop-add-recursion-validation-to-loop_change_fd.patch [new file with mode: 0644]
queue-4.14/loop-remember-whether-sysfs_create_group-was-done.patch [new file with mode: 0644]
queue-4.14/netfilter-nf_queue-augment-nfqa_cfg_policy.patch [new file with mode: 0644]
queue-4.14/netfilter-x_tables-initialise-match-target-check-parameter-struct.patch [new file with mode: 0644]
queue-4.14/nvme-pci-remap-cmb-sq-entries-on-every-controller-reset.patch [new file with mode: 0644]
queue-4.14/pm-hibernate-fix-oops-at-snapshot_write.patch [new file with mode: 0644]
queue-4.14/rdma-ucm-mark-ucm-interface-as-broken.patch [new file with mode: 0644]
queue-4.14/series
queue-4.14/uprobes-x86-remove-incorrect-warn_on-in-uprobe_init_insn.patch [new file with mode: 0644]

diff --git a/queue-4.14/crypto-x86-salsa20-remove-x86-salsa20-implementations.patch b/queue-4.14/crypto-x86-salsa20-remove-x86-salsa20-implementations.patch
new file mode 100644 (file)
index 0000000..b68f782
--- /dev/null
@@ -0,0 +1,2283 @@
+From b7b73cd5d74694ed59abcdb4974dacb4ff8b2a2a Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Sat, 26 May 2018 00:08:58 -0700
+Subject: crypto: x86/salsa20 - remove x86 salsa20 implementations
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit b7b73cd5d74694ed59abcdb4974dacb4ff8b2a2a upstream.
+
+The x86 assembly implementations of Salsa20 use the frame base pointer
+register (%ebp or %rbp), which breaks frame pointer convention and
+breaks stack traces when unwinding from an interrupt in the crypto code.
+Recent (v4.10+) kernels will warn about this, e.g.
+
+WARNING: kernel stack regs at 00000000a8291e69 in syzkaller047086:4677 has bad 'bp' value 000000001077994c
+[...]
+
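+The convention the unwinder relies on is simple: each stack frame
+stores the caller's %ebp/%rbp at its base, immediately below the
+return address, so the unwinder can hop from saved frame pointer to
+saved frame pointer.  A minimal sketch of such a walk in C
+(illustrative only, built with "gcc -O0" on x86_64; the kernel's real
+unwinder is far more careful):
+
+	#include <stdio.h>
+
+	struct frame {
+		struct frame *next;	/* caller's saved %rbp */
+		void *ret;	/* return address into the caller */
+	};
+
+	/* Print the first few callers.  If any function on the stack
+	 * used %rbp as a scratch register, the way the Salsa20 assembly
+	 * does, ->next would be garbage here, which is exactly what the
+	 * unwinder warning above is complaining about. */
+	static void walk(const struct frame *fp)
+	{
+		int i;
+
+		for (i = 0; fp && i < 3; i++) {
+			printf("caller: %p\n", fp->ret);
+			fp = fp->next;
+		}
+	}
+
+	static void inner(void) { walk(__builtin_frame_address(0)); }
+	static void outer(void) { inner(); }
+
+	int main(void)
+	{
+		outer();
+		return 0;
+	}
+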
+But after looking into it, I believe there's very little reason to still
+retain the x86 Salsa20 code.  First, these are *not* vectorized
+(SSE2/SSSE3/AVX2) implementations, which would be needed to get anywhere
+close to the best Salsa20 performance on any remotely modern x86
+processor; they're just regular x86 assembly.  Second, it's still
+unclear whether anyone is actually using the kernel's Salsa20 at all,
+especially given that now ChaCha20 is supported too, and with much more
+efficient SSSE3 and AVX2 implementations.  Finally, in benchmarks I did
+on both Intel and AMD processors with both gcc 8.1.0 and gcc 4.9.4, the
+x86_64 salsa20-asm is actually slightly *slower* than salsa20-generic
+(~3% slower on Skylake, ~10% slower on Zen), while the i686 salsa20-asm
+is only slightly faster than salsa20-generic (~15% faster on Skylake,
+~20% faster on Zen).  The gcc version made little difference.
+
+So, the x86_64 salsa20-asm is pretty clearly useless.  That leaves just
+the i686 salsa20-asm, which based on my tests provides a 15-20% speed
+boost.  But that's without updating the code to not use %ebp.  And given
+the maintenance cost, the small speed difference vs. salsa20-generic,
+the fact that few people still use i686 kernels, the doubt that anyone
+is even using the kernel's Salsa20 at all, and the fact that an SSE2
+implementation would almost certainly be much faster on any remotely
+modern x86 processor yet no one has cared enough to add one, I don't
+think it's worthwhile to keep.
+
+Thus, just remove both the x86_64 and i686 salsa20-asm implementations.
+
+Reported-by: syzbot+ffa3a158337bbc01ff09@syzkaller.appspotmail.com
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
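+For reference, each add/rotate/xor triple in the assembly removed
+below is one step of a Salsa20 quarter-round, which in C is roughly
+the following (a sketch of the generic algorithm, not code taken from
+this patch):
+
+	#include <stdint.h>
+
+	#define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))
+
+	/* One Salsa20 quarter-round.  The removed code interleaves four
+	 * of these at a time (one per column, then one per row of the
+	 * 4x4 state); e.g. the i586 version's "p += x12; p <<<= 7;
+	 * p ^= x4" comments are the first line below with
+	 * (a, b, c, d) = (x0, x4, x8, x12). */
+	static void quarterround(uint32_t *a, uint32_t *b,
+				 uint32_t *c, uint32_t *d)
+	{
+		*b ^= ROTL32(*a + *d, 7);
+		*c ^= ROTL32(*b + *a, 9);
+		*d ^= ROTL32(*c + *b, 13);
+		*a ^= ROTL32(*d + *c, 18);
+	}
+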
+---
+ arch/x86/crypto/Makefile                |    4 
+ arch/x86/crypto/salsa20-i586-asm_32.S   | 1114 --------------------------------
+ arch/x86/crypto/salsa20-x86_64-asm_64.S |  919 --------------------------
+ arch/x86/crypto/salsa20_glue.c          |  116 ---
+ crypto/Kconfig                          |   26 
+ 5 files changed, 2179 deletions(-)
+
+--- a/arch/x86/crypto/Makefile
++++ b/arch/x86/crypto/Makefile
+@@ -15,7 +15,6 @@ obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) +=
+ obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
+ obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
+-obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
+ obj-$(CONFIG_CRYPTO_SERPENT_SSE2_586) += serpent-sse2-i586.o
+ obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
+@@ -24,7 +23,6 @@ obj-$(CONFIG_CRYPTO_CAMELLIA_X86_64) +=
+ obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o
+ obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
+ obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o
+-obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o
+ obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha20-x86_64.o
+ obj-$(CONFIG_CRYPTO_SERPENT_SSE2_X86_64) += serpent-sse2-x86_64.o
+ obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
+@@ -59,7 +57,6 @@ endif
+ aes-i586-y := aes-i586-asm_32.o aes_glue.o
+ twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o
+-salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o
+ serpent-sse2-i586-y := serpent-sse2-i586-asm_32.o serpent_sse2_glue.o
+ aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
+@@ -68,7 +65,6 @@ camellia-x86_64-y := camellia-x86_64-asm
+ blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o
+ twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
+ twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o
+-salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o
+ chacha20-x86_64-y := chacha20-ssse3-x86_64.o chacha20_glue.o
+ serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o
+--- a/arch/x86/crypto/salsa20-i586-asm_32.S
++++ /dev/null
+@@ -1,1114 +0,0 @@
+-# salsa20_pm.s version 20051229
+-# D. J. Bernstein
+-# Public domain.
+-
+-#include <linux/linkage.h>
+-
+-.text
+-
+-# enter salsa20_encrypt_bytes
+-ENTRY(salsa20_encrypt_bytes)
+-      mov     %esp,%eax
+-      and     $31,%eax
+-      add     $256,%eax
+-      sub     %eax,%esp
+-      # eax_stack = eax
+-      movl    %eax,80(%esp)
+-      # ebx_stack = ebx
+-      movl    %ebx,84(%esp)
+-      # esi_stack = esi
+-      movl    %esi,88(%esp)
+-      # edi_stack = edi
+-      movl    %edi,92(%esp)
+-      # ebp_stack = ebp
+-      movl    %ebp,96(%esp)
+-      # x = arg1
+-      movl    4(%esp,%eax),%edx
+-      # m = arg2
+-      movl    8(%esp,%eax),%esi
+-      # out = arg3
+-      movl    12(%esp,%eax),%edi
+-      # bytes = arg4
+-      movl    16(%esp,%eax),%ebx
+-      # bytes -= 0
+-      sub     $0,%ebx
+-      # goto done if unsigned<=
+-      jbe     ._done
+-._start:
+-      # in0 = *(uint32 *) (x + 0)
+-      movl    0(%edx),%eax
+-      # in1 = *(uint32 *) (x + 4)
+-      movl    4(%edx),%ecx
+-      # in2 = *(uint32 *) (x + 8)
+-      movl    8(%edx),%ebp
+-      # j0 = in0
+-      movl    %eax,164(%esp)
+-      # in3 = *(uint32 *) (x + 12)
+-      movl    12(%edx),%eax
+-      # j1 = in1
+-      movl    %ecx,168(%esp)
+-      # in4 = *(uint32 *) (x + 16)
+-      movl    16(%edx),%ecx
+-      # j2 = in2
+-      movl    %ebp,172(%esp)
+-      # in5 = *(uint32 *) (x + 20)
+-      movl    20(%edx),%ebp
+-      # j3 = in3
+-      movl    %eax,176(%esp)
+-      # in6 = *(uint32 *) (x + 24)
+-      movl    24(%edx),%eax
+-      # j4 = in4
+-      movl    %ecx,180(%esp)
+-      # in7 = *(uint32 *) (x + 28)
+-      movl    28(%edx),%ecx
+-      # j5 = in5
+-      movl    %ebp,184(%esp)
+-      # in8 = *(uint32 *) (x + 32)
+-      movl    32(%edx),%ebp
+-      # j6 = in6
+-      movl    %eax,188(%esp)
+-      # in9 = *(uint32 *) (x + 36)
+-      movl    36(%edx),%eax
+-      # j7 = in7
+-      movl    %ecx,192(%esp)
+-      # in10 = *(uint32 *) (x + 40)
+-      movl    40(%edx),%ecx
+-      # j8 = in8
+-      movl    %ebp,196(%esp)
+-      # in11 = *(uint32 *) (x + 44)
+-      movl    44(%edx),%ebp
+-      # j9 = in9
+-      movl    %eax,200(%esp)
+-      # in12 = *(uint32 *) (x + 48)
+-      movl    48(%edx),%eax
+-      # j10 = in10
+-      movl    %ecx,204(%esp)
+-      # in13 = *(uint32 *) (x + 52)
+-      movl    52(%edx),%ecx
+-      # j11 = in11
+-      movl    %ebp,208(%esp)
+-      # in14 = *(uint32 *) (x + 56)
+-      movl    56(%edx),%ebp
+-      # j12 = in12
+-      movl    %eax,212(%esp)
+-      # in15 = *(uint32 *) (x + 60)
+-      movl    60(%edx),%eax
+-      # j13 = in13
+-      movl    %ecx,216(%esp)
+-      # j14 = in14
+-      movl    %ebp,220(%esp)
+-      # j15 = in15
+-      movl    %eax,224(%esp)
+-      # x_backup = x
+-      movl    %edx,64(%esp)
+-._bytesatleast1:
+-      #   bytes - 64
+-      cmp     $64,%ebx
+-      #   goto nocopy if unsigned>=
+-      jae     ._nocopy
+-      #     ctarget = out
+-      movl    %edi,228(%esp)
+-      #     out = &tmp
+-      leal    0(%esp),%edi
+-      #     i = bytes
+-      mov     %ebx,%ecx
+-      #     while (i) { *out++ = *m++; --i }
+-      rep     movsb
+-      #     out = &tmp
+-      leal    0(%esp),%edi
+-      #     m = &tmp
+-      leal    0(%esp),%esi
+-._nocopy:
+-      #   out_backup = out
+-      movl    %edi,72(%esp)
+-      #   m_backup = m
+-      movl    %esi,68(%esp)
+-      #   bytes_backup = bytes
+-      movl    %ebx,76(%esp)
+-      #   in0 = j0
+-      movl    164(%esp),%eax
+-      #   in1 = j1
+-      movl    168(%esp),%ecx
+-      #   in2 = j2
+-      movl    172(%esp),%edx
+-      #   in3 = j3
+-      movl    176(%esp),%ebx
+-      #   x0 = in0
+-      movl    %eax,100(%esp)
+-      #   x1 = in1
+-      movl    %ecx,104(%esp)
+-      #   x2 = in2
+-      movl    %edx,108(%esp)
+-      #   x3 = in3
+-      movl    %ebx,112(%esp)
+-      #   in4 = j4
+-      movl    180(%esp),%eax
+-      #   in5 = j5
+-      movl    184(%esp),%ecx
+-      #   in6 = j6
+-      movl    188(%esp),%edx
+-      #   in7 = j7
+-      movl    192(%esp),%ebx
+-      #   x4 = in4
+-      movl    %eax,116(%esp)
+-      #   x5 = in5
+-      movl    %ecx,120(%esp)
+-      #   x6 = in6
+-      movl    %edx,124(%esp)
+-      #   x7 = in7
+-      movl    %ebx,128(%esp)
+-      #   in8 = j8
+-      movl    196(%esp),%eax
+-      #   in9 = j9
+-      movl    200(%esp),%ecx
+-      #   in10 = j10
+-      movl    204(%esp),%edx
+-      #   in11 = j11
+-      movl    208(%esp),%ebx
+-      #   x8 = in8
+-      movl    %eax,132(%esp)
+-      #   x9 = in9
+-      movl    %ecx,136(%esp)
+-      #   x10 = in10
+-      movl    %edx,140(%esp)
+-      #   x11 = in11
+-      movl    %ebx,144(%esp)
+-      #   in12 = j12
+-      movl    212(%esp),%eax
+-      #   in13 = j13
+-      movl    216(%esp),%ecx
+-      #   in14 = j14
+-      movl    220(%esp),%edx
+-      #   in15 = j15
+-      movl    224(%esp),%ebx
+-      #   x12 = in12
+-      movl    %eax,148(%esp)
+-      #   x13 = in13
+-      movl    %ecx,152(%esp)
+-      #   x14 = in14
+-      movl    %edx,156(%esp)
+-      #   x15 = in15
+-      movl    %ebx,160(%esp)
+-      #   i = 20
+-      mov     $20,%ebp
+-      # p = x0
+-      movl    100(%esp),%eax
+-      # s = x5
+-      movl    120(%esp),%ecx
+-      # t = x10
+-      movl    140(%esp),%edx
+-      # w = x15
+-      movl    160(%esp),%ebx
+-._mainloop:
+-      # x0 = p
+-      movl    %eax,100(%esp)
+-      #                               x10 = t
+-      movl    %edx,140(%esp)
+-      # p += x12
+-      addl    148(%esp),%eax
+-      #               x5 = s
+-      movl    %ecx,120(%esp)
+-      #                               t += x6
+-      addl    124(%esp),%edx
+-      #                                               x15 = w
+-      movl    %ebx,160(%esp)
+-      #               r = x1
+-      movl    104(%esp),%esi
+-      #               r += s
+-      add     %ecx,%esi
+-      #                                               v = x11
+-      movl    144(%esp),%edi
+-      #                                               v += w
+-      add     %ebx,%edi
+-      # p <<<= 7
+-      rol     $7,%eax
+-      # p ^= x4
+-      xorl    116(%esp),%eax
+-      #                               t <<<= 7
+-      rol     $7,%edx
+-      #                               t ^= x14
+-      xorl    156(%esp),%edx
+-      #               r <<<= 7
+-      rol     $7,%esi
+-      #               r ^= x9
+-      xorl    136(%esp),%esi
+-      #                                               v <<<= 7
+-      rol     $7,%edi
+-      #                                               v ^= x3
+-      xorl    112(%esp),%edi
+-      # x4 = p
+-      movl    %eax,116(%esp)
+-      #                               x14 = t
+-      movl    %edx,156(%esp)
+-      # p += x0
+-      addl    100(%esp),%eax
+-      #               x9 = r
+-      movl    %esi,136(%esp)
+-      #                               t += x10
+-      addl    140(%esp),%edx
+-      #                                               x3 = v
+-      movl    %edi,112(%esp)
+-      # p <<<= 9
+-      rol     $9,%eax
+-      # p ^= x8
+-      xorl    132(%esp),%eax
+-      #                               t <<<= 9
+-      rol     $9,%edx
+-      #                               t ^= x2
+-      xorl    108(%esp),%edx
+-      #               s += r
+-      add     %esi,%ecx
+-      #               s <<<= 9
+-      rol     $9,%ecx
+-      #               s ^= x13
+-      xorl    152(%esp),%ecx
+-      #                                               w += v
+-      add     %edi,%ebx
+-      #                                               w <<<= 9
+-      rol     $9,%ebx
+-      #                                               w ^= x7
+-      xorl    128(%esp),%ebx
+-      # x8 = p
+-      movl    %eax,132(%esp)
+-      #                               x2 = t
+-      movl    %edx,108(%esp)
+-      # p += x4
+-      addl    116(%esp),%eax
+-      #               x13 = s
+-      movl    %ecx,152(%esp)
+-      #                               t += x14
+-      addl    156(%esp),%edx
+-      #                                               x7 = w
+-      movl    %ebx,128(%esp)
+-      # p <<<= 13
+-      rol     $13,%eax
+-      # p ^= x12
+-      xorl    148(%esp),%eax
+-      #                               t <<<= 13
+-      rol     $13,%edx
+-      #                               t ^= x6
+-      xorl    124(%esp),%edx
+-      #               r += s
+-      add     %ecx,%esi
+-      #               r <<<= 13
+-      rol     $13,%esi
+-      #               r ^= x1
+-      xorl    104(%esp),%esi
+-      #                                               v += w
+-      add     %ebx,%edi
+-      #                                               v <<<= 13
+-      rol     $13,%edi
+-      #                                               v ^= x11
+-      xorl    144(%esp),%edi
+-      # x12 = p
+-      movl    %eax,148(%esp)
+-      #                               x6 = t
+-      movl    %edx,124(%esp)
+-      # p += x8
+-      addl    132(%esp),%eax
+-      #               x1 = r
+-      movl    %esi,104(%esp)
+-      #                               t += x2
+-      addl    108(%esp),%edx
+-      #                                               x11 = v
+-      movl    %edi,144(%esp)
+-      # p <<<= 18
+-      rol     $18,%eax
+-      # p ^= x0
+-      xorl    100(%esp),%eax
+-      #                               t <<<= 18
+-      rol     $18,%edx
+-      #                               t ^= x10
+-      xorl    140(%esp),%edx
+-      #               s += r
+-      add     %esi,%ecx
+-      #               s <<<= 18
+-      rol     $18,%ecx
+-      #               s ^= x5
+-      xorl    120(%esp),%ecx
+-      #                                               w += v
+-      add     %edi,%ebx
+-      #                                               w <<<= 18
+-      rol     $18,%ebx
+-      #                                               w ^= x15
+-      xorl    160(%esp),%ebx
+-      # x0 = p
+-      movl    %eax,100(%esp)
+-      #                               x10 = t
+-      movl    %edx,140(%esp)
+-      # p += x3
+-      addl    112(%esp),%eax
+-      # p <<<= 7
+-      rol     $7,%eax
+-      #               x5 = s
+-      movl    %ecx,120(%esp)
+-      #                               t += x9
+-      addl    136(%esp),%edx
+-      #                                               x15 = w
+-      movl    %ebx,160(%esp)
+-      #               r = x4
+-      movl    116(%esp),%esi
+-      #               r += s
+-      add     %ecx,%esi
+-      #                                               v = x14
+-      movl    156(%esp),%edi
+-      #                                               v += w
+-      add     %ebx,%edi
+-      # p ^= x1
+-      xorl    104(%esp),%eax
+-      #                               t <<<= 7
+-      rol     $7,%edx
+-      #                               t ^= x11
+-      xorl    144(%esp),%edx
+-      #               r <<<= 7
+-      rol     $7,%esi
+-      #               r ^= x6
+-      xorl    124(%esp),%esi
+-      #                                               v <<<= 7
+-      rol     $7,%edi
+-      #                                               v ^= x12
+-      xorl    148(%esp),%edi
+-      # x1 = p
+-      movl    %eax,104(%esp)
+-      #                               x11 = t
+-      movl    %edx,144(%esp)
+-      # p += x0
+-      addl    100(%esp),%eax
+-      #               x6 = r
+-      movl    %esi,124(%esp)
+-      #                               t += x10
+-      addl    140(%esp),%edx
+-      #                                               x12 = v
+-      movl    %edi,148(%esp)
+-      # p <<<= 9
+-      rol     $9,%eax
+-      # p ^= x2
+-      xorl    108(%esp),%eax
+-      #                               t <<<= 9
+-      rol     $9,%edx
+-      #                               t ^= x8
+-      xorl    132(%esp),%edx
+-      #               s += r
+-      add     %esi,%ecx
+-      #               s <<<= 9
+-      rol     $9,%ecx
+-      #               s ^= x7
+-      xorl    128(%esp),%ecx
+-      #                                               w += v
+-      add     %edi,%ebx
+-      #                                               w <<<= 9
+-      rol     $9,%ebx
+-      #                                               w ^= x13
+-      xorl    152(%esp),%ebx
+-      # x2 = p
+-      movl    %eax,108(%esp)
+-      #                               x8 = t
+-      movl    %edx,132(%esp)
+-      # p += x1
+-      addl    104(%esp),%eax
+-      #               x7 = s
+-      movl    %ecx,128(%esp)
+-      #                               t += x11
+-      addl    144(%esp),%edx
+-      #                                               x13 = w
+-      movl    %ebx,152(%esp)
+-      # p <<<= 13
+-      rol     $13,%eax
+-      # p ^= x3
+-      xorl    112(%esp),%eax
+-      #                               t <<<= 13
+-      rol     $13,%edx
+-      #                               t ^= x9
+-      xorl    136(%esp),%edx
+-      #               r += s
+-      add     %ecx,%esi
+-      #               r <<<= 13
+-      rol     $13,%esi
+-      #               r ^= x4
+-      xorl    116(%esp),%esi
+-      #                                               v += w
+-      add     %ebx,%edi
+-      #                                               v <<<= 13
+-      rol     $13,%edi
+-      #                                               v ^= x14
+-      xorl    156(%esp),%edi
+-      # x3 = p
+-      movl    %eax,112(%esp)
+-      #                               x9 = t
+-      movl    %edx,136(%esp)
+-      # p += x2
+-      addl    108(%esp),%eax
+-      #               x4 = r
+-      movl    %esi,116(%esp)
+-      #                               t += x8
+-      addl    132(%esp),%edx
+-      #                                               x14 = v
+-      movl    %edi,156(%esp)
+-      # p <<<= 18
+-      rol     $18,%eax
+-      # p ^= x0
+-      xorl    100(%esp),%eax
+-      #                               t <<<= 18
+-      rol     $18,%edx
+-      #                               t ^= x10
+-      xorl    140(%esp),%edx
+-      #               s += r
+-      add     %esi,%ecx
+-      #               s <<<= 18
+-      rol     $18,%ecx
+-      #               s ^= x5
+-      xorl    120(%esp),%ecx
+-      #                                               w += v
+-      add     %edi,%ebx
+-      #                                               w <<<= 18
+-      rol     $18,%ebx
+-      #                                               w ^= x15
+-      xorl    160(%esp),%ebx
+-      # x0 = p
+-      movl    %eax,100(%esp)
+-      #                               x10 = t
+-      movl    %edx,140(%esp)
+-      # p += x12
+-      addl    148(%esp),%eax
+-      #               x5 = s
+-      movl    %ecx,120(%esp)
+-      #                               t += x6
+-      addl    124(%esp),%edx
+-      #                                               x15 = w
+-      movl    %ebx,160(%esp)
+-      #               r = x1
+-      movl    104(%esp),%esi
+-      #               r += s
+-      add     %ecx,%esi
+-      #                                               v = x11
+-      movl    144(%esp),%edi
+-      #                                               v += w
+-      add     %ebx,%edi
+-      # p <<<= 7
+-      rol     $7,%eax
+-      # p ^= x4
+-      xorl    116(%esp),%eax
+-      #                               t <<<= 7
+-      rol     $7,%edx
+-      #                               t ^= x14
+-      xorl    156(%esp),%edx
+-      #               r <<<= 7
+-      rol     $7,%esi
+-      #               r ^= x9
+-      xorl    136(%esp),%esi
+-      #                                               v <<<= 7
+-      rol     $7,%edi
+-      #                                               v ^= x3
+-      xorl    112(%esp),%edi
+-      # x4 = p
+-      movl    %eax,116(%esp)
+-      #                               x14 = t
+-      movl    %edx,156(%esp)
+-      # p += x0
+-      addl    100(%esp),%eax
+-      #               x9 = r
+-      movl    %esi,136(%esp)
+-      #                               t += x10
+-      addl    140(%esp),%edx
+-      #                                               x3 = v
+-      movl    %edi,112(%esp)
+-      # p <<<= 9
+-      rol     $9,%eax
+-      # p ^= x8
+-      xorl    132(%esp),%eax
+-      #                               t <<<= 9
+-      rol     $9,%edx
+-      #                               t ^= x2
+-      xorl    108(%esp),%edx
+-      #               s += r
+-      add     %esi,%ecx
+-      #               s <<<= 9
+-      rol     $9,%ecx
+-      #               s ^= x13
+-      xorl    152(%esp),%ecx
+-      #                                               w += v
+-      add     %edi,%ebx
+-      #                                               w <<<= 9
+-      rol     $9,%ebx
+-      #                                               w ^= x7
+-      xorl    128(%esp),%ebx
+-      # x8 = p
+-      movl    %eax,132(%esp)
+-      #                               x2 = t
+-      movl    %edx,108(%esp)
+-      # p += x4
+-      addl    116(%esp),%eax
+-      #               x13 = s
+-      movl    %ecx,152(%esp)
+-      #                               t += x14
+-      addl    156(%esp),%edx
+-      #                                               x7 = w
+-      movl    %ebx,128(%esp)
+-      # p <<<= 13
+-      rol     $13,%eax
+-      # p ^= x12
+-      xorl    148(%esp),%eax
+-      #                               t <<<= 13
+-      rol     $13,%edx
+-      #                               t ^= x6
+-      xorl    124(%esp),%edx
+-      #               r += s
+-      add     %ecx,%esi
+-      #               r <<<= 13
+-      rol     $13,%esi
+-      #               r ^= x1
+-      xorl    104(%esp),%esi
+-      #                                               v += w
+-      add     %ebx,%edi
+-      #                                               v <<<= 13
+-      rol     $13,%edi
+-      #                                               v ^= x11
+-      xorl    144(%esp),%edi
+-      # x12 = p
+-      movl    %eax,148(%esp)
+-      #                               x6 = t
+-      movl    %edx,124(%esp)
+-      # p += x8
+-      addl    132(%esp),%eax
+-      #               x1 = r
+-      movl    %esi,104(%esp)
+-      #                               t += x2
+-      addl    108(%esp),%edx
+-      #                                               x11 = v
+-      movl    %edi,144(%esp)
+-      # p <<<= 18
+-      rol     $18,%eax
+-      # p ^= x0
+-      xorl    100(%esp),%eax
+-      #                               t <<<= 18
+-      rol     $18,%edx
+-      #                               t ^= x10
+-      xorl    140(%esp),%edx
+-      #               s += r
+-      add     %esi,%ecx
+-      #               s <<<= 18
+-      rol     $18,%ecx
+-      #               s ^= x5
+-      xorl    120(%esp),%ecx
+-      #                                               w += v
+-      add     %edi,%ebx
+-      #                                               w <<<= 18
+-      rol     $18,%ebx
+-      #                                               w ^= x15
+-      xorl    160(%esp),%ebx
+-      # x0 = p
+-      movl    %eax,100(%esp)
+-      #                               x10 = t
+-      movl    %edx,140(%esp)
+-      # p += x3
+-      addl    112(%esp),%eax
+-      # p <<<= 7
+-      rol     $7,%eax
+-      #               x5 = s
+-      movl    %ecx,120(%esp)
+-      #                               t += x9
+-      addl    136(%esp),%edx
+-      #                                               x15 = w
+-      movl    %ebx,160(%esp)
+-      #               r = x4
+-      movl    116(%esp),%esi
+-      #               r += s
+-      add     %ecx,%esi
+-      #                                               v = x14
+-      movl    156(%esp),%edi
+-      #                                               v += w
+-      add     %ebx,%edi
+-      # p ^= x1
+-      xorl    104(%esp),%eax
+-      #                               t <<<= 7
+-      rol     $7,%edx
+-      #                               t ^= x11
+-      xorl    144(%esp),%edx
+-      #               r <<<= 7
+-      rol     $7,%esi
+-      #               r ^= x6
+-      xorl    124(%esp),%esi
+-      #                                               v <<<= 7
+-      rol     $7,%edi
+-      #                                               v ^= x12
+-      xorl    148(%esp),%edi
+-      # x1 = p
+-      movl    %eax,104(%esp)
+-      #                               x11 = t
+-      movl    %edx,144(%esp)
+-      # p += x0
+-      addl    100(%esp),%eax
+-      #               x6 = r
+-      movl    %esi,124(%esp)
+-      #                               t += x10
+-      addl    140(%esp),%edx
+-      #                                               x12 = v
+-      movl    %edi,148(%esp)
+-      # p <<<= 9
+-      rol     $9,%eax
+-      # p ^= x2
+-      xorl    108(%esp),%eax
+-      #                               t <<<= 9
+-      rol     $9,%edx
+-      #                               t ^= x8
+-      xorl    132(%esp),%edx
+-      #               s += r
+-      add     %esi,%ecx
+-      #               s <<<= 9
+-      rol     $9,%ecx
+-      #               s ^= x7
+-      xorl    128(%esp),%ecx
+-      #                                               w += v
+-      add     %edi,%ebx
+-      #                                               w <<<= 9
+-      rol     $9,%ebx
+-      #                                               w ^= x13
+-      xorl    152(%esp),%ebx
+-      # x2 = p
+-      movl    %eax,108(%esp)
+-      #                               x8 = t
+-      movl    %edx,132(%esp)
+-      # p += x1
+-      addl    104(%esp),%eax
+-      #               x7 = s
+-      movl    %ecx,128(%esp)
+-      #                               t += x11
+-      addl    144(%esp),%edx
+-      #                                               x13 = w
+-      movl    %ebx,152(%esp)
+-      # p <<<= 13
+-      rol     $13,%eax
+-      # p ^= x3
+-      xorl    112(%esp),%eax
+-      #                               t <<<= 13
+-      rol     $13,%edx
+-      #                               t ^= x9
+-      xorl    136(%esp),%edx
+-      #               r += s
+-      add     %ecx,%esi
+-      #               r <<<= 13
+-      rol     $13,%esi
+-      #               r ^= x4
+-      xorl    116(%esp),%esi
+-      #                                               v += w
+-      add     %ebx,%edi
+-      #                                               v <<<= 13
+-      rol     $13,%edi
+-      #                                               v ^= x14
+-      xorl    156(%esp),%edi
+-      # x3 = p
+-      movl    %eax,112(%esp)
+-      #                               x9 = t
+-      movl    %edx,136(%esp)
+-      # p += x2
+-      addl    108(%esp),%eax
+-      #               x4 = r
+-      movl    %esi,116(%esp)
+-      #                               t += x8
+-      addl    132(%esp),%edx
+-      #                                               x14 = v
+-      movl    %edi,156(%esp)
+-      # p <<<= 18
+-      rol     $18,%eax
+-      # p ^= x0
+-      xorl    100(%esp),%eax
+-      #                               t <<<= 18
+-      rol     $18,%edx
+-      #                               t ^= x10
+-      xorl    140(%esp),%edx
+-      #               s += r
+-      add     %esi,%ecx
+-      #               s <<<= 18
+-      rol     $18,%ecx
+-      #               s ^= x5
+-      xorl    120(%esp),%ecx
+-      #                                               w += v
+-      add     %edi,%ebx
+-      #                                               w <<<= 18
+-      rol     $18,%ebx
+-      #                                               w ^= x15
+-      xorl    160(%esp),%ebx
+-      # i -= 4
+-      sub     $4,%ebp
+-      # goto mainloop if unsigned >
+-      ja      ._mainloop
+-      # x0 = p
+-      movl    %eax,100(%esp)
+-      # x5 = s
+-      movl    %ecx,120(%esp)
+-      # x10 = t
+-      movl    %edx,140(%esp)
+-      # x15 = w
+-      movl    %ebx,160(%esp)
+-      #   out = out_backup
+-      movl    72(%esp),%edi
+-      #   m = m_backup
+-      movl    68(%esp),%esi
+-      #   in0 = x0
+-      movl    100(%esp),%eax
+-      #   in1 = x1
+-      movl    104(%esp),%ecx
+-      #   in0 += j0
+-      addl    164(%esp),%eax
+-      #   in1 += j1
+-      addl    168(%esp),%ecx
+-      #   in0 ^= *(uint32 *) (m + 0)
+-      xorl    0(%esi),%eax
+-      #   in1 ^= *(uint32 *) (m + 4)
+-      xorl    4(%esi),%ecx
+-      #   *(uint32 *) (out + 0) = in0
+-      movl    %eax,0(%edi)
+-      #   *(uint32 *) (out + 4) = in1
+-      movl    %ecx,4(%edi)
+-      #   in2 = x2
+-      movl    108(%esp),%eax
+-      #   in3 = x3
+-      movl    112(%esp),%ecx
+-      #   in2 += j2
+-      addl    172(%esp),%eax
+-      #   in3 += j3
+-      addl    176(%esp),%ecx
+-      #   in2 ^= *(uint32 *) (m + 8)
+-      xorl    8(%esi),%eax
+-      #   in3 ^= *(uint32 *) (m + 12)
+-      xorl    12(%esi),%ecx
+-      #   *(uint32 *) (out + 8) = in2
+-      movl    %eax,8(%edi)
+-      #   *(uint32 *) (out + 12) = in3
+-      movl    %ecx,12(%edi)
+-      #   in4 = x4
+-      movl    116(%esp),%eax
+-      #   in5 = x5
+-      movl    120(%esp),%ecx
+-      #   in4 += j4
+-      addl    180(%esp),%eax
+-      #   in5 += j5
+-      addl    184(%esp),%ecx
+-      #   in4 ^= *(uint32 *) (m + 16)
+-      xorl    16(%esi),%eax
+-      #   in5 ^= *(uint32 *) (m + 20)
+-      xorl    20(%esi),%ecx
+-      #   *(uint32 *) (out + 16) = in4
+-      movl    %eax,16(%edi)
+-      #   *(uint32 *) (out + 20) = in5
+-      movl    %ecx,20(%edi)
+-      #   in6 = x6
+-      movl    124(%esp),%eax
+-      #   in7 = x7
+-      movl    128(%esp),%ecx
+-      #   in6 += j6
+-      addl    188(%esp),%eax
+-      #   in7 += j7
+-      addl    192(%esp),%ecx
+-      #   in6 ^= *(uint32 *) (m + 24)
+-      xorl    24(%esi),%eax
+-      #   in7 ^= *(uint32 *) (m + 28)
+-      xorl    28(%esi),%ecx
+-      #   *(uint32 *) (out + 24) = in6
+-      movl    %eax,24(%edi)
+-      #   *(uint32 *) (out + 28) = in7
+-      movl    %ecx,28(%edi)
+-      #   in8 = x8
+-      movl    132(%esp),%eax
+-      #   in9 = x9
+-      movl    136(%esp),%ecx
+-      #   in8 += j8
+-      addl    196(%esp),%eax
+-      #   in9 += j9
+-      addl    200(%esp),%ecx
+-      #   in8 ^= *(uint32 *) (m + 32)
+-      xorl    32(%esi),%eax
+-      #   in9 ^= *(uint32 *) (m + 36)
+-      xorl    36(%esi),%ecx
+-      #   *(uint32 *) (out + 32) = in8
+-      movl    %eax,32(%edi)
+-      #   *(uint32 *) (out + 36) = in9
+-      movl    %ecx,36(%edi)
+-      #   in10 = x10
+-      movl    140(%esp),%eax
+-      #   in11 = x11
+-      movl    144(%esp),%ecx
+-      #   in10 += j10
+-      addl    204(%esp),%eax
+-      #   in11 += j11
+-      addl    208(%esp),%ecx
+-      #   in10 ^= *(uint32 *) (m + 40)
+-      xorl    40(%esi),%eax
+-      #   in11 ^= *(uint32 *) (m + 44)
+-      xorl    44(%esi),%ecx
+-      #   *(uint32 *) (out + 40) = in10
+-      movl    %eax,40(%edi)
+-      #   *(uint32 *) (out + 44) = in11
+-      movl    %ecx,44(%edi)
+-      #   in12 = x12
+-      movl    148(%esp),%eax
+-      #   in13 = x13
+-      movl    152(%esp),%ecx
+-      #   in12 += j12
+-      addl    212(%esp),%eax
+-      #   in13 += j13
+-      addl    216(%esp),%ecx
+-      #   in12 ^= *(uint32 *) (m + 48)
+-      xorl    48(%esi),%eax
+-      #   in13 ^= *(uint32 *) (m + 52)
+-      xorl    52(%esi),%ecx
+-      #   *(uint32 *) (out + 48) = in12
+-      movl    %eax,48(%edi)
+-      #   *(uint32 *) (out + 52) = in13
+-      movl    %ecx,52(%edi)
+-      #   in14 = x14
+-      movl    156(%esp),%eax
+-      #   in15 = x15
+-      movl    160(%esp),%ecx
+-      #   in14 += j14
+-      addl    220(%esp),%eax
+-      #   in15 += j15
+-      addl    224(%esp),%ecx
+-      #   in14 ^= *(uint32 *) (m + 56)
+-      xorl    56(%esi),%eax
+-      #   in15 ^= *(uint32 *) (m + 60)
+-      xorl    60(%esi),%ecx
+-      #   *(uint32 *) (out + 56) = in14
+-      movl    %eax,56(%edi)
+-      #   *(uint32 *) (out + 60) = in15
+-      movl    %ecx,60(%edi)
+-      #   bytes = bytes_backup
+-      movl    76(%esp),%ebx
+-      #   in8 = j8
+-      movl    196(%esp),%eax
+-      #   in9 = j9
+-      movl    200(%esp),%ecx
+-      #   in8 += 1
+-      add     $1,%eax
+-      #   in9 += 0 + carry
+-      adc     $0,%ecx
+-      #   j8 = in8
+-      movl    %eax,196(%esp)
+-      #   j9 = in9
+-      movl    %ecx,200(%esp)
+-      #   bytes - 64
+-      cmp     $64,%ebx
+-      #   goto bytesatleast65 if unsigned>
+-      ja      ._bytesatleast65
+-      #     goto bytesatleast64 if unsigned>=
+-      jae     ._bytesatleast64
+-      #       m = out
+-      mov     %edi,%esi
+-      #       out = ctarget
+-      movl    228(%esp),%edi
+-      #       i = bytes
+-      mov     %ebx,%ecx
+-      #       while (i) { *out++ = *m++; --i }
+-      rep     movsb
+-._bytesatleast64:
+-      #     x = x_backup
+-      movl    64(%esp),%eax
+-      #     in8 = j8
+-      movl    196(%esp),%ecx
+-      #     in9 = j9
+-      movl    200(%esp),%edx
+-      #     *(uint32 *) (x + 32) = in8
+-      movl    %ecx,32(%eax)
+-      #     *(uint32 *) (x + 36) = in9
+-      movl    %edx,36(%eax)
+-._done:
+-      #     eax = eax_stack
+-      movl    80(%esp),%eax
+-      #     ebx = ebx_stack
+-      movl    84(%esp),%ebx
+-      #     esi = esi_stack
+-      movl    88(%esp),%esi
+-      #     edi = edi_stack
+-      movl    92(%esp),%edi
+-      #     ebp = ebp_stack
+-      movl    96(%esp),%ebp
+-      #     leave
+-      add     %eax,%esp
+-      ret
+-._bytesatleast65:
+-      #   bytes -= 64
+-      sub     $64,%ebx
+-      #   out += 64
+-      add     $64,%edi
+-      #   m += 64
+-      add     $64,%esi
+-      # goto bytesatleast1
+-      jmp     ._bytesatleast1
+-ENDPROC(salsa20_encrypt_bytes)
+-
+-# enter salsa20_keysetup
+-ENTRY(salsa20_keysetup)
+-      mov     %esp,%eax
+-      and     $31,%eax
+-      add     $256,%eax
+-      sub     %eax,%esp
+-      #   eax_stack = eax
+-      movl    %eax,64(%esp)
+-      #   ebx_stack = ebx
+-      movl    %ebx,68(%esp)
+-      #   esi_stack = esi
+-      movl    %esi,72(%esp)
+-      #   edi_stack = edi
+-      movl    %edi,76(%esp)
+-      #   ebp_stack = ebp
+-      movl    %ebp,80(%esp)
+-      #   k = arg2
+-      movl    8(%esp,%eax),%ecx
+-      #   kbits = arg3
+-      movl    12(%esp,%eax),%edx
+-      #   x = arg1
+-      movl    4(%esp,%eax),%eax
+-      #   in1 = *(uint32 *) (k + 0)
+-      movl    0(%ecx),%ebx
+-      #   in2 = *(uint32 *) (k + 4)
+-      movl    4(%ecx),%esi
+-      #   in3 = *(uint32 *) (k + 8)
+-      movl    8(%ecx),%edi
+-      #   in4 = *(uint32 *) (k + 12)
+-      movl    12(%ecx),%ebp
+-      #   *(uint32 *) (x + 4) = in1
+-      movl    %ebx,4(%eax)
+-      #   *(uint32 *) (x + 8) = in2
+-      movl    %esi,8(%eax)
+-      #   *(uint32 *) (x + 12) = in3
+-      movl    %edi,12(%eax)
+-      #   *(uint32 *) (x + 16) = in4
+-      movl    %ebp,16(%eax)
+-      #   kbits - 256
+-      cmp     $256,%edx
+-      #   goto kbits128 if unsigned<
+-      jb      ._kbits128
+-._kbits256:
+-      #     in11 = *(uint32 *) (k + 16)
+-      movl    16(%ecx),%edx
+-      #     in12 = *(uint32 *) (k + 20)
+-      movl    20(%ecx),%ebx
+-      #     in13 = *(uint32 *) (k + 24)
+-      movl    24(%ecx),%esi
+-      #     in14 = *(uint32 *) (k + 28)
+-      movl    28(%ecx),%ecx
+-      #     *(uint32 *) (x + 44) = in11
+-      movl    %edx,44(%eax)
+-      #     *(uint32 *) (x + 48) = in12
+-      movl    %ebx,48(%eax)
+-      #     *(uint32 *) (x + 52) = in13
+-      movl    %esi,52(%eax)
+-      #     *(uint32 *) (x + 56) = in14
+-      movl    %ecx,56(%eax)
+-      #     in0 = 1634760805
+-      mov     $1634760805,%ecx
+-      #     in5 = 857760878
+-      mov     $857760878,%edx
+-      #     in10 = 2036477234
+-      mov     $2036477234,%ebx
+-      #     in15 = 1797285236
+-      mov     $1797285236,%esi
+-      #     *(uint32 *) (x + 0) = in0
+-      movl    %ecx,0(%eax)
+-      #     *(uint32 *) (x + 20) = in5
+-      movl    %edx,20(%eax)
+-      #     *(uint32 *) (x + 40) = in10
+-      movl    %ebx,40(%eax)
+-      #     *(uint32 *) (x + 60) = in15
+-      movl    %esi,60(%eax)
+-      #   goto keysetupdone
+-      jmp     ._keysetupdone
+-._kbits128:
+-      #     in11 = *(uint32 *) (k + 0)
+-      movl    0(%ecx),%edx
+-      #     in12 = *(uint32 *) (k + 4)
+-      movl    4(%ecx),%ebx
+-      #     in13 = *(uint32 *) (k + 8)
+-      movl    8(%ecx),%esi
+-      #     in14 = *(uint32 *) (k + 12)
+-      movl    12(%ecx),%ecx
+-      #     *(uint32 *) (x + 44) = in11
+-      movl    %edx,44(%eax)
+-      #     *(uint32 *) (x + 48) = in12
+-      movl    %ebx,48(%eax)
+-      #     *(uint32 *) (x + 52) = in13
+-      movl    %esi,52(%eax)
+-      #     *(uint32 *) (x + 56) = in14
+-      movl    %ecx,56(%eax)
+-      #     in0 = 1634760805
+-      mov     $1634760805,%ecx
+-      #     in5 = 824206446
+-      mov     $824206446,%edx
+-      #     in10 = 2036477238
+-      mov     $2036477238,%ebx
+-      #     in15 = 1797285236
+-      mov     $1797285236,%esi
+-      #     *(uint32 *) (x + 0) = in0
+-      movl    %ecx,0(%eax)
+-      #     *(uint32 *) (x + 20) = in5
+-      movl    %edx,20(%eax)
+-      #     *(uint32 *) (x + 40) = in10
+-      movl    %ebx,40(%eax)
+-      #     *(uint32 *) (x + 60) = in15
+-      movl    %esi,60(%eax)
+-._keysetupdone:
+-      #   eax = eax_stack
+-      movl    64(%esp),%eax
+-      #   ebx = ebx_stack
+-      movl    68(%esp),%ebx
+-      #   esi = esi_stack
+-      movl    72(%esp),%esi
+-      #   edi = edi_stack
+-      movl    76(%esp),%edi
+-      #   ebp = ebp_stack
+-      movl    80(%esp),%ebp
+-      # leave
+-      add     %eax,%esp
+-      ret
+-ENDPROC(salsa20_keysetup)
+-
+-# enter salsa20_ivsetup
+-ENTRY(salsa20_ivsetup)
+-      mov     %esp,%eax
+-      and     $31,%eax
+-      add     $256,%eax
+-      sub     %eax,%esp
+-      #   eax_stack = eax
+-      movl    %eax,64(%esp)
+-      #   ebx_stack = ebx
+-      movl    %ebx,68(%esp)
+-      #   esi_stack = esi
+-      movl    %esi,72(%esp)
+-      #   edi_stack = edi
+-      movl    %edi,76(%esp)
+-      #   ebp_stack = ebp
+-      movl    %ebp,80(%esp)
+-      #   iv = arg2
+-      movl    8(%esp,%eax),%ecx
+-      #   x = arg1
+-      movl    4(%esp,%eax),%eax
+-      #   in6 = *(uint32 *) (iv + 0)
+-      movl    0(%ecx),%edx
+-      #   in7 = *(uint32 *) (iv + 4)
+-      movl    4(%ecx),%ecx
+-      #   in8 = 0
+-      mov     $0,%ebx
+-      #   in9 = 0
+-      mov     $0,%esi
+-      #   *(uint32 *) (x + 24) = in6
+-      movl    %edx,24(%eax)
+-      #   *(uint32 *) (x + 28) = in7
+-      movl    %ecx,28(%eax)
+-      #   *(uint32 *) (x + 32) = in8
+-      movl    %ebx,32(%eax)
+-      #   *(uint32 *) (x + 36) = in9
+-      movl    %esi,36(%eax)
+-      #   eax = eax_stack
+-      movl    64(%esp),%eax
+-      #   ebx = ebx_stack
+-      movl    68(%esp),%ebx
+-      #   esi = esi_stack
+-      movl    72(%esp),%esi
+-      #   edi = edi_stack
+-      movl    76(%esp),%edi
+-      #   ebp = ebp_stack
+-      movl    80(%esp),%ebp
+-      # leave
+-      add     %eax,%esp
+-      ret
+-ENDPROC(salsa20_ivsetup)
+--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
++++ /dev/null
+@@ -1,919 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#include <linux/linkage.h>
+-
+-# enter salsa20_encrypt_bytes
+-ENTRY(salsa20_encrypt_bytes)
+-      mov     %rsp,%r11
+-      and     $31,%r11
+-      add     $256,%r11
+-      sub     %r11,%rsp
+-      # x = arg1
+-      mov     %rdi,%r8
+-      # m = arg2
+-      mov     %rsi,%rsi
+-      # out = arg3
+-      mov     %rdx,%rdi
+-      # bytes = arg4
+-      mov     %rcx,%rdx
+-      #               unsigned>? bytes - 0
+-      cmp     $0,%rdx
+-      # comment:fp stack unchanged by jump
+-      # goto done if !unsigned>
+-      jbe     ._done
+-      # comment:fp stack unchanged by fallthrough
+-# start:
+-._start:
+-      # r11_stack = r11
+-      movq    %r11,0(%rsp)
+-      # r12_stack = r12
+-      movq    %r12,8(%rsp)
+-      # r13_stack = r13
+-      movq    %r13,16(%rsp)
+-      # r14_stack = r14
+-      movq    %r14,24(%rsp)
+-      # r15_stack = r15
+-      movq    %r15,32(%rsp)
+-      # rbx_stack = rbx
+-      movq    %rbx,40(%rsp)
+-      # rbp_stack = rbp
+-      movq    %rbp,48(%rsp)
+-      # in0 = *(uint64 *) (x + 0)
+-      movq    0(%r8),%rcx
+-      # in2 = *(uint64 *) (x + 8)
+-      movq    8(%r8),%r9
+-      # in4 = *(uint64 *) (x + 16)
+-      movq    16(%r8),%rax
+-      # in6 = *(uint64 *) (x + 24)
+-      movq    24(%r8),%r10
+-      # in8 = *(uint64 *) (x + 32)
+-      movq    32(%r8),%r11
+-      # in10 = *(uint64 *) (x + 40)
+-      movq    40(%r8),%r12
+-      # in12 = *(uint64 *) (x + 48)
+-      movq    48(%r8),%r13
+-      # in14 = *(uint64 *) (x + 56)
+-      movq    56(%r8),%r14
+-      # j0 = in0
+-      movq    %rcx,56(%rsp)
+-      # j2 = in2
+-      movq    %r9,64(%rsp)
+-      # j4 = in4
+-      movq    %rax,72(%rsp)
+-      # j6 = in6
+-      movq    %r10,80(%rsp)
+-      # j8 = in8
+-      movq    %r11,88(%rsp)
+-      # j10 = in10
+-      movq    %r12,96(%rsp)
+-      # j12 = in12
+-      movq    %r13,104(%rsp)
+-      # j14 = in14
+-      movq    %r14,112(%rsp)
+-      # x_backup = x
+-      movq    %r8,120(%rsp)
+-# bytesatleast1:
+-._bytesatleast1:
+-      #                   unsigned<? bytes - 64
+-      cmp     $64,%rdx
+-      # comment:fp stack unchanged by jump
+-      #   goto nocopy if !unsigned<
+-      jae     ._nocopy
+-      #     ctarget = out
+-      movq    %rdi,128(%rsp)
+-      #     out = &tmp
+-      leaq    192(%rsp),%rdi
+-      #     i = bytes
+-      mov     %rdx,%rcx
+-      #     while (i) { *out++ = *m++; --i }
+-      rep     movsb
+-      #     out = &tmp
+-      leaq    192(%rsp),%rdi
+-      #     m = &tmp
+-      leaq    192(%rsp),%rsi
+-      # comment:fp stack unchanged by fallthrough
+-#   nocopy:
+-._nocopy:
+-      #   out_backup = out
+-      movq    %rdi,136(%rsp)
+-      #   m_backup = m
+-      movq    %rsi,144(%rsp)
+-      #   bytes_backup = bytes
+-      movq    %rdx,152(%rsp)
+-      #   x1 = j0
+-      movq    56(%rsp),%rdi
+-      #   x0 = x1
+-      mov     %rdi,%rdx
+-      #   (uint64) x1 >>= 32
+-      shr     $32,%rdi
+-      #               x3 = j2
+-      movq    64(%rsp),%rsi
+-      #               x2 = x3
+-      mov     %rsi,%rcx
+-      #               (uint64) x3 >>= 32
+-      shr     $32,%rsi
+-      #   x5 = j4
+-      movq    72(%rsp),%r8
+-      #   x4 = x5
+-      mov     %r8,%r9
+-      #   (uint64) x5 >>= 32
+-      shr     $32,%r8
+-      #   x5_stack = x5
+-      movq    %r8,160(%rsp)
+-      #               x7 = j6
+-      movq    80(%rsp),%r8
+-      #               x6 = x7
+-      mov     %r8,%rax
+-      #               (uint64) x7 >>= 32
+-      shr     $32,%r8
+-      #   x9 = j8
+-      movq    88(%rsp),%r10
+-      #   x8 = x9
+-      mov     %r10,%r11
+-      #   (uint64) x9 >>= 32
+-      shr     $32,%r10
+-      #               x11 = j10
+-      movq    96(%rsp),%r12
+-      #               x10 = x11
+-      mov     %r12,%r13
+-      #               x10_stack = x10
+-      movq    %r13,168(%rsp)
+-      #               (uint64) x11 >>= 32
+-      shr     $32,%r12
+-      #   x13 = j12
+-      movq    104(%rsp),%r13
+-      #   x12 = x13
+-      mov     %r13,%r14
+-      #   (uint64) x13 >>= 32
+-      shr     $32,%r13
+-      #               x15 = j14
+-      movq    112(%rsp),%r15
+-      #               x14 = x15
+-      mov     %r15,%rbx
+-      #               (uint64) x15 >>= 32
+-      shr     $32,%r15
+-      #               x15_stack = x15
+-      movq    %r15,176(%rsp)
+-      #   i = 20
+-      mov     $20,%r15
+-#   mainloop:
+-._mainloop:
+-      #   i_backup = i
+-      movq    %r15,184(%rsp)
+-      #               x5 = x5_stack
+-      movq    160(%rsp),%r15
+-      # a = x12 + x0
+-      lea     (%r14,%rdx),%rbp
+-      # (uint32) a <<<= 7
+-      rol     $7,%ebp
+-      # x4 ^= a
+-      xor     %rbp,%r9
+-      #               b = x1 + x5
+-      lea     (%rdi,%r15),%rbp
+-      #               (uint32) b <<<= 7
+-      rol     $7,%ebp
+-      #               x9 ^= b
+-      xor     %rbp,%r10
+-      # a = x0 + x4
+-      lea     (%rdx,%r9),%rbp
+-      # (uint32) a <<<= 9
+-      rol     $9,%ebp
+-      # x8 ^= a
+-      xor     %rbp,%r11
+-      #               b = x5 + x9
+-      lea     (%r15,%r10),%rbp
+-      #               (uint32) b <<<= 9
+-      rol     $9,%ebp
+-      #               x13 ^= b
+-      xor     %rbp,%r13
+-      # a = x4 + x8
+-      lea     (%r9,%r11),%rbp
+-      # (uint32) a <<<= 13
+-      rol     $13,%ebp
+-      # x12 ^= a
+-      xor     %rbp,%r14
+-      #               b = x9 + x13
+-      lea     (%r10,%r13),%rbp
+-      #               (uint32) b <<<= 13
+-      rol     $13,%ebp
+-      #               x1 ^= b
+-      xor     %rbp,%rdi
+-      # a = x8 + x12
+-      lea     (%r11,%r14),%rbp
+-      # (uint32) a <<<= 18
+-      rol     $18,%ebp
+-      # x0 ^= a
+-      xor     %rbp,%rdx
+-      #               b = x13 + x1
+-      lea     (%r13,%rdi),%rbp
+-      #               (uint32) b <<<= 18
+-      rol     $18,%ebp
+-      #               x5 ^= b
+-      xor     %rbp,%r15
+-      #                               x10 = x10_stack
+-      movq    168(%rsp),%rbp
+-      #               x5_stack = x5
+-      movq    %r15,160(%rsp)
+-      #                               c = x6 + x10
+-      lea     (%rax,%rbp),%r15
+-      #                               (uint32) c <<<= 7
+-      rol     $7,%r15d
+-      #                               x14 ^= c
+-      xor     %r15,%rbx
+-      #                               c = x10 + x14
+-      lea     (%rbp,%rbx),%r15
+-      #                               (uint32) c <<<= 9
+-      rol     $9,%r15d
+-      #                               x2 ^= c
+-      xor     %r15,%rcx
+-      #                               c = x14 + x2
+-      lea     (%rbx,%rcx),%r15
+-      #                               (uint32) c <<<= 13
+-      rol     $13,%r15d
+-      #                               x6 ^= c
+-      xor     %r15,%rax
+-      #                               c = x2 + x6
+-      lea     (%rcx,%rax),%r15
+-      #                               (uint32) c <<<= 18
+-      rol     $18,%r15d
+-      #                               x10 ^= c
+-      xor     %r15,%rbp
+-      #                                               x15 = x15_stack
+-      movq    176(%rsp),%r15
+-      #                               x10_stack = x10
+-      movq    %rbp,168(%rsp)
+-      #                                               d = x11 + x15
+-      lea     (%r12,%r15),%rbp
+-      #                                               (uint32) d <<<= 7
+-      rol     $7,%ebp
+-      #                                               x3 ^= d
+-      xor     %rbp,%rsi
+-      #                                               d = x15 + x3
+-      lea     (%r15,%rsi),%rbp
+-      #                                               (uint32) d <<<= 9
+-      rol     $9,%ebp
+-      #                                               x7 ^= d
+-      xor     %rbp,%r8
+-      #                                               d = x3 + x7
+-      lea     (%rsi,%r8),%rbp
+-      #                                               (uint32) d <<<= 13
+-      rol     $13,%ebp
+-      #                                               x11 ^= d
+-      xor     %rbp,%r12
+-      #                                               d = x7 + x11
+-      lea     (%r8,%r12),%rbp
+-      #                                               (uint32) d <<<= 18
+-      rol     $18,%ebp
+-      #                                               x15 ^= d
+-      xor     %rbp,%r15
+-      #                                               x15_stack = x15
+-      movq    %r15,176(%rsp)
+-      #               x5 = x5_stack
+-      movq    160(%rsp),%r15
+-      # a = x3 + x0
+-      lea     (%rsi,%rdx),%rbp
+-      # (uint32) a <<<= 7
+-      rol     $7,%ebp
+-      # x1 ^= a
+-      xor     %rbp,%rdi
+-      #               b = x4 + x5
+-      lea     (%r9,%r15),%rbp
+-      #               (uint32) b <<<= 7
+-      rol     $7,%ebp
+-      #               x6 ^= b
+-      xor     %rbp,%rax
+-      # a = x0 + x1
+-      lea     (%rdx,%rdi),%rbp
+-      # (uint32) a <<<= 9
+-      rol     $9,%ebp
+-      # x2 ^= a
+-      xor     %rbp,%rcx
+-      #               b = x5 + x6
+-      lea     (%r15,%rax),%rbp
+-      #               (uint32) b <<<= 9
+-      rol     $9,%ebp
+-      #               x7 ^= b
+-      xor     %rbp,%r8
+-      # a = x1 + x2
+-      lea     (%rdi,%rcx),%rbp
+-      # (uint32) a <<<= 13
+-      rol     $13,%ebp
+-      # x3 ^= a
+-      xor     %rbp,%rsi
+-      #               b = x6 + x7
+-      lea     (%rax,%r8),%rbp
+-      #               (uint32) b <<<= 13
+-      rol     $13,%ebp
+-      #               x4 ^= b
+-      xor     %rbp,%r9
+-      # a = x2 + x3
+-      lea     (%rcx,%rsi),%rbp
+-      # (uint32) a <<<= 18
+-      rol     $18,%ebp
+-      # x0 ^= a
+-      xor     %rbp,%rdx
+-      #               b = x7 + x4
+-      lea     (%r8,%r9),%rbp
+-      #               (uint32) b <<<= 18
+-      rol     $18,%ebp
+-      #               x5 ^= b
+-      xor     %rbp,%r15
+-      #                               x10 = x10_stack
+-      movq    168(%rsp),%rbp
+-      #               x5_stack = x5
+-      movq    %r15,160(%rsp)
+-      #                               c = x9 + x10
+-      lea     (%r10,%rbp),%r15
+-      #                               (uint32) c <<<= 7
+-      rol     $7,%r15d
+-      #                               x11 ^= c
+-      xor     %r15,%r12
+-      #                               c = x10 + x11
+-      lea     (%rbp,%r12),%r15
+-      #                               (uint32) c <<<= 9
+-      rol     $9,%r15d
+-      #                               x8 ^= c
+-      xor     %r15,%r11
+-      #                               c = x11 + x8
+-      lea     (%r12,%r11),%r15
+-      #                               (uint32) c <<<= 13
+-      rol     $13,%r15d
+-      #                               x9 ^= c
+-      xor     %r15,%r10
+-      #                               c = x8 + x9
+-      lea     (%r11,%r10),%r15
+-      #                               (uint32) c <<<= 18
+-      rol     $18,%r15d
+-      #                               x10 ^= c
+-      xor     %r15,%rbp
+-      #                                               x15 = x15_stack
+-      movq    176(%rsp),%r15
+-      #                               x10_stack = x10
+-      movq    %rbp,168(%rsp)
+-      #                                               d = x14 + x15
+-      lea     (%rbx,%r15),%rbp
+-      #                                               (uint32) d <<<= 7
+-      rol     $7,%ebp
+-      #                                               x12 ^= d
+-      xor     %rbp,%r14
+-      #                                               d = x15 + x12
+-      lea     (%r15,%r14),%rbp
+-      #                                               (uint32) d <<<= 9
+-      rol     $9,%ebp
+-      #                                               x13 ^= d
+-      xor     %rbp,%r13
+-      #                                               d = x12 + x13
+-      lea     (%r14,%r13),%rbp
+-      #                                               (uint32) d <<<= 13
+-      rol     $13,%ebp
+-      #                                               x14 ^= d
+-      xor     %rbp,%rbx
+-      #                                               d = x13 + x14
+-      lea     (%r13,%rbx),%rbp
+-      #                                               (uint32) d <<<= 18
+-      rol     $18,%ebp
+-      #                                               x15 ^= d
+-      xor     %rbp,%r15
+-      #                                               x15_stack = x15
+-      movq    %r15,176(%rsp)
+-      #               x5 = x5_stack
+-      movq    160(%rsp),%r15
+-      # a = x12 + x0
+-      lea     (%r14,%rdx),%rbp
+-      # (uint32) a <<<= 7
+-      rol     $7,%ebp
+-      # x4 ^= a
+-      xor     %rbp,%r9
+-      #               b = x1 + x5
+-      lea     (%rdi,%r15),%rbp
+-      #               (uint32) b <<<= 7
+-      rol     $7,%ebp
+-      #               x9 ^= b
+-      xor     %rbp,%r10
+-      # a = x0 + x4
+-      lea     (%rdx,%r9),%rbp
+-      # (uint32) a <<<= 9
+-      rol     $9,%ebp
+-      # x8 ^= a
+-      xor     %rbp,%r11
+-      #               b = x5 + x9
+-      lea     (%r15,%r10),%rbp
+-      #               (uint32) b <<<= 9
+-      rol     $9,%ebp
+-      #               x13 ^= b
+-      xor     %rbp,%r13
+-      # a = x4 + x8
+-      lea     (%r9,%r11),%rbp
+-      # (uint32) a <<<= 13
+-      rol     $13,%ebp
+-      # x12 ^= a
+-      xor     %rbp,%r14
+-      #               b = x9 + x13
+-      lea     (%r10,%r13),%rbp
+-      #               (uint32) b <<<= 13
+-      rol     $13,%ebp
+-      #               x1 ^= b
+-      xor     %rbp,%rdi
+-      # a = x8 + x12
+-      lea     (%r11,%r14),%rbp
+-      # (uint32) a <<<= 18
+-      rol     $18,%ebp
+-      # x0 ^= a
+-      xor     %rbp,%rdx
+-      #               b = x13 + x1
+-      lea     (%r13,%rdi),%rbp
+-      #               (uint32) b <<<= 18
+-      rol     $18,%ebp
+-      #               x5 ^= b
+-      xor     %rbp,%r15
+-      #                               x10 = x10_stack
+-      movq    168(%rsp),%rbp
+-      #               x5_stack = x5
+-      movq    %r15,160(%rsp)
+-      #                               c = x6 + x10
+-      lea     (%rax,%rbp),%r15
+-      #                               (uint32) c <<<= 7
+-      rol     $7,%r15d
+-      #                               x14 ^= c
+-      xor     %r15,%rbx
+-      #                               c = x10 + x14
+-      lea     (%rbp,%rbx),%r15
+-      #                               (uint32) c <<<= 9
+-      rol     $9,%r15d
+-      #                               x2 ^= c
+-      xor     %r15,%rcx
+-      #                               c = x14 + x2
+-      lea     (%rbx,%rcx),%r15
+-      #                               (uint32) c <<<= 13
+-      rol     $13,%r15d
+-      #                               x6 ^= c
+-      xor     %r15,%rax
+-      #                               c = x2 + x6
+-      lea     (%rcx,%rax),%r15
+-      #                               (uint32) c <<<= 18
+-      rol     $18,%r15d
+-      #                               x10 ^= c
+-      xor     %r15,%rbp
+-      #                                               x15 = x15_stack
+-      movq    176(%rsp),%r15
+-      #                               x10_stack = x10
+-      movq    %rbp,168(%rsp)
+-      #                                               d = x11 + x15
+-      lea     (%r12,%r15),%rbp
+-      #                                               (uint32) d <<<= 7
+-      rol     $7,%ebp
+-      #                                               x3 ^= d
+-      xor     %rbp,%rsi
+-      #                                               d = x15 + x3
+-      lea     (%r15,%rsi),%rbp
+-      #                                               (uint32) d <<<= 9
+-      rol     $9,%ebp
+-      #                                               x7 ^= d
+-      xor     %rbp,%r8
+-      #                                               d = x3 + x7
+-      lea     (%rsi,%r8),%rbp
+-      #                                               (uint32) d <<<= 13
+-      rol     $13,%ebp
+-      #                                               x11 ^= d
+-      xor     %rbp,%r12
+-      #                                               d = x7 + x11
+-      lea     (%r8,%r12),%rbp
+-      #                                               (uint32) d <<<= 18
+-      rol     $18,%ebp
+-      #                                               x15 ^= d
+-      xor     %rbp,%r15
+-      #                                               x15_stack = x15
+-      movq    %r15,176(%rsp)
+-      #               x5 = x5_stack
+-      movq    160(%rsp),%r15
+-      # a = x3 + x0
+-      lea     (%rsi,%rdx),%rbp
+-      # (uint32) a <<<= 7
+-      rol     $7,%ebp
+-      # x1 ^= a
+-      xor     %rbp,%rdi
+-      #               b = x4 + x5
+-      lea     (%r9,%r15),%rbp
+-      #               (uint32) b <<<= 7
+-      rol     $7,%ebp
+-      #               x6 ^= b
+-      xor     %rbp,%rax
+-      # a = x0 + x1
+-      lea     (%rdx,%rdi),%rbp
+-      # (uint32) a <<<= 9
+-      rol     $9,%ebp
+-      # x2 ^= a
+-      xor     %rbp,%rcx
+-      #               b = x5 + x6
+-      lea     (%r15,%rax),%rbp
+-      #               (uint32) b <<<= 9
+-      rol     $9,%ebp
+-      #               x7 ^= b
+-      xor     %rbp,%r8
+-      # a = x1 + x2
+-      lea     (%rdi,%rcx),%rbp
+-      # (uint32) a <<<= 13
+-      rol     $13,%ebp
+-      # x3 ^= a
+-      xor     %rbp,%rsi
+-      #               b = x6 + x7
+-      lea     (%rax,%r8),%rbp
+-      #               (uint32) b <<<= 13
+-      rol     $13,%ebp
+-      #               x4 ^= b
+-      xor     %rbp,%r9
+-      # a = x2 + x3
+-      lea     (%rcx,%rsi),%rbp
+-      # (uint32) a <<<= 18
+-      rol     $18,%ebp
+-      # x0 ^= a
+-      xor     %rbp,%rdx
+-      #               b = x7 + x4
+-      lea     (%r8,%r9),%rbp
+-      #               (uint32) b <<<= 18
+-      rol     $18,%ebp
+-      #               x5 ^= b
+-      xor     %rbp,%r15
+-      #                               x10 = x10_stack
+-      movq    168(%rsp),%rbp
+-      #               x5_stack = x5
+-      movq    %r15,160(%rsp)
+-      #                               c = x9 + x10
+-      lea     (%r10,%rbp),%r15
+-      #                               (uint32) c <<<= 7
+-      rol     $7,%r15d
+-      #                               x11 ^= c
+-      xor     %r15,%r12
+-      #                               c = x10 + x11
+-      lea     (%rbp,%r12),%r15
+-      #                               (uint32) c <<<= 9
+-      rol     $9,%r15d
+-      #                               x8 ^= c
+-      xor     %r15,%r11
+-      #                               c = x11 + x8
+-      lea     (%r12,%r11),%r15
+-      #                               (uint32) c <<<= 13
+-      rol     $13,%r15d
+-      #                               x9 ^= c
+-      xor     %r15,%r10
+-      #                               c = x8 + x9
+-      lea     (%r11,%r10),%r15
+-      #                               (uint32) c <<<= 18
+-      rol     $18,%r15d
+-      #                               x10 ^= c
+-      xor     %r15,%rbp
+-      #                                               x15 = x15_stack
+-      movq    176(%rsp),%r15
+-      #                               x10_stack = x10
+-      movq    %rbp,168(%rsp)
+-      #                                               d = x14 + x15
+-      lea     (%rbx,%r15),%rbp
+-      #                                               (uint32) d <<<= 7
+-      rol     $7,%ebp
+-      #                                               x12 ^= d
+-      xor     %rbp,%r14
+-      #                                               d = x15 + x12
+-      lea     (%r15,%r14),%rbp
+-      #                                               (uint32) d <<<= 9
+-      rol     $9,%ebp
+-      #                                               x13 ^= d
+-      xor     %rbp,%r13
+-      #                                               d = x12 + x13
+-      lea     (%r14,%r13),%rbp
+-      #                                               (uint32) d <<<= 13
+-      rol     $13,%ebp
+-      #                                               x14 ^= d
+-      xor     %rbp,%rbx
+-      #                                               d = x13 + x14
+-      lea     (%r13,%rbx),%rbp
+-      #                                               (uint32) d <<<= 18
+-      rol     $18,%ebp
+-      #                                               x15 ^= d
+-      xor     %rbp,%r15
+-      #                                               x15_stack = x15
+-      movq    %r15,176(%rsp)
+-      #   i = i_backup
+-      movq    184(%rsp),%r15
+-      #                  unsigned>? i -= 4
+-      sub     $4,%r15
+-      # comment:fp stack unchanged by jump
+-      # goto mainloop if unsigned>
+-      ja      ._mainloop
+-      #   (uint32) x2 += j2
+-      addl    64(%rsp),%ecx
+-      #   x3 <<= 32
+-      shl     $32,%rsi
+-      #   x3 += j2
+-      addq    64(%rsp),%rsi
+-      #   (uint64) x3 >>= 32
+-      shr     $32,%rsi
+-      #   x3 <<= 32
+-      shl     $32,%rsi
+-      #   x2 += x3
+-      add     %rsi,%rcx
+-      #   (uint32) x6 += j6
+-      addl    80(%rsp),%eax
+-      #   x7 <<= 32
+-      shl     $32,%r8
+-      #   x7 += j6
+-      addq    80(%rsp),%r8
+-      #   (uint64) x7 >>= 32
+-      shr     $32,%r8
+-      #   x7 <<= 32
+-      shl     $32,%r8
+-      #   x6 += x7
+-      add     %r8,%rax
+-      #   (uint32) x8 += j8
+-      addl    88(%rsp),%r11d
+-      #   x9 <<= 32
+-      shl     $32,%r10
+-      #   x9 += j8
+-      addq    88(%rsp),%r10
+-      #   (uint64) x9 >>= 32
+-      shr     $32,%r10
+-      #   x9 <<= 32
+-      shl     $32,%r10
+-      #   x8 += x9
+-      add     %r10,%r11
+-      #   (uint32) x12 += j12
+-      addl    104(%rsp),%r14d
+-      #   x13 <<= 32
+-      shl     $32,%r13
+-      #   x13 += j12
+-      addq    104(%rsp),%r13
+-      #   (uint64) x13 >>= 32
+-      shr     $32,%r13
+-      #   x13 <<= 32
+-      shl     $32,%r13
+-      #   x12 += x13
+-      add     %r13,%r14
+-      #   (uint32) x0 += j0
+-      addl    56(%rsp),%edx
+-      #   x1 <<= 32
+-      shl     $32,%rdi
+-      #   x1 += j0
+-      addq    56(%rsp),%rdi
+-      #   (uint64) x1 >>= 32
+-      shr     $32,%rdi
+-      #   x1 <<= 32
+-      shl     $32,%rdi
+-      #   x0 += x1
+-      add     %rdi,%rdx
+-      #   x5 = x5_stack
+-      movq    160(%rsp),%rdi
+-      #   (uint32) x4 += j4
+-      addl    72(%rsp),%r9d
+-      #   x5 <<= 32
+-      shl     $32,%rdi
+-      #   x5 += j4
+-      addq    72(%rsp),%rdi
+-      #   (uint64) x5 >>= 32
+-      shr     $32,%rdi
+-      #   x5 <<= 32
+-      shl     $32,%rdi
+-      #   x4 += x5
+-      add     %rdi,%r9
+-      #   x10 = x10_stack
+-      movq    168(%rsp),%r8
+-      #   (uint32) x10 += j10
+-      addl    96(%rsp),%r8d
+-      #   x11 <<= 32
+-      shl     $32,%r12
+-      #   x11 += j10
+-      addq    96(%rsp),%r12
+-      #   (uint64) x11 >>= 32
+-      shr     $32,%r12
+-      #   x11 <<= 32
+-      shl     $32,%r12
+-      #   x10 += x11
+-      add     %r12,%r8
+-      #   x15 = x15_stack
+-      movq    176(%rsp),%rdi
+-      #   (uint32) x14 += j14
+-      addl    112(%rsp),%ebx
+-      #   x15 <<= 32
+-      shl     $32,%rdi
+-      #   x15 += j14
+-      addq    112(%rsp),%rdi
+-      #   (uint64) x15 >>= 32
+-      shr     $32,%rdi
+-      #   x15 <<= 32
+-      shl     $32,%rdi
+-      #   x14 += x15
+-      add     %rdi,%rbx
+-      #   out = out_backup
+-      movq    136(%rsp),%rdi
+-      #   m = m_backup
+-      movq    144(%rsp),%rsi
+-      #   x0 ^= *(uint64 *) (m + 0)
+-      xorq    0(%rsi),%rdx
+-      #   *(uint64 *) (out + 0) = x0
+-      movq    %rdx,0(%rdi)
+-      #   x2 ^= *(uint64 *) (m + 8)
+-      xorq    8(%rsi),%rcx
+-      #   *(uint64 *) (out + 8) = x2
+-      movq    %rcx,8(%rdi)
+-      #   x4 ^= *(uint64 *) (m + 16)
+-      xorq    16(%rsi),%r9
+-      #   *(uint64 *) (out + 16) = x4
+-      movq    %r9,16(%rdi)
+-      #   x6 ^= *(uint64 *) (m + 24)
+-      xorq    24(%rsi),%rax
+-      #   *(uint64 *) (out + 24) = x6
+-      movq    %rax,24(%rdi)
+-      #   x8 ^= *(uint64 *) (m + 32)
+-      xorq    32(%rsi),%r11
+-      #   *(uint64 *) (out + 32) = x8
+-      movq    %r11,32(%rdi)
+-      #   x10 ^= *(uint64 *) (m + 40)
+-      xorq    40(%rsi),%r8
+-      #   *(uint64 *) (out + 40) = x10
+-      movq    %r8,40(%rdi)
+-      #   x12 ^= *(uint64 *) (m + 48)
+-      xorq    48(%rsi),%r14
+-      #   *(uint64 *) (out + 48) = x12
+-      movq    %r14,48(%rdi)
+-      #   x14 ^= *(uint64 *) (m + 56)
+-      xorq    56(%rsi),%rbx
+-      #   *(uint64 *) (out + 56) = x14
+-      movq    %rbx,56(%rdi)
+-      #   bytes = bytes_backup
+-      movq    152(%rsp),%rdx
+-      #   in8 = j8
+-      movq    88(%rsp),%rcx
+-      #   in8 += 1
+-      add     $1,%rcx
+-      #   j8 = in8
+-      movq    %rcx,88(%rsp)
+-      #                          unsigned>? unsigned<? bytes - 64
+-      cmp     $64,%rdx
+-      # comment:fp stack unchanged by jump
+-      #   goto bytesatleast65 if unsigned>
+-      ja      ._bytesatleast65
+-      # comment:fp stack unchanged by jump
+-      #     goto bytesatleast64 if !unsigned<
+-      jae     ._bytesatleast64
+-      #       m = out
+-      mov     %rdi,%rsi
+-      #       out = ctarget
+-      movq    128(%rsp),%rdi
+-      #       i = bytes
+-      mov     %rdx,%rcx
+-      #       while (i) { *out++ = *m++; --i }
+-      rep     movsb
+-      # comment:fp stack unchanged by fallthrough
+-#     bytesatleast64:
+-._bytesatleast64:
+-      #     x = x_backup
+-      movq    120(%rsp),%rdi
+-      #     in8 = j8
+-      movq    88(%rsp),%rsi
+-      #     *(uint64 *) (x + 32) = in8
+-      movq    %rsi,32(%rdi)
+-      #     r11 = r11_stack
+-      movq    0(%rsp),%r11
+-      #     r12 = r12_stack
+-      movq    8(%rsp),%r12
+-      #     r13 = r13_stack
+-      movq    16(%rsp),%r13
+-      #     r14 = r14_stack
+-      movq    24(%rsp),%r14
+-      #     r15 = r15_stack
+-      movq    32(%rsp),%r15
+-      #     rbx = rbx_stack
+-      movq    40(%rsp),%rbx
+-      #     rbp = rbp_stack
+-      movq    48(%rsp),%rbp
+-      # comment:fp stack unchanged by fallthrough
+-#     done:
+-._done:
+-      #     leave
+-      add     %r11,%rsp
+-      mov     %rdi,%rax
+-      mov     %rsi,%rdx
+-      ret
+-#   bytesatleast65:
+-._bytesatleast65:
+-      #   bytes -= 64
+-      sub     $64,%rdx
+-      #   out += 64
+-      add     $64,%rdi
+-      #   m += 64
+-      add     $64,%rsi
+-      # comment:fp stack unchanged by jump
+-      # goto bytesatleast1
+-      jmp     ._bytesatleast1
+-ENDPROC(salsa20_encrypt_bytes)
+-
+-# enter salsa20_keysetup
+-ENTRY(salsa20_keysetup)
+-      mov     %rsp,%r11
+-      and     $31,%r11
+-      add     $256,%r11
+-      sub     %r11,%rsp
+-      #   k = arg2
+-      mov     %rsi,%rsi
+-      #   kbits = arg3
+-      mov     %rdx,%rdx
+-      #   x = arg1
+-      mov     %rdi,%rdi
+-      #   in0 = *(uint64 *) (k + 0)
+-      movq    0(%rsi),%r8
+-      #   in2 = *(uint64 *) (k + 8)
+-      movq    8(%rsi),%r9
+-      #   *(uint64 *) (x + 4) = in0
+-      movq    %r8,4(%rdi)
+-      #   *(uint64 *) (x + 12) = in2
+-      movq    %r9,12(%rdi)
+-      #                    unsigned<? kbits - 256
+-      cmp     $256,%rdx
+-      # comment:fp stack unchanged by jump
+-      #   goto kbits128 if unsigned<
+-      jb      ._kbits128
+-#   kbits256:
+-._kbits256:
+-      #     in10 = *(uint64 *) (k + 16)
+-      movq    16(%rsi),%rdx
+-      #     in12 = *(uint64 *) (k + 24)
+-      movq    24(%rsi),%rsi
+-      #     *(uint64 *) (x + 44) = in10
+-      movq    %rdx,44(%rdi)
+-      #     *(uint64 *) (x + 52) = in12
+-      movq    %rsi,52(%rdi)
+-      #     in0 = 1634760805
+-      mov     $1634760805,%rsi
+-      #     in4 = 857760878
+-      mov     $857760878,%rdx
+-      #     in10 = 2036477234
+-      mov     $2036477234,%rcx
+-      #     in14 = 1797285236
+-      mov     $1797285236,%r8
+-      #     *(uint32 *) (x + 0) = in0
+-      movl    %esi,0(%rdi)
+-      #     *(uint32 *) (x + 20) = in4
+-      movl    %edx,20(%rdi)
+-      #     *(uint32 *) (x + 40) = in10
+-      movl    %ecx,40(%rdi)
+-      #     *(uint32 *) (x + 60) = in14
+-      movl    %r8d,60(%rdi)
+-      # comment:fp stack unchanged by jump
+-      #   goto keysetupdone
+-      jmp     ._keysetupdone
+-#   kbits128:
+-._kbits128:
+-      #     in10 = *(uint64 *) (k + 0)
+-      movq    0(%rsi),%rdx
+-      #     in12 = *(uint64 *) (k + 8)
+-      movq    8(%rsi),%rsi
+-      #     *(uint64 *) (x + 44) = in10
+-      movq    %rdx,44(%rdi)
+-      #     *(uint64 *) (x + 52) = in12
+-      movq    %rsi,52(%rdi)
+-      #     in0 = 1634760805
+-      mov     $1634760805,%rsi
+-      #     in4 = 824206446
+-      mov     $824206446,%rdx
+-      #     in10 = 2036477238
+-      mov     $2036477238,%rcx
+-      #     in14 = 1797285236
+-      mov     $1797285236,%r8
+-      #     *(uint32 *) (x + 0) = in0
+-      movl    %esi,0(%rdi)
+-      #     *(uint32 *) (x + 20) = in4
+-      movl    %edx,20(%rdi)
+-      #     *(uint32 *) (x + 40) = in10
+-      movl    %ecx,40(%rdi)
+-      #     *(uint32 *) (x + 60) = in14
+-      movl    %r8d,60(%rdi)
+-#   keysetupdone:
+-._keysetupdone:
+-      # leave
+-      add     %r11,%rsp
+-      mov     %rdi,%rax
+-      mov     %rsi,%rdx
+-      ret
+-ENDPROC(salsa20_keysetup)
+-
+-# enter salsa20_ivsetup
+-ENTRY(salsa20_ivsetup)
+-      mov     %rsp,%r11
+-      and     $31,%r11
+-      add     $256,%r11
+-      sub     %r11,%rsp
+-      #   iv = arg2
+-      mov     %rsi,%rsi
+-      #   x = arg1
+-      mov     %rdi,%rdi
+-      #   in6 = *(uint64 *) (iv + 0)
+-      movq    0(%rsi),%rsi
+-      #   in8 = 0
+-      mov     $0,%r8
+-      #   *(uint64 *) (x + 24) = in6
+-      movq    %rsi,24(%rdi)
+-      #   *(uint64 *) (x + 32) = in8
+-      movq    %r8,32(%rdi)
+-      # leave
+-      add     %r11,%rsp
+-      mov     %rdi,%rax
+-      mov     %rsi,%rdx
+-      ret
+-ENDPROC(salsa20_ivsetup)
+--- a/arch/x86/crypto/salsa20_glue.c
++++ /dev/null
+@@ -1,116 +0,0 @@
+-/*
+- * Glue code for optimized assembly version of  Salsa20.
+- *
+- * Copyright (c) 2007 Tan Swee Heng <thesweeheng@gmail.com>
+- *
+- * The assembly codes are public domain assembly codes written by Daniel. J.
+- * Bernstein <djb@cr.yp.to>. The codes are modified to include indentation
+- * and to remove extraneous comments and functions that are not needed.
+- * - i586 version, renamed as salsa20-i586-asm_32.S
+- *   available from <http://cr.yp.to/snuffle/salsa20/x86-pm/salsa20.s>
+- * - x86-64 version, renamed as salsa20-x86_64-asm_64.S
+- *   available from <http://cr.yp.to/snuffle/salsa20/amd64-3/salsa20.s>
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms of the GNU General Public License as published by the Free
+- * Software Foundation; either version 2 of the License, or (at your option)
+- * any later version.
+- *
+- */
+-
+-#include <crypto/algapi.h>
+-#include <linux/module.h>
+-#include <linux/crypto.h>
+-
+-#define SALSA20_IV_SIZE        8U
+-#define SALSA20_MIN_KEY_SIZE  16U
+-#define SALSA20_MAX_KEY_SIZE  32U
+-
+-struct salsa20_ctx
+-{
+-      u32 input[16];
+-};
+-
+-asmlinkage void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k,
+-                               u32 keysize, u32 ivsize);
+-asmlinkage void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv);
+-asmlinkage void salsa20_encrypt_bytes(struct salsa20_ctx *ctx,
+-                                    const u8 *src, u8 *dst, u32 bytes);
+-
+-static int setkey(struct crypto_tfm *tfm, const u8 *key,
+-                unsigned int keysize)
+-{
+-      struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm);
+-      salsa20_keysetup(ctx, key, keysize*8, SALSA20_IV_SIZE*8);
+-      return 0;
+-}
+-
+-static int encrypt(struct blkcipher_desc *desc,
+-                 struct scatterlist *dst, struct scatterlist *src,
+-                 unsigned int nbytes)
+-{
+-      struct blkcipher_walk walk;
+-      struct crypto_blkcipher *tfm = desc->tfm;
+-      struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm);
+-      int err;
+-
+-      blkcipher_walk_init(&walk, dst, src, nbytes);
+-      err = blkcipher_walk_virt_block(desc, &walk, 64);
+-
+-      salsa20_ivsetup(ctx, walk.iv);
+-
+-      while (walk.nbytes >= 64) {
+-              salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
+-                                    walk.dst.virt.addr,
+-                                    walk.nbytes - (walk.nbytes % 64));
+-              err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64);
+-      }
+-
+-      if (walk.nbytes) {
+-              salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
+-                                    walk.dst.virt.addr, walk.nbytes);
+-              err = blkcipher_walk_done(desc, &walk, 0);
+-      }
+-
+-      return err;
+-}
+-
+-static struct crypto_alg alg = {
+-      .cra_name           =   "salsa20",
+-      .cra_driver_name    =   "salsa20-asm",
+-      .cra_priority       =   200,
+-      .cra_flags          =   CRYPTO_ALG_TYPE_BLKCIPHER,
+-      .cra_type           =   &crypto_blkcipher_type,
+-      .cra_blocksize      =   1,
+-      .cra_ctxsize        =   sizeof(struct salsa20_ctx),
+-      .cra_alignmask      =   3,
+-      .cra_module         =   THIS_MODULE,
+-      .cra_u              =   {
+-              .blkcipher = {
+-                      .setkey         =   setkey,
+-                      .encrypt        =   encrypt,
+-                      .decrypt        =   encrypt,
+-                      .min_keysize    =   SALSA20_MIN_KEY_SIZE,
+-                      .max_keysize    =   SALSA20_MAX_KEY_SIZE,
+-                      .ivsize         =   SALSA20_IV_SIZE,
+-              }
+-      }
+-};
+-
+-static int __init init(void)
+-{
+-      return crypto_register_alg(&alg);
+-}
+-
+-static void __exit fini(void)
+-{
+-      crypto_unregister_alg(&alg);
+-}
+-
+-module_init(init);
+-module_exit(fini);
+-
+-MODULE_LICENSE("GPL");
+-MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm (optimized assembly version)");
+-MODULE_ALIAS_CRYPTO("salsa20");
+-MODULE_ALIAS_CRYPTO("salsa20-asm");
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -1324,32 +1324,6 @@ config CRYPTO_SALSA20
+         The Salsa20 stream cipher algorithm is designed by Daniel J.
+         Bernstein <djb@cr.yp.to>. See <http://cr.yp.to/snuffle.html>
+-config CRYPTO_SALSA20_586
+-      tristate "Salsa20 stream cipher algorithm (i586)"
+-      depends on (X86 || UML_X86) && !64BIT
+-      select CRYPTO_BLKCIPHER
+-      help
+-        Salsa20 stream cipher algorithm.
+-
+-        Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT
+-        Stream Cipher Project. See <http://www.ecrypt.eu.org/stream/>
+-
+-        The Salsa20 stream cipher algorithm is designed by Daniel J.
+-        Bernstein <djb@cr.yp.to>. See <http://cr.yp.to/snuffle.html>
+-
+-config CRYPTO_SALSA20_X86_64
+-      tristate "Salsa20 stream cipher algorithm (x86_64)"
+-      depends on (X86 || UML_X86) && 64BIT
+-      select CRYPTO_BLKCIPHER
+-      help
+-        Salsa20 stream cipher algorithm.
+-
+-        Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT
+-        Stream Cipher Project. See <http://www.ecrypt.eu.org/stream/>
+-
+-        The Salsa20 stream cipher algorithm is designed by Daniel J.
+-        Bernstein <djb@cr.yp.to>. See <http://cr.yp.to/snuffle.html>
+-
+ config CRYPTO_CHACHA20
+       tristate "ChaCha20 cipher algorithm"
+       select CRYPTO_BLKCIPHER
diff --git a/queue-4.14/f2fs-give-message-and-set-need_fsck-given-broken-node-id.patch b/queue-4.14/f2fs-give-message-and-set-need_fsck-given-broken-node-id.patch
new file mode 100644 (file)
index 0000000..ee8ebfc
--- /dev/null
@@ -0,0 +1,193 @@
+From a4f843bd004d775cbb360cd375969b8a479568a9 Mon Sep 17 00:00:00 2001
+From: Jaegeuk Kim <jaegeuk@kernel.org>
+Date: Mon, 23 Apr 2018 23:02:31 -0600
+Subject: f2fs: give message and set need_fsck given broken node id
+
+From: Jaegeuk Kim <jaegeuk@kernel.org>
+
+commit a4f843bd004d775cbb360cd375969b8a479568a9 upstream.
+
+syzbot hit the following crash on upstream commit
+83beed7b2b26f232d782127792dd0cd4362fdc41 (Fri Apr 20 17:56:32 2018 +0000)
+Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal
+syzbot dashboard link: https://syzkaller.appspot.com/bug?extid=d154ec99402c6f628887
+
+C reproducer: https://syzkaller.appspot.com/x/repro.c?id=5414336294027264
+syzkaller reproducer: https://syzkaller.appspot.com/x/repro.syz?id=5471683234234368
+Raw console output: https://syzkaller.appspot.com/x/log.txt?id=5436660795834368
+Kernel config: https://syzkaller.appspot.com/x/.config?id=1808800213120130118
+compiler: gcc (GCC) 8.0.1 20180413 (experimental)
+
+IMPORTANT: if you fix the bug, please add the following tag to the commit:
+Reported-by: syzbot+d154ec99402c6f628887@syzkaller.appspotmail.com
+It will help syzbot understand when the bug is fixed. See footer for details.
+If you forward the report, please keep this part and the footer.
+
+F2FS-fs (loop0): Magic Mismatch, valid(0xf2f52010) - read(0x0)
+F2FS-fs (loop0): Can't find valid F2FS filesystem in 1th superblock
+F2FS-fs (loop0): invalid crc value
+------------[ cut here ]------------
+kernel BUG at fs/f2fs/node.c:1185!
+invalid opcode: 0000 [#1] SMP KASAN
+Dumping ftrace buffer:
+   (ftrace buffer empty)
+Modules linked in:
+CPU: 1 PID: 4549 Comm: syzkaller704305 Not tainted 4.17.0-rc1+ #10
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+RIP: 0010:__get_node_page+0xb68/0x16e0 fs/f2fs/node.c:1185
+RSP: 0018:ffff8801d960e820 EFLAGS: 00010293
+RAX: ffff8801d88205c0 RBX: 0000000000000003 RCX: ffffffff82f6cc06
+RDX: 0000000000000000 RSI: ffffffff82f6d5e8 RDI: 0000000000000004
+RBP: ffff8801d960ec30 R08: ffff8801d88205c0 R09: ffffed003b5e46c2
+R10: 0000000000000003 R11: 0000000000000003 R12: ffff8801a86e00c0
+R13: 0000000000000001 R14: ffff8801a86e0530 R15: ffff8801d9745240
+FS:  000000000072c880(0000) GS:ffff8801daf00000(0000) knlGS:0000000000000000
+CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f3d403209b8 CR3: 00000001d8f3f000 CR4: 00000000001406e0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ get_node_page fs/f2fs/node.c:1237 [inline]
+ truncate_xattr_node+0x152/0x2e0 fs/f2fs/node.c:1014
+ remove_inode_page+0x200/0xaf0 fs/f2fs/node.c:1039
+ f2fs_evict_inode+0xe86/0x1710 fs/f2fs/inode.c:547
+ evict+0x4a6/0x960 fs/inode.c:557
+ iput_final fs/inode.c:1519 [inline]
+ iput+0x62d/0xa80 fs/inode.c:1545
+ f2fs_fill_super+0x5f4e/0x7bf0 fs/f2fs/super.c:2849
+ mount_bdev+0x30c/0x3e0 fs/super.c:1164
+ f2fs_mount+0x34/0x40 fs/f2fs/super.c:3020
+ mount_fs+0xae/0x328 fs/super.c:1267
+ vfs_kern_mount.part.34+0xd4/0x4d0 fs/namespace.c:1037
+ vfs_kern_mount fs/namespace.c:1027 [inline]
+ do_new_mount fs/namespace.c:2518 [inline]
+ do_mount+0x564/0x3070 fs/namespace.c:2848
+ ksys_mount+0x12d/0x140 fs/namespace.c:3064
+ __do_sys_mount fs/namespace.c:3078 [inline]
+ __se_sys_mount fs/namespace.c:3075 [inline]
+ __x64_sys_mount+0xbe/0x150 fs/namespace.c:3075
+ do_syscall_64+0x1b1/0x800 arch/x86/entry/common.c:287
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+RIP: 0033:0x443dea
+RSP: 002b:00007ffcc7882368 EFLAGS: 00000297 ORIG_RAX: 00000000000000a5
+RAX: ffffffffffffffda RBX: 0000000020000c00 RCX: 0000000000443dea
+RDX: 0000000020000000 RSI: 0000000020000100 RDI: 00007ffcc7882370
+RBP: 0000000000000003 R08: 0000000020016a00 R09: 000000000000000a
+R10: 0000000000000000 R11: 0000000000000297 R12: 0000000000000004
+R13: 0000000000402ce0 R14: 0000000000000000 R15: 0000000000000000
+RIP: __get_node_page+0xb68/0x16e0 fs/f2fs/node.c:1185 RSP: ffff8801d960e820
+---[ end trace 4edbeb71f002bb76 ]---
+
+Reported-and-tested-by: syzbot+d154ec99402c6f628887@syzkaller.appspotmail.com
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/f2fs/f2fs.h  |   13 +------------
+ fs/f2fs/inode.c |   13 ++++++-------
+ fs/f2fs/node.c  |   21 +++++++++++++++++++--
+ 3 files changed, 26 insertions(+), 21 deletions(-)
+
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -1471,18 +1471,6 @@ static inline bool __exist_node_summarie
+ }
+ /*
+- * Check whether the given nid is within node id range.
+- */
+-static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
+-{
+-      if (unlikely(nid < F2FS_ROOT_INO(sbi)))
+-              return -EINVAL;
+-      if (unlikely(nid >= NM_I(sbi)->max_nid))
+-              return -EINVAL;
+-      return 0;
+-}
+-
+-/*
+  * Check whether the inode has blocks or not
+  */
+ static inline int F2FS_HAS_BLOCKS(struct inode *inode)
+@@ -2470,6 +2458,7 @@ f2fs_hash_t f2fs_dentry_hash(const struc
+ struct dnode_of_data;
+ struct node_info;
++int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
+ bool available_free_memory(struct f2fs_sb_info *sbi, int type);
+ int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
+ bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -188,12 +188,8 @@ static int do_read_inode(struct inode *i
+       projid_t i_projid;
+       /* Check if ino is within scope */
+-      if (check_nid_range(sbi, inode->i_ino)) {
+-              f2fs_msg(inode->i_sb, KERN_ERR, "bad inode number: %lu",
+-                       (unsigned long) inode->i_ino);
+-              WARN_ON(1);
++      if (check_nid_range(sbi, inode->i_ino))
+               return -EINVAL;
+-      }
+       node_page = get_node_page(sbi, inode->i_ino);
+       if (IS_ERR(node_page))
+@@ -538,8 +534,11 @@ no_delete:
+               alloc_nid_failed(sbi, inode->i_ino);
+               clear_inode_flag(inode, FI_FREE_NID);
+       } else {
+-              f2fs_bug_on(sbi, err &&
+-                      !exist_written_data(sbi, inode->i_ino, ORPHAN_INO));
++              /*
++               * If xattr nid is corrupted, we can reach out error condition,
++               * err & !exist_written_data(sbi, inode->i_ino, ORPHAN_INO)).
++               * In that case, check_nid_range() is enough to give a clue.
++               */
+       }
+ out_clear:
+       fscrypt_put_encryption_info(inode, NULL);
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -29,6 +29,21 @@ static struct kmem_cache *nat_entry_slab
+ static struct kmem_cache *free_nid_slab;
+ static struct kmem_cache *nat_entry_set_slab;
++/*
++ * Check whether the given nid is within node id range.
++ */
++int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
++{
++      if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
++              set_sbi_flag(sbi, SBI_NEED_FSCK);
++              f2fs_msg(sbi->sb, KERN_WARNING,
++                              "%s: out-of-range nid=%x, run fsck to fix.",
++                              __func__, nid);
++              return -EINVAL;
++      }
++      return 0;
++}
++
+ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
+ {
+       struct f2fs_nm_info *nm_i = NM_I(sbi);
+@@ -1122,7 +1137,8 @@ void ra_node_page(struct f2fs_sb_info *s
+       if (!nid)
+               return;
+-      f2fs_bug_on(sbi, check_nid_range(sbi, nid));
++      if (check_nid_range(sbi, nid))
++              return;
+       rcu_read_lock();
+       apage = radix_tree_lookup(&NODE_MAPPING(sbi)->page_tree, nid);
+@@ -1146,7 +1162,8 @@ static struct page *__get_node_page(stru
+       if (!nid)
+               return ERR_PTR(-ENOENT);
+-      f2fs_bug_on(sbi, check_nid_range(sbi, nid));
++      if (check_nid_range(sbi, nid))
++              return ERR_PTR(-EINVAL);
+ repeat:
+       page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
+       if (!page)
diff --git a/queue-4.14/f2fs-sanity-check-on-sit-entry.patch b/queue-4.14/f2fs-sanity-check-on-sit-entry.patch
new file mode 100644 (file)
index 0000000..5816435
--- /dev/null
@@ -0,0 +1,103 @@
+From b2ca374f33bd33fd822eb871876e4888cf79dc97 Mon Sep 17 00:00:00 2001
+From: Jaegeuk Kim <jaegeuk@kernel.org>
+Date: Tue, 24 Apr 2018 15:44:16 -0600
+Subject: f2fs: sanity check on sit entry
+
+From: Jaegeuk Kim <jaegeuk@kernel.org>
+
+commit b2ca374f33bd33fd822eb871876e4888cf79dc97 upstream.
+
+syzbot hit the following crash on upstream commit
+87ef12027b9b1dd0e0b12cf311fbcb19f9d92539 (Wed Apr 18 19:48:17 2018 +0000)
+Merge tag 'ceph-for-4.17-rc2' of git://github.com/ceph/ceph-client
+syzbot dashboard link: https://syzkaller.appspot.com/bug?extid=83699adeb2d13579c31e
+
+C reproducer: https://syzkaller.appspot.com/x/repro.c?id=5805208181407744
+syzkaller reproducer: https://syzkaller.appspot.com/x/repro.syz?id=6005073343676416
+Raw console output: https://syzkaller.appspot.com/x/log.txt?id=6555047731134464
+Kernel config: https://syzkaller.appspot.com/x/.config?id=1808800213120130118
+compiler: gcc (GCC) 8.0.1 20180413 (experimental)
+
+IMPORTANT: if you fix the bug, please add the following tag to the commit:
+Reported-by: syzbot+83699adeb2d13579c31e@syzkaller.appspotmail.com
+It will help syzbot understand when the bug is fixed. See footer for details.
+If you forward the report, please keep this part and the footer.
+
+F2FS-fs (loop0): Magic Mismatch, valid(0xf2f52010) - read(0x0)
+F2FS-fs (loop0): Can't find valid F2FS filesystem in 1th superblock
+F2FS-fs (loop0): invalid crc value
+BUG: unable to handle kernel paging request at ffffed006b2a50c0
+PGD 21ffee067 P4D 21ffee067 PUD 21fbeb067 PMD 0
+Oops: 0000 [#1] SMP KASAN
+Dumping ftrace buffer:
+   (ftrace buffer empty)
+Modules linked in:
+CPU: 0 PID: 4514 Comm: syzkaller989480 Not tainted 4.17.0-rc1+ #8
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+RIP: 0010:build_sit_entries fs/f2fs/segment.c:3653 [inline]
+RIP: 0010:build_segment_manager+0x7ef7/0xbf70 fs/f2fs/segment.c:3852
+RSP: 0018:ffff8801b102e5b0 EFLAGS: 00010a06
+RAX: 1ffff1006b2a50c0 RBX: 0000000000000004 RCX: 0000000000000001
+RDX: 0000000000000000 RSI: 0000000000000001 RDI: ffff8801ac74243e
+RBP: ffff8801b102f410 R08: ffff8801acbd46c0 R09: fffffbfff14d9af8
+R10: fffffbfff14d9af8 R11: ffff8801acbd46c0 R12: ffff8801ac742a80
+R13: ffff8801d9519100 R14: dffffc0000000000 R15: ffff880359528600
+FS:  0000000001e04880(0000) GS:ffff8801dae00000(0000) knlGS:0000000000000000
+CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: ffffed006b2a50c0 CR3: 00000001ac6ac000 CR4: 00000000001406f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ f2fs_fill_super+0x4095/0x7bf0 fs/f2fs/super.c:2803
+ mount_bdev+0x30c/0x3e0 fs/super.c:1165
+ f2fs_mount+0x34/0x40 fs/f2fs/super.c:3020
+ mount_fs+0xae/0x328 fs/super.c:1268
+ vfs_kern_mount.part.34+0xd4/0x4d0 fs/namespace.c:1037
+ vfs_kern_mount fs/namespace.c:1027 [inline]
+ do_new_mount fs/namespace.c:2517 [inline]
+ do_mount+0x564/0x3070 fs/namespace.c:2847
+ ksys_mount+0x12d/0x140 fs/namespace.c:3063
+ __do_sys_mount fs/namespace.c:3077 [inline]
+ __se_sys_mount fs/namespace.c:3074 [inline]
+ __x64_sys_mount+0xbe/0x150 fs/namespace.c:3074
+ do_syscall_64+0x1b1/0x800 arch/x86/entry/common.c:287
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+RIP: 0033:0x443d6a
+RSP: 002b:00007ffd312813c8 EFLAGS: 00000297 ORIG_RAX: 00000000000000a5
+RAX: ffffffffffffffda RBX: 0000000020000c00 RCX: 0000000000443d6a
+RDX: 0000000020000000 RSI: 0000000020000100 RDI: 00007ffd312813d0
+RBP: 0000000000000003 R08: 0000000020016a00 R09: 000000000000000a
+R10: 0000000000000000 R11: 0000000000000297 R12: 0000000000000004
+R13: 0000000000402c60 R14: 0000000000000000 R15: 0000000000000000
+RIP: build_sit_entries fs/f2fs/segment.c:3653 [inline] RSP: ffff8801b102e5b0
+RIP: build_segment_manager+0x7ef7/0xbf70 fs/f2fs/segment.c:3852 RSP: ffff8801b102e5b0
+CR2: ffffed006b2a50c0
+---[ end trace a2034989e196ff17 ]---
+
+Reported-and-tested-by: syzbot+83699adeb2d13579c31e@syzkaller.appspotmail.com
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/f2fs/segment.c |    9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -3293,6 +3293,15 @@ static void build_sit_entries(struct f2f
+               unsigned int old_valid_blocks;
+               start = le32_to_cpu(segno_in_journal(journal, i));
++              if (start >= MAIN_SEGS(sbi)) {
++                      f2fs_msg(sbi->sb, KERN_ERR,
++                                      "Wrong journal entry on segno %u",
++                                      start);
++                      set_sbi_flag(sbi, SBI_NEED_FSCK);
++                      err = -EINVAL;
++                      break;
++              }
++
+               se = &sit_i->sentries[start];
+               sit = sit_in_journal(journal, i);
diff --git a/queue-4.14/kvm-vmx-nested-vm-entry-prereqs-for-event-inj.patch b/queue-4.14/kvm-vmx-nested-vm-entry-prereqs-for-event-inj.patch
new file mode 100644 (file)
index 0000000..2ce073b
--- /dev/null
@@ -0,0 +1,164 @@
+From 0447378a4a793da008451fad50bc0f93e9675ae6 Mon Sep 17 00:00:00 2001
+From: Marc Orr <marcorr@google.com>
+Date: Wed, 20 Jun 2018 17:21:29 -0700
+Subject: kvm: vmx: Nested VM-entry prereqs for event inj.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Marc Orr <marcorr@google.com>
+
+commit 0447378a4a793da008451fad50bc0f93e9675ae6 upstream.
+
+This patch extends the checks done prior to a nested VM entry.
+Specifically, it extends the check_vmentry_prereqs function with checks
+for fields relevant to the VM-entry event injection information, as
+described in the Intel SDM, volume 3.
+
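+For orientation (a reader's sketch, not part of this commit), the
+interruption-information word packs its subfields as follows; the
+struct and helper names are illustrative, with the bit layout taken
+from the SDM:
+
+	/* Sketch: decoding a VM-entry interruption-information word. */
+	struct intr_info_fields {
+		u8   vector;          /* bits 7:0  - vector number       */
+		u8   type;            /* bits 10:8 - interruption type   */
+		bool has_error_code;  /* bit 11    - deliver error code  */
+		bool valid;           /* bit 31    - field is valid      */
+	};
+
+	static struct intr_info_fields decode_intr_info(u32 info)
+	{
+		struct intr_info_fields f = {
+			.vector         = info & 0xff,
+			.type           = (info >> 8) & 0x7,
+			.has_error_code = (info >> 11) & 0x1,
+			.valid          = (info >> 31) & 0x1,
+		};
+		return f;
+	}
+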
+This patch is motivated by a syzkaller bug, where a bad VM-entry
+interruption information field is generated in the VMCS02, which causes
+the nested VM launch to fail. Then, KVM fails to resume L1.
+
+While KVM should be improved to correctly resume L1 execution after a
+failed nested launch, this change is justified because the existing code
+to resume L1 is flaky/ad-hoc and the test coverage for resuming L1 is
+sparse.
+
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Marc Orr <marcorr@google.com>
+[Removed comment whose parts were describing previous revisions and the
+ rest was obvious from function/variable naming. - Radim]
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/vmx.h |    3 ++
+ arch/x86/kvm/vmx.c         |   67 +++++++++++++++++++++++++++++++++++++++++++++
+ arch/x86/kvm/x86.h         |    9 ++++++
+ 3 files changed, 79 insertions(+)
+
+--- a/arch/x86/include/asm/vmx.h
++++ b/arch/x86/include/asm/vmx.h
+@@ -114,6 +114,7 @@
+ #define VMX_MISC_PREEMPTION_TIMER_RATE_MASK   0x0000001f
+ #define VMX_MISC_SAVE_EFER_LMA                        0x00000020
+ #define VMX_MISC_ACTIVITY_HLT                 0x00000040
++#define VMX_MISC_ZERO_LEN_INS                 0x40000000
+ /* VMFUNC functions */
+ #define VMX_VMFUNC_EPTP_SWITCHING               0x00000001
+@@ -349,11 +350,13 @@ enum vmcs_field {
+ #define VECTORING_INFO_VALID_MASK             INTR_INFO_VALID_MASK
+ #define INTR_TYPE_EXT_INTR              (0 << 8) /* external interrupt */
++#define INTR_TYPE_RESERVED              (1 << 8) /* reserved */
+ #define INTR_TYPE_NMI_INTR            (2 << 8) /* NMI */
+ #define INTR_TYPE_HARD_EXCEPTION      (3 << 8) /* processor exception */
+ #define INTR_TYPE_SOFT_INTR             (4 << 8) /* software interrupt */
+ #define INTR_TYPE_PRIV_SW_EXCEPTION   (5 << 8) /* ICE breakpoint - undocumented */
+ #define INTR_TYPE_SOFT_EXCEPTION      (6 << 8) /* software exception */
++#define INTR_TYPE_OTHER_EVENT           (7 << 8) /* other event */
+ /* GUEST_INTERRUPTIBILITY_INFO flags. */
+ #define GUEST_INTR_STATE_STI          0x00000001
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1350,6 +1350,17 @@ static inline unsigned nested_cpu_vmx_mi
+       return vmx_misc_cr3_count(to_vmx(vcpu)->nested.nested_vmx_misc_low);
+ }
++static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
++{
++      return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
++}
++
++static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
++{
++      return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
++                      CPU_BASED_MONITOR_TRAP_FLAG;
++}
++
+ static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
+ {
+       return vmcs12->cpu_based_vm_exec_control & bit;
+@@ -11024,6 +11035,62 @@ static int check_vmentry_prereqs(struct
+           !nested_cr3_valid(vcpu, vmcs12->host_cr3))
+               return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
++      /*
++       * From the Intel SDM, volume 3:
++       * Fields relevant to VM-entry event injection must be set properly.
++       * These fields are the VM-entry interruption-information field, the
++       * VM-entry exception error code, and the VM-entry instruction length.
++       */
++      if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
++              u32 intr_info = vmcs12->vm_entry_intr_info_field;
++              u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
++              u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
++              bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
++              bool should_have_error_code;
++              bool urg = nested_cpu_has2(vmcs12,
++                                         SECONDARY_EXEC_UNRESTRICTED_GUEST);
++              bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
++
++              /* VM-entry interruption-info field: interruption type */
++              if (intr_type == INTR_TYPE_RESERVED ||
++                  (intr_type == INTR_TYPE_OTHER_EVENT &&
++                   !nested_cpu_supports_monitor_trap_flag(vcpu)))
++                      return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
++
++              /* VM-entry interruption-info field: vector */
++              if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
++                  (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
++                  (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
++                      return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
++
++              /* VM-entry interruption-info field: deliver error code */
++              should_have_error_code =
++                      intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
++                      x86_exception_has_error_code(vector);
++              if (has_error_code != should_have_error_code)
++                      return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
++
++              /* VM-entry exception error code */
++              if (has_error_code &&
++                  vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
++                      return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
++
++              /* VM-entry interruption-info field: reserved bits */
++              if (intr_info & INTR_INFO_RESVD_BITS_MASK)
++                      return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
++
++              /* VM-entry instruction length */
++              switch (intr_type) {
++              case INTR_TYPE_SOFT_EXCEPTION:
++              case INTR_TYPE_SOFT_INTR:
++              case INTR_TYPE_PRIV_SW_EXCEPTION:
++                      if ((vmcs12->vm_entry_instruction_len > 15) ||
++                          (vmcs12->vm_entry_instruction_len == 0 &&
++                           !nested_cpu_has_zero_length_injection(vcpu)))
++                              return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
++              }
++      }
++
+       return 0;
+ }
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -74,6 +74,15 @@ static inline bool is_la57_mode(struct k
+ #endif
+ }
++static inline bool x86_exception_has_error_code(unsigned int vector)
++{
++      static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
++                      BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
++                      BIT(PF_VECTOR) | BIT(AC_VECTOR);
++
++      return (1U << vector) & exception_has_error_code;
++}
++
+ static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
+ {
+       return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
diff --git a/queue-4.14/loop-add-recursion-validation-to-loop_change_fd.patch b/queue-4.14/loop-add-recursion-validation-to-loop_change_fd.patch
new file mode 100644 (file)
index 0000000..d181ec5
--- /dev/null
@@ -0,0 +1,143 @@
+From d2ac838e4cd7e5e9891ecc094d626734b0245c99 Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Mon, 7 May 2018 11:37:58 -0400
+Subject: loop: add recursion validation to LOOP_CHANGE_FD
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit d2ac838e4cd7e5e9891ecc094d626734b0245c99 upstream.
+
+Refactor the validation code used in LOOP_SET_FD so it is also used in
+LOOP_CHANGE_FD.  Otherwise it is possible to construct a set of loop
+devices that all refer to each other.  This can lead to an infinite
+loop, starting with "while (is_loop_device(f)) .." in loop_set_fd().
+
+Fix this by refactoring out the validation code and using it for
+LOOP_CHANGE_FD as well as LOOP_SET_FD.
+
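+As a reader's illustration (not part of this commit), the forbidden
+cycle looks roughly like this from userspace; LOOP_CHANGE_FD is only
+accepted on a read-only device, so treat this as schematic rather than
+a literal reproducer:
+
+	#include <fcntl.h>
+	#include <sys/ioctl.h>
+	#include <linux/loop.h>
+
+	int main(void)
+	{
+		int loop0 = open("/dev/loop0", O_RDWR);
+		int loop1 = open("/dev/loop1", O_RDWR);
+		int file  = open("/tmp/backing.img", O_RDWR);
+
+		ioctl(loop0, LOOP_SET_FD, file);   /* loop0 -> file  */
+		ioctl(loop1, LOOP_SET_FD, loop0);  /* loop1 -> loop0 */
+
+		/* Retargeting loop0 at loop1 would close the cycle;
+		 * loop_validate_file() now walks the chain and rejects
+		 * it with -EBADF. */
+		ioctl(loop0, LOOP_CHANGE_FD, loop1);
+		return 0;
+	}
+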
+Reported-by: syzbot+4349872271ece473a7c91190b68b4bac7c5dbc87@syzkaller.appspotmail.com
+Reported-by: syzbot+40bd32c4d9a3cc12a339@syzkaller.appspotmail.com
+Reported-by: syzbot+769c54e66f994b041be7@syzkaller.appspotmail.com
+Reported-by: syzbot+0a89a9ce473936c57065@syzkaller.appspotmail.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/loop.c |   68 ++++++++++++++++++++++++++++-----------------------
+ 1 file changed, 38 insertions(+), 30 deletions(-)
+
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -617,6 +617,36 @@ static void loop_reread_partitions(struc
+                       __func__, lo->lo_number, lo->lo_file_name, rc);
+ }
++static inline int is_loop_device(struct file *file)
++{
++      struct inode *i = file->f_mapping->host;
++
++      return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
++}
++
++static int loop_validate_file(struct file *file, struct block_device *bdev)
++{
++      struct inode    *inode = file->f_mapping->host;
++      struct file     *f = file;
++
++      /* Avoid recursion */
++      while (is_loop_device(f)) {
++              struct loop_device *l;
++
++              if (f->f_mapping->host->i_bdev == bdev)
++                      return -EBADF;
++
++              l = f->f_mapping->host->i_bdev->bd_disk->private_data;
++              if (l->lo_state == Lo_unbound) {
++                      return -EINVAL;
++              }
++              f = l->lo_backing_file;
++      }
++      if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
++              return -EINVAL;
++      return 0;
++}
++
+ /*
+  * loop_change_fd switched the backing store of a loopback device to
+  * a new file. This is useful for operating system installers to free up
+@@ -646,14 +676,15 @@ static int loop_change_fd(struct loop_de
+       if (!file)
+               goto out;
++      error = loop_validate_file(file, bdev);
++      if (error)
++              goto out_putf;
++
+       inode = file->f_mapping->host;
+       old_file = lo->lo_backing_file;
+       error = -EINVAL;
+-      if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
+-              goto out_putf;
+-
+       /* size of the new backing store needs to be the same */
+       if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
+               goto out_putf;
+@@ -679,13 +710,6 @@ static int loop_change_fd(struct loop_de
+       return error;
+ }
+-static inline int is_loop_device(struct file *file)
+-{
+-      struct inode *i = file->f_mapping->host;
+-
+-      return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
+-}
+-
+ /* loop sysfs attributes */
+ static ssize_t loop_attr_show(struct device *dev, char *page,
+@@ -850,7 +874,7 @@ static int loop_prepare_queue(struct loo
+ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
+                      struct block_device *bdev, unsigned int arg)
+ {
+-      struct file     *file, *f;
++      struct file     *file;
+       struct inode    *inode;
+       struct address_space *mapping;
+       int             lo_flags = 0;
+@@ -869,29 +893,13 @@ static int loop_set_fd(struct loop_devic
+       if (lo->lo_state != Lo_unbound)
+               goto out_putf;
+-      /* Avoid recursion */
+-      f = file;
+-      while (is_loop_device(f)) {
+-              struct loop_device *l;
+-
+-              if (f->f_mapping->host->i_bdev == bdev)
+-                      goto out_putf;
+-
+-              l = f->f_mapping->host->i_bdev->bd_disk->private_data;
+-              if (l->lo_state == Lo_unbound) {
+-                      error = -EINVAL;
+-                      goto out_putf;
+-              }
+-              f = l->lo_backing_file;
+-      }
++      error = loop_validate_file(file, bdev);
++      if (error)
++              goto out_putf;
+       mapping = file->f_mapping;
+       inode = mapping->host;
+-      error = -EINVAL;
+-      if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
+-              goto out_putf;
+-
+       if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
+           !file->f_op->write_iter)
+               lo_flags |= LO_FLAGS_READ_ONLY;
diff --git a/queue-4.14/loop-remember-whether-sysfs_create_group-was-done.patch b/queue-4.14/loop-remember-whether-sysfs_create_group-was-done.patch
new file mode 100644 (file)
index 0000000..83f9db0
--- /dev/null
@@ -0,0 +1,65 @@
+From d3349b6b3c373ac1fbfb040b810fcee5e2adc7e0 Mon Sep 17 00:00:00 2001
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Date: Fri, 4 May 2018 10:58:09 -0600
+Subject: loop: remember whether sysfs_create_group() was done
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+commit d3349b6b3c373ac1fbfb040b810fcee5e2adc7e0 upstream.
+
+syzbot is hitting WARN() triggered by memory allocation fault
+injection [1] because the loop module is calling sysfs_remove_group()
+when sysfs_create_group() failed.
+Fix this by remembering whether sysfs_create_group() succeeded.
+
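+The idiom, restated for readers with illustrative arguments: pair the
+conditional setup with a flag and test that flag on teardown.
+
+	/* Setup may fail under fault injection; record the outcome. */
+	lo->sysfs_inited = !sysfs_create_group(kobj, &group);
+
+	/* Teardown: only undo what was actually done. */
+	if (lo->sysfs_inited)
+		sysfs_remove_group(kobj, &group);
+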
+[1] https://syzkaller.appspot.com/bug?id=3f86c0edf75c86d2633aeb9dd69eccc70bc7e90b
+
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Reported-by: syzbot <syzbot+9f03168400f56df89dbc6f1751f4458fe739ff29@syzkaller.appspotmail.com>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+Renamed sysfs_ready -> sysfs_inited.
+
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/loop.c |   11 ++++++-----
+ drivers/block/loop.h |    1 +
+ 2 files changed, 7 insertions(+), 5 deletions(-)
+
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -806,16 +806,17 @@ static struct attribute_group loop_attri
+       .attrs= loop_attrs,
+ };
+-static int loop_sysfs_init(struct loop_device *lo)
++static void loop_sysfs_init(struct loop_device *lo)
+ {
+-      return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
+-                                &loop_attribute_group);
++      lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
++                                              &loop_attribute_group);
+ }
+ static void loop_sysfs_exit(struct loop_device *lo)
+ {
+-      sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
+-                         &loop_attribute_group);
++      if (lo->sysfs_inited)
++              sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
++                                 &loop_attribute_group);
+ }
+ static void loop_config_discard(struct loop_device *lo)
+--- a/drivers/block/loop.h
++++ b/drivers/block/loop.h
+@@ -58,6 +58,7 @@ struct loop_device {
+       struct kthread_worker   worker;
+       struct task_struct      *worker_task;
+       bool                    use_dio;
++      bool                    sysfs_inited;
+       struct request_queue    *lo_queue;
+       struct blk_mq_tag_set   tag_set;
diff --git a/queue-4.14/netfilter-nf_queue-augment-nfqa_cfg_policy.patch b/queue-4.14/netfilter-nf_queue-augment-nfqa_cfg_policy.patch
new file mode 100644 (file)
index 0000000..625d445
--- /dev/null
@@ -0,0 +1,94 @@
+From ba062ebb2cd561d404e0fba8ee4b3f5ebce7cbfc Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 13 Jun 2018 09:13:39 -0700
+Subject: netfilter: nf_queue: augment nfqa_cfg_policy
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit ba062ebb2cd561d404e0fba8ee4b3f5ebce7cbfc upstream.
+
+Three attributes are currently not verified and can thus trigger KMSAN
+warnings such as:
+
+BUG: KMSAN: uninit-value in __arch_swab32 arch/x86/include/uapi/asm/swab.h:10 [inline]
+BUG: KMSAN: uninit-value in __fswab32 include/uapi/linux/swab.h:59 [inline]
+BUG: KMSAN: uninit-value in nfqnl_recv_config+0x939/0x17d0 net/netfilter/nfnetlink_queue.c:1268
+CPU: 1 PID: 4521 Comm: syz-executor120 Not tainted 4.17.0+ #5
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+Call Trace:
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0x185/0x1d0 lib/dump_stack.c:113
+ kmsan_report+0x188/0x2a0 mm/kmsan/kmsan.c:1117
+ __msan_warning_32+0x70/0xc0 mm/kmsan/kmsan_instr.c:620
+ __arch_swab32 arch/x86/include/uapi/asm/swab.h:10 [inline]
+ __fswab32 include/uapi/linux/swab.h:59 [inline]
+ nfqnl_recv_config+0x939/0x17d0 net/netfilter/nfnetlink_queue.c:1268
+ nfnetlink_rcv_msg+0xb2e/0xc80 net/netfilter/nfnetlink.c:212
+ netlink_rcv_skb+0x37e/0x600 net/netlink/af_netlink.c:2448
+ nfnetlink_rcv+0x2fe/0x680 net/netfilter/nfnetlink.c:513
+ netlink_unicast_kernel net/netlink/af_netlink.c:1310 [inline]
+ netlink_unicast+0x1680/0x1750 net/netlink/af_netlink.c:1336
+ netlink_sendmsg+0x104f/0x1350 net/netlink/af_netlink.c:1901
+ sock_sendmsg_nosec net/socket.c:629 [inline]
+ sock_sendmsg net/socket.c:639 [inline]
+ ___sys_sendmsg+0xec8/0x1320 net/socket.c:2117
+ __sys_sendmsg net/socket.c:2155 [inline]
+ __do_sys_sendmsg net/socket.c:2164 [inline]
+ __se_sys_sendmsg net/socket.c:2162 [inline]
+ __x64_sys_sendmsg+0x331/0x460 net/socket.c:2162
+ do_syscall_64+0x15b/0x230 arch/x86/entry/common.c:287
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+RIP: 0033:0x43fd59
+RSP: 002b:00007ffde0e30d28 EFLAGS: 00000213 ORIG_RAX: 000000000000002e
+RAX: ffffffffffffffda RBX: 00000000004002c8 RCX: 000000000043fd59
+RDX: 0000000000000000 RSI: 0000000020000080 RDI: 0000000000000003
+RBP: 00000000006ca018 R08: 00000000004002c8 R09: 00000000004002c8
+R10: 00000000004002c8 R11: 0000000000000213 R12: 0000000000401680
+R13: 0000000000401710 R14: 0000000000000000 R15: 0000000000000000
+
+Uninit was created at:
+ kmsan_save_stack_with_flags mm/kmsan/kmsan.c:279 [inline]
+ kmsan_internal_poison_shadow+0xb8/0x1b0 mm/kmsan/kmsan.c:189
+ kmsan_kmalloc+0x94/0x100 mm/kmsan/kmsan.c:315
+ kmsan_slab_alloc+0x10/0x20 mm/kmsan/kmsan.c:322
+ slab_post_alloc_hook mm/slab.h:446 [inline]
+ slab_alloc_node mm/slub.c:2753 [inline]
+ __kmalloc_node_track_caller+0xb35/0x11b0 mm/slub.c:4395
+ __kmalloc_reserve net/core/skbuff.c:138 [inline]
+ __alloc_skb+0x2cb/0x9e0 net/core/skbuff.c:206
+ alloc_skb include/linux/skbuff.h:988 [inline]
+ netlink_alloc_large_skb net/netlink/af_netlink.c:1182 [inline]
+ netlink_sendmsg+0x76e/0x1350 net/netlink/af_netlink.c:1876
+ sock_sendmsg_nosec net/socket.c:629 [inline]
+ sock_sendmsg net/socket.c:639 [inline]
+ ___sys_sendmsg+0xec8/0x1320 net/socket.c:2117
+ __sys_sendmsg net/socket.c:2155 [inline]
+ __do_sys_sendmsg net/socket.c:2164 [inline]
+ __se_sys_sendmsg net/socket.c:2162 [inline]
+ __x64_sys_sendmsg+0x331/0x460 net/socket.c:2162
+ do_syscall_64+0x15b/0x230 arch/x86/entry/common.c:287
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Fixes: fdb694a01f1f ("netfilter: Add fail-open support")
+Fixes: 829e17a1a602 ("[NETFILTER]: nfnetlink_queue: allow changing queue length through netlink")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
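+A note for readers, not part of the upstream commit: an nla_policy
+entry with .type = NLA_U32 makes netlink attribute parsing enforce a
+minimum payload of sizeof(u32), so the handler can no longer read past
+the end of a shorter, attacker-supplied attribute.  Sketch with
+illustrative names:
+
+	static const struct nla_policy demo_policy[DEMO_MAX + 1] = {
+		[DEMO_QUEUE_MAXLEN] = { .type = NLA_U32 },
+	};
+
+	/* nla_parse() consulting demo_policy now rejects a
+	 * DEMO_QUEUE_MAXLEN shorter than four bytes instead of letting
+	 * nla_get_u32() read uninitialised memory beyond the payload. */
+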
+---
+ net/netfilter/nfnetlink_queue.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/netfilter/nfnetlink_queue.c
++++ b/net/netfilter/nfnetlink_queue.c
+@@ -1228,6 +1228,9 @@ static int nfqnl_recv_unsupp(struct net
+ static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
+       [NFQA_CFG_CMD]          = { .len = sizeof(struct nfqnl_msg_config_cmd) },
+       [NFQA_CFG_PARAMS]       = { .len = sizeof(struct nfqnl_msg_config_params) },
++      [NFQA_CFG_QUEUE_MAXLEN] = { .type = NLA_U32 },
++      [NFQA_CFG_MASK]         = { .type = NLA_U32 },
++      [NFQA_CFG_FLAGS]        = { .type = NLA_U32 },
+ };
+ static const struct nf_queue_handler nfqh = {
diff --git a/queue-4.14/netfilter-x_tables-initialise-match-target-check-parameter-struct.patch b/queue-4.14/netfilter-x_tables-initialise-match-target-check-parameter-struct.patch
new file mode 100644 (file)
index 0000000..7a8c634
--- /dev/null
@@ -0,0 +1,70 @@
+From c568503ef02030f169c9e19204def610a3510918 Mon Sep 17 00:00:00 2001
+From: Florian Westphal <fw@strlen.de>
+Date: Thu, 7 Jun 2018 21:34:43 +0200
+Subject: netfilter: x_tables: initialise match/target check parameter struct
+
+From: Florian Westphal <fw@strlen.de>
+
+commit c568503ef02030f169c9e19204def610a3510918 upstream.
+
+syzbot reports the following splat:
+
+BUG: KMSAN: uninit-value in ebt_stp_mt_check+0x24b/0x450
+ net/bridge/netfilter/ebt_stp.c:162
+ ebt_stp_mt_check+0x24b/0x450 net/bridge/netfilter/ebt_stp.c:162
+ xt_check_match+0x1438/0x1650 net/netfilter/x_tables.c:506
+ ebt_check_match net/bridge/netfilter/ebtables.c:372 [inline]
+ ebt_check_entry net/bridge/netfilter/ebtables.c:702 [inline]
+
+The uninitialised access is
+   xt_mtchk_param->nft_compat
+
+... which should be set to 0.
+Fix it by zeroing the struct beforehand, same for tgchk.
+
+ip(6)tables targetinfo uses a C99-style initialiser, so no change
+needed there.
+
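+For readers: a C99 designated initialiser zeroes every member it does
+not name, which is why those call sites are already safe.  Illustrative
+sketch:
+
+	struct xt_tgchk_param par = {
+		.net   = net,
+		.table = name,
+		/* .nft_compat and all other unnamed members start at 0 */
+	};
+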
+Reported-by: syzbot+da4494182233c23a5fcf@syzkaller.appspotmail.com
+Fixes: 55917a21d0cc0 ("netfilter: x_tables: add context to know if extension runs from nft_compat")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/bridge/netfilter/ebtables.c |    2 ++
+ net/ipv4/netfilter/ip_tables.c  |    1 +
+ net/ipv6/netfilter/ip6_tables.c |    1 +
+ 3 files changed, 4 insertions(+)
+
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -696,6 +696,8 @@ ebt_check_entry(struct ebt_entry *e, str
+       }
+       i = 0;
++      memset(&mtpar, 0, sizeof(mtpar));
++      memset(&tgpar, 0, sizeof(tgpar));
+       mtpar.net       = tgpar.net       = net;
+       mtpar.table     = tgpar.table     = name;
+       mtpar.entryinfo = tgpar.entryinfo = e;
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -541,6 +541,7 @@ find_check_entry(struct ipt_entry *e, st
+               return -ENOMEM;
+       j = 0;
++      memset(&mtpar, 0, sizeof(mtpar));
+       mtpar.net       = net;
+       mtpar.table     = name;
+       mtpar.entryinfo = &e->ip;
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -561,6 +561,7 @@ find_check_entry(struct ip6t_entry *e, s
+               return -ENOMEM;
+       j = 0;
++      memset(&mtpar, 0, sizeof(mtpar));
+       mtpar.net       = net;
+       mtpar.table     = name;
+       mtpar.entryinfo = &e->ipv6;
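
To make the bug class concrete: a stack structure whose fields are assigned one at a time leaves every unassigned member holding stack garbage, which is exactly the uninit-value KMSAN flags. A minimal sketch with a made-up struct (not the real xt_mtchk_param):

#include <stdbool.h>
#include <string.h>

struct check_param {
        const char *table;
        bool nft_compat;        /* the member nobody assigned */
};

static bool buggy(void)
{
        struct check_param p;   /* uninitialised stack memory */

        p.table = "filter";     /* only some fields are set... */
        return p.nft_compat;    /* ...so this read is undefined */
}

static bool fixed(void)
{
        struct check_param p;

        memset(&p, 0, sizeof(p));       /* the patch: zero everything first */
        p.table = "filter";
        return p.nft_compat;            /* now a well-defined false */
}

A C99 designated initialiser, e.g. struct check_param p = { .table = "filter" };, zero-fills the remaining members automatically, which is why the ip(6)tables targetinfo path mentioned above needed no change.
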
diff --git a/queue-4.14/nvme-pci-remap-cmb-sq-entries-on-every-controller-reset.patch b/queue-4.14/nvme-pci-remap-cmb-sq-entries-on-every-controller-reset.patch
new file mode 100644
index 0000000..a6ce6c3
--- /dev/null
@@ -0,0 +1,74 @@
+From 815c6704bf9f1c59f3a6be380a4032b9c57b12f1 Mon Sep 17 00:00:00 2001
+From: Keith Busch <keith.busch@intel.com>
+Date: Tue, 13 Feb 2018 05:44:44 -0700
+Subject: nvme-pci: Remap CMB SQ entries on every controller reset
+
+From: Keith Busch <keith.busch@intel.com>
+
+commit 815c6704bf9f1c59f3a6be380a4032b9c57b12f1 upstream.
+
+The controller memory buffer is remapped into the kernel address space
+on each reset, but the driver was setting the submission queue base address
+only on the very first queue creation. The remapped address is likely to
+change after a reset, so accessing the old address will hit a kernel bug.
+
+This patch fixes that by setting the queue's CMB base address each time
+the queue is created.
+
+Fixes: f63572dff1421 ("nvme: unmap CMB and remove sysfs file in reset path")
+Reported-by: Christian Black <christian.d.black@intel.com>
+Cc: Jon Derrick <jonathan.derrick@intel.com>
+Cc: <stable@vger.kernel.org> # 4.9+
+Signed-off-by: Keith Busch <keith.busch@intel.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Scott Bauer <scott.bauer@intel.com>
+Reviewed-by: Jon Derrick <jonathan.derrick@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvme/host/pci.c |   27 ++++++++++++++++-----------
+ 1 file changed, 16 insertions(+), 11 deletions(-)
+
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -1233,17 +1233,15 @@ static int nvme_cmb_qdepth(struct nvme_d
+ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
+                               int qid, int depth)
+ {
+-      if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
+-              unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
+-                                                    dev->ctrl.page_size);
+-              nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
+-              nvmeq->sq_cmds_io = dev->cmb + offset;
+-      } else {
+-              nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+-                                      &nvmeq->sq_dma_addr, GFP_KERNEL);
+-              if (!nvmeq->sq_cmds)
+-                      return -ENOMEM;
+-      }
++
++      /* CMB SQEs will be mapped before creation */
++      if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz))
++              return 0;
++
++      nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
++                                          &nvmeq->sq_dma_addr, GFP_KERNEL);
++      if (!nvmeq->sq_cmds)
++              return -ENOMEM;
+       return 0;
+ }
+@@ -1320,6 +1318,13 @@ static int nvme_create_queue(struct nvme
+       struct nvme_dev *dev = nvmeq->dev;
+       int result;
++      if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
++              unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth),
++                                                    dev->ctrl.page_size);
++              nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
++              nvmeq->sq_cmds_io = dev->cmb + offset;
++      }
++
+       nvmeq->cq_vector = qid - 1;
+       result = adapter_alloc_cq(dev, qid, nvmeq);
+       if (result < 0)
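
The rule the patch enforces: a pointer derived from a remappable region is valid only for the lifetime of that particular mapping, so it must be recomputed from the live base after every remap. That is why the offset computation moves out of nvme_alloc_sq_cmds(), which allocates once, and into nvme_create_queue(), which runs again after each reset. A compressed sketch with hypothetical names (not the driver's real structures):

#include <stddef.h>

struct ctrl {
        void *cmb;              /* re-ioremap()ed on every controller reset */
        void *sq_cmds_io;       /* this queue's slice of the CMB */
};

/* fixed shape: derive the slice from the *current* mapping at every
 * queue creation; caching it across a reset leaves a dangling pointer */
static void map_sq_slice(struct ctrl *c, int qid, size_t stride)
{
        size_t offset = (size_t)(qid - 1) * stride;

        c->sq_cmds_io = (char *)c->cmb + offset;
}
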
diff --git a/queue-4.14/pm-hibernate-fix-oops-at-snapshot_write.patch b/queue-4.14/pm-hibernate-fix-oops-at-snapshot_write.patch
new file mode 100644
index 0000000..40d6ce5
--- /dev/null
@@ -0,0 +1,38 @@
+From fc14eebfc20854a38fd9f1d93a42b1783dad4d17 Mon Sep 17 00:00:00 2001
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Date: Sat, 26 May 2018 09:59:36 +0900
+Subject: PM / hibernate: Fix oops at snapshot_write()
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+commit fc14eebfc20854a38fd9f1d93a42b1783dad4d17 upstream.
+
+syzbot is reporting a NULL pointer dereference at snapshot_write() [1].
+This is because data->handle is zero-cleared by ioctl(SNAPSHOT_FREE).
+Fix this by checking data_of(data->handle) != NULL before using it.
+
+[1] https://syzkaller.appspot.com/bug?id=828a3c71bd344a6de8b6a31233d51a72099f27fd
+
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Reported-by: syzbot <syzbot+ae590932da6e45d6564d@syzkaller.appspotmail.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/power/user.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/kernel/power/user.c
++++ b/kernel/power/user.c
+@@ -186,6 +186,11 @@ static ssize_t snapshot_write(struct fil
+               res = PAGE_SIZE - pg_offp;
+       }
++      if (!data_of(data->handle)) {
++              res = -EINVAL;
++              goto unlock;
++      }
++
+       res = simple_write_to_buffer(data_of(data->handle), res, &pg_offp,
+                       buf, count);
+       if (res > 0)
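
The shape of the fix is a stale-handle check: one operation on the device node, ioctl(SNAPSHOT_FREE), can tear down state that a later write() still expects, so the write path must re-validate the handle before dereferencing it. A minimal sketch with hypothetical names (not the actual kernel/power/user.c code):

#include <errno.h>
#include <stddef.h>
#include <sys/types.h>

struct snap_data {
        void *buffer;   /* zero-cleared when userspace issues the FREE ioctl */
};

static ssize_t snapshot_write_sketch(struct snap_data *data,
                                     const char *buf, size_t count)
{
        (void)buf;              /* payload unused in this sketch */

        if (!data->buffer)      /* handle freed between syscalls */
                return -EINVAL; /* fail cleanly instead of oopsing on NULL */

        /* ... here it is safe to copy count bytes into data->buffer ... */
        return (ssize_t)count;
}
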
diff --git a/queue-4.14/rdma-ucm-mark-ucm-interface-as-broken.patch b/queue-4.14/rdma-ucm-mark-ucm-interface-as-broken.patch
new file mode 100644
index 0000000..b15b56f
--- /dev/null
@@ -0,0 +1,66 @@
+From 7a8690ed6f5346f6738971892205e91d39b6b901 Mon Sep 17 00:00:00 2001
+From: Leon Romanovsky <leonro@mellanox.com>
+Date: Wed, 23 May 2018 08:22:11 +0300
+Subject: RDMA/ucm: Mark UCM interface as BROKEN
+
+From: Leon Romanovsky <leonro@mellanox.com>
+
+commit 7a8690ed6f5346f6738971892205e91d39b6b901 upstream.
+
+In commit 357d23c811a7 ("Remove the obsolete libibcm library")
+in rdma-core [1], we removed the obsolete library which used the
+/dev/infiniband/ucmX interface.
+
+Following multiple syzkaller reports about non-sanitized
+user input in the UCMA module, a short audit revealed the same
+issues in the UCM module too.
+
+It is better to disable this interface in the kernel
+before the syzkaller team invests time and energy in hardening
+this unused interface.
+
+[1] https://github.com/linux-rdma/rdma-core/pull/279
+
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/Kconfig       |   12 ++++++++++++
+ drivers/infiniband/core/Makefile |    4 ++--
+ 2 files changed, 14 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/Kconfig
++++ b/drivers/infiniband/Kconfig
+@@ -34,6 +34,18 @@ config INFINIBAND_USER_ACCESS
+         libibverbs, libibcm and a hardware driver library from
+         <http://www.openfabrics.org/git/>.
++config INFINIBAND_USER_ACCESS_UCM
++      bool "Userspace CM (UCM, DEPRECATED)"
++      depends on BROKEN
++      depends on INFINIBAND_USER_ACCESS
++      help
++        The UCM module has known security flaws, which no one is
++        interested to fix. The user-space part of this code was
++        dropped from the upstream a long time ago.
++
++        This option is DEPRECATED and planned to be removed.
++
++
+ config INFINIBAND_EXP_USER_ACCESS
+       bool "Allow experimental support for Infiniband ABI"
+       depends on INFINIBAND_USER_ACCESS
+--- a/drivers/infiniband/core/Makefile
++++ b/drivers/infiniband/core/Makefile
+@@ -5,8 +5,8 @@ user_access-$(CONFIG_INFINIBAND_ADDR_TRA
+ obj-$(CONFIG_INFINIBAND) +=           ib_core.o ib_cm.o iw_cm.o \
+                                       $(infiniband-y)
+ obj-$(CONFIG_INFINIBAND_USER_MAD) +=  ib_umad.o
+-obj-$(CONFIG_INFINIBAND_USER_ACCESS) +=       ib_uverbs.o ib_ucm.o \
+-                                      $(user_access-y)
++obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o $(user_access-y)
++obj-$(CONFIG_INFINIBAND_USER_ACCESS_UCM) += ib_ucm.o $(user_access-y)
+ ib_core-y :=                  packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
+                               device.o fmr_pool.o cache.o netlink.o \
diff --git a/queue-4.14/series b/queue-4.14/series
index 64c40374f8178748d8860a0de957f98e19acb5b7..39b380299083ca63bbee124ca76fa4db5988bf96 100644
@@ -42,3 +42,15 @@ ib-hfi1-fix-incorrect-mixing-of-err_ptr-and-null-return-values.patch
 i2c-tegra-fix-nack-error-handling.patch
 iw_cxgb4-correctly-enforce-the-max-reg_mr-depth.patch
 xen-setup-pv-irq-ops-vector-earlier.patch
+nvme-pci-remap-cmb-sq-entries-on-every-controller-reset.patch
+crypto-x86-salsa20-remove-x86-salsa20-implementations.patch
+uprobes-x86-remove-incorrect-warn_on-in-uprobe_init_insn.patch
+netfilter-nf_queue-augment-nfqa_cfg_policy.patch
+netfilter-x_tables-initialise-match-target-check-parameter-struct.patch
+loop-add-recursion-validation-to-loop_change_fd.patch
+pm-hibernate-fix-oops-at-snapshot_write.patch
+rdma-ucm-mark-ucm-interface-as-broken.patch
+loop-remember-whether-sysfs_create_group-was-done.patch
+kvm-vmx-nested-vm-entry-prereqs-for-event-inj.patch
+f2fs-give-message-and-set-need_fsck-given-broken-node-id.patch
+f2fs-sanity-check-on-sit-entry.patch
diff --git a/queue-4.14/uprobes-x86-remove-incorrect-warn_on-in-uprobe_init_insn.patch b/queue-4.14/uprobes-x86-remove-incorrect-warn_on-in-uprobe_init_insn.patch
new file mode 100644
index 0000000..c1d752b
--- /dev/null
@@ -0,0 +1,39 @@
+From 90718e32e1dcc2479acfa208ccfc6442850b594c Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov <oleg@redhat.com>
+Date: Fri, 18 May 2018 18:27:39 +0200
+Subject: uprobes/x86: Remove incorrect WARN_ON() in uprobe_init_insn()
+
+From: Oleg Nesterov <oleg@redhat.com>
+
+commit 90718e32e1dcc2479acfa208ccfc6442850b594c upstream.
+
+insn_get_length() has the side-effect of processing the entire instruction,
+but only if it was decoded successfully; otherwise insn_complete() can fail,
+and in that case we need to just return an error without warning.
+
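
A rule of thumb underlies the change: WARN_ON() is for invariants that only a kernel bug can violate, while input userspace fully controls, such as the bytes of a probed instruction, should fail with a plain error. A hedged sketch of the fixed shape, using a stand-in type rather than the real struct insn:

#include <errno.h>

struct decoded {
        int complete;   /* stand-in for insn_complete(insn) */
};

static int init_insn_sketch(const struct decoded *insn)
{
        if (!insn->complete)            /* undecodable user-supplied bytes: */
                return -ENOEXEC;        /* expected input, no warning splat */
        return 0;
}
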
+Reported-by: syzbot+30d675e3ca03c1c351e7@syzkaller.appspotmail.com
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: syzkaller-bugs@googlegroups.com
+Link: https://lkml.kernel.org/lkml/20180518162739.GA5559@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/uprobes.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/uprobes.c
++++ b/arch/x86/kernel/uprobes.c
+@@ -290,7 +290,7 @@ static int uprobe_init_insn(struct arch_
+       insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64);
+       /* has the side-effect of processing the entire instruction */
+       insn_get_length(insn);
+-      if (WARN_ON_ONCE(!insn_complete(insn)))
++      if (!insn_complete(insn))
+               return -ENOEXEC;
+       if (is_prefix_bad(insn))