git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.11-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 16 May 2017 10:50:45 +0000 (12:50 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 16 May 2017 10:50:45 +0000 (12:50 +0200)
added patches:
arm64-kvm-fix-decoding-of-rt-rt2-when-trapping-aarch32-cp-accesses.patch
block-fix-blk_integrity_register-to-use-template-s-interval_exp-if-not-0.patch
crypto-algif_aead-require-setkey-before-accept-2.patch
crypto-ccp-change-isr-handler-method-for-a-v3-ccp.patch
crypto-ccp-change-isr-handler-method-for-a-v5-ccp.patch
crypto-ccp-disable-interrupts-early-on-unload.patch
crypto-ccp-use-only-the-relevant-interrupt-bits.patch
crypto-s5p-sss-close-possible-race-for-completed-requests.patch
dm-crypt-rewrite-wipe-key-in-crypto-layer-using-random-data.patch
dm-era-save-spacemap-metadata-root-after-the-pre-commit.patch
dm-rq-check-blk_mq_register_dev-return-value-in-dm_mq_init_request_queue.patch
dm-thin-fix-a-memory-leak-when-passing-discard-bio-down.patch
kvm-arm-arm64-fix-races-in-kvm_psci_vcpu_on.patch
kvm-x86-fix-user-triggerable-warning-in-kvm_apic_accept_events.patch
perf-x86-fix-broadwell-ep-dram-rapl-events.patch
revert-kvm-support-vcpu-based-gfn-hva-cache.patch
selftests-x86-ldt_gdt_32-work-around-a-glibc-sigaction-bug.patch
um-fix-ptrace_pokeuser-on-x86_64.patch
x86-boot-fix-bss-corruption-overwrite-bug-in-early-x86-kernel-startup.patch
x86-pmem-fix-cache-flushing-for-iovec-write-8-bytes.patch

21 files changed:
queue-4.11/arm64-kvm-fix-decoding-of-rt-rt2-when-trapping-aarch32-cp-accesses.patch [new file with mode: 0644]
queue-4.11/block-fix-blk_integrity_register-to-use-template-s-interval_exp-if-not-0.patch [new file with mode: 0644]
queue-4.11/crypto-algif_aead-require-setkey-before-accept-2.patch [new file with mode: 0644]
queue-4.11/crypto-ccp-change-isr-handler-method-for-a-v3-ccp.patch [new file with mode: 0644]
queue-4.11/crypto-ccp-change-isr-handler-method-for-a-v5-ccp.patch [new file with mode: 0644]
queue-4.11/crypto-ccp-disable-interrupts-early-on-unload.patch [new file with mode: 0644]
queue-4.11/crypto-ccp-use-only-the-relevant-interrupt-bits.patch [new file with mode: 0644]
queue-4.11/crypto-s5p-sss-close-possible-race-for-completed-requests.patch [new file with mode: 0644]
queue-4.11/dm-crypt-rewrite-wipe-key-in-crypto-layer-using-random-data.patch [new file with mode: 0644]
queue-4.11/dm-era-save-spacemap-metadata-root-after-the-pre-commit.patch [new file with mode: 0644]
queue-4.11/dm-rq-check-blk_mq_register_dev-return-value-in-dm_mq_init_request_queue.patch [new file with mode: 0644]
queue-4.11/dm-thin-fix-a-memory-leak-when-passing-discard-bio-down.patch [new file with mode: 0644]
queue-4.11/kvm-arm-arm64-fix-races-in-kvm_psci_vcpu_on.patch [new file with mode: 0644]
queue-4.11/kvm-x86-fix-user-triggerable-warning-in-kvm_apic_accept_events.patch [new file with mode: 0644]
queue-4.11/perf-x86-fix-broadwell-ep-dram-rapl-events.patch [new file with mode: 0644]
queue-4.11/revert-kvm-support-vcpu-based-gfn-hva-cache.patch [new file with mode: 0644]
queue-4.11/selftests-x86-ldt_gdt_32-work-around-a-glibc-sigaction-bug.patch [new file with mode: 0644]
queue-4.11/series
queue-4.11/um-fix-ptrace_pokeuser-on-x86_64.patch [new file with mode: 0644]
queue-4.11/x86-boot-fix-bss-corruption-overwrite-bug-in-early-x86-kernel-startup.patch [new file with mode: 0644]
queue-4.11/x86-pmem-fix-cache-flushing-for-iovec-write-8-bytes.patch [new file with mode: 0644]

diff --git a/queue-4.11/arm64-kvm-fix-decoding-of-rt-rt2-when-trapping-aarch32-cp-accesses.patch b/queue-4.11/arm64-kvm-fix-decoding-of-rt-rt2-when-trapping-aarch32-cp-accesses.patch
new file mode 100644 (file)
index 0000000..bd79cdc
--- /dev/null
@@ -0,0 +1,71 @@
+From c667186f1c01ca8970c785888868b7ffd74e51ee Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Thu, 27 Apr 2017 19:06:48 +0100
+Subject: arm64: KVM: Fix decoding of Rt/Rt2 when trapping AArch32 CP accesses
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit c667186f1c01ca8970c785888868b7ffd74e51ee upstream.
+
+Our 32bit CP14/15 handling inherited some of the ARMv7 code for handling
+the trapped system registers, completely missing the fact that the
+fields for Rt and Rt2 are now 5 bits wide, not 4...
+
+Let's fix it, and provide an accessor for the most common Rt case.
+
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/kvm_emulate.h |    6 ++++++
+ arch/arm64/kvm/sys_regs.c            |    8 ++++----
+ 2 files changed, 10 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -240,6 +240,12 @@ static inline u8 kvm_vcpu_trap_get_fault
+       return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
+ }
++static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
++{
++      u32 esr = kvm_vcpu_get_hsr(vcpu);
++      return (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
++}
++
+ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
+ {
+       return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -1638,8 +1638,8 @@ static int kvm_handle_cp_64(struct kvm_v
+ {
+       struct sys_reg_params params;
+       u32 hsr = kvm_vcpu_get_hsr(vcpu);
+-      int Rt = (hsr >> 5) & 0xf;
+-      int Rt2 = (hsr >> 10) & 0xf;
++      int Rt = kvm_vcpu_sys_get_rt(vcpu);
++      int Rt2 = (hsr >> 10) & 0x1f;
+       params.is_aarch32 = true;
+       params.is_32bit = false;
+@@ -1690,7 +1690,7 @@ static int kvm_handle_cp_32(struct kvm_v
+ {
+       struct sys_reg_params params;
+       u32 hsr = kvm_vcpu_get_hsr(vcpu);
+-      int Rt  = (hsr >> 5) & 0xf;
++      int Rt  = kvm_vcpu_sys_get_rt(vcpu);
+       params.is_aarch32 = true;
+       params.is_32bit = true;
+@@ -1805,7 +1805,7 @@ int kvm_handle_sys_reg(struct kvm_vcpu *
+ {
+       struct sys_reg_params params;
+       unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+-      int Rt = (esr >> 5) & 0x1f;
++      int Rt = kvm_vcpu_sys_get_rt(vcpu);
+       int ret;
+       trace_kvm_handle_sys_reg(esr);
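
The core of the bug fixed above is a mask that is one bit too narrow: the ISS encoding in ESR_ELx reserves 5 bits for Rt/Rt2, so masking with 0xf silently truncates register numbers of 16 and above. A minimal, standalone sketch of the difference (the shift/mask values mirror the patch; everything else is illustrative):

```c
#include <stdint.h>
#include <stdio.h>

#define ISS_RT_SHIFT 5
#define ISS_RT_MASK  (0x1fu << ISS_RT_SHIFT)

int main(void)
{
	uint32_t esr = 18u << ISS_RT_SHIFT;	/* trap naming register 18 */

	int rt_old = (esr >> ISS_RT_SHIFT) & 0xf;		/* 4-bit decode */
	int rt_new = (esr & ISS_RT_MASK) >> ISS_RT_SHIFT;	/* 5-bit decode */

	printf("old=%d new=%d\n", rt_old, rt_new);	/* old=2 new=18 */
	return 0;
}
```
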
diff --git a/queue-4.11/block-fix-blk_integrity_register-to-use-template-s-interval_exp-if-not-0.patch b/queue-4.11/block-fix-blk_integrity_register-to-use-template-s-interval_exp-if-not-0.patch
new file mode 100644 (file)
index 0000000..8f47a3c
--- /dev/null
@@ -0,0 +1,39 @@
+From 2859323e35ab5fc42f351fbda23ab544eaa85945 Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Sat, 22 Apr 2017 17:22:09 -0400
+Subject: block: fix blk_integrity_register to use template's interval_exp if not 0
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit 2859323e35ab5fc42f351fbda23ab544eaa85945 upstream.
+
+When registering an integrity profile, use the template's interval_exp
+if it is not 0; otherwise use the ilog2() of the logical block size of
+the provided gendisk.
+
+This fixes a long-standing DM linear target bug where it cannot pass
+integrity data to the underlying device if its logical block size
+conflicts with the underlying device's logical block size.
+
+Reported-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Acked-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-integrity.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/block/blk-integrity.c
++++ b/block/blk-integrity.c
+@@ -412,7 +412,8 @@ void blk_integrity_register(struct gendi
+       bi->flags = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE |
+               template->flags;
+-      bi->interval_exp = ilog2(queue_logical_block_size(disk->queue));
++      bi->interval_exp = template->interval_exp ? :
++              ilog2(queue_logical_block_size(disk->queue));
+       bi->profile = template->profile ? template->profile : &nop_profile;
+       bi->tuple_size = template->tuple_size;
+       bi->tag_size = template->tag_size;
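
The one-line fix leans on the GNU C conditional with an omitted middle operand: "x ? : y" is equivalent to "x ? x : y", except that x is evaluated only once. A small demo of the semantics (values made up for illustration):

```c
#include <stdio.h>

int main(void)
{
	unsigned int template_exp = 0;	/* template left interval_exp unset */
	unsigned int disk_exp = 9;	/* e.g. ilog2(512) == 9 */

	/* GNU extension: picks template_exp when non-zero, else disk_exp */
	unsigned int interval_exp = template_exp ? : disk_exp;

	printf("interval_exp=%u\n", interval_exp);	/* prints 9 */
	return 0;
}
```
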
diff --git a/queue-4.11/crypto-algif_aead-require-setkey-before-accept-2.patch b/queue-4.11/crypto-algif_aead-require-setkey-before-accept-2.patch
new file mode 100644 (file)
index 0000000..b13c9b3
--- /dev/null
@@ -0,0 +1,241 @@
+From 2a2a251f110576b1d89efbd0662677d7e7db21a8 Mon Sep 17 00:00:00 2001
+From: Stephan Mueller <smueller@chronox.de>
+Date: Mon, 24 Apr 2017 11:15:23 +0200
+Subject: crypto: algif_aead - Require setkey before accept(2)
+
+From: Stephan Mueller <smueller@chronox.de>
+
+commit 2a2a251f110576b1d89efbd0662677d7e7db21a8 upstream.
+
+Some cipher implementations will crash if you try to use them
+without calling setkey first.  This patch adds a check so that
+the accept(2) call will fail with -ENOKEY if setkey hasn't been
+done on the socket yet.
+
+Fixes: 400c40cf78da ("crypto: algif - add AEAD support")
+Signed-off-by: Stephan Mueller <smueller@chronox.de>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/algif_aead.c |  157 +++++++++++++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 149 insertions(+), 8 deletions(-)
+
+--- a/crypto/algif_aead.c
++++ b/crypto/algif_aead.c
+@@ -45,6 +45,11 @@ struct aead_async_req {
+       char iv[];
+ };
++struct aead_tfm {
++      struct crypto_aead *aead;
++      bool has_key;
++};
++
+ struct aead_ctx {
+       struct aead_sg_list tsgl;
+       struct aead_async_rsgl first_rsgl;
+@@ -723,24 +728,146 @@ static struct proto_ops algif_aead_ops =
+       .poll           =       aead_poll,
+ };
++static int aead_check_key(struct socket *sock)
++{
++      int err = 0;
++      struct sock *psk;
++      struct alg_sock *pask;
++      struct aead_tfm *tfm;
++      struct sock *sk = sock->sk;
++      struct alg_sock *ask = alg_sk(sk);
++
++      lock_sock(sk);
++      if (ask->refcnt)
++              goto unlock_child;
++
++      psk = ask->parent;
++      pask = alg_sk(ask->parent);
++      tfm = pask->private;
++
++      err = -ENOKEY;
++      lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
++      if (!tfm->has_key)
++              goto unlock;
++
++      if (!pask->refcnt++)
++              sock_hold(psk);
++
++      ask->refcnt = 1;
++      sock_put(psk);
++
++      err = 0;
++
++unlock:
++      release_sock(psk);
++unlock_child:
++      release_sock(sk);
++
++      return err;
++}
++
++static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
++                                size_t size)
++{
++      int err;
++
++      err = aead_check_key(sock);
++      if (err)
++              return err;
++
++      return aead_sendmsg(sock, msg, size);
++}
++
++static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
++                                     int offset, size_t size, int flags)
++{
++      int err;
++
++      err = aead_check_key(sock);
++      if (err)
++              return err;
++
++      return aead_sendpage(sock, page, offset, size, flags);
++}
++
++static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
++                                size_t ignored, int flags)
++{
++      int err;
++
++      err = aead_check_key(sock);
++      if (err)
++              return err;
++
++      return aead_recvmsg(sock, msg, ignored, flags);
++}
++
++static struct proto_ops algif_aead_ops_nokey = {
++      .family         =       PF_ALG,
++
++      .connect        =       sock_no_connect,
++      .socketpair     =       sock_no_socketpair,
++      .getname        =       sock_no_getname,
++      .ioctl          =       sock_no_ioctl,
++      .listen         =       sock_no_listen,
++      .shutdown       =       sock_no_shutdown,
++      .getsockopt     =       sock_no_getsockopt,
++      .mmap           =       sock_no_mmap,
++      .bind           =       sock_no_bind,
++      .accept         =       sock_no_accept,
++      .setsockopt     =       sock_no_setsockopt,
++
++      .release        =       af_alg_release,
++      .sendmsg        =       aead_sendmsg_nokey,
++      .sendpage       =       aead_sendpage_nokey,
++      .recvmsg        =       aead_recvmsg_nokey,
++      .poll           =       aead_poll,
++};
++
+ static void *aead_bind(const char *name, u32 type, u32 mask)
+ {
+-      return crypto_alloc_aead(name, type, mask);
++      struct aead_tfm *tfm;
++      struct crypto_aead *aead;
++
++      tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
++      if (!tfm)
++              return ERR_PTR(-ENOMEM);
++
++      aead = crypto_alloc_aead(name, type, mask);
++      if (IS_ERR(aead)) {
++              kfree(tfm);
++              return ERR_CAST(aead);
++      }
++
++      tfm->aead = aead;
++
++      return tfm;
+ }
+ static void aead_release(void *private)
+ {
+-      crypto_free_aead(private);
++      struct aead_tfm *tfm = private;
++
++      crypto_free_aead(tfm->aead);
++      kfree(tfm);
+ }
+ static int aead_setauthsize(void *private, unsigned int authsize)
+ {
+-      return crypto_aead_setauthsize(private, authsize);
++      struct aead_tfm *tfm = private;
++
++      return crypto_aead_setauthsize(tfm->aead, authsize);
+ }
+ static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
+ {
+-      return crypto_aead_setkey(private, key, keylen);
++      struct aead_tfm *tfm = private;
++      int err;
++
++      err = crypto_aead_setkey(tfm->aead, key, keylen);
++      tfm->has_key = !err;
++
++      return err;
+ }
+ static void aead_sock_destruct(struct sock *sk)
+@@ -757,12 +884,14 @@ static void aead_sock_destruct(struct so
+       af_alg_release_parent(sk);
+ }
+-static int aead_accept_parent(void *private, struct sock *sk)
++static int aead_accept_parent_nokey(void *private, struct sock *sk)
+ {
+       struct aead_ctx *ctx;
+       struct alg_sock *ask = alg_sk(sk);
+-      unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
+-      unsigned int ivlen = crypto_aead_ivsize(private);
++      struct aead_tfm *tfm = private;
++      struct crypto_aead *aead = tfm->aead;
++      unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(aead);
++      unsigned int ivlen = crypto_aead_ivsize(aead);
+       ctx = sock_kmalloc(sk, len, GFP_KERNEL);
+       if (!ctx)
+@@ -789,7 +918,7 @@ static int aead_accept_parent(void *priv
+       ask->private = ctx;
+-      aead_request_set_tfm(&ctx->aead_req, private);
++      aead_request_set_tfm(&ctx->aead_req, aead);
+       aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                 af_alg_complete, &ctx->completion);
+@@ -798,13 +927,25 @@ static int aead_accept_parent(void *priv
+       return 0;
+ }
++static int aead_accept_parent(void *private, struct sock *sk)
++{
++      struct aead_tfm *tfm = private;
++
++      if (!tfm->has_key)
++              return -ENOKEY;
++
++      return aead_accept_parent_nokey(private, sk);
++}
++
+ static const struct af_alg_type algif_type_aead = {
+       .bind           =       aead_bind,
+       .release        =       aead_release,
+       .setkey         =       aead_setkey,
+       .setauthsize    =       aead_setauthsize,
+       .accept         =       aead_accept_parent,
++      .accept_nokey   =       aead_accept_parent_nokey,
+       .ops            =       &algif_aead_ops,
++      .ops_nokey      =       &algif_aead_ops_nokey,
+       .name           =       "aead",
+       .owner          =       THIS_MODULE
+ };
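
From userspace, the new behaviour is visible on the AF_ALG socket interface: accept(2) on an AEAD transform socket now fails with ENOKEY until a key has been set. A rough sketch (algorithm name and key size are examples; error handling is minimal):

```c
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",
		.salg_name   = "gcm(aes)",
	};
	unsigned char key[16] = { 0 };	/* demo key, all zeroes */
	int tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);

	bind(tfm, (struct sockaddr *)&sa, sizeof(sa));

	if (accept(tfm, NULL, NULL) < 0)
		perror("accept before setkey");	/* ENOKEY with this patch */

	setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, sizeof(key));

	int op = accept(tfm, NULL, NULL);	/* now succeeds */
	close(op);
	close(tfm);
	return 0;
}
```

Without the patch, the first accept() would succeed, and later operations could crash the kernel with cipher implementations that assume setkey has already run.
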
diff --git a/queue-4.11/crypto-ccp-change-isr-handler-method-for-a-v3-ccp.patch b/queue-4.11/crypto-ccp-change-isr-handler-method-for-a-v3-ccp.patch
new file mode 100644 (file)
index 0000000..603c37b
--- /dev/null
@@ -0,0 +1,265 @@
+From 7b537b24e76a1e8e6d7ea91483a45d5b1426809b Mon Sep 17 00:00:00 2001
+From: Gary R Hook <gary.hook@amd.com>
+Date: Fri, 21 Apr 2017 10:50:05 -0500
+Subject: crypto: ccp - Change ISR handler method for a v3 CCP
+
+From: Gary R Hook <gary.hook@amd.com>
+
+commit 7b537b24e76a1e8e6d7ea91483a45d5b1426809b upstream.
+
+The CCP has the ability to perform several operations simultaneously,
+but has only one interrupt.  When implemented as a PCI device and using
+MSI-X/MSI interrupts, use a tasklet model to service interrupts. By
+disabling and enabling interrupts from the CCP, coupled with the
+queuing that tasklets provide, we can ensure that all events
+(occurring on the device) are recognized and serviced.
+
+This change fixes a problem wherein 2 or more busy queues can cause
+notification bits to change state while a (CCP) interrupt is being
+serviced, but after the queue state has been evaluated. This results
+in the event being 'lost' and the queue hanging, waiting to be
+serviced. Since the status bits are never fully de-asserted, the
+CCP never generates another interrupt (all bits zero -> one or more
+bits one), and no further CCP operations will be executed.
+
+Signed-off-by: Gary R Hook <gary.hook@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ccp/ccp-dev-v3.c |  120 +++++++++++++++++++++++-----------------
+ drivers/crypto/ccp/ccp-dev.h    |    3 +
+ drivers/crypto/ccp/ccp-pci.c    |    2 
+ 3 files changed, 75 insertions(+), 50 deletions(-)
+
+--- a/drivers/crypto/ccp/ccp-dev-v3.c
++++ b/drivers/crypto/ccp/ccp-dev-v3.c
+@@ -315,17 +315,73 @@ static int ccp_perform_ecc(struct ccp_op
+       return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+ }
++static void ccp_disable_queue_interrupts(struct ccp_device *ccp)
++{
++      iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
++}
++
++static void ccp_enable_queue_interrupts(struct ccp_device *ccp)
++{
++      iowrite32(ccp->qim, ccp->io_regs + IRQ_MASK_REG);
++}
++
++static void ccp_irq_bh(unsigned long data)
++{
++      struct ccp_device *ccp = (struct ccp_device *)data;
++      struct ccp_cmd_queue *cmd_q;
++      u32 q_int, status;
++      unsigned int i;
++
++      status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
++
++      for (i = 0; i < ccp->cmd_q_count; i++) {
++              cmd_q = &ccp->cmd_q[i];
++
++              q_int = status & (cmd_q->int_ok | cmd_q->int_err);
++              if (q_int) {
++                      cmd_q->int_status = status;
++                      cmd_q->q_status = ioread32(cmd_q->reg_status);
++                      cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
++
++                      /* On error, only save the first error value */
++                      if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
++                              cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
++
++                      cmd_q->int_rcvd = 1;
++
++                      /* Acknowledge the interrupt and wake the kthread */
++                      iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
++                      wake_up_interruptible(&cmd_q->int_queue);
++              }
++      }
++      ccp_enable_queue_interrupts(ccp);
++}
++
++static irqreturn_t ccp_irq_handler(int irq, void *data)
++{
++      struct device *dev = data;
++      struct ccp_device *ccp = dev_get_drvdata(dev);
++
++      ccp_disable_queue_interrupts(ccp);
++      if (ccp->use_tasklet)
++              tasklet_schedule(&ccp->irq_tasklet);
++      else
++              ccp_irq_bh((unsigned long)ccp);
++
++      return IRQ_HANDLED;
++}
++
+ static int ccp_init(struct ccp_device *ccp)
+ {
+       struct device *dev = ccp->dev;
+       struct ccp_cmd_queue *cmd_q;
+       struct dma_pool *dma_pool;
+       char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
+-      unsigned int qmr, qim, i;
++      unsigned int qmr, i;
+       int ret;
+       /* Find available queues */
+-      qim = 0;
++      ccp->qim = 0;
+       qmr = ioread32(ccp->io_regs + Q_MASK_REG);
+       for (i = 0; i < MAX_HW_QUEUES; i++) {
+               if (!(qmr & (1 << i)))
+@@ -370,7 +426,7 @@ static int ccp_init(struct ccp_device *c
+               init_waitqueue_head(&cmd_q->int_queue);
+               /* Build queue interrupt mask (two interrupts per queue) */
+-              qim |= cmd_q->int_ok | cmd_q->int_err;
++              ccp->qim |= cmd_q->int_ok | cmd_q->int_err;
+ #ifdef CONFIG_ARM64
+               /* For arm64 set the recommended queue cache settings */
+@@ -388,14 +444,14 @@ static int ccp_init(struct ccp_device *c
+       dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
+       /* Disable and clear interrupts until ready */
+-      iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
++      ccp_disable_queue_interrupts(ccp);
+       for (i = 0; i < ccp->cmd_q_count; i++) {
+               cmd_q = &ccp->cmd_q[i];
+               ioread32(cmd_q->reg_int_status);
+               ioread32(cmd_q->reg_status);
+       }
+-      iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
++      iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
+       /* Request an irq */
+       ret = ccp->get_irq(ccp);
+@@ -404,6 +460,11 @@ static int ccp_init(struct ccp_device *c
+               goto e_pool;
+       }
++      /* Initialize the ISR tasklet? */
++      if (ccp->use_tasklet)
++              tasklet_init(&ccp->irq_tasklet, ccp_irq_bh,
++                           (unsigned long)ccp);
++
+       dev_dbg(dev, "Starting threads...\n");
+       /* Create a kthread for each queue */
+       for (i = 0; i < ccp->cmd_q_count; i++) {
+@@ -426,7 +487,7 @@ static int ccp_init(struct ccp_device *c
+       dev_dbg(dev, "Enabling interrupts...\n");
+       /* Enable interrupts */
+-      iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
++      ccp_enable_queue_interrupts(ccp);
+       dev_dbg(dev, "Registering device...\n");
+       ccp_add_device(ccp);
+@@ -463,7 +524,7 @@ static void ccp_destroy(struct ccp_devic
+ {
+       struct ccp_cmd_queue *cmd_q;
+       struct ccp_cmd *cmd;
+-      unsigned int qim, i;
++      unsigned int i;
+       /* Unregister the DMA engine */
+       ccp_dmaengine_unregister(ccp);
+@@ -474,22 +535,15 @@ static void ccp_destroy(struct ccp_devic
+       /* Remove this device from the list of available units */
+       ccp_del_device(ccp);
+-      /* Build queue interrupt mask (two interrupt masks per queue) */
+-      qim = 0;
+-      for (i = 0; i < ccp->cmd_q_count; i++) {
+-              cmd_q = &ccp->cmd_q[i];
+-              qim |= cmd_q->int_ok | cmd_q->int_err;
+-      }
+-
+       /* Disable and clear interrupts */
+-      iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
++      ccp_disable_queue_interrupts(ccp);
+       for (i = 0; i < ccp->cmd_q_count; i++) {
+               cmd_q = &ccp->cmd_q[i];
+               ioread32(cmd_q->reg_int_status);
+               ioread32(cmd_q->reg_status);
+       }
+-      iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
++      iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
+       /* Stop the queue kthreads */
+       for (i = 0; i < ccp->cmd_q_count; i++)
+@@ -516,40 +570,6 @@ static void ccp_destroy(struct ccp_devic
+       }
+ }
+-static irqreturn_t ccp_irq_handler(int irq, void *data)
+-{
+-      struct device *dev = data;
+-      struct ccp_device *ccp = dev_get_drvdata(dev);
+-      struct ccp_cmd_queue *cmd_q;
+-      u32 q_int, status;
+-      unsigned int i;
+-
+-      status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
+-
+-      for (i = 0; i < ccp->cmd_q_count; i++) {
+-              cmd_q = &ccp->cmd_q[i];
+-
+-              q_int = status & (cmd_q->int_ok | cmd_q->int_err);
+-              if (q_int) {
+-                      cmd_q->int_status = status;
+-                      cmd_q->q_status = ioread32(cmd_q->reg_status);
+-                      cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+-
+-                      /* On error, only save the first error value */
+-                      if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
+-                              cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+-
+-                      cmd_q->int_rcvd = 1;
+-
+-                      /* Acknowledge the interrupt and wake the kthread */
+-                      iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
+-                      wake_up_interruptible(&cmd_q->int_queue);
+-              }
+-      }
+-
+-      return IRQ_HANDLED;
+-}
+-
+ static const struct ccp_actions ccp3_actions = {
+       .aes = ccp_perform_aes,
+       .xts_aes = ccp_perform_xts_aes,
+--- a/drivers/crypto/ccp/ccp-dev.h
++++ b/drivers/crypto/ccp/ccp-dev.h
+@@ -336,7 +336,10 @@ struct ccp_device {
+       void *dev_specific;
+       int (*get_irq)(struct ccp_device *ccp);
+       void (*free_irq)(struct ccp_device *ccp);
++      unsigned int qim;
+       unsigned int irq;
++      bool use_tasklet;
++      struct tasklet_struct irq_tasklet;
+       /* I/O area used for device communication. The register mapping
+        * starts at an offset into the mapped bar.
+--- a/drivers/crypto/ccp/ccp-pci.c
++++ b/drivers/crypto/ccp/ccp-pci.c
+@@ -69,6 +69,7 @@ static int ccp_get_msix_irqs(struct ccp_
+                       goto e_irq;
+               }
+       }
++      ccp->use_tasklet = true;
+       return 0;
+@@ -100,6 +101,7 @@ static int ccp_get_msi_irq(struct ccp_de
+               dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
+               goto e_msi;
+       }
++      ccp->use_tasklet = true;
+       return 0;
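
The restructuring follows the classic top-half/bottom-half split: the hard IRQ handler only masks the source and schedules deferred work, the tasklet drains and acknowledges all queue status, and interrupts are re-enabled only once everything pending has been consumed. A generic sketch of that shape (the device type and register helpers are hypothetical, not CCP code):

```c
#include <linux/interrupt.h>

struct mydev {
	struct tasklet_struct bh;
	void (*mask_irqs)(struct mydev *dev);
	void (*unmask_irqs)(struct mydev *dev);
};

static void mydev_bh(unsigned long data)
{
	struct mydev *dev = (struct mydev *)data;

	/* ... scan every queue, acknowledge each pending event ... */

	dev->unmask_irqs(dev);		/* re-arm only after draining */
}

static irqreturn_t mydev_isr(int irq, void *data)
{
	struct mydev *dev = data;

	dev->mask_irqs(dev);		/* stop new events racing the scan */
	tasklet_schedule(&dev->bh);	/* defer the real work */
	return IRQ_HANDLED;
}

static void mydev_setup(struct mydev *dev)
{
	tasklet_init(&dev->bh, mydev_bh, (unsigned long)dev);
}
```
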
diff --git a/queue-4.11/crypto-ccp-change-isr-handler-method-for-a-v5-ccp.patch b/queue-4.11/crypto-ccp-change-isr-handler-method-for-a-v5-ccp.patch
new file mode 100644 (file)
index 0000000..a5308f4
--- /dev/null
@@ -0,0 +1,202 @@
+From 6263b51eb3190d30351360fd168959af7e3a49a9 Mon Sep 17 00:00:00 2001
+From: Gary R Hook <gary.hook@amd.com>
+Date: Fri, 21 Apr 2017 10:50:14 -0500
+Subject: crypto: ccp - Change ISR handler method for a v5 CCP
+
+From: Gary R Hook <gary.hook@amd.com>
+
+commit 6263b51eb3190d30351360fd168959af7e3a49a9 upstream.
+
+The CCP has the ability to perform several operations simultaneously,
+but has only one interrupt.  When implemented as a PCI device and using
+MSI-X/MSI interrupts, use a tasklet model to service interrupts. By
+disabling and enabling interrupts from the CCP, coupled with the
+queuing that tasklets provide, we can ensure that all events
+(occurring on the device) are recognized and serviced.
+
+This change fixes a problem wherein 2 or more busy queues can cause
+notification bits to change state while a (CCP) interrupt is being
+serviced, but after the queue state has been evaluated. This results
+in the event being 'lost' and the queue hanging, waiting to be
+serviced. Since the status bits are never fully de-asserted, the
+CCP never generates another interrupt (all bits zero -> one or more
+bits one), and no further CCP operations will be executed.
+
+Signed-off-by: Gary R Hook <gary.hook@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ccp/ccp-dev-v5.c |  111 ++++++++++++++++++++++++----------------
+ 1 file changed, 67 insertions(+), 44 deletions(-)
+
+--- a/drivers/crypto/ccp/ccp-dev-v5.c
++++ b/drivers/crypto/ccp/ccp-dev-v5.c
+@@ -653,6 +653,65 @@ static int ccp_assign_lsbs(struct ccp_de
+       return rc;
+ }
++static void ccp5_disable_queue_interrupts(struct ccp_device *ccp)
++{
++      unsigned int i;
++
++      for (i = 0; i < ccp->cmd_q_count; i++)
++              iowrite32(0x0, ccp->cmd_q[i].reg_int_enable);
++}
++
++static void ccp5_enable_queue_interrupts(struct ccp_device *ccp)
++{
++      unsigned int i;
++
++      for (i = 0; i < ccp->cmd_q_count; i++)
++              iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable);
++}
++
++static void ccp5_irq_bh(unsigned long data)
++{
++      struct ccp_device *ccp = (struct ccp_device *)data;
++      u32 status;
++      unsigned int i;
++
++      for (i = 0; i < ccp->cmd_q_count; i++) {
++              struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
++
++              status = ioread32(cmd_q->reg_interrupt_status);
++
++              if (status) {
++                      cmd_q->int_status = status;
++                      cmd_q->q_status = ioread32(cmd_q->reg_status);
++                      cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
++
++                      /* On error, only save the first error value */
++                      if ((status & INT_ERROR) && !cmd_q->cmd_error)
++                              cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
++
++                      cmd_q->int_rcvd = 1;
++
++                      /* Acknowledge the interrupt and wake the kthread */
++                      iowrite32(status, cmd_q->reg_interrupt_status);
++                      wake_up_interruptible(&cmd_q->int_queue);
++              }
++      }
++      ccp5_enable_queue_interrupts(ccp);
++}
++
++static irqreturn_t ccp5_irq_handler(int irq, void *data)
++{
++      struct device *dev = data;
++      struct ccp_device *ccp = dev_get_drvdata(dev);
++
++      ccp5_disable_queue_interrupts(ccp);
++      if (ccp->use_tasklet)
++              tasklet_schedule(&ccp->irq_tasklet);
++      else
++              ccp5_irq_bh((unsigned long)ccp);
++      return IRQ_HANDLED;
++}
++
+ static int ccp5_init(struct ccp_device *ccp)
+ {
+       struct device *dev = ccp->dev;
+@@ -736,18 +795,17 @@ static int ccp5_init(struct ccp_device *
+       }
+       /* Turn off the queues and disable interrupts until ready */
++      ccp5_disable_queue_interrupts(ccp);
+       for (i = 0; i < ccp->cmd_q_count; i++) {
+               cmd_q = &ccp->cmd_q[i];
+               cmd_q->qcontrol = 0; /* Start with nothing */
+               iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
+-              /* Disable the interrupts */
+-              iowrite32(0x00, cmd_q->reg_int_enable);
+               ioread32(cmd_q->reg_int_status);
+               ioread32(cmd_q->reg_status);
+-              /* Clear the interrupts */
++              /* Clear the interrupt status */
+               iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
+       }
+@@ -758,6 +816,10 @@ static int ccp5_init(struct ccp_device *
+               dev_err(dev, "unable to allocate an IRQ\n");
+               goto e_pool;
+       }
++      /* Initialize the ISR tasklet */
++      if (ccp->use_tasklet)
++              tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh,
++                           (unsigned long)ccp);
+       dev_dbg(dev, "Loading LSB map...\n");
+       /* Copy the private LSB mask to the public registers */
+@@ -826,11 +888,7 @@ static int ccp5_init(struct ccp_device *
+       }
+       dev_dbg(dev, "Enabling interrupts...\n");
+-      /* Enable interrupts */
+-      for (i = 0; i < ccp->cmd_q_count; i++) {
+-              cmd_q = &ccp->cmd_q[i];
+-              iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_int_enable);
+-      }
++      ccp5_enable_queue_interrupts(ccp);
+       dev_dbg(dev, "Registering device...\n");
+       /* Put this on the unit list to make it available */
+@@ -882,15 +940,13 @@ static void ccp5_destroy(struct ccp_devi
+       ccp_del_device(ccp);
+       /* Disable and clear interrupts */
++      ccp5_disable_queue_interrupts(ccp);
+       for (i = 0; i < ccp->cmd_q_count; i++) {
+               cmd_q = &ccp->cmd_q[i];
+               /* Turn off the run bit */
+               iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);
+-              /* Disable the interrupts */
+-              iowrite32(0x00, cmd_q->reg_int_enable);
+-
+               /* Clear the interrupt status */
+               iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
+               ioread32(cmd_q->reg_int_status);
+@@ -925,39 +981,6 @@ static void ccp5_destroy(struct ccp_devi
+       }
+ }
+-static irqreturn_t ccp5_irq_handler(int irq, void *data)
+-{
+-      struct device *dev = data;
+-      struct ccp_device *ccp = dev_get_drvdata(dev);
+-      u32 status;
+-      unsigned int i;
+-
+-      for (i = 0; i < ccp->cmd_q_count; i++) {
+-              struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
+-
+-              status = ioread32(cmd_q->reg_interrupt_status);
+-
+-              if (status) {
+-                      cmd_q->int_status = status;
+-                      cmd_q->q_status = ioread32(cmd_q->reg_status);
+-                      cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+-
+-                      /* On error, only save the first error value */
+-                      if ((status & INT_ERROR) && !cmd_q->cmd_error)
+-                              cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+-
+-                      cmd_q->int_rcvd = 1;
+-
+-                      /* Acknowledge the interrupt and wake the kthread */
+-                      iowrite32(SUPPORTED_INTERRUPTS,
+-                                cmd_q->reg_interrupt_status);
+-                      wake_up_interruptible(&cmd_q->int_queue);
+-              }
+-      }
+-
+-      return IRQ_HANDLED;
+-}
+-
+ static void ccp5_config(struct ccp_device *ccp)
+ {
+       /* Public side */
diff --git a/queue-4.11/crypto-ccp-disable-interrupts-early-on-unload.patch b/queue-4.11/crypto-ccp-disable-interrupts-early-on-unload.patch
new file mode 100644 (file)
index 0000000..e3a23c3
--- /dev/null
@@ -0,0 +1,35 @@
+From 116591fe3eef11c6f06b662c9176385f13891183 Mon Sep 17 00:00:00 2001
+From: Gary R Hook <ghook@amd.com>
+Date: Thu, 20 Apr 2017 15:24:22 -0500
+Subject: crypto: ccp - Disable interrupts early on unload
+
+From: Gary R Hook <ghook@amd.com>
+
+commit 116591fe3eef11c6f06b662c9176385f13891183 upstream.
+
+Ensure that we disable interrupts first when shutting down
+the driver.
+
+Signed-off-by: Gary R Hook <ghook@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ccp/ccp-dev-v5.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/ccp/ccp-dev-v5.c
++++ b/drivers/crypto/ccp/ccp-dev-v5.c
+@@ -889,10 +889,10 @@ static void ccp5_destroy(struct ccp_devi
+               iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);
+               /* Disable the interrupts */
+-              iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
++              iowrite32(0x00, cmd_q->reg_int_enable);
+               /* Clear the interrupt status */
+-              iowrite32(0x00, cmd_q->reg_int_enable);
++              iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
+               ioread32(cmd_q->reg_int_status);
+               ioread32(cmd_q->reg_status);
+       }
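
The two writes are swapped because teardown order matters: if the status register is cleared while generation is still enabled, an event arriving in between can latch and fire after the driver is gone. A sketch of the safe ordering (register offsets are hypothetical):

```c
#include <linux/io.h>

#define INT_ENABLE 0x00		/* hypothetical MMIO layout */
#define INT_STATUS 0x04

static void quiesce(void __iomem *regs)
{
	iowrite32(0, regs + INT_ENABLE);	/* 1. stop new interrupts */
	iowrite32(~0u, regs + INT_STATUS);	/* 2. then ack anything latched */
}
```
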
diff --git a/queue-4.11/crypto-ccp-use-only-the-relevant-interrupt-bits.patch b/queue-4.11/crypto-ccp-use-only-the-relevant-interrupt-bits.patch
new file mode 100644 (file)
index 0000000..baba78d
--- /dev/null
@@ -0,0 +1,75 @@
+From 56467cb11cf8ae4db9003f54b3d3425b5f07a10a Mon Sep 17 00:00:00 2001
+From: Gary R Hook <gary.hook@amd.com>
+Date: Thu, 20 Apr 2017 15:24:09 -0500
+Subject: crypto: ccp - Use only the relevant interrupt bits
+
+From: Gary R Hook <gary.hook@amd.com>
+
+commit 56467cb11cf8ae4db9003f54b3d3425b5f07a10a upstream.
+
+Each CCP queue can produce interrupts for 4 conditions:
+operation complete, queue empty, error, and queue stopped.
+This driver only works with completion and error events.
+
+Signed-off-by: Gary R Hook <gary.hook@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ccp/ccp-dev-v5.c |    9 +++++----
+ drivers/crypto/ccp/ccp-dev.h    |    5 ++---
+ 2 files changed, 7 insertions(+), 7 deletions(-)
+
+--- a/drivers/crypto/ccp/ccp-dev-v5.c
++++ b/drivers/crypto/ccp/ccp-dev-v5.c
+@@ -748,7 +748,7 @@ static int ccp5_init(struct ccp_device *
+               ioread32(cmd_q->reg_status);
+               /* Clear the interrupts */
+-              iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
++              iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
+       }
+       dev_dbg(dev, "Requesting an IRQ...\n");
+@@ -829,7 +829,7 @@ static int ccp5_init(struct ccp_device *
+       /* Enable interrupts */
+       for (i = 0; i < ccp->cmd_q_count; i++) {
+               cmd_q = &ccp->cmd_q[i];
+-              iowrite32(ALL_INTERRUPTS, cmd_q->reg_int_enable);
++              iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_int_enable);
+       }
+       dev_dbg(dev, "Registering device...\n");
+@@ -889,7 +889,7 @@ static void ccp5_destroy(struct ccp_devi
+               iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);
+               /* Disable the interrupts */
+-              iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
++              iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
+               /* Clear the interrupt status */
+               iowrite32(0x00, cmd_q->reg_int_enable);
+@@ -949,7 +949,8 @@ static irqreturn_t ccp5_irq_handler(int
+                       cmd_q->int_rcvd = 1;
+                       /* Acknowledge the interrupt and wake the kthread */
+-                      iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
++                      iowrite32(SUPPORTED_INTERRUPTS,
++                                cmd_q->reg_interrupt_status);
+                       wake_up_interruptible(&cmd_q->int_queue);
+               }
+       }
+--- a/drivers/crypto/ccp/ccp-dev.h
++++ b/drivers/crypto/ccp/ccp-dev.h
+@@ -109,9 +109,8 @@
+ #define INT_COMPLETION                        0x1
+ #define INT_ERROR                     0x2
+ #define INT_QUEUE_STOPPED             0x4
+-#define ALL_INTERRUPTS                        (INT_COMPLETION| \
+-                                       INT_ERROR| \
+-                                       INT_QUEUE_STOPPED)
++#define       INT_EMPTY_QUEUE                 0x8
++#define SUPPORTED_INTERRUPTS          (INT_COMPLETION | INT_ERROR)
+ #define LSB_REGION_WIDTH              5
+ #define MAX_LSB_CNT                   8
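
Numerically, the change narrows the enable/ack mask from 0x7 (completion | error | queue-stopped) to 0x3 (completion | error), so events the driver never consumes are no longer unmasked in the first place. A tiny check of the arithmetic:

```c
#include <stdio.h>

#define INT_COMPLETION		0x1
#define INT_ERROR		0x2
#define INT_QUEUE_STOPPED	0x4
#define INT_EMPTY_QUEUE		0x8
#define SUPPORTED_INTERRUPTS	(INT_COMPLETION | INT_ERROR)

int main(void)
{
	unsigned int all = INT_COMPLETION | INT_ERROR | INT_QUEUE_STOPPED;

	printf("old mask 0x%x, new mask 0x%x\n", all, SUPPORTED_INTERRUPTS);
	return 0;	/* old mask 0x7, new mask 0x3 */
}
```
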
diff --git a/queue-4.11/crypto-s5p-sss-close-possible-race-for-completed-requests.patch b/queue-4.11/crypto-s5p-sss-close-possible-race-for-completed-requests.patch
new file mode 100644 (file)
index 0000000..4239ad6
--- /dev/null
@@ -0,0 +1,69 @@
+From 42d5c176b76e190a4a3e0dfeffdae661755955b6 Mon Sep 17 00:00:00 2001
+From: Krzysztof Kozlowski <krzk@kernel.org>
+Date: Fri, 17 Mar 2017 16:49:19 +0200
+Subject: crypto: s5p-sss - Close possible race for completed requests
+
+From: Krzysztof Kozlowski <krzk@kernel.org>
+
+commit 42d5c176b76e190a4a3e0dfeffdae661755955b6 upstream.
+
+The driver is capable of handling only one request at a time, which it
+stores in its state container struct s5p_aes_dev.  This stored request
+must be protected between concurrent invocations (e.g. completing the
+current request and scheduling a new one).  A combination of a lock and
+the "busy" field is used for that purpose.
+
+When the "busy" field is true, the driver will not accept a new request
+and thus will not overwrite the data currently being handled.
+
+However, commit 28b62b145868 ("crypto: s5p-sss - Fix spinlock recursion
+on LRW(AES)") moved some of the writes to the "busy" field out of a
+lock-protected critical section.  This might lead to a potential race
+between completing the current request and scheduling a new one.
+Effectively, the request completion might try to operate on a new
+crypto request.
+
+Fixes: 28b62b145868 ("crypto: s5p-sss - Fix spinlock recursion on LRW(AES)")
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Reviewed-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/s5p-sss.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/s5p-sss.c
++++ b/drivers/crypto/s5p-sss.c
+@@ -287,7 +287,6 @@ static void s5p_sg_done(struct s5p_aes_d
+ static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+ {
+       dev->req->base.complete(&dev->req->base, err);
+-      dev->busy = false;
+ }
+ static void s5p_unset_outdata(struct s5p_aes_dev *dev)
+@@ -462,7 +461,7 @@ static irqreturn_t s5p_aes_interrupt(int
+               spin_unlock_irqrestore(&dev->lock, flags);
+               s5p_aes_complete(dev, 0);
+-              dev->busy = true;
++              /* Device is still busy */
+               tasklet_schedule(&dev->tasklet);
+       } else {
+               /*
+@@ -483,6 +482,7 @@ static irqreturn_t s5p_aes_interrupt(int
+ error:
+       s5p_sg_done(dev);
++      dev->busy = false;
+       spin_unlock_irqrestore(&dev->lock, flags);
+       s5p_aes_complete(dev, err);
+@@ -634,6 +634,7 @@ outdata_error:
+ indata_error:
+       s5p_sg_done(dev);
++      dev->busy = false;
+       spin_unlock_irqrestore(&dev->lock, flags);
+       s5p_aes_complete(dev, err);
+ }
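
The invariant the patch restores: the busy flag only changes while the lock is held, and the completion callback runs after the lock is dropped. A pthread analogy of that shape (not driver code; names are illustrative):

```c
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool busy;
static void *current_req;

static void finish_request(void (*complete)(int err), int err)
{
	pthread_mutex_lock(&lock);
	current_req = NULL;
	busy = false;			/* cleared under the lock... */
	pthread_mutex_unlock(&lock);

	/* ...so a newly accepted request can no longer race with the
	 * completion of the one that just finished. */
	complete(err);
}
```
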
diff --git a/queue-4.11/dm-crypt-rewrite-wipe-key-in-crypto-layer-using-random-data.patch b/queue-4.11/dm-crypt-rewrite-wipe-key-in-crypto-layer-using-random-data.patch
new file mode 100644 (file)
index 0000000..a118039
--- /dev/null
@@ -0,0 +1,47 @@
+From c82feeec9a014b72c4ffea36648cfb6f81cc1b73 Mon Sep 17 00:00:00 2001
+From: Ondrej Kozina <okozina@redhat.com>
+Date: Mon, 24 Apr 2017 14:21:53 +0200
+Subject: dm crypt: rewrite (wipe) key in crypto layer using random data
+
+From: Ondrej Kozina <okozina@redhat.com>
+
+commit c82feeec9a014b72c4ffea36648cfb6f81cc1b73 upstream.
+
+The message "key wipe" used to wipe real key stored in crypto layer by
+rewriting it with zeroes.  Since commit 28856a9 ("crypto: xts -
+consolidate sanity check for keys") this no longer works in FIPS mode
+for XTS.
+
+While running in FIPS mode the crypto key part has to differ from the
+tweak key.
+
+Fixes: 28856a9 ("crypto: xts - consolidate sanity check for keys")
+Signed-off-by: Ondrej Kozina <okozina@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-crypt.c |    8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -1649,12 +1649,16 @@ out:
+ static int crypt_wipe_key(struct crypt_config *cc)
+ {
++      int r;
++
+       clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+-      memset(&cc->key, 0, cc->key_size * sizeof(u8));
++      get_random_bytes(&cc->key, cc->key_size);
+       kzfree(cc->key_string);
+       cc->key_string = NULL;
++      r = crypt_setkey(cc);
++      memset(&cc->key, 0, cc->key_size * sizeof(u8));
+-      return crypt_setkey(cc);
++      return r;
+ }
+ static void crypt_dtr(struct dm_target *ti)
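
For context on why zeroes stopped working: in FIPS mode the XTS setkey path rejects keys whose two halves are identical, and an all-zero key trivially has equal halves. A simplified sketch of that check (modeled on the XTS key verification; not a verbatim copy):

```c
#include <errno.h>
#include <string.h>

/* key is key1 || key2; in FIPS mode the halves must differ */
static int xts_check_key(const unsigned char *key, unsigned int keylen)
{
	if (!memcmp(key, key + keylen / 2, keylen / 2))
		return -EINVAL;	/* an all-zero key always trips this */
	return 0;
}
```
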
diff --git a/queue-4.11/dm-era-save-spacemap-metadata-root-after-the-pre-commit.patch b/queue-4.11/dm-era-save-spacemap-metadata-root-after-the-pre-commit.patch
new file mode 100644 (file)
index 0000000..c7d9b08
--- /dev/null
@@ -0,0 +1,45 @@
+From 117aceb030307dcd431fdcff87ce988d3016c34a Mon Sep 17 00:00:00 2001
+From: Somasundaram Krishnasamy <somasundaram.krishnasamy@oracle.com>
+Date: Fri, 7 Apr 2017 12:14:55 -0700
+Subject: dm era: save spacemap metadata root after the pre-commit
+
+From: Somasundaram Krishnasamy <somasundaram.krishnasamy@oracle.com>
+
+commit 117aceb030307dcd431fdcff87ce988d3016c34a upstream.
+
+When committing era metadata to disk, the latest spacemap metadata root
+is not always saved in the superblock. Because of this, the metadata
+sometimes gets corrupted when the device is reopened. The correct order
+of update is: pre-commit (which shadows the spacemap root), save the
+spacemap root (the newly shadowed block) to the in-core superblock, and
+then the final commit.
+
+Signed-off-by: Somasundaram Krishnasamy <somasundaram.krishnasamy@oracle.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-era-target.c |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/md/dm-era-target.c
++++ b/drivers/md/dm-era-target.c
+@@ -961,15 +961,15 @@ static int metadata_commit(struct era_me
+               }
+       }
+-      r = save_sm_root(md);
++      r = dm_tm_pre_commit(md->tm);
+       if (r) {
+-              DMERR("%s: save_sm_root failed", __func__);
++              DMERR("%s: pre commit failed", __func__);
+               return r;
+       }
+-      r = dm_tm_pre_commit(md->tm);
++      r = save_sm_root(md);
+       if (r) {
+-              DMERR("%s: pre commit failed", __func__);
++              DMERR("%s: save_sm_root failed", __func__);
+               return r;
+       }
diff --git a/queue-4.11/dm-rq-check-blk_mq_register_dev-return-value-in-dm_mq_init_request_queue.patch b/queue-4.11/dm-rq-check-blk_mq_register_dev-return-value-in-dm_mq_init_request_queue.patch
new file mode 100644 (file)
index 0000000..c31ba70
--- /dev/null
@@ -0,0 +1,40 @@
+From 23a601248958fa4142d49294352fe8d1fdf3e509 Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bart.vanassche@sandisk.com>
+Date: Thu, 27 Apr 2017 10:11:19 -0700
+Subject: dm rq: check blk_mq_register_dev() return value in dm_mq_init_request_queue()
+
+From: Bart Van Assche <bart.vanassche@sandisk.com>
+
+commit 23a601248958fa4142d49294352fe8d1fdf3e509 upstream.
+
+Otherwise the request-based DM blk-mq request_queue will be put into
+service without being properly exported via sysfs.
+
+Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
+Reviewed-by: Hannes Reinecke <hare@suse.com>
+Cc: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-rq.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/dm-rq.c
++++ b/drivers/md/dm-rq.c
+@@ -810,10 +810,14 @@ int dm_mq_init_request_queue(struct mapp
+       dm_init_md_queue(md);
+       /* backfill 'mq' sysfs registration normally done in blk_register_queue */
+-      blk_mq_register_dev(disk_to_dev(md->disk), q);
++      err = blk_mq_register_dev(disk_to_dev(md->disk), q);
++      if (err)
++              goto out_cleanup_queue;
+       return 0;
++out_cleanup_queue:
++      blk_cleanup_queue(q);
+ out_tag_set:
+       blk_mq_free_tag_set(md->tag_set);
+ out_kfree_tag_set:
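
The fix extends the function's existing unwind ladder: every setup step that can fail gets a matching cleanup label, executed in reverse order on failure. A standalone sketch of the idiom (all helpers are stubs for illustration):

```c
#include <errno.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -ENOMEM; }	/* the call that was unchecked */
static void undo_b(void) { }
static void undo_a(void) { }

static int setup(void)
{
	int err;

	err = step_a();
	if (err)
		return err;
	err = step_b();
	if (err)
		goto out_a;
	err = step_c();
	if (err)
		goto out_b;
	return 0;

out_b:
	undo_b();
out_a:
	undo_a();
	return err;
}
```
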
diff --git a/queue-4.11/dm-thin-fix-a-memory-leak-when-passing-discard-bio-down.patch b/queue-4.11/dm-thin-fix-a-memory-leak-when-passing-discard-bio-down.patch
new file mode 100644 (file)
index 0000000..2d8bafe
--- /dev/null
@@ -0,0 +1,55 @@
+From 948f581a53b704b984aa20df009f0a2b4cf7f907 Mon Sep 17 00:00:00 2001
+From: Dennis Yang <dennisyang@qnap.com>
+Date: Tue, 18 Apr 2017 15:27:06 +0800
+Subject: dm thin: fix a memory leak when passing discard bio down
+
+From: Dennis Yang <dennisyang@qnap.com>
+
+commit 948f581a53b704b984aa20df009f0a2b4cf7f907 upstream.
+
+dm-thin does not free the discard_parent bio after all chained sub-bios
+have finished. The following kmemleak report could be observed after a
+pool with the discard_passdown option processes discard bios on
+linux v4.11-rc7. To fix this, we drop the discard_parent bio reference
+when its endio (passdown_endio) is called.
+
+unreferenced object 0xffff8803d6b29700 (size 256):
+  comm "kworker/u8:0", pid 30349, jiffies 4379504020 (age 143002.776s)
+  hex dump (first 32 bytes):
+    00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+    01 00 00 00 00 00 00 f0 00 00 00 00 00 00 00 00  ................
+  backtrace:
+    [<ffffffff81a5efd9>] kmemleak_alloc+0x49/0xa0
+    [<ffffffff8114ec34>] kmem_cache_alloc+0xb4/0x100
+    [<ffffffff8110eec0>] mempool_alloc_slab+0x10/0x20
+    [<ffffffff8110efa5>] mempool_alloc+0x55/0x150
+    [<ffffffff81374939>] bio_alloc_bioset+0xb9/0x260
+    [<ffffffffa018fd20>] process_prepared_discard_passdown_pt1+0x40/0x1c0 [dm_thin_pool]
+    [<ffffffffa018b409>] break_up_discard_bio+0x1a9/0x200 [dm_thin_pool]
+    [<ffffffffa018b484>] process_discard_cell_passdown+0x24/0x40 [dm_thin_pool]
+    [<ffffffffa018b24d>] process_discard_bio+0xdd/0xf0 [dm_thin_pool]
+    [<ffffffffa018ecf6>] do_worker+0xa76/0xd50 [dm_thin_pool]
+    [<ffffffff81086239>] process_one_work+0x139/0x370
+    [<ffffffff810867b1>] worker_thread+0x61/0x450
+    [<ffffffff8108b316>] kthread+0xd6/0xf0
+    [<ffffffff81a6cd1f>] ret_from_fork+0x3f/0x70
+    [<ffffffffffffffff>] 0xffffffffffffffff
+
+Signed-off-by: Dennis Yang <dennisyang@qnap.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-thin.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -1069,6 +1069,7 @@ static void passdown_endio(struct bio *b
+        * to unmap (we ignore err).
+        */
+       queue_passdown_pt2(bio->bi_private);
++      bio_put(bio);
+ }
+ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
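
The underlying rule: a bio obtained from bio_alloc_bioset() carries a reference owned by the allocator, and completion does not drop it; whoever allocated the bio must bio_put() it when done. A minimal kernel-style sketch of the pattern (illustrative only):

```c
#include <linux/bio.h>

static void my_endio(struct bio *bio)
{
	/* ... propagate completion to whoever is waiting ... */

	bio_put(bio);	/* drop the allocation reference, freeing the bio */
}
```
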
diff --git a/queue-4.11/kvm-arm-arm64-fix-races-in-kvm_psci_vcpu_on.patch b/queue-4.11/kvm-arm-arm64-fix-races-in-kvm_psci_vcpu_on.patch
new file mode 100644 (file)
index 0000000..12458c1
--- /dev/null
@@ -0,0 +1,72 @@
+From 6c7a5dce22b3f3cc44be098e2837fa6797edb8b8 Mon Sep 17 00:00:00 2001
+From: Andrew Jones <drjones@redhat.com>
+Date: Tue, 18 Apr 2017 17:59:58 +0200
+Subject: KVM: arm/arm64: fix races in kvm_psci_vcpu_on
+
+From: Andrew Jones <drjones@redhat.com>
+
+commit 6c7a5dce22b3f3cc44be098e2837fa6797edb8b8 upstream.
+
+Fix potential races in kvm_psci_vcpu_on() by taking the kvm->lock
+mutex.  In general, it's a bad idea to allow more than one PSCI_CPU_ON
+to process the same target VCPU at the same time.  One such problem
+that may arise is that one PSCI_CPU_ON could be resetting the target
+vcpu, which fills the entire sys_regs array with a temporary value
+including the MPIDR register, while another looks up the VCPU based
+on the MPIDR value, resulting in no target VCPU being found.  This
+resolves both races found with the kvm-unit-tests/arm/psci unit test.
+
+Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+Reported-by: Levente Kurusa <lkurusa@redhat.com>
+Suggested-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Andrew Jones <drjones@redhat.com>
+Signed-off-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/psci.c |    8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/kvm/psci.c
++++ b/arch/arm/kvm/psci.c
+@@ -208,9 +208,10 @@ int kvm_psci_version(struct kvm_vcpu *vc
+ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
+ {
+-      int ret = 1;
++      struct kvm *kvm = vcpu->kvm;
+       unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
+       unsigned long val;
++      int ret = 1;
+       switch (psci_fn) {
+       case PSCI_0_2_FN_PSCI_VERSION:
+@@ -230,7 +231,9 @@ static int kvm_psci_0_2_call(struct kvm_
+               break;
+       case PSCI_0_2_FN_CPU_ON:
+       case PSCI_0_2_FN64_CPU_ON:
++              mutex_lock(&kvm->lock);
+               val = kvm_psci_vcpu_on(vcpu);
++              mutex_unlock(&kvm->lock);
+               break;
+       case PSCI_0_2_FN_AFFINITY_INFO:
+       case PSCI_0_2_FN64_AFFINITY_INFO:
+@@ -279,6 +282,7 @@ static int kvm_psci_0_2_call(struct kvm_
+ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
+ {
++      struct kvm *kvm = vcpu->kvm;
+       unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
+       unsigned long val;
+@@ -288,7 +292,9 @@ static int kvm_psci_0_1_call(struct kvm_
+               val = PSCI_RET_SUCCESS;
+               break;
+       case KVM_PSCI_FN_CPU_ON:
++              mutex_lock(&kvm->lock);
+               val = kvm_psci_vcpu_on(vcpu);
++              mutex_unlock(&kvm->lock);
+               break;
+       default:
+               val = PSCI_RET_NOT_SUPPORTED;
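
CPU_ON is a multi-step check-then-act over another VCPU's state (look it up by MPIDR, check its power state, reset it, mark it runnable), which is why the whole call is now serialized on kvm->lock. The shape of the fix as a pthread sketch (do_cpu_on is a hypothetical stand-in for the KVM internals):

```c
#include <pthread.h>

static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;

static long do_cpu_on(unsigned long mpidr)
{
	/* lookup by MPIDR + reset + power on: several non-atomic steps */
	(void)mpidr;
	return 0;
}

static long psci_cpu_on(unsigned long target_mpidr)
{
	long ret;

	pthread_mutex_lock(&vm_lock);	/* one CPU_ON at a time per VM */
	ret = do_cpu_on(target_mpidr);
	pthread_mutex_unlock(&vm_lock);
	return ret;
}
```
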
diff --git a/queue-4.11/kvm-x86-fix-user-triggerable-warning-in-kvm_apic_accept_events.patch b/queue-4.11/kvm-x86-fix-user-triggerable-warning-in-kvm_apic_accept_events.patch
new file mode 100644 (file)
index 0000000..39fe1f6
--- /dev/null
@@ -0,0 +1,57 @@
+From 28bf28887976d8881a3a59491896c718fade7355 Mon Sep 17 00:00:00 2001
+From: David Hildenbrand <david@redhat.com>
+Date: Thu, 23 Mar 2017 11:46:03 +0100
+Subject: KVM: x86: fix user triggerable warning in kvm_apic_accept_events()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: David Hildenbrand <david@redhat.com>
+
+commit 28bf28887976d8881a3a59491896c718fade7355 upstream.
+
+If we already entered/are about to enter SMM, don't allow switching to
+INIT/SIPI_RECEIVED, otherwise the next call to kvm_apic_accept_events()
+will report a warning.
+
+The same applies if we are already in MP state INIT_RECEIVED and SMM is
+requested to be turned on. Refuse to set the VCPU events in this case.
+
+Fixes: cd7764fe9f73 ("KVM: x86: latch INITs while in system management mode")
+Reported-by: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c |   12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3127,6 +3127,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_e
+           (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
+               return -EINVAL;
++      /* INITs are latched while in SMM */
++      if (events->flags & KVM_VCPUEVENT_VALID_SMM &&
++          (events->smi.smm || events->smi.pending) &&
++          vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
++              return -EINVAL;
++
+       process_nmi(vcpu);
+       vcpu->arch.exception.pending = events->exception.injected;
+       vcpu->arch.exception.nr = events->exception.nr;
+@@ -7355,6 +7361,12 @@ int kvm_arch_vcpu_ioctl_set_mpstate(stru
+           mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
+               return -EINVAL;
++      /* INITs are latched while in SMM */
++      if ((is_smm(vcpu) || vcpu->arch.smi_pending) &&
++          (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
++           mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
++              return -EINVAL;
++
+       if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
+               vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
+               set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
diff --git a/queue-4.11/perf-x86-fix-broadwell-ep-dram-rapl-events.patch b/queue-4.11/perf-x86-fix-broadwell-ep-dram-rapl-events.patch
new file mode 100644 (file)
index 0000000..ebd6ed1
--- /dev/null
@@ -0,0 +1,47 @@
+From 33b88e708e7dfa58dc896da2a98f5719d2eb315c Mon Sep 17 00:00:00 2001
+From: Vince Weaver <vincent.weaver@maine.edu>
+Date: Tue, 2 May 2017 14:08:50 -0400
+Subject: perf/x86: Fix Broadwell-EP DRAM RAPL events
+
+From: Vince Weaver <vincent.weaver@maine.edu>
+
+commit 33b88e708e7dfa58dc896da2a98f5719d2eb315c upstream.
+
+It appears as though the Broadwell-EP DRAM units share the special
+units quirk with Haswell-EP/KNL.
+
+Without this patch, you get really high results (a single DRAM using 20W
+of power).
+
+The powercap driver in drivers/powercap/intel_rapl.c already has this
+change.
+
+Signed-off-by: Vince Weaver <vincent.weaver@maine.edu>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Kan Liang <kan.liang@intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@gmail.com>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/events/intel/rapl.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/events/intel/rapl.c
++++ b/arch/x86/events/intel/rapl.c
+@@ -761,7 +761,7 @@ static const struct x86_cpu_id rapl_cpu_
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE,   hsw_rapl_init),
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E,   hsw_rapl_init),
+-      X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,      hsw_rapl_init),
++      X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,      hsx_rapl_init),
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsw_rapl_init),
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
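
Back-of-envelope for the symptom in the changelog, assuming (illustratively) a generic energy unit of 2^-14 J while the DRAM domain actually counts in 2^-16 J units: scaling the same raw count with the wrong exponent inflates the result 4x, e.g. ~5 W of real DRAM power reads as ~20 W.

```c
#include <stdio.h>

int main(void)
{
	unsigned long long raw = 327680ULL;	/* counter ticks over 1 s */
	double wrong = raw / (double)(1 << 14);	/* generic energy unit */
	double right = raw / (double)(1 << 16);	/* DRAM-domain quirk */

	printf("wrong=%.1f W  right=%.1f W\n", wrong, right);	/* 20.0 / 5.0 */
	return 0;
}
```
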
diff --git a/queue-4.11/revert-kvm-support-vcpu-based-gfn-hva-cache.patch b/queue-4.11/revert-kvm-support-vcpu-based-gfn-hva-cache.patch
new file mode 100644 (file)
index 0000000..fdd68ee
--- /dev/null
@@ -0,0 +1,334 @@
+From 4e335d9e7ddbcf83d03e7fbe65797ebed2272c18 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Tue, 2 May 2017 16:20:18 +0200
+Subject: Revert "KVM: Support vCPU-based gfn->hva cache"
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 4e335d9e7ddbcf83d03e7fbe65797ebed2272c18 upstream.
+
+This reverts commit bbd6411513aa8ef3ea02abab61318daf87c1af1e.
+
+I've been sitting on this revert for too long and it unfortunately
+missed 4.11.  It's also the reason why I haven't merged ring-based
+dirty tracking for 4.12.
+
+Using kvm_vcpu_memslots in kvm_gfn_to_hva_cache_init and
+kvm_vcpu_write_guest_offset_cached means that the MSR value can
+now be used to access SMRAM, simply by making it point to an SMRAM
+physical address.  This is problematic because it lets the guest
+OS overwrite memory that it shouldn't be able to touch.
+
+Fixes: bbd6411513aa8ef3ea02abab61318daf87c1af1e
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/lapic.c     |   22 ++++++++++++----------
+ arch/x86/kvm/x86.c       |   41 +++++++++++++++++++++--------------------
+ include/linux/kvm_host.h |   16 ++++++++--------
+ virt/kvm/kvm_main.c      |   34 +++++++++++++++++-----------------
+ 4 files changed, 58 insertions(+), 55 deletions(-)
+
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -529,14 +529,16 @@ int kvm_apic_set_irq(struct kvm_vcpu *vc
+ static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
+ {
+-      return kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.pv_eoi.data, &val,
+-                                         sizeof(val));
++
++      return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
++                                    sizeof(val));
+ }
+ static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
+ {
+-      return kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.pv_eoi.data, val,
+-                                        sizeof(*val));
++
++      return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
++                                    sizeof(*val));
+ }
+ static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
+@@ -2285,8 +2287,8 @@ void kvm_lapic_sync_from_vapic(struct kv
+       if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
+               return;
+-      if (kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.apic->vapic_cache, &data,
+-                                     sizeof(u32)))
++      if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
++                                sizeof(u32)))
+               return;
+       apic_set_tpr(vcpu->arch.apic, data & 0xff);
+@@ -2338,14 +2340,14 @@ void kvm_lapic_sync_to_vapic(struct kvm_
+               max_isr = 0;
+       data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
+-      kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.apic->vapic_cache, &data,
+-                                  sizeof(u32));
++      kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
++                              sizeof(u32));
+ }
+ int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
+ {
+       if (vapic_addr) {
+-              if (kvm_vcpu_gfn_to_hva_cache_init(vcpu,
++              if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+                                       &vcpu->arch.apic->vapic_cache,
+                                       vapic_addr, sizeof(u32)))
+                       return -EINVAL;
+@@ -2439,7 +2441,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_v
+       vcpu->arch.pv_eoi.msr_val = data;
+       if (!pv_eoi_enabled(vcpu))
+               return 0;
+-      return kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.pv_eoi.data,
++      return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
+                                        addr, sizeof(u8));
+ }
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1813,7 +1813,7 @@ static void kvm_setup_pvclock_page(struc
+       struct kvm_vcpu_arch *vcpu = &v->arch;
+       struct pvclock_vcpu_time_info guest_hv_clock;
+-      if (unlikely(kvm_vcpu_read_guest_cached(v, &vcpu->pv_time,
++      if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+               &guest_hv_clock, sizeof(guest_hv_clock))))
+               return;
+@@ -1834,9 +1834,9 @@ static void kvm_setup_pvclock_page(struc
+       BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
+       vcpu->hv_clock.version = guest_hv_clock.version + 1;
+-      kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
+-                                  &vcpu->hv_clock,
+-                                  sizeof(vcpu->hv_clock.version));
++      kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
++                              &vcpu->hv_clock,
++                              sizeof(vcpu->hv_clock.version));
+       smp_wmb();
+@@ -1850,16 +1850,16 @@ static void kvm_setup_pvclock_page(struc
+       trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
+-      kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
+-                                  &vcpu->hv_clock,
+-                                  sizeof(vcpu->hv_clock));
++      kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
++                              &vcpu->hv_clock,
++                              sizeof(vcpu->hv_clock));
+       smp_wmb();
+       vcpu->hv_clock.version++;
+-      kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
+-                                  &vcpu->hv_clock,
+-                                  sizeof(vcpu->hv_clock.version));
++      kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
++                              &vcpu->hv_clock,
++                              sizeof(vcpu->hv_clock.version));
+ }
+ static int kvm_guest_time_update(struct kvm_vcpu *v)
+@@ -2092,7 +2092,7 @@ static int kvm_pv_enable_async_pf(struct
+               return 0;
+       }
+-      if (kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.apf.data, gpa,
++      if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
+                                       sizeof(u32)))
+               return 1;
+@@ -2111,7 +2111,7 @@ static void record_steal_time(struct kvm
+       if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
+               return;
+-      if (unlikely(kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.st.stime,
++      if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+               &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
+               return;
+@@ -2122,7 +2122,7 @@ static void record_steal_time(struct kvm
+       vcpu->arch.st.steal.version += 1;
+-      kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
++      kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+               &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+       smp_wmb();
+@@ -2131,14 +2131,14 @@ static void record_steal_time(struct kvm
+               vcpu->arch.st.last_steal;
+       vcpu->arch.st.last_steal = current->sched_info.run_delay;
+-      kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
++      kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+               &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+       smp_wmb();
+       vcpu->arch.st.steal.version += 1;
+-      kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
++      kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+               &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+ }
+@@ -2243,7 +2243,7 @@ int kvm_set_msr_common(struct kvm_vcpu *
+               if (!(data & 1))
+                       break;
+-              if (kvm_vcpu_gfn_to_hva_cache_init(vcpu,
++              if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+                    &vcpu->arch.pv_time, data & ~1ULL,
+                    sizeof(struct pvclock_vcpu_time_info)))
+                       vcpu->arch.pv_time_enabled = false;
+@@ -2264,7 +2264,7 @@ int kvm_set_msr_common(struct kvm_vcpu *
+               if (data & KVM_STEAL_RESERVED_MASK)
+                       return 1;
+-              if (kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.st.stime,
++              if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
+                                               data & KVM_STEAL_VALID_BITS,
+                                               sizeof(struct kvm_steal_time)))
+                       return 1;
+@@ -2878,7 +2878,7 @@ static void kvm_steal_time_set_preempted
+       vcpu->arch.st.steal.preempted = 1;
+-      kvm_vcpu_write_guest_offset_cached(vcpu, &vcpu->arch.st.stime,
++      kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
+                       &vcpu->arch.st.steal.preempted,
+                       offsetof(struct kvm_steal_time, preempted),
+                       sizeof(vcpu->arch.st.steal.preempted));
+@@ -8548,8 +8548,9 @@ static void kvm_del_async_pf_gfn(struct
+ static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
+ {
+-      return kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.apf.data, &val,
+-                                         sizeof(val));
++
++      return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
++                                    sizeof(val));
+ }
+ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -641,18 +641,18 @@ int kvm_read_guest_page(struct kvm *kvm,
+ int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
+                         unsigned long len);
+ int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
+-int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+-                             void *data, unsigned long len);
++int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
++                         void *data, unsigned long len);
+ int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
+                        int offset, int len);
+ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
+                   unsigned long len);
+-int kvm_vcpu_write_guest_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
+-                              void *data, unsigned long len);
+-int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
+-                                     void *data, int offset, unsigned long len);
+-int kvm_vcpu_gfn_to_hva_cache_init(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
+-                                 gpa_t gpa, unsigned long len);
++int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
++                         void *data, unsigned long len);
++int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
++                         void *data, int offset, unsigned long len);
++int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
++                            gpa_t gpa, unsigned long len);
+ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
+ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
+ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1973,18 +1973,18 @@ static int __kvm_gfn_to_hva_cache_init(s
+       return 0;
+ }
+-int kvm_vcpu_gfn_to_hva_cache_init(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
++int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+                             gpa_t gpa, unsigned long len)
+ {
+-      struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
++      struct kvm_memslots *slots = kvm_memslots(kvm);
+       return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
+ }
+-EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva_cache_init);
++EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
+-int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+-                                     void *data, int offset, unsigned long len)
++int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
++                         void *data, int offset, unsigned long len)
+ {
+-      struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
++      struct kvm_memslots *slots = kvm_memslots(kvm);
+       int r;
+       gpa_t gpa = ghc->gpa + offset;
+@@ -1994,7 +1994,7 @@ int kvm_vcpu_write_guest_offset_cached(s
+               __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
+       if (unlikely(!ghc->memslot))
+-              return kvm_vcpu_write_guest(vcpu, gpa, data, len);
++              return kvm_write_guest(kvm, gpa, data, len);
+       if (kvm_is_error_hva(ghc->hva))
+               return -EFAULT;
+@@ -2006,19 +2006,19 @@ int kvm_vcpu_write_guest_offset_cached(s
+       return 0;
+ }
+-EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_offset_cached);
++EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
+-int kvm_vcpu_write_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+-                             void *data, unsigned long len)
++int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
++                         void *data, unsigned long len)
+ {
+-      return kvm_vcpu_write_guest_offset_cached(vcpu, ghc, data, 0, len);
++      return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
+ }
+-EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_cached);
++EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
+-int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+-                             void *data, unsigned long len)
++int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
++                         void *data, unsigned long len)
+ {
+-      struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
++      struct kvm_memslots *slots = kvm_memslots(kvm);
+       int r;
+       BUG_ON(len > ghc->len);
+@@ -2027,7 +2027,7 @@ int kvm_vcpu_read_guest_cached(struct kv
+               __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
+       if (unlikely(!ghc->memslot))
+-              return kvm_vcpu_read_guest(vcpu, ghc->gpa, data, len);
++              return kvm_read_guest(kvm, ghc->gpa, data, len);
+       if (kvm_is_error_hva(ghc->hva))
+               return -EFAULT;
+@@ -2038,7 +2038,7 @@ int kvm_vcpu_read_guest_cached(struct kv
+       return 0;
+ }
+-EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_cached);
++EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
+ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
+ {
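
The mechanics behind the SMRAM exposure: on x86, KVM keeps a separate memslot
set per address space (normal vs. SMM), and kvm_vcpu_memslots() picks the set
matching the vCPU's current mode, while kvm_memslots() always returns the
normal one. Initializing a gfn->hva cache from a guest-writable MSR through
the vCPU-based helpers therefore allowed the cached translation to resolve
into SMRAM. A trimmed sketch of that selection (stand-in struct and function
names; the real helpers live in include/linux/kvm_host.h):

    struct kvm_memslots;

    struct kvm_sketch {
            struct kvm_memslots *memslots[2];  /* [0] normal, [1] SMM (x86) */
    };

    struct kvm_vcpu_sketch {
            struct kvm_sketch *kvm;
            int as_id;  /* follows the vCPU into and out of SMM */
    };

    /* VM-wide view, always the normal address space -- what the revert
     * goes back to using for MSR-initialized caches. */
    static struct kvm_memslots *sketch_kvm_memslots(struct kvm_sketch *kvm)
    {
            return kvm->memslots[0];
    }

    /* vCPU view: while the vCPU is in SMM this resolves gfns against
     * the SMM memslots, which is what let a guest-chosen MSR value
     * reach SMRAM through the cached read/write helpers. */
    static struct kvm_memslots *sketch_vcpu_memslots(struct kvm_vcpu_sketch *v)
    {
            return v->kvm->memslots[v->as_id];
    }
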
diff --git a/queue-4.11/selftests-x86-ldt_gdt_32-work-around-a-glibc-sigaction-bug.patch b/queue-4.11/selftests-x86-ldt_gdt_32-work-around-a-glibc-sigaction-bug.patch
new file mode 100644 (file)
index 0000000..8293691
--- /dev/null
@@ -0,0 +1,107 @@
+From 65973dd3fd31151823f4b8c289eebbb3fb7e6bc0 Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Wed, 22 Mar 2017 14:32:29 -0700
+Subject: selftests/x86/ldt_gdt_32: Work around a glibc sigaction() bug
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit 65973dd3fd31151823f4b8c289eebbb3fb7e6bc0 upstream.
+
+i386 glibc is buggy and calls the sigaction syscall incorrectly.
+
+This is asymptomatic for normal programs, but it blows up on
+programs that do evil things with segmentation.  The ldt_gdt
+self-test is an example of such an evil program.
+
+This doesn't appear to be a regression -- I think I just got lucky
+with the uninitialized memory that glibc threw at the kernel when I
+wrote the test.
+
+This hackish fix manually issues sigaction(2) syscalls to undo the
+damage.  Without the fix, ldt_gdt_32 segfaults; with the fix, it
+passes for me.
+
+See: https://sourceware.org/bugzilla/show_bug.cgi?id=21269
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Garnier <thgarnie@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/aaab0f9f93c9af25396f01232608c163a760a668.1490218061.git.luto@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/testing/selftests/x86/ldt_gdt.c |   46 ++++++++++++++++++++++++++++++++++
+ 1 file changed, 46 insertions(+)
+
+--- a/tools/testing/selftests/x86/ldt_gdt.c
++++ b/tools/testing/selftests/x86/ldt_gdt.c
+@@ -409,6 +409,51 @@ static void *threadproc(void *ctx)
+       }
+ }
++#ifdef __i386__
++
++#ifndef SA_RESTORER
++#define SA_RESTORER 0x04000000
++#endif
++
++/*
++ * The UAPI header calls this 'struct sigaction', which conflicts with
++ * glibc.  Sigh.
++ */
++struct fake_ksigaction {
++      void *handler;  /* the real type is nasty */
++      unsigned long sa_flags;
++      void (*sa_restorer)(void);
++      unsigned char sigset[8];
++};
++
++static void fix_sa_restorer(int sig)
++{
++      struct fake_ksigaction ksa;
++
++      if (syscall(SYS_rt_sigaction, sig, NULL, &ksa, 8) == 0) {
++              /*
++               * glibc has a nasty bug: it sometimes writes garbage to
++               * sa_restorer.  This interacts quite badly with anything
++               * that fiddles with SS because it can trigger legacy
++               * stack switching.  Patch it up.  See:
++               *
++               * https://sourceware.org/bugzilla/show_bug.cgi?id=21269
++               */
++              if (!(ksa.sa_flags & SA_RESTORER) && ksa.sa_restorer) {
++                      ksa.sa_restorer = NULL;
++                      if (syscall(SYS_rt_sigaction, sig, &ksa, NULL,
++                                  sizeof(ksa.sigset)) != 0)
++                              err(1, "rt_sigaction");
++              }
++      }
++}
++#else
++static void fix_sa_restorer(int sig)
++{
++      /* 64-bit glibc works fine. */
++}
++#endif
++
+ static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
+                      int flags)
+ {
+@@ -420,6 +465,7 @@ static void sethandler(int sig, void (*h
+       if (sigaction(sig, &sa, 0))
+               err(1, "sigaction");
++      fix_sa_restorer(sig);
+ }
+ static jmp_buf jmpbuf;
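
For checking a given glibc build outside the self-test, a standalone,
i386-only sketch of the same probe: install a handler through glibc, then
read the kernel's view back with a raw rt_sigaction(2) call and look for a
stray sa_restorer. The struct mirrors fake_ksigaction above; the rest is
illustrative:

    #include <err.h>
    #include <signal.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef SA_RESTORER
    #define SA_RESTORER 0x04000000
    #endif

    struct fake_ksigaction {
            void *handler;
            unsigned long sa_flags;
            void (*sa_restorer)(void);
            unsigned char sigset[8];
    };

    static void handler(int sig) { (void)sig; }

    int main(void)
    {
            struct fake_ksigaction ksa;

            signal(SIGUSR1, handler);   /* glibc path under test */
            if (syscall(SYS_rt_sigaction, SIGUSR1, NULL, &ksa,
                        sizeof(ksa.sigset)) != 0)
                    err(1, "rt_sigaction");

            if (!(ksa.sa_flags & SA_RESTORER) && ksa.sa_restorer)
                    printf("buggy: sa_restorer=%p without SA_RESTORER\n",
                           (void *)ksa.sa_restorer);
            else
                    printf("ok\n");
            return 0;
    }
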
index 5ffbe54af4cf42e9fe0ff7483cd11dd6fda30575..8fc9ac49b2d1ce12a5704948fe69e89331b5c238 100644 (file)
@@ -20,3 +20,23 @@ usb-make-sure-usb-phy-of-gets-built-in.patch
 usb-hub-fix-error-loop-seen-after-hub-communication-errors.patch
 usb-hub-do-not-attempt-to-autosuspend-disconnected-devices.patch
 usb-misc-legousbtower-fix-buffers-on-stack.patch
+x86-boot-fix-bss-corruption-overwrite-bug-in-early-x86-kernel-startup.patch
+selftests-x86-ldt_gdt_32-work-around-a-glibc-sigaction-bug.patch
+x86-pmem-fix-cache-flushing-for-iovec-write-8-bytes.patch
+um-fix-ptrace_pokeuser-on-x86_64.patch
+perf-x86-fix-broadwell-ep-dram-rapl-events.patch
+kvm-x86-fix-user-triggerable-warning-in-kvm_apic_accept_events.patch
+revert-kvm-support-vcpu-based-gfn-hva-cache.patch
+kvm-arm-arm64-fix-races-in-kvm_psci_vcpu_on.patch
+arm64-kvm-fix-decoding-of-rt-rt2-when-trapping-aarch32-cp-accesses.patch
+block-fix-blk_integrity_register-to-use-template-s-interval_exp-if-not-0.patch
+crypto-s5p-sss-close-possible-race-for-completed-requests.patch
+crypto-algif_aead-require-setkey-before-accept-2.patch
+crypto-ccp-use-only-the-relevant-interrupt-bits.patch
+crypto-ccp-disable-interrupts-early-on-unload.patch
+crypto-ccp-change-isr-handler-method-for-a-v3-ccp.patch
+crypto-ccp-change-isr-handler-method-for-a-v5-ccp.patch
+dm-crypt-rewrite-wipe-key-in-crypto-layer-using-random-data.patch
+dm-era-save-spacemap-metadata-root-after-the-pre-commit.patch
+dm-rq-check-blk_mq_register_dev-return-value-in-dm_mq_init_request_queue.patch
+dm-thin-fix-a-memory-leak-when-passing-discard-bio-down.patch
diff --git a/queue-4.11/um-fix-ptrace_pokeuser-on-x86_64.patch b/queue-4.11/um-fix-ptrace_pokeuser-on-x86_64.patch
new file mode 100644 (file)
index 0000000..9be0269
--- /dev/null
@@ -0,0 +1,36 @@
+From 9abc74a22d85ab29cef9896a2582a530da7e79bf Mon Sep 17 00:00:00 2001
+From: Richard Weinberger <richard@nod.at>
+Date: Sat, 1 Apr 2017 00:41:57 +0200
+Subject: um: Fix PTRACE_POKEUSER on x86_64
+
+From: Richard Weinberger <richard@nod.at>
+
+commit 9abc74a22d85ab29cef9896a2582a530da7e79bf upstream.
+
+This has been broken forever, but sadly nobody noticed.
+Recent versions of GDB set DR_CONTROL unconditionally, and
+UML dies due to heap corruption. It turns out that the
+PTRACE_POKEUSER code was copied and pasted from i386 and
+assumes that addresses are 4 bytes long.
+
+Fix that by using 8 as the address size in the calculation.
+
+Reported-by: jie cao <cj3054@gmail.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/um/ptrace_64.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/um/ptrace_64.c
++++ b/arch/x86/um/ptrace_64.c
+@@ -125,7 +125,7 @@ int poke_user(struct task_struct *child,
+       else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
+               (addr <= offsetof(struct user, u_debugreg[7]))) {
+               addr -= offsetof(struct user, u_debugreg[0]);
+-              addr = addr >> 2;
++              addr = addr >> 3;
+               if ((addr == 4) || (addr == 5))
+                       return -EIO;
+               child->thread.arch.debugregs[addr] = data;
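
The shift count encodes the register width: the u_debugreg[] slots in
x86_64's struct user are unsigned long, i.e. 8 bytes apart, so a byte offset
becomes an array index by dividing by 8, not 4. A tiny illustrative sketch
(hypothetical helpers, not the UML code):

    /* With the old ">> 2", offsets 0..56 mapped to indices 0..14 for an
     * 8-entry debugregs array, so the subsequent store landed past the
     * end of the array -- the heap corruption GDB triggered. */
    static unsigned long debugreg_index_x86_64(unsigned long byte_off)
    {
            return byte_off >> 3;   /* 8-byte registers */
    }

    static unsigned long debugreg_index_i386(unsigned long byte_off)
    {
            return byte_off >> 2;   /* 4-byte registers */
    }
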
diff --git a/queue-4.11/x86-boot-fix-bss-corruption-overwrite-bug-in-early-x86-kernel-startup.patch b/queue-4.11/x86-boot-fix-bss-corruption-overwrite-bug-in-early-x86-kernel-startup.patch
new file mode 100644 (file)
index 0000000..c6af0be
--- /dev/null
@@ -0,0 +1,52 @@
+From d594aa0277e541bb997aef0bc0a55172d8138340 Mon Sep 17 00:00:00 2001
+From: Ashish Kalra <ashish@bluestacks.com>
+Date: Wed, 19 Apr 2017 20:50:15 +0530
+Subject: x86/boot: Fix BSS corruption/overwrite bug in early x86 kernel startup
+
+From: Ashish Kalra <ashish@bluestacks.com>
+
+commit d594aa0277e541bb997aef0bc0a55172d8138340 upstream.
+
+The minimum size (512 bytes) of the new stack set up for arch/x86/boot
+components when the bootloader does not set up or provide a stack for
+the early boot components is not enough.
+
+The setup code executing as part of early kernel startup uses more than
+512 bytes of stack and accidentally overwrites and corrupts part of the
+BSS section. This shows up mostly in the early video setup code, where
+it corrupts BSS variables such as force_x and force_y, which in turn
+affect kernel parameters such as screen_info (screen_info.orig_video_cols)
+and later cause an exception/panic in console_init().
+
+Most recent boot loaders set up the stack for the early boot components,
+so this issue of the stack overwriting the BSS section has not been
+exposed.
+
+Signed-off-by: Ashish Kalra <ashish@bluestacks.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/20170419152015.10011-1-ashishkalra@Ashishs-MacBook-Pro.local
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/boot/boot.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/boot/boot.h
++++ b/arch/x86/boot/boot.h
+@@ -16,7 +16,7 @@
+ #ifndef BOOT_BOOT_H
+ #define BOOT_BOOT_H
+-#define STACK_SIZE    512     /* Minimum number of bytes for stack */
++#define STACK_SIZE    1024    /* Minimum number of bytes for stack */
+ #ifndef __ASSEMBLY__
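
The failure mode, roughly: when the bootloader provides no stack, header.S
places the stack top STACK_SIZE bytes above _end (the end of the setup
image's BSS) and lets it grow downward, so any usage beyond STACK_SIZE spills
over _end into .bss. A schematic check of that condition (the helper and its
arguments are made up for illustration):

    #define OLD_STACK_SIZE 512

    /* True when stack usage descends below _end and starts clobbering
     * .bss variables such as force_x/force_y in the video setup code. */
    static int stack_clobbers_bss(unsigned long end_of_bss,
                                  unsigned long bytes_used)
    {
            unsigned long stack_top = end_of_bss + OLD_STACK_SIZE;

            return stack_top - bytes_used < end_of_bss;
    }
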
diff --git a/queue-4.11/x86-pmem-fix-cache-flushing-for-iovec-write-8-bytes.patch b/queue-4.11/x86-pmem-fix-cache-flushing-for-iovec-write-8-bytes.patch
new file mode 100644 (file)
index 0000000..0f31d96
--- /dev/null
@@ -0,0 +1,35 @@
+From 8376efd31d3d7c44bd05be337adde023cc531fa1 Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Date: Tue, 9 May 2017 18:00:43 +0100
+Subject: x86, pmem: Fix cache flushing for iovec write < 8 bytes
+
+From: Ben Hutchings <ben.hutchings@codethink.co.uk>
+
+commit 8376efd31d3d7c44bd05be337adde023cc531fa1 upstream.
+
+Commit 11e63f6d920d added cache flushing for unaligned writes from an
+iovec, covering the first and last cache line of a >= 8 byte write and
+the first cache line of a < 8 byte write.  But an unaligned write of
+2-7 bytes can still cover two cache lines, so make sure we flush both
+in that case.
+
+Fixes: 11e63f6d920d ("x86, pmem: fix broken __copy_user_nocache ...")
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/pmem.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/pmem.h
++++ b/arch/x86/include/asm/pmem.h
+@@ -103,7 +103,7 @@ static inline size_t arch_copy_from_iter
+               if (bytes < 8) {
+                       if (!IS_ALIGNED(dest, 4) || (bytes != 4))
+-                              arch_wb_cache_pmem(addr, 1);
++                              arch_wb_cache_pmem(addr, bytes);
+               } else {
+                       if (!IS_ALIGNED(dest, 8)) {
+                               dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
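
The arithmetic behind the fix: arch_wb_cache_pmem(addr, size) flushes every
cache line covering [addr, addr + size), so passing 1 flushed only the line
containing the first byte, while an unaligned 2-7 byte write can straddle a
line boundary. A small sketch of how many lines such a write touches (64
stands in for boot_cpu_data.x86_clflush_size):

    #include <stdint.h>

    /* E.g. a 4-byte write at line offset 62 touches two 64-byte lines,
     * which is why flushing only the first line missed dirty data. */
    static unsigned lines_touched(uint64_t dest, uint64_t bytes)
    {
            const uint64_t line = 64;

            if (!bytes)
                    return 0;
            return (unsigned)((dest + bytes - 1) / line - dest / line + 1);
    }
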