--- /dev/null
+From 969439016d2cf61fef53a973d7e6d2061c3793b1 Mon Sep 17 00:00:00 2001
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+Date: Mon, 23 Feb 2015 20:37:54 +0100
+Subject: can: add missing initialisations in CAN related skbuffs
+
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+
+commit 969439016d2cf61fef53a973d7e6d2061c3793b1 upstream.
+
+When accessing CAN network interfaces with AF_PACKET sockets e.g. by dhclient
+this can lead to a skb_under_panic due to missing skb initialisations.
+
+Add the missing initialisations at the CAN skbuff creation times on driver
+level (rx path) and in the network layer (tx path).
+
+Reported-by: Austin Schuh <austin@peloton-tech.com>
+Reported-by: Daniel Steer <daniel.steer@mclaren.com>
+Signed-off-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/can/dev.c | 8 ++++++++
+ net/can/af_can.c | 3 +++
+ 2 files changed, 11 insertions(+)
+
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -578,6 +578,10 @@ struct sk_buff *alloc_can_skb(struct net
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
++ skb_reset_mac_header(skb);
++ skb_reset_network_header(skb);
++ skb_reset_transport_header(skb);
++
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+
+@@ -602,6 +606,10 @@ struct sk_buff *alloc_canfd_skb(struct n
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
++ skb_reset_mac_header(skb);
++ skb_reset_network_header(skb);
++ skb_reset_transport_header(skb);
++
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -259,6 +259,9 @@ int can_send(struct sk_buff *skb, int lo
+ goto inval_skb;
+ }
+
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++
++ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
--- /dev/null
+From 2fec5104f9c61de4cf2205aa355101e19a81f490 Mon Sep 17 00:00:00 2001
+From: "Ahmed S. Darwish" <ahmed.darwish@valeo.com>
+Date: Thu, 26 Feb 2015 10:22:02 -0500
+Subject: can: kvaser_usb: Read all messages in a bulk-in URB buffer
+
+From: "Ahmed S. Darwish" <ahmed.darwish@valeo.com>
+
+commit 2fec5104f9c61de4cf2205aa355101e19a81f490 upstream.
+
+The Kvaser firmware can only read and write messages that are
+not crossing the USB endpoint's wMaxPacketSize boundary. While
+receiving commands from the CAN device, if the next command in
+the same URB buffer crossed that max packet size boundary, the
+firmware puts a zero-length placeholder command in its place
+then moves the real command to the next boundary mark.
+
+The driver did not recognize such behavior, leading to missing
+a good number of rx events during a heavy rx load session.
+
+Moreover, a tx URB context only gets freed upon receiving its
+respective tx ACK event. Over time, the free tx URB contexts
+pool gets depleted due to the missing ACK events. Consequently,
+the netif transmission queue gets __permanently__ stopped; no
+frames could be sent again except after restarting the CAN
+network interface.
+
+Signed-off-by: Ahmed S. Darwish <ahmed.darwish@valeo.com>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/can/usb/kvaser_usb.c | 28 +++++++++++++++++++++++-----
+ 1 file changed, 23 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/can/usb/kvaser_usb.c
++++ b/drivers/net/can/usb/kvaser_usb.c
+@@ -12,6 +12,7 @@
+ * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
+ */
+
++#include <linux/kernel.h>
+ #include <linux/completion.h>
+ #include <linux/module.h>
+ #include <linux/netdevice.h>
+@@ -403,8 +404,15 @@ static int kvaser_usb_wait_msg(const str
+ while (pos <= actual_len - MSG_HEADER_LEN) {
+ tmp = buf + pos;
+
+- if (!tmp->len)
+- break;
++ /* Handle messages crossing the USB endpoint max packet
++ * size boundary. Check kvaser_usb_read_bulk_callback()
++ * for further details.
++ */
++ if (tmp->len == 0) {
++ pos = round_up(pos,
++ dev->bulk_in->wMaxPacketSize);
++ continue;
++ }
+
+ if (pos + tmp->len > actual_len) {
+ dev_err(dev->udev->dev.parent,
+@@ -980,8 +988,19 @@ static void kvaser_usb_read_bulk_callbac
+ while (pos <= urb->actual_length - MSG_HEADER_LEN) {
+ msg = urb->transfer_buffer + pos;
+
+- if (!msg->len)
+- break;
++ /* The Kvaser firmware can only read and write messages that
++ * does not cross the USB's endpoint wMaxPacketSize boundary.
++ * If a follow-up command crosses such boundary, firmware puts
++ * a placeholder zero-length command in its place then aligns
++ * the real command to the next max packet size.
++ *
++ * Handle such cases or we're going to miss a significant
++ * number of events in case of a heavy rx load on the bus.
++ */
++ if (msg->len == 0) {
++ pos = round_up(pos, dev->bulk_in->wMaxPacketSize);
++ continue;
++ }
+
+ if (pos + msg->len > urb->actual_length) {
+ dev_err(dev->udev->dev.parent, "Format error\n");
+@@ -989,7 +1008,6 @@ static void kvaser_usb_read_bulk_callbac
+ }
+
+ kvaser_usb_handle_message(dev, msg);
+-
+ pos += msg->len;
+ }
+
--- /dev/null
+From 79063bffc81f82689bd90e16da1b49408f3bf095 Mon Sep 17 00:00:00 2001
+From: Zefan Li <lizefan@huawei.com>
+Date: Fri, 13 Feb 2015 11:20:30 +0800
+Subject: cpuset: fix a warning when clearing configured masks in old hierarchy
+
+From: Zefan Li <lizefan@huawei.com>
+
+commit 79063bffc81f82689bd90e16da1b49408f3bf095 upstream.
+
+When we clear cpuset.cpus, cpuset.effective_cpus won't be cleared:
+
+ # mount -t cgroup -o cpuset xxx /mnt
+ # mkdir /mnt/tmp
+ # echo 0 > /mnt/tmp/cpuset.cpus
+ # echo > /mnt/tmp/cpuset.cpus
+ # cat cpuset.cpus
+
+ # cat cpuset.effective_cpus
+ 0-15
+
+And a kernel warning in update_cpumasks_hier() is triggered:
+
+ ------------[ cut here ]------------
+ WARNING: CPU: 0 PID: 4028 at kernel/cpuset.c:894 update_cpumasks_hier+0x471/0x650()
+
+Signed-off-by: Zefan Li <lizefan@huawei.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Tested-by: Serge Hallyn <serge.hallyn@canonical.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/cpuset.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -873,7 +873,7 @@ static void update_cpumasks_hier(struct
+ * If it becomes empty, inherit the effective mask of the
+ * parent, which is guaranteed to have some CPUs.
+ */
+- if (cpumask_empty(new_cpus))
++ if (cgroup_on_dfl(cp->css.cgroup) && cpumask_empty(new_cpus))
+ cpumask_copy(new_cpus, parent->effective_cpus);
+
+ /* Skip the whole subtree if the cpumask remains the same. */
+@@ -1129,7 +1129,7 @@ static void update_nodemasks_hier(struct
+ * If it becomes empty, inherit the effective mask of the
+ * parent, which is guaranteed to have some MEMs.
+ */
+- if (nodes_empty(*new_mems))
++ if (cgroup_on_dfl(cp->css.cgroup) && nodes_empty(*new_mems))
+ *new_mems = parent->effective_mems;
+
+ /* Skip the whole subtree if the nodemask remains the same. */
--- /dev/null
+From 283cb41f426b723a0255702b761b0fc5d1b53a81 Mon Sep 17 00:00:00 2001
+From: Jason Low <jason.low2@hp.com>
+Date: Fri, 13 Feb 2015 11:58:07 +0800
+Subject: cpuset: Fix cpuset sched_relax_domain_level
+
+From: Jason Low <jason.low2@hp.com>
+
+commit 283cb41f426b723a0255702b761b0fc5d1b53a81 upstream.
+
+The cpuset.sched_relax_domain_level can control how far we do
+immediate load balancing on a system. However, it was found on recent
+kernels that echo'ing a value into cpuset.sched_relax_domain_level
+did not reduce any immediate load balancing.
+
+The reason this occurred was because the update_domain_attr_tree() traversal
+did not update for the "top_cpuset". This resulted in nothing being changed
+when modifying the sched_relax_domain_level parameter.
+
+This patch is able to address that problem by having update_domain_attr_tree()
+allow updates for the root in the cpuset traversal.
+
+Fixes: fc560a26acce ("cpuset: replace cpuset->stack_list with cpuset_for_each_descendant_pre()")
+Signed-off-by: Jason Low <jason.low2@hp.com>
+Signed-off-by: Zefan Li <lizefan@huawei.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Tested-by: Serge Hallyn <serge.hallyn@canonical.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/cpuset.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -548,9 +548,6 @@ static void update_domain_attr_tree(stru
+
+ rcu_read_lock();
+ cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
+- if (cp == root_cs)
+- continue;
+-
+ /* skip the whole subtree if @cp doesn't have any CPU */
+ if (cpumask_empty(cp->cpus_allowed)) {
+ pos_css = css_rightmost_descendant(pos_css);
--- /dev/null
+From 790317e1b266c776765a4bdcedefea706ff0fada Mon Sep 17 00:00:00 2001
+From: Zefan Li <lizefan@huawei.com>
+Date: Fri, 13 Feb 2015 11:19:49 +0800
+Subject: cpuset: initialize effective masks when clone_children is enabled
+
+From: Zefan Li <lizefan@huawei.com>
+
+commit 790317e1b266c776765a4bdcedefea706ff0fada upstream.
+
+If clone_children is enabled, effective masks won't be initialized
+due to the bug:
+
+ # mount -t cgroup -o cpuset xxx /mnt
+ # echo 1 > cgroup.clone_children
+ # mkdir /mnt/tmp
+ # cat /mnt/tmp/
+ # cat cpuset.effective_cpus
+
+ # cat cpuset.cpus
+ 0-15
+
+And then this cpuset won't constrain the tasks in it.
+
+Either the bug or the fix has no effect on unified hierarchy, as
+there's no clone_children flag there any more.
+
+Reported-by: Christian Brauner <christianvanbrauner@gmail.com>
+Reported-by: Serge Hallyn <serge.hallyn@ubuntu.com>
+Signed-off-by: Zefan Li <lizefan@huawei.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Tested-by: Serge Hallyn <serge.hallyn@canonical.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/cpuset.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -1992,7 +1992,9 @@ static int cpuset_css_online(struct cgro
+
+ spin_lock_irq(&callback_lock);
+ cs->mems_allowed = parent->mems_allowed;
++ cs->effective_mems = parent->mems_allowed;
+ cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
++ cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
+ spin_unlock_irq(&callback_lock);
+ out_unlock:
+ mutex_unlock(&cpuset_mutex);
--- /dev/null
+From f0483044c1c96089256cda4cf182eea1ead77fe4 Mon Sep 17 00:00:00 2001
+From: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Date: Wed, 25 Feb 2015 13:17:48 +0100
+Subject: s390/pci: fix possible information leak in mmio syscall
+
+From: Sebastian Ott <sebott@linux.vnet.ibm.com>
+
+commit f0483044c1c96089256cda4cf182eea1ead77fe4 upstream.
+
+Make sure that even in error situations we do not use copy_to_user
+on uninitialized kernel memory.
+
+Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/pci/pci_mmio.c | 17 ++++++++---------
+ 1 file changed, 8 insertions(+), 9 deletions(-)
+
+--- a/arch/s390/pci/pci_mmio.c
++++ b/arch/s390/pci/pci_mmio.c
+@@ -64,8 +64,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, uns
+ if (copy_from_user(buf, user_buffer, length))
+ goto out;
+
+- memcpy_toio(io_addr, buf, length);
+- ret = 0;
++ ret = zpci_memcpy_toio(io_addr, buf, length);
+ out:
+ if (buf != local_buf)
+ kfree(buf);
+@@ -98,16 +97,16 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsi
+ goto out;
+ io_addr = (void *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
+
+- ret = -EFAULT;
+- if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
++ if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
++ ret = -EFAULT;
+ goto out;
+-
+- memcpy_fromio(buf, io_addr, length);
+-
+- if (copy_to_user(user_buffer, buf, length))
++ }
++ ret = zpci_memcpy_fromio(buf, io_addr, length);
++ if (ret)
+ goto out;
++ if (copy_to_user(user_buffer, buf, length))
++ ret = -EFAULT;
+
+- ret = 0;
+ out:
+ if (buf != local_buf)
+ kfree(buf);
--- /dev/null
+From 4d4eb4d4fbd9403682e2b75117b6b895531d8e01 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Wed, 4 Mar 2015 23:30:45 -0500
+Subject: seq_buf: Fix seq_buf_bprintf() truncation
+
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+
+commit 4d4eb4d4fbd9403682e2b75117b6b895531d8e01 upstream.
+
+In seq_buf_bprintf(), bstr_printf() is used to copy the format into the
+buffer remaining in the seq_buf structure. The return of bstr_printf()
+is the amount of characters written to the buffer excluding the '\0',
+unless the line was truncated!
+
+If the line copied does not fit, it is truncated, and a '\0' is added
+to the end of the buffer. But in this case, '\0' is included in the length
+of the line written. To know if the buffer had overflowed, the return
+length will be the same or greater than the length of the buffer passed in.
+
+The check in seq_buf_bprintf() only checked if the length returned from
+bstr_printf() would fit in the buffer, as the seq_buf_bprintf() is only
+to be an all or nothing command. It either writes all the string into
+the seq_buf, or none of it. If the string is truncated, the pointers
+inside the seq_buf must be reset to what they were when the function was
+called. This is not the case. On overflow, it copies only part of the string.
+
+The fix is to change the overflow check to see if the length returned from
+bstr_printf() is less than the length remaining in the seq_buf buffer, and not
+if it is less than or equal to as it currently does. Then seq_buf_bprintf()
+will know if the write from bstr_printf() was truncated or not.
+
+Link: http://lkml.kernel.org/r/1425500481.2712.27.camel@perches.com
+
+Reported-by: Joe Perches <joe@perches.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/seq_buf.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/lib/seq_buf.c
++++ b/lib/seq_buf.c
+@@ -154,7 +154,7 @@ int seq_buf_bprintf(struct seq_buf *s, c
+
+ if (s->len < s->size) {
+ ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
+- if (seq_buf_can_fit(s, ret)) {
++ if (s->len + ret < s->size) {
+ s->len += ret;
+ return 0;
+ }
--- /dev/null
+From 4a8fe4e1811c96ad0ad9f4083f2fe4fb43b2988d Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Wed, 4 Mar 2015 09:56:02 -0500
+Subject: seq_buf: Fix seq_buf_vprintf() truncation
+
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+
+commit 4a8fe4e1811c96ad0ad9f4083f2fe4fb43b2988d upstream.
+
+In seq_buf_vprintf(), vsnprintf() is used to copy the format into the
+buffer remaining in the seq_buf structure. The return of vsnprintf()
+is the amount of characters written to the buffer excluding the '\0',
+unless the line was truncated!
+
+If the line copied does not fit, it is truncated, and a '\0' is added
+to the end of the buffer. But in this case, '\0' is included in the length
+of the line written. To know if the buffer had overflowed, the return
+length will be the same as the length of the buffer passed in.
+
+The check in seq_buf_vprintf() only checked if the length returned from
+vsnprintf() would fit in the buffer, as the seq_buf_vprintf() is only
+to be an all or nothing command. It either writes all the string into
+the seq_buf, or none of it. If the string is truncated, the pointers
+inside the seq_buf must be reset to what they were when the function was
+called. This is not the case. On overflow, it copies only part of the string.
+
+The fix is to change the overflow check to see if the length returned from
+vsnprintf() is less than the length remaining in the seq_buf buffer, and not
+if it is less than or equal to as it currently does. Then seq_buf_vprintf()
+will know if the write from vsnprintf() was truncated or not.
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/seq_buf.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/lib/seq_buf.c
++++ b/lib/seq_buf.c
+@@ -61,7 +61,7 @@ int seq_buf_vprintf(struct seq_buf *s, c
+
+ if (s->len < s->size) {
+ len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args);
+- if (seq_buf_can_fit(s, len)) {
++ if (s->len + len < s->size) {
+ s->len += len;
+ return 0;
+ }
ftrace-clear-regs_en-and-tramp_en-flags-on-disabling-record-via-sysctl.patch
ftrace-fix-en-dis-able-graph-caller-when-en-dis-abling-record-via-sysctl.patch
ftrace-fix-ftrace-enable-ordering-of-sysctl-ftrace_enabled.patch
+can-add-missing-initialisations-in-can-related-skbuffs.patch
+can-kvaser_usb-read-all-messages-in-a-bulk-in-urb-buffer.patch
+workqueue-fix-hang-involving-racing-cancel_work_sync-s-for-preempt_none.patch
+seq_buf-fix-seq_buf_vprintf-truncation.patch
+seq_buf-fix-seq_buf_bprintf-truncation.patch
+cpuset-initialize-effective-masks-when-clone_children-is-enabled.patch
+cpuset-fix-a-warning-when-clearing-configured-masks-in-old-hierarchy.patch
+cpuset-fix-cpuset-sched_relax_domain_level.patch
+tpm-ibmvtpm-additional-le-support-for-tpm_ibmvtpm_send.patch
+tpm-tpm_i2c_stm_st33-add-status-check-when-reading-data-on-the-fifo.patch
+s390-pci-fix-possible-information-leak-in-mmio-syscall.patch
+spi-atmel-fix-interrupt-setup-for-pdc-transfers.patch
+spi-dw-mid-avoid-potential-null-dereference.patch
+spi-pl022-fix-race-in-giveback-leading-to-driver-lock-up.patch
--- /dev/null
+From 76e1d14b316d6f501ebc001e7a5d86b24ce5b615 Mon Sep 17 00:00:00 2001
+From: Torsten Fleischer <torfl6749@gmail.com>
+Date: Tue, 24 Feb 2015 16:32:57 +0100
+Subject: spi: atmel: Fix interrupt setup for PDC transfers
+
+From: Torsten Fleischer <torfl6749@gmail.com>
+
+commit 76e1d14b316d6f501ebc001e7a5d86b24ce5b615 upstream.
+
+Additionally to the current DMA transfer the PDC allows to set up a next DMA
+transfer. This is useful for larger SPI transfers.
+
+The driver currently waits for ENDRX as end of the transfer. But ENDRX is set
+when the current DMA transfer is done (RCR = 0), i.e. it doesn't include the
+next DMA transfer.
+Thus a subsequent SPI transfer could be started although there is currently a
+transfer in progress. This can cause invalid accesses to the SPI slave devices
+and to SPI transfer errors.
+
+This issue has been observed on a hardware with a M25P128 SPI NOR flash.
+
+So instead of ENDRX we should wait for RXBUFF. This flag is set if there is
+no more DMA transfer in progress (RCR = RNCR = 0).
+
+Signed-off-by: Torsten Fleischer <torfl6749@gmail.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spi/spi-atmel.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/spi/spi-atmel.c
++++ b/drivers/spi/spi-atmel.c
+@@ -764,17 +764,17 @@ static void atmel_spi_pdc_next_xfer(stru
+ (unsigned long long)xfer->rx_dma);
+ }
+
+- /* REVISIT: We're waiting for ENDRX before we start the next
++ /* REVISIT: We're waiting for RXBUFF before we start the next
+ * transfer because we need to handle some difficult timing
+- * issues otherwise. If we wait for ENDTX in one transfer and
+- * then starts waiting for ENDRX in the next, it's difficult
+- * to tell the difference between the ENDRX interrupt we're
+- * actually waiting for and the ENDRX interrupt of the
++ * issues otherwise. If we wait for TXBUFE in one transfer and
++ * then starts waiting for RXBUFF in the next, it's difficult
++ * to tell the difference between the RXBUFF interrupt we're
++ * actually waiting for and the RXBUFF interrupt of the
+ * previous transfer.
+ *
+ * It should be doable, though. Just not now...
+ */
+- spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES));
++ spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES));
+ spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
+ }
+
--- /dev/null
+From c9dafb27c84412fe4b17c3b94cc4ffeef5df1833 Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Mon, 2 Mar 2015 20:15:58 +0200
+Subject: spi: dw-mid: avoid potential NULL dereference
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit c9dafb27c84412fe4b17c3b94cc4ffeef5df1833 upstream.
+
+When DMA descriptor allocation fails we should not try to assign any fields in
+the bad descriptor. The patch adds the necessary checks for that.
+
+Fixes: 7063c0d942a1 (spi/dw_spi: add DMA support)
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spi/spi-dw-mid.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/spi/spi-dw-mid.c
++++ b/drivers/spi/spi-dw-mid.c
+@@ -139,6 +139,9 @@ static struct dma_async_tx_descriptor *d
+ 1,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
++ if (!txdesc)
++ return NULL;
++
+ txdesc->callback = dw_spi_dma_tx_done;
+ txdesc->callback_param = dws;
+
+@@ -184,6 +187,9 @@ static struct dma_async_tx_descriptor *d
+ 1,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
++ if (!rxdesc)
++ return NULL;
++
+ rxdesc->callback = dw_spi_dma_rx_done;
+ rxdesc->callback_param = dws;
+
--- /dev/null
+From cd6fa8d2ca53cac3226fdcffcf763be390abae32 Mon Sep 17 00:00:00 2001
+From: Alexander Sverdlin <alexander.sverdlin@nokia.com>
+Date: Fri, 27 Feb 2015 16:30:21 +0100
+Subject: spi: pl022: Fix race in giveback() leading to driver lock-up
+
+From: Alexander Sverdlin <alexander.sverdlin@nokia.com>
+
+commit cd6fa8d2ca53cac3226fdcffcf763be390abae32 upstream.
+
+Commit fd316941c ("spi/pl022: disable port when unused") introduced a race,
+which leads to possible driver lock up (easily reproducible on SMP).
+
+The problem happens in giveback() function where the completion of the transfer
+is signalled to SPI subsystem and then the HW SPI controller is disabled. Another
+transfer might be setup in between, which brings driver in locked-up state.
+
+Exact event sequence on SMP:
+
+core0 core1
+
+ => pump_transfers()
+ /* message->state == STATE_DONE */
+ => giveback()
+ => spi_finalize_current_message()
+
+=> pl022_unprepare_transfer_hardware()
+=> pl022_transfer_one_message
+ => flush()
+ => do_interrupt_dma_transfer()
+ => set_up_next_transfer()
+ /* Enable SSP, turn on interrupts */
+ writew((readw(SSP_CR1(pl022->virtbase)) |
+ SSP_CR1_MASK_SSE), SSP_CR1(pl022->virtbase));
+
+...
+
+=> pl022_interrupt_handler()
+ => readwriter()
+
+ /* disable the SPI/SSP operation */
+ => writew((readw(SSP_CR1(pl022->virtbase)) &
+ (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
+
+Lockup! SPI controller is disabled and the data will never be received. Whole
+SPI subsystem is waiting for transfer ACK and blocked.
+
+So, only signal transfer completion after disabling the controller.
+
+Fixes: fd316941c (spi/pl022: disable port when unused)
+Signed-off-by: Alexander Sverdlin <alexander.sverdlin@nokia.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spi/spi-pl022.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/spi/spi-pl022.c
++++ b/drivers/spi/spi-pl022.c
+@@ -534,12 +534,12 @@ static void giveback(struct pl022 *pl022
+ pl022->cur_msg = NULL;
+ pl022->cur_transfer = NULL;
+ pl022->cur_chip = NULL;
+- spi_finalize_current_message(pl022->master);
+
+ /* disable the SPI/SSP operation */
+ writew((readw(SSP_CR1(pl022->virtbase)) &
+ (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
+
++ spi_finalize_current_message(pl022->master);
+ }
+
+ /**
--- /dev/null
+From 62dfd912ab3b5405b6fe72d0135c37e9648071f1 Mon Sep 17 00:00:00 2001
+From: "jmlatten@linux.vnet.ibm.com" <jmlatten@linux.vnet.ibm.com>
+Date: Fri, 20 Feb 2015 18:11:24 -0600
+Subject: tpm/ibmvtpm: Additional LE support for tpm_ibmvtpm_send
+
+From: "jmlatten@linux.vnet.ibm.com" <jmlatten@linux.vnet.ibm.com>
+
+commit 62dfd912ab3b5405b6fe72d0135c37e9648071f1 upstream.
+
+Problem: When IMA and VTPM are both enabled in kernel config,
+kernel hangs during bootup on LE OS.
+
+Why?: IMA calls tpm_pcr_read() which results in tpm_ibmvtpm_send
+and tpm_ibmtpm_recv getting called. A trace showed that
+tpm_ibmtpm_recv was hanging.
+
+Resolution: tpm_ibmtpm_recv was hanging because tpm_ibmvtpm_send
+was sending CRQ message that probably did not make much sense
+to phype because of Endianness. The fix below sends correctly
+converted CRQ for LE. This was not caught before because it
+seems IMA is not enabled by default in kernel config and
+IMA exercises this particular code path in vtpm.
+
+Tested with IMA and VTPM enabled in kernel config and VTPM
+enabled on both a BE OS and a LE OS ppc64 lpar. This exercised
+CRQ and TPM command code paths in vtpm.
+Patch is against Peter's tpmdd tree on github which included
+Vicky's previous vtpm le patches.
+
+Signed-off-by: Joy Latten <jmlatten@linux.vnet.ibm.com>
+Reviewed-by: Ashley Lai <ashley@ahsleylai.com>
+Signed-off-by: Peter Huewe <peterhuewe@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/tpm/tpm_ibmvtpm.c | 10 +++++-----
+ drivers/char/tpm/tpm_ibmvtpm.h | 6 +++---
+ 2 files changed, 8 insertions(+), 8 deletions(-)
+
+--- a/drivers/char/tpm/tpm_ibmvtpm.c
++++ b/drivers/char/tpm/tpm_ibmvtpm.c
+@@ -124,7 +124,7 @@ static int tpm_ibmvtpm_send(struct tpm_c
+ {
+ struct ibmvtpm_dev *ibmvtpm;
+ struct ibmvtpm_crq crq;
+- u64 *word = (u64 *) &crq;
++ __be64 *word = (__be64 *)&crq;
+ int rc;
+
+ ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);
+@@ -145,11 +145,11 @@ static int tpm_ibmvtpm_send(struct tpm_c
+ memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
+ crq.valid = (u8)IBMVTPM_VALID_CMD;
+ crq.msg = (u8)VTPM_TPM_COMMAND;
+- crq.len = (u16)count;
+- crq.data = ibmvtpm->rtce_dma_handle;
++ crq.len = cpu_to_be16(count);
++ crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle);
+
+- rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]),
+- cpu_to_be64(word[1]));
++ rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]),
++ be64_to_cpu(word[1]));
+ if (rc != H_SUCCESS) {
+ dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
+ rc = 0;
+--- a/drivers/char/tpm/tpm_ibmvtpm.h
++++ b/drivers/char/tpm/tpm_ibmvtpm.h
+@@ -22,9 +22,9 @@
+ struct ibmvtpm_crq {
+ u8 valid;
+ u8 msg;
+- u16 len;
+- u32 data;
+- u64 reserved;
++ __be16 len;
++ __be32 data;
++ __be64 reserved;
+ } __attribute__((packed, aligned(8)));
+
+ struct ibmvtpm_crq_queue {
--- /dev/null
+From c4eadfafb91d5501095c55ffadaa1168743f39d3 Mon Sep 17 00:00:00 2001
+From: Christophe Ricard <christophe.ricard@gmail.com>
+Date: Tue, 13 Jan 2015 23:13:14 +0100
+Subject: tpm/tpm_i2c_stm_st33: Add status check when reading data on the FIFO
+
+From: Christophe Ricard <christophe.ricard@gmail.com>
+
+commit c4eadfafb91d5501095c55ffadaa1168743f39d3 upstream.
+
+Add a return value check when reading data from the FIFO register.
+
+Reviewed-by: Jason Gunthorpe <jason.gunthorpe@obsidianresearch.com>
+Signed-off-by: Christophe Ricard <christophe-h.ricard@st.com>
+Reviewed-by: Peter Huewe <peterhuewe@gmx.de>
+Signed-off-by: Peter Huewe <peterhuewe@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/tpm/tpm_i2c_stm_st33.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/char/tpm/tpm_i2c_stm_st33.c
++++ b/drivers/char/tpm/tpm_i2c_stm_st33.c
+@@ -397,7 +397,7 @@ static int wait_for_stat(struct tpm_chip
+ */
+ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
+ {
+- int size = 0, burstcnt, len;
++ int size = 0, burstcnt, len, ret;
+ struct i2c_client *client;
+
+ client = (struct i2c_client *)TPM_VPRIV(chip);
+@@ -406,13 +406,15 @@ static int recv_data(struct tpm_chip *ch
+ wait_for_stat(chip,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ chip->vendor.timeout_c,
+- &chip->vendor.read_queue)
+- == 0) {
++ &chip->vendor.read_queue) == 0) {
+ burstcnt = get_burstcount(chip);
+ if (burstcnt < 0)
+ return burstcnt;
+ len = min_t(int, burstcnt, count - size);
+- I2C_READ_DATA(client, TPM_DATA_FIFO, buf + size, len);
++ ret = I2C_READ_DATA(client, TPM_DATA_FIFO, buf + size, len);
++ if (ret < 0)
++ return ret;
++
+ size += len;
+ }
+ return size;
--- /dev/null
+From 8603e1b30027f943cc9c1eef2b291d42c3347af1 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Thu, 5 Mar 2015 08:04:13 -0500
+Subject: workqueue: fix hang involving racing cancel[_delayed]_work_sync()'s for PREEMPT_NONE
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 8603e1b30027f943cc9c1eef2b291d42c3347af1 upstream.
+
+cancel[_delayed]_work_sync() are implemented using
+__cancel_work_timer() which grabs the PENDING bit using
+try_to_grab_pending() and then flushes the work item with PENDING set
+to prevent the on-going execution of the work item from requeueing
+itself.
+
+try_to_grab_pending() can always grab PENDING bit without blocking
+except when someone else is doing the above flushing during
+cancelation. In that case, try_to_grab_pending() returns -ENOENT. In
+this case, __cancel_work_timer() currently invokes flush_work(). The
+assumption is that the completion of the work item is what the other
+canceling task would be waiting for too and thus waiting for the same
+condition and retrying should allow forward progress without excessive
+busy looping
+
+Unfortunately, this doesn't work if preemption is disabled or the
+latter task has real time priority. Let's say task A just got woken
+up from flush_work() by the completion of the target work item. If,
+before task A starts executing, task B gets scheduled and invokes
+__cancel_work_timer() on the same work item, its try_to_grab_pending()
+will return -ENOENT as the work item is still being canceled by task A
+and flush_work() will also immediately return false as the work item
+is no longer executing. This puts task B in a busy loop possibly
+preventing task A from executing and clearing the canceling state on
+the work item leading to a hang.
+
+task A task B worker
+
+ executing work
+__cancel_work_timer()
+ try_to_grab_pending()
+ set work CANCELING
+ flush_work()
+ block for work completion
+ completion, wakes up A
+ __cancel_work_timer()
+ while (forever) {
+ try_to_grab_pending()
+ -ENOENT as work is being canceled
+ flush_work()
+ false as work is no longer executing
+ }
+
+This patch removes the possible hang by updating __cancel_work_timer()
+to explicitly wait for clearing of CANCELING rather than invoking
+flush_work() after try_to_grab_pending() fails with -ENOENT.
+
+Link: http://lkml.kernel.org/g/20150206171156.GA8942@axis.com
+
+v3: bit_waitqueue() can't be used for work items defined in vmalloc
+ area. Switched to custom wake function which matches the target
+ work item and exclusive wait and wakeup.
+
+v2: v1 used wake_up() on bit_waitqueue() which leads to NULL deref if
+ the target bit waitqueue has wait_bit_queue's on it. Use
+ DEFINE_WAIT_BIT() and __wake_up_bit() instead. Reported by Tomeu
+ Vizoso.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: Rabin Vincent <rabin.vincent@axis.com>
+Cc: Tomeu Vizoso <tomeu.vizoso@gmail.com>
+Tested-by: Jesper Nilsson <jesper.nilsson@axis.com>
+Tested-by: Rabin Vincent <rabin.vincent@axis.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/workqueue.h | 3 +-
+ kernel/workqueue.c | 56 ++++++++++++++++++++++++++++++++++++++++++----
+ 2 files changed, 54 insertions(+), 5 deletions(-)
+
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -70,7 +70,8 @@ enum {
+ /* data contains off-queue information when !WORK_STRUCT_PWQ */
+ WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT,
+
+- WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE),
++ __WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE,
++ WORK_OFFQ_CANCELING = (1 << __WORK_OFFQ_CANCELING),
+
+ /*
+ * When a work item is off queue, its high bits point to the last
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -2728,19 +2728,57 @@ bool flush_work(struct work_struct *work
+ }
+ EXPORT_SYMBOL_GPL(flush_work);
+
++struct cwt_wait {
++ wait_queue_t wait;
++ struct work_struct *work;
++};
++
++static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
++{
++ struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
++
++ if (cwait->work != key)
++ return 0;
++ return autoremove_wake_function(wait, mode, sync, key);
++}
++
+ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
+ {
++ static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
+ unsigned long flags;
+ int ret;
+
+ do {
+ ret = try_to_grab_pending(work, is_dwork, &flags);
+ /*
+- * If someone else is canceling, wait for the same event it
+- * would be waiting for before retrying.
++ * If someone else is already canceling, wait for it to
++ * finish. flush_work() doesn't work for PREEMPT_NONE
++ * because we may get scheduled between @work's completion
++ * and the other canceling task resuming and clearing
++ * CANCELING - flush_work() will return false immediately
++ * as @work is no longer busy, try_to_grab_pending() will
++ * return -ENOENT as @work is still being canceled and the
++ * other canceling task won't be able to clear CANCELING as
++ * we're hogging the CPU.
++ *
++ * Let's wait for completion using a waitqueue. As this
++ * may lead to the thundering herd problem, use a custom
++ * wake function which matches @work along with exclusive
++ * wait and wakeup.
+ */
+- if (unlikely(ret == -ENOENT))
+- flush_work(work);
++ if (unlikely(ret == -ENOENT)) {
++ struct cwt_wait cwait;
++
++ init_wait(&cwait.wait);
++ cwait.wait.func = cwt_wakefn;
++ cwait.work = work;
++
++ prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
++ TASK_UNINTERRUPTIBLE);
++ if (work_is_canceling(work))
++ schedule();
++ finish_wait(&cancel_waitq, &cwait.wait);
++ }
+ } while (unlikely(ret < 0));
+
+ /* tell other tasks trying to grab @work to back off */
+@@ -2749,6 +2787,16 @@ static bool __cancel_work_timer(struct w
+
+ flush_work(work);
+ clear_work_data(work);
++
++ /*
++ * Paired with prepare_to_wait() above so that either
++ * waitqueue_active() is visible here or !work_is_canceling() is
++ * visible there.
++ */
++ smp_mb();
++ if (waitqueue_active(&cancel_waitq))
++ __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
++
+ return ret;
+ }
+