git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
updated 3.18 mbox
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 18 Apr 2017 10:39:14 +0000 (12:39 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 18 Apr 2017 10:39:14 +0000 (12:39 +0200)
mbox_todo-3.18

index 012628bc56c5121b900e05d587c93ac8283fae23..bc6f04131e3652c3785872fbc85f00350a08ce7a 100644 (file)
@@ -10670,3 +10670,4281 @@ index 083724c6ca4d..fb7c2b40753d 100644
 -- 
 2.12.2
 
+From a80c068fbf43e22f099c0587b9e1a2337378a505 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Tue, 18 Apr 2017 07:15:37 +0200
+Subject: [PATCH 52/52] Linux 4.4.62
+Content-Length: 301
+Lines: 18
+
+---
+ Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Makefile b/Makefile
+index ef5045b8201d..0309acc34472 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 61
++SUBLEVEL = 62
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+-- 
+2.12.2
+
+From 7d170f270a95639192cfd53dcb15e6d8530b4577 Mon Sep 17 00:00:00 2001
+From: Thomas Falcon <tlfalcon@linux.vnet.ibm.com>
+Date: Thu, 8 Dec 2016 16:40:03 -0600
+Subject: [PATCH 51/52] ibmveth: set correct gso_size and gso_type
+Content-Length: 5980
+Lines: 170
+
+commit 7b5967389f5a8dfb9d32843830f5e2717e20995d upstream.
+
+This patch is based on an earlier one submitted
+by Jon Maxwell with the following commit message:
+
+"We recently encountered a bug where a few customers using ibmveth on the
+same LPAR hit an issue where a TCP session hung when large receive was
+enabled. Closer analysis revealed that the session was stuck because the
+one side was advertising a zero window repeatedly.
+
+We narrowed this down to the fact that the ibmveth driver did not set
+gso_size, which is translated by TCP into the MSS later up the stack.
+The MSS is used to calculate the TCP window size, and as that was
+abnormally large, it was calculating a zero window, even though the
+socket's receive buffer was completely empty."
+
+We rely on the Virtual I/O Server partition in a pseries
+environment to provide the MSS through the TCP header checksum
+field. The stipulation is that users should not disable checksum
+offloading if rx packet aggregation is enabled through VIOS.
+
+Some firmware offerings provide the MSS in the RX buffer.
+This is signalled by a bit in the RX queue descriptor.
+
+Reviewed-by: Brian King <brking@linux.vnet.ibm.com>
+Reviewed-by: Pradeep Satyanarayana <pradeeps@linux.vnet.ibm.com>
+Reviewed-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Reviewed-by: Jonathan Maxwell <jmaxwell37@gmail.com>
+Reviewed-by: David Dai <zdai@us.ibm.com>
+Signed-off-by: Thomas Falcon <tlfalcon@linux.vnet.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sumit Semwal <sumit.semwal@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/ibm/ibmveth.c | 65 ++++++++++++++++++++++++++++++++++++--
+ drivers/net/ethernet/ibm/ibmveth.h |  1 +
+ 2 files changed, 64 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
+index 7af870a3c549..855c43d8f7e0 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -58,7 +58,7 @@ static struct kobj_type ktype_veth_pool;
+ static const char ibmveth_driver_name[] = "ibmveth";
+ static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
+-#define ibmveth_driver_version "1.05"
++#define ibmveth_driver_version "1.06"
+ MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
+ MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
+@@ -137,6 +137,11 @@ static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
+       return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
+ }
++static inline int ibmveth_rxq_large_packet(struct ibmveth_adapter *adapter)
++{
++      return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_LRG_PKT;
++}
++
+ static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
+ {
+       return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
+@@ -1172,6 +1177,45 @@ map_failed:
+       goto retry_bounce;
+ }
++static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
++{
++      int offset = 0;
++
++      /* only TCP packets will be aggregated */
++      if (skb->protocol == htons(ETH_P_IP)) {
++              struct iphdr *iph = (struct iphdr *)skb->data;
++
++              if (iph->protocol == IPPROTO_TCP) {
++                      offset = iph->ihl * 4;
++                      skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
++              } else {
++                      return;
++              }
++      } else if (skb->protocol == htons(ETH_P_IPV6)) {
++              struct ipv6hdr *iph6 = (struct ipv6hdr *)skb->data;
++
++              if (iph6->nexthdr == IPPROTO_TCP) {
++                      offset = sizeof(struct ipv6hdr);
++                      skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
++              } else {
++                      return;
++              }
++      } else {
++              return;
++      }
++      /* if mss is not set through Large Packet bit/mss in rx buffer,
++       * expect that the mss will be written to the tcp header checksum.
++       */
++      if (lrg_pkt) {
++              skb_shinfo(skb)->gso_size = mss;
++      } else if (offset) {
++              struct tcphdr *tcph = (struct tcphdr *)(skb->data + offset);
++
++              skb_shinfo(skb)->gso_size = ntohs(tcph->check);
++              tcph->check = 0;
++      }
++}
++
+ static int ibmveth_poll(struct napi_struct *napi, int budget)
+ {
+       struct ibmveth_adapter *adapter =
+@@ -1180,6 +1224,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
+       int frames_processed = 0;
+       unsigned long lpar_rc;
+       struct iphdr *iph;
++      u16 mss = 0;
+ restart_poll:
+       while (frames_processed < budget) {
+@@ -1197,9 +1242,21 @@ restart_poll:
+                       int length = ibmveth_rxq_frame_length(adapter);
+                       int offset = ibmveth_rxq_frame_offset(adapter);
+                       int csum_good = ibmveth_rxq_csum_good(adapter);
++                      int lrg_pkt = ibmveth_rxq_large_packet(adapter);
+                       skb = ibmveth_rxq_get_buffer(adapter);
++                      /* if the large packet bit is set in the rx queue
++                       * descriptor, the mss will be written by PHYP eight
++                       * bytes from the start of the rx buffer, which is
++                       * skb->data at this stage
++                       */
++                      if (lrg_pkt) {
++                              __be64 *rxmss = (__be64 *)(skb->data + 8);
++
++                              mss = (u16)be64_to_cpu(*rxmss);
++                      }
++
+                       new_skb = NULL;
+                       if (length < rx_copybreak)
+                               new_skb = netdev_alloc_skb(netdev, length);
+@@ -1233,11 +1290,15 @@ restart_poll:
+                                       if (iph->check == 0xffff) {
+                                               iph->check = 0;
+                                               iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+-                                              adapter->rx_large_packets++;
+                                       }
+                               }
+                       }
++                      if (length > netdev->mtu + ETH_HLEN) {
++                              ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
++                              adapter->rx_large_packets++;
++                      }
++
+                       napi_gro_receive(napi, skb);    /* send it up */
+                       netdev->stats.rx_packets++;
+diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
+index 4eade67fe30c..7acda04d034e 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.h
++++ b/drivers/net/ethernet/ibm/ibmveth.h
+@@ -209,6 +209,7 @@ struct ibmveth_rx_q_entry {
+ #define IBMVETH_RXQ_TOGGLE            0x80000000
+ #define IBMVETH_RXQ_TOGGLE_SHIFT      31
+ #define IBMVETH_RXQ_VALID             0x40000000
++#define IBMVETH_RXQ_LRG_PKT           0x04000000
+ #define IBMVETH_RXQ_NO_CSUM           0x02000000
+ #define IBMVETH_RXQ_CSUM_GOOD         0x01000000
+ #define IBMVETH_RXQ_OFF_MASK          0x0000FFFF
+-- 
+2.12.2
+
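+To restate the two MSS transports the commit message above describes: with
+the Large Packet bit set, the MSS arrives as a big-endian 64-bit value
+eight bytes into the rx buffer; otherwise the VIOS stashes it in the TCP
+header checksum field. A minimal userspace sketch of both reads
+(illustrative values, not driver code):
+
+#include <arpa/inet.h>
+#include <endian.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+int main(void)
+{
+      unsigned char rxbuf[16] = {0};
+      uint64_t mss_be = htobe64(1448);        /* firmware writes MSS at offset 8 */
+      uint64_t raw;
+
+      memcpy(rxbuf + 8, &mss_be, sizeof(mss_be));
+      memcpy(&raw, rxbuf + 8, sizeof(raw));
+      printf("MSS from large-packet buffer: %u\n",
+             (uint16_t)be64toh(raw));         /* low 16 bits hold the MSS */
+
+      uint16_t check = htons(1448);           /* MSS carried in tcph->check */
+      printf("MSS from TCP checksum field: %u\n", ntohs(check));
+      return 0;
+}
+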
+From ac0cbfbb1e4b84d426f210849492afadbc4b6bb9 Mon Sep 17 00:00:00 2001
+From: Jack Morgenstein <jackm@dev.mellanox.co.il>
+Date: Mon, 16 Jan 2017 18:31:38 +0200
+Subject: [PATCH 50/52] net/mlx4_core: Fix when to save some qp context flags
+ for dynamic VST to VGT transitions
+Content-Length: 2238
+Lines: 54
+
+commit 7c3945bc2073554bb2ecf983e073dee686679c53 upstream.
+
+Save the qp context flags byte containing the flag disabling vlan stripping
+in the RESET to INIT qp transition, rather than in the INIT to RTR
+transition. Per the firmware spec, the flags in this byte are active
+in the RESET to INIT transition.
+
+As a result of saving the flags in the incorrect qp transition, when
+switching dynamically from VGT to VST and back to VGT, the vlan
+remained stripped (as is required for VST) and did not return to
+not-stripped (as is required for VGT).
+
+Fixes: f0f829bf42cd ("net/mlx4_core: Add immediate activate for VGT->VST->VGT")
+Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sumit Semwal <sumit.semwal@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+index d314d96dcb1c..d1fc7fa87b05 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+@@ -2955,6 +2955,9 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+               put_res(dev, slave, srqn, RES_SRQ);
+               qp->srq = srq;
+       }
++
++      /* Save param3 for dynamic changes from VST back to VGT */
++      qp->param3 = qpc->param3;
+       put_res(dev, slave, rcqn, RES_CQ);
+       put_res(dev, slave, mtt_base, RES_MTT);
+       res_end_move(dev, slave, RES_QP, qpn);
+@@ -3747,7 +3750,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
+       int qpn = vhcr->in_modifier & 0x7fffff;
+       struct res_qp *qp;
+       u8 orig_sched_queue;
+-      __be32  orig_param3 = qpc->param3;
+       u8 orig_vlan_control = qpc->pri_path.vlan_control;
+       u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
+       u8 orig_pri_path_fl = qpc->pri_path.fl;
+@@ -3789,7 +3791,6 @@ out:
+        */
+       if (!err) {
+               qp->sched_queue = orig_sched_queue;
+-              qp->param3      = orig_param3;
+               qp->vlan_control = orig_vlan_control;
+               qp->fvl_rx      =  orig_fvl_rx;
+               qp->pri_path_fl = orig_pri_path_fl;
+-- 
+2.12.2
+
+From 710f793a15de0213d4e15f123f327b2075a0c62b Mon Sep 17 00:00:00 2001
+From: Jack Morgenstein <jackm@dev.mellanox.co.il>
+Date: Mon, 16 Jan 2017 18:31:37 +0200
+Subject: [PATCH 49/52] net/mlx4_core: Fix racy CQ (Completion Queue) free
+Content-Length: 5257
+Lines: 146
+
+commit 291c566a28910614ce42d0ffe82196eddd6346f4 upstream.
+
+In functions mlx4_cq_completion() and mlx4_cq_event(), the
+radix_tree_lookup requires an rcu_read_lock.
+This is mandatory: if another core frees the CQ, it could
+run the radix_tree_node_rcu_free() call_rcu() callback while
+it's being used by the radix tree lookup function.
+
+Additionally, in function mlx4_cq_event(), since we are adding
+the rcu lock around the radix-tree lookup, we no longer need to take
+the spinlock. Also, the synchronize_irq() call for the async event
+eliminates the need for incrementing the cq reference count in
+mlx4_cq_event().
+
+Other changes:
+1. In function mlx4_cq_free(), replace spin_lock_irq with spin_lock:
+   we no longer take this spinlock in the interrupt context.
+   The spinlock here, therefore, simply protects against different
+   threads simultaneously invoking mlx4_cq_free() for different cq's.
+
+2. In function mlx4_cq_free(), we move the radix tree delete to before
+   the synchronize_irq() calls. This guarantees that we will not
+   access this cq during any subsequent interrupts, and therefore can
+   safely free the CQ after the synchronize_irq calls. The rcu_read_lock
+   in the interrupt handlers only needs to protect against corrupting the
+   radix tree; the interrupt handlers may access the cq outside the
+   rcu_read_lock due to the synchronize_irq calls which protect against
+   premature freeing of the cq.
+
+3. In function mlx4_cq_event(), we change the mlx4_warn message to mlx4_dbg.
+
+4. We leave the cq reference count mechanism in place, because it is
+   still needed for the cq completion tasklet mechanism.
+
+Fixes: 6d90aa5cf17b ("net/mlx4_core: Make sure there are no pending async events when freeing CQ")
+Fixes: 225c7b1feef1 ("IB/mlx4: Add a driver Mellanox ConnectX InfiniBand adapters")
+Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
+Signed-off-by: Matan Barak <matanb@mellanox.com>
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sumit Semwal <sumit.semwal@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/cq.c | 38 +++++++++++++++++----------------
+ 1 file changed, 20 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
+index 3348e646db70..6eba58044456 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
++++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
+@@ -101,13 +101,19 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
+ {
+       struct mlx4_cq *cq;
++      rcu_read_lock();
+       cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
+                              cqn & (dev->caps.num_cqs - 1));
++      rcu_read_unlock();
++
+       if (!cq) {
+               mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
+               return;
+       }
++      /* Accessing the CQ outside of rcu_read_lock is safe, because
++       * the CQ is freed only after interrupt handling is completed.
++       */
+       ++cq->arm_sn;
+       cq->comp(cq);
+@@ -118,23 +124,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
+       struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
+       struct mlx4_cq *cq;
+-      spin_lock(&cq_table->lock);
+-
++      rcu_read_lock();
+       cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
+-      if (cq)
+-              atomic_inc(&cq->refcount);
+-
+-      spin_unlock(&cq_table->lock);
++      rcu_read_unlock();
+       if (!cq) {
+-              mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
++              mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
+               return;
+       }
++      /* Accessing the CQ outside of rcu_read_lock is safe, because
++       * the CQ is freed only after interrupt handling is completed.
++       */
+       cq->event(cq, event_type);
+-
+-      if (atomic_dec_and_test(&cq->refcount))
+-              complete(&cq->free);
+ }
+ static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+@@ -301,9 +303,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
+       if (err)
+               return err;
+-      spin_lock_irq(&cq_table->lock);
++      spin_lock(&cq_table->lock);
+       err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
+-      spin_unlock_irq(&cq_table->lock);
++      spin_unlock(&cq_table->lock);
+       if (err)
+               goto err_icm;
+@@ -347,9 +349,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
+       return 0;
+ err_radix:
+-      spin_lock_irq(&cq_table->lock);
++      spin_lock(&cq_table->lock);
+       radix_tree_delete(&cq_table->tree, cq->cqn);
+-      spin_unlock_irq(&cq_table->lock);
++      spin_unlock(&cq_table->lock);
+ err_icm:
+       mlx4_cq_free_icm(dev, cq->cqn);
+@@ -368,15 +370,15 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
+       if (err)
+               mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
++      spin_lock(&cq_table->lock);
++      radix_tree_delete(&cq_table->tree, cq->cqn);
++      spin_unlock(&cq_table->lock);
++
+       synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
+       if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
+           priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
+               synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
+-      spin_lock_irq(&cq_table->lock);
+-      radix_tree_delete(&cq_table->tree, cq->cqn);
+-      spin_unlock_irq(&cq_table->lock);
+-
+       if (atomic_dec_and_test(&cq->refcount))
+               complete(&cq->free);
+       wait_for_completion(&cq->free);
+-- 
+2.12.2
+
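+Condensed, the ordering the patch above establishes looks like this (a
+sketch of the pattern, with refcounting and error handling elided):
+
+/* lookup side, interrupt context */
+rcu_read_lock();
+cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
+rcu_read_unlock();
+if (cq)
+      cq->comp(cq);   /* safe outside the lock: the free side drains irqs first */
+
+/* free side */
+spin_lock(&cq_table->lock);
+radix_tree_delete(&cq_table->tree, cq->cqn);  /* no new lookup can find cq */
+spin_unlock(&cq_table->lock);
+synchronize_irq(irq);                         /* in-flight handlers complete */
+/* only now is it safe to free cq */
+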
+From f1e6b1149e497dc61ceff290c1d3db259ebf7938 Mon Sep 17 00:00:00 2001
+From: Eugenia Emantayev <eugenia@mellanox.com>
+Date: Thu, 29 Dec 2016 18:37:10 +0200
+Subject: [PATCH 48/52] net/mlx4_en: Fix bad WQE issue
+Content-Length: 1449
+Lines: 38
+
+commit 6496bbf0ec481966ef9ffe5b6660d8d1b55c60cc upstream.
+
+The single send WQE in the RX buffer should be stamped with
+software ownership in order to prevent the QP from entering an
+error flow in FW once UPDATE_QP is called.
+
+Fixes: 9f519f68cfff ('mlx4_en: Not using Shared Receive Queues')
+Signed-off-by: Eugenia Emantayev <eugenia@mellanox.com>
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sumit Semwal <sumit.semwal@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/en_rx.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index 28a4b34310b2..82bf1b539d87 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -439,8 +439,14 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
+               ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
+               ring->stride = stride;
+-              if (ring->stride <= TXBB_SIZE)
++              if (ring->stride <= TXBB_SIZE) {
++                      /* Stamp first unused send wqe */
++                      __be32 *ptr = (__be32 *)ring->buf;
++                      __be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
++                      *ptr = stamp;
++                      /* Move pointer to start of rx section */
+                       ring->buf += TXBB_SIZE;
++              }
+               ring->log_stride = ffs(ring->stride) - 1;
+               ring->buf_size = ring->size * ring->stride;
+-- 
+2.12.2
+
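+"Stamping" here means writing one big-endian dword with the ownership bit
+set at the head of the ring buffer. A toy userspace rendering (STAMP_SHIFT
+is assumed to be 31 as in mlx4_en.h; verify against the driver):
+
+#include <arpa/inet.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#define STAMP_SHIFT 31        /* assumed to match the driver's definition */
+
+int main(void)
+{
+      unsigned char ring_buf[64] = {0};
+      uint32_t stamp = htonl(1u << STAMP_SHIFT);  /* ownership bit, big-endian */
+
+      memcpy(ring_buf, &stamp, sizeof(stamp));    /* stamp the unused send WQE */
+      printf("first dword: %02x %02x %02x %02x\n",
+             ring_buf[0], ring_buf[1], ring_buf[2], ring_buf[3]);
+      return 0;
+}
+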
+From 0a007f74b826836074de8bfcb1e197cada993718 Mon Sep 17 00:00:00 2001
+From: Guenter Roeck <linux@roeck-us.net>
+Date: Thu, 1 Dec 2016 13:49:59 -0800
+Subject: [PATCH 47/52] usb: hub: Wait for connection to be reestablished after
+ port reset
+Content-Length: 2521
+Lines: 61
+
+commit 22547c4cc4fe20698a6a85a55b8788859134b8e4 upstream.
+
+On a system with a defective USB device connected to a USB hub,
+an endless sequence of port connect events was observed. The sequence
+of events as observed is as follows:
+
+- Port reports connected event (port status=USB_PORT_STAT_CONNECTION).
+- Event handler debounces port and resets it by calling hub_port_reset().
+- hub_port_reset() calls hub_port_wait_reset() to wait for the reset
+  to complete.
+- The reset completes, but USB_PORT_STAT_CONNECTION is not immediately
+  set in the port status register.
+- hub_port_wait_reset() returns -ENOTCONN.
+- Port initialization sequence is aborted.
+- A few milliseconds later, the port again reports a connected event,
+  and the sequence repeats.
+
+This continues either forever or, randomly, stops if the connection
+is already re-established when the port status is read. It results in
+a high rate of udev events. This in turn destabilizes userspace since
+the above sequence holds the device mutex pretty much continuously
+and prevents userspace from actually reading the device status.
+
+To prevent the problem from happening, let's wait for the connection
+to be re-established after a port reset. If the device was actually
+disconnected, the code will still return an error, but it will do so
+only after the long reset timeout.
+
+Cc: Douglas Anderson <dianders@chromium.org>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Acked-by: Alan Stern <stern@rowland.harvard.edu>
+Signed-off-by: Sumit Semwal <sumit.semwal@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/core/hub.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 9e62c93af96e..7c2d87befb51 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2602,8 +2602,15 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+               if (ret < 0)
+                       return ret;
+-              /* The port state is unknown until the reset completes. */
+-              if (!(portstatus & USB_PORT_STAT_RESET))
++              /*
++               * The port state is unknown until the reset completes.
++               *
++               * On top of that, some chips may require additional time
++               * to re-establish a connection after the reset is complete,
++               * so also wait for the connection to be re-established.
++               */
++              if (!(portstatus & USB_PORT_STAT_RESET) &&
++                  (portstatus & USB_PORT_STAT_CONNECTION))
+                       break;
+               /* switch to the long delay after two short delay failures */
+-- 
+2.12.2
+
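+The fix amounts to a second clause in the wait loop's exit condition. A
+sketch of the loop's shape (simplified from hub_port_wait_reset(); the
+delay bookkeeping and warm-reset handling are omitted):
+
+for (delay_time = 0; delay_time < HUB_RESET_TIMEOUT; delay_time += delay) {
+      msleep(delay);
+
+      ret = hub_port_status(hub, port1, &portstatus, &portchange);
+      if (ret < 0)
+              return ret;
+
+      /* exit only once the reset is done AND the link is back */
+      if (!(portstatus & USB_PORT_STAT_RESET) &&
+          (portstatus & USB_PORT_STAT_CONNECTION))
+              break;
+}
+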
+From f4522e36edaa9ec0cada0daa5c2628db762dd3d9 Mon Sep 17 00:00:00 2001
+From: Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>
+Date: Tue, 6 Dec 2016 13:31:44 -0200
+Subject: [PATCH 46/52] blk-mq: Avoid memory reclaim when remapping queues
+Content-Length: 5065
+Lines: 105
+
+commit 36e1f3d107867b25c616c2fd294f5a1c9d4e5d09 upstream.
+
+While stressing memory and IO and changing SMT settings at the
+same time, we were able to consistently trigger deadlocks in the
+mm system, which froze the entire machine.
+
+I think that under memory stress conditions, the large allocations
+performed by blk_mq_init_rq_map may trigger a reclaim, which stalls
+waiting on the block layer remapping completion, thus deadlocking the
+system.  The trace below was collected after the machine stalled,
+waiting for the hotplug event completion.
+
+The simplest fix for this is to make allocations in this path
+non-reclaimable, with GFP_NOIO.  With this patch, we couldn't hit the
+issue anymore.
+
+This should apply on top of Jens's for-next branch cleanly.
+
+Changes since v1:
+  - Use GFP_NOIO instead of GFP_NOWAIT.
+
+ Call Trace:
+[c000000f0160aaf0] [c000000f0160ab50] 0xc000000f0160ab50 (unreliable)
+[c000000f0160acc0] [c000000000016624] __switch_to+0x2e4/0x430
+[c000000f0160ad20] [c000000000b1a880] __schedule+0x310/0x9b0
+[c000000f0160ae00] [c000000000b1af68] schedule+0x48/0xc0
+[c000000f0160ae30] [c000000000b1b4b0] schedule_preempt_disabled+0x20/0x30
+[c000000f0160ae50] [c000000000b1d4fc] __mutex_lock_slowpath+0xec/0x1f0
+[c000000f0160aed0] [c000000000b1d678] mutex_lock+0x78/0xa0
+[c000000f0160af00] [d000000019413cac] xfs_reclaim_inodes_ag+0x33c/0x380 [xfs]
+[c000000f0160b0b0] [d000000019415164] xfs_reclaim_inodes_nr+0x54/0x70 [xfs]
+[c000000f0160b0f0] [d0000000194297f8] xfs_fs_free_cached_objects+0x38/0x60 [xfs]
+[c000000f0160b120] [c0000000003172c8] super_cache_scan+0x1f8/0x210
+[c000000f0160b190] [c00000000026301c] shrink_slab.part.13+0x21c/0x4c0
+[c000000f0160b2d0] [c000000000268088] shrink_zone+0x2d8/0x3c0
+[c000000f0160b380] [c00000000026834c] do_try_to_free_pages+0x1dc/0x520
+[c000000f0160b450] [c00000000026876c] try_to_free_pages+0xdc/0x250
+[c000000f0160b4e0] [c000000000251978] __alloc_pages_nodemask+0x868/0x10d0
+[c000000f0160b6f0] [c000000000567030] blk_mq_init_rq_map+0x160/0x380
+[c000000f0160b7a0] [c00000000056758c] blk_mq_map_swqueue+0x33c/0x360
+[c000000f0160b820] [c000000000567904] blk_mq_queue_reinit+0x64/0xb0
+[c000000f0160b850] [c00000000056a16c] blk_mq_queue_reinit_notify+0x19c/0x250
+[c000000f0160b8a0] [c0000000000f5d38] notifier_call_chain+0x98/0x100
+[c000000f0160b8f0] [c0000000000c5fb0] __cpu_notify+0x70/0xe0
+[c000000f0160b930] [c0000000000c63c4] notify_prepare+0x44/0xb0
+[c000000f0160b9b0] [c0000000000c52f4] cpuhp_invoke_callback+0x84/0x250
+[c000000f0160ba10] [c0000000000c570c] cpuhp_up_callbacks+0x5c/0x120
+[c000000f0160ba60] [c0000000000c7cb8] _cpu_up+0xf8/0x1d0
+[c000000f0160bac0] [c0000000000c7eb0] do_cpu_up+0x120/0x150
+[c000000f0160bb40] [c0000000006fe024] cpu_subsys_online+0x64/0xe0
+[c000000f0160bb90] [c0000000006f5124] device_online+0xb4/0x120
+[c000000f0160bbd0] [c0000000006f5244] online_store+0xb4/0xc0
+[c000000f0160bc20] [c0000000006f0a68] dev_attr_store+0x68/0xa0
+[c000000f0160bc60] [c0000000003ccc30] sysfs_kf_write+0x80/0xb0
+[c000000f0160bca0] [c0000000003cbabc] kernfs_fop_write+0x17c/0x250
+[c000000f0160bcf0] [c00000000030fe6c] __vfs_write+0x6c/0x1e0
+[c000000f0160bd90] [c000000000311490] vfs_write+0xd0/0x270
+[c000000f0160bde0] [c0000000003131fc] SyS_write+0x6c/0x110
+[c000000f0160be30] [c000000000009204] system_call+0x38/0xec
+
+Signed-off-by: Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>
+Cc: Brian King <brking@linux.vnet.ibm.com>
+Cc: Douglas Miller <dougmill@linux.vnet.ibm.com>
+Cc: linux-block@vger.kernel.org
+Cc: linux-scsi@vger.kernel.org
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Sumit Semwal <sumit.semwal@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-mq.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index d8d63c38bf29..0d1af3e44efb 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1470,7 +1470,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
+       INIT_LIST_HEAD(&tags->page_list);
+       tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
+-                               GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
++                               GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
+                                set->numa_node);
+       if (!tags->rqs) {
+               blk_mq_free_tags(tags);
+@@ -1496,7 +1496,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
+               do {
+                       page = alloc_pages_node(set->numa_node,
+-                              GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
++                              GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
+                               this_order);
+                       if (page)
+                               break;
+@@ -1517,7 +1517,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
+                * Allow kmemleak to scan these pages as they contain pointers
+                * to additional allocations like via ops->init_request().
+                */
+-              kmemleak_alloc(p, order_to_size(this_order), 1, GFP_KERNEL);
++              kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
+               entries_per_page = order_to_size(this_order) / rq_size;
+               to_do = min(entries_per_page, set->queue_depth - i);
+               left -= to_do * rq_size;
+-- 
+2.12.2
+
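+GFP_NOIO drops __GFP_IO (and with it __GFP_FS) from the allocation mask,
+so direct reclaim triggered by these allocations cannot recurse into the
+IO path that the caller is in the middle of reconfiguring. The same
+guarantee can also be scoped over a region with the memalloc_noio API;
+the placement below is illustrative only, not what this patch does:
+
+unsigned int noio_flags = memalloc_noio_save();
+
+/* every allocation in here implicitly behaves as if GFP_NOIO were passed */
+tags = blk_mq_init_rq_map(set, hctx_idx);
+
+memalloc_noio_restore(noio_flags);
+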
+From d35f8fa0b93e61dd95b8f86928a783c4d8a32d3e Mon Sep 17 00:00:00 2001
+From: Andrey Konovalov <andreyknvl@google.com>
+Date: Wed, 29 Mar 2017 16:11:20 +0200
+Subject: [PATCH 45/52] net/packet: fix overflow in check for priv area size
+Content-Length: 1299
+Lines: 36
+
+commit 2b6867c2ce76c596676bec7d2d525af525fdc6e2 upstream.
+
+Subtracting tp_sizeof_priv from tp_block_size and casting to int
+to check whether one is less than the other doesn't always work
+(both of them are unsigned ints).
+
+Compare them as is instead.
+
+Also cast tp_sizeof_priv to u64 before using BLK_PLUS_PRIV, as
+it can overflow inside BLK_PLUS_PRIV otherwise.
+
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/packet/af_packet.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 3975ac809934..d76800108ddb 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -4138,8 +4138,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+               if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
+                       goto out;
+               if (po->tp_version >= TPACKET_V3 &&
+-                  (int)(req->tp_block_size -
+-                        BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
++                  req->tp_block_size <=
++                        BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
+                       goto out;
+               if (unlikely(req->tp_frame_size < po->tp_hdrlen +
+                                       po->tp_reserve))
+-- 
+2.12.2
+
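+The overflow is easy to reproduce in isolation: once the priv area is
+large enough that the unsigned subtraction wraps, the old cast-to-int
+test accepts a block size that is far too small. A userspace sketch with
+illustrative constants:
+
+#include <stdint.h>
+#include <stdio.h>
+
+int main(void)
+{
+      uint32_t tp_block_size = 4096;
+      uint32_t priv_needed = 0xFFFFF000u;  /* stand-in for BLK_PLUS_PRIV(...) */
+
+      /* old check: 4096 - 0xFFFFF000 wraps to 0x2000, so the cast stays positive */
+      if ((int)(tp_block_size - priv_needed) <= 0)
+              puts("old check: rejected");
+      else
+              puts("old check: accepted despite priv > block size (overflow)");
+
+      /* fixed check: plain unsigned comparison */
+      if (tp_block_size <= priv_needed)
+              puts("new check: rejected");
+      return 0;
+}
+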
+From fd8bae310684b557c0b30ae9105420956a41494f Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Horia=20Geant=C4=83?= <horia.geanta@nxp.com>
+Date: Wed, 5 Apr 2017 11:41:03 +0300
+Subject: [PATCH 44/52] crypto: caam - fix RNG deinstantiation error checking
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Content-Length: 1333
+Lines: 34
+
+commit 40c98cb57cdbc377456116ad4582c89e329721b0 upstream.
+
+RNG instantiation was previously fixed by
+commit 62743a4145bb9 ("crypto: caam - fix RNG init descriptor ret. code checking")
+while deinstantiation was not addressed.
+
+Since the descriptors used are similar, in the sense that they both end
+with a JUMP HALT command, checking for errors should be similar too,
+i.e. status code 7000_0000h should be considered successful.
+
+Fixes: 1005bccd7a4a6 ("crypto: caam - enable instantiation of all RNG4 state handles")
+Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/caam/ctrl.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
+index 69d4a1326fee..53e61459c69f 100644
+--- a/drivers/crypto/caam/ctrl.c
++++ b/drivers/crypto/caam/ctrl.c
+@@ -278,7 +278,8 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
+                       /* Try to run it through DECO0 */
+                       ret = run_descriptor_deco0(ctrldev, desc, &status);
+-                      if (ret || status) {
++                      if (ret ||
++                          (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
+                               dev_err(ctrldev,
+                                       "Failed to deinstantiate RNG4 SH%d\n",
+                                       sh_idx);
+-- 
+2.12.2
+
+From ba7681e4eee6739e4f23a1ba21fb7737fe4ce4f4 Mon Sep 17 00:00:00 2001
+From: Matt Redfearn <matt.redfearn@imgtec.com>
+Date: Wed, 25 Jan 2017 17:00:25 +0000
+Subject: [PATCH 43/52] MIPS: IRQ Stack: Fix erroneous jal to plat_irq_dispatch
+Content-Length: 1415
+Lines: 38
+
+commit c25f8064c1d5731a2ce5664def890140dcdd3e5c upstream.
+
+Commit dda45f701c9d ("MIPS: Switch to the irq_stack in interrupts")
+changed both the normal and vectored interrupt handlers. Unfortunately
+the vectored version, "except_vec_vi_handler", was incorrectly modified
+to unconditionally jal to plat_irq_dispatch, rather than doing a jalr to
+the vectored handler that has been set up. This is ok for many platforms
+which set the vectored handler to plat_irq_dispatch anyway, but will
+cause problems with platforms that use other handlers.
+
+Fixes: dda45f701c9d ("MIPS: Switch to the irq_stack in interrupts")
+Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: Paul Burton <paul.burton@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/15110/
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/kernel/genex.S | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
+index 2c7cd622673f..619e30e2c4f0 100644
+--- a/arch/mips/kernel/genex.S
++++ b/arch/mips/kernel/genex.S
+@@ -330,7 +330,7 @@ NESTED(except_vec_vi_handler, 0, sp)
+       PTR_ADD sp, t0, t1
+ 2:
+-      jal     plat_irq_dispatch
++      jalr    v0
+       /* Restore sp */
+       move    sp, s1
+-- 
+2.12.2
+
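+In C terms the bug is a direct call compiled in where a call through the
+installed handler pointer was intended. A toy illustration (names and
+setup invented for demonstration):
+
+#include <stdio.h>
+
+static void plat_irq_dispatch(void) { puts("default dispatcher"); }
+static void platform_handler(void)  { puts("handler installed for this vector"); }
+
+/* v0 stands in for the register loaded with the handler chosen at setup */
+static void (*v0)(void) = platform_handler;
+
+int main(void)
+{
+      plat_irq_dispatch();  /* the erroneous "jal plat_irq_dispatch": fixed target */
+      v0();                 /* the fixed "jalr v0": whatever was installed */
+      return 0;
+}
+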
+From f017e58da4aba293e4a6ab62ca5d4801f79cc929 Mon Sep 17 00:00:00 2001
+From: Matt Redfearn <matt.redfearn@imgtec.com>
+Date: Mon, 19 Dec 2016 14:21:00 +0000
+Subject: [PATCH 42/52] MIPS: Select HAVE_IRQ_EXIT_ON_IRQ_STACK
+Content-Length: 1117
+Lines: 33
+
+commit 3cc3434fd6307d06b53b98ce83e76bf9807689b9 upstream.
+
+Since do_IRQ is now invoked on a separate IRQ stack, we select
+HAVE_IRQ_EXIT_ON_IRQ_STACK so that softirqs may be invoked directly
+from irq_exit(), rather than requiring do_softirq_own_stack.
+
+Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
+Acked-by: Jason A. Donenfeld <jason@zx2c4.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-mips@linux-mips.org
+Cc: linux-kernel@vger.kernel.org
+Patchwork: https://patchwork.linux-mips.org/patch/14744/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index 75bfca69e418..d5cfa937d622 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -9,6 +9,7 @@ config MIPS
+       select HAVE_CONTEXT_TRACKING
+       select HAVE_GENERIC_DMA_COHERENT
+       select HAVE_IDE
++      select HAVE_IRQ_EXIT_ON_IRQ_STACK
+       select HAVE_OPROFILE
+       select HAVE_PERF_EVENTS
+       select PERF_USE_VMALLOC
+-- 
+2.12.2
+
+From b39b263816687fd71b10c31b3eb916defe8176f0 Mon Sep 17 00:00:00 2001
+From: Matt Redfearn <matt.redfearn@imgtec.com>
+Date: Mon, 19 Dec 2016 14:20:59 +0000
+Subject: [PATCH 41/52] MIPS: Switch to the irq_stack in interrupts
+Content-Length: 3496
+Lines: 130
+
+commit dda45f701c9d7ad4ac0bb446e3a96f6df9a468d9 upstream.
+
+When entering interrupt context via handle_int or except_vec_vi, switch
+to the irq_stack of the current CPU if it is not already in use.
+
+The current stack pointer is masked with the thread size and compared to
+the base of the irq stack. If it does not match, the stack pointer
+is set to the top of that stack; otherwise this is a nested irq being
+handled on the irq stack, so the stack pointer should be left as it was.
+
+The in-use stack pointer is placed in the callee saved register s1. It
+will be saved to the stack when plat_irq_dispatch is invoked and can be
+restored once control returns here.
+
+Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
+Acked-by: Jason A. Donenfeld <jason@zx2c4.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: James Hogan <james.hogan@imgtec.com>
+Cc: Paul Burton <paul.burton@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Cc: linux-kernel@vger.kernel.org
+Patchwork: https://patchwork.linux-mips.org/patch/14743/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/kernel/genex.S | 81 +++++++++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 76 insertions(+), 5 deletions(-)
+
+diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
+index baa7b6fc0a60..2c7cd622673f 100644
+--- a/arch/mips/kernel/genex.S
++++ b/arch/mips/kernel/genex.S
+@@ -188,9 +188,44 @@ NESTED(handle_int, PT_SIZE, sp)
+       LONG_L  s0, TI_REGS($28)
+       LONG_S  sp, TI_REGS($28)
+-      PTR_LA  ra, ret_from_irq
+-      PTR_LA  v0, plat_irq_dispatch
+-      jr      v0
++
++      /*
++       * SAVE_ALL ensures we are using a valid kernel stack for the thread.
++       * Check if we are already using the IRQ stack.
++       */
++      move    s1, sp # Preserve the sp
++
++      /* Get IRQ stack for this CPU */
++      ASM_CPUID_MFC0  k0, ASM_SMP_CPUID_REG
++#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
++      lui     k1, %hi(irq_stack)
++#else
++      lui     k1, %highest(irq_stack)
++      daddiu  k1, %higher(irq_stack)
++      dsll    k1, 16
++      daddiu  k1, %hi(irq_stack)
++      dsll    k1, 16
++#endif
++      LONG_SRL        k0, SMP_CPUID_PTRSHIFT
++      LONG_ADDU       k1, k0
++      LONG_L  t0, %lo(irq_stack)(k1)
++
++      # Check if already on IRQ stack
++      PTR_LI  t1, ~(_THREAD_SIZE-1)
++      and     t1, t1, sp
++      beq     t0, t1, 2f
++
++      /* Switch to IRQ stack */
++      li      t1, _IRQ_STACK_SIZE
++      PTR_ADD sp, t0, t1
++
++2:
++      jal     plat_irq_dispatch
++
++      /* Restore sp */
++      move    sp, s1
++
++      j       ret_from_irq
+ #ifdef CONFIG_CPU_MICROMIPS
+       nop
+ #endif
+@@ -263,8 +298,44 @@ NESTED(except_vec_vi_handler, 0, sp)
+       LONG_L  s0, TI_REGS($28)
+       LONG_S  sp, TI_REGS($28)
+-      PTR_LA  ra, ret_from_irq
+-      jr      v0
++
++      /*
++       * SAVE_ALL ensures we are using a valid kernel stack for the thread.
++       * Check if we are already using the IRQ stack.
++       */
++      move    s1, sp # Preserve the sp
++
++      /* Get IRQ stack for this CPU */
++      ASM_CPUID_MFC0  k0, ASM_SMP_CPUID_REG
++#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
++      lui     k1, %hi(irq_stack)
++#else
++      lui     k1, %highest(irq_stack)
++      daddiu  k1, %higher(irq_stack)
++      dsll    k1, 16
++      daddiu  k1, %hi(irq_stack)
++      dsll    k1, 16
++#endif
++      LONG_SRL        k0, SMP_CPUID_PTRSHIFT
++      LONG_ADDU       k1, k0
++      LONG_L  t0, %lo(irq_stack)(k1)
++
++      # Check if already on IRQ stack
++      PTR_LI  t1, ~(_THREAD_SIZE-1)
++      and     t1, t1, sp
++      beq     t0, t1, 2f
++
++      /* Switch to IRQ stack */
++      li      t1, _IRQ_STACK_SIZE
++      PTR_ADD sp, t0, t1
++
++2:
++      jal     plat_irq_dispatch
++
++      /* Restore sp */
++      move    sp, s1
++
++      j       ret_from_irq
+       END(except_vec_vi_handler)
+ /*
+-- 
+2.12.2
+
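+Both assembly paths implement the same small decision, which reads more
+plainly in C (a sketch; current_sp() is a hypothetical helper and the
+dispatch call is shown inline):
+
+unsigned long sp = current_sp();
+unsigned long base = (unsigned long)irq_stack[smp_processor_id()];
+unsigned long saved_sp = sp;                  /* the asm keeps this in s1 */
+
+if ((sp & ~(_THREAD_SIZE - 1)) != base)       /* not already on the IRQ stack */
+      sp = base + _IRQ_STACK_SIZE;            /* switch to its top */
+
+plat_irq_dispatch();
+sp = saved_sp;                                /* restore on the way out */
+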
+From 93a82f8dbef8ee421fac80a1bd0564124a8ac41c Mon Sep 17 00:00:00 2001
+From: Matt Redfearn <matt.redfearn@imgtec.com>
+Date: Mon, 19 Dec 2016 14:20:58 +0000
+Subject: [PATCH 40/52] MIPS: Only change $28 to thread_info if coming from
+ user mode
+Content-Length: 2181
+Lines: 60
+
+commit 510d86362a27577f5ee23f46cfb354ad49731e61 upstream.
+
+The SAVE_SOME macro is used to save the execution context on all
+exceptions.
+If an exception occurs while executing user code, the stack is switched
+to the kernel's stack for the current task, and register $28 is switched
+to point to the current_thread_info, which is at the bottom of the stack
+region.
+If the exception occurs while executing kernel code, the stack is left in place,
+and this change ensures that register $28 is not updated. This is the
+correct behaviour when the kernel can be executing on the separate irq
+stack, because the thread_info will not be at the base of it.
+
+With this change, register $28 is only switched to its conventional
+kernel usage as the current thread_info pointer at the point at
+which execution enters kernel space. Doing it on every exception was
+redundant but harmless without an IRQ stack; it becomes erroneous once
+one is introduced.
+
+Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
+Acked-by: Jason A. Donenfeld <jason@zx2c4.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: James Hogan <james.hogan@imgtec.com>
+Cc: Paul Burton <paul.burton@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Cc: linux-kernel@vger.kernel.org
+Patchwork: https://patchwork.linux-mips.org/patch/14742/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/include/asm/stackframe.h | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
+index a71da576883c..5347f130f536 100644
+--- a/arch/mips/include/asm/stackframe.h
++++ b/arch/mips/include/asm/stackframe.h
+@@ -216,12 +216,19 @@
+               LONG_S  $25, PT_R25(sp)
+               LONG_S  $28, PT_R28(sp)
+               LONG_S  $31, PT_R31(sp)
++
++              /* Set thread_info if we're coming from user mode */
++              mfc0    k0, CP0_STATUS
++              sll     k0, 3           /* extract cu0 bit */
++              bltz    k0, 9f
++
+               ori     $28, sp, _THREAD_MASK
+               xori    $28, _THREAD_MASK
+ #ifdef CONFIG_CPU_CAVIUM_OCTEON
+               .set    mips64
+               pref    0, 0($28)       /* Prefetch the current pointer */
+ #endif
++9:
+               .set    pop
+               .endm
+-- 
+2.12.2
+
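+The three added instructions test the CU0 bit of CP0_STATUS, which the
+kernel runs with set, so a set bit means the exception came from kernel
+mode and $28 must not be rewritten. A userspace rendering of the bit
+trick (ST0_CU0 taken as bit 28, per asm/mipsregs.h):
+
+#include <stdint.h>
+#include <stdio.h>
+
+#define ST0_CU0 (1u << 28)    /* CP0_STATUS coprocessor-0-usable bit */
+
+int main(void)
+{
+      uint32_t status = ST0_CU0;              /* pretend we trapped from kernel */
+      int32_t k0 = (int32_t)(status << 3);    /* "sll k0, 3": CU0 -> sign bit */
+
+      if (k0 < 0)                             /* "bltz k0, 9f" */
+              puts("from kernel: leave $28 untouched");
+      else
+              puts("from user: point $28 at thread_info");
+      return 0;
+}
+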
+From 3363653512853754fcc7592d2c68c4769a4825c9 Mon Sep 17 00:00:00 2001
+From: Matt Redfearn <matt.redfearn@imgtec.com>
+Date: Mon, 19 Dec 2016 14:20:57 +0000
+Subject: [PATCH 39/52] MIPS: Stack unwinding while on IRQ stack
+Content-Length: 2025
+Lines: 62
+
+commit d42d8d106b0275b027c1e8992c42aecf933436ea upstream.
+
+Within unwind stack, check if the stack pointer being unwound is within
+the CPU's irq_stack and if so use that page rather than the task's stack
+page.
+
+Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
+Acked-by: Jason A. Donenfeld <jason@zx2c4.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Adam Buchbinder <adam.buchbinder@gmail.com>
+Cc: Maciej W. Rozycki <macro@imgtec.com>
+Cc: Marcin Nowakowski <marcin.nowakowski@imgtec.com>
+Cc: Chris Metcalf <cmetcalf@mellanox.com>
+Cc: James Hogan <james.hogan@imgtec.com>
+Cc: Paul Burton <paul.burton@imgtec.com>
+Cc: Jiri Slaby <jslaby@suse.cz>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: linux-mips@linux-mips.org
+Cc: linux-kernel@vger.kernel.org
+Patchwork: https://patchwork.linux-mips.org/patch/14741/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/kernel/process.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index fc537d1b649d..8c26ecac930d 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -32,6 +32,7 @@
+ #include <asm/cpu.h>
+ #include <asm/dsp.h>
+ #include <asm/fpu.h>
++#include <asm/irq.h>
+ #include <asm/msa.h>
+ #include <asm/pgtable.h>
+ #include <asm/mipsregs.h>
+@@ -552,7 +553,19 @@ EXPORT_SYMBOL(unwind_stack_by_address);
+ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
+                          unsigned long pc, unsigned long *ra)
+ {
+-      unsigned long stack_page = (unsigned long)task_stack_page(task);
++      unsigned long stack_page = 0;
++      int cpu;
++
++      for_each_possible_cpu(cpu) {
++              if (on_irq_stack(cpu, *sp)) {
++                      stack_page = (unsigned long)irq_stack[cpu];
++                      break;
++              }
++      }
++
++      if (!stack_page)
++              stack_page = (unsigned long)task_stack_page(task);
++
+       return unwind_stack_by_address(stack_page, sp, pc, ra);
+ }
+ #endif
+-- 
+2.12.2
+
+From d8b8b5528ea5a394074a91e37571bcca081b27e1 Mon Sep 17 00:00:00 2001
+From: Matt Redfearn <matt.redfearn@imgtec.com>
+Date: Mon, 19 Dec 2016 14:20:56 +0000
+Subject: [PATCH 38/52] MIPS: Introduce irq_stack
+Content-Length: 2919
+Lines: 95
+
+commit fe8bd18ffea5327344d4ec2bf11f47951212abd0 upstream.
+
+Allocate a per-cpu irq stack for use within interrupt handlers.
+
+Also add a utility function on_irq_stack to determine if a given stack
+pointer is within the irq stack for that cpu.
+
+Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
+Acked-by: Jason A. Donenfeld <jason@zx2c4.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Chris Metcalf <cmetcalf@mellanox.com>
+Cc: Petr Mladek <pmladek@suse.com>
+Cc: James Hogan <james.hogan@imgtec.com>
+Cc: Paul Burton <paul.burton@imgtec.com>
+Cc: Aaron Tomlin <atomlin@redhat.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: linux-kernel@vger.kernel.org
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/14740/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/include/asm/irq.h    | 12 ++++++++++++
+ arch/mips/kernel/asm-offsets.c |  1 +
+ arch/mips/kernel/irq.c         | 11 +++++++++++
+ 3 files changed, 24 insertions(+)
+
+diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
+index 15e0fecbc300..ebb9efb02502 100644
+--- a/arch/mips/include/asm/irq.h
++++ b/arch/mips/include/asm/irq.h
+@@ -17,6 +17,18 @@
+ #include <irq.h>
++#define IRQ_STACK_SIZE                        THREAD_SIZE
++
++extern void *irq_stack[NR_CPUS];
++
++static inline bool on_irq_stack(int cpu, unsigned long sp)
++{
++      unsigned long low = (unsigned long)irq_stack[cpu];
++      unsigned long high = low + IRQ_STACK_SIZE;
++
++      return (low <= sp && sp <= high);
++}
++
+ #ifdef CONFIG_I8259
+ static inline int irq_canonicalize(int irq)
+ {
+diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
+index 154e2039ea5e..ec053ce7bb38 100644
+--- a/arch/mips/kernel/asm-offsets.c
++++ b/arch/mips/kernel/asm-offsets.c
+@@ -101,6 +101,7 @@ void output_thread_info_defines(void)
+       OFFSET(TI_REGS, thread_info, regs);
+       DEFINE(_THREAD_SIZE, THREAD_SIZE);
+       DEFINE(_THREAD_MASK, THREAD_MASK);
++      DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
+       BLANK();
+ }
+diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
+index 8eb5af805964..dc1180a8bfa1 100644
+--- a/arch/mips/kernel/irq.c
++++ b/arch/mips/kernel/irq.c
+@@ -25,6 +25,8 @@
+ #include <linux/atomic.h>
+ #include <asm/uaccess.h>
++void *irq_stack[NR_CPUS];
++
+ /*
+  * 'what should we do if we get a hw irq event on an illegal vector'.
+  * each architecture has to answer this themselves.
+@@ -55,6 +57,15 @@ void __init init_IRQ(void)
+               irq_set_noprobe(i);
+       arch_init_irq();
++
++      for_each_possible_cpu(i) {
++              int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
++              void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
++
++              irq_stack[i] = s;
++              pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
++                      irq_stack[i], irq_stack[i] + IRQ_STACK_SIZE);
++      }
+ }
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
+-- 
+2.12.2
+
+From 5a527d80836e9ad0dc3dceee7de72f16c817fb8b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl>
+Date: Sun, 20 Nov 2016 16:09:30 +0100
+Subject: [PATCH 37/52] mtd: bcm47xxpart: fix parsing first block after aligned
+ TRX
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Content-Length: 1364
+Lines: 40
+
+commit bd5d21310133921021d78995ad6346f908483124 upstream.
+
+After parsing TRX we should skip to the first block placed behind it.
+Our code was working only for a TRX with a length not aligned to the
+blocksize. In the other case (length aligned) it was missing the block
+placed right after the TRX.
+
+This fixes the calculation and simplifies the comment.
+
+Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
+Signed-off-by: Brian Norris <computersforpeace@gmail.com>
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/bcm47xxpart.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
+index c0720c1ee4c9..5abab8800891 100644
+--- a/drivers/mtd/bcm47xxpart.c
++++ b/drivers/mtd/bcm47xxpart.c
+@@ -225,12 +225,10 @@ static int bcm47xxpart_parse(struct mtd_info *master,
+                       last_trx_part = curr_part - 1;
+-                      /*
+-                       * We have whole TRX scanned, skip to the next part. Use
+-                       * roundown (not roundup), as the loop will increase
+-                       * offset in next step.
+-                       */
+-                      offset = rounddown(offset + trx->length, blocksize);
++                      /* Jump to the end of TRX */
++                      offset = roundup(offset + trx->length, blocksize);
++                      /* Next loop iteration will increase the offset */
++                      offset -= blocksize;
+                       continue;
+               }
+-- 
+2.12.2
+
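+The off-by-one-block is easiest to see with numbers. The loop adds one
+blocksize per iteration, so the offset left behind must sit exactly one
+block before the first block after the TRX; rounddown only achieves that
+for unaligned lengths. A userspace sketch (constants invented):
+
+#include <stdint.h>
+#include <stdio.h>
+
+#define ROUNDDOWN(x, a) ((x) / (a) * (a))
+#define ROUNDUP(x, a)   (((x) + (a) - 1) / (a) * (a))
+
+int main(void)
+{
+      uint32_t blocksize = 0x10000, offset = 0;
+      uint32_t lens[] = { 0x2a510 /* unaligned */, 0x30000 /* aligned */ };
+
+      for (int i = 0; i < 2; i++) {
+              /* where scanning resumes after the loop's own += blocksize */
+              uint32_t old = ROUNDDOWN(offset + lens[i], blocksize) + blocksize;
+              uint32_t new = ROUNDUP(offset + lens[i], blocksize) - blocksize
+                             + blocksize;
+              printf("len=%#07x: old resumes at %#x, fixed resumes at %#x\n",
+                     lens[i], old, new);
+      }
+      /* aligned case: old resumes at 0x40000, skipping the block at 0x30000 */
+      return 0;
+}
+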
+From 297f55bcb62ad0b6b290b01177d9395305d57020 Mon Sep 17 00:00:00 2001
+From: Janusz Dziedzic <januszx.dziedzic@intel.com>
+Date: Mon, 13 Mar 2017 14:11:32 +0200
+Subject: [PATCH 36/52] usb: dwc3: gadget: delay unmap of bounced requests
+Content-Length: 2203
+Lines: 63
+
+commit de288e36fe33f7e06fa272bc8e2f85aa386d99aa upstream.
+
+In the case of bounced ep0 requests, we must delay the DMA unmap until
+after ->complete(), otherwise we might overwrite the contents of req->buf.
+
+This caused problems with the RNDIS gadget.
+
+Signed-off-by: Janusz Dziedzic <januszx.dziedzic@intel.com>
+Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/dwc3/gadget.c | 21 +++++++++++++++++----
+ 1 file changed, 17 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 210ff64857e1..ec7a50f98f57 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -235,6 +235,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
+               int status)
+ {
+       struct dwc3                     *dwc = dep->dwc;
++      unsigned int                    unmap_after_complete = false;
+       int                             i;
+       if (req->queued) {
+@@ -259,11 +260,19 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
+       if (req->request.status == -EINPROGRESS)
+               req->request.status = status;
+-      if (dwc->ep0_bounced && dep->number <= 1)
++      /*
++       * NOTICE we don't want to unmap before calling ->complete() if we're
++       * dealing with a bounced ep0 request. If we unmap it here, we would end
++       * up overwriting the contents of req->buf and this could confuse the
++       * gadget driver.
++       */
++      if (dwc->ep0_bounced && dep->number <= 1) {
+               dwc->ep0_bounced = false;
+-
+-      usb_gadget_unmap_request(&dwc->gadget, &req->request,
+-                      req->direction);
++              unmap_after_complete = true;
++      } else {
++              usb_gadget_unmap_request(&dwc->gadget,
++                              &req->request, req->direction);
++      }
+       dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
+                       req, dep->name, req->request.actual,
+@@ -273,6 +282,10 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
+       spin_unlock(&dwc->lock);
+       usb_gadget_giveback_request(&dep->endpoint, &req->request);
+       spin_lock(&dwc->lock);
++
++      if (unmap_after_complete)
++              usb_gadget_unmap_request(&dwc->gadget,
++                              &req->request, req->direction);
+ }
+ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
+-- 
+2.12.2
+
+From 8cfaf0ae1f566ddfcda661bd81b625a71b16459a Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Mon, 13 Mar 2017 17:06:17 +0000
+Subject: [PATCH 35/52] drm/i915: Stop using RP_DOWN_EI on Baytrail
+Content-Length: 6512
+Lines: 177
+
+commit 8f68d591d4765b2e1ce9d916ac7bc5583285c4ad upstream.
+
+On Baytrail, we manually calculate busyness over the evaluation interval
+to avoid issues with miscalculations with RC6 enabled. However, it turns
+out that the DOWN_EI interrupt generator is completely bust - it
+operates in two modes, continuous or never, neither of which is
+conducive to good behaviour. Stop unmasking the DOWN_EI interrupt and
+just compute everything from the UP_EI, which does seem to correspond
+to the desired interval.
+
+v2: Fixup gen6_rps_pm_mask() as well
+v3: Inline vlv_c0_above() to combine the now identical elapsed
+calculation for up/down and simplify the threshold testing
+
+Fixes: 43cf3bf084ba ("drm/i915: Improved w/a for rps on Baytrail")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Link: http://patchwork.freedesktop.org/patch/msgid/20170309211232.28878-1-chris@chris-wilson.co.uk
+Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Link: http://patchwork.freedesktop.org/patch/msgid/20170313170617.31564-1-chris@chris-wilson.co.uk
+(cherry picked from commit e0e8c7cb6eb68e9256de2d8cbeb481d3701c05ac)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_drv.h |  2 +-
+ drivers/gpu/drm/i915/i915_irq.c | 73 ++++++++++++++++-------------------------
+ drivers/gpu/drm/i915/intel_pm.c |  5 +--
+ 3 files changed, 32 insertions(+), 48 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index fb9f647bb5cd..5044f2257e89 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -1159,7 +1159,7 @@ struct intel_gen6_power_mgmt {
+       struct intel_rps_client semaphores, mmioflips;
+       /* manual wa residency calculations */
+-      struct intel_rps_ei up_ei, down_ei;
++      struct intel_rps_ei ei;
+       /*
+        * Protects RPS/RC6 register access and PCU communication.
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 0f42a2782afc..b7b0a38acd67 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -994,68 +994,51 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv,
+       ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
+ }
+-static bool vlv_c0_above(struct drm_i915_private *dev_priv,
+-                       const struct intel_rps_ei *old,
+-                       const struct intel_rps_ei *now,
+-                       int threshold)
+-{
+-      u64 time, c0;
+-      unsigned int mul = 100;
+-
+-      if (old->cz_clock == 0)
+-              return false;
+-
+-      if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
+-              mul <<= 8;
+-
+-      time = now->cz_clock - old->cz_clock;
+-      time *= threshold * dev_priv->czclk_freq;
+-
+-      /* Workload can be split between render + media, e.g. SwapBuffers
+-       * being blitted in X after being rendered in mesa. To account for
+-       * this we need to combine both engines into our activity counter.
+-       */
+-      c0 = now->render_c0 - old->render_c0;
+-      c0 += now->media_c0 - old->media_c0;
+-      c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
+-
+-      return c0 >= time;
+-}
+-
+ void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
+ {
+-      vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
+-      dev_priv->rps.up_ei = dev_priv->rps.down_ei;
++      memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
+ }
+ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
+ {
++      const struct intel_rps_ei *prev = &dev_priv->rps.ei;
+       struct intel_rps_ei now;
+       u32 events = 0;
+-      if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
++      if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
+               return 0;
+       vlv_c0_read(dev_priv, &now);
+       if (now.cz_clock == 0)
+               return 0;
+-      if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
+-              if (!vlv_c0_above(dev_priv,
+-                                &dev_priv->rps.down_ei, &now,
+-                                dev_priv->rps.down_threshold))
+-                      events |= GEN6_PM_RP_DOWN_THRESHOLD;
+-              dev_priv->rps.down_ei = now;
+-      }
++      if (prev->cz_clock) {
++              u64 time, c0;
++              unsigned int mul;
+-      if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
+-              if (vlv_c0_above(dev_priv,
+-                               &dev_priv->rps.up_ei, &now,
+-                               dev_priv->rps.up_threshold))
+-                      events |= GEN6_PM_RP_UP_THRESHOLD;
+-              dev_priv->rps.up_ei = now;
++              mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
++              if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
++                      mul <<= 8;
++
++              time = now.cz_clock - prev->cz_clock;
++              time *= dev_priv->czclk_freq;
++
++              /* Workload can be split between render + media,
++               * e.g. SwapBuffers being blitted in X after being rendered in
++               * mesa. To account for this we need to combine both engines
++               * into our activity counter.
++               */
++              c0 = now.render_c0 - prev->render_c0;
++              c0 += now.media_c0 - prev->media_c0;
++              c0 *= mul;
++
++              if (c0 > time * dev_priv->rps.up_threshold)
++                      events = GEN6_PM_RP_UP_THRESHOLD;
++              else if (c0 < time * dev_priv->rps.down_threshold)
++                      events = GEN6_PM_RP_DOWN_THRESHOLD;
+       }
++      dev_priv->rps.ei = now;
+       return events;
+ }
+@@ -4390,7 +4373,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
+       /* Let's track the enabled rps events */
+       if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+               /* WaGsvRC0ResidencyMethod:vlv */
+-              dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
++              dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
+       else
+               dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index e4031fcac4bf..fd4690ed93c0 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -4411,8 +4411,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
+ {
+       u32 mask = 0;
++      /* We use UP_EI_EXPIRED interupts for both up/down in manual mode */
+       if (val > dev_priv->rps.min_freq_softlimit)
+-              mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
++              mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
+       if (val < dev_priv->rps.max_freq_softlimit)
+               mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
+@@ -4516,7 +4517,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
+ {
+       mutex_lock(&dev_priv->rps.hw_lock);
+       if (dev_priv->rps.enabled) {
+-              if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
++              if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
+                       gen6_rps_reset_ei(dev_priv);
+               I915_WRITE(GEN6_PMINTRMSK,
+                          gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+-- 
+2.12.2
+
+From cb0a2cba62d58caf6668f630858acc15ed40ee23 Mon Sep 17 00:00:00 2001
+From: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Date: Wed, 15 Feb 2017 15:52:59 +0200
+Subject: [PATCH 34/52] drm/i915: Avoid tweaking evaluation thresholds on
+ Baytrail v3
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Content-Length: 3679
+Lines: 88
+
+commit 34dc8993eef63681b062871413a9484008a2a78f upstream.
+
+Certain Baytrails, namely the 4 cpu core variants, have been
+plagued by spurious system hangs, mostly occurring with light loads.
+
+Multiple bisects by various people point to a commit which changes the
+reclocking strategy for Baytrail to follow its bigger brethren:
+commit 8fb55197e64d ("drm/i915: Agressive downclocking on Baytrail")
+
+There is also a review comment attached to this commit from Deepak S
+on avoiding punit access on Cherryview, and thus it was excluded from
+the common reclocking path. By taking the same approach and omitting
+the punit access by not tweaking the thresholds when the hardware
+has been asked to move to a different frequency, considerable gains
+in stability have been observed.
+
+With a J1900 box, a light render/video load would end up in a system
+hang usually in less than 12 hours. With this patch applied, the
+cumulative uptime has now been 34 days without issues. To provoke the
+system hang, light loads on both render and bsd engines have been used
+in parallel:
+glxgears >/dev/null 2>/dev/null &
+mpv --vo=vaapi --hwdec=vaapi --loop=inf vid.mp4
+
+So far, the author has not witnessed a system hang with the above load
+and this patch applied. Reports from the tenacious people at
+kernel bugzilla are also promising.
+
+Considering that punit accesses are considerably less frequent with
+this patch, there is a possibility that this will push the still
+unknown root cause past the triggering point on most loads.
+
+But as we now can reliably reproduce the hang independently,
+we can reduce the pain that users are having and use
+static thresholds until a root cause is found.
+
+v3: don't break debugfs and simplification (Chris Wilson)
+
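+In code terms the workaround is just a guard; a hedged sketch with
+illustrative names, where the affected platform skips the hardware
+writes but the cached software thresholds are still updated so debugfs
+and the interrupt logic keep working:
+
+  struct rps_state { unsigned int up_threshold, down_threshold; };
+
+  /* placeholder for the elided GEN6_RP_* register writes */
+  static void write_threshold_registers(unsigned int up, unsigned int down)
+  {
+  }
+
+  static void set_rps_thresholds(struct rps_state *rps, int is_byt,
+                                 unsigned int up, unsigned int down)
+  {
+          if (!is_byt)                    /* byt keeps boot-time values */
+                  write_threshold_registers(up, down);
+
+          rps->up_threshold = up;         /* software state always synced */
+          rps->down_threshold = down;
+  }
+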
+References: https://bugzilla.kernel.org/show_bug.cgi?id=109051
+Cc: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Cc: Len Brown <len.brown@intel.com>
+Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: Jani Nikula <jani.nikula@intel.com>
+Cc: fritsch@xbmc.org
+Cc: miku@iki.fi
+Cc: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
+CC: Michal Feix <michal@feix.cz>
+Cc: Hans de Goede <hdegoede@redhat.com>
+Cc: Deepak S <deepak.s@linux.intel.com>
+Cc: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
+Link: http://patchwork.freedesktop.org/patch/msgid/1487166779-26945-1-git-send-email-mika.kuoppala@intel.com
+(cherry picked from commit 6067a27d1f0184596d51decbac1c1fdc4acb012f)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/intel_pm.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index e7c18519274a..e4031fcac4bf 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -4376,6 +4376,12 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+               break;
+       }
++      /* When byt can survive without system hang with dynamic
++       * sw freq adjustments, this restriction can be lifted.
++       */
++      if (IS_VALLEYVIEW(dev_priv))
++              goto skip_hw_write;
++
+       I915_WRITE(GEN6_RP_UP_EI,
+               GT_INTERVAL_FROM_US(dev_priv, ei_up));
+       I915_WRITE(GEN6_RP_UP_THRESHOLD,
+@@ -4394,6 +4400,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+                   GEN6_RP_UP_BUSY_AVG |
+                   GEN6_RP_DOWN_IDLE_AVG);
++skip_hw_write:
+       dev_priv->rps.power = new_power;
+       dev_priv->rps.up_threshold = threshold_up;
+       dev_priv->rps.down_threshold = threshold_down;
+-- 
+2.12.2
+
+From ec5e61608ad1919c1ff3cc0369dbf1b1ede9eb88 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Wed, 12 Apr 2017 12:38:50 +0200
+Subject: [PATCH 33/52] Linux 4.4.61
+Content-Length: 301
+Lines: 18
+
+---
+ Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Makefile b/Makefile
+index fb7c2b40753d..ef5045b8201d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 60
++SUBLEVEL = 61
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+-- 
+2.12.2
+
+From b73d08ce20c5cb2e0cec8c019a27b9574e2c4ec2 Mon Sep 17 00:00:00 2001
+From: Chris Salls <salls@cs.ucsb.edu>
+Date: Fri, 7 Apr 2017 23:48:11 -0700
+Subject: [PATCH 32/52] mm/mempolicy.c: fix error handling in set_mempolicy and
+ mbind.
+Content-Length: 2383
+Lines: 73
+
+commit cf01fb9985e8deb25ccf0ea54d916b8871ae0e62 upstream.
+
+In the case that compat_get_bitmap fails we do not want to copy the
+bitmap to the user as it will contain uninitialized stack data and leak
+sensitive data.
+
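+The shape of the fix, as a hedged standalone sketch (the helpers below
+are trivial stand-ins for compat_get_bitmap()/copy_to_user(), not the
+kernel API): fail with -EFAULT immediately after the step that went
+wrong, so a bitmap that was never filled is never copied out.
+
+  #include <errno.h>
+  #include <stdint.h>
+
+  /* pretend user accesses that always succeed */
+  static int fetch_bits(uint64_t *dst, const uint64_t *usrc)
+  { *dst = *usrc; return 0; }
+  static int store_bits(uint64_t *udst, const uint64_t *src)
+  { *udst = *src; return 0; }
+
+  static long set_policy_compat(uint64_t *unmask)
+  {
+          uint64_t bm;
+
+          if (unmask) {
+                  if (fetch_bits(&bm, unmask))
+                          return -EFAULT; /* bm never leaves the stack */
+                  if (store_bits(unmask, &bm))
+                          return -EFAULT;
+          }
+          return 0;               /* would chain to sys_set_mempolicy() */
+  }
+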
+Signed-off-by: Chris Salls <salls@cs.ucsb.edu>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/mempolicy.c | 20 ++++++++------------
+ 1 file changed, 8 insertions(+), 12 deletions(-)
+
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index a4217fe60dff..e09b1a0e2cfe 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -1492,7 +1492,6 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
+ COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
+                      compat_ulong_t, maxnode)
+ {
+-      long err = 0;
+       unsigned long __user *nm = NULL;
+       unsigned long nr_bits, alloc_size;
+       DECLARE_BITMAP(bm, MAX_NUMNODES);
+@@ -1501,14 +1500,13 @@ COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
+       alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
+       if (nmask) {
+-              err = compat_get_bitmap(bm, nmask, nr_bits);
++              if (compat_get_bitmap(bm, nmask, nr_bits))
++                      return -EFAULT;
+               nm = compat_alloc_user_space(alloc_size);
+-              err |= copy_to_user(nm, bm, alloc_size);
++              if (copy_to_user(nm, bm, alloc_size))
++                      return -EFAULT;
+       }
+-      if (err)
+-              return -EFAULT;
+-
+       return sys_set_mempolicy(mode, nm, nr_bits+1);
+ }
+@@ -1516,7 +1514,6 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
+                      compat_ulong_t, mode, compat_ulong_t __user *, nmask,
+                      compat_ulong_t, maxnode, compat_ulong_t, flags)
+ {
+-      long err = 0;
+       unsigned long __user *nm = NULL;
+       unsigned long nr_bits, alloc_size;
+       nodemask_t bm;
+@@ -1525,14 +1522,13 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
+       alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
+       if (nmask) {
+-              err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
++              if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
++                      return -EFAULT;
+               nm = compat_alloc_user_space(alloc_size);
+-              err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
++              if (copy_to_user(nm, nodes_addr(bm), alloc_size))
++                      return -EFAULT;
+       }
+-      if (err)
+-              return -EFAULT;
+-
+       return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
+ }
+-- 
+2.12.2
+
+From 2d1af1b7025f96c86938af7f7c54d60adb4773fe Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhc@lemote.com>
+Date: Thu, 16 Mar 2017 21:00:27 +0800
+Subject: [PATCH 31/52] MIPS: Flush wrong invalid FTLB entry for huge page
+Content-Length: 3637
+Lines: 98
+
+commit 0115f6cbf26663c86496bc56eeea293f85b77897 upstream.
+
+On VTLB+FTLB platforms (such as Loongson-3A R2), FTLB's pagesize is
+usually configured the same as PAGE_SIZE. In such a case, a huge page
+entry is not suitable to be written into the FTLB.
+
+Unfortunately, when a huge page is created, its page table entries
+haven't yet been created. Then the TLB refill handler will fetch an
+invalid page table entry which has no "HUGE" bit, and this entry may be
+written to FTLB. Since it is invalid, TLB load/store handler will then
+use tlbwi to write the valid entry at the same place. However, the
+valid entry is a huge page entry which isn't suitable for FTLB.
+
+Our solution is to modify build_huge_handler_tail. Flush the invalid
+old entry (whether it is in the FTLB or the VTLB, in order to reduce
+branches) and use tlbwr to write the valid new entry.
+
+Signed-off-by: Rui Wang <wangr@lemote.com>
+Signed-off-by: Huacai Chen <chenhc@lemote.com>
+Cc: John Crispin <john@phrozen.org>
+Cc: Steven J . Hill <Steven.Hill@caviumnetworks.com>
+Cc: Fuxin Zhang <zhangfx@lemote.com>
+Cc: Zhangjin Wu <wuzhangjin@gmail.com>
+Cc: Huacai Chen <chenhc@lemote.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/15754/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/mm/tlbex.c | 25 +++++++++++++++++++++----
+ 1 file changed, 21 insertions(+), 4 deletions(-)
+
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index 29f73e00253d..63b7d6f82d24 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -757,7 +757,8 @@ static void build_huge_update_entries(u32 **p, unsigned int pte,
+ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
+                                   struct uasm_label **l,
+                                   unsigned int pte,
+-                                  unsigned int ptr)
++                                  unsigned int ptr,
++                                  unsigned int flush)
+ {
+ #ifdef CONFIG_SMP
+       UASM_i_SC(p, pte, 0, ptr);
+@@ -766,6 +767,22 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
+ #else
+       UASM_i_SW(p, pte, 0, ptr);
+ #endif
++      if (cpu_has_ftlb && flush) {
++              BUG_ON(!cpu_has_tlbinv);
++
++              UASM_i_MFC0(p, ptr, C0_ENTRYHI);
++              uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
++              UASM_i_MTC0(p, ptr, C0_ENTRYHI);
++              build_tlb_write_entry(p, l, r, tlb_indexed);
++
++              uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
++              UASM_i_MTC0(p, ptr, C0_ENTRYHI);
++              build_huge_update_entries(p, pte, ptr);
++              build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
++
++              return;
++      }
++
+       build_huge_update_entries(p, pte, ptr);
+       build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
+ }
+@@ -2082,7 +2099,7 @@ static void build_r4000_tlb_load_handler(void)
+               uasm_l_tlbl_goaround2(&l, p);
+       }
+       uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
+-      build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
++      build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
+ #endif
+       uasm_l_nopage_tlbl(&l, p);
+@@ -2137,7 +2154,7 @@ static void build_r4000_tlb_store_handler(void)
+       build_tlb_probe_entry(&p);
+       uasm_i_ori(&p, wr.r1, wr.r1,
+                  _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
+-      build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
++      build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
+ #endif
+       uasm_l_nopage_tlbs(&l, p);
+@@ -2193,7 +2210,7 @@ static void build_r4000_tlb_modify_handler(void)
+       build_tlb_probe_entry(&p);
+       uasm_i_ori(&p, wr.r1, wr.r1,
+                  _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
+-      build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
++      build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
+ #endif
+       uasm_l_nopage_tlbm(&l, p);
+-- 
+2.12.2
+
+From 55f67b97ca05df00c3b61123a7e9e363819c60ee Mon Sep 17 00:00:00 2001
+From: Hauke Mehrtens <hauke@hauke-m.de>
+Date: Wed, 15 Mar 2017 23:26:42 +0100
+Subject: [PATCH 30/52] MIPS: Lantiq: fix missing xbar kernel panic
+Content-Length: 1672
+Lines: 41
+
+commit 6ef90877eee63a0d03e83183bb44b64229b624e6 upstream.
+
+Commit 08b3c894e565 ("MIPS: lantiq: Disable xbar fpi burst mode")
+accidentally requested the resources from the pmu address region
+instead of the xbar registers region, but the wrong check of the
+return value of request_mem_region() hid the error. Commit 98ea51cb0c8c
+("MIPS: Lantiq: Fix another request_mem_region() return code check")
+fixed that check, which exposed the problem and made the kernel panic.
+This patch now makes use of the correct memory region for the cross bar.
+
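+A hedged sketch of the corrected lookup (the kernel APIs are real, but
+the function, the panic strings and the NULL-style check on
+request_mem_region()'s pointer return are written idiomatically here
+rather than copied from sysctrl.c):
+
+  #include <linux/kernel.h>
+  #include <linux/ioport.h>
+  #include <linux/of_address.h>
+
+  static void __init xbar_claim(struct device_node *np_xbar)
+  {
+          struct resource res_xbar;
+
+          /* resolve the xbar node's own register window, not the pmu's */
+          if (of_address_to_resource(np_xbar, 0, &res_xbar))
+                  panic("Failed to get xbar resources");
+          if (!request_mem_region(res_xbar.start, resource_size(&res_xbar),
+                                  res_xbar.name))
+                  panic("Failed to request xbar resources");
+  }
+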
+Fixes: 08b3c894e565 ("MIPS: lantiq: Disable xbar fpi burst mode")
+Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
+Cc: John Crispin <john@phrozen.org>
+Cc: james.hogan@imgtec.com
+Cc: arnd@arndb.de
+Cc: sergei.shtylyov@cogentembedded.com
+Cc: john@phrozen.org
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/15751
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/lantiq/xway/sysctrl.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
+index 3e390a4e3897..daf580ce5ca2 100644
+--- a/arch/mips/lantiq/xway/sysctrl.c
++++ b/arch/mips/lantiq/xway/sysctrl.c
+@@ -467,7 +467,7 @@ void __init ltq_soc_init(void)
+               if (!np_xbar)
+                       panic("Failed to load xbar nodes from devicetree");
+-              if (of_address_to_resource(np_pmu, 0, &res_xbar))
++              if (of_address_to_resource(np_xbar, 0, &res_xbar))
+                       panic("Failed to get xbar resources");
+               if (request_mem_region(res_xbar.start, resource_size(&res_xbar),
+                       res_xbar.name) < 0)
+-- 
+2.12.2
+
+From 768019030ab58e9622caeb6c5a06553260609327 Mon Sep 17 00:00:00 2001
+From: Paul Burton <paul.burton@imgtec.com>
+Date: Thu, 23 Feb 2017 14:50:24 +0000
+Subject: [PATCH 29/52] MIPS: End spinlocks with .insn
+Content-Length: 2774
+Lines: 73
+
+commit 4b5347a24a0f2d3272032c120664b484478455de upstream.
+
+When building for microMIPS we need to ensure that the assembler always
+knows that there is code at the target of a branch or jump. Recent
+toolchains will fail to link a microMIPS kernel when this isn't the case
+due to what it thinks is a branch to non-microMIPS code.
+
+mips-mti-linux-gnu-ld kernel/built-in.o: .spinlock.text+0x2fc: Unsupported branch between ISA modes.
+mips-mti-linux-gnu-ld final link failed: Bad value
+
+This is due to inline assembly labels in spinlock.h not being followed
+by an instruction mnemonic, either due to a .subsection pseudo-op or the
+end of the inline asm block.
+
+Fix this with a .insn directive after such labels.
+
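+A hedged, cut-down illustration of the pattern (a hypothetical trylock,
+not the spinlock.h code): label 2 is only followed by directives and
+the end of the asm block, so it needs .insn to stay marked as
+microMIPS code.
+
+  static inline int example_trylock(volatile int *lock)
+  {
+          int tmp;
+
+          __asm__ __volatile__(
+          "       .set    push                    \n"
+          "       .set    noreorder               \n"
+          "1:     ll      %[tmp], %[mem]          \n"
+          "       bnez    %[tmp], 2f              \n" /* already held */
+          "        li     %[tmp], 0               \n" /* delay slot */
+          "       li      %[tmp], 1               \n"
+          "       sc      %[tmp], %[mem]          \n"
+          "       beqz    %[tmp], 1b              \n" /* lost the race */
+          "        nop                            \n"
+          "2:     .insn                           \n" /* <-- the fix */
+          "       .set    pop                     \n"
+          : [tmp] "=&r" (tmp), [mem] "+m" (*lock)
+          :
+          : "memory");
+
+          return tmp;                     /* 1 if acquired, 0 if busy */
+  }
+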
+Signed-off-by: Paul Burton <paul.burton@imgtec.com>
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Reviewed-by: Maciej W. Rozycki <macro@imgtec.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: linux-mips@linux-mips.org
+Cc: linux-kernel@vger.kernel.org
+Patchwork: https://patchwork.linux-mips.org/patch/15325/
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/include/asm/spinlock.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
+index 40196bebe849..2365ce0ad8f2 100644
+--- a/arch/mips/include/asm/spinlock.h
++++ b/arch/mips/include/asm/spinlock.h
+@@ -112,7 +112,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
+               "       andi    %[ticket], %[ticket], 0xffff            \n"
+               "       bne     %[ticket], %[my_ticket], 4f             \n"
+               "        subu   %[ticket], %[my_ticket], %[ticket]      \n"
+-              "2:                                                     \n"
++              "2:     .insn                                           \n"
+               "       .subsection 2                                   \n"
+               "4:     andi    %[ticket], %[ticket], 0xffff            \n"
+               "       sll     %[ticket], 5                            \n"
+@@ -187,7 +187,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
+               "       sc      %[ticket], %[ticket_ptr]                \n"
+               "       beqz    %[ticket], 1b                           \n"
+               "        li     %[ticket], 1                            \n"
+-              "2:                                                     \n"
++              "2:     .insn                                           \n"
+               "       .subsection 2                                   \n"
+               "3:     b       2b                                      \n"
+               "        li     %[ticket], 0                            \n"
+@@ -367,7 +367,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
+               "       .set    reorder                                 \n"
+               __WEAK_LLSC_MB
+               "       li      %2, 1                                   \n"
+-              "2:                                                     \n"
++              "2:     .insn                                           \n"
+               : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+               : GCC_OFF_SMALL_ASM() (rw->lock)
+               : "memory");
+@@ -407,7 +407,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
+                       "       lui     %1, 0x8000                      \n"
+                       "       sc      %1, %0                          \n"
+                       "       li      %2, 1                           \n"
+-                      "2:                                             \n"
++                      "2:     .insn                                   \n"
+                       : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
+                         "=&r" (ret)
+                       : GCC_OFF_SMALL_ASM() (rw->lock)
+-- 
+2.12.2
+
+From 22665fe0a60a73734889e1cfc7f8fba4036e0b9a Mon Sep 17 00:00:00 2001
+From: John Crispin <john@phrozen.org>
+Date: Sat, 25 Feb 2017 11:54:23 +0100
+Subject: [PATCH 28/52] MIPS: ralink: Fix typos in rt3883 pinctrl
+Content-Length: 1801
+Lines: 40
+
+commit 7c5a3d813050ee235817b0220dd8c42359a9efd8 upstream.
+
+There are two copy & paste errors in the definition of the 5GHz LNA and
+second ethernet pinmux.
+
+Fixes: f576fb6a0700 ("MIPS: ralink: cleanup the soc specific pinmux data")
+Signed-off-by: John Crispin <john@phrozen.org>
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/15328/
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/ralink/rt3883.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
+index f42834c7f007..3c575093f8f1 100644
+--- a/arch/mips/ralink/rt3883.c
++++ b/arch/mips/ralink/rt3883.c
+@@ -36,7 +36,7 @@ static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
+ static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
+ static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
+ static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
+-static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) };
++static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
+ static struct rt2880_pmx_func pci_func[] = {
+       FUNC("pci-dev", 0, 40, 32),
+       FUNC("pci-host2", 1, 40, 32),
+@@ -44,7 +44,7 @@ static struct rt2880_pmx_func pci_func[] = {
+       FUNC("pci-fnc", 3, 40, 32)
+ };
+ static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
+-static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) };
++static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
+ static struct rt2880_pmx_group rt3883_pinmux_data[] = {
+       GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
+-- 
+2.12.2
+
+From 394d71b1ea24c248a8d497d10635b86dd2fccef7 Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Thu, 16 Feb 2017 12:39:01 +0000
+Subject: [PATCH 27/52] MIPS: Force o32 fp64 support on 32bit MIPS64r6 kernels
+Content-Length: 1610
+Lines: 42
+
+commit 2e6c7747730296a6d4fd700894286db1132598c4 upstream.
+
+When a 32-bit kernel is configured to support MIPS64r6 (CPU_MIPS64_R6),
+MIPS_O32_FP64_SUPPORT won't be selected as it should be because
+MIPS32_O32 is disabled (o32 is already the default ABI available on
+32-bit kernels).
+
+This results in userland FP breakage as CP0_Status.FR is read-only 1
+since r6 (when an FPU is present) so __enable_fpu() will fail to clear
+FR. This causes the FPU emulator to get used which will incorrectly
+emulate 32-bit FPU registers.
+
+Force o32 fp64 support in this case by also selecting
+MIPS_O32_FP64_SUPPORT from CPU_MIPS64_R6 if 32BIT.
+
+Fixes: 4e9d324d4288 ("MIPS: Require O32 FP64 support for MIPS64 with O32 compat")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Reviewed-by: Paul Burton <paul.burton@imgtec.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/15310/
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index db459612de44..75bfca69e418 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -1412,7 +1412,7 @@ config CPU_MIPS32_R6
+       select CPU_SUPPORTS_MSA
+       select GENERIC_CSUM
+       select HAVE_KVM
+-      select MIPS_O32_FP64_SUPPORT
++      select MIPS_O32_FP64_SUPPORT if 32BIT
+       help
+         Choose this option to build a kernel for release 6 or later of the
+         MIPS32 architecture.  New MIPS processors, starting with the Warrior
+-- 
+2.12.2
+
+From 0f5d17253b2868a3e75d623dcb2514e305bc7447 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+Date: Mon, 27 Mar 2017 09:48:04 +0200
+Subject: [PATCH 26/52] s390/uaccess: get_user() should zero on failure (again)
+Content-Length: 1611
+Lines: 40
+
+commit d09c5373e8e4eaaa09233552cbf75dc4c4f21203 upstream.
+
+Commit fd2d2b191fe7 ("s390: get_user() should zero on failure")
+intended to fix s390's get_user() implementation which did not zero
+the target operand if the read from user space faulted. Unfortunately
+the patch has no effect: the corresponding inline assembly specifies
+that the operand is only written to ("=") and the previous value is
+discarded.
+
+Therefore the compiler is free to and actually does omit the zero
+initialization.
+
+To fix this, simply change the constraint modifier to "+", so the
+compiler cannot omit the initialization anymore.
+
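+The underlying rule can be shown with a trivial architecture-neutral
+sketch (empty asm bodies standing in for the faulting access): with
+"=", the prior value is dead and the compiler may drop the zeroing;
+with "+", the variable is also an input, so the initialization must
+survive.
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          int a = 0, b = 0;
+
+          /* "=r": output only -- the zero store may be optimized away */
+          asm volatile("" : "=r" (a));
+
+          /* "+r": read-write -- the zero must be materialized first */
+          asm volatile("" : "+r" (b));
+
+          printf("%d %d\n", a, b);        /* a may be garbage; b is 0 */
+          return 0;
+  }
+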
+Fixes: c9ca78415ac1 ("s390/uaccess: provide inline variants of get_user/put_user")
+Fixes: fd2d2b191fe7 ("s390: get_user() should zero on failure")
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/include/asm/uaccess.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
+index 5c7381c5ad7f..c8d837f0fbbc 100644
+--- a/arch/s390/include/asm/uaccess.h
++++ b/arch/s390/include/asm/uaccess.h
+@@ -150,7 +150,7 @@ unsigned long __must_check __copy_to_user(void __user *to, const void *from,
+               "       jg      2b\n"                           \
+               ".popsection\n"                                 \
+               EX_TABLE(0b,3b) EX_TABLE(1b,3b)                 \
+-              : "=d" (__rc), "=Q" (*(to))                     \
++              : "=d" (__rc), "+Q" (*(to))                     \
+               : "d" (size), "Q" (*(from)),                    \
+                 "d" (__reg0), "K" (-EFAULT)                   \
+               : "cc");                                        \
+-- 
+2.12.2
+
+From 765ee8ce4e3d059378aefc40666b024e4cd494f2 Mon Sep 17 00:00:00 2001
+From: Marcelo Henrique Cerri <marcelo.cerri@canonical.com>
+Date: Mon, 13 Mar 2017 12:14:58 -0300
+Subject: [PATCH 25/52] s390/decompressor: fix initrd corruption caused by bss
+ clear
+Content-Length: 3239
+Lines: 81
+
+commit d82c0d12c92705ef468683c9b7a8298dd61ed191 upstream.
+
+Reorder the operations in decompress_kernel() to ensure the initrd is
+moved to a safe location before the bss section is zeroed.
+
+During decompression bss can overlap with the initrd and this can
+corrupt the initrd contents depending on the size of the compressed
+kernel (which affects where the initrd is placed by the bootloader) and
+the size of the bss section of the decompressor.
+
+Also use the correct initrd size when checking for overlaps with
+parmblock.
+
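+The ordering constraint in isolation, as a hedged sketch (buffer names
+are illustrative; in the decompressor they come from linker symbols
+and INITRD_START/INITRD_SIZE): relocate the initrd first, with memmove
+because the regions may overlap, and only then clear bss.
+
+  #include <string.h>
+
+  static void reorder_example(char *kernel_end, char *bss_start,
+                              char *bss_end, char *initrd,
+                              unsigned long initrd_size)
+  {
+          /* 1) move the initrd out of the region about to be clobbered */
+          if (initrd < kernel_end)
+                  memmove(kernel_end, initrd, initrd_size);
+
+          /* 2) only now is it safe to zero the decompressor's bss */
+          memset(bss_start, 0, bss_end - bss_start);
+  }
+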
+Fixes: 06c0dd72aea3 ([S390] fix boot failures with compressed kernels)
+Reviewed-by: Joy Latten <joy.latten@canonical.com>
+Reviewed-by: Vineetha HariPai <vineetha.hari.pai@canonical.com>
+Signed-off-by: Marcelo Henrique Cerri <marcelo.cerri@canonical.com>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/boot/compressed/misc.c | 35 +++++++++++++++++++----------------
+ 1 file changed, 19 insertions(+), 16 deletions(-)
+
+diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
+index 4da604ebf6fd..ca15613eaaa4 100644
+--- a/arch/s390/boot/compressed/misc.c
++++ b/arch/s390/boot/compressed/misc.c
+@@ -141,31 +141,34 @@ static void check_ipl_parmblock(void *start, unsigned long size)
+ unsigned long decompress_kernel(void)
+ {
+-      unsigned long output_addr;
+-      unsigned char *output;
++      void *output, *kernel_end;
+-      output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
+-      check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
+-      memset(&_bss, 0, &_ebss - &_bss);
+-      free_mem_ptr = (unsigned long)&_end;
+-      free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
+-      output = (unsigned char *) output_addr;
++      output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE);
++      kernel_end = output + SZ__bss_start;
++      check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);
+ #ifdef CONFIG_BLK_DEV_INITRD
+       /*
+        * Move the initrd right behind the end of the decompressed
+-       * kernel image.
++       * kernel image. This also prevents initrd corruption caused by
++       * bss clearing since kernel_end will always be located behind the
++       * current bss section..
+        */
+-      if (INITRD_START && INITRD_SIZE &&
+-          INITRD_START < (unsigned long) output + SZ__bss_start) {
+-              check_ipl_parmblock(output + SZ__bss_start,
+-                                  INITRD_START + INITRD_SIZE);
+-              memmove(output + SZ__bss_start,
+-                      (void *) INITRD_START, INITRD_SIZE);
+-              INITRD_START = (unsigned long) output + SZ__bss_start;
++      if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
++              check_ipl_parmblock(kernel_end, INITRD_SIZE);
++              memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
++              INITRD_START = (unsigned long) kernel_end;
+       }
+ #endif
++      /*
++       * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
++       * initialized afterwards since they reside in bss.
++       */
++      memset(&_bss, 0, &_ebss - &_bss);
++      free_mem_ptr = (unsigned long) &_end;
++      free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
++
+       puts("Uncompressing Linux... ");
+       __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
+       puts("Ok, booting the kernel.\n");
+-- 
+2.12.2
+
+From 1c47303355dc970d692f3625839da43f6b969622 Mon Sep 17 00:00:00 2001
+From: Tobias Klauser <tklauser@distanz.ch>
+Date: Sun, 2 Apr 2017 20:08:04 -0700
+Subject: [PATCH 24/52] nios2: reserve boot memory for device tree
+Content-Length: 2379
+Lines: 69
+
+commit 921d701e6f31e1ffaca3560416af1aa04edb4c4f upstream.
+
+Make sure to reserve the boot memory for the flattened device tree.
+Otherwise it might get overwritten, e.g. when initial_boot_params is
+copied, leading to a corrupted FDT and a boot hang/crash:
+
+  bootconsole [early0] enabled
+  Early console on uart16650 initialized at 0xf8001600
+  OF: fdt: Error -11 processing FDT
+  Kernel panic - not syncing: setup_cpuinfo: No CPU found in devicetree!
+
+  ---[ end Kernel panic - not syncing: setup_cpuinfo: No CPU found in devicetree!
+
+Guenter Roeck says:
+
+> I think I found the problem. In unflatten_and_copy_device_tree(), with added
+> debug information:
+>
+> OF: fdt: initial_boot_params=c861e400, dt=c861f000 size=28874 (0x70ca)
+>
+> ... and then initial_boot_params is copied to dt, which results in corrupted
+> fdt since the memory overlaps. Looks like the initial_boot_params memory
+> is not reserved and (re-)allocated by early_init_dt_alloc_memory_arch().
+
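+The failure mode is a plain range overlap: the allocator handed out
+memory intersecting the unreserved FDT, so the copy corrupted its own
+source. A hedged helper expressing the condition (illustrative, not
+kernel code):
+
+  #include <stdbool.h>
+
+  /* true if [a, a+a_len) and [b, b+b_len) intersect */
+  static bool ranges_overlap(unsigned long a, unsigned long a_len,
+                             unsigned long b, unsigned long b_len)
+  {
+          return a < b + b_len && b < a + a_len;
+  }
+
+  /* ranges_overlap(0xc861e400, 0x70ca, 0xc861f000, 0x70ca) is true,
+   * matching the initial_boot_params/dt addresses quoted above */
+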
+Reported-by: Guenter Roeck <linux@roeck-us.net>
+Reference: http://lkml.kernel.org/r/20170226210338.GA19476@roeck-us.net
+Tested-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Tobias Klauser <tklauser@distanz.ch>
+Acked-by: Ley Foon Tan <ley.foon.tan@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/nios2/kernel/prom.c  | 7 +++++++
+ arch/nios2/kernel/setup.c | 3 +++
+ 2 files changed, 10 insertions(+)
+
+diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
+index 718dd197909f..de73beb36910 100644
+--- a/arch/nios2/kernel/prom.c
++++ b/arch/nios2/kernel/prom.c
+@@ -48,6 +48,13 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+       return alloc_bootmem_align(size, align);
+ }
++int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
++                                           bool nomap)
++{
++      reserve_bootmem(base, size, BOOTMEM_DEFAULT);
++      return 0;
++}
++
+ void __init early_init_devtree(void *params)
+ {
+       __be32 *dtb = (u32 *)__dtb_start;
+diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
+index a4ff86d58d5c..6c4e351a7930 100644
+--- a/arch/nios2/kernel/setup.c
++++ b/arch/nios2/kernel/setup.c
+@@ -195,6 +195,9 @@ void __init setup_arch(char **cmdline_p)
+       }
+ #endif /* CONFIG_BLK_DEV_INITRD */
++      early_init_fdt_reserve_self();
++      early_init_fdt_scan_reserved_mem();
++
+       unflatten_and_copy_device_tree();
+       setup_cpuinfo();
+-- 
+2.12.2
+
+From ca9bd55235b346da89dadc1821e37bb4ec22b7eb Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@ozlabs.org>
+Date: Tue, 4 Apr 2017 14:56:05 +1000
+Subject: [PATCH 23/52] powerpc: Don't try to fix up misaligned
+ load-with-reservation instructions
+Content-Length: 2443
+Lines: 67
+
+commit 48fe9e9488743eec9b7c1addd3c93f12f2123d54 upstream.
+
+In the past, there was only one load-with-reservation instruction,
+lwarx, and if a program attempted a lwarx on a misaligned address, it
+would take an alignment interrupt and the kernel handler would emulate
+it as though it was lwzx, which was not really correct, but benign since
+it is loading the right amount of data, and the lwarx should be paired
+with a stwcx. to the same address, which would also cause an alignment
+interrupt which would result in a SIGBUS being delivered to the process.
+
+We now have 5 different sizes of load-with-reservation instruction. Of
+those, lharx and ldarx cause an immediate SIGBUS by luck since their
+entries in aligninfo[] overlap instructions which were not fixed up, but
+lqarx overlaps with lhz and will be emulated as such. lbarx can never
+generate an alignment interrupt since it only operates on 1 byte.
+
+To straighten this out and fix the lqarx case, this adds code to detect
+the l[hwdq]arx instructions and return without fixing them up, resulting
+in a SIGBUS being delivered to the process.
+
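+A hedged standalone model of the detection (the kernel's own get_xop()
+helper does the same extraction; this re-derivation is illustrative):
+the X-form extended opcode sits in bits 1-10 of the instruction word.
+
+  #include <stdbool.h>
+  #include <stdint.h>
+
+  static inline unsigned int xop(uint32_t instr)
+  {
+          return (instr >> 1) & 0x3ff;
+  }
+
+  /* the larx family is never emulated; the caller returns 0 so the
+   * process receives a SIGBUS instead */
+  static bool is_larx(uint32_t instr)
+  {
+          switch (xop(instr)) {
+          case 20:        /* lwarx */
+          case 84:        /* ldarx */
+          case 116:       /* lharx */
+          case 276:       /* lqarx */
+                  return true;
+          default:
+                  return false;
+          }
+  }
+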
+Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/align.c | 27 +++++++++++++++++++--------
+ 1 file changed, 19 insertions(+), 8 deletions(-)
+
+diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
+index 86150fbb42c3..91e5c1758b5c 100644
+--- a/arch/powerpc/kernel/align.c
++++ b/arch/powerpc/kernel/align.c
+@@ -808,14 +808,25 @@ int fix_alignment(struct pt_regs *regs)
+       nb = aligninfo[instr].len;
+       flags = aligninfo[instr].flags;
+-      /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
+-      if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
+-              nb = 8;
+-              flags = LD+SW;
+-      } else if (IS_XFORM(instruction) &&
+-                 ((instruction >> 1) & 0x3ff) == 660) {
+-              nb = 8;
+-              flags = ST+SW;
++      /*
++       * Handle some cases which give overlaps in the DSISR values.
++       */
++      if (IS_XFORM(instruction)) {
++              switch (get_xop(instruction)) {
++              case 532:       /* ldbrx */
++                      nb = 8;
++                      flags = LD+SW;
++                      break;
++              case 660:       /* stdbrx */
++                      nb = 8;
++                      flags = ST+SW;
++                      break;
++              case 20:        /* lwarx */
++              case 84:        /* ldarx */
++              case 116:       /* lharx */
++              case 276:       /* lqarx */
++                      return 0;       /* not emulated ever */
++              }
+       }
+       /* Byteswap little endian loads and stores */
+-- 
+2.12.2
+
+From a67004a3896eacd109a0138b5526957381fe4337 Mon Sep 17 00:00:00 2001
+From: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
+Date: Wed, 29 Mar 2017 19:19:42 +0200
+Subject: [PATCH 22/52] powerpc/mm: Add missing global TLB invalidate if cxl is
+ active
+Content-Length: 1830
+Lines: 48
+
+commit 88b1bf7268f56887ca88eb09c6fb0f4fc970121a upstream.
+
+Commit 4c6d9acce1f4 ("powerpc/mm: Add hooks for cxl") converted local
+TLB invalidates to global if the cxl driver is active. This is necessary
+because the CAPP snoops invalidations to forward them to the PSL on the
+cxl adapter. However one path was forgotten. native_flush_hash_range()
+still does local TLB invalidates, as found out the hard way recently.
+
+This patch fixes it by following the same logic as previously: if the
+cxl driver is active, the local TLB invalidates are 'upgraded' to
+global.
+
+Fixes: 4c6d9acce1f4 ("powerpc/mm: Add hooks for cxl")
+Signed-off-by: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
+Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/mm/hash_native_64.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
+index c8822af10a58..19d9b2d2d212 100644
+--- a/arch/powerpc/mm/hash_native_64.c
++++ b/arch/powerpc/mm/hash_native_64.c
+@@ -645,6 +645,10 @@ static void native_flush_hash_range(unsigned long number, int local)
+       unsigned long psize = batch->psize;
+       int ssize = batch->ssize;
+       int i;
++      unsigned int use_local;
++
++      use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
++              mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
+       local_irq_save(flags);
+@@ -671,8 +675,7 @@ static void native_flush_hash_range(unsigned long number, int local)
+               } pte_iterate_hashed_end();
+       }
+-      if (mmu_has_feature(MMU_FTR_TLBIEL) &&
+-          mmu_psize_defs[psize].tlbiel && local) {
++      if (use_local) {
+               asm volatile("ptesync":::"memory");
+               for (i = 0; i < number; i++) {
+                       vpn = batch->vpn[i];
+-- 
+2.12.2
+
+From 435cc436a88652046b9ca89fb56acf3a4b1a44b8 Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Tue, 4 Apr 2017 08:51:34 +0100
+Subject: [PATCH 21/52] metag/usercopy: Add missing fixups
+Content-Length: 4904
+Lines: 163
+
+commit b884a190afcecdbef34ca508ea5ee88bb7c77861 upstream.
+
+The rapf copy loops in the Meta usercopy code is missing some extable
+entries for HTP cores with unaligned access checking enabled, where
+faults occur on the instruction immediately after the faulting access.
+
+Add the fixup labels and extable entries for these cases so that corner
+case user copy failures don't cause kernel crashes.
+
+Fixes: 373cd784d0fc ("metag: Memory handling")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/metag/lib/usercopy.c | 72 +++++++++++++++++++++++++++++++----------------
+ 1 file changed, 48 insertions(+), 24 deletions(-)
+
+diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
+index e09c95ba028c..2792fc621088 100644
+--- a/arch/metag/lib/usercopy.c
++++ b/arch/metag/lib/usercopy.c
+@@ -259,27 +259,31 @@
+               "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+               "22:\n"                                                 \
+               "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+-              "SUB    %3, %3, #32\n"                                  \
+               "23:\n"                                                 \
+-              "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
++              "SUB    %3, %3, #32\n"                                  \
+               "24:\n"                                                 \
++              "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
++              "25:\n"                                                 \
+               "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "26:\n"                                                 \
+               "SUB    %3, %3, #32\n"                                  \
+               "DCACHE [%1+#-64], D0Ar6\n"                             \
+               "BR     $Lloop"id"\n"                                   \
+                                                                       \
+               "MOV    RAPF, %1\n"                                     \
+-              "25:\n"                                                 \
++              "27:\n"                                                 \
+               "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+-              "26:\n"                                                 \
++              "28:\n"                                                 \
+               "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "29:\n"                                                 \
+               "SUB    %3, %3, #32\n"                                  \
+-              "27:\n"                                                 \
++              "30:\n"                                                 \
+               "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+-              "28:\n"                                                 \
++              "31:\n"                                                 \
+               "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "32:\n"                                                 \
+               "SUB    %0, %0, #8\n"                                   \
+-              "29:\n"                                                 \
++              "33:\n"                                                 \
+               "SETL   [%0++], D0.7, D1.7\n"                           \
+               "SUB    %3, %3, #32\n"                                  \
+               "1:"                                                    \
+@@ -311,7 +315,11 @@
+               "       .long 26b,3b\n"                                 \
+               "       .long 27b,3b\n"                                 \
+               "       .long 28b,3b\n"                                 \
+-              "       .long 29b,4b\n"                                 \
++              "       .long 29b,3b\n"                                 \
++              "       .long 30b,3b\n"                                 \
++              "       .long 31b,3b\n"                                 \
++              "       .long 32b,3b\n"                                 \
++              "       .long 33b,4b\n"                                 \
+               "       .previous\n"                                    \
+               : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
+               : "0" (to), "1" (from), "2" (ret), "3" (n)              \
+@@ -402,47 +410,55 @@
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+               "22:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+-              "SUB    %3, %3, #16\n"                                  \
+               "23:\n"                                                 \
+-              "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+-              "24:\n"                                                 \
+-              "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "SUB    %3, %3, #16\n"                                  \
+-              "25:\n"                                                 \
++              "24:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+-              "26:\n"                                                 \
++              "25:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "26:\n"                                                 \
+               "SUB    %3, %3, #16\n"                                  \
+               "27:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+               "28:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "29:\n"                                                 \
++              "SUB    %3, %3, #16\n"                                  \
++              "30:\n"                                                 \
++              "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
++              "31:\n"                                                 \
++              "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "32:\n"                                                 \
+               "SUB    %3, %3, #16\n"                                  \
+               "DCACHE [%1+#-64], D0Ar6\n"                             \
+               "BR     $Lloop"id"\n"                                   \
+                                                                       \
+               "MOV    RAPF, %1\n"                                     \
+-              "29:\n"                                                 \
++              "33:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+-              "30:\n"                                                 \
++              "34:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "35:\n"                                                 \
+               "SUB    %3, %3, #16\n"                                  \
+-              "31:\n"                                                 \
++              "36:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+-              "32:\n"                                                 \
++              "37:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "38:\n"                                                 \
+               "SUB    %3, %3, #16\n"                                  \
+-              "33:\n"                                                 \
++              "39:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+-              "34:\n"                                                 \
++              "40:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "41:\n"                                                 \
+               "SUB    %3, %3, #16\n"                                  \
+-              "35:\n"                                                 \
++              "42:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+-              "36:\n"                                                 \
++              "43:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "44:\n"                                                 \
+               "SUB    %0, %0, #4\n"                                   \
+-              "37:\n"                                                 \
++              "45:\n"                                                 \
+               "SETD   [%0++], D0.7\n"                                 \
+               "SUB    %3, %3, #16\n"                                  \
+               "1:"                                                    \
+@@ -482,7 +498,15 @@
+               "       .long 34b,3b\n"                                 \
+               "       .long 35b,3b\n"                                 \
+               "       .long 36b,3b\n"                                 \
+-              "       .long 37b,4b\n"                                 \
++              "       .long 37b,3b\n"                                 \
++              "       .long 38b,3b\n"                                 \
++              "       .long 39b,3b\n"                                 \
++              "       .long 40b,3b\n"                                 \
++              "       .long 41b,3b\n"                                 \
++              "       .long 42b,3b\n"                                 \
++              "       .long 43b,3b\n"                                 \
++              "       .long 44b,3b\n"                                 \
++              "       .long 45b,4b\n"                                 \
+               "       .previous\n"                                    \
+               : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
+               : "0" (to), "1" (from), "2" (ret), "3" (n)              \
+-- 
+2.12.2
+
+From 3040ecd4253a4ef996e6f940801ee4b80b01c87a Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Mon, 3 Apr 2017 17:41:40 +0100
+Subject: [PATCH 20/52] metag/usercopy: Fix src fixup in from user rapf loops
+Content-Length: 3332
+Lines: 85
+
+commit 2c0b1df88b987a12d95ea1d6beaf01894f3cc725 upstream.
+
+The fixup code to rewind the source pointer in
+__asm_copy_from_user_{32,64}bit_rapf_loop() always rewound the source by
+a single unit (4 or 8 bytes), however this is insufficient if the fault
+didn't occur on the first load in the loop, as the source pointer will
+have been incremented but nothing will have been stored until all 4
+register [pairs] are loaded.
+
+Read the LSM_STEP field of TXSTATUS (which is already loaded into a
+register), a bit like the copy_to_user versions, to determine how many
+iterations of MGET[DL] have taken place, all of which need rewinding.
+
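+A hedged C model of the new rewind amount for the 64-bit variant (the
+asm does this with LSR/ANDS/ADDZ on the TXSTATUS value already held in
+D0Ar2; the 32-bit variant is identical with 4-byte units):
+
+  #include <stdint.h>
+
+  static unsigned long rapf64_src_rewind(uint32_t txstatus)
+  {
+          unsigned int step = (txstatus >> 8) & 0x7;      /* LSM_STEP */
+
+          /* a completed block of 4 MGETLs wraps LSM_STEP to 0, which
+           * must count as 4 -- this is the ADDZ case */
+          if (step == 0)
+                  step = 4;
+          return step * 8;        /* 8 bytes per MGETL */
+  }
+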
+Fixes: 373cd784d0fc ("metag: Memory handling")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/metag/lib/usercopy.c | 36 ++++++++++++++++++++++++++++--------
+ 1 file changed, 28 insertions(+), 8 deletions(-)
+
+diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
+index 4422928a1746..e09c95ba028c 100644
+--- a/arch/metag/lib/usercopy.c
++++ b/arch/metag/lib/usercopy.c
+@@ -687,29 +687,49 @@ EXPORT_SYMBOL(__copy_user);
+  *
+  *    Rationale:
+  *            A fault occurs while reading from user buffer, which is the
+- *            source. Since the fault is at a single address, we only
+- *            need to rewind by 8 bytes.
++ *            source.
+  *            Since we don't write to kernel buffer until we read first,
+  *            the kernel buffer is at the right state and needn't be
+- *            corrected.
++ *            corrected, but the source must be rewound to the beginning of
++ *            the block, which is LSM_STEP*8 bytes.
++ *            LSM_STEP is bits 10:8 in TXSTATUS which is already read
++ *            and stored in D0Ar2
++ *
++ *            NOTE: If a fault occurs at the last operation in M{G,S}ETL
++ *                    LSM_STEP will be 0. ie: we do 4 writes in our case, if
++ *                    a fault happens at the 4th write, LSM_STEP will be 0
++ *                    instead of 4. The code copes with that.
+  */
+ #define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id)    \
+       __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,           \
+-              "SUB    %1, %1, #8\n")
++              "LSR    D0Ar2, D0Ar2, #5\n"                             \
++              "ANDS   D0Ar2, D0Ar2, #0x38\n"                          \
++              "ADDZ   D0Ar2, D0Ar2, #32\n"                            \
++              "SUB    %1, %1, D0Ar2\n")
+ /*    rewind 'from' pointer when a fault occurs
+  *
+  *    Rationale:
+  *            A fault occurs while reading from user buffer, which is the
+- *            source. Since the fault is at a single address, we only
+- *            need to rewind by 4 bytes.
++ *            source.
+  *            Since we don't write to kernel buffer until we read first,
+  *            the kernel buffer is at the right state and needn't be
+- *            corrected.
++ *            corrected, but the source must be rewound to the beginning of
++ *            the block, which is LSM_STEP*4 bytes.
++ *            LSM_STEP is bits 10:8 in TXSTATUS which is already read
++ *            and stored in D0Ar2
++ *
++ *            NOTE: If a fault occurs at the last operation in M{G,S}ETL
++ *                    LSM_STEP will be 0. ie: we do 4 writes in our case, if
++ *                    a fault happens at the 4th write, LSM_STEP will be 0
++ *                    instead of 4. The code copes with that.
+  */
+ #define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id)    \
+       __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,           \
+-              "SUB    %1, %1, #4\n")
++              "LSR    D0Ar2, D0Ar2, #6\n"                             \
++              "ANDS   D0Ar2, D0Ar2, #0x1c\n"                          \
++              "ADDZ   D0Ar2, D0Ar2, #16\n"                            \
++              "SUB    %1, %1, D0Ar2\n")
+ /*
+-- 
+2.12.2
+
+From beb0ad97ad099ac99f0354e195bd129586a60694 Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Tue, 4 Apr 2017 11:43:26 +0100
+Subject: [PATCH 19/52] metag/usercopy: Set flags before ADDZ
+Content-Length: 2357
+Lines: 63
+
+commit fd40eee1290ad7add7aa665e3ce6b0f9fe9734b4 upstream.
+
+The fixup code for the copy_to_user rapf loops reads TXStatus.LSM_STEP
+to decide how far to rewind the source pointer. There is a special case
+for the last execution of an MGETL/MGETD, since it leaves LSM_STEP=0
+even though the number of MGETLs/MGETDs attempted was 4. This uses ADDZ
+which is conditional upon the Z condition flag, but the AND instruction
+which masked the TXStatus.LSM_STEP field didn't set the condition flags
+based on the result.
+
+Fix that now by using ANDS which does set the flags, and also marking
+the condition codes as clobbered by the inline assembly.
+
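+As a rough C sketch of what the ANDS/ADDZ pair computes (hypothetical
+helper variables; the real code does this in inline assembly on D0Ar2):
+
+    unsigned int step = (txstatus >> 8) & 0x7; /* TXSTATUS.LSM_STEP, bits 10:8 */
+    if (step == 0)      /* ANDS sets Z; ADDZ fires: last MGETL/MGETD case */
+            step = 4;   /* treat it as the full 4 operations */
+
+Without ANDS, the Z flag still held a stale value from an earlier
+instruction, so the ADDZ adjustment was applied or skipped unpredictably.
+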
+Fixes: 373cd784d0fc ("metag: Memory handling")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/metag/lib/usercopy.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
+index e1d553872fd7..4422928a1746 100644
+--- a/arch/metag/lib/usercopy.c
++++ b/arch/metag/lib/usercopy.c
+@@ -315,7 +315,7 @@
+               "       .previous\n"                                    \
+               : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
+               : "0" (to), "1" (from), "2" (ret), "3" (n)              \
+-              : "D1Ar1", "D0Ar2", "memory")
++              : "D1Ar1", "D0Ar2", "cc", "memory")
+ /*    rewind 'to' and 'from'  pointers when a fault occurs
+  *
+@@ -341,7 +341,7 @@
+ #define __asm_copy_to_user_64bit_rapf_loop(to,        from, ret, n, id)\
+       __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,           \
+               "LSR    D0Ar2, D0Ar2, #8\n"                             \
+-              "AND    D0Ar2, D0Ar2, #0x7\n"                           \
++              "ANDS   D0Ar2, D0Ar2, #0x7\n"                           \
+               "ADDZ   D0Ar2, D0Ar2, #4\n"                             \
+               "SUB    D0Ar2, D0Ar2, #1\n"                             \
+               "MOV    D1Ar1, #4\n"                                    \
+@@ -486,7 +486,7 @@
+               "       .previous\n"                                    \
+               : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
+               : "0" (to), "1" (from), "2" (ret), "3" (n)              \
+-              : "D1Ar1", "D0Ar2", "memory")
++              : "D1Ar1", "D0Ar2", "cc", "memory")
+ /*    rewind 'to' and 'from'  pointers when a fault occurs
+  *
+@@ -512,7 +512,7 @@
+ #define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
+       __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,           \
+               "LSR    D0Ar2, D0Ar2, #8\n"                             \
+-              "AND    D0Ar2, D0Ar2, #0x7\n"                           \
++              "ANDS   D0Ar2, D0Ar2, #0x7\n"                           \
+               "ADDZ   D0Ar2, D0Ar2, #4\n"                             \
+               "SUB    D0Ar2, D0Ar2, #1\n"                             \
+               "MOV    D1Ar1, #4\n"                                    \
+-- 
+2.12.2
+
+From 29b5eb517c6961ea9e9b16c49b5cf7fd93860be2 Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Fri, 31 Mar 2017 11:14:02 +0100
+Subject: [PATCH 18/52] metag/usercopy: Zero rest of buffer from copy_from_user
+Content-Length: 7463
+Lines: 230
+
+commit 563ddc1076109f2b3f88e6d355eab7b6fd4662cb upstream.
+
+Currently we try to zero the destination for a failed read from userland
+in fixup code in the usercopy.c macros. The rest of the destination
+buffer is then zeroed from __copy_user_zeroing(), which is used for both
+copy_from_user() and __copy_from_user().
+
+Unfortunately we fail to zero in the fixup code as D1Ar1 is set to 0
+before the fixup code entry labels, and __copy_from_user() shouldn't even
+be zeroing the rest of the buffer.
+
+Move the zeroing out into copy_from_user() and rename
+__copy_user_zeroing() to raw_copy_from_user() since it no longer does
+any zeroing. This also conveniently matches the name needed for
+RAW_COPY_USER support in a later patch.
+
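+The resulting contract, from a caller's point of view (illustrative
+sketch only; buffer names are hypothetical):
+
+    char kbuf[64];
+    unsigned long left = copy_from_user(kbuf, uptr, sizeof(kbuf));
+    /* 'left' bytes were inaccessible; copy_from_user() has zeroed the
+     * tail kbuf[sizeof(kbuf) - left ..] itself, not in the fixup code.
+     * __copy_from_user() now copies raw and zeroes nothing. */
+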
+Fixes: 373cd784d0fc ("metag: Memory handling")
+Reported-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/metag/include/asm/uaccess.h | 15 ++++++-----
+ arch/metag/lib/usercopy.c        | 57 +++++++++++++---------------------------
+ 2 files changed, 26 insertions(+), 46 deletions(-)
+
+diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
+index 273e61225c27..07238b39638c 100644
+--- a/arch/metag/include/asm/uaccess.h
++++ b/arch/metag/include/asm/uaccess.h
+@@ -197,20 +197,21 @@ extern long __must_check strnlen_user(const char __user *src, long count);
+ #define strlen_user(str) strnlen_user(str, 32767)
+-extern unsigned long __must_check __copy_user_zeroing(void *to,
+-                                                    const void __user *from,
+-                                                    unsigned long n);
++extern unsigned long raw_copy_from_user(void *to, const void __user *from,
++                                      unsigned long n);
+ static inline unsigned long
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++      unsigned long res = n;
+       if (likely(access_ok(VERIFY_READ, from, n)))
+-              return __copy_user_zeroing(to, from, n);
+-      memset(to, 0, n);
+-      return n;
++              res = raw_copy_from_user(to, from, n);
++      if (unlikely(res))
++              memset(to + (n - res), 0, res);
++      return res;
+ }
+-#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
++#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
+ #define __copy_from_user_inatomic __copy_from_user
+ extern unsigned long __must_check __copy_user(void __user *to,
+diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
+index 714d8562aa20..e1d553872fd7 100644
+--- a/arch/metag/lib/usercopy.c
++++ b/arch/metag/lib/usercopy.c
+@@ -29,7 +29,6 @@
+               COPY                                             \
+               "1:\n"                                           \
+               "       .section .fixup,\"ax\"\n"                \
+-              "       MOV D1Ar1,#0\n"                          \
+               FIXUP                                            \
+               "       MOVT    D1Ar1,#HI(1b)\n"                 \
+               "       JUMP    D1Ar1,#LO(1b)\n"                 \
+@@ -637,16 +636,14 @@ EXPORT_SYMBOL(__copy_user);
+       __asm_copy_user_cont(to, from, ret,     \
+               "       GETB D1Ar1,[%1++]\n"    \
+               "2:     SETB [%0++],D1Ar1\n",   \
+-              "3:     ADD  %2,%2,#1\n"        \
+-              "       SETB [%0++],D1Ar1\n",   \
++              "3:     ADD  %2,%2,#1\n",       \
+               "       .long 2b,3b\n")
+ #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+       __asm_copy_user_cont(to, from, ret,             \
+               "       GETW D1Ar1,[%1++]\n"            \
+               "2:     SETW [%0++],D1Ar1\n" COPY,      \
+-              "3:     ADD  %2,%2,#2\n"                \
+-              "       SETW [%0++],D1Ar1\n" FIXUP,     \
++              "3:     ADD  %2,%2,#2\n" FIXUP,         \
+               "       .long 2b,3b\n" TENTRY)
+ #define __asm_copy_from_user_2(to, from, ret) \
+@@ -656,32 +653,26 @@ EXPORT_SYMBOL(__copy_user);
+       __asm_copy_from_user_2x_cont(to, from, ret,     \
+               "       GETB D1Ar1,[%1++]\n"            \
+               "4:     SETB [%0++],D1Ar1\n",           \
+-              "5:     ADD  %2,%2,#1\n"                \
+-              "       SETB [%0++],D1Ar1\n",           \
++              "5:     ADD  %2,%2,#1\n",               \
+               "       .long 4b,5b\n")
+ #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+       __asm_copy_user_cont(to, from, ret,             \
+               "       GETD D1Ar1,[%1++]\n"            \
+               "2:     SETD [%0++],D1Ar1\n" COPY,      \
+-              "3:     ADD  %2,%2,#4\n"                \
+-              "       SETD [%0++],D1Ar1\n" FIXUP,     \
++              "3:     ADD  %2,%2,#4\n" FIXUP,         \
+               "       .long 2b,3b\n" TENTRY)
+ #define __asm_copy_from_user_4(to, from, ret) \
+       __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
+-
+ #define __asm_copy_from_user_8x64(to, from, ret) \
+       asm volatile (                          \
+               "       GETL D0Ar2,D1Ar1,[%1++]\n"      \
+               "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
+               "1:\n"                                  \
+               "       .section .fixup,\"ax\"\n"       \
+-              "       MOV D1Ar1,#0\n"                 \
+-              "       MOV D0Ar2,#0\n"                 \
+               "3:     ADD  %2,%2,#8\n"                \
+-              "       SETL [%0++],D0Ar2,D1Ar1\n"      \
+               "       MOVT    D0Ar2,#HI(1b)\n"        \
+               "       JUMP    D0Ar2,#LO(1b)\n"        \
+               "       .previous\n"                    \
+@@ -721,11 +712,12 @@ EXPORT_SYMBOL(__copy_user);
+               "SUB    %1, %1, #4\n")
+-/* Copy from user to kernel, zeroing the bytes that were inaccessible in
+-   userland.  The return-value is the number of bytes that were
+-   inaccessible.  */
+-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+-                                unsigned long n)
++/*
++ * Copy from user to kernel. The return-value is the number of bytes that were
++ * inaccessible.
++ */
++unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
++                               unsigned long n)
+ {
+       register char *dst asm ("A0.2") = pdst;
+       register const char __user *src asm ("A1.2") = psrc;
+@@ -738,7 +730,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+               __asm_copy_from_user_1(dst, src, retn);
+               n--;
+               if (retn)
+-                      goto copy_exception_bytes;
++                      return retn + n;
+       }
+       if ((unsigned long) dst & 1) {
+               /* Worst case - byte copy */
+@@ -746,14 +738,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+                       __asm_copy_from_user_1(dst, src, retn);
+                       n--;
+                       if (retn)
+-                              goto copy_exception_bytes;
++                              return retn + n;
+               }
+       }
+       if (((unsigned long) src & 2) && n >= 2) {
+               __asm_copy_from_user_2(dst, src, retn);
+               n -= 2;
+               if (retn)
+-                      goto copy_exception_bytes;
++                      return retn + n;
+       }
+       if ((unsigned long) dst & 2) {
+               /* Second worst case - word copy */
+@@ -761,7 +753,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+                       __asm_copy_from_user_2(dst, src, retn);
+                       n -= 2;
+                       if (retn)
+-                              goto copy_exception_bytes;
++                              return retn + n;
+               }
+       }
+@@ -777,7 +769,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+                       __asm_copy_from_user_8x64(dst, src, retn);
+                       n -= 8;
+                       if (retn)
+-                              goto copy_exception_bytes;
++                              return retn + n;
+               }
+       }
+@@ -793,7 +785,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+                       __asm_copy_from_user_8x64(dst, src, retn);
+                       n -= 8;
+                       if (retn)
+-                              goto copy_exception_bytes;
++                              return retn + n;
+               }
+       }
+ #endif
+@@ -803,7 +795,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+               n -= 4;
+               if (retn)
+-                      goto copy_exception_bytes;
++                      return retn + n;
+       }
+       /* If we get here, there were no memory read faults.  */
+@@ -829,21 +821,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+       /* If we get here, retn correctly reflects the number of failing
+          bytes.  */
+       return retn;
+-
+- copy_exception_bytes:
+-      /* We already have "retn" bytes cleared, and need to clear the
+-         remaining "n" bytes.  A non-optimized simple byte-for-byte in-line
+-         memset is preferred here, since this isn't speed-critical code and
+-         we'd rather have this a leaf-function than calling memset.  */
+-      {
+-              char *endp;
+-              for (endp = dst + n; dst < endp; dst++)
+-                      *dst = 0;
+-      }
+-
+-      return retn + n;
+ }
+-EXPORT_SYMBOL(__copy_user_zeroing);
++EXPORT_SYMBOL(raw_copy_from_user);
+ #define __asm_clear_8x64(to, ret) \
+       asm volatile (                                  \
+-- 
+2.12.2
+
+From dde6f22c1e122907717f45405cbc2c6227e259e5 Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Fri, 31 Mar 2017 13:35:01 +0100
+Subject: [PATCH 17/52] metag/usercopy: Add early abort to copy_to_user
+Content-Length: 2525
+Lines: 99
+
+commit fb8ea062a8f2e85256e13f55696c5c5f0dfdcc8b upstream.
+
+When copying to userland on Meta, if any faults are encountered,
+immediately abort the copy instead of continuing on and repeatedly
+faulting and, worse, potentially copying further bytes successfully to
+subsequent valid pages.
+
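+The invariant behind the added checks, sketched in C: 'retn' counts
+bytes the fixup code has already given up on, and 'n' counts bytes not
+yet attempted, so on a fault the total left uncopied is retn + n:
+
+    while (n >= 4) {
+            __asm_copy_to_user_4(dst, src, retn);
+            n -= 4;
+            if (retn)               /* fault recorded by the fixup */
+                    return retn + n;
+    }
+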
+Fixes: 373cd784d0fc ("metag: Memory handling")
+Reported-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/metag/lib/usercopy.c | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
+index a6ced9691ddb..714d8562aa20 100644
+--- a/arch/metag/lib/usercopy.c
++++ b/arch/metag/lib/usercopy.c
+@@ -538,23 +538,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
+       if ((unsigned long) src & 1) {
+               __asm_copy_to_user_1(dst, src, retn);
+               n--;
++              if (retn)
++                      return retn + n;
+       }
+       if ((unsigned long) dst & 1) {
+               /* Worst case - byte copy */
+               while (n > 0) {
+                       __asm_copy_to_user_1(dst, src, retn);
+                       n--;
++                      if (retn)
++                              return retn + n;
+               }
+       }
+       if (((unsigned long) src & 2) && n >= 2) {
+               __asm_copy_to_user_2(dst, src, retn);
+               n -= 2;
++              if (retn)
++                      return retn + n;
+       }
+       if ((unsigned long) dst & 2) {
+               /* Second worst case - word copy */
+               while (n >= 2) {
+                       __asm_copy_to_user_2(dst, src, retn);
+                       n -= 2;
++                      if (retn)
++                              return retn + n;
+               }
+       }
+@@ -569,6 +577,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
+               while (n >= 8) {
+                       __asm_copy_to_user_8x64(dst, src, retn);
+                       n -= 8;
++                      if (retn)
++                              return retn + n;
+               }
+       }
+       if (n >= RAPF_MIN_BUF_SIZE) {
+@@ -581,6 +591,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
+               while (n >= 8) {
+                       __asm_copy_to_user_8x64(dst, src, retn);
+                       n -= 8;
++                      if (retn)
++                              return retn + n;
+               }
+       }
+ #endif
+@@ -588,11 +600,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
+       while (n >= 16) {
+               __asm_copy_to_user_16(dst, src, retn);
+               n -= 16;
++              if (retn)
++                      return retn + n;
+       }
+       while (n >= 4) {
+               __asm_copy_to_user_4(dst, src, retn);
+               n -= 4;
++              if (retn)
++                      return retn + n;
+       }
+       switch (n) {
+@@ -609,6 +625,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
+               break;
+       }
++      /*
++       * If we get here, retn correctly reflects the number of failing
++       * bytes.
++       */
+       return retn;
+ }
+ EXPORT_SYMBOL(__copy_user);
+-- 
+2.12.2
+
+From ae781dee56e4805311f0615ca04ea226bfbcafcd Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Fri, 31 Mar 2017 11:23:18 +0100
+Subject: [PATCH 16/52] metag/usercopy: Fix alignment error checking
+Content-Length: 2038
+Lines: 57
+
+commit 2257211942bbbf6c798ab70b487d7e62f7835a1a upstream.
+
+Fix the error checking of the alignment adjustment code in
+raw_copy_from_user(), which mistakenly considers it safe to skip the
+error check when aligning the source buffer on a 2 or 4 byte boundary.
+
+If the destination buffer was unaligned it may have started to copy
+using byte or word accesses, which could well be at the start of a new
+(valid) source page. This would result in it appearing to have copied 1
+or 2 bytes at the end of the first (invalid) page rather than none at
+all.
+
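+A sketch of the fix for the source-alignment step (hypothetical
+scenario: the source byte sits on an invalid page, so the access faults
+and the fixup bumps 'retn'; without the added check the copy carried on
+regardless):
+
+    if ((unsigned long) src & 1) {
+            __asm_copy_from_user_1(dst, src, retn); /* may fault */
+            n--;
+            if (retn)                       /* the check this patch adds */
+                    goto copy_exception_bytes;
+    }
+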
+Fixes: 373cd784d0fc ("metag: Memory handling")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/metag/lib/usercopy.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
+index b4eb1f17069f..a6ced9691ddb 100644
+--- a/arch/metag/lib/usercopy.c
++++ b/arch/metag/lib/usercopy.c
+@@ -717,6 +717,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+       if ((unsigned long) src & 1) {
+               __asm_copy_from_user_1(dst, src, retn);
+               n--;
++              if (retn)
++                      goto copy_exception_bytes;
+       }
+       if ((unsigned long) dst & 1) {
+               /* Worst case - byte copy */
+@@ -730,6 +732,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+       if (((unsigned long) src & 2) && n >= 2) {
+               __asm_copy_from_user_2(dst, src, retn);
+               n -= 2;
++              if (retn)
++                      goto copy_exception_bytes;
+       }
+       if ((unsigned long) dst & 2) {
+               /* Second worst case - word copy */
+@@ -741,12 +745,6 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+               }
+       }
+-      /* We only need one check after the unalignment-adjustments,
+-         because if both adjustments were done, either both or
+-         neither reference had an exception.  */
+-      if (retn != 0)
+-              goto copy_exception_bytes;
+-
+ #ifdef USE_RAPF
+       /* 64 bit copy loop */
+       if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
+-- 
+2.12.2
+
+From ce962cf480331380d7eb3c8e3c625a975e0aa38f Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Fri, 31 Mar 2017 10:37:44 +0100
+Subject: [PATCH 15/52] metag/usercopy: Drop unused macros
+Content-Length: 4774
+Lines: 140
+
+commit ef62a2d81f73d9cddef14bc3d9097a57010d551c upstream.
+
+Metag's lib/usercopy.c has a bunch of copy_from_user macros for larger
+copies between 5 and 16 bytes which are completely unused. Before fixing
+the zeroing, let's drop these macros so there is less to fix.
+
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/metag/lib/usercopy.c | 113 ----------------------------------------------
+ 1 file changed, 113 deletions(-)
+
+diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
+index b3ebfe9c8e88..b4eb1f17069f 100644
+--- a/arch/metag/lib/usercopy.c
++++ b/arch/metag/lib/usercopy.c
+@@ -651,119 +651,6 @@ EXPORT_SYMBOL(__copy_user);
+ #define __asm_copy_from_user_4(to, from, ret) \
+       __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
+-#define __asm_copy_from_user_5(to, from, ret) \
+-      __asm_copy_from_user_4x_cont(to, from, ret,     \
+-              "       GETB D1Ar1,[%1++]\n"            \
+-              "4:     SETB [%0++],D1Ar1\n",           \
+-              "5:     ADD  %2,%2,#1\n"                \
+-              "       SETB [%0++],D1Ar1\n",           \
+-              "       .long 4b,5b\n")
+-
+-#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+-      __asm_copy_from_user_4x_cont(to, from, ret,     \
+-              "       GETW D1Ar1,[%1++]\n"            \
+-              "4:     SETW [%0++],D1Ar1\n" COPY,      \
+-              "5:     ADD  %2,%2,#2\n"                \
+-              "       SETW [%0++],D1Ar1\n" FIXUP,     \
+-              "       .long 4b,5b\n" TENTRY)
+-
+-#define __asm_copy_from_user_6(to, from, ret) \
+-      __asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_7(to, from, ret) \
+-      __asm_copy_from_user_6x_cont(to, from, ret,     \
+-              "       GETB D1Ar1,[%1++]\n"            \
+-              "6:     SETB [%0++],D1Ar1\n",           \
+-              "7:     ADD  %2,%2,#1\n"                \
+-              "       SETB [%0++],D1Ar1\n",           \
+-              "       .long 6b,7b\n")
+-
+-#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+-      __asm_copy_from_user_4x_cont(to, from, ret,     \
+-              "       GETD D1Ar1,[%1++]\n"            \
+-              "4:     SETD [%0++],D1Ar1\n" COPY,      \
+-              "5:     ADD  %2,%2,#4\n"                        \
+-              "       SETD [%0++],D1Ar1\n" FIXUP,             \
+-              "       .long 4b,5b\n" TENTRY)
+-
+-#define __asm_copy_from_user_8(to, from, ret) \
+-      __asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_9(to, from, ret) \
+-      __asm_copy_from_user_8x_cont(to, from, ret,     \
+-              "       GETB D1Ar1,[%1++]\n"            \
+-              "6:     SETB [%0++],D1Ar1\n",           \
+-              "7:     ADD  %2,%2,#1\n"                \
+-              "       SETB [%0++],D1Ar1\n",           \
+-              "       .long 6b,7b\n")
+-
+-#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+-      __asm_copy_from_user_8x_cont(to, from, ret,     \
+-              "       GETW D1Ar1,[%1++]\n"            \
+-              "6:     SETW [%0++],D1Ar1\n" COPY,      \
+-              "7:     ADD  %2,%2,#2\n"                \
+-              "       SETW [%0++],D1Ar1\n" FIXUP,     \
+-              "       .long 6b,7b\n" TENTRY)
+-
+-#define __asm_copy_from_user_10(to, from, ret) \
+-      __asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_11(to, from, ret)                \
+-      __asm_copy_from_user_10x_cont(to, from, ret,    \
+-              "       GETB D1Ar1,[%1++]\n"            \
+-              "8:     SETB [%0++],D1Ar1\n",           \
+-              "9:     ADD  %2,%2,#1\n"                \
+-              "       SETB [%0++],D1Ar1\n",           \
+-              "       .long 8b,9b\n")
+-
+-#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+-      __asm_copy_from_user_8x_cont(to, from, ret,     \
+-              "       GETD D1Ar1,[%1++]\n"            \
+-              "6:     SETD [%0++],D1Ar1\n" COPY,      \
+-              "7:     ADD  %2,%2,#4\n"                \
+-              "       SETD [%0++],D1Ar1\n" FIXUP,     \
+-              "       .long 6b,7b\n" TENTRY)
+-
+-#define __asm_copy_from_user_12(to, from, ret) \
+-      __asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_13(to, from, ret) \
+-      __asm_copy_from_user_12x_cont(to, from, ret,    \
+-              "       GETB D1Ar1,[%1++]\n"            \
+-              "8:     SETB [%0++],D1Ar1\n",           \
+-              "9:     ADD  %2,%2,#1\n"                \
+-              "       SETB [%0++],D1Ar1\n",           \
+-              "       .long 8b,9b\n")
+-
+-#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+-      __asm_copy_from_user_12x_cont(to, from, ret,    \
+-              "       GETW D1Ar1,[%1++]\n"            \
+-              "8:     SETW [%0++],D1Ar1\n" COPY,      \
+-              "9:     ADD  %2,%2,#2\n"                \
+-              "       SETW [%0++],D1Ar1\n" FIXUP,     \
+-              "       .long 8b,9b\n" TENTRY)
+-
+-#define __asm_copy_from_user_14(to, from, ret) \
+-      __asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_15(to, from, ret) \
+-      __asm_copy_from_user_14x_cont(to, from, ret,    \
+-              "       GETB D1Ar1,[%1++]\n"            \
+-              "10:    SETB [%0++],D1Ar1\n",           \
+-              "11:    ADD  %2,%2,#1\n"                \
+-              "       SETB [%0++],D1Ar1\n",           \
+-              "       .long 10b,11b\n")
+-
+-#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+-      __asm_copy_from_user_12x_cont(to, from, ret,    \
+-              "       GETD D1Ar1,[%1++]\n"            \
+-              "8:     SETD [%0++],D1Ar1\n" COPY,      \
+-              "9:     ADD  %2,%2,#4\n"                \
+-              "       SETD [%0++],D1Ar1\n" FIXUP,     \
+-              "       .long 8b,9b\n" TENTRY)
+-
+-#define __asm_copy_from_user_16(to, from, ret) \
+-      __asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
+ #define __asm_copy_from_user_8x64(to, from, ret) \
+       asm volatile (                          \
+-- 
+2.12.2
+
+From 5cc244782dabaee110ed9c3900d40cd4b481a517 Mon Sep 17 00:00:00 2001
+From: Wei Yongjun <yongjun_wei@trendmicro.com.cn>
+Date: Fri, 17 Jun 2016 17:33:59 +0000
+Subject: [PATCH 14/52] ring-buffer: Fix return value check in
+ test_ringbuffer()
+Content-Length: 1507
+Lines: 46
+
+commit 62277de758b155dc04b78f195a1cb5208c37b2df upstream.
+
+In case of error, the function kthread_run() returns ERR_PTR()
+and never returns NULL. The NULL test in the return value check
+should be replaced with IS_ERR().
+
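+The general pattern for checking kthread_run()/kthread_create() (sketch;
+'fn', 'data' and the thread name are placeholders):
+
+    struct task_struct *t = kthread_run(fn, data, "worker/%d", cpu);
+    if (IS_ERR(t))                  /* never NULL: errors are ERR_PTR() */
+            return PTR_ERR(t);
+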
+Link: http://lkml.kernel.org/r/1466184839-14927-1-git-send-email-weiyj_lk@163.com
+
+Fixes: 6c43e554a ("ring-buffer: Add ring buffer startup selftest")
+Signed-off-by: Wei Yongjun <yongjun_wei@trendmicro.com.cn>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/ring_buffer.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index acbb0e73d3a2..7d7f99b0db47 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -4875,9 +4875,9 @@ static __init int test_ringbuffer(void)
+               rb_data[cpu].cnt = cpu;
+               rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
+                                                "rbtester/%d", cpu);
+-              if (WARN_ON(!rb_threads[cpu])) {
++              if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
+                       pr_cont("FAILED\n");
+-                      ret = -1;
++                      ret = PTR_ERR(rb_threads[cpu]);
+                       goto out_free;
+               }
+@@ -4887,9 +4887,9 @@ static __init int test_ringbuffer(void)
+       /* Now create the rb hammer! */
+       rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
+-      if (WARN_ON(!rb_hammer)) {
++      if (WARN_ON(IS_ERR(rb_hammer))) {
+               pr_cont("FAILED\n");
+-              ret = -1;
++              ret = PTR_ERR(rb_hammer);
+               goto out_free;
+       }
+-- 
+2.12.2
+
+From 926e1ed2b8ce683f137ea8e0683ac4f6d27c8afb Mon Sep 17 00:00:00 2001
+From: "bsegall@google.com" <bsegall@google.com>
+Date: Fri, 7 Apr 2017 16:04:51 -0700
+Subject: [PATCH 13/52] ptrace: fix PTRACE_LISTEN race corrupting task->state
+Content-Length: 2204
+Lines: 57
+
+commit 5402e97af667e35e54177af8f6575518bf251d51 upstream.
+
+In PT_SEIZED + LISTEN mode STOP/CONT signals cause a wakeup against
+__TASK_TRACED.  If this races with ptrace_unfreeze_traced() at the end
+of a PTRACE_LISTEN, this can wake the task /after/ the check against
+__TASK_TRACED, but before the reset of state to TASK_TRACED.  This
+causes it to instead clobber TASK_WAKING, allowing a subsequent wakeup
+against TRACED while the task is still on the rq wake_list, corrupting
+it.
+
+Oleg said:
+ "The kernel can crash or this can lead to other hard-to-debug problems.
+  In short, "task->state = TASK_TRACED" in ptrace_unfreeze_traced()
+  assumes that nobody else can wake it up, but PTRACE_LISTEN breaks the
+  contract. Obviously it is very wrong to manipulate task->state if this
+  task is already running, or WAKING, or it sleeps again"
+
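+The interleaving, roughly (illustrative timeline, not code from the
+patch):
+
+    /* tracer, end of PTRACE_LISTEN      STOP/CONT wakeup              */
+    /* ----------------------------      ----------------              */
+    /* sees state == __TASK_TRACED                                     */
+    /*                                   wake_up_state(task, ...)      */
+    /* task->state = TASK_TRACED;   <-- clobbers TASK_WAKING           */
+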
+[akpm@linux-foundation.org: coding-style fixes]
+Fixes: 9899d11f ("ptrace: ensure arch_ptrace/ptrace_request can never race with SIGKILL")
+Link: http://lkml.kernel.org/r/xm26y3vfhmkp.fsf_-_@bsegall-linux.mtv.corp.google.com
+Signed-off-by: Ben Segall <bsegall@google.com>
+Acked-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/ptrace.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index a46c40bfb5f6..c7e8ed99c953 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -151,11 +151,17 @@ static void ptrace_unfreeze_traced(struct task_struct *task)
+       WARN_ON(!task->ptrace || task->parent != current);
++      /*
++       * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
++       * Recheck state under the lock to close this race.
++       */
+       spin_lock_irq(&task->sighand->siglock);
+-      if (__fatal_signal_pending(task))
+-              wake_up_state(task, __TASK_TRACED);
+-      else
+-              task->state = TASK_TRACED;
++      if (task->state == __TASK_TRACED) {
++              if (__fatal_signal_pending(task))
++                      wake_up_state(task, __TASK_TRACED);
++              else
++                      task->state = TASK_TRACED;
++      }
+       spin_unlock_irq(&task->sighand->siglock);
+ }
+-- 
+2.12.2
+
+From 5a69c2b268ed938d44011274e6bc87562542ef94 Mon Sep 17 00:00:00 2001
+From: Jan-Marek Glogowski <glogow@fbihome.de>
+Date: Mon, 20 Feb 2017 12:25:58 +0100
+Subject: [PATCH 12/52] Reset TreeId to zero on SMB2 TREE_CONNECT
+Content-Length: 1105
+Lines: 34
+
+commit 806a28efe9b78ffae5e2757e1ee924b8e50c08ab upstream.
+
+Currently the cifs module breaks the CIFS specs on reconnect as
+described in http://msdn.microsoft.com/en-us/library/cc246529.aspx:
+
+"TreeId (4 bytes): Uniquely identifies the tree connect for the
+command. This MUST be 0 for the SMB2 TREE_CONNECT Request."
+
+Signed-off-by: Jan-Marek Glogowski <glogow@fbihome.de>
+Reviewed-by: Aurelien Aptel <aaptel@suse.com>
+Tested-by: Aurelien Aptel <aaptel@suse.com>
+Signed-off-by: Steve French <smfrench@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/cifs/smb2pdu.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 2fa754c5fd62..6cb5c4b30e78 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -952,6 +952,10 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
+               return -EINVAL;
+       }
++      /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
++      if (tcon)
++              tcon->tid = 0;
++
+       rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
+       if (rc) {
+               kfree(unc_path);
+-- 
+2.12.2
+
+From 8ff7eb4bc8b8cf0416e0746dcdb1545fc6869e98 Mon Sep 17 00:00:00 2001
+From: Quentin Schulz <quentin.schulz@free-electrons.com>
+Date: Tue, 21 Mar 2017 16:52:14 +0100
+Subject: [PATCH 11/52] iio: bmg160: reset chip when probing
+Content-Length: 1865
+Lines: 59
+
+commit 4bdc9029685ac03be50b320b29691766d2326c2b upstream.
+
+The gyroscope chip might need to be reset to be used.
+
+Without the chip being reset, the driver stopped at the first
+regmap_read (to get the CHIP_ID) and failed to probe.
+
+The datasheet of the gyroscope says that a minimum wait of 30ms after
+the reset has to be done.
+
+This patch has been checked on a BMX055, and the datasheets of the BMG160
+and the BMI055 give the same reset register and bits.
+
+Signed-off-by: Quentin Schulz <quentin.schulz@free-electrons.com>
+Signed-off-by: Jonathan Cameron <jic23@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/gyro/bmg160_core.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
+index acb3b303d800..90841abd3ce4 100644
+--- a/drivers/iio/gyro/bmg160_core.c
++++ b/drivers/iio/gyro/bmg160_core.c
+@@ -28,6 +28,7 @@
+ #include <linux/iio/trigger_consumer.h>
+ #include <linux/iio/triggered_buffer.h>
+ #include <linux/regmap.h>
++#include <linux/delay.h>
+ #include "bmg160.h"
+ #define BMG160_IRQ_NAME               "bmg160_event"
+@@ -53,6 +54,9 @@
+ #define BMG160_NO_FILTER              0
+ #define BMG160_DEF_BW                 100
++#define BMG160_GYRO_REG_RESET         0x14
++#define BMG160_GYRO_RESET_VAL         0xb6
++
+ #define BMG160_REG_INT_MAP_0          0x17
+ #define BMG160_INT_MAP_0_BIT_ANY      BIT(1)
+@@ -186,6 +190,14 @@ static int bmg160_chip_init(struct bmg160_data *data)
+       int ret;
+       unsigned int val;
++      /*
++       * Reset chip to get it in a known good state. A delay of 30ms after
++       * reset is required according to the datasheet.
++       */
++      regmap_write(data->regmap, BMG160_GYRO_REG_RESET,
++                   BMG160_GYRO_RESET_VAL);
++      usleep_range(30000, 30700);
++
+       ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val);
+       if (ret < 0) {
+               dev_err(data->dev, "Error reading reg_chip_id\n");
+-- 
+2.12.2
+
+From d4ad442b9982fba9eab0f9003c8cd185a1afeff6 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Thu, 16 Mar 2017 18:20:50 +0000
+Subject: [PATCH 10/52] arm/arm64: KVM: Take mmap_sem in
+ kvm_arch_prepare_memory_region
+Content-Length: 2022
+Lines: 62
+
+commit 72f310481a08db821b614e7b5d00febcc9064b36 upstream.
+
+We don't hold the mmap_sem while searching for VMAs (via find_vma)
+in kvm_arch_prepare_memory_region, which can end up in unexpected failures.
+
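+find_vma() must only be called with mmap_sem held, at least for read;
+the canonical pattern is (sketch, 'hva' being an example address):
+
+    struct vm_area_struct *vma;
+
+    down_read(&current->mm->mmap_sem);
+    vma = find_vma(current->mm, hva);
+    /* ... inspect/walk VMAs ... */
+    up_read(&current->mm->mmap_sem);
+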
+Fixes: commit 8eef91239e57 ("arm/arm64: KVM: map MMIO regions at creation time")
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Eric Auger <eric.auger@rehat.com>
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+[ Handle dirty page logging failure case ]
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/kvm/mmu.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index 5366a736151e..f91ee2f27b41 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -1761,6 +1761,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+           (KVM_PHYS_SIZE >> PAGE_SHIFT))
+               return -EFAULT;
++      down_read(&current->mm->mmap_sem);
+       /*
+        * A memory region could potentially cover multiple VMAs, and any holes
+        * between them, so iterate over all of them to find out if we can map
+@@ -1804,8 +1805,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                       pa += vm_start - vma->vm_start;
+                       /* IO region dirty page logging not allowed */
+-                      if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
+-                              return -EINVAL;
++                      if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
++                              ret = -EINVAL;
++                              goto out;
++                      }
+                       ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
+                                                   vm_end - vm_start,
+@@ -1817,7 +1820,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+       } while (hva < reg_end);
+       if (change == KVM_MR_FLAGS_ONLY)
+-              return ret;
++              goto out;
+       spin_lock(&kvm->mmu_lock);
+       if (ret)
+@@ -1825,6 +1828,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+       else
+               stage2_flush_memslot(kvm, memslot);
+       spin_unlock(&kvm->mmu_lock);
++out:
++      up_read(&current->mm->mmap_sem);
+       return ret;
+ }
+-- 
+2.12.2
+
+From 8e88806117e4868bc459a3042e55f8bf06c0b9e0 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Thu, 16 Mar 2017 18:20:49 +0000
+Subject: [PATCH 09/52] arm/arm64: KVM: Take mmap_sem in stage2_unmap_vm
+Content-Length: 1130
+Lines: 37
+
+commit 90f6e150e44a0dc3883110eeb3ab35d1be42b6bb upstream.
+
+We don't hold the mmap_sem while searching for the VMAs when
+we try to unmap each memslot for a VM. Fix this properly to
+avoid unexpected results.
+
+Fixes: commit 957db105c997 ("arm/arm64: KVM: Introduce stage2_unmap_vm")
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/kvm/mmu.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index 11b6595c2672..5366a736151e 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -796,6 +796,7 @@ void stage2_unmap_vm(struct kvm *kvm)
+       int idx;
+       idx = srcu_read_lock(&kvm->srcu);
++      down_read(&current->mm->mmap_sem);
+       spin_lock(&kvm->mmu_lock);
+       slots = kvm_memslots(kvm);
+@@ -803,6 +804,7 @@ void stage2_unmap_vm(struct kvm *kvm)
+               stage2_unmap_memslot(kvm, memslot);
+       spin_unlock(&kvm->mmu_lock);
++      up_read(&current->mm->mmap_sem);
+       srcu_read_unlock(&kvm->srcu, idx);
+ }
+-- 
+2.12.2
+
+From 193b590c71cd4c1fd54f4b4cab1ba73b6212c073 Mon Sep 17 00:00:00 2001
+From: Shuxiao Zhang <zhangshuxiao@xiaomi.com>
+Date: Thu, 6 Apr 2017 22:30:29 +0800
+Subject: [PATCH 08/52] staging: android: ashmem: lseek failed due to no
+ FMODE_LSEEK.
+Content-Length: 1274
+Lines: 35
+
+commit 97fbfef6bd597888485b653175fb846c6998b60c upstream.
+
+vfs_llseek() checks whether the file mode has FMODE_LSEEK and
+returns failure if it does not. But ashmem files are seekable,
+so add FMODE_LSEEK to the ashmem file.
+
+Comment From Greg Hackmann:
+       ashmem_llseek() passes the llseek() call through to the backing
+       shmem file.  91360b02ab48 ("ashmem: use vfs_llseek()") changed
+       this from directly calling the file's llseek() op into a VFS
+       layer call.  This also adds a check for the FMODE_LSEEK bit, so
+       without that bit ashmem_llseek() now always fails with -ESPIPE.
+
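+For reference, a simplified sketch of the VFS-layer check involved
+(approximate; see fs/read_write.c for the real code):
+
+    loff_t vfs_llseek(struct file *file, loff_t offset, int whence)
+    {
+            if (!(file->f_mode & FMODE_LSEEK) || !file->f_op->llseek)
+                    return -ESPIPE;         /* what no_llseek() returns */
+            return file->f_op->llseek(file, offset, whence);
+    }
+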
+Fixes: 91360b02ab48 ("ashmem: use vfs_llseek()")
+Signed-off-by: Shuxiao Zhang <zhangshuxiao@xiaomi.com>
+Tested-by: Greg Hackmann <ghackmann@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/staging/android/ashmem.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
+index 3f2a3d611e4b..9c6357c03905 100644
+--- a/drivers/staging/android/ashmem.c
++++ b/drivers/staging/android/ashmem.c
+@@ -392,6 +392,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
+                       ret = PTR_ERR(vmfile);
+                       goto out;
+               }
++              vmfile->f_mode |= FMODE_LSEEK;
+               asma->file = vmfile;
+       }
+       get_file(asma->file);
+-- 
+2.12.2
+
+From 69d8d58bf50d9cd1bb6f000bbdf54026e74717a3 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.com>
+Date: Mon, 3 Apr 2017 11:30:34 +1000
+Subject: [PATCH 07/52] sysfs: be careful of error returns from ops->show()
+Content-Length: 2847
+Lines: 76
+
+commit c8a139d001a1aab1ea8734db14b22dac9dd143b6 upstream.
+
+ops->show() can return a negative error code.
+Commit 65da3484d9be ("sysfs: correctly handle short reads on PREALLOC attrs.")
+(in v4.4) caused this to be stored in an unsigned 'size_t' variable, so errors
+would look like large numbers.
+As a result, if an error is returned, sysfs_kf_read() will return the
+value of 'count', typically 4096.
+
+Commit 17d0774f8068 ("sysfs: correctly handle read offset on PREALLOC attrs")
+(in v4.8) extended this bug by using the large unsigned 'len' as the size for
+memmove().
+Consequently, if ->show returns an error, then the first read() on the
+sysfs file will return 4096 and could return uninitialized memory to
+user-space.
+If the application performs a subsequent read, this will trigger a memmove()
+with an extremely large count, and is likely to crash the machine in bizarre
+ways.
+
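+The signed/unsigned mixup in miniature (hypothetical error value):
+
+    size_t len = ops->show(kobj, of->kn->priv, buf); /* returns -EIO   */
+    /* len is now e.g. 0xfffffffffffffffb, a huge positive number, so  */
+    return min(count, len);  /* min() picks count (typically 4096)     */
+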
+This bug can currently only be triggered by reading from an md
+sysfs attribute declared with __ATTR_PREALLOC() during the
+brief period between when mddev_put() deletes an mddev from
+the ->all_mddevs list, and when mddev_delayed_delete() - which is
+scheduled on a workqueue - completes.
+Before this, an error won't be returned by the ->show().
+After this, the ->show() won't be called.
+
+I can reproduce it reliably only by putting delay like
+       usleep_range(500000,700000);
+early in mddev_delayed_delete(). Then after creating an
+md device md0 run
+  echo clear > /sys/block/md0/md/array_state; cat /sys/block/md0/md/array_state
+
+The bug can be triggered without the usleep.
+
+Fixes: 65da3484d9be ("sysfs: correctly handle short reads on PREALLOC attrs.")
+Fixes: 17d0774f8068 ("sysfs: correctly handle read offset on PREALLOC attrs")
+Signed-off-by: NeilBrown <neilb@suse.com>
+Acked-by: Tejun Heo <tj@kernel.org>
+Reported-and-tested-by: Miroslav Benes <mbenes@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/sysfs/file.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
+index b803213d1307..39c75a86c67f 100644
+--- a/fs/sysfs/file.c
++++ b/fs/sysfs/file.c
+@@ -108,7 +108,7 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
+ {
+       const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
+       struct kobject *kobj = of->kn->parent->priv;
+-      size_t len;
++      ssize_t len;
+       /*
+        * If buf != of->prealloc_buf, we don't know how
+@@ -117,13 +117,15 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
+       if (WARN_ON_ONCE(buf != of->prealloc_buf))
+               return 0;
+       len = ops->show(kobj, of->kn->priv, buf);
++      if (len < 0)
++              return len;
+       if (pos) {
+               if (len <= pos)
+                       return 0;
+               len -= pos;
+               memmove(buf, buf + pos, len);
+       }
+-      return min(count, len);
++      return min_t(ssize_t, count, len);
+ }
+ /* kernfs write callback for regular sysfs files */
+-- 
+2.12.2
+
+From c21636bd64c511160846bdf87ef4c7ff48680c99 Mon Sep 17 00:00:00 2001
+From: Li Qiang <liq3ea@gmail.com>
+Date: Mon, 27 Mar 2017 20:10:53 -0700
+Subject: [PATCH 06/52] drm/vmwgfx: fix integer overflow in
+ vmw_surface_define_ioctl()
+Content-Length: 1355
+Lines: 35
+
+commit e7e11f99564222d82f0ce84bd521e57d78a6b678 upstream.
+
+In vmw_surface_define_ioctl(), the 'num_sizes' is the sum of the
+'req->mip_levels' array. This array can be assigned any value from
+the user space. As both 'num_sizes' and the array entries are uint32_t,
+it is easy to make 'num_sizes' overflow. 'mip_levels' is later used
+as the loop count. This can lead to an out-of-bounds write. Add a check
+of 'req->mip_levels' to avoid this.
+
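+A hypothetical pair of user-supplied values showing the wrap-around:
+
+    uint32_t num_sizes = 0;
+    uint32_t mip_levels[2] = { 0xffffffff, 2 };   /* user-controlled   */
+    num_sizes += mip_levels[0];
+    num_sizes += mip_levels[1];   /* wraps: num_sizes == 1             */
+    /* 1 passes the upper-bound check, yet the later loops still       */
+    /* honour the large per-face mip_levels[] values -> OOB write.     */
+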
+Signed-off-by: Li Qiang <liqiang6-s@360.cn>
+Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+index 79f78a68d92d..c9c04ccccdd9 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+@@ -715,8 +715,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+                       128;
+       num_sizes = 0;
+-      for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
++      for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
++              if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
++                      return -EINVAL;
+               num_sizes += req->mip_levels[i];
++      }
+       if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
+           num_sizes == 0)
+-- 
+2.12.2
+
+From 235e914069bd501be22597e6c0176f16b477ae37 Mon Sep 17 00:00:00 2001
+From: Thomas Hellstrom <thellstrom@vmware.com>
+Date: Mon, 27 Mar 2017 13:06:05 +0200
+Subject: [PATCH 05/52] drm/vmwgfx: Remove getparam error message
+Content-Length: 963
+Lines: 29
+
+commit 53e16798b0864464c5444a204e1bb93ae246c429 upstream.
+
+The mesa winsys sometimes uses unimplemented parameter requests to
+check for features. Remove the error message to avoid bloating the
+kernel log.
+
+Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
+Reviewed-by: Brian Paul <brianp@vmware.com>
+Reviewed-by: Sinclair Yeh <syeh@vmware.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+index 1802d0e7fab8..5ec24fd801cd 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+@@ -114,8 +114,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
+               param->value = dev_priv->has_dx;
+               break;
+       default:
+-              DRM_ERROR("Illegal vmwgfx get param request: %d\n",
+-                        param->param);
+               return -EINVAL;
+       }
+-- 
+2.12.2
+
+From ad4ae2feef4f65b860f139e0d8455e2a16efb93c Mon Sep 17 00:00:00 2001
+From: Thomas Hellstrom <thellstrom@vmware.com>
+Date: Mon, 27 Mar 2017 11:21:25 +0200
+Subject: [PATCH 04/52] drm/ttm, drm/vmwgfx: Relax permission checking when
+ opening surfaces
+Content-Length: 6721
+Lines: 176
+
+commit fe25deb7737ce6c0879ccf79c99fa1221d428bf2 upstream.
+
+Previously, when a surface was opened using a legacy (non-prime) handle,
+it was verified to have been created by a client in the same master realm.
+Relax this so that opening is also allowed recursively if the client
+already has the surface open.
+
+This works around a regression in svga mesa where opening of a shared
+surface is used recursively to obtain surface information.
+
+Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
+Reviewed-by: Sinclair Yeh <syeh@vmware.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/ttm/ttm_object.c         | 10 +++++++---
+ drivers/gpu/drm/vmwgfx/vmwgfx_fence.c    |  6 ++----
+ drivers/gpu/drm/vmwgfx/vmwgfx_resource.c |  4 ++--
+ drivers/gpu/drm/vmwgfx/vmwgfx_surface.c  | 22 +++++++++-------------
+ include/drm/ttm/ttm_object.h             |  5 ++++-
+ 5 files changed, 24 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
+index 4f5fa8d65fe9..144367c0c28f 100644
+--- a/drivers/gpu/drm/ttm/ttm_object.c
++++ b/drivers/gpu/drm/ttm/ttm_object.c
+@@ -179,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
+       if (unlikely(ret != 0))
+               goto out_err0;
+-      ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
++      ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
+       if (unlikely(ret != 0))
+               goto out_err1;
+@@ -318,7 +318,8 @@ EXPORT_SYMBOL(ttm_ref_object_exists);
+ int ttm_ref_object_add(struct ttm_object_file *tfile,
+                      struct ttm_base_object *base,
+-                     enum ttm_ref_type ref_type, bool *existed)
++                     enum ttm_ref_type ref_type, bool *existed,
++                     bool require_existed)
+ {
+       struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+       struct ttm_ref_object *ref;
+@@ -345,6 +346,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
+               }
+               rcu_read_unlock();
++              if (require_existed)
++                      return -EPERM;
++
+               ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
+                                          false, false);
+               if (unlikely(ret != 0))
+@@ -635,7 +639,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
+       prime = (struct ttm_prime_object *) dma_buf->priv;
+       base = &prime->base;
+       *handle = base->hash.key;
+-      ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
++      ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
+       dma_buf_put(dma_buf);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+index b2f329917eda..6c649f7b5929 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+@@ -1144,10 +1144,8 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+               (void) vmw_fence_obj_reference(fence);
+               if (user_fence_rep != NULL) {
+-                      bool existed;
+-
+-                      ret = ttm_ref_object_add(tfile, base,
+-                                               TTM_REF_USAGE, &existed);
++                      ret = ttm_ref_object_add(vmw_fp->tfile, base,
++                                               TTM_REF_USAGE, NULL, false);
+                       if (unlikely(ret != 0)) {
+                               DRM_ERROR("Failed to reference a fence "
+                                         "object.\n");
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+index e57667ca7557..dbca128a9aa6 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+@@ -591,7 +591,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
+               return ret;
+       ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
+-                               TTM_REF_SYNCCPU_WRITE, &existed);
++                               TTM_REF_SYNCCPU_WRITE, &existed, false);
+       if (ret != 0 || existed)
+               ttm_bo_synccpu_write_release(&user_bo->dma.base);
+@@ -775,7 +775,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
+       *handle = user_bo->prime.base.hash.key;
+       return ttm_ref_object_add(tfile, &user_bo->prime.base,
+-                                TTM_REF_USAGE, NULL);
++                                TTM_REF_USAGE, NULL, false);
+ }
+ /*
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+index b363f0be6512..79f78a68d92d 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+@@ -904,17 +904,16 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
+       uint32_t handle;
+       struct ttm_base_object *base;
+       int ret;
++      bool require_exist = false;
+       if (handle_type == DRM_VMW_HANDLE_PRIME) {
+               ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
+               if (unlikely(ret != 0))
+                       return ret;
+       } else {
+-              if (unlikely(drm_is_render_client(file_priv))) {
+-                      DRM_ERROR("Render client refused legacy "
+-                                "surface reference.\n");
+-                      return -EACCES;
+-              }
++              if (unlikely(drm_is_render_client(file_priv)))
++                      require_exist = true;
++
+               if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
+                       DRM_ERROR("Locked master refused legacy "
+                                 "surface reference.\n");
+@@ -942,17 +941,14 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
+               /*
+                * Make sure the surface creator has the same
+-               * authenticating master.
++               * authenticating master, or is already registered with us.
+                */
+               if (drm_is_primary_client(file_priv) &&
+-                  user_srf->master != file_priv->master) {
+-                      DRM_ERROR("Trying to reference surface outside of"
+-                                " master domain.\n");
+-                      ret = -EACCES;
+-                      goto out_bad_resource;
+-              }
++                  user_srf->master != file_priv->master)
++                      require_exist = true;
+-              ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
++              ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
++                                       require_exist);
+               if (unlikely(ret != 0)) {
+                       DRM_ERROR("Could not add a reference to a surface.\n");
+                       goto out_bad_resource;
+diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
+index ed953f98f0e1..1487011fe057 100644
+--- a/include/drm/ttm/ttm_object.h
++++ b/include/drm/ttm/ttm_object.h
+@@ -229,6 +229,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
+  * @ref_type: The type of reference.
+  * @existed: Upon completion, indicates that an identical reference object
+  * already existed, and the refcount was upped on that object instead.
++ * @require_existed: Fail with -EPERM if an identical ref object didn't
++ * already exist.
+  *
+  * Checks that the base object is shareable and adds a ref object to it.
+  *
+@@ -243,7 +245,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
+  */
+ extern int ttm_ref_object_add(struct ttm_object_file *tfile,
+                             struct ttm_base_object *base,
+-                            enum ttm_ref_type ref_type, bool *existed);
++                            enum ttm_ref_type ref_type, bool *existed,
++                            bool require_existed);
+ extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
+                                 struct ttm_base_object *base);
+-- 
+2.12.2
+
+From 0e075f266749ea6507758123f553fece6664e4e2 Mon Sep 17 00:00:00 2001
+From: Murray McAllister <murray.mcallister@insomniasec.com>
+Date: Mon, 27 Mar 2017 11:15:12 +0200
+Subject: [PATCH 03/52] drm/vmwgfx: avoid calling vzalloc with a 0 size in
+ vmw_get_cap_3d_ioctl()
+Content-Length: 1144
+Lines: 30
+
+commit 63774069d9527a1aeaa4aa20e929ef5e8e9ecc38 upstream.
+
+In vmw_get_cap_3d_ioctl(), a user can supply 0 for a size that is
+used in vzalloc(). This eventually calls dump_stack() (in warn_alloc()),
+which can leak useful addresses to dmesg.
+
+Add a check to avoid a size of 0.
+
+Signed-off-by: Murray McAllister <murray.mcallister@insomniasec.com>
+Reviewed-by: Sinclair Yeh <syeh@vmware.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+index b8c6a03c8c54..1802d0e7fab8 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+@@ -186,7 +186,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
+       bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
+       struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+-      if (unlikely(arg->pad64 != 0)) {
++      if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
+               DRM_ERROR("Illegal GET_3D_CAP argument.\n");
+               return -EINVAL;
+       }
+-- 
+2.12.2
+
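+As a self-contained sketch of the argument check added above (the struct
+and function names are hypothetical stand-ins for the ioctl plumbing),
+the rejected case returns -EINVAL before any allocation can happen:
+
+    #include <errno.h>
+    #include <stdint.h>
+    #include <stdio.h>
+
+    /* Hypothetical stand-in for the GET_3D_CAP ioctl argument. */
+    struct cap_3d_arg {
+            uint32_t max_size;
+            uint32_t pad64;
+    };
+
+    /* Reject padding misuse and a zero size before the value can reach
+     * the allocator; vzalloc(0) would warn and dump a stack trace. */
+    static int check_cap_3d_arg(const struct cap_3d_arg *arg)
+    {
+            if (arg->pad64 != 0 || arg->max_size == 0)
+                    return -EINVAL;
+            return 0;
+    }
+
+    int main(void)
+    {
+            struct cap_3d_arg zero = { .max_size = 0,   .pad64 = 0 };
+            struct cap_3d_arg ok   = { .max_size = 256, .pad64 = 0 };
+
+            printf("zero size: %d\n", check_cap_3d_arg(&zero)); /* -EINVAL */
+            printf("valid:     %d\n", check_cap_3d_arg(&ok));   /* 0 */
+            return 0;
+    }
+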
+From b26629453c7b2a6c82000b36fbd1cfc4d9101808 Mon Sep 17 00:00:00 2001
+From: Murray McAllister <murray.mcallister@insomniasec.com>
+Date: Mon, 27 Mar 2017 11:12:53 +0200
+Subject: [PATCH 02/52] drm/vmwgfx: NULL pointer dereference in
+ vmw_surface_define_ioctl()
+Content-Length: 1176
+Lines: 32
+
+commit 36274ab8c596f1240c606bb514da329add2a1bcd upstream.
+
+Before allocating memory, vmw_surface_define_ioctl() checks the upper
+bound of a user-supplied size, but does not check whether the supplied
+size is 0.
+
+Add a check to avoid NULL pointer dereferences.
+
+Signed-off-by: Murray McAllister <murray.mcallister@insomniasec.com>
+Reviewed-by: Sinclair Yeh <syeh@vmware.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+index 7d620e82e000..b363f0be6512 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+@@ -718,8 +718,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+       for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+               num_sizes += req->mip_levels[i];
+-      if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
+-          DRM_VMW_MAX_MIP_LEVELS)
++      if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
++          num_sizes == 0)
+               return -EINVAL;
+       size = vmw_user_surface_size + 128 +
+-- 
+2.12.2
+
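+A small standalone sketch of the bounds check above; the macro values
+are hypothetical stand-ins for the DRM_VMW_* limits:
+
+    #include <errno.h>
+    #include <stdint.h>
+    #include <stdio.h>
+
+    #define MAX_SURFACE_FACES 6   /* stand-in for DRM_VMW_MAX_SURFACE_FACES */
+    #define MAX_MIP_LEVELS    24  /* stand-in for DRM_VMW_MAX_MIP_LEVELS */
+
+    /* Sum the per-face mip level counts; reject a zero total (which would
+     * later produce a zero-sized allocation and a NULL pointer dereference)
+     * as well as an over-limit total. */
+    static int validate_mip_levels(const uint32_t mip_levels[MAX_SURFACE_FACES])
+    {
+            uint32_t num_sizes = 0;
+            int i;
+
+            for (i = 0; i < MAX_SURFACE_FACES; ++i)
+                    num_sizes += mip_levels[i];
+
+            if (num_sizes > MAX_SURFACE_FACES * MAX_MIP_LEVELS ||
+                num_sizes == 0)
+                    return -EINVAL;
+            return 0;
+    }
+
+    int main(void)
+    {
+            uint32_t all_zero[MAX_SURFACE_FACES] = { 0 };
+            uint32_t one_face[MAX_SURFACE_FACES] = { 1 };
+
+            printf("all zero: %d\n", validate_mip_levels(all_zero)); /* -EINVAL */
+            printf("one face: %d\n", validate_mip_levels(one_face)); /* 0 */
+            return 0;
+    }
+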
+From ed528923541afc1228c5a66e98845148aca51e24 Mon Sep 17 00:00:00 2001
+From: Thomas Hellstrom <thellstrom@vmware.com>
+Date: Mon, 27 Mar 2017 11:09:08 +0200
+Subject: [PATCH 01/52] drm/vmwgfx: Type-check lookups of fence objects
+Content-Length: 5168
+Lines: 154
+
+commit f7652afa8eadb416b23eb57dec6f158529942041 upstream.
+
+A malicious caller could otherwise hand over handles to objects of other
+types, causing all sorts of interesting problems.
+
+Testing done: Ran a Fedora 25 desktop using both Xorg and
+gnome-shell/Wayland.
+
+Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
+Reviewed-by: Sinclair Yeh <syeh@vmware.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 77 +++++++++++++++++++++++------------
+ 1 file changed, 50 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+index 8e689b439890..b2f329917eda 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+@@ -539,7 +539,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
+                    struct vmw_fence_obj **p_fence)
+ {
+       struct vmw_fence_obj *fence;
+-      int ret;
++      int ret;
+       fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+       if (unlikely(fence == NULL))
+@@ -702,6 +702,41 @@ void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
+ }
++/**
++ * vmw_fence_obj_lookup - Look up a user-space fence object
++ *
++ * @tfile: A struct ttm_object_file identifying the caller.
++ * @handle: A handle identifying the fence object.
++ * @return: A struct vmw_user_fence base ttm object on success or
++ * an error pointer on failure.
++ *
++ * The fence object is looked up and type-checked. The caller needs
++ * to have opened the fence object first, but since that happens on
++ * creation and fence objects aren't shareable, that's not an
++ * issue currently.
++ */
++static struct ttm_base_object *
++vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
++{
++      struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
++
++      if (!base) {
++              pr_err("Invalid fence object handle 0x%08lx.\n",
++                     (unsigned long)handle);
++              return ERR_PTR(-EINVAL);
++      }
++
++      if (base->refcount_release != vmw_user_fence_base_release) {
++              pr_err("Invalid fence object handle 0x%08lx.\n",
++                     (unsigned long)handle);
++              ttm_base_object_unref(&base);
++              return ERR_PTR(-EINVAL);
++      }
++
++      return base;
++}
++
++
+ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
+                            struct drm_file *file_priv)
+ {
+@@ -727,13 +762,9 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
+               arg->kernel_cookie = jiffies + wait_timeout;
+       }
+-      base = ttm_base_object_lookup(tfile, arg->handle);
+-      if (unlikely(base == NULL)) {
+-              printk(KERN_ERR "Wait invalid fence object handle "
+-                     "0x%08lx.\n",
+-                     (unsigned long)arg->handle);
+-              return -EINVAL;
+-      }
++      base = vmw_fence_obj_lookup(tfile, arg->handle);
++      if (IS_ERR(base))
++              return PTR_ERR(base);
+       fence = &(container_of(base, struct vmw_user_fence, base)->fence);
+@@ -772,13 +803,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       struct vmw_private *dev_priv = vmw_priv(dev);
+-      base = ttm_base_object_lookup(tfile, arg->handle);
+-      if (unlikely(base == NULL)) {
+-              printk(KERN_ERR "Fence signaled invalid fence object handle "
+-                     "0x%08lx.\n",
+-                     (unsigned long)arg->handle);
+-              return -EINVAL;
+-      }
++      base = vmw_fence_obj_lookup(tfile, arg->handle);
++      if (IS_ERR(base))
++              return PTR_ERR(base);
+       fence = &(container_of(base, struct vmw_user_fence, base)->fence);
+       fman = fman_from_fence(fence);
+@@ -1093,6 +1120,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+               (struct drm_vmw_fence_event_arg *) data;
+       struct vmw_fence_obj *fence = NULL;
+       struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
++      struct ttm_object_file *tfile = vmw_fp->tfile;
+       struct drm_vmw_fence_rep __user *user_fence_rep =
+               (struct drm_vmw_fence_rep __user *)(unsigned long)
+               arg->fence_rep;
+@@ -1106,15 +1134,11 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+        */
+       if (arg->handle) {
+               struct ttm_base_object *base =
+-                      ttm_base_object_lookup_for_ref(dev_priv->tdev,
+-                                                     arg->handle);
+-
+-              if (unlikely(base == NULL)) {
+-                      DRM_ERROR("Fence event invalid fence object handle "
+-                                "0x%08lx.\n",
+-                                (unsigned long)arg->handle);
+-                      return -EINVAL;
+-              }
++                      vmw_fence_obj_lookup(tfile, arg->handle);
++
++              if (IS_ERR(base))
++                      return PTR_ERR(base);
++
+               fence = &(container_of(base, struct vmw_user_fence,
+                                      base)->fence);
+               (void) vmw_fence_obj_reference(fence);
+@@ -1122,7 +1146,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+               if (user_fence_rep != NULL) {
+                       bool existed;
+-                      ret = ttm_ref_object_add(vmw_fp->tfile, base,
++                      ret = ttm_ref_object_add(tfile, base,
+                                                TTM_REF_USAGE, &existed);
+                       if (unlikely(ret != 0)) {
+                               DRM_ERROR("Failed to reference a fence "
+@@ -1166,8 +1190,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+       return 0;
+ out_no_create:
+       if (user_fence_rep != NULL)
+-              ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+-                                        handle, TTM_REF_USAGE);
++              ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+ out_no_ref_obj:
+       vmw_fence_obj_unreference(&fence);
+       return ret;
+-- 
+2.12.2
+
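+A compact standalone sketch of the type-check idea behind
+vmw_fence_obj_lookup() above: a handle lookup yields a generic base
+object, and the per-type release function pointer doubles as the type
+tag. All names below are hypothetical stand-ins, not the kernel API.
+
+    #include <stdio.h>
+
+    struct base_object {
+            void (*refcount_release)(struct base_object **p_base);
+    };
+
+    static void fence_release(struct base_object **p_base)   { (void)p_base; }
+    static void surface_release(struct base_object **p_base) { (void)p_base; }
+
+    /* Accept the looked-up object only if it really is a fence; a handle
+     * that resolves to another object type is rejected instead of being
+     * blindly cast. */
+    static struct base_object *fence_lookup(struct base_object *base)
+    {
+            if (!base || base->refcount_release != fence_release)
+                    return NULL;
+            return base;
+    }
+
+    int main(void)
+    {
+            struct base_object fence   = { .refcount_release = fence_release };
+            struct base_object surface = { .refcount_release = surface_release };
+
+            printf("fence handle:   %s\n", fence_lookup(&fence)   ? "ok" : "rejected");
+            printf("surface handle: %s\n", fence_lookup(&surface) ? "ok" : "rejected");
+            return 0;
+    }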